author    Peter Zijlstra 2020-01-31 13:45:35 +0100
committer Geert Uytterhoeven 2020-02-10 10:57:48 +0100
commit    5ad272abee9fe0a781d49b03f334c0a3b3e418c1 (patch)
tree      56bda1957ead57e29ffcfe23454d360baea5108f /arch/m68k/mm
parent    13076a29d52e91d29ab6b13e7279c9eacd0b6dbb (diff)
m68k: mm: Move the pointer table allocator to motorola.c
Only the Motorola MMU makes use of this allocator, so it is a waste of
.text to include it for Sun3/ColdFire. Moving it will also avoid build
issues when the allocator is made more Motorola-specific later in this
series.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Greg Ungerer <gerg@linux-m68k.org>
Tested-by: Michael Schmitz <schmitzmic@gmail.com>
Tested-by: Greg Ungerer <gerg@linux-m68k.org>
Link: https://lore.kernel.org/r/20200131125403.654652162@infradead.org
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
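[Editor's note] For context on what is being moved: the allocator hands out
pointer tables by sub-dividing a page and tracking free slots in one per-page
byte of mark bits (PD_MARKBITS), one bit per table, with a set bit meaning the
slot is free. The following standalone sketch (illustrative only, not kernel
code; the 512-byte slot size assumes PTRS_PER_PMD == 128 and a 4-byte pmd_t,
matching the "eight tables per page" comment in the code below) shows the same
mark-bit scheme in miniature:

/*
 * Standalone sketch of the mark-bit allocator being moved below
 * (illustrative only, not kernel code). One byte per page records
 * which of its eight 512-byte pointer tables are free; a set bit
 * means the slot is free.
 */
#include <stdio.h>

#define SLOT_SIZE 512                 /* stands in for PTABLE_SIZE */
#define NR_SLOTS  8                   /* PAGE_SIZE / PTABLE_SIZE */

static unsigned char markbits = 0xff; /* all eight slots start free */

/* Claim the first free slot; return its byte offset, or -1 if full. */
static int alloc_slot(void)
{
	unsigned char tmp;
	int off;

	for (tmp = 1, off = 0; tmp; tmp <<= 1, off += SLOT_SIZE) {
		if (markbits & tmp) {
			markbits &= ~tmp;
			return off;
		}
	}
	return -1;
}

/* Release a slot by setting its mark bit again. */
static void free_slot(int off)
{
	markbits |= 1 << (off / SLOT_SIZE);
}

int main(void)
{
	int a = alloc_slot();         /* offset 0, markbits now 0xfe */
	int b = alloc_slot();         /* offset 512, markbits now 0xfc */

	free_slot(a);                 /* markbits back to 0xfd */
	printf("a=%d b=%d markbits=%#x\n", a, b, markbits);
	return 0;
}

The kernel version additionally keeps the backing pages on ptable_list so
that pages with free slots are found first, and frees a page outright once
all eight bits are set again.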
Diffstat (limited to 'arch/m68k/mm')
-rw-r--r--  arch/m68k/mm/memory.c    102
-rw-r--r--  arch/m68k/mm/motorola.c  102
2 files changed, 102 insertions(+), 102 deletions(-)
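[Editor's note] A worked example of the offset-to-mask arithmetic used by
init_pointer_table() and free_pointer_table() in the hunks below (the
concrete address is hypothetical, and the 512-byte PTABLE_SIZE again
assumes PTRS_PER_PMD == 128 with 4-byte entries):

#include <stdio.h>

#define PAGE_MASK   (~0xfffUL)   /* assuming 4 KiB pages */
#define PTABLE_SIZE (128 * 4)    /* PTRS_PER_PMD * sizeof(pmd_t) = 512 */

int main(void)
{
	unsigned long ptable = 0x10000a00;  /* hypothetical table address */
	unsigned long page = ptable & PAGE_MASK;
	unsigned char mask = 1 << ((ptable - page) / PTABLE_SIZE);

	/* 0xa00 / 512 == 5, so this is slot 5 and mask == 0x20 */
	printf("page=%#lx mask=%#x\n", page, mask);
	return 0;
}

Since a set bit in PD_MARKBITS means "free", free_pointer_table() can use
this same mask to detect a double free and panic.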
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 36683f2a28d0..65e0c4071912 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -22,108 +22,6 @@
#include <asm/machdep.h>
-/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
- struct page instead of separately kmalloced struct. Stolen from
- arch/sparc/mm/srmmu.c ... */
-
-typedef struct list_head ptable_desc;
-static LIST_HEAD(ptable_list);
-
-#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
-#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
-#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)
-
-#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
-
-void __init init_pointer_table(unsigned long ptable)
-{
- ptable_desc *dp;
- unsigned long page = ptable & PAGE_MASK;
- unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);
-
- dp = PD_PTABLE(page);
- if (!(PD_MARKBITS(dp) & mask)) {
- PD_MARKBITS(dp) = 0xff;
- list_add(dp, &ptable_list);
- }
-
- PD_MARKBITS(dp) &= ~mask;
- pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
-
- /* unreserve the page so it's possible to free that page */
- __ClearPageReserved(PD_PAGE(dp));
- init_page_count(PD_PAGE(dp));
-
- return;
-}
-
-pmd_t *get_pointer_table (void)
-{
- ptable_desc *dp = ptable_list.next;
- unsigned char mask = PD_MARKBITS (dp);
- unsigned char tmp;
- unsigned int off;
-
- /*
- * For a pointer table for a user process address space, a
- * table is taken from a page allocated for the purpose. Each
- * page can hold 8 pointer tables. The page is remapped in
- * virtual address space to be noncacheable.
- */
- if (mask == 0) {
- void *page;
- ptable_desc *new;
-
- if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
- return NULL;
-
- mmu_page_ctor(page);
-
- new = PD_PTABLE(page);
- PD_MARKBITS(new) = 0xfe;
- list_add_tail(new, dp);
-
- return (pmd_t *)page;
- }
-
- for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
- ;
- PD_MARKBITS(dp) = mask & ~tmp;
- if (!PD_MARKBITS(dp)) {
- /* move to end of list */
- list_move_tail(dp, &ptable_list);
- }
- return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
-}
-
-int free_pointer_table (pmd_t *ptable)
-{
- ptable_desc *dp;
- unsigned long page = (unsigned long)ptable & PAGE_MASK;
- unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);
-
- dp = PD_PTABLE(page);
- if (PD_MARKBITS (dp) & mask)
- panic ("table already free!");
-
- PD_MARKBITS (dp) |= mask;
-
- if (PD_MARKBITS(dp) == 0xff) {
- /* all tables in page are free, free page */
- list_del(dp);
- mmu_page_dtor((void *)page);
- free_page (page);
- return 1;
- } else if (ptable_list.next != dp) {
- /*
- * move this descriptor to the front of the list, since
- * it has one or more free tables.
- */
- list_move(dp, &ptable_list);
- }
- return 0;
-}
-
/* invalidate page in both caches */
static inline void clear040(unsigned long paddr)
{
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index be61f35a7432..2102f9397c94 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -67,6 +67,108 @@ void mmu_page_dtor(void *page)
cache_page(page);
}
+/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
+ struct page instead of separately kmalloced struct. Stolen from
+ arch/sparc/mm/srmmu.c ... */
+
+typedef struct list_head ptable_desc;
+static LIST_HEAD(ptable_list);
+
+#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
+#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
+#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)
+
+#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
+
+void __init init_pointer_table(unsigned long ptable)
+{
+ ptable_desc *dp;
+ unsigned long page = ptable & PAGE_MASK;
+ unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);
+
+ dp = PD_PTABLE(page);
+ if (!(PD_MARKBITS(dp) & mask)) {
+ PD_MARKBITS(dp) = 0xff;
+ list_add(dp, &ptable_list);
+ }
+
+ PD_MARKBITS(dp) &= ~mask;
+ pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
+
+ /* unreserve the page so it's possible to free that page */
+ __ClearPageReserved(PD_PAGE(dp));
+ init_page_count(PD_PAGE(dp));
+
+ return;
+}
+
+pmd_t *get_pointer_table (void)
+{
+ ptable_desc *dp = ptable_list.next;
+ unsigned char mask = PD_MARKBITS (dp);
+ unsigned char tmp;
+ unsigned int off;
+
+ /*
+ * For a pointer table for a user process address space, a
+ * table is taken from a page allocated for the purpose. Each
+ * page can hold 8 pointer tables. The page is remapped in
+ * virtual address space to be noncacheable.
+ */
+ if (mask == 0) {
+ void *page;
+ ptable_desc *new;
+
+ if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
+ return NULL;
+
+ mmu_page_ctor(page);
+
+ new = PD_PTABLE(page);
+ PD_MARKBITS(new) = 0xfe;
+ list_add_tail(new, dp);
+
+ return (pmd_t *)page;
+ }
+
+ for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
+ ;
+ PD_MARKBITS(dp) = mask & ~tmp;
+ if (!PD_MARKBITS(dp)) {
+ /* move to end of list */
+ list_move_tail(dp, &ptable_list);
+ }
+ return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
+}
+
+int free_pointer_table (pmd_t *ptable)
+{
+ ptable_desc *dp;
+ unsigned long page = (unsigned long)ptable & PAGE_MASK;
+ unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);
+
+ dp = PD_PTABLE(page);
+ if (PD_MARKBITS (dp) & mask)
+ panic ("table already free!");
+
+ PD_MARKBITS (dp) |= mask;
+
+ if (PD_MARKBITS(dp) == 0xff) {
+ /* all tables in page are free, free page */
+ list_del(dp);
+ mmu_page_dtor((void *)page);
+ free_page (page);
+ return 1;
+ } else if (ptable_list.next != dp) {
+ /*
+ * move this descriptor to the front of the list, since
+ * it has one or more free tables.
+ */
+ list_move(dp, &ptable_list);
+ }
+ return 0;
+}
+
/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;