path: root/include
author	Linus Torvalds	2018-06-07 18:39:37 -0700
committer	Linus Torvalds	2018-06-07 18:39:37 -0700
commit	68abbe729567cef128b2c2141f2ed2567f3b8372 (patch)
tree	aa75c39cc815eee4d7cc8db2988fe10879fccd3e /include
parent	ba1b7309fc2e909a5828c36a7cd187e5d7df6f53 (diff)
parent	016e92da037e0b43dd5e5848c19b0b9749506963 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:

 - a few misc things
 - ocfs2 updates
 - v9fs updates
 - MM
 - procfs updates
 - lib/ updates
 - autofs updates

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (118 commits)
  autofs: small cleanup in autofs_getpath()
  autofs: clean up includes
  autofs: comment on selinux changes needed for module autoload
  autofs: update MAINTAINERS entry for autofs
  autofs: use autofs instead of autofs4 in documentation
  autofs: rename autofs documentation files
  autofs: create autofs Kconfig and Makefile
  autofs: delete fs/autofs4 source files
  autofs: update fs/autofs4/Makefile
  autofs: update fs/autofs4/Kconfig
  autofs: copy autofs4 to autofs
  autofs4: use autofs instead of autofs4 everywhere
  autofs4: merge auto_fs.h and auto_fs4.h
  fs/binfmt_misc.c: do not allow offset overflow
  checkpatch: improve patch recognition
  lib/ucs2_string.c: add MODULE_LICENSE()
  lib/mpi: headers cleanup
  lib/percpu_ida.c: use _irqsave() instead of local_irq_save() + spin_lock
  lib/idr.c: remove simple_ida_lock
  lib/bitmap.c: micro-optimization for __bitmap_complement()
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/int-ll64.h  19
-rw-r--r--  include/linux/dax.h  4
-rw-r--r--  include/linux/gfp.h  14
-rw-r--r--  include/linux/hmm.h  8
-rw-r--r--  include/linux/kernel.h  1
-rw-r--r--  include/linux/ksm.h  17
-rw-r--r--  include/linux/memcontrol.h  51
-rw-r--r--  include/linux/memfd.h  16
-rw-r--r--  include/linux/memory_hotplug.h  1
-rw-r--r--  include/linux/mm.h  12
-rw-r--r--  include/linux/mm_types.h  239
-rw-r--r--  include/linux/mpi.h  61
-rw-r--r--  include/linux/page-flags.h  51
-rw-r--r--  include/linux/page_counter.h  26
-rw-r--r--  include/linux/pfn_t.h  4
-rw-r--r--  include/linux/sched/mm.h  4
-rw-r--r--  include/linux/shmem_fs.h  13
-rw-r--r--  include/linux/slab_def.h  7
-rw-r--r--  include/linux/slub_def.h  1
-rw-r--r--  include/linux/types.h  34
-rw-r--r--  include/linux/userfaultfd_k.h  6
-rw-r--r--  include/uapi/linux/auto_fs.h  169
-rw-r--r--  include/uapi/linux/auto_fs4.h  153
-rw-r--r--  include/uapi/linux/kernel-page-flags.h  2
24 files changed, 437 insertions, 476 deletions
diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
index ffb68d67be5f..a248545f1e18 100644
--- a/include/asm-generic/int-ll64.h
+++ b/include/asm-generic/int-ll64.h
@@ -13,17 +13,14 @@
#ifndef __ASSEMBLY__
-typedef signed char s8;
-typedef unsigned char u8;
-
-typedef signed short s16;
-typedef unsigned short u16;
-
-typedef signed int s32;
-typedef unsigned int u32;
-
-typedef signed long long s64;
-typedef unsigned long long u64;
+typedef __s8 s8;
+typedef __u8 u8;
+typedef __s16 s16;
+typedef __u16 u16;
+typedef __s32 s32;
+typedef __u32 u32;
+typedef __s64 s64;
+typedef __u64 u64;
#define S8_C(x) x
#define U8_C(x) x ## U
diff --git a/include/linux/dax.h b/include/linux/dax.h
index c99692ddd4b5..88504e87cd6c 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -125,8 +125,8 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
-int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
- pfn_t pfn);
+vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
+ enum page_entry_size pe_size, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index fc5ab85278d5..a6afcec53795 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -24,6 +24,7 @@ struct vm_area_struct;
#define ___GFP_HIGH 0x20u
#define ___GFP_IO 0x40u
#define ___GFP_FS 0x80u
+#define ___GFP_WRITE 0x100u
#define ___GFP_NOWARN 0x200u
#define ___GFP_RETRY_MAYFAIL 0x400u
#define ___GFP_NOFAIL 0x800u
@@ -36,11 +37,10 @@ struct vm_area_struct;
#define ___GFP_THISNODE 0x40000u
#define ___GFP_ATOMIC 0x80000u
#define ___GFP_ACCOUNT 0x100000u
-#define ___GFP_DIRECT_RECLAIM 0x400000u
-#define ___GFP_WRITE 0x800000u
-#define ___GFP_KSWAPD_RECLAIM 0x1000000u
+#define ___GFP_DIRECT_RECLAIM 0x200000u
+#define ___GFP_KSWAPD_RECLAIM 0x400000u
#ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP 0x2000000u
+#define ___GFP_NOLOCKDEP 0x800000u
#else
#define ___GFP_NOLOCKDEP 0
#endif
@@ -205,7 +205,7 @@ struct vm_area_struct;
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/*
@@ -343,7 +343,7 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
* 0x1 => DMA or NORMAL
* 0x2 => HIGHMEM or NORMAL
* 0x3 => BAD (DMA+HIGHMEM)
- * 0x4 => DMA32 or DMA or NORMAL
+ * 0x4 => DMA32 or NORMAL
* 0x5 => BAD (DMA+DMA32)
* 0x6 => BAD (HIGHMEM+DMA32)
* 0x7 => BAD (HIGHMEM+DMA32+DMA)
@@ -351,7 +351,7 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
* 0x9 => DMA or NORMAL (MOVABLE+DMA)
* 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too)
* 0xb => BAD (MOVABLE+HIGHMEM+DMA)
- * 0xc => DMA32 (MOVABLE+DMA32)
+ * 0xc => DMA32 or NORMAL (MOVABLE+DMA32)
* 0xd => BAD (MOVABLE+DMA32+DMA)
* 0xe => BAD (MOVABLE+DMA32+HIGHMEM)
* 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
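The corrected table above is driven by the low four ___GFP_* zone-modifier bits (DMA = 0x1, HIGHMEM = 0x2, DMA32 = 0x4, MOVABLE = 0x8). A minimal standalone sketch of that lookup, illustrative only and not the kernel's GFP_ZONE_TABLE machinery; config-dependent fallbacks (the "or NORMAL" cases) are only noted in comments:

    /* Illustrative only -- decodes the low four zone-modifier bits
     * exactly as the table above lists them. */
    enum zone_pick { PICK_NORMAL, PICK_DMA, PICK_DMA32, PICK_HIGHMEM,
                     PICK_MOVABLE, PICK_BAD };

    static enum zone_pick gfp_zone_sketch(unsigned int gfp_low_bits)
    {
        switch (gfp_low_bits & 0xf) {
        case 0x0: return PICK_NORMAL;   /* no modifier */
        case 0x1: return PICK_DMA;      /* NORMAL if !CONFIG_ZONE_DMA */
        case 0x2: return PICK_HIGHMEM;  /* NORMAL if !CONFIG_HIGHMEM */
        case 0x4: return PICK_DMA32;    /* NORMAL if !CONFIG_ZONE_DMA32 */
        case 0x8: return PICK_NORMAL;   /* MOVABLE alone */
        case 0x9: return PICK_DMA;      /* MOVABLE+DMA */
        case 0xa: return PICK_MOVABLE;  /* valid only with HIGHMEM */
        case 0xc: return PICK_DMA32;    /* MOVABLE+DMA32 */
        default:  return PICK_BAD;      /* conflicting zone modifiers */
        }
    }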
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 2f1327c37a63..4c92e3ba3e16 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -522,9 +522,7 @@ void hmm_devmem_remove(struct hmm_devmem *devmem);
static inline void hmm_devmem_page_set_drvdata(struct page *page,
unsigned long data)
{
- unsigned long *drvdata = (unsigned long *)&page->pgmap;
-
- drvdata[1] = data;
+ page->hmm_data = data;
}
/*
@@ -535,9 +533,7 @@ static inline void hmm_devmem_page_set_drvdata(struct page *page,
*/
static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page)
{
- const unsigned long *drvdata = (const unsigned long *)&page->pgmap;
-
- return drvdata[1];
+ return page->hmm_data;
}
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 7aed92624531..7c4e8f1f72d8 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -29,6 +29,7 @@
#define LLONG_MIN (-LLONG_MAX - 1)
#define ULLONG_MAX (~0ULL)
#define SIZE_MAX (~(size_t)0)
+#define PHYS_ADDR_MAX (~(phys_addr_t)0)
#define U8_MAX ((u8)~0U)
#define S8_MAX ((s8)(U8_MAX>>1))
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 44368b19b27e..161e8164abcf 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -37,17 +37,6 @@ static inline void ksm_exit(struct mm_struct *mm)
__ksm_exit(mm);
}
-static inline struct stable_node *page_stable_node(struct page *page)
-{
- return PageKsm(page) ? page_rmapping(page) : NULL;
-}
-
-static inline void set_page_stable_node(struct page *page,
- struct stable_node *stable_node)
-{
- page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
-}
-
/*
* When do_swap_page() first faults in from swap what used to be a KSM page,
* no problem, it will be assigned to this vma's anon_vma; but thereafter,
@@ -89,12 +78,6 @@ static inline struct page *ksm_might_need_to_copy(struct page *page,
return page;
}
-static inline int page_referenced_ksm(struct page *page,
- struct mem_cgroup *memcg, unsigned long *vm_flags)
-{
- return 0;
-}
-
static inline void rmap_walk_ksm(struct page *page,
struct rmap_walk_control *rwc)
{
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index d99b71bc2c66..4f52ec755725 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -53,9 +53,17 @@ enum memcg_memory_event {
MEMCG_HIGH,
MEMCG_MAX,
MEMCG_OOM,
+ MEMCG_SWAP_MAX,
+ MEMCG_SWAP_FAIL,
MEMCG_NR_MEMORY_EVENTS,
};
+enum mem_cgroup_protection {
+ MEMCG_PROT_NONE,
+ MEMCG_PROT_LOW,
+ MEMCG_PROT_MIN,
+};
+
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
int priority;
@@ -158,6 +166,15 @@ enum memcg_kmem_state {
KMEM_ONLINE,
};
+#if defined(CONFIG_SMP)
+struct memcg_padding {
+ char x[0];
+} ____cacheline_internodealigned_in_smp;
+#define MEMCG_PADDING(name) struct memcg_padding name;
+#else
+#define MEMCG_PADDING(name)
+#endif
+
/*
* The memory controller data structure. The memory controller controls both
* page cache and RSS per cgroup. We would eventually like to provide
@@ -179,8 +196,7 @@ struct mem_cgroup {
struct page_counter kmem;
struct page_counter tcpmem;
- /* Normal memory consumption range */
- unsigned long low;
+ /* Upper bound of normal memory consumption range */
unsigned long high;
/* Range enforcement for interrupt charges */
@@ -205,9 +221,11 @@ struct mem_cgroup {
int oom_kill_disable;
/* memory.events */
- atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
struct cgroup_file events_file;
+ /* handle for "memory.swap.events" */
+ struct cgroup_file swap_events_file;
+
/* protect arrays of thresholds */
struct mutex thresholds_lock;
@@ -225,19 +243,26 @@ struct mem_cgroup {
* mem_cgroup ? And what type of charges should we move ?
*/
unsigned long move_charge_at_immigrate;
+ /* taken only while moving_account > 0 */
+ spinlock_t move_lock;
+ unsigned long move_lock_flags;
+
+ MEMCG_PADDING(_pad1_);
+
/*
* set > 0 if pages under this cgroup are moving to other cgroup.
*/
atomic_t moving_account;
- /* taken only while moving_account > 0 */
- spinlock_t move_lock;
struct task_struct *move_lock_task;
- unsigned long move_lock_flags;
/* memory.stat */
struct mem_cgroup_stat_cpu __percpu *stat_cpu;
+
+ MEMCG_PADDING(_pad2_);
+
atomic_long_t stat[MEMCG_NR_STAT];
atomic_long_t events[NR_VM_EVENT_ITEMS];
+ atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
unsigned long socket_pressure;
@@ -285,7 +310,8 @@ static inline bool mem_cgroup_disabled(void)
return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
-bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
+enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
+ struct mem_cgroup *memcg);
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask, struct mem_cgroup **memcgp,
@@ -462,7 +488,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
void mem_cgroup_handle_over_high(void);
-unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg);
+unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
struct task_struct *p);
@@ -730,10 +756,10 @@ static inline void memcg_memory_event(struct mem_cgroup *memcg,
{
}
-static inline bool mem_cgroup_low(struct mem_cgroup *root,
- struct mem_cgroup *memcg)
+static inline enum mem_cgroup_protection mem_cgroup_protected(
+ struct mem_cgroup *root, struct mem_cgroup *memcg)
{
- return false;
+ return MEMCG_PROT_NONE;
}
static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
@@ -853,7 +879,7 @@ mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
return 0;
}
-static inline unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
+static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
return 0;
}
@@ -1093,7 +1119,6 @@ static inline void dec_lruvec_page_state(struct page *page,
#ifdef CONFIG_CGROUP_WRITEBACK
-struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
unsigned long *pheadroom, unsigned long *pdirty,
diff --git a/include/linux/memfd.h b/include/linux/memfd.h
new file mode 100644
index 000000000000..4f1600413f91
--- /dev/null
+++ b/include/linux/memfd.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_MEMFD_H
+#define __LINUX_MEMFD_H
+
+#include <linux/file.h>
+
+#ifdef CONFIG_MEMFD_CREATE
+extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
+#else
+static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned long a)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* __LINUX_MEMFD_H */
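memfd_fcntl() is the kernel-side handler for the memfd sealing fcntls, now declared in its own header instead of shmem_fs.h. A userspace sketch of the path it serves, assuming glibc 2.27+ for the memfd_create() wrapper:

    #define _GNU_SOURCE
    #include <sys/mman.h>   /* memfd_create(), MFD_ALLOW_SEALING */
    #include <fcntl.h>      /* F_ADD_SEALS, F_GET_SEALS, F_SEAL_* */
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
        int fd = memfd_create("demo", MFD_ALLOW_SEALING);

        if (fd < 0 || ftruncate(fd, 4096) < 0)
            return 1;

        /* These fcntls are dispatched to memfd_fcntl() in the kernel. */
        if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK) < 0)
            perror("F_ADD_SEALS");

        printf("seals: %#x\n", fcntl(fd, F_GET_SEALS));
        close(fd);
        return 0;
    }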
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 2b0265265c28..4e9828cda7a2 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -107,7 +107,6 @@ static inline bool movable_node_is_enabled(void)
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-extern bool is_pageblock_removable_nolock(struct page *page);
extern int arch_remove_memory(u64 start, u64 size,
struct vmem_altmap *altmap);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 29c5458b4b5e..4c3881b44ef1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1851,6 +1851,7 @@ static inline bool pgtable_page_ctor(struct page *page)
{
if (!ptlock_init(page))
return false;
+ __SetPageTable(page);
inc_zone_page_state(page, NR_PAGETABLE);
return true;
}
@@ -1858,6 +1859,7 @@ static inline bool pgtable_page_ctor(struct page *page)
static inline void pgtable_page_dtor(struct page *page)
{
pte_lock_deinit(page);
+ __ClearPageTable(page);
dec_zone_page_state(page, NR_PAGETABLE);
}
@@ -2303,10 +2305,10 @@ extern void truncate_inode_pages_range(struct address_space *,
extern void truncate_inode_pages_final(struct address_space *);
/* generic vm_area_ops exported for stackable file systems */
-extern int filemap_fault(struct vm_fault *vmf);
+extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern void filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
-extern int filemap_page_mkwrite(struct vm_fault *vmf);
+extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
/* mm/page-writeback.c */
int __must_check write_one_page(struct page *page);
@@ -2431,8 +2433,8 @@ int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn);
-int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
- pfn_t pfn);
+vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
+ unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
@@ -2530,12 +2532,10 @@ extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
#ifdef CONFIG_PAGE_POISONING
extern bool page_poisoning_enabled(void);
extern void kernel_poison_pages(struct page *page, int numpages, int enable);
-extern bool page_is_poisoned(struct page *page);
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline void kernel_poison_pages(struct page *page, int numpages,
int enable) { }
-static inline bool page_is_poisoned(struct page *page) { return false; }
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 21612347d311..99ce070e7dcb 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -33,29 +33,27 @@ struct hmm;
* it to keep track of whatever it is we are using the page for at the
* moment. Note that we have no way to track which tasks are using
* a page, though if it is a pagecache page, rmap structures can tell us
- * who is mapping it. If you allocate the page using alloc_pages(), you
- * can use some of the space in struct page for your own purposes.
+ * who is mapping it.
*
- * Pages that were once in the page cache may be found under the RCU lock
- * even after they have been recycled to a different purpose. The page
- * cache reads and writes some of the fields in struct page to pin the
- * page before checking that it's still in the page cache. It is vital
- * that all users of struct page:
- * 1. Use the first word as PageFlags.
- * 2. Clear or preserve bit 0 of page->compound_head. It is used as
- * PageTail for compound pages, and the page cache must not see false
- * positives. Some users put a pointer here (guaranteed to be at least
- * 4-byte aligned), other users avoid using the field altogether.
- * 3. page->_refcount must either not be used, or must be used in such a
- * way that other CPUs temporarily incrementing and then decrementing the
- * refcount does not cause problems. On receiving the page from
- * alloc_pages(), the refcount will be positive.
- * 4. Either preserve page->_mapcount or restore it to -1 before freeing it.
+ * If you allocate the page using alloc_pages(), you can use some of the
+ * space in struct page for your own purposes. The five words in the main
+ * union are available, except for bit 0 of the first word which must be
+ * kept clear. Many users use this word to store a pointer to an object
+ * which is guaranteed to be aligned. If you use the same storage as
+ * page->mapping, you must restore it to NULL before freeing the page.
*
- * If you allocate pages of order > 0, you can use the fields in the struct
- * page associated with each page, but bear in mind that the pages may have
- * been inserted individually into the page cache, so you must use the above
- * four fields in a compatible way for each struct page.
+ * If your page will not be mapped to userspace, you can also use the four
+ * bytes in the mapcount union, but you must call page_mapcount_reset()
+ * before freeing it.
+ *
+ * If you want to use the refcount field, it must be used in such a way
+ * that other CPUs temporarily incrementing and then decrementing the
+ * refcount does not cause problems. On receiving the page from
+ * alloc_pages(), the refcount will be positive.
+ *
+ * If you allocate pages of order > 0, you can use some of the fields
+ * in each subpage, but you may need to restore some of their values
+ * afterwards.
*
* SLUB uses cmpxchg_double() to atomically update its freelist and
* counters. That requires that freelist & counters be adjacent and
@@ -65,135 +63,122 @@ struct hmm;
*/
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment __aligned(2 * sizeof(unsigned long))
-#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
-#define _slub_counter_t unsigned long
#else
-#define _slub_counter_t unsigned int
-#endif
-#else /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
#define _struct_page_alignment
-#define _slub_counter_t unsigned int
-#endif /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
+#endif
struct page {
- /* First double word block */
unsigned long flags; /* Atomic flags, some possibly
* updated asynchronously */
- union {
- /* See page-flags.h for the definition of PAGE_MAPPING_FLAGS */
- struct address_space *mapping;
-
- void *s_mem; /* slab first object */
- atomic_t compound_mapcount; /* first tail page */
- /* page_deferred_list().next -- second tail page */
- };
-
- /* Second double word */
- union {
- pgoff_t index; /* Our offset within mapping. */
- void *freelist; /* sl[aou]b first free object */
- /* page_deferred_list().prev -- second tail page */
- };
-
- union {
- _slub_counter_t counters;
- unsigned int active; /* SLAB */
- struct { /* SLUB */
- unsigned inuse:16;
- unsigned objects:15;
- unsigned frozen:1;
- };
- int units; /* SLOB */
-
- struct { /* Page cache */
- /*
- * Count of ptes mapped in mms, to show when
- * page is mapped & limit reverse map searches.
- *
- * Extra information about page type may be
- * stored here for pages that are never mapped,
- * in which case the value MUST BE <= -2.
- * See page-flags.h for more details.
- */
- atomic_t _mapcount;
-
- /*
- * Usage count, *USE WRAPPER FUNCTION* when manual
- * accounting. See page_ref.h
- */
- atomic_t _refcount;
- };
- };
-
/*
- * WARNING: bit 0 of the first word encode PageTail(). That means
- * the rest users of the storage space MUST NOT use the bit to
+ * Five words (20/40 bytes) are available in this union.
+ * WARNING: bit 0 of the first word is used for PageTail(). That
+ * means the other users of this union MUST NOT use the bit to
* avoid collision and false-positive PageTail().
*/
union {
- struct list_head lru; /* Pageout list, eg. active_list
- * protected by zone_lru_lock !
- * Can be used as a generic list
- * by the page owner.
- */
- struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an
- * lru or handled by a slab
- * allocator, this points to the
- * hosting device page map.
- */
- struct { /* slub per cpu partial pages */
- struct page *next; /* Next partial slab */
+ struct { /* Page cache and anonymous pages */
+ /**
+ * @lru: Pageout list, eg. active_list protected by
+ * zone_lru_lock. Sometimes used as a generic list
+ * by the page owner.
+ */
+ struct list_head lru;
+ /* See page-flags.h for PAGE_MAPPING_FLAGS */
+ struct address_space *mapping;
+ pgoff_t index; /* Our offset within mapping. */
+ /**
+ * @private: Mapping-private opaque data.
+ * Usually used for buffer_heads if PagePrivate.
+ * Used for swp_entry_t if PageSwapCache.
+ * Indicates order in the buddy system if PageBuddy.
+ */
+ unsigned long private;
+ };
+ struct { /* slab, slob and slub */
+ union {
+ struct list_head slab_list; /* uses lru */
+ struct { /* Partial pages */
+ struct page *next;
#ifdef CONFIG_64BIT
- int pages; /* Nr of partial slabs left */
- int pobjects; /* Approximate # of objects */
+ int pages; /* Nr of pages left */
+ int pobjects; /* Approximate count */
#else
- short int pages;
- short int pobjects;
+ short int pages;
+ short int pobjects;
#endif
+ };
+ };
+ struct kmem_cache *slab_cache; /* not slob */
+ /* Double-word boundary */
+ void *freelist; /* first free object */
+ union {
+ void *s_mem; /* slab: first object */
+ unsigned long counters; /* SLUB */
+ struct { /* SLUB */
+ unsigned inuse:16;
+ unsigned objects:15;
+ unsigned frozen:1;
+ };
+ };
};
-
- struct rcu_head rcu_head; /* Used by SLAB
- * when destroying via RCU
- */
- /* Tail pages of compound page */
- struct {
- unsigned long compound_head; /* If bit zero is set */
+ struct { /* Tail pages of compound page */
+ unsigned long compound_head; /* Bit zero is set */
/* First tail page only */
unsigned char compound_dtor;
unsigned char compound_order;
- /* two/six bytes available here */
+ atomic_t compound_mapcount;
};
-
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
- struct {
- unsigned long __pad; /* do not overlay pmd_huge_pte
- * with compound_head to avoid
- * possible bit 0 collision.
- */
- pgtable_t pmd_huge_pte; /* protected by page->ptl */
+ struct { /* Second tail page of compound page */
+ unsigned long _compound_pad_1; /* compound_head */
+ unsigned long _compound_pad_2;
+ struct list_head deferred_list;
};
+ struct { /* Page table pages */
+ unsigned long _pt_pad_1; /* compound_head */
+ pgtable_t pmd_huge_pte; /* protected by page->ptl */
+ unsigned long _pt_pad_2; /* mapping */
+ struct mm_struct *pt_mm; /* x86 pgds only */
+#if ALLOC_SPLIT_PTLOCKS
+ spinlock_t *ptl;
+#else
+ spinlock_t ptl;
#endif
+ };
+ struct { /* ZONE_DEVICE pages */
+ /** @pgmap: Points to the hosting device page map. */
+ struct dev_pagemap *pgmap;
+ unsigned long hmm_data;
+ unsigned long _zd_pad_1; /* uses mapping */
+ };
+
+ /** @rcu_head: You can use this to free a page by RCU. */
+ struct rcu_head rcu_head;
};
- union {
+ union { /* This union is 4 bytes in size. */
/*
- * Mapping-private opaque data:
- * Usually used for buffer_heads if PagePrivate
- * Used for swp_entry_t if PageSwapCache
- * Indicates order in the buddy system if PageBuddy
+ * If the page can be mapped to userspace, encodes the number
+ * of times this page is referenced by a page table.
*/
- unsigned long private;
-#if USE_SPLIT_PTE_PTLOCKS
-#if ALLOC_SPLIT_PTLOCKS
- spinlock_t *ptl;
-#else
- spinlock_t ptl;
-#endif
-#endif
- struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
+ atomic_t _mapcount;
+
+ /*
+ * If the page is neither PageSlab nor mappable to userspace,
+ * the value stored here may help determine what this page
+ * is used for. See page-flags.h for a list of page types
+ * which are currently stored here.
+ */
+ unsigned int page_type;
+
+ unsigned int active; /* SLAB */
+ int units; /* SLOB */
};
+ /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
+ atomic_t _refcount;
+
#ifdef CONFIG_MEMCG
struct mem_cgroup *mem_cgroup;
#endif
@@ -413,6 +398,8 @@ struct mm_struct {
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
unsigned long stack_vm; /* VM_STACK */
unsigned long def_flags;
+
+ spinlock_t arg_lock; /* protect the below fields */
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;
@@ -627,9 +614,9 @@ struct vm_special_mapping {
* If non-NULL, then this is called to resolve page faults
* on the special mapping. If used, .pages is not checked.
*/
- int (*fault)(const struct vm_special_mapping *sm,
- struct vm_area_struct *vma,
- struct vm_fault *vmf);
+ vm_fault_t (*fault)(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma,
+ struct vm_fault *vmf);
int (*mremap)(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma);
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index 1cc5ffb769af..7cd1473c64a4 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -53,93 +53,32 @@ struct gcry_mpi {
typedef struct gcry_mpi *MPI;
#define mpi_get_nlimbs(a) ((a)->nlimbs)
-#define mpi_is_neg(a) ((a)->sign)
/*-- mpiutil.c --*/
MPI mpi_alloc(unsigned nlimbs);
-MPI mpi_alloc_secure(unsigned nlimbs);
-MPI mpi_alloc_like(MPI a);
void mpi_free(MPI a);
int mpi_resize(MPI a, unsigned nlimbs);
-int mpi_copy(MPI *copy, const MPI a);
-void mpi_clear(MPI a);
-int mpi_set(MPI w, MPI u);
-int mpi_set_ui(MPI w, ulong u);
-MPI mpi_alloc_set_ui(unsigned long u);
-void mpi_m_check(MPI a);
-void mpi_swap(MPI a, MPI b);
/*-- mpicoder.c --*/
-MPI do_encode_md(const void *sha_buffer, unsigned nbits);
MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes);
MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread);
MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len);
-int mpi_fromstr(MPI val, const char *str);
-u32 mpi_get_keyid(MPI a, u32 *keyid);
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
int *sign);
-void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes,
int *sign);
-#define log_mpidump g10_log_mpidump
-
-/*-- mpi-add.c --*/
-int mpi_add_ui(MPI w, MPI u, ulong v);
-int mpi_add(MPI w, MPI u, MPI v);
-int mpi_addm(MPI w, MPI u, MPI v, MPI m);
-int mpi_sub_ui(MPI w, MPI u, ulong v);
-int mpi_sub(MPI w, MPI u, MPI v);
-int mpi_subm(MPI w, MPI u, MPI v, MPI m);
-
-/*-- mpi-mul.c --*/
-int mpi_mul_ui(MPI w, MPI u, ulong v);
-int mpi_mul_2exp(MPI w, MPI u, ulong cnt);
-int mpi_mul(MPI w, MPI u, MPI v);
-int mpi_mulm(MPI w, MPI u, MPI v, MPI m);
-
-/*-- mpi-div.c --*/
-ulong mpi_fdiv_r_ui(MPI rem, MPI dividend, ulong divisor);
-int mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor);
-int mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor);
-int mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor);
-int mpi_tdiv_r(MPI rem, MPI num, MPI den);
-int mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den);
-int mpi_tdiv_q_2exp(MPI w, MPI u, unsigned count);
-int mpi_divisible_ui(const MPI dividend, ulong divisor);
-
-/*-- mpi-gcd.c --*/
-int mpi_gcd(MPI g, const MPI a, const MPI b);
-
/*-- mpi-pow.c --*/
-int mpi_pow(MPI w, MPI u, MPI v);
int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
-/*-- mpi-mpow.c --*/
-int mpi_mulpowm(MPI res, MPI *basearray, MPI *exparray, MPI mod);
-
/*-- mpi-cmp.c --*/
int mpi_cmp_ui(MPI u, ulong v);
int mpi_cmp(MPI u, MPI v);
-/*-- mpi-scan.c --*/
-int mpi_getbyte(MPI a, unsigned idx);
-void mpi_putbyte(MPI a, unsigned idx, int value);
-unsigned mpi_trailing_zeros(MPI a);
-
/*-- mpi-bit.c --*/
void mpi_normalize(MPI a);
unsigned mpi_get_nbits(MPI a);
-int mpi_test_bit(MPI a, unsigned n);
-int mpi_set_bit(MPI a, unsigned n);
-int mpi_set_highbit(MPI a, unsigned n);
-void mpi_clear_highbit(MPI a, unsigned n);
-void mpi_clear_bit(MPI a, unsigned n);
-int mpi_rshift(MPI x, MPI a, unsigned n);
-
-/*-- mpi-inv.c --*/
-int mpi_invm(MPI x, MPI u, MPI v);
/* inline functions */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e34a27727b9a..901943e4754b 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -642,49 +642,62 @@ PAGEFLAG_FALSE(DoubleMap)
#endif
/*
- * For pages that are never mapped to userspace, page->mapcount may be
- * used for storing extra information about page type. Any value used
- * for this purpose must be <= -2, but it's better start not too close
- * to -2 so that an underflow of the page_mapcount() won't be mistaken
- * for a special page.
+ * For pages that are never mapped to userspace (and aren't PageSlab),
+ * page_type may be used. Because it is initialised to -1, we invert the
+ * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
+ * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
+ * low bits so that an underflow or overflow of page_mapcount() won't be
+ * mistaken for a page type value.
*/
-#define PAGE_MAPCOUNT_OPS(uname, lname) \
+
+#define PAGE_TYPE_BASE 0xf0000000
+/* Reserve 0x0000007f to catch underflows of page_mapcount */
+#define PG_buddy 0x00000080
+#define PG_balloon 0x00000100
+#define PG_kmemcg 0x00000200
+#define PG_table 0x00000400
+
+#define PageType(page, flag) \
+ ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
+
+#define PAGE_TYPE_OPS(uname, lname) \
static __always_inline int Page##uname(struct page *page) \
{ \
- return atomic_read(&page->_mapcount) == \
- PAGE_##lname##_MAPCOUNT_VALUE; \
+ return PageType(page, PG_##lname); \
} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
- VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); \
- atomic_set(&page->_mapcount, PAGE_##lname##_MAPCOUNT_VALUE); \
+ VM_BUG_ON_PAGE(!PageType(page, 0), page); \
+ page->page_type &= ~PG_##lname; \
} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!Page##uname(page), page); \
- atomic_set(&page->_mapcount, -1); \
+ page->page_type |= PG_##lname; \
}
/*
- * PageBuddy() indicate that the page is free and in the buddy system
+ * PageBuddy() indicates that the page is free and in the buddy system
* (see mm/page_alloc.c).
*/
-#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
-PAGE_MAPCOUNT_OPS(Buddy, BUDDY)
+PAGE_TYPE_OPS(Buddy, buddy)
/*
- * PageBalloon() is set on pages that are on the balloon page list
+ * PageBalloon() is true for pages that are on the balloon page list
* (see mm/balloon_compaction.c).
*/
-#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
-PAGE_MAPCOUNT_OPS(Balloon, BALLOON)
+PAGE_TYPE_OPS(Balloon, balloon)
/*
* If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
* pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
*/
-#define PAGE_KMEMCG_MAPCOUNT_VALUE (-512)
-PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG)
+PAGE_TYPE_OPS(Kmemcg, kmemcg)
+
+/*
+ * Marks pages in use as page tables.
+ */
+PAGE_TYPE_OPS(Table, table)
extern bool is_free_buddy_page(struct page *page);
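Since page_type overlays a word initialised to -1, a page type is recorded by clearing its bit while the reserved high bits stay set. A standalone sketch of the encoding, using a hypothetical page_has_type() helper that mirrors the PageType() macro above:

    #include <stdio.h>

    #define PAGE_TYPE_BASE  0xf0000000
    #define PG_buddy        0x00000080
    #define PG_table        0x00000400

    /* Mirrors PageType(): a type is "set" when its bit is clear
     * and the reserved high bits are still all ones. */
    static int page_has_type(unsigned int page_type, unsigned int flag)
    {
        return (page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE;
    }

    int main(void)
    {
        unsigned int page_type = 0xffffffff;    /* freshly allocated page */

        page_type &= ~PG_table;                 /* like __SetPageTable() */
        printf("table=%d buddy=%d\n",
               page_has_type(page_type, PG_table),
               page_has_type(page_type, PG_buddy));    /* prints 1 0 */

        page_type |= PG_table;                  /* like __ClearPageTable() */
        printf("table=%d\n", page_has_type(page_type, PG_table)); /* prints 0 */
        return 0;
    }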
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index c15ab80ad32d..bab7e57f659b 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -7,10 +7,22 @@
#include <asm/page.h>
struct page_counter {
- atomic_long_t count;
- unsigned long limit;
+ atomic_long_t usage;
+ unsigned long min;
+ unsigned long low;
+ unsigned long max;
struct page_counter *parent;
+ /* effective memory.min and memory.min usage tracking */
+ unsigned long emin;
+ atomic_long_t min_usage;
+ atomic_long_t children_min_usage;
+
+ /* effective memory.low and memory.low usage tracking */
+ unsigned long elow;
+ atomic_long_t low_usage;
+ atomic_long_t children_low_usage;
+
/* legacy */
unsigned long watermark;
unsigned long failcnt;
@@ -25,14 +37,14 @@ struct page_counter {
static inline void page_counter_init(struct page_counter *counter,
struct page_counter *parent)
{
- atomic_long_set(&counter->count, 0);
- counter->limit = PAGE_COUNTER_MAX;
+ atomic_long_set(&counter->usage, 0);
+ counter->max = PAGE_COUNTER_MAX;
counter->parent = parent;
}
static inline unsigned long page_counter_read(struct page_counter *counter)
{
- return atomic_long_read(&counter->count);
+ return atomic_long_read(&counter->usage);
}
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
@@ -41,7 +53,9 @@ bool page_counter_try_charge(struct page_counter *counter,
unsigned long nr_pages,
struct page_counter **fail);
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
-int page_counter_limit(struct page_counter *counter, unsigned long limit);
+void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
+void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
+int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
int page_counter_memparse(const char *buf, const char *max,
unsigned long *nr_pages);
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index a03c2642a87c..21713dc14ce2 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -122,7 +122,7 @@ pud_t pud_mkdevmap(pud_t pud);
#endif
#endif /* __HAVE_ARCH_PTE_DEVMAP */
-#ifdef __HAVE_ARCH_PTE_SPECIAL
+#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
static inline bool pfn_t_special(pfn_t pfn)
{
return (pfn.val & PFN_SPECIAL) == PFN_SPECIAL;
@@ -132,5 +132,5 @@ static inline bool pfn_t_special(pfn_t pfn)
{
return false;
}
-#endif /* __HAVE_ARCH_PTE_SPECIAL */
+#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
#endif /* _LINUX_PFN_T_H_ */
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 76a8cb4ef178..44d356f5e47c 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -163,9 +163,13 @@ static inline gfp_t current_gfp_context(gfp_t flags)
}
#ifdef CONFIG_LOCKDEP
+extern void __fs_reclaim_acquire(void);
+extern void __fs_reclaim_release(void);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
+static inline void __fs_reclaim_acquire(void) { }
+static inline void __fs_reclaim_release(void) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 73b5e655a76e..f155dc607112 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -110,19 +110,6 @@ static inline bool shmem_file(struct file *file)
extern bool shmem_charge(struct inode *inode, long pages);
extern void shmem_uncharge(struct inode *inode, long pages);
-#ifdef CONFIG_TMPFS
-
-extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
-
-#else
-
-static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned long a)
-{
- return -EINVAL;
-}
-
-#endif
-
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
extern bool shmem_huge_enabled(struct vm_area_struct *vma);
#else
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index d9228e4d0320..3485c58cfd1c 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -67,9 +67,10 @@ struct kmem_cache {
/*
* If debugging is enabled, then the allocator can add additional
- * fields and/or padding to every object. size contains the total
- * object size including these internal fields, the following two
- * variables contain the offset to the user object and its size.
+ * fields and/or padding to every object. 'size' contains the total
+ * object size including these internal fields, while 'obj_offset'
+ * and 'object_size' contain the offset to the user object and its
+ * size.
*/
int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 3773e26c08c1..09fa2c6f0e68 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -101,7 +101,6 @@ struct kmem_cache {
void (*ctor)(void *);
unsigned int inuse; /* Offset to metadata */
unsigned int align; /* Alignment */
- unsigned int reserved; /* Reserved bytes at the end of slabs */
unsigned int red_left_pad; /* Left redzone padding size */
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
diff --git a/include/linux/types.h b/include/linux/types.h
index ec13d02b3481..9834e90aa010 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -10,14 +10,14 @@
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
-typedef __u32 __kernel_dev_t;
+typedef u32 __kernel_dev_t;
typedef __kernel_fd_set fd_set;
typedef __kernel_dev_t dev_t;
typedef __kernel_ino_t ino_t;
typedef __kernel_mode_t mode_t;
typedef unsigned short umode_t;
-typedef __u32 nlink_t;
+typedef u32 nlink_t;
typedef __kernel_off_t off_t;
typedef __kernel_pid_t pid_t;
typedef __kernel_daddr_t daddr_t;
@@ -95,29 +95,29 @@ typedef unsigned long ulong;
#ifndef __BIT_TYPES_DEFINED__
#define __BIT_TYPES_DEFINED__
-typedef __u8 u_int8_t;
-typedef __s8 int8_t;
-typedef __u16 u_int16_t;
-typedef __s16 int16_t;
-typedef __u32 u_int32_t;
-typedef __s32 int32_t;
+typedef u8 u_int8_t;
+typedef s8 int8_t;
+typedef u16 u_int16_t;
+typedef s16 int16_t;
+typedef u32 u_int32_t;
+typedef s32 int32_t;
#endif /* !(__BIT_TYPES_DEFINED__) */
-typedef __u8 uint8_t;
-typedef __u16 uint16_t;
-typedef __u32 uint32_t;
+typedef u8 uint8_t;
+typedef u16 uint16_t;
+typedef u32 uint32_t;
#if defined(__GNUC__)
-typedef __u64 uint64_t;
-typedef __u64 u_int64_t;
-typedef __s64 int64_t;
+typedef u64 uint64_t;
+typedef u64 u_int64_t;
+typedef s64 int64_t;
#endif
/* this is a special 64bit data type that is 8-byte aligned */
-#define aligned_u64 __u64 __attribute__((aligned(8)))
-#define aligned_be64 __be64 __attribute__((aligned(8)))
-#define aligned_le64 __le64 __attribute__((aligned(8)))
+#define aligned_u64 __aligned_u64
+#define aligned_be64 __aligned_be64
+#define aligned_le64 __aligned_le64
/**
* The type used for indexing onto a disc or disc partition.
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index f2f3b68ba910..e091f0a11b11 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -31,10 +31,12 @@
extern int handle_userfault(struct vm_fault *vmf, unsigned long reason);
extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
- unsigned long src_start, unsigned long len);
+ unsigned long src_start, unsigned long len,
+ bool *mmap_changing);
extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
unsigned long dst_start,
- unsigned long len);
+ unsigned long len,
+ bool *mmap_changing);
/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h
index 2a4432c7a4b4..e13eec3dfb2f 100644
--- a/include/uapi/linux/auto_fs.h
+++ b/include/uapi/linux/auto_fs.h
@@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
- * Copyright 1997 Transmeta Corporation - All Rights Reserved
+ * Copyright 1997 Transmeta Corporation - All Rights Reserved
+ * Copyright 1999-2000 Jeremy Fitzhardinge <jeremy@goop.org>
+ * Copyright 2005-2006,2013,2017-2018 Ian Kent <raven@themaw.net>
*
* This file is part of the Linux kernel and is made available under
* the terms of the GNU General Public License, version 2, or at your
@@ -8,7 +10,6 @@
*
* ----------------------------------------------------------------------- */
-
#ifndef _UAPI_LINUX_AUTO_FS_H
#define _UAPI_LINUX_AUTO_FS_H
@@ -18,13 +19,11 @@
#include <sys/ioctl.h>
#endif /* __KERNEL__ */
+#define AUTOFS_PROTO_VERSION 5
+#define AUTOFS_MIN_PROTO_VERSION 3
+#define AUTOFS_MAX_PROTO_VERSION 5
-/* This file describes autofs v3 */
-#define AUTOFS_PROTO_VERSION 3
-
-/* Range of protocol versions defined */
-#define AUTOFS_MAX_PROTO_VERSION AUTOFS_PROTO_VERSION
-#define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION
+#define AUTOFS_PROTO_SUBVERSION 2
/*
* The wait_queue_token (autofs_wqt_t) is part of a structure which is passed
@@ -76,9 +75,155 @@ enum {
#define AUTOFS_IOC_READY _IO(AUTOFS_IOCTL, AUTOFS_IOC_READY_CMD)
#define AUTOFS_IOC_FAIL _IO(AUTOFS_IOCTL, AUTOFS_IOC_FAIL_CMD)
#define AUTOFS_IOC_CATATONIC _IO(AUTOFS_IOCTL, AUTOFS_IOC_CATATONIC_CMD)
-#define AUTOFS_IOC_PROTOVER _IOR(AUTOFS_IOCTL, AUTOFS_IOC_PROTOVER_CMD, int)
-#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(AUTOFS_IOCTL, AUTOFS_IOC_SETTIMEOUT_CMD, compat_ulong_t)
-#define AUTOFS_IOC_SETTIMEOUT _IOWR(AUTOFS_IOCTL, AUTOFS_IOC_SETTIMEOUT_CMD, unsigned long)
-#define AUTOFS_IOC_EXPIRE _IOR(AUTOFS_IOCTL, AUTOFS_IOC_EXPIRE_CMD, struct autofs_packet_expire)
+#define AUTOFS_IOC_PROTOVER _IOR(AUTOFS_IOCTL, \
+ AUTOFS_IOC_PROTOVER_CMD, int)
+#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_IOC_SETTIMEOUT_CMD, \
+ compat_ulong_t)
+#define AUTOFS_IOC_SETTIMEOUT _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_IOC_SETTIMEOUT_CMD, \
+ unsigned long)
+#define AUTOFS_IOC_EXPIRE _IOR(AUTOFS_IOCTL, \
+ AUTOFS_IOC_EXPIRE_CMD, \
+ struct autofs_packet_expire)
+
+/* autofs version 4 and later definitions */
+
+/* Mask for expire behaviour */
+#define AUTOFS_EXP_IMMEDIATE 1
+#define AUTOFS_EXP_LEAVES 2
+
+#define AUTOFS_TYPE_ANY 0U
+#define AUTOFS_TYPE_INDIRECT 1U
+#define AUTOFS_TYPE_DIRECT 2U
+#define AUTOFS_TYPE_OFFSET 4U
+
+static inline void set_autofs_type_indirect(unsigned int *type)
+{
+ *type = AUTOFS_TYPE_INDIRECT;
+}
+
+static inline unsigned int autofs_type_indirect(unsigned int type)
+{
+ return (type == AUTOFS_TYPE_INDIRECT);
+}
+
+static inline void set_autofs_type_direct(unsigned int *type)
+{
+ *type = AUTOFS_TYPE_DIRECT;
+}
+
+static inline unsigned int autofs_type_direct(unsigned int type)
+{
+ return (type == AUTOFS_TYPE_DIRECT);
+}
+
+static inline void set_autofs_type_offset(unsigned int *type)
+{
+ *type = AUTOFS_TYPE_OFFSET;
+}
+
+static inline unsigned int autofs_type_offset(unsigned int type)
+{
+ return (type == AUTOFS_TYPE_OFFSET);
+}
+
+static inline unsigned int autofs_type_trigger(unsigned int type)
+{
+ return (type == AUTOFS_TYPE_DIRECT || type == AUTOFS_TYPE_OFFSET);
+}
+
+/*
+ * This isn't really a type as we use it to say "no type set" to
+ * indicate we want to search for "any" mount in the
+ * autofs_dev_ioctl_ismountpoint() device ioctl function.
+ */
+static inline void set_autofs_type_any(unsigned int *type)
+{
+ *type = AUTOFS_TYPE_ANY;
+}
+
+static inline unsigned int autofs_type_any(unsigned int type)
+{
+ return (type == AUTOFS_TYPE_ANY);
+}
+
+/* Daemon notification packet types */
+enum autofs_notify {
+ NFY_NONE,
+ NFY_MOUNT,
+ NFY_EXPIRE
+};
+
+/* Kernel protocol version 4 packet types */
+
+/* Expire entry (umount request) */
+#define autofs_ptype_expire_multi 2
+
+/* Kernel protocol version 5 packet types */
+
+/* Indirect mount missing and expire requests. */
+#define autofs_ptype_missing_indirect 3
+#define autofs_ptype_expire_indirect 4
+
+/* Direct mount missing and expire requests */
+#define autofs_ptype_missing_direct 5
+#define autofs_ptype_expire_direct 6
+
+/* v4 multi expire (via pipe) */
+struct autofs_packet_expire_multi {
+ struct autofs_packet_hdr hdr;
+ autofs_wqt_t wait_queue_token;
+ int len;
+ char name[NAME_MAX+1];
+};
+
+union autofs_packet_union {
+ struct autofs_packet_hdr hdr;
+ struct autofs_packet_missing missing;
+ struct autofs_packet_expire expire;
+ struct autofs_packet_expire_multi expire_multi;
+};
+
+/* autofs v5 common packet struct */
+struct autofs_v5_packet {
+ struct autofs_packet_hdr hdr;
+ autofs_wqt_t wait_queue_token;
+ __u32 dev;
+ __u64 ino;
+ __u32 uid;
+ __u32 gid;
+ __u32 pid;
+ __u32 tgid;
+ __u32 len;
+ char name[NAME_MAX+1];
+};
+
+typedef struct autofs_v5_packet autofs_packet_missing_indirect_t;
+typedef struct autofs_v5_packet autofs_packet_expire_indirect_t;
+typedef struct autofs_v5_packet autofs_packet_missing_direct_t;
+typedef struct autofs_v5_packet autofs_packet_expire_direct_t;
+
+union autofs_v5_packet_union {
+ struct autofs_packet_hdr hdr;
+ struct autofs_v5_packet v5_packet;
+ autofs_packet_missing_indirect_t missing_indirect;
+ autofs_packet_expire_indirect_t expire_indirect;
+ autofs_packet_missing_direct_t missing_direct;
+ autofs_packet_expire_direct_t expire_direct;
+};
+
+enum {
+ AUTOFS_IOC_EXPIRE_MULTI_CMD = 0x66, /* AUTOFS_IOC_EXPIRE_CMD + 1 */
+ AUTOFS_IOC_PROTOSUBVER_CMD,
+ AUTOFS_IOC_ASKUMOUNT_CMD = 0x70, /* AUTOFS_DEV_IOCTL_VERSION_CMD - 1 */
+};
+
+#define AUTOFS_IOC_EXPIRE_MULTI _IOW(AUTOFS_IOCTL, \
+ AUTOFS_IOC_EXPIRE_MULTI_CMD, int)
+#define AUTOFS_IOC_PROTOSUBVER _IOR(AUTOFS_IOCTL, \
+ AUTOFS_IOC_PROTOSUBVER_CMD, int)
+#define AUTOFS_IOC_ASKUMOUNT _IOR(AUTOFS_IOCTL, \
+ AUTOFS_IOC_ASKUMOUNT_CMD, int)
#endif /* _UAPI_LINUX_AUTO_FS_H */
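With the v4/v5 protocol definitions folded into auto_fs.h, a userspace daemon needs only this header for the pipe protocol. A hedged sketch of consuming one v5 packet from the kernel pipe; pipefd, the packet framing, and error handling are simplified assumptions, and the real automount daemon also negotiates protocol version and packet size:

    #include <limits.h>         /* NAME_MAX */
    #include <linux/auto_fs.h>
    #include <unistd.h>
    #include <stdio.h>

    static void handle_one_packet(int pipefd)
    {
        union autofs_v5_packet_union pkt;

        if (read(pipefd, &pkt, sizeof(pkt)) < (ssize_t)sizeof(pkt.hdr))
            return;

        switch (pkt.hdr.type) {
        case autofs_ptype_missing_indirect:
        case autofs_ptype_missing_direct:
            printf("mount request: %s\n", pkt.v5_packet.name);
            break;
        case autofs_ptype_expire_indirect:
        case autofs_ptype_expire_direct:
            printf("expire request: %s\n", pkt.v5_packet.name);
            break;
        }
    }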
diff --git a/include/uapi/linux/auto_fs4.h b/include/uapi/linux/auto_fs4.h
index 1f608e27a06f..d01ef0a0189c 100644
--- a/include/uapi/linux/auto_fs4.h
+++ b/include/uapi/linux/auto_fs4.h
@@ -7,156 +7,9 @@
* option, any later version, incorporated herein by reference.
*/
-#ifndef _LINUX_AUTO_FS4_H
-#define _LINUX_AUTO_FS4_H
+#ifndef _UAPI_LINUX_AUTO_FS4_H
+#define _UAPI_LINUX_AUTO_FS4_H
-/* Include common v3 definitions */
-#include <linux/types.h>
#include <linux/auto_fs.h>
-/* autofs v4 definitions */
-#undef AUTOFS_PROTO_VERSION
-#undef AUTOFS_MIN_PROTO_VERSION
-#undef AUTOFS_MAX_PROTO_VERSION
-
-#define AUTOFS_PROTO_VERSION 5
-#define AUTOFS_MIN_PROTO_VERSION 3
-#define AUTOFS_MAX_PROTO_VERSION 5
-
-#define AUTOFS_PROTO_SUBVERSION 2
-
-/* Mask for expire behaviour */
-#define AUTOFS_EXP_IMMEDIATE 1
-#define AUTOFS_EXP_LEAVES 2
-
-#define AUTOFS_TYPE_ANY 0U
-#define AUTOFS_TYPE_INDIRECT 1U
-#define AUTOFS_TYPE_DIRECT 2U
-#define AUTOFS_TYPE_OFFSET 4U
-
-static inline void set_autofs_type_indirect(unsigned int *type)
-{
- *type = AUTOFS_TYPE_INDIRECT;
-}
-
-static inline unsigned int autofs_type_indirect(unsigned int type)
-{
- return (type == AUTOFS_TYPE_INDIRECT);
-}
-
-static inline void set_autofs_type_direct(unsigned int *type)
-{
- *type = AUTOFS_TYPE_DIRECT;
-}
-
-static inline unsigned int autofs_type_direct(unsigned int type)
-{
- return (type == AUTOFS_TYPE_DIRECT);
-}
-
-static inline void set_autofs_type_offset(unsigned int *type)
-{
- *type = AUTOFS_TYPE_OFFSET;
-}
-
-static inline unsigned int autofs_type_offset(unsigned int type)
-{
- return (type == AUTOFS_TYPE_OFFSET);
-}
-
-static inline unsigned int autofs_type_trigger(unsigned int type)
-{
- return (type == AUTOFS_TYPE_DIRECT || type == AUTOFS_TYPE_OFFSET);
-}
-
-/*
- * This isn't really a type as we use it to say "no type set" to
- * indicate we want to search for "any" mount in the
- * autofs_dev_ioctl_ismountpoint() device ioctl function.
- */
-static inline void set_autofs_type_any(unsigned int *type)
-{
- *type = AUTOFS_TYPE_ANY;
-}
-
-static inline unsigned int autofs_type_any(unsigned int type)
-{
- return (type == AUTOFS_TYPE_ANY);
-}
-
-/* Daemon notification packet types */
-enum autofs_notify {
- NFY_NONE,
- NFY_MOUNT,
- NFY_EXPIRE
-};
-
-/* Kernel protocol version 4 packet types */
-
-/* Expire entry (umount request) */
-#define autofs_ptype_expire_multi 2
-
-/* Kernel protocol version 5 packet types */
-
-/* Indirect mount missing and expire requests. */
-#define autofs_ptype_missing_indirect 3
-#define autofs_ptype_expire_indirect 4
-
-/* Direct mount missing and expire requests */
-#define autofs_ptype_missing_direct 5
-#define autofs_ptype_expire_direct 6
-
-/* v4 multi expire (via pipe) */
-struct autofs_packet_expire_multi {
- struct autofs_packet_hdr hdr;
- autofs_wqt_t wait_queue_token;
- int len;
- char name[NAME_MAX+1];
-};
-
-union autofs_packet_union {
- struct autofs_packet_hdr hdr;
- struct autofs_packet_missing missing;
- struct autofs_packet_expire expire;
- struct autofs_packet_expire_multi expire_multi;
-};
-
-/* autofs v5 common packet struct */
-struct autofs_v5_packet {
- struct autofs_packet_hdr hdr;
- autofs_wqt_t wait_queue_token;
- __u32 dev;
- __u64 ino;
- __u32 uid;
- __u32 gid;
- __u32 pid;
- __u32 tgid;
- __u32 len;
- char name[NAME_MAX+1];
-};
-
-typedef struct autofs_v5_packet autofs_packet_missing_indirect_t;
-typedef struct autofs_v5_packet autofs_packet_expire_indirect_t;
-typedef struct autofs_v5_packet autofs_packet_missing_direct_t;
-typedef struct autofs_v5_packet autofs_packet_expire_direct_t;
-
-union autofs_v5_packet_union {
- struct autofs_packet_hdr hdr;
- struct autofs_v5_packet v5_packet;
- autofs_packet_missing_indirect_t missing_indirect;
- autofs_packet_expire_indirect_t expire_indirect;
- autofs_packet_missing_direct_t missing_direct;
- autofs_packet_expire_direct_t expire_direct;
-};
-
-enum {
- AUTOFS_IOC_EXPIRE_MULTI_CMD = 0x66, /* AUTOFS_IOC_EXPIRE_CMD + 1 */
- AUTOFS_IOC_PROTOSUBVER_CMD,
- AUTOFS_IOC_ASKUMOUNT_CMD = 0x70, /* AUTOFS_DEV_IOCTL_VERSION_CMD - 1 */
-};
-
-#define AUTOFS_IOC_EXPIRE_MULTI _IOW(AUTOFS_IOCTL, AUTOFS_IOC_EXPIRE_MULTI_CMD, int)
-#define AUTOFS_IOC_PROTOSUBVER _IOR(AUTOFS_IOCTL, AUTOFS_IOC_PROTOSUBVER_CMD, int)
-#define AUTOFS_IOC_ASKUMOUNT _IOR(AUTOFS_IOCTL, AUTOFS_IOC_ASKUMOUNT_CMD, int)
-
-#endif /* _LINUX_AUTO_FS4_H */
+#endif /* _UAPI_LINUX_AUTO_FS4_H */
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
index fa139841ec18..21b9113c69da 100644
--- a/include/uapi/linux/kernel-page-flags.h
+++ b/include/uapi/linux/kernel-page-flags.h
@@ -35,6 +35,6 @@
#define KPF_BALLOON 23
#define KPF_ZERO_PAGE 24
#define KPF_IDLE 25
-
+#define KPF_PGTABLE 26
#endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */