author      Linus Torvalds    2023-11-02 20:53:31 -1000
committer   Linus Torvalds    2023-11-02 20:53:31 -1000
commit      8f6f76a6a29f36d2f3e4510d0bde5046672f6924 (patch)
tree        41e31fd924728d6bec5078db0f95b10dec5e5b6e /fs
parent      ecae0bd5173b1014f95a14a8dfbe40ec10367dcf (diff)
parent      6620999f0d41e4fd6f047727936a964c3399d249 (diff)
Merge tag 'mm-nonmm-stable-2023-11-02-14-08' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull non-MM updates from Andrew Morton:
"As usual, lots of singleton and doubleton patches all over the tree
and there's little I can say which isn't in the individual changelogs.
The lengthier patch series are
- 'kdump: use generic functions to simplify crashkernel reservation
in arch', from Baoquan He. This is mainly cleanups and
consolidation of the 'crashkernel=' kernel parameter handling
- After much discussion, David Laight's 'minmax: Relax type checks in
min() and max()' is here. Hopefully reduces some typecasting and
the use of min_t() and max_t()
- A group of patches from Oleg Nesterov which clean up and slightly
fix our handling of reads from /proc/PID/task/... and which remove
task_struct.thread_group"
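
The minmax item is the easiest one to picture with a small example. The macros below are a toy userspace sketch, not the real include/linux/minmax.h: MIN_STRICT mimics the old strict check, which warned whenever the two arguments had different types (e.g. unsigned int vs. int) and pushed callers toward min_t(); MIN_T mimics min_t()'s explicit cast. The relaxed checks accept such mixes when the comparison is provably safe, so many of those min_t()/max_t() casts can go back to plain min()/max().

/* Toy illustration only -- these are NOT the kernel's minmax macros. */
#include <stdio.h>

/* Old-style strict min(): the pointer comparison trips
 * -Wcompare-distinct-pointer-types when the argument types differ. */
#define MIN_STRICT(a, b) ({                     \
        typeof(a) _a = (a);                     \
        typeof(b) _b = (b);                     \
        (void)(&_a == &_b); /* type check */    \
        _a < _b ? _a : _b; })

/* What callers used instead: force both sides to one explicit type. */
#define MIN_T(type, a, b) MIN_STRICT((type)(a), (type)(b))

int main(void)
{
        unsigned int nr_pages = 32;     /* unsigned ... */
        int batch = 16;                 /* ... vs. signed */

        /* MIN_STRICT(nr_pages, batch) would warn about the mixed types,
         * which is why min_t() grew so common in the tree. */
        printf("%u\n", MIN_T(unsigned int, nr_pages, batch));
        return 0;
}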
* tag 'mm-nonmm-stable-2023-11-02-14-08' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (64 commits)
scripts/gdb/vmalloc: disable on no-MMU
scripts/gdb: fix usage of MOD_TEXT not defined when CONFIG_MODULES=n
.mailmap: add address mapping for Tomeu Vizoso
mailmap: update email address for Claudiu Beznea
tools/testing/selftests/mm/run_vmtests.sh: lower the ptrace permissions
.mailmap: map Benjamin Poirier's address
scripts/gdb: add lx_current support for riscv
ocfs2: fix a spelling typo in comment
proc: test ProtectionKey in proc-empty-vm test
proc: fix proc-empty-vm test with vsyscall
fs/proc/base.c: remove unneeded semicolon
do_io_accounting: use sig->stats_lock
do_io_accounting: use __for_each_thread()
ocfs2: replace BUG_ON() at ocfs2_num_free_extents() with ocfs2_error()
ocfs2: fix a typo in a comment
scripts/show_delta: add __main__ judgement before main code
treewide: mark stuff as __ro_after_init
fs: ocfs2: check status values
proc: test /proc/${pid}/statm
compiler.h: move __is_constexpr() to compiler.h
...
Diffstat (limited to 'fs')
-rw-r--r--   fs/anon_inodes.c                    |  4
-rw-r--r--   fs/buffer.c                         |  4
-rw-r--r--   fs/char_dev.c                       |  2
-rw-r--r--   fs/dcache.c                         |  8
-rw-r--r--   fs/direct-io.c                      |  2
-rw-r--r--   fs/eventpoll.c                      |  6
-rw-r--r--   fs/fcntl.c                          |  2
-rw-r--r--   fs/file_table.c                     |  2
-rw-r--r--   fs/gfs2/ops_fstype.c                |  9
-rw-r--r--   fs/inode.c                          |  8
-rw-r--r--   fs/kernfs/mount.c                   |  5
-rw-r--r--   fs/locks.c                          |  4
-rw-r--r--   fs/namespace.c                      | 16
-rw-r--r--   fs/notify/dnotify/dnotify.c         |  6
-rw-r--r--   fs/notify/fanotify/fanotify_user.c  |  8
-rw-r--r--   fs/notify/inotify/inotify_user.c    |  2
-rw-r--r--   fs/ocfs2/alloc.c                    | 11
-rw-r--r--   fs/ocfs2/buffer_head_io.c           |  4
-rw-r--r--   fs/ocfs2/dlmfs/dlmfs.c              |  3
-rw-r--r--   fs/ocfs2/journal.c                  |  2
-rw-r--r--   fs/ocfs2/namei.c                    |  8
-rw-r--r--   fs/ocfs2/quota_local.c              |  4
-rw-r--r--   fs/pipe.c                           |  2
-rw-r--r--   fs/proc/array.c                     |  7
-rw-r--r--   fs/proc/base.c                      | 52
-rw-r--r--   fs/proc/inode.c                     | 11
-rw-r--r--   fs/proc/task_mmu.c                  | 11
-rw-r--r--   fs/userfaultfd.c                    |  2
28 files changed, 110 insertions, 95 deletions
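
Most of the fs/ churn below is the mechanical __read_mostly -> __ro_after_init conversion from "treewide: mark stuff as __ro_after_init": pointers and hash parameters that are assigned once during boot move into a section the kernel write-protects when init finishes, so a stray later write faults instead of silently corrupting them. A rough userspace analogy (an assumption for illustration only -- the kernel freezes the whole .data..ro_after_init section at the end of boot rather than calling mprotect() per object):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        void *cachep;   /* stand-in for something like filp_cachep */

        /* "init time": the object is writable while it is being set up */
        if (posix_memalign(&cachep, pagesz, pagesz))
                return 1;
        strcpy(cachep, "kmem_cache pointer set during boot");

        /* "end of init": freeze the page, as __ro_after_init data is frozen */
        if (mprotect(cachep, pagesz, PROT_READ))
                return 1;

        printf("frozen: %s\n", (char *)cachep);
        /* strcpy(cachep, "late write"); would now SIGSEGV, which is the
         * whole point of the annotation */
        return 0;
}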
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 24192a7667ed..d26222b7eefe 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -24,8 +24,8 @@
 #include <linux/uaccess.h>
 
-static struct vfsmount *anon_inode_mnt __read_mostly;
-static struct inode *anon_inode_inode;
+static struct vfsmount *anon_inode_mnt __ro_after_init;
+static struct inode *anon_inode_inode __ro_after_init;
 
 /*
  * anon_inodefs_dname() is called from d_path().
diff --git a/fs/buffer.c b/fs/buffer.c
index 657a62bab73d..967f34b70aa8 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2983,13 +2983,13 @@ EXPORT_SYMBOL(try_to_free_buffers);
 /*
  * Buffer-head allocation
  */
-static struct kmem_cache *bh_cachep __read_mostly;
+static struct kmem_cache *bh_cachep __ro_after_init;
 
 /*
  * Once the number of bh's in the machine exceeds this level, we start
  * stripping them in writeback.
  */
-static unsigned long max_buffer_heads;
+static unsigned long max_buffer_heads __ro_after_init;
 
 int buffer_heads_over_limit;
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 6ba032442b39..57cc096c498a 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -25,7 +25,7 @@
 #include "internal.h"
 
-static struct kobj_map *cdev_map;
+static struct kobj_map *cdev_map __ro_after_init;
 
 static DEFINE_MUTEX(chrdevs_lock);
diff --git a/fs/dcache.c b/fs/dcache.c
index 796e23761ba0..c82ae731df9a 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -78,7 +78,7 @@ __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 EXPORT_SYMBOL(rename_lock);
 
-static struct kmem_cache *dentry_cache __read_mostly;
+static struct kmem_cache *dentry_cache __ro_after_init;
 
 const struct qstr empty_name = QSTR_INIT("", 0);
 EXPORT_SYMBOL(empty_name);
@@ -96,9 +96,9 @@ EXPORT_SYMBOL(dotdot_name);
  * information, yet avoid using a prime hash-size or similar.
  */
 
-static unsigned int d_hash_shift __read_mostly;
+static unsigned int d_hash_shift __ro_after_init;
 
-static struct hlist_bl_head *dentry_hashtable __read_mostly;
+static struct hlist_bl_head *dentry_hashtable __ro_after_init;
 
 static inline struct hlist_bl_head *d_hash(unsigned int hash)
 {
@@ -3332,7 +3332,7 @@ static void __init dcache_init(void)
 }
 
 /* SLAB cache for __getname() consumers */
-struct kmem_cache *names_cachep __read_mostly;
+struct kmem_cache *names_cachep __ro_after_init;
 EXPORT_SYMBOL(names_cachep);
 
 void __init vfs_caches_init_early(void)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 7bc494ee56b9..20533266ade6 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -151,7 +151,7 @@ struct dio {
        };
 } ____cacheline_aligned_in_smp;
 
-static struct kmem_cache *dio_cache __read_mostly;
+static struct kmem_cache *dio_cache __ro_after_init;
 
 /*
  * How many pages are in the queue?
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 1d9a71a0c4c1..2877cc01cff1 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -256,10 +256,10 @@ static u64 loop_check_gen = 0;
 static struct eventpoll *inserting_into;
 
 /* Slab cache used to allocate "struct epitem" */
-static struct kmem_cache *epi_cache __read_mostly;
+static struct kmem_cache *epi_cache __ro_after_init;
 
 /* Slab cache used to allocate "struct eppoll_entry" */
-static struct kmem_cache *pwq_cache __read_mostly;
+static struct kmem_cache *pwq_cache __ro_after_init;
 
 /*
  * List of files with newly added links, where we may need to limit the number
@@ -271,7 +271,7 @@ struct epitems_head {
 };
 static struct epitems_head *tfile_check_list = EP_UNACTIVE_PTR;
 
-static struct kmem_cache *ephead_cache __read_mostly;
+static struct kmem_cache *ephead_cache __ro_after_init;
 
 static inline void free_ephead(struct epitems_head *head)
 {
diff --git a/fs/fcntl.c b/fs/fcntl.c
index e871009f6c88..c80a6acad742 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -844,7 +844,7 @@ int send_sigurg(struct fown_struct *fown)
 }
 
 static DEFINE_SPINLOCK(fasync_lock);
-static struct kmem_cache *fasync_cache __read_mostly;
+static struct kmem_cache *fasync_cache __ro_after_init;
 
 static void fasync_free_rcu(struct rcu_head *head)
 {
diff --git a/fs/file_table.c b/fs/file_table.c
index fa92743ba6a9..de4a2915bfd4 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -40,7 +40,7 @@ static struct files_stat_struct files_stat = {
 };
 
 /* SLAB cache for file structures */
-static struct kmem_cache *filp_cachep __read_mostly;
+static struct kmem_cache *filp_cachep __ro_after_init;
 
 static struct percpu_counter nr_files __cacheline_aligned_in_smp;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 33ca04733e93..ecf789b7168c 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1126,8 +1126,7 @@ static int init_threads(struct gfs2_sbd *sdp)
        return 0;
 
 fail:
-       kthread_stop(sdp->sd_logd_process);
-       put_task_struct(sdp->sd_logd_process);
+       kthread_stop_put(sdp->sd_logd_process);
        sdp->sd_logd_process = NULL;
        return error;
 }
@@ -1135,13 +1134,11 @@ fail:
 void gfs2_destroy_threads(struct gfs2_sbd *sdp)
 {
        if (sdp->sd_logd_process) {
-               kthread_stop(sdp->sd_logd_process);
-               put_task_struct(sdp->sd_logd_process);
+               kthread_stop_put(sdp->sd_logd_process);
                sdp->sd_logd_process = NULL;
        }
        if (sdp->sd_quotad_process) {
-               kthread_stop(sdp->sd_quotad_process);
-               put_task_struct(sdp->sd_quotad_process);
+               kthread_stop_put(sdp->sd_quotad_process);
                sdp->sd_quotad_process = NULL;
        }
 }
diff --git a/fs/inode.c b/fs/inode.c
index 4f8984b97df0..edcd8a61975f 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -54,9 +54,9 @@
  *   inode_hash_lock
  */
 
-static unsigned int i_hash_mask __read_mostly;
-static unsigned int i_hash_shift __read_mostly;
-static struct hlist_head *inode_hashtable __read_mostly;
+static unsigned int i_hash_mask __ro_after_init;
+static unsigned int i_hash_shift __ro_after_init;
+static struct hlist_head *inode_hashtable __ro_after_init;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
 
 /*
@@ -70,7 +70,7 @@ EXPORT_SYMBOL(empty_aops);
 static DEFINE_PER_CPU(unsigned long, nr_inodes);
 static DEFINE_PER_CPU(unsigned long, nr_unused);
 
-static struct kmem_cache *inode_cachep __read_mostly;
+static struct kmem_cache *inode_cachep __ro_after_init;
 
 static long get_nr_inodes(void)
 {
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index 79b96e74a8a0..4628edde2e7e 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -21,8 +21,9 @@
 #include "kernfs-internal.h"
 
-struct kmem_cache *kernfs_node_cache, *kernfs_iattrs_cache;
-struct kernfs_global_locks *kernfs_locks;
+struct kmem_cache *kernfs_node_cache __ro_after_init;
+struct kmem_cache *kernfs_iattrs_cache __ro_after_init;
+struct kernfs_global_locks *kernfs_locks __ro_after_init;
 
 static int kernfs_sop_show_options(struct seq_file *sf, struct dentry *dentry)
 {
diff --git a/fs/locks.c b/fs/locks.c
index d4e49a990a8d..46d88b9e222c 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -167,8 +167,8 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
  */
 static DEFINE_SPINLOCK(blocked_lock_lock);
 
-static struct kmem_cache *flctx_cache __read_mostly;
-static struct kmem_cache *filelock_cache __read_mostly;
+static struct kmem_cache *flctx_cache __ro_after_init;
+static struct kmem_cache *filelock_cache __ro_after_init;
 
 static struct file_lock_context *
 locks_get_lock_context(struct inode *inode, int type)
diff --git a/fs/namespace.c b/fs/namespace.c
index 6bde71735efa..fbf0e596fcd3 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -39,10 +39,10 @@
 /* Maximum number of mounts in a mount namespace */
 static unsigned int sysctl_mount_max __read_mostly = 100000;
 
-static unsigned int m_hash_mask __read_mostly;
-static unsigned int m_hash_shift __read_mostly;
-static unsigned int mp_hash_mask __read_mostly;
-static unsigned int mp_hash_shift __read_mostly;
+static unsigned int m_hash_mask __ro_after_init;
+static unsigned int m_hash_shift __ro_after_init;
+static unsigned int mp_hash_mask __ro_after_init;
+static unsigned int mp_hash_shift __ro_after_init;
 
 static __initdata unsigned long mhash_entries;
 static int __init set_mhash_entries(char *str)
@@ -68,9 +68,9 @@ static u64 event;
 static DEFINE_IDA(mnt_id_ida);
 static DEFINE_IDA(mnt_group_ida);
 
-static struct hlist_head *mount_hashtable __read_mostly;
-static struct hlist_head *mountpoint_hashtable __read_mostly;
-static struct kmem_cache *mnt_cache __read_mostly;
+static struct hlist_head *mount_hashtable __ro_after_init;
+static struct hlist_head *mountpoint_hashtable __ro_after_init;
+static struct kmem_cache *mnt_cache __ro_after_init;
 static DECLARE_RWSEM(namespace_sem);
 static HLIST_HEAD(unmounted); /* protected by namespace_sem */
 static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
@@ -86,7 +86,7 @@ struct mount_kattr {
 };
 
 /* /sys/fs */
-struct kobject *fs_kobj;
+struct kobject *fs_kobj __ro_after_init;
 EXPORT_SYMBOL_GPL(fs_kobj);
 
 /*
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index 869b016014d2..1cb9ad7e884e 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -39,9 +39,9 @@ static void __init dnotify_sysctl_init(void)
 #define dnotify_sysctl_init() do { } while (0)
 #endif
 
-static struct kmem_cache *dnotify_struct_cache __read_mostly;
-static struct kmem_cache *dnotify_mark_cache __read_mostly;
-static struct fsnotify_group *dnotify_group __read_mostly;
+static struct kmem_cache *dnotify_struct_cache __ro_after_init;
+static struct kmem_cache *dnotify_mark_cache __ro_after_init;
+static struct fsnotify_group *dnotify_group __ro_after_init;
 
 /*
  * dnotify will attach one of these to each inode (i_fsnotify_marks) which
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 62fe0b679e58..45aecdc302f4 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -112,10 +112,10 @@ static void __init fanotify_sysctls_init(void)
 
 extern const struct fsnotify_ops fanotify_fsnotify_ops;
 
-struct kmem_cache *fanotify_mark_cache __read_mostly;
-struct kmem_cache *fanotify_fid_event_cachep __read_mostly;
-struct kmem_cache *fanotify_path_event_cachep __read_mostly;
-struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
+struct kmem_cache *fanotify_mark_cache __ro_after_init;
+struct kmem_cache *fanotify_fid_event_cachep __ro_after_init;
+struct kmem_cache *fanotify_path_event_cachep __ro_after_init;
+struct kmem_cache *fanotify_perm_event_cachep __ro_after_init;
 
 #define FANOTIFY_EVENT_ALIGN 4
 #define FANOTIFY_FID_INFO_HDR_LEN \
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 1c4bfdab008d..a3809ae92170 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -49,7 +49,7 @@
 /* configurable via /proc/sys/fs/inotify/ */
 static int inotify_max_queued_events __read_mostly;
 
-struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
+struct kmem_cache *inotify_inode_mark_cachep __ro_after_init;
 
 #ifdef CONFIG_SYSCTL
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index f0937902f7b4..91b32b2377ac 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -967,7 +967,14 @@ int ocfs2_num_free_extents(struct ocfs2_extent_tree *et)
                el = &eb->h_list;
        }
 
-       BUG_ON(el->l_tree_depth != 0);
+       if (el->l_tree_depth != 0) {
+               retval = ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
+                               "Owner %llu has leaf extent block %llu with an invalid l_tree_depth of %u\n",
+                               (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+                               (unsigned long long)last_eb_blk,
+                               le16_to_cpu(el->l_tree_depth));
+               goto bail;
+       }
 
        retval = le16_to_cpu(el->l_count) - le16_to_cpu(el->l_next_free_rec);
 bail:
@@ -7642,7 +7649,7 @@ out_mutex:
                goto next_group;
        }
 out:
-       range->len = trimmed * sb->s_blocksize;
+       range->len = trimmed * osb->s_clustersize;
 
        return ret;
 }
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 196638a22b48..cdb9b9bdea1f 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -158,7 +158,7 @@ read_failure:
                if (new_bh && bh) {
                        /* If middle bh fails, let previous bh
                         * finish its read and then put it to
-                        * aovoid bh leak
+                        * avoid bh leak
                         */
                        if (!buffer_jbd(bh))
                                wait_on_buffer(bh);
@@ -345,7 +345,7 @@ read_failure:
                if (new_bh && bh) {
                        /* If middle bh fails, let previous bh
                         * finish its read and then put it to
-                        * aovoid bh leak
+                        * avoid bh leak
                         */
                        if (!buffer_jbd(bh))
                                wait_on_buffer(bh);
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 9b57d012fd5c..85215162c9dd 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -80,8 +80,7 @@ static int param_set_dlmfs_capabilities(const char *val,
 static int param_get_dlmfs_capabilities(char *buffer,
                                        const struct kernel_param *kp)
 {
-       return strlcpy(buffer, DLMFS_CAPABILITIES,
-                      strlen(DLMFS_CAPABILITIES) + 1);
+       return sysfs_emit(buffer, DLMFS_CAPABILITIES);
 }
 module_param_call(capabilities, param_set_dlmfs_capabilities,
                  param_get_dlmfs_capabilities, NULL, 0444);
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index ce215565d061..604fea3a26ff 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -90,7 +90,7 @@ enum ocfs2_replay_state {
 struct ocfs2_replay_map {
        unsigned int rm_slots;
        enum ocfs2_replay_state rm_state;
-       unsigned char rm_replay_slots[];
+       unsigned char rm_replay_slots[] __counted_by(rm_slots);
 };
 
 static void ocfs2_replay_map_set_state(struct ocfs2_super *osb, int state)
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 681e9501cdd3..814733ba2f4b 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -1597,6 +1597,10 @@ static int ocfs2_rename(struct mnt_idmap *idmap,
        if (update_dot_dot) {
                status = ocfs2_update_entry(old_inode, handle,
                                            &old_inode_dot_dot_res, new_dir);
+               if (status < 0) {
+                       mlog_errno(status);
+                       goto bail;
+               }
                drop_nlink(old_dir);
                if (new_inode) {
                        drop_nlink(new_inode);
@@ -1636,6 +1640,10 @@ static int ocfs2_rename(struct mnt_idmap *idmap,
                                                 INODE_CACHE(old_dir),
                                                 old_dir_bh,
                                                 OCFS2_JOURNAL_ACCESS_WRITE);
+               if (status < 0) {
+                       mlog_errno(status);
+                       goto bail;
+               }
                fe = (struct ocfs2_dinode *) old_dir_bh->b_data;
                ocfs2_set_links_count(fe, old_dir->i_nlink);
                ocfs2_journal_dirty(handle, old_dir_bh);
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index dfaae1e52412..e09842fc9d4d 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -1240,6 +1240,10 @@ int ocfs2_create_local_dquot(struct dquot *dquot)
                                          &od->dq_local_phys_blk,
                                          &pcount,
                                          NULL);
+       if (status < 0) {
+               mlog_errno(status);
+               goto out;
+       }
 
        /* Initialize dquot structure on disk */
        status = ocfs2_local_write_dquot(dquot);
diff --git a/fs/pipe.c b/fs/pipe.c
index 8916c455a469..804a7d789452 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -864,7 +864,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
        kfree(pipe);
 }
 
-static struct vfsmount *pipe_mnt __read_mostly;
+static struct vfsmount *pipe_mnt __ro_after_init;
 
 /*
  * pipefs_dname() is called from d_path().
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 2c2efbe685d8..ff08a8957552 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -536,12 +536,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
                /* add up live thread stats at the group level */
                if (whole) {
-                       struct task_struct *t = task;
-                       do {
+                       struct task_struct *t;
+
+                       __for_each_thread(sig, t) {
                                min_flt += t->min_flt;
                                maj_flt += t->maj_flt;
                                gtime += task_gtime(t);
-                       } while_each_thread(task, t);
+                       }
 
                        min_flt += sig->min_flt;
                        maj_flt += sig->maj_flt;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 83396ab14998..dd31e3b6bf77 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1153,11 +1153,10 @@ err_unlock:
 static ssize_t oom_adj_write(struct file *file, const char __user *buf,
                             size_t count, loff_t *ppos)
 {
-       char buffer[PROC_NUMBUF];
+       char buffer[PROC_NUMBUF] = {};
        int oom_adj;
        int err;
 
-       memset(buffer, 0, sizeof(buffer));
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
        if (copy_from_user(buffer, buf, count)) {
@@ -1213,11 +1212,10 @@ static ssize_t oom_score_adj_read(struct file *file, char __user *buf,
 static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
                                        size_t count, loff_t *ppos)
 {
-       char buffer[PROC_NUMBUF];
+       char buffer[PROC_NUMBUF] = {};
        int oom_score_adj;
        int err;
 
-       memset(buffer, 0, sizeof(buffer));
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
        if (copy_from_user(buffer, buf, count)) {
@@ -1358,13 +1356,13 @@ static ssize_t proc_fault_inject_write(struct file * file,
                        const char __user * buf, size_t count, loff_t *ppos)
 {
        struct task_struct *task;
-       char buffer[PROC_NUMBUF];
+       char buffer[PROC_NUMBUF] = {};
        int make_it_fail;
        int rv;
 
        if (!capable(CAP_SYS_RESOURCE))
                return -EPERM;
-       memset(buffer, 0, sizeof(buffer));
+
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
        if (copy_from_user(buffer, buf, count))
@@ -1509,11 +1507,10 @@ sched_autogroup_write(struct file *file, const char __user *buf,
 {
        struct inode *inode = file_inode(file);
        struct task_struct *p;
-       char buffer[PROC_NUMBUF];
+       char buffer[PROC_NUMBUF] = {};
        int nice;
        int err;
 
-       memset(buffer, 0, sizeof(buffer));
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
        if (copy_from_user(buffer, buf, count))
@@ -1666,10 +1663,9 @@ static ssize_t comm_write(struct file *file, const char __user *buf,
 {
        struct inode *inode = file_inode(file);
        struct task_struct *p;
-       char buffer[TASK_COMM_LEN];
+       char buffer[TASK_COMM_LEN] = {};
        const size_t maxlen = sizeof(buffer) - 1;
 
-       memset(buffer, 0, sizeof(buffer));
        if (copy_from_user(buffer, buf, count > maxlen ? maxlen : count))
                return -EFAULT;
@@ -2976,8 +2972,7 @@ static const struct file_operations proc_coredump_filter_operations = {
 #ifdef CONFIG_TASK_IO_ACCOUNTING
 static int do_io_accounting(struct task_struct *task, struct seq_file *m, int whole)
 {
-       struct task_io_accounting acct = task->ioac;
-       unsigned long flags;
+       struct task_io_accounting acct;
        int result;
 
        result = down_read_killable(&task->signal->exec_update_lock);
@@ -2989,15 +2984,28 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int whole)
                goto out_unlock;
        }
 
-       if (whole && lock_task_sighand(task, &flags)) {
-               struct task_struct *t = task;
+       if (whole) {
+               struct signal_struct *sig = task->signal;
+               struct task_struct *t;
+               unsigned int seq = 1;
+               unsigned long flags;
+
+               rcu_read_lock();
+               do {
+                       seq++; /* 2 on the 1st/lockless path, otherwise odd */
+                       flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
 
-               task_io_accounting_add(&acct, &task->signal->ioac);
-               while_each_thread(task, t)
-                       task_io_accounting_add(&acct, &t->ioac);
+                       acct = sig->ioac;
+                       __for_each_thread(sig, t)
+                               task_io_accounting_add(&acct, &t->ioac);
 
-               unlock_task_sighand(task, &flags);
+               } while (need_seqretry(&sig->stats_lock, seq));
+               done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
+               rcu_read_unlock();
+       } else {
+               acct = task->ioac;
        }
+
        seq_printf(m,
                   "rchar: %llu\n"
                   "wchar: %llu\n"
@@ -3818,7 +3826,7 @@ static struct task_struct *first_tid(struct pid *pid, int tid, loff_t f_pos,
        for_each_thread(task, pos) {
                if (!nr--)
                        goto found;
-       };
+       }
 fail:
        pos = NULL;
        goto out;
@@ -3840,10 +3848,8 @@ static struct task_struct *next_tid(struct task_struct *start)
        struct task_struct *pos = NULL;
        rcu_read_lock();
        if (pid_alive(start)) {
-               pos = next_thread(start);
-               if (thread_group_leader(pos))
-                       pos = NULL;
-               else
+               pos = __next_thread(start);
+               if (pos)
                        get_task_struct(pos);
        }
        rcu_read_unlock();
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 592ed2516f47..b33e490e3fd9 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -110,18 +110,15 @@ void __init proc_init_kmemcache(void)
 void proc_invalidate_siblings_dcache(struct hlist_head *inodes,
                                     spinlock_t *lock)
 {
-       struct inode *inode;
-       struct proc_inode *ei;
        struct hlist_node *node;
        struct super_block *old_sb = NULL;
 
        rcu_read_lock();
-       for (;;) {
+       while ((node = hlist_first_rcu(inodes))) {
+               struct proc_inode *ei = hlist_entry(node, struct proc_inode, sibling_inodes);
                struct super_block *sb;
-               node = hlist_first_rcu(inodes);
-               if (!node)
-                       break;
-               ei = hlist_entry(node, struct proc_inode, sibling_inodes);
+               struct inode *inode;
+
                spin_lock(lock);
                hlist_del_init_rcu(&ei->sibling_inodes);
                spin_unlock(lock);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 4abd51053f76..ef2eb12906da 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -851,9 +851,7 @@ static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
 static int show_smap(struct seq_file *m, void *v)
 {
        struct vm_area_struct *vma = v;
-       struct mem_size_stats mss;
-
-       memset(&mss, 0, sizeof(mss));
+       struct mem_size_stats mss = {};
 
        smap_gather_stats(vma, &mss, 0);
 
@@ -879,7 +877,7 @@ static int show_smap(struct seq_file *m, void *v)
 static int show_smaps_rollup(struct seq_file *m, void *v)
 {
        struct proc_maps_private *priv = m->private;
-       struct mem_size_stats mss;
+       struct mem_size_stats mss = {};
        struct mm_struct *mm = priv->mm;
        struct vm_area_struct *vma;
        unsigned long vma_start = 0, last_vma_end = 0;
@@ -895,8 +893,6 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
                goto out_put_task;
        }
 
-       memset(&mss, 0, sizeof(mss));
-
        ret = mmap_read_lock_killable(mm);
        if (ret)
                goto out_put_mm;
@@ -1248,14 +1244,13 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
 {
        struct task_struct *task;
-       char buffer[PROC_NUMBUF];
+       char buffer[PROC_NUMBUF] = {};
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        enum clear_refs_types type;
        int itype;
        int rv;
 
-       memset(buffer, 0, sizeof(buffer));
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
        if (copy_from_user(buffer, buf, count))
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index ac616cfbacf5..e8af40b05549 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -49,7 +49,7 @@ static struct ctl_table vm_userfaultfd_table[] = {
 };
 #endif
 
-static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;
+static struct kmem_cache *userfaultfd_ctx_cachep __ro_after_init;
 
 /*
  * Start with fault_pending_wqh and fault_wqh so they're more likely
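
The do_io_accounting() change in fs/proc/base.c above swaps the sighand lock for sig->stats_lock: the reader snapshots the per-thread counters and simply retries if a writer updated the sequence count in the meantime. Stripped of the kernel's seqlock_t/RCU machinery, the retry idiom looks roughly like the C11 sketch below (an analogy under simplifying assumptions -- single writer, simplified memory ordering -- not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int seq;          /* even = stable, odd = write in flight */
static unsigned long long rchar, wchar;   /* the counters being aggregated */

static void writer_update(unsigned long long r, unsigned long long w)
{
        atomic_fetch_add_explicit(&seq, 1, memory_order_release);  /* now odd */
        rchar += r;
        wchar += w;
        atomic_fetch_add_explicit(&seq, 1, memory_order_release);  /* even again */
}

static void reader_snapshot(unsigned long long *r, unsigned long long *w)
{
        unsigned int start;

        do {
                /* wait for a stable (even) sequence and remember it */
                do {
                        start = atomic_load_explicit(&seq, memory_order_acquire);
                } while (start & 1);

                *r = rchar;
                *w = wchar;
                /* if a writer bumped the sequence while we copied, retry */
        } while (atomic_load_explicit(&seq, memory_order_acquire) != start);
}

int main(void)
{
        unsigned long long r, w;

        writer_update(4096, 512);
        reader_snapshot(&r, &w);
        printf("rchar: %llu\nwchar: %llu\n", r, w);
        return 0;
}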