From 79bd9814e5ec9a288d6599f53aeac0b548fdfe52 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 22 Nov 2013 18:20:42 -0500 Subject: cgroup, memcg: move cgroup_event implementation to memcg cgroup_event is way over-designed and tries to build a generic flexible event mechanism into cgroup - fully customizable event specification for each user of the interface. This is utterly unnecessary and overboard especially in the light of the planned unified hierarchy as there's gonna be single agent. Simply generating events at fixed points, or if that's too restrictive, configureable cadence or single set of configureable points should be enough. Thankfully, memcg is the only user and gets to keep it. Replacing it with something simpler on sane_behavior is strongly recommended. This patch moves cgroup_event and "cgroup.event_control" implementation to mm/memcontrol.c. Clearing of events on cgroup destruction is moved from cgroup_destroy_locked() to mem_cgroup_css_offline(), which shouldn't make any noticeable difference. cgroup_css() and __file_cft() are exported to enable the move; however, this will soon be reverted once the event code is updated to be memcg specific. Note that "cgroup.event_control" will now exist only on the hierarchy with memcg attached to it. While this change is visible to userland, it is unlikely to be noticeable as the file has never been meaningful outside memcg. Aside from the above change, this is pure code relocation. v2: Per Li Zefan's comments, init/Kconfig updated accordingly and poll.h inclusion moved from cgroup.c to memcontrol.c. Signed-off-by: Tejun Heo Acked-by: Li Zefan Acked-by: Kirill A. Shutemov Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Balbir Singh --- mm/memcontrol.c | 248 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 248 insertions(+) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 13b9d0f221b8..02dae3292668 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -55,6 +56,7 @@ #include #include #include +#include #include "internal.h" #include #include @@ -226,6 +228,36 @@ struct mem_cgroup_eventfd_list { struct eventfd_ctx *eventfd; }; +/* + * cgroup_event represents events which userspace want to receive. + */ +struct cgroup_event { + /* + * css which the event belongs to. + */ + struct cgroup_subsys_state *css; + /* + * Control file which the event associated. + */ + struct cftype *cft; + /* + * eventfd to signal userspace about the event. + */ + struct eventfd_ctx *eventfd; + /* + * Each of these stored in a list by the cgroup. + */ + struct list_head list; + /* + * All fields below needed to unregister event when + * userspace closes eventfd. + */ + poll_table pt; + wait_queue_head_t *wqh; + wait_queue_t wait; + struct work_struct remove; +}; + static void mem_cgroup_threshold(struct mem_cgroup *memcg); static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); @@ -5947,6 +5979,202 @@ static void kmem_cgroup_css_offline(struct mem_cgroup *memcg) } #endif +/* + * Unregister event and free resources. + * + * Gets called from workqueue. + */ +static void cgroup_event_remove(struct work_struct *work) +{ + struct cgroup_event *event = container_of(work, struct cgroup_event, + remove); + struct cgroup_subsys_state *css = event->css; + + remove_wait_queue(event->wqh, &event->wait); + + event->cft->unregister_event(css, event->cft, event->eventfd); + + /* Notify userspace the event is going away. 
*/ + eventfd_signal(event->eventfd, 1); + + eventfd_ctx_put(event->eventfd); + kfree(event); + css_put(css); +} + +/* + * Gets called on POLLHUP on eventfd when user closes it. + * + * Called with wqh->lock held and interrupts disabled. + */ +static int cgroup_event_wake(wait_queue_t *wait, unsigned mode, + int sync, void *key) +{ + struct cgroup_event *event = container_of(wait, + struct cgroup_event, wait); + struct cgroup *cgrp = event->css->cgroup; + unsigned long flags = (unsigned long)key; + + if (flags & POLLHUP) { + /* + * If the event has been detached at cgroup removal, we + * can simply return knowing the other side will cleanup + * for us. + * + * We can't race against event freeing since the other + * side will require wqh->lock via remove_wait_queue(), + * which we hold. + */ + spin_lock(&cgrp->event_list_lock); + if (!list_empty(&event->list)) { + list_del_init(&event->list); + /* + * We are in atomic context, but cgroup_event_remove() + * may sleep, so we have to call it in workqueue. + */ + schedule_work(&event->remove); + } + spin_unlock(&cgrp->event_list_lock); + } + + return 0; +} + +static void cgroup_event_ptable_queue_proc(struct file *file, + wait_queue_head_t *wqh, poll_table *pt) +{ + struct cgroup_event *event = container_of(pt, + struct cgroup_event, pt); + + event->wqh = wqh; + add_wait_queue(wqh, &event->wait); +} + +/* + * Parse input and register new cgroup event handler. + * + * Input must be in format ' '. + * Interpretation of args is defined by control file implementation. + */ +static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css, + struct cftype *cft, const char *buffer) +{ + struct cgroup *cgrp = dummy_css->cgroup; + struct cgroup_event *event; + struct cgroup_subsys_state *cfile_css; + unsigned int efd, cfd; + struct fd efile; + struct fd cfile; + char *endp; + int ret; + + efd = simple_strtoul(buffer, &endp, 10); + if (*endp != ' ') + return -EINVAL; + buffer = endp + 1; + + cfd = simple_strtoul(buffer, &endp, 10); + if ((*endp != ' ') && (*endp != '\0')) + return -EINVAL; + buffer = endp + 1; + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) + return -ENOMEM; + + INIT_LIST_HEAD(&event->list); + init_poll_funcptr(&event->pt, cgroup_event_ptable_queue_proc); + init_waitqueue_func_entry(&event->wait, cgroup_event_wake); + INIT_WORK(&event->remove, cgroup_event_remove); + + efile = fdget(efd); + if (!efile.file) { + ret = -EBADF; + goto out_kfree; + } + + event->eventfd = eventfd_ctx_fileget(efile.file); + if (IS_ERR(event->eventfd)) { + ret = PTR_ERR(event->eventfd); + goto out_put_efile; + } + + cfile = fdget(cfd); + if (!cfile.file) { + ret = -EBADF; + goto out_put_eventfd; + } + + /* the process need read permission on control file */ + /* AV: shouldn't we check that it's been opened for read instead? */ + ret = inode_permission(file_inode(cfile.file), MAY_READ); + if (ret < 0) + goto out_put_cfile; + + event->cft = __file_cft(cfile.file); + if (IS_ERR(event->cft)) { + ret = PTR_ERR(event->cft); + goto out_put_cfile; + } + + if (!event->cft->ss) { + ret = -EBADF; + goto out_put_cfile; + } + + /* + * Determine the css of @cfile, verify it belongs to the same + * cgroup as cgroup.event_control, and associate @event with it. + * Remaining events are automatically removed on cgroup destruction + * but the removal is asynchronous, so take an extra ref. 
+ */ + rcu_read_lock(); + + ret = -EINVAL; + event->css = cgroup_css(cgrp, event->cft->ss); + cfile_css = css_from_dir(cfile.file->f_dentry->d_parent, event->cft->ss); + if (event->css && event->css == cfile_css && css_tryget(event->css)) + ret = 0; + + rcu_read_unlock(); + if (ret) + goto out_put_cfile; + + if (!event->cft->register_event || !event->cft->unregister_event) { + ret = -EINVAL; + goto out_put_css; + } + + ret = event->cft->register_event(event->css, event->cft, + event->eventfd, buffer); + if (ret) + goto out_put_css; + + efile.file->f_op->poll(efile.file, &event->pt); + + spin_lock(&cgrp->event_list_lock); + list_add(&event->list, &cgrp->event_list); + spin_unlock(&cgrp->event_list_lock); + + fdput(cfile); + fdput(efile); + + return 0; + +out_put_css: + css_put(event->css); +out_put_cfile: + fdput(cfile); +out_put_eventfd: + eventfd_ctx_put(event->eventfd); +out_put_efile: + fdput(efile); +out_kfree: + kfree(event); + + return ret; +} + static struct cftype mem_cgroup_files[] = { { .name = "usage_in_bytes", @@ -5993,6 +6221,12 @@ static struct cftype mem_cgroup_files[] = { .write_u64 = mem_cgroup_hierarchy_write, .read_u64 = mem_cgroup_hierarchy_read, }, + { + .name = "cgroup.event_control", + .write_string = cgroup_write_event_control, + .flags = CFTYPE_NO_PREFIX, + .mode = S_IWUGO, + }, { .name = "swappiness", .read_u64 = mem_cgroup_swappiness_read, @@ -6326,6 +6560,20 @@ static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg) static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); + struct cgroup *cgrp = css->cgroup; + struct cgroup_event *event, *tmp; + + /* + * Unregister events and notify userspace. + * Notify userspace about cgroup removing only after rmdir of cgroup + * directory to avoid race between userspace and kernelspace. + */ + spin_lock(&cgrp->event_list_lock); + list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) { + list_del_init(&event->list); + schedule_work(&event->remove); + } + spin_unlock(&cgrp->event_list_lock); kmem_cgroup_css_offline(memcg); -- cgit v1.2.3 From b5557c4c3b1a38074d7001b87c2482eda3a0834a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 22 Nov 2013 18:20:42 -0500 Subject: memcg: cgroup_write_event_control() now knows @css is for memcg @css for cgroup_write_event_control() is now always for memcg and the target file should be a memcg file too. Drop code which assumes @css is dummy_css and the target file may belong to different subsystems. Signed-off-by: Tejun Heo Acked-by: Li Zefan Acked-by: Kirill A. Shutemov --- mm/memcontrol.c | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 02dae3292668..d00368110b08 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6056,10 +6056,10 @@ static void cgroup_event_ptable_queue_proc(struct file *file, * Input must be in format ' '. * Interpretation of args is defined by control file implementation. 
*/ -static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css, +static int cgroup_write_event_control(struct cgroup_subsys_state *css, struct cftype *cft, const char *buffer) { - struct cgroup *cgrp = dummy_css->cgroup; + struct cgroup *cgrp = css->cgroup; struct cgroup_event *event; struct cgroup_subsys_state *cfile_css; unsigned int efd, cfd; @@ -6082,6 +6082,7 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css, if (!event) return -ENOMEM; + event->css = css; INIT_LIST_HEAD(&event->list); init_poll_funcptr(&event->pt, cgroup_event_ptable_queue_proc); init_waitqueue_func_entry(&event->wait, cgroup_event_wake); @@ -6117,23 +6118,17 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css, goto out_put_cfile; } - if (!event->cft->ss) { - ret = -EBADF; - goto out_put_cfile; - } - /* - * Determine the css of @cfile, verify it belongs to the same - * cgroup as cgroup.event_control, and associate @event with it. - * Remaining events are automatically removed on cgroup destruction - * but the removal is asynchronous, so take an extra ref. + * Verify @cfile should belong to @css. Also, remaining events are + * automatically removed on cgroup destruction but the removal is + * asynchronous, so take an extra ref on @css. */ rcu_read_lock(); ret = -EINVAL; - event->css = cgroup_css(cgrp, event->cft->ss); - cfile_css = css_from_dir(cfile.file->f_dentry->d_parent, event->cft->ss); - if (event->css && event->css == cfile_css && css_tryget(event->css)) + cfile_css = css_from_dir(cfile.file->f_dentry->d_parent, + &mem_cgroup_subsys); + if (cfile_css == css && css_tryget(css)) ret = 0; rcu_read_unlock(); @@ -6145,7 +6140,7 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css, goto out_put_css; } - ret = event->cft->register_event(event->css, event->cft, + ret = event->cft->register_event(css, event->cft, event->eventfd, buffer); if (ret) goto out_put_css; @@ -6162,7 +6157,7 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css, return 0; out_put_css: - css_put(event->css); + css_put(css); out_put_cfile: fdput(cfile); out_put_eventfd: -- cgit v1.2.3 From fba94807837850e211f8975e1970e23e7804ff4d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 22 Nov 2013 18:20:43 -0500 Subject: cgroup, memcg: move cgroup->event_list[_lock] and event callbacks into memcg cgroup_event is being moved from cgroup core to memcg and the implementation is already moved by the previous patch. This patch moves the data fields and callbacks. * cgroup->event_list[_lock] are moved to mem_cgroup. * cftype->[un]register_event() are moved to cgroup_event. This makes it impossible for individual cftype definitions to specify their event callbacks. This is worked around by simply hard-coding filename to event callback mapping in cgroup_write_event_control(). This is awkward and inflexible, which is actually desirable given that we don't want to grow more usages of this feature. * eventfd_ctx declaration is removed from cgroup.h, which makes vmpressure.h miss eventfd_ctx declaration. Include eventfd.h from vmpressure.h. v2: Use file name from dentry instead of cftype. This will allow removing all cftype handling in the function. Signed-off-by: Tejun Heo Acked-by: Li Zefan Acked-by: Kirill A. 
Shutemov Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Balbir Singh --- include/linux/cgroup.h | 24 ------------- include/linux/vmpressure.h | 1 + kernel/cgroup.c | 2 -- mm/memcontrol.c | 87 ++++++++++++++++++++++++++++++++-------------- 4 files changed, 61 insertions(+), 53 deletions(-) (limited to 'mm') diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 40c2427806c9..612adc5b87c5 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -29,7 +29,6 @@ struct cgroup_subsys; struct inode; struct cgroup; struct css_id; -struct eventfd_ctx; extern int cgroup_init_early(void); extern int cgroup_init(void); @@ -239,10 +238,6 @@ struct cgroup { struct rcu_head rcu_head; struct work_struct destroy_work; - /* List of events which userspace want to receive */ - struct list_head event_list; - spinlock_t event_list_lock; - /* directory xattrs */ struct simple_xattrs xattrs; }; @@ -506,25 +501,6 @@ struct cftype { int (*trigger)(struct cgroup_subsys_state *css, unsigned int event); int (*release)(struct inode *inode, struct file *file); - - /* - * register_event() callback will be used to add new userspace - * waiter for changes related to the cftype. Implement it if - * you want to provide this functionality. Use eventfd_signal() - * on eventfd to send notification to userspace. - */ - int (*register_event)(struct cgroup_subsys_state *css, - struct cftype *cft, struct eventfd_ctx *eventfd, - const char *args); - /* - * unregister_event() callback will be called when userspace - * closes the eventfd or on cgroup removing. - * This callback must be implemented, if you want provide - * notification functionality. - */ - void (*unregister_event)(struct cgroup_subsys_state *css, - struct cftype *cft, - struct eventfd_ctx *eventfd); }; /* diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h index 3f3788d49362..9dd1914f1a6c 100644 --- a/include/linux/vmpressure.h +++ b/include/linux/vmpressure.h @@ -7,6 +7,7 @@ #include #include #include +#include struct vmpressure { unsigned long scanned; diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 4bccaa7dda35..feda7c54fa6b 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1352,8 +1352,6 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp) INIT_LIST_HEAD(&cgrp->pidlists); mutex_init(&cgrp->pidlist_mutex); cgrp->dummy_css.cgroup = cgrp; - INIT_LIST_HEAD(&cgrp->event_list); - spin_lock_init(&cgrp->event_list_lock); simple_xattrs_init(&cgrp->xattrs); } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d00368110b08..2fcacb18404b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -248,6 +248,22 @@ struct cgroup_event { * Each of these stored in a list by the cgroup. */ struct list_head list; + /* + * register_event() callback will be used to add new userspace + * waiter for changes related to this event. Use eventfd_signal() + * on eventfd to send notification to userspace. + */ + int (*register_event)(struct cgroup_subsys_state *css, + struct cftype *cft, struct eventfd_ctx *eventfd, + const char *args); + /* + * unregister_event() callback will be called when userspace closes + * the eventfd or on cgroup removing. This callback must be set, + * if you want provide notification functionality. + */ + void (*unregister_event)(struct cgroup_subsys_state *css, + struct cftype *cft, + struct eventfd_ctx *eventfd); /* * All fields below needed to unregister event when * userspace closes eventfd. 
@@ -362,6 +378,10 @@ struct mem_cgroup { atomic_t numainfo_updating; #endif + /* List of events which userspace want to receive */ + struct list_head event_list; + spinlock_t event_list_lock; + struct mem_cgroup_per_node *nodeinfo[0]; /* WARNING: nodeinfo must be the last member here */ }; @@ -5992,7 +6012,7 @@ static void cgroup_event_remove(struct work_struct *work) remove_wait_queue(event->wqh, &event->wait); - event->cft->unregister_event(css, event->cft, event->eventfd); + event->unregister_event(css, event->cft, event->eventfd); /* Notify userspace the event is going away. */ eventfd_signal(event->eventfd, 1); @@ -6012,7 +6032,7 @@ static int cgroup_event_wake(wait_queue_t *wait, unsigned mode, { struct cgroup_event *event = container_of(wait, struct cgroup_event, wait); - struct cgroup *cgrp = event->css->cgroup; + struct mem_cgroup *memcg = mem_cgroup_from_css(event->css); unsigned long flags = (unsigned long)key; if (flags & POLLHUP) { @@ -6025,7 +6045,7 @@ static int cgroup_event_wake(wait_queue_t *wait, unsigned mode, * side will require wqh->lock via remove_wait_queue(), * which we hold. */ - spin_lock(&cgrp->event_list_lock); + spin_lock(&memcg->event_list_lock); if (!list_empty(&event->list)) { list_del_init(&event->list); /* @@ -6034,7 +6054,7 @@ static int cgroup_event_wake(wait_queue_t *wait, unsigned mode, */ schedule_work(&event->remove); } - spin_unlock(&cgrp->event_list_lock); + spin_unlock(&memcg->event_list_lock); } return 0; @@ -6059,12 +6079,13 @@ static void cgroup_event_ptable_queue_proc(struct file *file, static int cgroup_write_event_control(struct cgroup_subsys_state *css, struct cftype *cft, const char *buffer) { - struct cgroup *cgrp = css->cgroup; + struct mem_cgroup *memcg = mem_cgroup_from_css(css); struct cgroup_event *event; struct cgroup_subsys_state *cfile_css; unsigned int efd, cfd; struct fd efile; struct fd cfile; + const char *name; char *endp; int ret; @@ -6118,6 +6139,31 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *css, goto out_put_cfile; } + /* + * Determine the event callbacks and set them in @event. This used + * to be done via struct cftype but cgroup core no longer knows + * about these events. The following is crude but the whole thing + * is for compatibility anyway. + */ + name = cfile.file->f_dentry->d_name.name; + + if (!strcmp(name, "memory.usage_in_bytes")) { + event->register_event = mem_cgroup_usage_register_event; + event->unregister_event = mem_cgroup_usage_unregister_event; + } else if (!strcmp(name, "memory.oom_control")) { + event->register_event = mem_cgroup_oom_register_event; + event->unregister_event = mem_cgroup_oom_unregister_event; + } else if (!strcmp(name, "memory.pressure_level")) { + event->register_event = vmpressure_register_event; + event->unregister_event = vmpressure_unregister_event; + } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { + event->register_event = mem_cgroup_usage_register_event; + event->unregister_event = mem_cgroup_usage_unregister_event; + } else { + ret = -EINVAL; + goto out_put_cfile; + } + /* * Verify @cfile should belong to @css. 
Also, remaining events are * automatically removed on cgroup destruction but the removal is @@ -6135,21 +6181,15 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *css, if (ret) goto out_put_cfile; - if (!event->cft->register_event || !event->cft->unregister_event) { - ret = -EINVAL; - goto out_put_css; - } - - ret = event->cft->register_event(css, event->cft, - event->eventfd, buffer); + ret = event->register_event(css, event->cft, event->eventfd, buffer); if (ret) goto out_put_css; efile.file->f_op->poll(efile.file, &event->pt); - spin_lock(&cgrp->event_list_lock); - list_add(&event->list, &cgrp->event_list); - spin_unlock(&cgrp->event_list_lock); + spin_lock(&memcg->event_list_lock); + list_add(&event->list, &memcg->event_list); + spin_unlock(&memcg->event_list_lock); fdput(cfile); fdput(efile); @@ -6175,8 +6215,6 @@ static struct cftype mem_cgroup_files[] = { .name = "usage_in_bytes", .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), .read = mem_cgroup_read, - .register_event = mem_cgroup_usage_register_event, - .unregister_event = mem_cgroup_usage_unregister_event, }, { .name = "max_usage_in_bytes", @@ -6236,14 +6274,10 @@ static struct cftype mem_cgroup_files[] = { .name = "oom_control", .read_map = mem_cgroup_oom_control_read, .write_u64 = mem_cgroup_oom_control_write, - .register_event = mem_cgroup_oom_register_event, - .unregister_event = mem_cgroup_oom_unregister_event, .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), }, { .name = "pressure_level", - .register_event = vmpressure_register_event, - .unregister_event = vmpressure_unregister_event, }, #ifdef CONFIG_NUMA { @@ -6291,8 +6325,6 @@ static struct cftype memsw_cgroup_files[] = { .name = "memsw.usage_in_bytes", .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), .read = mem_cgroup_read, - .register_event = mem_cgroup_usage_register_event, - .unregister_event = mem_cgroup_usage_unregister_event, }, { .name = "memsw.max_usage_in_bytes", @@ -6483,6 +6515,8 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) mutex_init(&memcg->thresholds_lock); spin_lock_init(&memcg->move_lock); vmpressure_init(&memcg->vmpressure); + INIT_LIST_HEAD(&memcg->event_list); + spin_lock_init(&memcg->event_list_lock); return &memcg->css; @@ -6555,7 +6589,6 @@ static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg) static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); - struct cgroup *cgrp = css->cgroup; struct cgroup_event *event, *tmp; /* @@ -6563,12 +6596,12 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) * Notify userspace about cgroup removing only after rmdir of cgroup * directory to avoid race between userspace and kernelspace. 
*/ - spin_lock(&cgrp->event_list_lock); - list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) { + spin_lock(&memcg->event_list_lock); + list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { list_del_init(&event->list); schedule_work(&event->remove); } - spin_unlock(&cgrp->event_list_lock); + spin_unlock(&memcg->event_list_lock); kmem_cgroup_css_offline(memcg); -- cgit v1.2.3 From 347c4a8747104a945ecced358944e42879176ca5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 22 Nov 2013 18:20:43 -0500 Subject: memcg: remove cgroup_event->cft The only use of cgroup_event->cft is distinguishing "usage_in_bytes" and "memsw.usgae_in_bytes" for mem_cgroup_usage_[un]register_event(), which can be done by adding an explicit argument to the function and implementing two wrappers so that the two cases can be distinguished from the function alone. Remove cgroup_event->cft and the related code including [un]register_events() methods. Signed-off-by: Tejun Heo Acked-by: Li Zefan Acked-by: Kirill A. Shutemov Acked-by: Michal Hocko --- include/linux/vmpressure.h | 2 -- mm/memcontrol.c | 65 +++++++++++++++++++++++++--------------------- mm/vmpressure.c | 14 +++------- 3 files changed, 38 insertions(+), 43 deletions(-) (limited to 'mm') diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h index 9dd1914f1a6c..b048365a7ed9 100644 --- a/include/linux/vmpressure.h +++ b/include/linux/vmpressure.h @@ -36,11 +36,9 @@ extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg); extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr); extern struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css); extern int vmpressure_register_event(struct cgroup_subsys_state *css, - struct cftype *cft, struct eventfd_ctx *eventfd, const char *args); extern void vmpressure_unregister_event(struct cgroup_subsys_state *css, - struct cftype *cft, struct eventfd_ctx *eventfd); #else static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 2fcacb18404b..3c93dcfd26da 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -236,10 +236,6 @@ struct cgroup_event { * css which the event belongs to. */ struct cgroup_subsys_state *css; - /* - * Control file which the event associated. - */ - struct cftype *cft; /* * eventfd to signal userspace about the event. */ @@ -254,15 +250,13 @@ struct cgroup_event { * on eventfd to send notification to userspace. */ int (*register_event)(struct cgroup_subsys_state *css, - struct cftype *cft, struct eventfd_ctx *eventfd, - const char *args); + struct eventfd_ctx *eventfd, const char *args); /* * unregister_event() callback will be called when userspace closes * the eventfd or on cgroup removing. This callback must be set, * if you want provide notification functionality. 
*/ void (*unregister_event)(struct cgroup_subsys_state *css, - struct cftype *cft, struct eventfd_ctx *eventfd); /* * All fields below needed to unregister event when @@ -5688,13 +5682,12 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) mem_cgroup_oom_notify_cb(iter); } -static int mem_cgroup_usage_register_event(struct cgroup_subsys_state *css, - struct cftype *cft, struct eventfd_ctx *eventfd, const char *args) +static int __mem_cgroup_usage_register_event(struct cgroup_subsys_state *css, + struct eventfd_ctx *eventfd, const char *args, enum res_type type) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); struct mem_cgroup_thresholds *thresholds; struct mem_cgroup_threshold_ary *new; - enum res_type type = MEMFILE_TYPE(cft->private); u64 threshold, usage; int i, size, ret; @@ -5771,13 +5764,24 @@ unlock: return ret; } -static void mem_cgroup_usage_unregister_event(struct cgroup_subsys_state *css, - struct cftype *cft, struct eventfd_ctx *eventfd) +static int mem_cgroup_usage_register_event(struct cgroup_subsys_state *css, + struct eventfd_ctx *eventfd, const char *args) +{ + return __mem_cgroup_usage_register_event(css, eventfd, args, _MEM); +} + +static int memsw_cgroup_usage_register_event(struct cgroup_subsys_state *css, + struct eventfd_ctx *eventfd, const char *args) +{ + return __mem_cgroup_usage_register_event(css, eventfd, args, _MEMSWAP); +} + +static void __mem_cgroup_usage_unregister_event(struct cgroup_subsys_state *css, + struct eventfd_ctx *eventfd, enum res_type type) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); struct mem_cgroup_thresholds *thresholds; struct mem_cgroup_threshold_ary *new; - enum res_type type = MEMFILE_TYPE(cft->private); u64 usage; int i, j, size; @@ -5850,14 +5854,24 @@ unlock: mutex_unlock(&memcg->thresholds_lock); } +static void mem_cgroup_usage_unregister_event(struct cgroup_subsys_state *css, + struct eventfd_ctx *eventfd) +{ + return __mem_cgroup_usage_unregister_event(css, eventfd, _MEM); +} + +static void memsw_cgroup_usage_unregister_event(struct cgroup_subsys_state *css, + struct eventfd_ctx *eventfd) +{ + return __mem_cgroup_usage_unregister_event(css, eventfd, _MEMSWAP); +} + static int mem_cgroup_oom_register_event(struct cgroup_subsys_state *css, - struct cftype *cft, struct eventfd_ctx *eventfd, const char *args) + struct eventfd_ctx *eventfd, const char *args) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); struct mem_cgroup_eventfd_list *event; - enum res_type type = MEMFILE_TYPE(cft->private); - BUG_ON(type != _OOM_TYPE); event = kmalloc(sizeof(*event), GFP_KERNEL); if (!event) return -ENOMEM; @@ -5876,13 +5890,10 @@ static int mem_cgroup_oom_register_event(struct cgroup_subsys_state *css, } static void mem_cgroup_oom_unregister_event(struct cgroup_subsys_state *css, - struct cftype *cft, struct eventfd_ctx *eventfd) + struct eventfd_ctx *eventfd) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); struct mem_cgroup_eventfd_list *ev, *tmp; - enum res_type type = MEMFILE_TYPE(cft->private); - - BUG_ON(type != _OOM_TYPE); spin_lock(&memcg_oom_lock); @@ -6012,7 +6023,7 @@ static void cgroup_event_remove(struct work_struct *work) remove_wait_queue(event->wqh, &event->wait); - event->unregister_event(css, event->cft, event->eventfd); + event->unregister_event(css, event->eventfd); /* Notify userspace the event is going away. 
*/ eventfd_signal(event->eventfd, 1); @@ -6133,12 +6144,6 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *css, if (ret < 0) goto out_put_cfile; - event->cft = __file_cft(cfile.file); - if (IS_ERR(event->cft)) { - ret = PTR_ERR(event->cft); - goto out_put_cfile; - } - /* * Determine the event callbacks and set them in @event. This used * to be done via struct cftype but cgroup core no longer knows @@ -6157,8 +6162,8 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *css, event->register_event = vmpressure_register_event; event->unregister_event = vmpressure_unregister_event; } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { - event->register_event = mem_cgroup_usage_register_event; - event->unregister_event = mem_cgroup_usage_unregister_event; + event->register_event = memsw_cgroup_usage_register_event; + event->unregister_event = memsw_cgroup_usage_unregister_event; } else { ret = -EINVAL; goto out_put_cfile; @@ -6181,7 +6186,7 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *css, if (ret) goto out_put_cfile; - ret = event->register_event(css, event->cft, event->eventfd, buffer); + ret = event->register_event(css, event->eventfd, buffer); if (ret) goto out_put_css; diff --git a/mm/vmpressure.c b/mm/vmpressure.c index e0f62837c3f4..0f25a996d150 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -279,7 +279,6 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio) /** * vmpressure_register_event() - Bind vmpressure notifications to an eventfd * @css: css that is interested in vmpressure notifications - * @cft: cgroup control files handle * @eventfd: eventfd context to link notifications with * @args: event arguments (used to set up a pressure level threshold) * @@ -289,13 +288,10 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio) * threshold (one of vmpressure_str_levels, i.e. "low", "medium", or * "critical"). * - * This function should not be used directly, just pass it to (struct - * cftype).register_event, and then cgroup core will handle everything by - * itself. + * To be used as memcg event method. */ int vmpressure_register_event(struct cgroup_subsys_state *css, - struct cftype *cft, struct eventfd_ctx *eventfd, - const char *args) + struct eventfd_ctx *eventfd, const char *args) { struct vmpressure *vmpr = css_to_vmpressure(css); struct vmpressure_event *ev; @@ -326,19 +322,15 @@ int vmpressure_register_event(struct cgroup_subsys_state *css, /** * vmpressure_unregister_event() - Unbind eventfd from vmpressure * @css: css handle - * @cft: cgroup control files handle * @eventfd: eventfd context that was used to link vmpressure with the @cg * * This function does internal manipulations to detach the @eventfd from * the vmpressure notifications, and then frees internal resources * associated with the @eventfd (but the @eventfd itself is not freed). * - * This function should not be used directly, just pass it to (struct - * cftype).unregister_event, and then cgroup core will handle everything - * by itself. + * To be used as memcg event method. */ void vmpressure_unregister_event(struct cgroup_subsys_state *css, - struct cftype *cft, struct eventfd_ctx *eventfd) { struct vmpressure *vmpr = css_to_vmpressure(css); -- cgit v1.2.3 From 59b6f87344ab5eb3057e5844b8cd8a39e668f477 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 22 Nov 2013 18:20:43 -0500 Subject: memcg: make cgroup_event deal with mem_cgroup instead of cgroup_subsys_state cgroup_event is now memcg specific. 
Replace cgroup_event->css with ->memcg and convert [un]register_event() callbacks to take mem_cgroup pointer instead of cgroup_subsys_state one. This simplifies the code slightly and makes css_to_vmpressure() unnecessary which is removed. Signed-off-by: Tejun Heo Acked-by: Li Zefan Acked-by: Kirill A. Shutemov Acked-by: Michal Hocko --- include/linux/vmpressure.h | 5 ++--- mm/memcontrol.c | 53 +++++++++++++++++++--------------------------- mm/vmpressure.c | 12 +++++------ 3 files changed, 30 insertions(+), 40 deletions(-) (limited to 'mm') diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h index b048365a7ed9..3e4535876d37 100644 --- a/include/linux/vmpressure.h +++ b/include/linux/vmpressure.h @@ -34,11 +34,10 @@ extern void vmpressure_init(struct vmpressure *vmpr); extern void vmpressure_cleanup(struct vmpressure *vmpr); extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg); extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr); -extern struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css); -extern int vmpressure_register_event(struct cgroup_subsys_state *css, +extern int vmpressure_register_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd, const char *args); -extern void vmpressure_unregister_event(struct cgroup_subsys_state *css, +extern void vmpressure_unregister_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd); #else static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3c93dcfd26da..42f2843af1a7 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -233,9 +233,9 @@ struct mem_cgroup_eventfd_list { */ struct cgroup_event { /* - * css which the event belongs to. + * memcg which the event belongs to. */ - struct cgroup_subsys_state *css; + struct mem_cgroup *memcg; /* * eventfd to signal userspace about the event. */ @@ -249,14 +249,14 @@ struct cgroup_event { * waiter for changes related to this event. Use eventfd_signal() * on eventfd to send notification to userspace. */ - int (*register_event)(struct cgroup_subsys_state *css, + int (*register_event)(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd, const char *args); /* * unregister_event() callback will be called when userspace closes * the eventfd or on cgroup removing. This callback must be set, * if you want provide notification functionality. 
*/ - void (*unregister_event)(struct cgroup_subsys_state *css, + void (*unregister_event)(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd); /* * All fields below needed to unregister event when @@ -535,11 +535,6 @@ struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr) return &container_of(vmpr, struct mem_cgroup, vmpressure)->css; } -struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css) -{ - return &mem_cgroup_from_css(css)->vmpressure; -} - static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) { return (memcg == root_mem_cgroup); @@ -5682,10 +5677,9 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) mem_cgroup_oom_notify_cb(iter); } -static int __mem_cgroup_usage_register_event(struct cgroup_subsys_state *css, +static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd, const char *args, enum res_type type) { - struct mem_cgroup *memcg = mem_cgroup_from_css(css); struct mem_cgroup_thresholds *thresholds; struct mem_cgroup_threshold_ary *new; u64 threshold, usage; @@ -5764,22 +5758,21 @@ unlock: return ret; } -static int mem_cgroup_usage_register_event(struct cgroup_subsys_state *css, +static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd, const char *args) { - return __mem_cgroup_usage_register_event(css, eventfd, args, _MEM); + return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); } -static int memsw_cgroup_usage_register_event(struct cgroup_subsys_state *css, +static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd, const char *args) { - return __mem_cgroup_usage_register_event(css, eventfd, args, _MEMSWAP); + return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); } -static void __mem_cgroup_usage_unregister_event(struct cgroup_subsys_state *css, +static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd, enum res_type type) { - struct mem_cgroup *memcg = mem_cgroup_from_css(css); struct mem_cgroup_thresholds *thresholds; struct mem_cgroup_threshold_ary *new; u64 usage; @@ -5854,22 +5847,21 @@ unlock: mutex_unlock(&memcg->thresholds_lock); } -static void mem_cgroup_usage_unregister_event(struct cgroup_subsys_state *css, +static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd) { - return __mem_cgroup_usage_unregister_event(css, eventfd, _MEM); + return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); } -static void memsw_cgroup_usage_unregister_event(struct cgroup_subsys_state *css, +static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd) { - return __mem_cgroup_usage_unregister_event(css, eventfd, _MEMSWAP); + return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); } -static int mem_cgroup_oom_register_event(struct cgroup_subsys_state *css, +static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd, const char *args) { - struct mem_cgroup *memcg = mem_cgroup_from_css(css); struct mem_cgroup_eventfd_list *event; event = kmalloc(sizeof(*event), GFP_KERNEL); @@ -5889,10 +5881,9 @@ static int mem_cgroup_oom_register_event(struct cgroup_subsys_state *css, return 0; } -static void mem_cgroup_oom_unregister_event(struct cgroup_subsys_state *css, +static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd) { - struct mem_cgroup *memcg = 
mem_cgroup_from_css(css); struct mem_cgroup_eventfd_list *ev, *tmp; spin_lock(&memcg_oom_lock); @@ -6019,18 +6010,18 @@ static void cgroup_event_remove(struct work_struct *work) { struct cgroup_event *event = container_of(work, struct cgroup_event, remove); - struct cgroup_subsys_state *css = event->css; + struct mem_cgroup *memcg = event->memcg; remove_wait_queue(event->wqh, &event->wait); - event->unregister_event(css, event->eventfd); + event->unregister_event(memcg, event->eventfd); /* Notify userspace the event is going away. */ eventfd_signal(event->eventfd, 1); eventfd_ctx_put(event->eventfd); kfree(event); - css_put(css); + css_put(&memcg->css); } /* @@ -6043,7 +6034,7 @@ static int cgroup_event_wake(wait_queue_t *wait, unsigned mode, { struct cgroup_event *event = container_of(wait, struct cgroup_event, wait); - struct mem_cgroup *memcg = mem_cgroup_from_css(event->css); + struct mem_cgroup *memcg = event->memcg; unsigned long flags = (unsigned long)key; if (flags & POLLHUP) { @@ -6114,7 +6105,7 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *css, if (!event) return -ENOMEM; - event->css = css; + event->memcg = memcg; INIT_LIST_HEAD(&event->list); init_poll_funcptr(&event->pt, cgroup_event_ptable_queue_proc); init_waitqueue_func_entry(&event->wait, cgroup_event_wake); @@ -6186,7 +6177,7 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *css, if (ret) goto out_put_cfile; - ret = event->register_event(css, event->eventfd, buffer); + ret = event->register_event(memcg, event->eventfd, buffer); if (ret) goto out_put_css; diff --git a/mm/vmpressure.c b/mm/vmpressure.c index 0f25a996d150..196970a4541f 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -278,7 +278,7 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio) /** * vmpressure_register_event() - Bind vmpressure notifications to an eventfd - * @css: css that is interested in vmpressure notifications + * @memcg: memcg that is interested in vmpressure notifications * @eventfd: eventfd context to link notifications with * @args: event arguments (used to set up a pressure level threshold) * @@ -290,10 +290,10 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio) * * To be used as memcg event method. */ -int vmpressure_register_event(struct cgroup_subsys_state *css, +int vmpressure_register_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd, const char *args) { - struct vmpressure *vmpr = css_to_vmpressure(css); + struct vmpressure *vmpr = memcg_to_vmpressure(memcg); struct vmpressure_event *ev; int level; @@ -321,7 +321,7 @@ int vmpressure_register_event(struct cgroup_subsys_state *css, /** * vmpressure_unregister_event() - Unbind eventfd from vmpressure - * @css: css handle + * @memcg: memcg handle * @eventfd: eventfd context that was used to link vmpressure with the @cg * * This function does internal manipulations to detach the @eventfd from @@ -330,10 +330,10 @@ int vmpressure_register_event(struct cgroup_subsys_state *css, * * To be used as memcg event method. 
*/ -void vmpressure_unregister_event(struct cgroup_subsys_state *css, +void vmpressure_unregister_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd) { - struct vmpressure *vmpr = css_to_vmpressure(css); + struct vmpressure *vmpr = memcg_to_vmpressure(memcg); struct vmpressure_event *ev; mutex_lock(&vmpr->events_lock); -- cgit v1.2.3 From 3bc942f372af383f49d56aab599469561a5e39ec Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 22 Nov 2013 18:20:44 -0500 Subject: memcg: rename cgroup_event to mem_cgroup_event cgroup_event is only available in memcg now. Let's brand it that way. While at it, add a comment encouraging deprecation of the feature and remove the respective section from cgroup documentation. This patch is cosmetic. v3: Typo update as per Li Zefan. v2: Index in cgroups.txt updated accordingly as suggested by Li Zefan. Signed-off-by: Tejun Heo Acked-by: Li Zefan Acked-by: Kirill A. Shutemov Acked-by: Michal Hocko --- Documentation/cgroups/cgroups.txt | 20 -------------- mm/memcontrol.c | 57 +++++++++++++++++++++++++-------------- 2 files changed, 37 insertions(+), 40 deletions(-) (limited to 'mm') diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt index 638bf17ff869..821de56d1580 100644 --- a/Documentation/cgroups/cgroups.txt +++ b/Documentation/cgroups/cgroups.txt @@ -24,7 +24,6 @@ CONTENTS: 2.1 Basic Usage 2.2 Attaching processes 2.3 Mounting hierarchies by name - 2.4 Notification API 3. Kernel API 3.1 Overview 3.2 Synchronization @@ -472,25 +471,6 @@ you give a subsystem a name. The name of the subsystem appears as part of the hierarchy description in /proc/mounts and /proc//cgroups. -2.4 Notification API --------------------- - -There is mechanism which allows to get notifications about changing -status of a cgroup. - -To register a new notification handler you need to: - - create a file descriptor for event notification using eventfd(2); - - open a control file to be monitored (e.g. memory.usage_in_bytes); - - write " " to cgroup.event_control. - Interpretation of args is defined by control file implementation; - -eventfd will be woken up by control file implementation or when the -cgroup is removed. - -To unregister a notification handler just close eventfd. - -NOTE: Support of notifications should be implemented for the control -file. See documentation for the subsystem. 3. Kernel API ============= diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 42f2843af1a7..ec8582b3a232 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -231,7 +231,7 @@ struct mem_cgroup_eventfd_list { /* * cgroup_event represents events which userspace want to receive. */ -struct cgroup_event { +struct mem_cgroup_event { /* * memcg which the event belongs to. */ @@ -6001,15 +6001,28 @@ static void kmem_cgroup_css_offline(struct mem_cgroup *memcg) } #endif +/* + * DO NOT USE IN NEW FILES. + * + * "cgroup.event_control" implementation. + * + * This is way over-engineered. It tries to support fully configurable + * events for each user. Such level of flexibility is completely + * unnecessary especially in the light of the planned unified hierarchy. + * + * Please deprecate this and replace with something simpler if at all + * possible. + */ + /* * Unregister event and free resources. * * Gets called from workqueue. 
*/ -static void cgroup_event_remove(struct work_struct *work) +static void memcg_event_remove(struct work_struct *work) { - struct cgroup_event *event = container_of(work, struct cgroup_event, - remove); + struct mem_cgroup_event *event = + container_of(work, struct mem_cgroup_event, remove); struct mem_cgroup *memcg = event->memcg; remove_wait_queue(event->wqh, &event->wait); @@ -6029,11 +6042,11 @@ static void cgroup_event_remove(struct work_struct *work) * * Called with wqh->lock held and interrupts disabled. */ -static int cgroup_event_wake(wait_queue_t *wait, unsigned mode, - int sync, void *key) +static int memcg_event_wake(wait_queue_t *wait, unsigned mode, + int sync, void *key) { - struct cgroup_event *event = container_of(wait, - struct cgroup_event, wait); + struct mem_cgroup_event *event = + container_of(wait, struct mem_cgroup_event, wait); struct mem_cgroup *memcg = event->memcg; unsigned long flags = (unsigned long)key; @@ -6062,27 +6075,29 @@ static int cgroup_event_wake(wait_queue_t *wait, unsigned mode, return 0; } -static void cgroup_event_ptable_queue_proc(struct file *file, +static void memcg_event_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh, poll_table *pt) { - struct cgroup_event *event = container_of(pt, - struct cgroup_event, pt); + struct mem_cgroup_event *event = + container_of(pt, struct mem_cgroup_event, pt); event->wqh = wqh; add_wait_queue(wqh, &event->wait); } /* + * DO NOT USE IN NEW FILES. + * * Parse input and register new cgroup event handler. * * Input must be in format ' '. * Interpretation of args is defined by control file implementation. */ -static int cgroup_write_event_control(struct cgroup_subsys_state *css, - struct cftype *cft, const char *buffer) +static int memcg_write_event_control(struct cgroup_subsys_state *css, + struct cftype *cft, const char *buffer) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); - struct cgroup_event *event; + struct mem_cgroup_event *event; struct cgroup_subsys_state *cfile_css; unsigned int efd, cfd; struct fd efile; @@ -6107,9 +6122,9 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *css, event->memcg = memcg; INIT_LIST_HEAD(&event->list); - init_poll_funcptr(&event->pt, cgroup_event_ptable_queue_proc); - init_waitqueue_func_entry(&event->wait, cgroup_event_wake); - INIT_WORK(&event->remove, cgroup_event_remove); + init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); + init_waitqueue_func_entry(&event->wait, memcg_event_wake); + INIT_WORK(&event->remove, memcg_event_remove); efile = fdget(efd); if (!efile.file) { @@ -6140,6 +6155,8 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *css, * to be done via struct cftype but cgroup core no longer knows * about these events. The following is crude but the whole thing * is for compatibility anyway. + * + * DO NOT ADD NEW FILES. 
*/ name = cfile.file->f_dentry->d_name.name; @@ -6251,8 +6268,8 @@ static struct cftype mem_cgroup_files[] = { .read_u64 = mem_cgroup_hierarchy_read, }, { - .name = "cgroup.event_control", - .write_string = cgroup_write_event_control, + .name = "cgroup.event_control", /* XXX: for compat */ + .write_string = memcg_write_event_control, .flags = CFTYPE_NO_PREFIX, .mode = S_IWUGO, }, @@ -6585,7 +6602,7 @@ static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg) static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); - struct cgroup_event *event, *tmp; + struct mem_cgroup_event *event, *tmp; /* * Unregister events and notify userspace. -- cgit v1.2.3 From 4f024f3797c43cb4b73cd2c50cec728842d0e49e Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Fri, 11 Oct 2013 15:44:27 -0700 Subject: block: Abstract out bvec iterator Immutable biovecs are going to require an explicit iterator. To implement immutable bvecs, a later patch is going to add a bi_bvec_done member to this struct; for now, this patch effectively just renames things. Signed-off-by: Kent Overstreet Cc: Jens Axboe Cc: Geert Uytterhoeven Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: "Ed L. Cashin" Cc: Nick Piggin Cc: Lars Ellenberg Cc: Jiri Kosina Cc: Matthew Wilcox Cc: Geoff Levand Cc: Yehuda Sadeh Cc: Sage Weil Cc: Alex Elder Cc: ceph-devel@vger.kernel.org Cc: Joshua Morris Cc: Philip Kelleher Cc: Rusty Russell Cc: "Michael S. Tsirkin" Cc: Konrad Rzeszutek Wilk Cc: Jeremy Fitzhardinge Cc: Neil Brown Cc: Alasdair Kergon Cc: Mike Snitzer Cc: dm-devel@redhat.com Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: linux390@de.ibm.com Cc: Boaz Harrosh Cc: Benny Halevy Cc: "James E.J. Bottomley" Cc: Greg Kroah-Hartman Cc: "Nicholas A. Bellinger" Cc: Alexander Viro Cc: Chris Mason Cc: "Theodore Ts'o" Cc: Andreas Dilger Cc: Jaegeuk Kim Cc: Steven Whitehouse Cc: Dave Kleikamp Cc: Joern Engel Cc: Prasad Joshi Cc: Trond Myklebust Cc: KONISHI Ryusuke Cc: Mark Fasheh Cc: Joel Becker Cc: Ben Myers Cc: xfs@oss.sgi.com Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Len Brown Cc: Pavel Machek Cc: "Rafael J. Wysocki" Cc: Herton Ronaldo Krzesinski Cc: Ben Hutchings Cc: Andrew Morton Cc: Guo Chao Cc: Tejun Heo Cc: Asai Thambi S P Cc: Selvan Mani Cc: Sam Bradshaw Cc: Wei Yongjun Cc: "Roger Pau MonnĂ©" Cc: Jan Beulich Cc: Stefano Stabellini Cc: Ian Campbell Cc: Sebastian Ott Cc: Christian Borntraeger Cc: Minchan Kim Cc: Jiang Liu Cc: Nitin Gupta Cc: Jerome Marchand Cc: Joe Perches Cc: Peng Tao Cc: Andy Adamson Cc: fanchaoting Cc: Jie Liu Cc: Sunil Mushran Cc: "Martin K. 
Petersen" Cc: Namjae Jeon Cc: Pankaj Kumar Cc: Dan Magenheimer Cc: Mel Gorman 6 --- Documentation/block/biodoc.txt | 7 +-- arch/m68k/emu/nfblock.c | 2 +- arch/powerpc/sysdev/axonram.c | 3 +- block/blk-core.c | 36 ++++++------ block/blk-flush.c | 2 +- block/blk-lib.c | 12 ++-- block/blk-map.c | 6 +- block/blk-merge.c | 4 +- block/blk-mq.c | 2 +- block/blk-throttle.c | 14 ++--- block/elevator.c | 2 +- drivers/block/aoe/aoecmd.c | 6 +- drivers/block/brd.c | 4 +- drivers/block/drbd/drbd_actlog.c | 2 +- drivers/block/drbd/drbd_bitmap.c | 2 +- drivers/block/drbd/drbd_receiver.c | 6 +- drivers/block/drbd/drbd_req.c | 6 +- drivers/block/drbd/drbd_req.h | 2 +- drivers/block/floppy.c | 4 +- drivers/block/loop.c | 4 +- drivers/block/mtip32xx/mtip32xx.c | 7 ++- drivers/block/nvme-core.c | 25 ++++---- drivers/block/pktcdvd.c | 54 +++++++++-------- drivers/block/ps3disk.c | 2 +- drivers/block/ps3vram.c | 2 +- drivers/block/rbd.c | 21 +++---- drivers/block/rsxx/dev.c | 6 +- drivers/block/rsxx/dma.c | 4 +- drivers/block/umem.c | 9 +-- drivers/block/xen-blkback/blkback.c | 2 +- drivers/block/xen-blkfront.c | 2 +- drivers/md/bcache/btree.c | 4 +- drivers/md/bcache/debug.c | 2 +- drivers/md/bcache/io.c | 26 ++++----- drivers/md/bcache/journal.c | 12 ++-- drivers/md/bcache/movinggc.c | 4 +- drivers/md/bcache/request.c | 58 +++++++++--------- drivers/md/bcache/super.c | 16 ++--- drivers/md/bcache/util.c | 4 +- drivers/md/bcache/writeback.c | 6 +- drivers/md/bcache/writeback.h | 2 +- drivers/md/dm-bio-record.h | 12 ++-- drivers/md/dm-bufio.c | 2 +- drivers/md/dm-cache-policy-mq.c | 4 +- drivers/md/dm-cache-target.c | 22 +++---- drivers/md/dm-crypt.c | 19 +++--- drivers/md/dm-delay.c | 7 ++- drivers/md/dm-flakey.c | 7 ++- drivers/md/dm-io.c | 6 +- drivers/md/dm-linear.c | 3 +- drivers/md/dm-raid1.c | 16 ++--- drivers/md/dm-region-hash.c | 3 +- drivers/md/dm-snap.c | 18 +++--- drivers/md/dm-stripe.c | 13 +++-- drivers/md/dm-switch.c | 4 +- drivers/md/dm-thin.c | 22 +++---- drivers/md/dm-verity.c | 8 +-- drivers/md/dm.c | 25 ++++---- drivers/md/faulty.c | 19 +++--- drivers/md/linear.c | 12 ++-- drivers/md/md.c | 10 ++-- drivers/md/multipath.c | 13 +++-- drivers/md/raid0.c | 16 ++--- drivers/md/raid1.c | 75 ++++++++++++------------ drivers/md/raid10.c | 91 ++++++++++++++++------------- drivers/md/raid5.c | 72 ++++++++++++----------- drivers/s390/block/dcssblk.c | 5 +- drivers/s390/block/xpram.c | 9 +-- drivers/scsi/osd/osd_initiator.c | 2 +- drivers/staging/lustre/lustre/llite/lloop.c | 12 ++-- drivers/staging/zram/zram_drv.c | 14 +++-- drivers/target/target_core_iblock.c | 2 +- fs/bio-integrity.c | 8 +-- fs/bio.c | 56 +++++++++--------- fs/btrfs/check-integrity.c | 8 +-- fs/btrfs/compression.c | 17 +++--- fs/btrfs/extent_io.c | 14 ++--- fs/btrfs/file-item.c | 19 +++--- fs/btrfs/inode.c | 22 +++---- fs/btrfs/raid56.c | 22 +++---- fs/btrfs/scrub.c | 12 ++-- fs/btrfs/volumes.c | 12 ++-- fs/buffer.c | 12 ++-- fs/direct-io.c | 4 +- fs/ext4/page-io.c | 4 +- fs/f2fs/data.c | 2 +- fs/f2fs/segment.c | 2 +- fs/gfs2/lops.c | 2 +- fs/gfs2/ops_fstype.c | 2 +- fs/hfsplus/wrapper.c | 2 +- fs/jfs/jfs_logmgr.c | 12 ++-- fs/jfs/jfs_metapage.c | 9 +-- fs/logfs/dev_bdev.c | 20 +++---- fs/mpage.c | 2 +- fs/nfs/blocklayout/blocklayout.c | 9 +-- fs/nilfs2/segbuf.c | 3 +- fs/ocfs2/cluster/heartbeat.c | 2 +- fs/xfs/xfs_aops.c | 2 +- fs/xfs/xfs_buf.c | 4 +- include/linux/bio.h | 16 ++--- include/linux/blk_types.h | 19 +++--- include/trace/events/bcache.h | 26 ++++----- include/trace/events/block.h | 26 ++++----- include/trace/events/f2fs.h 
| 4 +- kernel/power/block_io.c | 2 +- kernel/trace/blktrace.c | 15 ++--- mm/page_io.c | 10 ++-- 107 files changed, 700 insertions(+), 638 deletions(-) (limited to 'mm') diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt index 8df5e8e6dceb..2101e718670d 100644 --- a/Documentation/block/biodoc.txt +++ b/Documentation/block/biodoc.txt @@ -447,14 +447,13 @@ struct bio_vec { * main unit of I/O for the block layer and lower layers (ie drivers) */ struct bio { - sector_t bi_sector; struct bio *bi_next; /* request queue link */ struct block_device *bi_bdev; /* target device */ unsigned long bi_flags; /* status, command, etc */ unsigned long bi_rw; /* low bits: r/w, high: priority */ unsigned int bi_vcnt; /* how may bio_vec's */ - unsigned int bi_idx; /* current index into bio_vec array */ + struct bvec_iter bi_iter; /* current index into bio_vec array */ unsigned int bi_size; /* total size in bytes */ unsigned short bi_phys_segments; /* segments after physaddr coalesce*/ @@ -480,7 +479,7 @@ With this multipage bio design: - Code that traverses the req list can find all the segments of a bio by using rq_for_each_segment. This handles the fact that a request has multiple bios, each of which can have multiple segments. -- Drivers which can't process a large bio in one shot can use the bi_idx +- Drivers which can't process a large bio in one shot can use the bi_iter field to keep track of the next bio_vec entry to process. (e.g a 1MB bio_vec needs to be handled in max 128kB chunks for IDE) [TBD: Should preferably also have a bi_voffset and bi_vlen to avoid modifying @@ -589,7 +588,7 @@ driver should not modify these values. The block layer sets up the nr_sectors and current_nr_sectors fields (based on the corresponding hard_xxx values and the number of bytes transferred) and updates it on every transfer that invokes end_that_request_first. It does the same for the -buffer, bio, bio->bi_idx fields too. +buffer, bio, bio->bi_iter fields too. The buffer field is just a virtual address mapping of the current segment of the i/o buffer in cases where the buffer resides in low-memory. 
For high diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c index 0721858fbd1e..0a9d0b3c794b 100644 --- a/arch/m68k/emu/nfblock.c +++ b/arch/m68k/emu/nfblock.c @@ -64,7 +64,7 @@ static void nfhd_make_request(struct request_queue *queue, struct bio *bio) struct nfhd_device *dev = queue->queuedata; struct bio_vec *bvec; int i, dir, len, shift; - sector_t sec = bio->bi_sector; + sector_t sec = bio->bi_iter.bi_sector; dir = bio_data_dir(bio); shift = dev->bshift; diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index 1c16141c031c..f33bcbaa6a07 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c @@ -113,7 +113,8 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio) unsigned int transfered; unsigned short idx; - phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT); + phys_mem = bank->io_addr + (bio->bi_iter.bi_sector << + AXON_RAM_SECTOR_SHIFT); phys_end = bank->io_addr + bank->size; transfered = 0; bio_for_each_segment(vec, bio, idx) { diff --git a/block/blk-core.c b/block/blk-core.c index 8bdd0121212a..5c2ab2c74066 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -130,7 +130,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio, bio_advance(bio, nbytes); /* don't actually finish bio if it's part of flush sequence */ - if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) + if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) bio_endio(bio, error); } @@ -1326,7 +1326,7 @@ void blk_add_request_payload(struct request *rq, struct page *page, bio->bi_io_vec->bv_offset = 0; bio->bi_io_vec->bv_len = len; - bio->bi_size = len; + bio->bi_iter.bi_size = len; bio->bi_vcnt = 1; bio->bi_phys_segments = 1; @@ -1351,7 +1351,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req, req->biotail->bi_next = bio; req->biotail = bio; - req->__data_len += bio->bi_size; + req->__data_len += bio->bi_iter.bi_size; req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); blk_account_io_start(req, false); @@ -1380,8 +1380,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req, * not touch req->buffer either... */ req->buffer = bio_data(bio); - req->__sector = bio->bi_sector; - req->__data_len += bio->bi_size; + req->__sector = bio->bi_iter.bi_sector; + req->__data_len += bio->bi_iter.bi_size; req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); blk_account_io_start(req, false); @@ -1459,7 +1459,7 @@ void init_request_from_bio(struct request *req, struct bio *bio) req->cmd_flags |= REQ_FAILFAST_MASK; req->errors = 0; - req->__sector = bio->bi_sector; + req->__sector = bio->bi_iter.bi_sector; req->ioprio = bio_prio(bio); blk_rq_bio_prep(req->q, req, bio); } @@ -1583,12 +1583,12 @@ static inline void blk_partition_remap(struct bio *bio) if (bio_sectors(bio) && bdev != bdev->bd_contains) { struct hd_struct *p = bdev->bd_part; - bio->bi_sector += p->start_sect; + bio->bi_iter.bi_sector += p->start_sect; bio->bi_bdev = bdev->bd_contains; trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, bdev->bd_dev, - bio->bi_sector - p->start_sect); + bio->bi_iter.bi_sector - p->start_sect); } } @@ -1654,7 +1654,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) /* Test device or partition size, when known. 
*/ maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; if (maxsector) { - sector_t sector = bio->bi_sector; + sector_t sector = bio->bi_iter.bi_sector; if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { /* @@ -1690,7 +1690,7 @@ generic_make_request_checks(struct bio *bio) "generic_make_request: Trying to access " "nonexistent block-device %s (%Lu)\n", bdevname(bio->bi_bdev, b), - (long long) bio->bi_sector); + (long long) bio->bi_iter.bi_sector); goto end_io; } @@ -1704,9 +1704,9 @@ generic_make_request_checks(struct bio *bio) } part = bio->bi_bdev->bd_part; - if (should_fail_request(part, bio->bi_size) || + if (should_fail_request(part, bio->bi_iter.bi_size) || should_fail_request(&part_to_disk(part)->part0, - bio->bi_size)) + bio->bi_iter.bi_size)) goto end_io; /* @@ -1865,7 +1865,7 @@ void submit_bio(int rw, struct bio *bio) if (rw & WRITE) { count_vm_events(PGPGOUT, count); } else { - task_io_account_read(bio->bi_size); + task_io_account_read(bio->bi_iter.bi_size); count_vm_events(PGPGIN, count); } @@ -1874,7 +1874,7 @@ void submit_bio(int rw, struct bio *bio) printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", current->comm, task_pid_nr(current), (rw & WRITE) ? "WRITE" : "READ", - (unsigned long long)bio->bi_sector, + (unsigned long long)bio->bi_iter.bi_sector, bdevname(bio->bi_bdev, b), count); } @@ -2007,7 +2007,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq) for (bio = rq->bio; bio; bio = bio->bi_next) { if ((bio->bi_rw & ff) != ff) break; - bytes += bio->bi_size; + bytes += bio->bi_iter.bi_size; } /* this could lead to infinite loop */ @@ -2378,9 +2378,9 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) total_bytes = 0; while (req->bio) { struct bio *bio = req->bio; - unsigned bio_bytes = min(bio->bi_size, nr_bytes); + unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); - if (bio_bytes == bio->bi_size) + if (bio_bytes == bio->bi_iter.bi_size) req->bio = bio->bi_next; req_bio_endio(req, bio, bio_bytes, error); @@ -2728,7 +2728,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, rq->nr_phys_segments = bio_phys_segments(q, bio); rq->buffer = bio_data(bio); } - rq->__data_len = bio->bi_size; + rq->__data_len = bio->bi_iter.bi_size; rq->bio = rq->biotail = bio; if (bio->bi_bdev) diff --git a/block/blk-flush.c b/block/blk-flush.c index fb6f3c0ffa49..9288aaf35c21 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -548,7 +548,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, * copied from blk_rq_pos(rq). 
*/ if (error_sector) - *error_sector = bio->bi_sector; + *error_sector = bio->bi_iter.bi_sector; bio_put(bio); return ret; diff --git a/block/blk-lib.c b/block/blk-lib.c index 9b5b561cb928..2da76c999ef3 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -108,12 +108,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, req_sects = end_sect - sector; } - bio->bi_sector = sector; + bio->bi_iter.bi_sector = sector; bio->bi_end_io = bio_batch_end_io; bio->bi_bdev = bdev; bio->bi_private = &bb; - bio->bi_size = req_sects << 9; + bio->bi_iter.bi_size = req_sects << 9; nr_sects -= req_sects; sector = end_sect; @@ -174,7 +174,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, break; } - bio->bi_sector = sector; + bio->bi_iter.bi_sector = sector; bio->bi_end_io = bio_batch_end_io; bio->bi_bdev = bdev; bio->bi_private = &bb; @@ -184,11 +184,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev); if (nr_sects > max_write_same_sectors) { - bio->bi_size = max_write_same_sectors << 9; + bio->bi_iter.bi_size = max_write_same_sectors << 9; nr_sects -= max_write_same_sectors; sector += max_write_same_sectors; } else { - bio->bi_size = nr_sects << 9; + bio->bi_iter.bi_size = nr_sects << 9; nr_sects = 0; } @@ -240,7 +240,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, break; } - bio->bi_sector = sector; + bio->bi_iter.bi_sector = sector; bio->bi_bdev = bdev; bio->bi_end_io = bio_batch_end_io; bio->bi_private = &bb; diff --git a/block/blk-map.c b/block/blk-map.c index 623e1cd4cffe..ae4ae1047fd9 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -20,7 +20,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq, rq->biotail->bi_next = bio; rq->biotail = bio; - rq->__data_len += bio->bi_size; + rq->__data_len += bio->bi_iter.bi_size; } return 0; } @@ -76,7 +76,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq, ret = blk_rq_append_bio(q, rq, bio); if (!ret) - return bio->bi_size; + return bio->bi_iter.bi_size; /* if it was boucned we must call the end io function */ bio_endio(bio, 0); @@ -220,7 +220,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, if (IS_ERR(bio)) return PTR_ERR(bio); - if (bio->bi_size != len) { + if (bio->bi_iter.bi_size != len) { /* * Grab an extra reference to this bio, as bio_unmap_user() * expects to be able to drop it twice as it happens on the diff --git a/block/blk-merge.c b/block/blk-merge.c index 1ffc58977835..03bc083c28cf 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -543,9 +543,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) int blk_try_merge(struct request *rq, struct bio *bio) { - if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector) + if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) return ELEVATOR_BACK_MERGE; - else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector) + else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector) return ELEVATOR_FRONT_MERGE; return ELEVATOR_NO_MERGE; } diff --git a/block/blk-mq.c b/block/blk-mq.c index cdc629cf075b..e4fbcc3fd2db 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -301,7 +301,7 @@ void blk_mq_complete_request(struct request *rq, int error) struct bio *next = bio->bi_next; bio->bi_next = NULL; - bytes += bio->bi_size; + bytes += bio->bi_iter.bi_size; blk_mq_bio_endio(rq, bio, error); bio = next; } diff --git a/block/blk-throttle.c 
b/block/blk-throttle.c index 06534049afba..20f820037775 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -877,14 +877,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, do_div(tmp, HZ); bytes_allowed = tmp; - if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) { + if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { if (wait) *wait = 0; return 1; } /* Calc approx time to dispatch */ - extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed; + extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed; jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]); if (!jiffy_wait) @@ -987,7 +987,7 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) bool rw = bio_data_dir(bio); /* Charge the bio to the group */ - tg->bytes_disp[rw] += bio->bi_size; + tg->bytes_disp[rw] += bio->bi_iter.bi_size; tg->io_disp[rw]++; /* @@ -1003,8 +1003,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) */ if (!(bio->bi_rw & REQ_THROTTLED)) { bio->bi_rw |= REQ_THROTTLED; - throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, - bio->bi_rw); + throtl_update_dispatch_stats(tg_to_blkg(tg), + bio->bi_iter.bi_size, bio->bi_rw); } } @@ -1508,7 +1508,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio) if (tg) { if (!tg->has_rules[rw]) { throtl_update_dispatch_stats(tg_to_blkg(tg), - bio->bi_size, bio->bi_rw); + bio->bi_iter.bi_size, bio->bi_rw); goto out_unlock_rcu; } } @@ -1564,7 +1564,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio) /* out-of-limit, queue to @tg */ throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d", rw == READ ? 'R' : 'W', - tg->bytes_disp[rw], bio->bi_size, tg->bps[rw], + tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw], tg->io_disp[rw], tg->iops[rw], sq->nr_queued[READ], sq->nr_queued[WRITE]); diff --git a/block/elevator.c b/block/elevator.c index b7ff2861b6bd..42c45a7d6714 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -440,7 +440,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) /* * See if our hash lookup can find a potential backmerge. */ - __rq = elv_rqhash_find(q, bio->bi_sector); + __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector); if (__rq && elv_rq_merge_ok(__rq, bio)) { *req = __rq; return ELEVATOR_BACK_MERGE; diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index d2515435e23f..877ba119b3f8 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -929,8 +929,8 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio) memset(buf, 0, sizeof(*buf)); buf->rq = rq; buf->bio = bio; - buf->resid = bio->bi_size; - buf->sector = bio->bi_sector; + buf->resid = bio->bi_iter.bi_size; + buf->sector = bio->bi_iter.bi_sector; bio_pageinc(bio); buf->bv = bio_iovec(bio); buf->bv_resid = buf->bv->bv_len; @@ -1152,7 +1152,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail) do { bio = rq->bio; bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags); - } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size)); + } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size)); /* cf. 
http://lkml.org/lkml/2006/10/31/28 */ if (!fastfail) diff --git a/drivers/block/brd.c b/drivers/block/brd.c index d91f1a56e861..66f5aaae15a2 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -333,13 +333,13 @@ static void brd_make_request(struct request_queue *q, struct bio *bio) int i; int err = -EIO; - sector = bio->bi_sector; + sector = bio->bi_iter.bi_sector; if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) goto out; if (unlikely(bio->bi_rw & REQ_DISCARD)) { err = 0; - discard_from_brd(brd, sector, bio->bi_size); + discard_from_brd(brd, sector, bio->bi_iter.bi_size); goto out; } diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 28c73ca320a8..a9b13f2cc420 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -159,7 +159,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev, bio = bio_alloc_drbd(GFP_NOIO); bio->bi_bdev = bdev->md_bdev; - bio->bi_sector = sector; + bio->bi_iter.bi_sector = sector; err = -EIO; if (bio_add_page(bio, page, size, 0) != size) goto out; diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index b12c11ec4bd2..597f111df67b 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -1028,7 +1028,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must } else page = b->bm_pages[page_nr]; bio->bi_bdev = mdev->ldev->md_bdev; - bio->bi_sector = on_disk_sector; + bio->bi_iter.bi_sector = on_disk_sector; /* bio_add_page of a single page to an empty bio will always succeed, * according to api. Do we want to assert that? */ bio_add_page(bio, page, len, 0); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 6fa6673b36b3..5326c22cdb9d 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1333,7 +1333,7 @@ next_bio: goto fail; } /* > peer_req->i.sector, unless this is the first bio */ - bio->bi_sector = sector; + bio->bi_iter.bi_sector = sector; bio->bi_bdev = mdev->ldev->backing_bdev; bio->bi_rw = rw; bio->bi_private = peer_req; @@ -1353,7 +1353,7 @@ next_bio: dev_err(DEV, "bio_add_page failed for len=%u, " "bi_vcnt=0 (bi_sector=%llu)\n", - len, (unsigned long long)bio->bi_sector); + len, (uint64_t)bio->bi_iter.bi_sector); err = -ENOSPC; goto fail; } @@ -1615,7 +1615,7 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, mdev->recv_cnt += data_size>>9; bio = req->master_bio; - D_ASSERT(sector == bio->bi_sector); + D_ASSERT(sector == bio->bi_iter.bi_sector); bio_for_each_segment(bvec, bio, i) { void *mapped = kmap(bvec->bv_page) + bvec->bv_offset; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index fec7bef44994..104a040f24de 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -77,8 +77,8 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev, req->epoch = 0; drbd_clear_interval(&req->i); - req->i.sector = bio_src->bi_sector; - req->i.size = bio_src->bi_size; + req->i.sector = bio_src->bi_iter.bi_sector; + req->i.size = bio_src->bi_iter.bi_size; req->i.local = true; req->i.waiting = false; @@ -1280,7 +1280,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio) /* * what we "blindly" assume: */ - D_ASSERT(IS_ALIGNED(bio->bi_size, 512)); + D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512)); inc_ap_bio(mdev); __drbd_make_request(mdev, bio, start_time); diff --git a/drivers/block/drbd/drbd_req.h 
b/drivers/block/drbd/drbd_req.h index 978cb1addc98..28e15d91197a 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -269,7 +269,7 @@ static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bi /* Short lived temporary struct on the stack. * We could squirrel the error to be returned into - * bio->bi_size, or similar. But that would be too ugly. */ + * bio->bi_iter.bi_size, or similar. But that would be too ugly. */ struct bio_and_error { struct bio *bio; int error; diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 000abe2f105c..6a86fe7b730f 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3775,9 +3775,9 @@ static int __floppy_read_block_0(struct block_device *bdev) bio_vec.bv_len = size; bio_vec.bv_offset = 0; bio.bi_vcnt = 1; - bio.bi_size = size; + bio.bi_iter.bi_size = size; bio.bi_bdev = bdev; - bio.bi_sector = 0; + bio.bi_iter.bi_sector = 0; bio.bi_flags = (1 << BIO_QUIET); init_completion(&complete); bio.bi_private = &complete; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index c8dac7305244..f5e39989adde 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -415,7 +415,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) loff_t pos; int ret; - pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; + pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset; if (bio_rw(bio) == WRITE) { struct file *file = lo->lo_backing_file; @@ -444,7 +444,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) goto out; } ret = file->f_op->fallocate(file, mode, pos, - bio->bi_size); + bio->bi_iter.bi_size); if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP)) ret = -EIO; diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 050c71267f14..69e9eb5a6b34 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -3993,7 +3993,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) } if (unlikely(bio->bi_rw & REQ_DISCARD)) { - bio_endio(bio, mtip_send_trim(dd, bio->bi_sector, + bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector, bio_sectors(bio))); return; } @@ -4006,7 +4006,8 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 && dd->unal_qdepth) { - if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */ + if (bio->bi_iter.bi_sector % 8 != 0) + /* Unaligned on 4k boundaries */ unaligned = 1; else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */ unaligned = 1; @@ -4035,7 +4036,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) /* Issue the read/write. 
*/ mtip_hw_submit_io(dd, - bio->bi_sector, + bio->bi_iter.bi_sector, bio_sectors(bio), nents, tag, diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index 26d03fa0bf26..53d217381873 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -468,7 +468,7 @@ static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx, { struct nvme_bio_pair *bp; - BUG_ON(len > bio->bi_size); + BUG_ON(len > bio->bi_iter.bi_size); BUG_ON(idx > bio->bi_vcnt); bp = kmalloc(sizeof(*bp), GFP_ATOMIC); @@ -479,11 +479,11 @@ static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx, bp->b1 = *bio; bp->b2 = *bio; - bp->b1.bi_size = len; - bp->b2.bi_size -= len; + bp->b1.bi_iter.bi_size = len; + bp->b2.bi_iter.bi_size -= len; bp->b1.bi_vcnt = idx; - bp->b2.bi_idx = idx; - bp->b2.bi_sector += len >> 9; + bp->b2.bi_iter.bi_idx = idx; + bp->b2.bi_iter.bi_sector += len >> 9; if (offset) { bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec), @@ -552,11 +552,12 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod, { struct bio_vec *bvec, *bvprv = NULL; struct scatterlist *sg = NULL; - int i, length = 0, nsegs = 0, split_len = bio->bi_size; + int i, length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size; if (nvmeq->dev->stripe_size) split_len = nvmeq->dev->stripe_size - - ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1)); + ((bio->bi_iter.bi_sector << 9) & + (nvmeq->dev->stripe_size - 1)); sg_init_table(iod->sg, psegs); bio_for_each_segment(bvec, bio, i) { @@ -584,7 +585,7 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod, if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0) return -ENOMEM; - BUG_ON(length != bio->bi_size); + BUG_ON(length != bio->bi_iter.bi_size); return length; } @@ -608,8 +609,8 @@ static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns, iod->npages = 0; range->cattr = cpu_to_le32(0); - range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift); - range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector)); + range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift); + range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector)); memset(cmnd, 0, sizeof(*cmnd)); cmnd->dsm.opcode = nvme_cmd_dsm; @@ -674,7 +675,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, } result = -ENOMEM; - iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC); + iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC); if (!iod) goto nomem; iod->private = bio; @@ -723,7 +724,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, cmnd->rw.nsid = cpu_to_le32(ns->ns_id); length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length, GFP_ATOMIC); - cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector)); + cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector)); cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1); cmnd->rw.control = cpu_to_le16(control); cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index ff8668c5efb1..ce986bacf7b7 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -651,7 +651,7 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s for (;;) { tmp = rb_entry(n, struct pkt_rb_node, rb_node); - if (s <= tmp->bio->bi_sector) + if (s <= tmp->bio->bi_iter.bi_sector) next = n->rb_left; else next = n->rb_right; @@ -660,12 +660,12 @@ static struct pkt_rb_node 
*pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s n = next; } - if (s > tmp->bio->bi_sector) { + if (s > tmp->bio->bi_iter.bi_sector) { tmp = pkt_rbtree_next(tmp); if (!tmp) return NULL; } - BUG_ON(s > tmp->bio->bi_sector); + BUG_ON(s > tmp->bio->bi_iter.bi_sector); return tmp; } @@ -676,13 +676,13 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod { struct rb_node **p = &pd->bio_queue.rb_node; struct rb_node *parent = NULL; - sector_t s = node->bio->bi_sector; + sector_t s = node->bio->bi_iter.bi_sector; struct pkt_rb_node *tmp; while (*p) { parent = *p; tmp = rb_entry(parent, struct pkt_rb_node, rb_node); - if (s < tmp->bio->bi_sector) + if (s < tmp->bio->bi_iter.bi_sector) p = &(*p)->rb_left; else p = &(*p)->rb_right; @@ -857,7 +857,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) spin_lock(&pd->iosched.lock); bio = bio_list_peek(&pd->iosched.write_queue); spin_unlock(&pd->iosched.lock); - if (bio && (bio->bi_sector == pd->iosched.last_write)) + if (bio && (bio->bi_iter.bi_sector == + pd->iosched.last_write)) need_write_seek = 0; if (need_write_seek && reads_queued) { if (atomic_read(&pd->cdrw.pending_bios) > 0) { @@ -888,7 +889,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) continue; if (bio_data_dir(bio) == READ) - pd->iosched.successive_reads += bio->bi_size >> 10; + pd->iosched.successive_reads += + bio->bi_iter.bi_size >> 10; else { pd->iosched.successive_reads = 0; pd->iosched.last_write = bio_end_sector(bio); @@ -978,7 +980,7 @@ static void pkt_end_io_read(struct bio *bio, int err) pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n", bio, (unsigned long long)pkt->sector, - (unsigned long long)bio->bi_sector, err); + (unsigned long long)bio->bi_iter.bi_sector, err); if (err) atomic_inc(&pkt->io_errors); @@ -1026,8 +1028,9 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) memset(written, 0, sizeof(written)); spin_lock(&pkt->lock); bio_list_for_each(bio, &pkt->orig_bios) { - int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9); - int num_frames = bio->bi_size / CD_FRAMESIZE; + int first_frame = (bio->bi_iter.bi_sector - pkt->sector) / + (CD_FRAMESIZE >> 9); + int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE; pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9); BUG_ON(first_frame < 0); BUG_ON(first_frame + num_frames > pkt->frames); @@ -1053,7 +1056,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) bio = pkt->r_bios[f]; bio_reset(bio); - bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); + bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); bio->bi_bdev = pd->bdev; bio->bi_end_io = pkt_end_io_read; bio->bi_private = pkt; @@ -1150,8 +1153,8 @@ static int pkt_start_recovery(struct packet_data *pkt) bio_reset(pkt->bio); pkt->bio->bi_bdev = pd->bdev; pkt->bio->bi_rw = REQ_WRITE; - pkt->bio->bi_sector = new_sector; - pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE; + pkt->bio->bi_iter.bi_sector = new_sector; + pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE; pkt->bio->bi_vcnt = pkt->frames; pkt->bio->bi_end_io = pkt_end_io_packet_write; @@ -1213,7 +1216,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd) node = first_node; while (node) { bio = node->bio; - zone = get_zone(bio->bi_sector, pd); + zone = get_zone(bio->bi_iter.bi_sector, pd); list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) { if (p->sector == zone) { bio = NULL; @@ -1252,14 +1255,14 @@ try_next_bio: pkt_dbg(2, pd, 
"looking for zone %llx\n", (unsigned long long)zone); while ((node = pkt_rbtree_find(pd, zone)) != NULL) { bio = node->bio; - pkt_dbg(2, pd, "found zone=%llx\n", - (unsigned long long)get_zone(bio->bi_sector, pd)); - if (get_zone(bio->bi_sector, pd) != zone) + pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long) + get_zone(bio->bi_iter.bi_sector, pd)); + if (get_zone(bio->bi_iter.bi_sector, pd) != zone) break; pkt_rbtree_erase(pd, node); spin_lock(&pkt->lock); bio_list_add(&pkt->orig_bios, bio); - pkt->write_size += bio->bi_size / CD_FRAMESIZE; + pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE; spin_unlock(&pkt->lock); } /* check write congestion marks, and if bio_queue_size is @@ -1293,7 +1296,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) struct bio_vec *bvec = pkt->w_bio->bi_io_vec; bio_reset(pkt->w_bio); - pkt->w_bio->bi_sector = pkt->sector; + pkt->w_bio->bi_iter.bi_sector = pkt->sector; pkt->w_bio->bi_bdev = pd->bdev; pkt->w_bio->bi_end_io = pkt_end_io_packet_write; pkt->w_bio->bi_private = pkt; @@ -2370,20 +2373,20 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio) if (!test_bit(PACKET_WRITABLE, &pd->flags)) { pkt_notice(pd, "WRITE for ro device (%llu)\n", - (unsigned long long)bio->bi_sector); + (unsigned long long)bio->bi_iter.bi_sector); goto end_io; } - if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) { + if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) { pkt_err(pd, "wrong bio size\n"); goto end_io; } blk_queue_bounce(q, &bio); - zone = get_zone(bio->bi_sector, pd); + zone = get_zone(bio->bi_iter.bi_sector, pd); pkt_dbg(2, pd, "start = %6llx stop = %6llx\n", - (unsigned long long)bio->bi_sector, + (unsigned long long)bio->bi_iter.bi_sector, (unsigned long long)bio_end_sector(bio)); /* Check if we have to split the bio */ @@ -2395,7 +2398,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio) last_zone = get_zone(bio_end_sector(bio) - 1, pd); if (last_zone != zone) { BUG_ON(last_zone != zone + pd->settings.size); - first_sectors = last_zone - bio->bi_sector; + first_sectors = last_zone - bio->bi_iter.bi_sector; bp = bio_split(bio, first_sectors); BUG_ON(!bp); pkt_make_request(q, &bp->bio1); @@ -2417,7 +2420,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio) if ((pkt->state == PACKET_WAITING_STATE) || (pkt->state == PACKET_READ_WAIT_STATE)) { bio_list_add(&pkt->orig_bios, bio); - pkt->write_size += bio->bi_size / CD_FRAMESIZE; + pkt->write_size += + bio->bi_iter.bi_size / CD_FRAMESIZE; if ((pkt->write_size >= pkt->frames) && (pkt->state == PACKET_WAITING_STATE)) { atomic_inc(&pkt->run_sm); diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index d754a88d7585..464be78a0836 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -104,7 +104,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev, dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u segs %u sectors from %lu\n", __func__, __LINE__, i, bio_segments(iter.bio), - bio_sectors(iter.bio), iter.bio->bi_sector); + bio_sectors(iter.bio), iter.bio->bi_iter.bi_sector); size = bvec->bv_len; buf = bvec_kmap_irq(bvec, &flags); diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index 06a2e53e5f37..320bbfc9b902 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c @@ -553,7 +553,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev, struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); int write = bio_data_dir(bio) 
== WRITE; const char *op = write ? "write" : "read"; - loff_t offset = bio->bi_sector << 9; + loff_t offset = bio->bi_iter.bi_sector << 9; int error = 0; struct bio_vec *bvec; unsigned int i; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index cb1db2979d3d..a8f4fe2d4d1b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1183,14 +1183,14 @@ static struct bio *bio_clone_range(struct bio *bio_src, /* Handle the easy case for the caller */ - if (!offset && len == bio_src->bi_size) + if (!offset && len == bio_src->bi_iter.bi_size) return bio_clone(bio_src, gfpmask); if (WARN_ON_ONCE(!len)) return NULL; - if (WARN_ON_ONCE(len > bio_src->bi_size)) + if (WARN_ON_ONCE(len > bio_src->bi_iter.bi_size)) return NULL; - if (WARN_ON_ONCE(offset > bio_src->bi_size - len)) + if (WARN_ON_ONCE(offset > bio_src->bi_iter.bi_size - len)) return NULL; /* Find first affected segment... */ @@ -1220,7 +1220,8 @@ static struct bio *bio_clone_range(struct bio *bio_src, return NULL; /* ENOMEM */ bio->bi_bdev = bio_src->bi_bdev; - bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT); + bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector + + (offset >> SECTOR_SHIFT); bio->bi_rw = bio_src->bi_rw; bio->bi_flags |= 1 << BIO_CLONED; @@ -1239,8 +1240,7 @@ static struct bio *bio_clone_range(struct bio *bio_src, } bio->bi_vcnt = vcnt; - bio->bi_size = len; - bio->bi_idx = 0; + bio->bi_iter.bi_size = len; return bio; } @@ -1271,7 +1271,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src, /* Build up a chain of clone bios up to the limit */ - if (!bi || off >= bi->bi_size || !len) + if (!bi || off >= bi->bi_iter.bi_size || !len) return NULL; /* Nothing to clone */ end = &chain; @@ -1283,7 +1283,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src, rbd_warn(NULL, "bio_chain exhausted with %u left", len); goto out_err; /* EINVAL; ran out of bio's */ } - bi_size = min_t(unsigned int, bi->bi_size - off, len); + bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len); bio = bio_clone_range(bi, off, bi_size, gfpmask); if (!bio) goto out_err; /* ENOMEM */ @@ -1292,7 +1292,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src, end = &bio->bi_next; off += bi_size; - if (off == bi->bi_size) { + if (off == bi->bi_iter.bi_size) { bi = bi->bi_next; off = 0; } @@ -2186,7 +2186,8 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request, if (type == OBJ_REQUEST_BIO) { bio_list = data_desc; - rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT); + rbd_assert(img_offset == + bio_list->bi_iter.bi_sector << SECTOR_SHIFT); } else { rbd_assert(type == OBJ_REQUEST_PAGES); pages = data_desc; diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c index 2284f5d3a54a..2839d37e5af7 100644 --- a/drivers/block/rsxx/dev.c +++ b/drivers/block/rsxx/dev.c @@ -174,7 +174,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio) if (!card) goto req_err; - if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk)) + if (bio_end_sector(bio) > get_capacity(card->gendisk)) goto req_err; if (unlikely(card->halt)) { @@ -187,7 +187,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio) goto req_err; } - if (bio->bi_size == 0) { + if (bio->bi_iter.bi_size == 0) { dev_err(CARD_TO_DEV(card), "size zero BIO!\n"); goto req_err; } @@ -208,7 +208,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio) dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n", bio_data_dir(bio) ? 
'W' : 'R', bio_meta, - (u64)bio->bi_sector << 9, bio->bi_size); + (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size); st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas, bio_dma_done_cb, bio_meta); diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c index fc88ba3e1bd2..3716633be3c2 100644 --- a/drivers/block/rsxx/dma.c +++ b/drivers/block/rsxx/dma.c @@ -696,7 +696,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, int st; int i; - addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */ + addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */ atomic_set(n_dmas, 0); for (i = 0; i < card->n_targets; i++) { @@ -705,7 +705,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, } if (bio->bi_rw & REQ_DISCARD) { - bv_len = bio->bi_size; + bv_len = bio->bi_iter.bi_size; while (bv_len > 0) { tgt = rsxx_get_dma_tgt(card, addr8); diff --git a/drivers/block/umem.c b/drivers/block/umem.c index ad70868f8a96..dab4f1afeae9 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -352,8 +352,8 @@ static int add_bio(struct cardinfo *card) bio = card->currentbio; if (!bio && card->bio) { card->currentbio = card->bio; - card->current_idx = card->bio->bi_idx; - card->current_sector = card->bio->bi_sector; + card->current_idx = card->bio->bi_iter.bi_idx; + card->current_sector = card->bio->bi_iter.bi_sector; card->bio = card->bio->bi_next; if (card->bio == NULL) card->biotail = &card->bio; @@ -451,7 +451,7 @@ static void process_page(unsigned long data) if (page->idx >= bio->bi_vcnt) { page->bio = bio->bi_next; if (page->bio) - page->idx = page->bio->bi_idx; + page->idx = page->bio->bi_iter.bi_idx; } pci_unmap_page(card->dev, desc->data_dma_handle, @@ -532,7 +532,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio) { struct cardinfo *card = q->queuedata; pr_debug("mm_make_request %llu %u\n", - (unsigned long long)bio->bi_sector, bio->bi_size); + (unsigned long long)bio->bi_iter.bi_sector, + bio->bi_iter.bi_size); spin_lock_irq(&card->lock); *card->biotail = bio; diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 6620b73d0490..4b97b86da926 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -1257,7 +1257,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, bio->bi_bdev = preq.bdev; bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; - bio->bi_sector = preq.sector_number; + bio->bi_iter.bi_sector = preq.sector_number; } preq.sector_number += seg[i].nsec; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 432db1b59b00..80e86307dd4b 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1547,7 +1547,7 @@ static int blkif_recover(struct blkfront_info *info) for (i = 0; i < pending; i++) { offset = (i * segs * PAGE_SIZE) >> 9; size = min((unsigned int)(segs * PAGE_SIZE) >> 9, - (unsigned int)(bio->bi_size >> 9) - offset); + (unsigned int)bio_sectors(bio) - offset); cloned_bio = bio_clone(bio, GFP_NOIO); BUG_ON(cloned_bio == NULL); bio_trim(cloned_bio, offset, size); diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 5e2765aadce1..038a6d2aced3 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -299,7 +299,7 @@ void bch_btree_node_read(struct btree *b) bio = bch_bbio_alloc(b->c); bio->bi_rw = REQ_META|READ_SYNC; - bio->bi_size = KEY_SIZE(&b->key) << 9; + bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; bio->bi_end_io = btree_node_read_endio; 
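The bcache and md conversions below repeatedly bump bi_iter.bi_sector while shrinking bi_iter.bi_size in lockstep. A minimal userspace model of that advance step (simplified types and a hypothetical helper name, not kernel code) looks like:

	/* Model of advancing an iterator by a byte count: sector and size
	 * move together instead of touching separate bio fields. */
	#include <assert.h>
	#include <stdio.h>

	typedef unsigned long long sector_t;

	struct bvec_iter {
		sector_t     bi_sector;
		unsigned int bi_size;
	};

	static void iter_advance_bytes(struct bvec_iter *iter, unsigned int bytes)
	{
		assert(bytes <= iter->bi_size);	/* cannot advance past the end */
		iter->bi_sector += bytes >> 9;	/* 512-byte sectors */
		iter->bi_size   -= bytes;
	}

	int main(void)
	{
		struct bvec_iter iter = { .bi_sector = 0, .bi_size = 16 << 9 };

		iter_advance_bytes(&iter, 4 << 9);	/* complete 4 sectors */
		printf("now at sector %llu, %u bytes left\n",
		       iter.bi_sector, iter.bi_size);
		return 0;
	}
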
bio->bi_private = &cl; @@ -395,7 +395,7 @@ static void do_btree_node_write(struct btree *b) b->bio->bi_end_io = btree_node_write_endio; b->bio->bi_private = cl; b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA; - b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c); + b->bio->bi_iter.bi_size = set_blocks(i, b->c) * block_bytes(b->c); bch_bio_map(b->bio, i); /* diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 264fcfbd6290..92b3fd468a03 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -195,7 +195,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) dc->disk.c, "verify failed at dev %s sector %llu", bdevname(dc->bdev, name), - (uint64_t) bio->bi_sector); + (uint64_t) bio->bi_iter.bi_sector); kunmap_atomic(p1); } diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index 9056632995b1..cc4ba2da5fb6 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -21,18 +21,18 @@ static void bch_bi_idx_hack_endio(struct bio *bio, int error) static void bch_generic_make_request_hack(struct bio *bio) { - if (bio->bi_idx) { + if (bio->bi_iter.bi_idx) { struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio)); memcpy(clone->bi_io_vec, bio_iovec(bio), bio_segments(bio) * sizeof(struct bio_vec)); - clone->bi_sector = bio->bi_sector; + clone->bi_iter.bi_sector = bio->bi_iter.bi_sector; clone->bi_bdev = bio->bi_bdev; clone->bi_rw = bio->bi_rw; clone->bi_vcnt = bio_segments(bio); - clone->bi_size = bio->bi_size; + clone->bi_iter.bi_size = bio->bi_iter.bi_size; clone->bi_private = bio; clone->bi_end_io = bch_bi_idx_hack_endio; @@ -72,7 +72,7 @@ static void bch_generic_make_request_hack(struct bio *bio) struct bio *bch_bio_split(struct bio *bio, int sectors, gfp_t gfp, struct bio_set *bs) { - unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9; + unsigned idx = bio->bi_iter.bi_idx, vcnt = 0, nbytes = sectors << 9; struct bio_vec *bv; struct bio *ret = NULL; @@ -90,7 +90,7 @@ struct bio *bch_bio_split(struct bio *bio, int sectors, } bio_for_each_segment(bv, bio, idx) { - vcnt = idx - bio->bi_idx; + vcnt = idx - bio->bi_iter.bi_idx; if (!nbytes) { ret = bio_alloc_bioset(gfp, vcnt, bs); @@ -119,15 +119,15 @@ struct bio *bch_bio_split(struct bio *bio, int sectors, } out: ret->bi_bdev = bio->bi_bdev; - ret->bi_sector = bio->bi_sector; - ret->bi_size = sectors << 9; + ret->bi_iter.bi_sector = bio->bi_iter.bi_sector; + ret->bi_iter.bi_size = sectors << 9; ret->bi_rw = bio->bi_rw; ret->bi_vcnt = vcnt; ret->bi_max_vecs = vcnt; - bio->bi_sector += sectors; - bio->bi_size -= sectors << 9; - bio->bi_idx = idx; + bio->bi_iter.bi_sector += sectors; + bio->bi_iter.bi_size -= sectors << 9; + bio->bi_iter.bi_idx = idx; if (bio_integrity(bio)) { if (bio_integrity_clone(ret, bio, gfp)) { @@ -162,7 +162,7 @@ static unsigned bch_bio_max_sectors(struct bio *bio) bio_for_each_segment(bv, bio, i) { struct bvec_merge_data bvm = { .bi_bdev = bio->bi_bdev, - .bi_sector = bio->bi_sector, + .bi_sector = bio->bi_iter.bi_sector, .bi_size = ret << 9, .bi_rw = bio->bi_rw, }; @@ -272,8 +272,8 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c) { struct bbio *b = container_of(bio, struct bbio, bio); - bio->bi_sector = PTR_OFFSET(&b->key, 0); - bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev; + bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); + bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev; b->submit_time_us = local_clock_us(); closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0)); diff --git a/drivers/md/bcache/journal.c 
b/drivers/md/bcache/journal.c index ecdaa671bd50..7eafdf09a0ae 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -51,10 +51,10 @@ reread: left = ca->sb.bucket_size - offset; len = min_t(unsigned, left, PAGE_SECTORS * 8); bio_reset(bio); - bio->bi_sector = bucket + offset; + bio->bi_iter.bi_sector = bucket + offset; bio->bi_bdev = ca->bdev; bio->bi_rw = READ; - bio->bi_size = len << 9; + bio->bi_iter.bi_size = len << 9; bio->bi_end_io = journal_read_endio; bio->bi_private = &cl; @@ -437,13 +437,13 @@ static void do_journal_discard(struct cache *ca) atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT); bio_init(bio); - bio->bi_sector = bucket_to_sector(ca->set, + bio->bi_iter.bi_sector = bucket_to_sector(ca->set, ca->sb.d[ja->discard_idx]); bio->bi_bdev = ca->bdev; bio->bi_rw = REQ_WRITE|REQ_DISCARD; bio->bi_max_vecs = 1; bio->bi_io_vec = bio->bi_inline_vecs; - bio->bi_size = bucket_bytes(ca); + bio->bi_iter.bi_size = bucket_bytes(ca); bio->bi_end_io = journal_discard_endio; closure_get(&ca->set->cl); @@ -608,10 +608,10 @@ static void journal_write_unlocked(struct closure *cl) atomic_long_add(sectors, &ca->meta_sectors_written); bio_reset(bio); - bio->bi_sector = PTR_OFFSET(k, i); + bio->bi_iter.bi_sector = PTR_OFFSET(k, i); bio->bi_bdev = ca->bdev; bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA; - bio->bi_size = sectors << 9; + bio->bi_iter.bi_size = sectors << 9; bio->bi_end_io = journal_write_endio; bio->bi_private = w; diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 7c1275e66025..581f95df8265 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -82,7 +82,7 @@ static void moving_init(struct moving_io *io) bio_get(bio); bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); - bio->bi_size = KEY_SIZE(&io->w->key) << 9; + bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9; bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS); bio->bi_private = &io->cl; @@ -98,7 +98,7 @@ static void write_moving(struct closure *cl) if (!op->error) { moving_init(io); - io->bio.bio.bi_sector = KEY_START(&io->w->key); + io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key); op->write_prio = 1; op->bio = &io->bio.bio; diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 78bab4154e97..47a9bbc75124 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -261,7 +261,7 @@ static void bch_data_invalidate(struct closure *cl) struct bio *bio = op->bio; pr_debug("invalidating %i sectors from %llu", - bio_sectors(bio), (uint64_t) bio->bi_sector); + bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector); while (bio_sectors(bio)) { unsigned sectors = min(bio_sectors(bio), @@ -270,11 +270,11 @@ static void bch_data_invalidate(struct closure *cl) if (bch_keylist_realloc(&op->insert_keys, 0, op->c)) goto out; - bio->bi_sector += sectors; - bio->bi_size -= sectors << 9; + bio->bi_iter.bi_sector += sectors; + bio->bi_iter.bi_size -= sectors << 9; bch_keylist_add(&op->insert_keys, - &KEY(op->inode, bio->bi_sector, sectors)); + &KEY(op->inode, bio->bi_iter.bi_sector, sectors)); } op->insert_data_done = true; @@ -364,7 +364,7 @@ static void bch_data_insert_start(struct closure *cl) k = op->insert_keys.top; bkey_init(k); SET_KEY_INODE(k, op->inode); - SET_KEY_OFFSET(k, bio->bi_sector); + SET_KEY_OFFSET(k, bio->bi_iter.bi_sector); if (!bch_alloc_sectors(op->c, k, bio_sectors(bio), op->write_point, op->write_prio, @@ -522,7 +522,7 @@ static bool check_should_bypass(struct 
cached_dev *dc, struct bio *bio) (bio->bi_rw & REQ_WRITE))) goto skip; - if (bio->bi_sector & (c->sb.block_size - 1) || + if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || bio_sectors(bio) & (c->sb.block_size - 1)) { pr_debug("skipping unaligned io"); goto skip; @@ -546,8 +546,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) spin_lock(&dc->io_lock); - hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash) - if (i->last == bio->bi_sector && + hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash) + if (i->last == bio->bi_iter.bi_sector && time_before(jiffies, i->jiffies)) goto found; @@ -556,8 +556,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) add_sequential(task); i->sequential = 0; found: - if (i->sequential + bio->bi_size > i->sequential) - i->sequential += bio->bi_size; + if (i->sequential + bio->bi_iter.bi_size > i->sequential) + i->sequential += bio->bi_iter.bi_size; i->last = bio_end_sector(bio); i->jiffies = jiffies + msecs_to_jiffies(5000); @@ -650,15 +650,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) struct bkey *bio_key; unsigned ptr; - if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0) + if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0) return MAP_CONTINUE; if (KEY_INODE(k) != s->iop.inode || - KEY_START(k) > bio->bi_sector) { + KEY_START(k) > bio->bi_iter.bi_sector) { unsigned bio_sectors = bio_sectors(bio); unsigned sectors = KEY_INODE(k) == s->iop.inode ? min_t(uint64_t, INT_MAX, - KEY_START(k) - bio->bi_sector) + KEY_START(k) - bio->bi_iter.bi_sector) : INT_MAX; int ret = s->d->cache_miss(b, s, bio, sectors); @@ -681,13 +681,13 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) s->read_dirty_data = true; n = bch_bio_split(bio, min_t(uint64_t, INT_MAX, - KEY_OFFSET(k) - bio->bi_sector), + KEY_OFFSET(k) - bio->bi_iter.bi_sector), GFP_NOIO, s->d->bio_split); bio_key = &container_of(n, struct bbio, bio)->key; bch_bkey_copy_single_ptr(bio_key, k, ptr); - bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key); + bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key); bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key); n->bi_end_io = bch_cache_read_endio; @@ -714,7 +714,7 @@ static void cache_lookup(struct closure *cl) struct bio *bio = &s->bio.bio; int ret = bch_btree_map_keys(&s->op, s->iop.c, - &KEY(s->iop.inode, bio->bi_sector, 0), + &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0), cache_lookup_fn, MAP_END_KEY); if (ret == -EAGAIN) continue_at(cl, cache_lookup, bcache_wq); @@ -872,9 +872,9 @@ static void cached_dev_read_done(struct closure *cl) if (s->iop.bio) { bio_reset(s->iop.bio); - s->iop.bio->bi_sector = s->cache_miss->bi_sector; + s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector; s->iop.bio->bi_bdev = s->cache_miss->bi_bdev; - s->iop.bio->bi_size = s->insert_bio_sectors << 9; + s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9; bch_bio_map(s->iop.bio, NULL); bio_copy_data(s->cache_miss, s->iop.bio); @@ -937,7 +937,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); s->iop.replace_key = KEY(s->iop.inode, - bio->bi_sector + s->insert_bio_sectors, + bio->bi_iter.bi_sector + s->insert_bio_sectors, s->insert_bio_sectors); ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key); @@ -957,9 +957,9 @@ static int cached_dev_cache_miss(struct btree *b, struct 
search *s, if (!cache_bio) goto out_submit; - cache_bio->bi_sector = miss->bi_sector; - cache_bio->bi_bdev = miss->bi_bdev; - cache_bio->bi_size = s->insert_bio_sectors << 9; + cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector; + cache_bio->bi_bdev = miss->bi_bdev; + cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9; cache_bio->bi_end_io = request_endio; cache_bio->bi_private = &s->cl; @@ -1009,7 +1009,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) { struct closure *cl = &s->cl; struct bio *bio = &s->bio.bio; - struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0); + struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0); struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0); bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end); @@ -1104,13 +1104,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio) part_stat_unlock(); bio->bi_bdev = dc->bdev; - bio->bi_sector += dc->sb.data_offset; + bio->bi_iter.bi_sector += dc->sb.data_offset; if (cached_dev_get(dc)) { s = search_alloc(bio, d); trace_bcache_request_start(s->d, bio); - if (!bio->bi_size) { + if (!bio->bi_iter.bi_size) { /* * can't call bch_journal_meta from under * generic_make_request @@ -1197,9 +1197,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s, sectors -= j; } - bio_advance(bio, min(sectors << 9, bio->bi_size)); + bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size)); - if (!bio->bi_size) + if (!bio->bi_iter.bi_size) return MAP_DONE; return MAP_CONTINUE; @@ -1233,7 +1233,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio) trace_bcache_request_start(s->d, bio); - if (!bio->bi_size) { + if (!bio->bi_iter.bi_size) { /* * can't call bch_journal_meta from under * generic_make_request @@ -1243,7 +1243,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio) bcache_wq); } else if (rw) { bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, - &KEY(d->id, bio->bi_sector, 0), + &KEY(d->id, bio->bi_iter.bi_sector, 0), &KEY(d->id, bio_end_sector(bio), 0)); s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 1d9ee67d14ec..60fb6044b953 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -233,9 +233,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio) struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page); unsigned i; - bio->bi_sector = SB_SECTOR; - bio->bi_rw = REQ_SYNC|REQ_META; - bio->bi_size = SB_SIZE; + bio->bi_iter.bi_sector = SB_SECTOR; + bio->bi_rw = REQ_SYNC|REQ_META; + bio->bi_iter.bi_size = SB_SIZE; bch_bio_map(bio, NULL); out->offset = cpu_to_le64(sb->offset); @@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw, struct bio *bio = bch_bbio_alloc(c); bio->bi_rw = REQ_SYNC|REQ_META|rw; - bio->bi_size = KEY_SIZE(k) << 9; + bio->bi_iter.bi_size = KEY_SIZE(k) << 9; bio->bi_end_io = uuid_endio; bio->bi_private = cl; @@ -503,10 +503,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw) closure_init_stack(cl); - bio->bi_sector = bucket * ca->sb.bucket_size; - bio->bi_bdev = ca->bdev; - bio->bi_rw = REQ_SYNC|REQ_META|rw; - bio->bi_size = bucket_bytes(ca); + bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; + bio->bi_bdev = ca->bdev; + bio->bi_rw = REQ_SYNC|REQ_META|rw; + bio->bi_iter.bi_size = bucket_bytes(ca); bio->bi_end_io = prio_endio; bio->bi_private = ca; diff --git 
a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index 462214eeacbe..c57621e49dc0 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -218,10 +218,10 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done) void bch_bio_map(struct bio *bio, void *base) { - size_t size = bio->bi_size; + size_t size = bio->bi_iter.bi_size; struct bio_vec *bv = bio->bi_io_vec; - BUG_ON(!bio->bi_size); + BUG_ON(!bio->bi_iter.bi_size); BUG_ON(bio->bi_vcnt); bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0; diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 99053b1251be..04657e93f4fd 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -113,7 +113,7 @@ static void dirty_init(struct keybuf_key *w) if (!io->dc->writeback_percent) bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); - bio->bi_size = KEY_SIZE(&w->key) << 9; + bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9; bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS); bio->bi_private = w; bio->bi_io_vec = bio->bi_inline_vecs; @@ -186,7 +186,7 @@ static void write_dirty(struct closure *cl) dirty_init(w); io->bio.bi_rw = WRITE; - io->bio.bi_sector = KEY_START(&w->key); + io->bio.bi_iter.bi_sector = KEY_START(&w->key); io->bio.bi_bdev = io->dc->bdev; io->bio.bi_end_io = dirty_endio; @@ -255,7 +255,7 @@ static void read_dirty(struct cached_dev *dc) io->dc = dc; dirty_init(w); - io->bio.bi_sector = PTR_OFFSET(&w->key, 0); + io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); io->bio.bi_bdev = PTR_CACHE(dc->disk.c, &w->key, 0)->bdev; io->bio.bi_rw = READ; diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index c9ddcf4614b9..e2f8598937ac 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -50,7 +50,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio, return false; if (dc->partial_stripes_expensive && - bcache_dev_stripe_dirty(dc, bio->bi_sector, + bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector, bio_sectors(bio))) return true; diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h index 3a8cfa2645c7..5ace48ee9f58 100644 --- a/drivers/md/dm-bio-record.h +++ b/drivers/md/dm-bio-record.h @@ -40,10 +40,10 @@ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) { unsigned i; - bd->bi_sector = bio->bi_sector; + bd->bi_sector = bio->bi_iter.bi_sector; bd->bi_bdev = bio->bi_bdev; - bd->bi_size = bio->bi_size; - bd->bi_idx = bio->bi_idx; + bd->bi_size = bio->bi_iter.bi_size; + bd->bi_idx = bio->bi_iter.bi_idx; bd->bi_flags = bio->bi_flags; for (i = 0; i < bio->bi_vcnt; i++) { @@ -56,10 +56,10 @@ static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) { unsigned i; - bio->bi_sector = bd->bi_sector; + bio->bi_iter.bi_sector = bd->bi_sector; bio->bi_bdev = bd->bi_bdev; - bio->bi_size = bd->bi_size; - bio->bi_idx = bd->bi_idx; + bio->bi_iter.bi_size = bd->bi_size; + bio->bi_iter.bi_idx = bd->bi_idx; bio->bi_flags = bd->bi_flags; for (i = 0; i < bio->bi_vcnt; i++) { diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 173cbb20d104..4113b6044b80 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -538,7 +538,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, bio_init(&b->bio); b->bio.bi_io_vec = b->bio_vec; b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS; - b->bio.bi_sector = block << b->c->sectors_per_block_bits; + b->bio.bi_iter.bi_sector = block << 
b->c->sectors_per_block_bits; b->bio.bi_bdev = b->c->bdev; b->bio.bi_end_io = end_io; diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index 416b7b752a6e..bfba97dcde2d 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c @@ -72,7 +72,7 @@ static enum io_pattern iot_pattern(struct io_tracker *t) static void iot_update_stats(struct io_tracker *t, struct bio *bio) { - if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1) + if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1) t->nr_seq_samples++; else { /* @@ -87,7 +87,7 @@ static void iot_update_stats(struct io_tracker *t, struct bio *bio) t->nr_rand_samples++; } - t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1); + t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1); } static void iot_check_for_pattern_switch(struct io_tracker *t) diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 9efcf1059b99..86f9c83eb30c 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -664,15 +664,17 @@ static void remap_to_origin(struct cache *cache, struct bio *bio) static void remap_to_cache(struct cache *cache, struct bio *bio, dm_cblock_t cblock) { - sector_t bi_sector = bio->bi_sector; + sector_t bi_sector = bio->bi_iter.bi_sector; bio->bi_bdev = cache->cache_dev->bdev; if (!block_size_is_power_of_two(cache)) - bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) + - sector_div(bi_sector, cache->sectors_per_block); + bio->bi_iter.bi_sector = + (from_cblock(cblock) * cache->sectors_per_block) + + sector_div(bi_sector, cache->sectors_per_block); else - bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) | - (bi_sector & (cache->sectors_per_block - 1)); + bio->bi_iter.bi_sector = + (from_cblock(cblock) << cache->sectors_per_block_shift) | + (bi_sector & (cache->sectors_per_block - 1)); } static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) @@ -712,7 +714,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) { - sector_t block_nr = bio->bi_sector; + sector_t block_nr = bio->bi_iter.bi_sector; if (!block_size_is_power_of_two(cache)) (void) sector_div(block_nr, cache->sectors_per_block); @@ -1027,7 +1029,7 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio) static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) { return (bio_data_dir(bio) == WRITE) && - (bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); + (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); } static void avoid_copy(struct dm_cache_migration *mg) @@ -1252,7 +1254,7 @@ static void process_flush_bio(struct cache *cache, struct bio *bio) size_t pb_data_size = get_per_bio_data_size(cache); struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); - BUG_ON(bio->bi_size); + BUG_ON(bio->bi_iter.bi_size); if (!pb->req_nr) remap_to_origin(cache, bio); else @@ -1275,9 +1277,9 @@ static void process_flush_bio(struct cache *cache, struct bio *bio) */ static void process_discard_bio(struct cache *cache, struct bio *bio) { - dm_block_t start_block = dm_sector_div_up(bio->bi_sector, + dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector, cache->discard_block_size); - dm_block_t end_block = bio->bi_sector + bio_sectors(bio); + dm_block_t end_block = bio_end_sector(bio); dm_block_t b; end_block = block_div(end_block, 
cache->discard_block_size); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 81b0fa660452..1e2e5465d28e 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -828,8 +828,8 @@ static void crypt_convert_init(struct crypt_config *cc, ctx->bio_out = bio_out; ctx->offset_in = 0; ctx->offset_out = 0; - ctx->idx_in = bio_in ? bio_in->bi_idx : 0; - ctx->idx_out = bio_out ? bio_out->bi_idx : 0; + ctx->idx_in = bio_in ? bio_in->bi_iter.bi_idx : 0; + ctx->idx_out = bio_out ? bio_out->bi_iter.bi_idx : 0; ctx->cc_sector = sector + cc->iv_offset; init_completion(&ctx->restart); } @@ -1021,7 +1021,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, size -= len; } - if (!clone->bi_size) { + if (!clone->bi_iter.bi_size) { bio_put(clone); return NULL; } @@ -1161,7 +1161,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) crypt_inc_pending(io); clone_init(io, clone); - clone->bi_sector = cc->start + io->sector; + clone->bi_iter.bi_sector = cc->start + io->sector; generic_make_request(clone); return 0; @@ -1209,7 +1209,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) /* crypt_convert should have filled the clone bio */ BUG_ON(io->ctx.idx_out < clone->bi_vcnt); - clone->bi_sector = cc->start + io->sector; + clone->bi_iter.bi_sector = cc->start + io->sector; if (async) kcryptd_queue_io(io); @@ -1224,7 +1224,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) struct dm_crypt_io *new_io; int crypt_finished; unsigned out_of_pages = 0; - unsigned remaining = io->base_bio->bi_size; + unsigned remaining = io->base_bio->bi_iter.bi_size; sector_t sector = io->sector; int r; @@ -1248,7 +1248,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) io->ctx.bio_out = clone; io->ctx.idx_out = 0; - remaining -= clone->bi_size; + remaining -= clone->bi_iter.bi_size; sector += bio_sectors(clone); crypt_inc_pending(io); @@ -1869,11 +1869,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { bio->bi_bdev = cc->dev->bdev; if (bio_sectors(bio)) - bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector); + bio->bi_iter.bi_sector = cc->start + + dm_target_offset(ti, bio->bi_iter.bi_sector); return DM_MAPIO_REMAPPED; } - io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector)); + io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); if (bio_data_dir(io->base_bio) == READ) { if (kcryptd_io_read(io, GFP_NOWAIT)) diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 496d5f3646a5..84c860191a2e 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -281,14 +281,15 @@ static int delay_map(struct dm_target *ti, struct bio *bio) if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { bio->bi_bdev = dc->dev_write->bdev; if (bio_sectors(bio)) - bio->bi_sector = dc->start_write + - dm_target_offset(ti, bio->bi_sector); + bio->bi_iter.bi_sector = dc->start_write + + dm_target_offset(ti, bio->bi_iter.bi_sector); return delay_bio(dc, dc->write_delay, bio); } bio->bi_bdev = dc->dev_read->bdev; - bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector); + bio->bi_iter.bi_sector = dc->start_read + + dm_target_offset(ti, bio->bi_iter.bi_sector); return delay_bio(dc, dc->read_delay, bio); } diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index c80a0ec5f126..b257e46876d3 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -248,7 +248,8 @@ static void 
flakey_map_bio(struct dm_target *ti, struct bio *bio) bio->bi_bdev = fc->dev->bdev; if (bio_sectors(bio)) - bio->bi_sector = flakey_map_sector(ti, bio->bi_sector); + bio->bi_iter.bi_sector = + flakey_map_sector(ti, bio->bi_iter.bi_sector); } static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) @@ -265,8 +266,8 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n", bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, - (bio_data_dir(bio) == WRITE) ? 'w' : 'r', - bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes); + (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw, + (unsigned long long)bio->bi_iter.bi_sector, bio_bytes); } } diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 2a20986a2fec..01558b093307 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -304,14 +304,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT))); bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); - bio->bi_sector = where->sector + (where->count - remaining); + bio->bi_iter.bi_sector = where->sector + (where->count - remaining); bio->bi_bdev = where->bdev; bio->bi_end_io = endio; store_io_and_region_in_bio(bio, io, region); if (rw & REQ_DISCARD) { num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); - bio->bi_size = num_sectors << SECTOR_SHIFT; + bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; remaining -= num_sectors; } else if (rw & REQ_WRITE_SAME) { /* @@ -320,7 +320,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, dp->get_page(dp, &page, &len, &offset); bio_add_page(bio, page, logical_block_size, offset); num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); - bio->bi_size = num_sectors << SECTOR_SHIFT; + bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; offset = 0; remaining -= num_sectors; diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 4f99d267340c..53e848c10939 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -85,7 +85,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio) bio->bi_bdev = lc->dev->bdev; if (bio_sectors(bio)) - bio->bi_sector = linear_map_sector(ti, bio->bi_sector); + bio->bi_iter.bi_sector = + linear_map_sector(ti, bio->bi_iter.bi_sector); } static int linear_map(struct dm_target *ti, struct bio *bio) diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 9584443c5614..9f6d8e6baa7d 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -432,7 +432,7 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio) region_t region = dm_rh_bio_to_region(ms->rh, bio); if (log->type->in_sync(log, region, 0)) - return choose_mirror(ms, bio->bi_sector) ? 1 : 0; + return choose_mirror(ms, bio->bi_iter.bi_sector) ? 
1 : 0; return 0; } @@ -442,15 +442,15 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio) */ static sector_t map_sector(struct mirror *m, struct bio *bio) { - if (unlikely(!bio->bi_size)) + if (unlikely(!bio->bi_iter.bi_size)) return 0; - return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector); + return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector); } static void map_bio(struct mirror *m, struct bio *bio) { bio->bi_bdev = m->dev->bdev; - bio->bi_sector = map_sector(m, bio); + bio->bi_iter.bi_sector = map_sector(m, bio); } static void map_region(struct dm_io_region *io, struct mirror *m, @@ -527,7 +527,7 @@ static void read_async_bio(struct mirror *m, struct bio *bio) struct dm_io_request io_req = { .bi_rw = READ, .mem.type = DM_IO_BVEC, - .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, + .mem.ptr.bvec = bio->bi_io_vec + bio->bi_iter.bi_idx, .notify.fn = read_callback, .notify.context = bio, .client = m->ms->io_client, @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) * We can only read balance if the region is in sync. */ if (likely(region_in_sync(ms, region, 1))) - m = choose_mirror(ms, bio->bi_sector); + m = choose_mirror(ms, bio->bi_iter.bi_sector); else if (m && atomic_read(&m->error_count)) m = NULL; @@ -630,7 +630,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio) struct dm_io_request io_req = { .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA), .mem.type = DM_IO_BVEC, - .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, + .mem.ptr.bvec = bio->bi_io_vec + bio->bi_iter.bi_idx, .notify.fn = write_callback, .notify.context = bio, .client = ms->io_client, @@ -1181,7 +1181,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio) * The region is in-sync and we can perform reads directly. * Store enough information so we can retry if it fails. */ - m = choose_mirror(ms, bio->bi_sector); + m = choose_mirror(ms, bio->bi_iter.bi_sector); if (unlikely(!m)) return -EIO; diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 69732e03eb34..b929fd5f4984 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c @@ -126,7 +126,8 @@ EXPORT_SYMBOL_GPL(dm_rh_region_to_sector); region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) { - return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin); + return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector - + rh->target_begin); } EXPORT_SYMBOL_GPL(dm_rh_bio_to_region); diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index aec57d76db5d..3ded8c729dfb 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1562,11 +1562,10 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e, struct bio *bio, chunk_t chunk) { bio->bi_bdev = s->cow->bdev; - bio->bi_sector = chunk_to_sector(s->store, - dm_chunk_number(e->new_chunk) + - (chunk - e->old_chunk)) + - (bio->bi_sector & - s->store->chunk_mask); + bio->bi_iter.bi_sector = + chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) + + (chunk - e->old_chunk)) + + (bio->bi_iter.bi_sector & s->store->chunk_mask); } static int snapshot_map(struct dm_target *ti, struct bio *bio) @@ -1584,7 +1583,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_REMAPPED; } - chunk = sector_to_chunk(s->store, bio->bi_sector); + chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); /* Full snapshots are not usable */ /* To get here the table must be live so s->active is always set. 
*/ @@ -1645,7 +1644,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) r = DM_MAPIO_SUBMITTED; if (!pe->started && - bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) { + bio->bi_iter.bi_size == + (s->store->chunk_size << SECTOR_SHIFT)) { pe->started = 1; up_write(&s->lock); start_full_bio(pe, bio); @@ -1701,7 +1701,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_REMAPPED; } - chunk = sector_to_chunk(s->store, bio->bi_sector); + chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); down_write(&s->lock); @@ -2038,7 +2038,7 @@ static int do_origin(struct dm_dev *origin, struct bio *bio) down_read(&_origins_lock); o = __lookup_origin(origin->bdev); if (o) - r = __origin_write(&o->snapshots, bio->bi_sector, bio); + r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio); up_read(&_origins_lock); return r; diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 73c1712dad96..d1600d2aa2e2 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -259,13 +259,15 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio, { sector_t begin, end; - stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin); + stripe_map_range_sector(sc, bio->bi_iter.bi_sector, + target_stripe, &begin); stripe_map_range_sector(sc, bio_end_sector(bio), target_stripe, &end); if (begin < end) { bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; - bio->bi_sector = begin + sc->stripe[target_stripe].physical_start; - bio->bi_size = to_bytes(end - begin); + bio->bi_iter.bi_sector = begin + + sc->stripe[target_stripe].physical_start; + bio->bi_iter.bi_size = to_bytes(end - begin); return DM_MAPIO_REMAPPED; } else { /* The range doesn't map to the target stripe */ @@ -293,9 +295,10 @@ static int stripe_map(struct dm_target *ti, struct bio *bio) return stripe_map_range(sc, bio, target_bio_nr); } - stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector); + stripe_map_sector(sc, bio->bi_iter.bi_sector, + &stripe, &bio->bi_iter.bi_sector); - bio->bi_sector += sc->stripe[stripe].physical_start; + bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start; bio->bi_bdev = sc->stripe[stripe].dev->bdev; return DM_MAPIO_REMAPPED; diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c index ff9ac4be4721..09a688b3d48c 100644 --- a/drivers/md/dm-switch.c +++ b/drivers/md/dm-switch.c @@ -311,11 +311,11 @@ error: static int switch_map(struct dm_target *ti, struct bio *bio) { struct switch_ctx *sctx = ti->private; - sector_t offset = dm_target_offset(ti, bio->bi_sector); + sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector); unsigned path_nr = switch_get_path_nr(sctx, offset); bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; - bio->bi_sector = sctx->path_list[path_nr].start + offset; + bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset; return DM_MAPIO_REMAPPED; } diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 2c0cf511ec23..a65402480c8c 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -413,7 +413,7 @@ static bool block_size_is_power_of_two(struct pool *pool) static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) { struct pool *pool = tc->pool; - sector_t block_nr = bio->bi_sector; + sector_t block_nr = bio->bi_iter.bi_sector; if (block_size_is_power_of_two(pool)) block_nr >>= pool->sectors_per_block_shift; @@ -426,14 +426,15 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) static void remap(struct thin_c *tc, struct bio 
*bio, dm_block_t block) { struct pool *pool = tc->pool; - sector_t bi_sector = bio->bi_sector; + sector_t bi_sector = bio->bi_iter.bi_sector; bio->bi_bdev = tc->pool_dev->bdev; if (block_size_is_power_of_two(pool)) - bio->bi_sector = (block << pool->sectors_per_block_shift) | - (bi_sector & (pool->sectors_per_block - 1)); + bio->bi_iter.bi_sector = + (block << pool->sectors_per_block_shift) | + (bi_sector & (pool->sectors_per_block - 1)); else - bio->bi_sector = (block * pool->sectors_per_block) + + bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + sector_div(bi_sector, pool->sectors_per_block); } @@ -721,7 +722,8 @@ static void process_prepared(struct pool *pool, struct list_head *head, */ static int io_overlaps_block(struct pool *pool, struct bio *bio) { - return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT); + return bio->bi_iter.bi_size == + (pool->sectors_per_block << SECTOR_SHIFT); } static int io_overwrites_block(struct pool *pool, struct bio *bio) @@ -1130,7 +1132,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio, if (bio_detain(pool, &key, bio, &cell)) return; - if (bio_data_dir(bio) == WRITE && bio->bi_size) + if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) break_sharing(tc, bio, block, &key, lookup_result, cell); else { struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); @@ -1153,7 +1155,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block /* * Remap empty bios (flushes) immediately, without provisioning. */ - if (!bio->bi_size) { + if (!bio->bi_iter.bi_size) { inc_all_io_entry(pool, bio); cell_defer_no_holder(tc, cell); @@ -1253,7 +1255,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio) r = dm_thin_find_block(tc->td, block, 1, &lookup_result); switch (r) { case 0: - if (lookup_result.shared && (rw == WRITE) && bio->bi_size) + if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) bio_io_error(bio); else { inc_all_io_entry(tc->pool, bio); @@ -2867,7 +2869,7 @@ out_unlock: static int thin_map(struct dm_target *ti, struct bio *bio) { - bio->bi_sector = dm_target_offset(ti, bio->bi_sector); + bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); return thin_bio_map(ti, bio); } diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index 4b7941db3aff..132b3154d466 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c @@ -493,9 +493,9 @@ static int verity_map(struct dm_target *ti, struct bio *bio) struct dm_verity_io *io; bio->bi_bdev = v->data_dev->bdev; - bio->bi_sector = verity_map_sector(v, bio->bi_sector); + bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector); - if (((unsigned)bio->bi_sector | bio_sectors(bio)) & + if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) & ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) { DMERR_LIMIT("unaligned io"); return -EIO; @@ -514,8 +514,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio) io->v = v; io->orig_bi_end_io = bio->bi_end_io; io->orig_bi_private = bio->bi_private; - io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); - io->n_blocks = bio->bi_size >> v->data_dev_block_bits; + io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); + io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits; bio->bi_end_io = verity_end_io; bio->bi_private = io; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 0704c523a76b..ccd064ea4fe6 100644 --- a/drivers/md/dm.c +++ 
b/drivers/md/dm.c @@ -575,7 +575,7 @@ static void start_io_acct(struct dm_io *io) atomic_inc_return(&md->pending[rw])); if (unlikely(dm_stats_used(&md->stats))) - dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, + dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, bio_sectors(bio), false, 0, &io->stats_aux); } @@ -593,7 +593,7 @@ static void end_io_acct(struct dm_io *io) part_stat_unlock(); if (unlikely(dm_stats_used(&md->stats))) - dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, + dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, bio_sectors(bio), true, duration, &io->stats_aux); /* @@ -742,7 +742,7 @@ static void dec_pending(struct dm_io *io, int error) if (io_error == DM_ENDIO_REQUEUE) return; - if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) { + if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) { /* * Preflush done for flush with data, reissue * without REQ_FLUSH. @@ -797,7 +797,7 @@ static void end_clone_bio(struct bio *clone, int error) struct dm_rq_clone_bio_info *info = clone->bi_private; struct dm_rq_target_io *tio = info->tio; struct bio *bio = info->orig; - unsigned int nr_bytes = info->orig->bi_size; + unsigned int nr_bytes = info->orig->bi_iter.bi_size; bio_put(clone); @@ -1128,7 +1128,7 @@ static void __map_bio(struct dm_target_io *tio) * this io. */ atomic_inc(&tio->io->io_count); - sector = clone->bi_sector; + sector = clone->bi_iter.bi_sector; r = ti->type->map(ti, clone); if (r == DM_MAPIO_REMAPPED) { /* the bio has been remapped so dispatch it */ @@ -1160,13 +1160,13 @@ struct clone_info { static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len) { - bio->bi_sector = sector; - bio->bi_size = to_bytes(len); + bio->bi_iter.bi_sector = sector; + bio->bi_iter.bi_size = to_bytes(len); } static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count) { - bio->bi_idx = idx; + bio->bi_iter.bi_idx = idx; bio->bi_vcnt = idx + bv_count; bio->bi_flags &= ~(1 << BIO_SEG_VALID); } @@ -1202,7 +1202,7 @@ static void clone_split_bio(struct dm_target_io *tio, struct bio *bio, clone->bi_rw = bio->bi_rw; clone->bi_vcnt = 1; clone->bi_io_vec->bv_offset = offset; - clone->bi_io_vec->bv_len = clone->bi_size; + clone->bi_io_vec->bv_len = clone->bi_iter.bi_size; clone->bi_flags |= 1 << BIO_CLONED; clone_bio_integrity(bio, clone, idx, len, offset, 1); @@ -1222,7 +1222,8 @@ static void clone_bio(struct dm_target_io *tio, struct bio *bio, bio_setup_sector(clone, sector, len); bio_setup_bv(clone, idx, bv_count); - if (idx != bio->bi_idx || clone->bi_size < bio->bi_size) + if (idx != bio->bi_iter.bi_idx || + clone->bi_iter.bi_size < bio->bi_iter.bi_size) trim = 1; clone_bio_integrity(bio, clone, idx, len, 0, trim); } @@ -1510,8 +1511,8 @@ static void __split_and_process_bio(struct mapped_device *md, ci.io->bio = bio; ci.io->md = md; spin_lock_init(&ci.io->endio_lock); - ci.sector = bio->bi_sector; - ci.idx = bio->bi_idx; + ci.sector = bio->bi_iter.bi_sector; + ci.idx = bio->bi_iter.bi_idx; start_io_acct(ci.io); diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index 3193aefe982b..e8b4574956c7 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c @@ -74,8 +74,8 @@ static void faulty_fail(struct bio *bio, int error) { struct bio *b = bio->bi_private; - b->bi_size = bio->bi_size; - b->bi_sector = bio->bi_sector; + b->bi_iter.bi_size = bio->bi_iter.bi_size; + b->bi_iter.bi_sector = bio->bi_iter.bi_sector; bio_put(bio); @@ -185,26 +185,31 @@ static void make_request(struct mddev *mddev, struct 
bio *bio) return; } - if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE)) + if (check_sector(conf, bio->bi_iter.bi_sector, + bio_end_sector(bio), WRITE)) failit = 1; if (check_mode(conf, WritePersistent)) { - add_sector(conf, bio->bi_sector, WritePersistent); + add_sector(conf, bio->bi_iter.bi_sector, + WritePersistent); failit = 1; } if (check_mode(conf, WriteTransient)) failit = 1; } else { /* read request */ - if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ)) + if (check_sector(conf, bio->bi_iter.bi_sector, + bio_end_sector(bio), READ)) failit = 1; if (check_mode(conf, ReadTransient)) failit = 1; if (check_mode(conf, ReadPersistent)) { - add_sector(conf, bio->bi_sector, ReadPersistent); + add_sector(conf, bio->bi_iter.bi_sector, + ReadPersistent); failit = 1; } if (check_mode(conf, ReadFixable)) { - add_sector(conf, bio->bi_sector, ReadFixable); + add_sector(conf, bio->bi_iter.bi_sector, + ReadFixable); failit = 1; } } diff --git a/drivers/md/linear.c b/drivers/md/linear.c index f03fabd2b37b..fb3b0d04edfb 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -297,19 +297,19 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) } rcu_read_lock(); - tmp_dev = which_dev(mddev, bio->bi_sector); + tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector); start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; - if (unlikely(bio->bi_sector >= (tmp_dev->end_sector) - || (bio->bi_sector < start_sector))) { + if (unlikely(bio->bi_iter.bi_sector >= (tmp_dev->end_sector) + || (bio->bi_iter.bi_sector < start_sector))) { char b[BDEVNAME_SIZE]; printk(KERN_ERR "md/linear:%s: make_request: Sector %llu out of bounds on " "dev %s: %llu sectors, offset %llu\n", mdname(mddev), - (unsigned long long)bio->bi_sector, + (unsigned long long)bio->bi_iter.bi_sector, bdevname(tmp_dev->rdev->bdev, b), (unsigned long long)tmp_dev->rdev->sectors, (unsigned long long)start_sector); @@ -326,7 +326,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) rcu_read_unlock(); - bp = bio_split(bio, end_sector - bio->bi_sector); + bp = bio_split(bio, end_sector - bio->bi_iter.bi_sector); linear_make_request(mddev, &bp->bio1); linear_make_request(mddev, &bp->bio2); @@ -335,7 +335,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) } bio->bi_bdev = tmp_dev->rdev->bdev; - bio->bi_sector = bio->bi_sector - start_sector + bio->bi_iter.bi_sector = bio->bi_iter.bi_sector - start_sector + tmp_dev->rdev->data_offset; rcu_read_unlock(); diff --git a/drivers/md/md.c b/drivers/md/md.c index 739b1ec54e28..b07fed398fd7 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -393,7 +393,7 @@ static void md_submit_flush_data(struct work_struct *ws) struct mddev *mddev = container_of(ws, struct mddev, flush_work); struct bio *bio = mddev->flush_bio; - if (bio->bi_size == 0) + if (bio->bi_iter.bi_size == 0) /* an empty barrier - all done */ bio_endio(bio, 0); else { @@ -754,7 +754,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; - bio->bi_sector = sector; + bio->bi_iter.bi_sector = sector; bio_add_page(bio, page, size, 0); bio->bi_private = rdev; bio->bi_end_io = super_written; @@ -785,13 +785,13 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? 
rdev->meta_bdev : rdev->bdev; if (metadata_op) - bio->bi_sector = sector + rdev->sb_start; + bio->bi_iter.bi_sector = sector + rdev->sb_start; else if (rdev->mddev->reshape_position != MaxSector && (rdev->mddev->reshape_backwards == (sector >= rdev->mddev->reshape_position))) - bio->bi_sector = sector + rdev->new_data_offset; + bio->bi_iter.bi_sector = sector + rdev->new_data_offset; else - bio->bi_sector = sector + rdev->data_offset; + bio->bi_iter.bi_sector = sector + rdev->data_offset; bio_add_page(bio, page, size, 0); submit_bio_wait(rw, bio); diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 1642eae75a33..849ad39f547b 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -100,7 +100,7 @@ static void multipath_end_request(struct bio *bio, int error) md_error (mp_bh->mddev, rdev); printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", bdevname(rdev->bdev,b), - (unsigned long long)bio->bi_sector); + (unsigned long long)bio->bi_iter.bi_sector); multipath_reschedule_retry(mp_bh); } else multipath_end_bh_io(mp_bh, error); @@ -132,7 +132,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio) multipath = conf->multipaths + mp_bh->path; mp_bh->bio = *bio; - mp_bh->bio.bi_sector += multipath->rdev->data_offset; + mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset; mp_bh->bio.bi_bdev = multipath->rdev->bdev; mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT; mp_bh->bio.bi_end_io = multipath_end_request; @@ -355,21 +355,22 @@ static void multipathd(struct md_thread *thread) spin_unlock_irqrestore(&conf->device_lock, flags); bio = &mp_bh->bio; - bio->bi_sector = mp_bh->master_bio->bi_sector; + bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector; if ((mp_bh->path = multipath_map (conf))<0) { printk(KERN_ALERT "multipath: %s: unrecoverable IO read" " error for block %llu\n", bdevname(bio->bi_bdev,b), - (unsigned long long)bio->bi_sector); + (unsigned long long)bio->bi_iter.bi_sector); multipath_end_bh_io(mp_bh, -EIO); } else { printk(KERN_ERR "multipath: %s: redirecting sector %llu" " to another IO path\n", bdevname(bio->bi_bdev,b), - (unsigned long long)bio->bi_sector); + (unsigned long long)bio->bi_iter.bi_sector); *bio = *(mp_bh->master_bio); - bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset; + bio->bi_iter.bi_sector += + conf->multipaths[mp_bh->path].rdev->data_offset; bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; bio->bi_rw |= REQ_FAILFAST_TRANSPORT; bio->bi_end_io = multipath_end_request; diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index c4d420b7d2f4..e38d1d3226f3 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -501,10 +501,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev, unsigned int chunk_sects, struct bio *bio) { if (likely(is_power_of_2(chunk_sects))) { - return chunk_sects >= ((bio->bi_sector & (chunk_sects-1)) + return chunk_sects >= + ((bio->bi_iter.bi_sector & (chunk_sects-1)) + bio_sectors(bio)); } else{ - sector_t sector = bio->bi_sector; + sector_t sector = bio->bi_iter.bi_sector; return chunk_sects >= (sector_div(sector, chunk_sects) + bio_sectors(bio)); } @@ -524,7 +525,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) chunk_sects = mddev->chunk_sectors; if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) { - sector_t sector = bio->bi_sector; + sector_t sector = bio->bi_iter.bi_sector; struct bio_pair *bp; /* Sanity check -- queue functions should prevent this happening */ if (bio_segments(bio) > 1) @@ 
-544,12 +545,12 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) return; } - sector_offset = bio->bi_sector; + sector_offset = bio->bi_iter.bi_sector; zone = find_zone(mddev->private, §or_offset); - tmp_dev = map_sector(mddev, zone, bio->bi_sector, + tmp_dev = map_sector(mddev, zone, bio->bi_iter.bi_sector, §or_offset); bio->bi_bdev = tmp_dev->bdev; - bio->bi_sector = sector_offset + zone->dev_start + + bio->bi_iter.bi_sector = sector_offset + zone->dev_start + tmp_dev->data_offset; if (unlikely((bio->bi_rw & REQ_DISCARD) && @@ -566,7 +567,8 @@ bad_map: printk("md/raid0:%s: make_request bug: can't convert block across chunks" " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects / 2, - (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2); + (unsigned long long)bio->bi_iter.bi_sector, + bio_sectors(bio) / 2); bio_io_error(bio); return; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 1e5a540995e9..db3b9d7314f1 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -229,7 +229,7 @@ static void call_bio_endio(struct r1bio *r1_bio) int done; struct r1conf *conf = r1_bio->mddev->private; sector_t start_next_window = r1_bio->start_next_window; - sector_t bi_sector = bio->bi_sector; + sector_t bi_sector = bio->bi_iter.bi_sector; if (bio->bi_phys_segments) { unsigned long flags; @@ -265,9 +265,8 @@ static void raid_end_bio_io(struct r1bio *r1_bio) if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { pr_debug("raid1: sync end %s on sectors %llu-%llu\n", (bio_data_dir(bio) == WRITE) ? "write" : "read", - (unsigned long long) bio->bi_sector, - (unsigned long long) bio->bi_sector + - bio_sectors(bio) - 1); + (unsigned long long) bio->bi_iter.bi_sector, + (unsigned long long) bio_end_sector(bio) - 1); call_bio_endio(r1_bio); } @@ -466,9 +465,8 @@ static void raid1_end_write_request(struct bio *bio, int error) struct bio *mbio = r1_bio->master_bio; pr_debug("raid1: behind end write sectors" " %llu-%llu\n", - (unsigned long long) mbio->bi_sector, - (unsigned long long) mbio->bi_sector + - bio_sectors(mbio) - 1); + (unsigned long long) mbio->bi_iter.bi_sector, + (unsigned long long) bio_end_sector(mbio) - 1); call_bio_endio(r1_bio); } } @@ -875,7 +873,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio) else if ((conf->next_resync - RESYNC_WINDOW_SECTORS >= bio_end_sector(bio)) || (conf->next_resync + NEXT_NORMALIO_DISTANCE - <= bio->bi_sector)) + <= bio->bi_iter.bi_sector)) wait = false; else wait = true; @@ -913,19 +911,19 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) if (bio && bio_data_dir(bio) == WRITE) { if (conf->next_resync + NEXT_NORMALIO_DISTANCE - <= bio->bi_sector) { + <= bio->bi_iter.bi_sector) { if (conf->start_next_window == MaxSector) conf->start_next_window = conf->next_resync + NEXT_NORMALIO_DISTANCE; if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE) - <= bio->bi_sector) + <= bio->bi_iter.bi_sector) conf->next_window_requests++; else conf->current_window_requests++; } - if (bio->bi_sector >= conf->start_next_window) + if (bio->bi_iter.bi_sector >= conf->start_next_window) sector = conf->start_next_window; } @@ -1028,7 +1026,8 @@ do_sync_io: if (bvecs[i].bv_page) put_page(bvecs[i].bv_page); kfree(bvecs); - pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); + pr_debug("%dB behind alloc failed, doing sync I/O\n", + bio->bi_iter.bi_size); } struct raid1_plug_cb { @@ -1108,7 +1107,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) if (bio_data_dir(bio) 
== WRITE && bio_end_sector(bio) > mddev->suspend_lo && - bio->bi_sector < mddev->suspend_hi) { + bio->bi_iter.bi_sector < mddev->suspend_hi) { /* As the suspend_* range is controlled by * userspace, we want an interruptible * wait. @@ -1119,7 +1118,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) prepare_to_wait(&conf->wait_barrier, &w, TASK_INTERRUPTIBLE); if (bio_end_sector(bio) <= mddev->suspend_lo || - bio->bi_sector >= mddev->suspend_hi) + bio->bi_iter.bi_sector >= mddev->suspend_hi) break; schedule(); } @@ -1141,7 +1140,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) r1_bio->sectors = bio_sectors(bio); r1_bio->state = 0; r1_bio->mddev = mddev; - r1_bio->sector = bio->bi_sector; + r1_bio->sector = bio->bi_iter.bi_sector; /* We might need to issue multiple reads to different * devices if there are bad blocks around, so we keep @@ -1181,12 +1180,13 @@ read_again: r1_bio->read_disk = rdisk; read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(read_bio, r1_bio->sector - bio->bi_sector, + bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors); r1_bio->bios[rdisk] = read_bio; - read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; + read_bio->bi_iter.bi_sector = r1_bio->sector + + mirror->rdev->data_offset; read_bio->bi_bdev = mirror->rdev->bdev; read_bio->bi_end_io = raid1_end_read_request; read_bio->bi_rw = READ | do_sync; @@ -1198,7 +1198,7 @@ read_again: */ sectors_handled = (r1_bio->sector + max_sectors - - bio->bi_sector); + - bio->bi_iter.bi_sector); r1_bio->sectors = max_sectors; spin_lock_irq(&conf->device_lock); if (bio->bi_phys_segments == 0) @@ -1219,7 +1219,8 @@ read_again: r1_bio->sectors = bio_sectors(bio) - sectors_handled; r1_bio->state = 0; r1_bio->mddev = mddev; - r1_bio->sector = bio->bi_sector + sectors_handled; + r1_bio->sector = bio->bi_iter.bi_sector + + sectors_handled; goto read_again; } else generic_make_request(read_bio); @@ -1322,7 +1323,7 @@ read_again: if (r1_bio->bios[j]) rdev_dec_pending(conf->mirrors[j].rdev, mddev); r1_bio->state = 0; - allow_barrier(conf, start_next_window, bio->bi_sector); + allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector); md_wait_for_blocked_rdev(blocked_rdev, mddev); start_next_window = wait_barrier(conf, bio); /* @@ -1349,7 +1350,7 @@ read_again: bio->bi_phys_segments++; spin_unlock_irq(&conf->device_lock); } - sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector; + sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector; atomic_set(&r1_bio->remaining, 1); atomic_set(&r1_bio->behind_remaining, 0); @@ -1361,7 +1362,7 @@ read_again: continue; mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors); + bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors); if (first_clone) { /* do behind I/O ? 
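
For reference (this note and the sketch below are not part of the diff): every hunk in this conversion applies the same mechanical substitution. The fields that track a bio's current position (bi_sector, bi_size, bi_idx) move from struct bio itself into the embedded iterator bio->bi_iter. A minimal sketch of the pattern, assuming a struct bio *bio is in scope and using the bvec_iter layout introduced by the immutable-biovec series (the struct is paraphrased from memory for illustration, not quoted from this patch):

	struct bvec_iter {
		sector_t	bi_sector;	/* device address in 512-byte sectors */
		unsigned int	bi_size;	/* remaining I/O size, in bytes */
		unsigned int	bi_idx;		/* current index into bi_io_vec[] */
		unsigned int	bi_bvec_done;	/* bytes completed in the current bvec */
	};

	sector_t sector = bio->bi_iter.bi_sector;	/* was bio->bi_sector */
	unsigned bytes  = bio->bi_iter.bi_size;		/* was bio->bi_size   */
	unsigned idx    = bio->bi_iter.bi_idx;		/* was bio->bi_idx    */

The values themselves are unchanged; only the field they live in moves, which is why each hunk is a pure rename of the accessor path.
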
@@ -1395,7 +1396,7 @@ read_again: r1_bio->bios[i] = mbio; - mbio->bi_sector = (r1_bio->sector + + mbio->bi_iter.bi_sector = (r1_bio->sector + conf->mirrors[i].rdev->data_offset); mbio->bi_bdev = conf->mirrors[i].rdev->bdev; mbio->bi_end_io = raid1_end_write_request; @@ -1435,7 +1436,7 @@ read_again: r1_bio->sectors = bio_sectors(bio) - sectors_handled; r1_bio->state = 0; r1_bio->mddev = mddev; - r1_bio->sector = bio->bi_sector + sectors_handled; + r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled; goto retry_write; } @@ -1959,14 +1960,14 @@ static int process_checks(struct r1bio *r1_bio) /* fixup the bio for reuse */ bio_reset(b); b->bi_vcnt = vcnt; - b->bi_size = r1_bio->sectors << 9; - b->bi_sector = r1_bio->sector + + b->bi_iter.bi_size = r1_bio->sectors << 9; + b->bi_iter.bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; b->bi_bdev = conf->mirrors[i].rdev->bdev; b->bi_end_io = end_sync_read; b->bi_private = r1_bio; - size = b->bi_size; + size = b->bi_iter.bi_size; for (j = 0; j < vcnt ; j++) { struct bio_vec *bi; bi = &b->bi_io_vec[j]; @@ -2221,11 +2222,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i) } wbio->bi_rw = WRITE; - wbio->bi_sector = r1_bio->sector; - wbio->bi_size = r1_bio->sectors << 9; + wbio->bi_iter.bi_sector = r1_bio->sector; + wbio->bi_iter.bi_size = r1_bio->sectors << 9; bio_trim(wbio, sector - r1_bio->sector, sectors); - wbio->bi_sector += rdev->data_offset; + wbio->bi_iter.bi_sector += rdev->data_offset; wbio->bi_bdev = rdev->bdev; if (submit_bio_wait(WRITE, wbio) == 0) /* failure! */ @@ -2339,7 +2340,8 @@ read_more: } r1_bio->read_disk = disk; bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); - bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors); + bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector, + max_sectors); r1_bio->bios[r1_bio->read_disk] = bio; rdev = conf->mirrors[disk].rdev; printk_ratelimited(KERN_ERR @@ -2348,7 +2350,7 @@ read_more: mdname(mddev), (unsigned long long)r1_bio->sector, bdevname(rdev->bdev, b)); - bio->bi_sector = r1_bio->sector + rdev->data_offset; + bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset; bio->bi_bdev = rdev->bdev; bio->bi_end_io = raid1_end_read_request; bio->bi_rw = READ | do_sync; @@ -2357,7 +2359,7 @@ read_more: /* Drat - have to split this up more */ struct bio *mbio = r1_bio->master_bio; int sectors_handled = (r1_bio->sector + max_sectors - - mbio->bi_sector); + - mbio->bi_iter.bi_sector); r1_bio->sectors = max_sectors; spin_lock_irq(&conf->device_lock); if (mbio->bi_phys_segments == 0) @@ -2375,7 +2377,8 @@ read_more: r1_bio->state = 0; set_bit(R1BIO_ReadError, &r1_bio->state); r1_bio->mddev = mddev; - r1_bio->sector = mbio->bi_sector + sectors_handled; + r1_bio->sector = mbio->bi_iter.bi_sector + + sectors_handled; goto read_more; } else @@ -2599,7 +2602,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp } if (bio->bi_end_io) { atomic_inc(&rdev->nr_pending); - bio->bi_sector = sector_nr + rdev->data_offset; + bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; bio->bi_bdev = rdev->bdev; bio->bi_private = r1_bio; } @@ -2699,7 +2702,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp continue; /* remove last page from this bio */ bio->bi_vcnt--; - bio->bi_size -= len; + bio->bi_iter.bi_size -= len; bio->bi_flags &= ~(1<< BIO_SEG_VALID); } goto bio_full; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index c504e8389e69..dbf3b63c2754 100644 --- a/drivers/md/raid10.c +++ 
b/drivers/md/raid10.c @@ -1182,7 +1182,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) /* If this request crosses a chunk boundary, we need to * split it. This will only happen for 1 PAGE (or less) requests. */ - if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio) + if (unlikely((bio->bi_iter.bi_sector & chunk_mask) + bio_sectors(bio) > chunk_sects && (conf->geo.near_copies < conf->geo.raid_disks || conf->prev.near_copies < conf->prev.raid_disks))) { @@ -1193,8 +1193,8 @@ static void make_request(struct mddev *mddev, struct bio * bio) /* This is a one page bio that upper layers * refuse to split for us, so we need to split it. */ - bp = bio_split(bio, - chunk_sects - (bio->bi_sector & (chunk_sects - 1)) ); + bp = bio_split(bio, chunk_sects - + (bio->bi_iter.bi_sector & (chunk_sects - 1))); /* Each of these 'make_request' calls will call 'wait_barrier'. * If the first succeeds but the second blocks due to the resync @@ -1221,7 +1221,8 @@ static void make_request(struct mddev *mddev, struct bio * bio) bad_map: printk("md/raid10:%s: make_request bug: can't convert block across chunks" " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2, - (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2); + (unsigned long long)bio->bi_iter.bi_sector, + bio_sectors(bio) / 2); bio_io_error(bio); return; @@ -1238,24 +1239,25 @@ static void make_request(struct mddev *mddev, struct bio * bio) sectors = bio_sectors(bio); while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && - bio->bi_sector < conf->reshape_progress && - bio->bi_sector + sectors > conf->reshape_progress) { + bio->bi_iter.bi_sector < conf->reshape_progress && + bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { /* IO spans the reshape position. Need to wait for * reshape to pass */ allow_barrier(conf); wait_event(conf->wait_barrier, - conf->reshape_progress <= bio->bi_sector || - conf->reshape_progress >= bio->bi_sector + sectors); + conf->reshape_progress <= bio->bi_iter.bi_sector || + conf->reshape_progress >= bio->bi_iter.bi_sector + + sectors); wait_barrier(conf); } if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && bio_data_dir(bio) == WRITE && (mddev->reshape_backwards - ? (bio->bi_sector < conf->reshape_safe && - bio->bi_sector + sectors > conf->reshape_progress) - : (bio->bi_sector + sectors > conf->reshape_safe && - bio->bi_sector < conf->reshape_progress))) { + ? 
(bio->bi_iter.bi_sector < conf->reshape_safe && + bio->bi_iter.bi_sector + sectors > conf->reshape_progress) + : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && + bio->bi_iter.bi_sector < conf->reshape_progress))) { /* Need to update reshape_position in metadata */ mddev->reshape_position = conf->reshape_progress; set_bit(MD_CHANGE_DEVS, &mddev->flags); @@ -1273,7 +1275,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) r10_bio->sectors = sectors; r10_bio->mddev = mddev; - r10_bio->sector = bio->bi_sector; + r10_bio->sector = bio->bi_iter.bi_sector; r10_bio->state = 0; /* We might need to issue multiple reads to different @@ -1302,13 +1304,13 @@ read_again: slot = r10_bio->read_slot; read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(read_bio, r10_bio->sector - bio->bi_sector, + bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); r10_bio->devs[slot].bio = read_bio; r10_bio->devs[slot].rdev = rdev; - read_bio->bi_sector = r10_bio->devs[slot].addr + + read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + choose_data_offset(r10_bio, rdev); read_bio->bi_bdev = rdev->bdev; read_bio->bi_end_io = raid10_end_read_request; @@ -1320,7 +1322,7 @@ read_again: * need another r10_bio. */ sectors_handled = (r10_bio->sectors + max_sectors - - bio->bi_sector); + - bio->bi_iter.bi_sector); r10_bio->sectors = max_sectors; spin_lock_irq(&conf->device_lock); if (bio->bi_phys_segments == 0) @@ -1341,7 +1343,8 @@ read_again: r10_bio->sectors = bio_sectors(bio) - sectors_handled; r10_bio->state = 0; r10_bio->mddev = mddev; - r10_bio->sector = bio->bi_sector + sectors_handled; + r10_bio->sector = bio->bi_iter.bi_sector + + sectors_handled; goto read_again; } else generic_make_request(read_bio); @@ -1499,7 +1502,8 @@ retry_write: bio->bi_phys_segments++; spin_unlock_irq(&conf->device_lock); } - sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector; + sectors_handled = r10_bio->sector + max_sectors - + bio->bi_iter.bi_sector; atomic_set(&r10_bio->remaining, 1); bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); @@ -1510,11 +1514,11 @@ retry_write: if (r10_bio->devs[i].bio) { struct md_rdev *rdev = conf->mirrors[d].rdev; mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(mbio, r10_bio->sector - bio->bi_sector, + bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); r10_bio->devs[i].bio = mbio; - mbio->bi_sector = (r10_bio->devs[i].addr+ + mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ choose_data_offset(r10_bio, rdev)); mbio->bi_bdev = rdev->bdev; @@ -1553,11 +1557,11 @@ retry_write: rdev = conf->mirrors[d].rdev; } mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(mbio, r10_bio->sector - bio->bi_sector, + bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); r10_bio->devs[i].repl_bio = mbio; - mbio->bi_sector = (r10_bio->devs[i].addr + + mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr + choose_data_offset( r10_bio, rdev)); mbio->bi_bdev = rdev->bdev; @@ -1591,7 +1595,7 @@ retry_write: r10_bio->sectors = bio_sectors(bio) - sectors_handled; r10_bio->mddev = mddev; - r10_bio->sector = bio->bi_sector + sectors_handled; + r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled; r10_bio->state = 0; goto retry_write; } @@ -2124,10 +2128,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) bio_reset(tbio); tbio->bi_vcnt = vcnt; - tbio->bi_size = r10_bio->sectors << 9; + tbio->bi_iter.bi_size = r10_bio->sectors << 9; tbio->bi_rw = WRITE; 
tbio->bi_private = r10_bio; - tbio->bi_sector = r10_bio->devs[i].addr; + tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; for (j=0; j < vcnt ; j++) { tbio->bi_io_vec[j].bv_offset = 0; @@ -2144,7 +2148,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) atomic_inc(&r10_bio->remaining); md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); - tbio->bi_sector += conf->mirrors[d].rdev->data_offset; + tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; tbio->bi_bdev = conf->mirrors[d].rdev->bdev; generic_make_request(tbio); } @@ -2614,8 +2618,8 @@ static int narrow_write_error(struct r10bio *r10_bio, int i) sectors = sect_to_write; /* Write at 'sector' for 'sectors' */ wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(wbio, sector - bio->bi_sector, sectors); - wbio->bi_sector = (r10_bio->devs[i].addr+ + bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); + wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ choose_data_offset(r10_bio, rdev) + (sector - r10_bio->sector)); wbio->bi_bdev = rdev->bdev; @@ -2687,10 +2691,10 @@ read_more: (unsigned long long)r10_bio->sector); bio = bio_clone_mddev(r10_bio->master_bio, GFP_NOIO, mddev); - bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors); + bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); r10_bio->devs[slot].bio = bio; r10_bio->devs[slot].rdev = rdev; - bio->bi_sector = r10_bio->devs[slot].addr + bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + choose_data_offset(r10_bio, rdev); bio->bi_bdev = rdev->bdev; bio->bi_rw = READ | do_sync; @@ -2701,7 +2705,7 @@ read_more: struct bio *mbio = r10_bio->master_bio; int sectors_handled = r10_bio->sector + max_sectors - - mbio->bi_sector; + - mbio->bi_iter.bi_sector; r10_bio->sectors = max_sectors; spin_lock_irq(&conf->device_lock); if (mbio->bi_phys_segments == 0) @@ -2719,7 +2723,7 @@ read_more: set_bit(R10BIO_ReadError, &r10_bio->state); r10_bio->mddev = mddev; - r10_bio->sector = mbio->bi_sector + r10_bio->sector = mbio->bi_iter.bi_sector + sectors_handled; goto read_more; @@ -3157,7 +3161,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, bio->bi_end_io = end_sync_read; bio->bi_rw = READ; from_addr = r10_bio->devs[j].addr; - bio->bi_sector = from_addr + rdev->data_offset; + bio->bi_iter.bi_sector = from_addr + + rdev->data_offset; bio->bi_bdev = rdev->bdev; atomic_inc(&rdev->nr_pending); /* and we write to 'i' (if not in_sync) */ @@ -3181,7 +3186,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, bio->bi_private = r10_bio; bio->bi_end_io = end_sync_write; bio->bi_rw = WRITE; - bio->bi_sector = to_addr + bio->bi_iter.bi_sector = to_addr + rdev->data_offset; bio->bi_bdev = rdev->bdev; atomic_inc(&r10_bio->remaining); @@ -3210,7 +3215,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, bio->bi_private = r10_bio; bio->bi_end_io = end_sync_write; bio->bi_rw = WRITE; - bio->bi_sector = to_addr + rdev->data_offset; + bio->bi_iter.bi_sector = to_addr + + rdev->data_offset; bio->bi_bdev = rdev->bdev; atomic_inc(&r10_bio->remaining); break; @@ -3328,7 +3334,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, bio->bi_private = r10_bio; bio->bi_end_io = end_sync_read; bio->bi_rw = READ; - bio->bi_sector = sector + + bio->bi_iter.bi_sector = sector + conf->mirrors[d].rdev->data_offset; bio->bi_bdev = conf->mirrors[d].rdev->bdev; count++; @@ -3350,7 +3356,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, 
bio->bi_private = r10_bio; bio->bi_end_io = end_sync_write; bio->bi_rw = WRITE; - bio->bi_sector = sector + + bio->bi_iter.bi_sector = sector + conf->mirrors[d].replacement->data_offset; bio->bi_bdev = conf->mirrors[d].replacement->bdev; count++; @@ -3397,7 +3403,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, bio2 = bio2->bi_next) { /* remove last page from this bio */ bio2->bi_vcnt--; - bio2->bi_size -= len; + bio2->bi_iter.bi_size -= len; bio2->bi_flags &= ~(1<< BIO_SEG_VALID); } goto bio_full; @@ -4417,7 +4423,7 @@ read_more: read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); read_bio->bi_bdev = rdev->bdev; - read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr + read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr + rdev->data_offset); read_bio->bi_private = r10_bio; read_bio->bi_end_io = end_sync_read; @@ -4425,7 +4431,7 @@ read_more: read_bio->bi_flags &= ~(BIO_POOL_MASK - 1); read_bio->bi_flags |= 1 << BIO_UPTODATE; read_bio->bi_vcnt = 0; - read_bio->bi_size = 0; + read_bio->bi_iter.bi_size = 0; r10_bio->master_bio = read_bio; r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; @@ -4451,7 +4457,8 @@ read_more: bio_reset(b); b->bi_bdev = rdev2->bdev; - b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset; + b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + + rdev2->new_data_offset; b->bi_private = r10_bio; b->bi_end_io = end_reshape_write; b->bi_rw = WRITE; @@ -4478,7 +4485,7 @@ read_more: bio2 = bio2->bi_next) { /* Remove last page from this bio */ bio2->bi_vcnt--; - bio2->bi_size -= len; + bio2->bi_iter.bi_size -= len; bio2->bi_flags &= ~(1<bi_sector + sectors < sector + STRIPE_SECTORS) + if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS) return bio->bi_next; else return NULL; @@ -225,7 +225,7 @@ static void return_io(struct bio *return_bi) return_bi = bi->bi_next; bi->bi_next = NULL; - bi->bi_size = 0; + bi->bi_iter.bi_size = 0; trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), bi, 0); bio_endio(bi, 0); @@ -854,10 +854,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) bi->bi_rw, i); atomic_inc(&sh->count); if (use_new_offset(conf, sh)) - bi->bi_sector = (sh->sector + bi->bi_iter.bi_sector = (sh->sector + rdev->new_data_offset); else - bi->bi_sector = (sh->sector + bi->bi_iter.bi_sector = (sh->sector + rdev->data_offset); if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) bi->bi_rw |= REQ_NOMERGE; @@ -865,7 +865,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) bi->bi_vcnt = 1; bi->bi_io_vec[0].bv_len = STRIPE_SIZE; bi->bi_io_vec[0].bv_offset = 0; - bi->bi_size = STRIPE_SIZE; + bi->bi_iter.bi_size = STRIPE_SIZE; /* * If this is discard request, set bi_vcnt 0. We don't * want to confuse SCSI because SCSI will replace payload @@ -901,15 +901,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) rbi->bi_rw, i); atomic_inc(&sh->count); if (use_new_offset(conf, sh)) - rbi->bi_sector = (sh->sector + rbi->bi_iter.bi_sector = (sh->sector + rrdev->new_data_offset); else - rbi->bi_sector = (sh->sector + rbi->bi_iter.bi_sector = (sh->sector + rrdev->data_offset); rbi->bi_vcnt = 1; rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; rbi->bi_io_vec[0].bv_offset = 0; - rbi->bi_size = STRIPE_SIZE; + rbi->bi_iter.bi_size = STRIPE_SIZE; /* * If this is discard request, set bi_vcnt 0. 
We don't * want to confuse SCSI because SCSI will replace payload @@ -944,10 +944,10 @@ async_copy_data(int frombio, struct bio *bio, struct page *page, struct async_submit_ctl submit; enum async_tx_flags flags = 0; - if (bio->bi_sector >= sector) - page_offset = (signed)(bio->bi_sector - sector) * 512; + if (bio->bi_iter.bi_sector >= sector) + page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; else - page_offset = (signed)(sector - bio->bi_sector) * -512; + page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; if (frombio) flags |= ASYNC_TX_FENCE; @@ -1014,7 +1014,7 @@ static void ops_complete_biofill(void *stripe_head_ref) BUG_ON(!dev->read); rbi = dev->read; dev->read = NULL; - while (rbi && rbi->bi_sector < + while (rbi && rbi->bi_iter.bi_sector < dev->sector + STRIPE_SECTORS) { rbi2 = r5_next_bio(rbi, dev->sector); if (!raid5_dec_bi_active_stripes(rbi)) { @@ -1050,7 +1050,7 @@ static void ops_run_biofill(struct stripe_head *sh) dev->read = rbi = dev->toread; dev->toread = NULL; spin_unlock_irq(&sh->stripe_lock); - while (rbi && rbi->bi_sector < + while (rbi && rbi->bi_iter.bi_sector < dev->sector + STRIPE_SECTORS) { tx = async_copy_data(0, rbi, dev->page, dev->sector, tx); @@ -1392,7 +1392,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) wbi = dev->written = chosen; spin_unlock_irq(&sh->stripe_lock); - while (wbi && wbi->bi_sector < + while (wbi && wbi->bi_iter.bi_sector < dev->sector + STRIPE_SECTORS) { if (wbi->bi_rw & REQ_FUA) set_bit(R5_WantFUA, &dev->flags); @@ -2616,7 +2616,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in int firstwrite=0; pr_debug("adding bi b#%llu to stripe s#%llu\n", - (unsigned long long)bi->bi_sector, + (unsigned long long)bi->bi_iter.bi_sector, (unsigned long long)sh->sector); /* @@ -2634,12 +2634,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in firstwrite = 1; } else bip = &sh->dev[dd_idx].toread; - while (*bip && (*bip)->bi_sector < bi->bi_sector) { - if (bio_end_sector(*bip) > bi->bi_sector) + while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) { + if (bio_end_sector(*bip) > bi->bi_iter.bi_sector) goto overlap; bip = & (*bip)->bi_next; } - if (*bip && (*bip)->bi_sector < bio_end_sector(bi)) + if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi)) goto overlap; BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); @@ -2653,7 +2653,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in sector_t sector = sh->dev[dd_idx].sector; for (bi=sh->dev[dd_idx].towrite; sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && - bi && bi->bi_sector <= sector; + bi && bi->bi_iter.bi_sector <= sector; bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { if (bio_end_sector(bi) >= sector) sector = bio_end_sector(bi); @@ -2663,7 +2663,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in } pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", - (unsigned long long)(*bip)->bi_sector, + (unsigned long long)(*bip)->bi_iter.bi_sector, (unsigned long long)sh->sector, dd_idx); spin_unlock_irq(&sh->stripe_lock); @@ -2738,7 +2738,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) wake_up(&conf->wait_for_overlap); - while (bi && bi->bi_sector < + while (bi && bi->bi_iter.bi_sector < sh->dev[i].sector + STRIPE_SECTORS) { struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); clear_bit(BIO_UPTODATE, 
&bi->bi_flags); @@ -2757,7 +2757,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, bi = sh->dev[i].written; sh->dev[i].written = NULL; if (bi) bitmap_end = 1; - while (bi && bi->bi_sector < + while (bi && bi->bi_iter.bi_sector < sh->dev[i].sector + STRIPE_SECTORS) { struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); clear_bit(BIO_UPTODATE, &bi->bi_flags); @@ -2781,7 +2781,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, spin_unlock_irq(&sh->stripe_lock); if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) wake_up(&conf->wait_for_overlap); - while (bi && bi->bi_sector < + while (bi && bi->bi_iter.bi_sector < sh->dev[i].sector + STRIPE_SECTORS) { struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); @@ -3005,7 +3005,7 @@ static void handle_stripe_clean_event(struct r5conf *conf, clear_bit(R5_UPTODATE, &dev->flags); wbi = dev->written; dev->written = NULL; - while (wbi && wbi->bi_sector < + while (wbi && wbi->bi_iter.bi_sector < dev->sector + STRIPE_SECTORS) { wbi2 = r5_next_bio(wbi, dev->sector); if (!raid5_dec_bi_active_stripes(wbi)) { @@ -4097,7 +4097,7 @@ static int raid5_mergeable_bvec(struct request_queue *q, static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) { - sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); + sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); unsigned int chunk_sectors = mddev->chunk_sectors; unsigned int bio_sectors = bio_sectors(bio); @@ -4234,9 +4234,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) /* * compute position */ - align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector, - 0, - &dd_idx, NULL); + align_bi->bi_iter.bi_sector = + raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, + 0, &dd_idx, NULL); end_sector = bio_end_sector(align_bi); rcu_read_lock(); @@ -4261,7 +4261,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); if (!bio_fits_rdev(align_bi) || - is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi), + is_badblock(rdev, align_bi->bi_iter.bi_sector, + bio_sectors(align_bi), &first_bad, &bad_sectors)) { /* too big in some way, or has a known bad block */ bio_put(align_bi); @@ -4270,7 +4271,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) } /* No reshape active, so we can trust rdev->data_offset */ - align_bi->bi_sector += rdev->data_offset; + align_bi->bi_iter.bi_sector += rdev->data_offset; spin_lock_irq(&conf->device_lock); wait_event_lock_irq(conf->wait_for_stripe, @@ -4282,7 +4283,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) if (mddev->gendisk) trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), align_bi, disk_devt(mddev->gendisk), - raid_bio->bi_sector); + raid_bio->bi_iter.bi_sector); generic_make_request(align_bi); return 1; } else { @@ -4465,8 +4466,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) /* Skip discard while reshape is happening */ return; - logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); - last_sector = bi->bi_sector + (bi->bi_size>>9); + logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); + last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9); bi->bi_next = NULL; bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ @@ -4570,7 +4571,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) return; } - logical_sector = bi->bi_sector & 
~((sector_t)STRIPE_SECTORS-1); + logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); last_sector = bio_end_sector(bi); bi->bi_next = NULL; bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ @@ -5054,7 +5055,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) int remaining; int handled = 0; - logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); + logical_sector = raid_bio->bi_iter.bi_sector & + ~((sector_t)STRIPE_SECTORS-1); sector = raid5_compute_sector(conf, logical_sector, 0, &dd_idx, NULL); last_sector = bio_end_sector(raid_bio); diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 6eca019bcf30..16814a8457f8 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -819,7 +819,8 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio) dev_info = bio->bi_bdev->bd_disk->private_data; if (dev_info == NULL) goto fail; - if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0) + if ((bio->bi_iter.bi_sector & 7) != 0 || + (bio->bi_iter.bi_size & 4095) != 0) /* Request is not page-aligned. */ goto fail; if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) { @@ -842,7 +843,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio) } } - index = (bio->bi_sector >> 3); + index = (bio->bi_iter.bi_sector >> 3); bio_for_each_segment(bvec, bio, i) { page_addr = (unsigned long) page_address(bvec->bv_page) + bvec->bv_offset; diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 464dd29d06c0..dd4e73fdb323 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -190,15 +190,16 @@ static void xpram_make_request(struct request_queue *q, struct bio *bio) unsigned long bytes; int i; - if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0) + if ((bio->bi_iter.bi_sector & 7) != 0 || + (bio->bi_iter.bi_size & 4095) != 0) /* Request is not page-aligned. */ goto fail; - if ((bio->bi_size >> 12) > xdev->size) + if ((bio->bi_iter.bi_size >> 12) > xdev->size) /* Request size is no page-aligned. 
*/ goto fail; - if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset) + if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset) goto fail; - index = (bio->bi_sector >> 3) + xdev->offset; + index = (bio->bi_iter.bi_sector >> 3) + xdev->offset; bio_for_each_segment(bvec, bio, i) { page_addr = (unsigned long) kmap(bvec->bv_page) + bvec->bv_offset; diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index aa66361ed44b..bac04c2335aa 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -731,7 +731,7 @@ static int _osd_req_list_objects(struct osd_request *or, bio->bi_rw &= ~REQ_WRITE; or->in.bio = bio; - or->in.total_bytes = bio->bi_size; + or->in.total_bytes = bio->bi_iter.bi_size; return 0; } diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c index e2421ea61352..53741be754b4 100644 --- a/drivers/staging/lustre/lustre/llite/lloop.c +++ b/drivers/staging/lustre/lustre/llite/lloop.c @@ -220,7 +220,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) for (bio = head; bio != NULL; bio = bio->bi_next) { LASSERT(rw == bio->bi_rw); - offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset; + offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; bio_for_each_segment(bvec, bio, i) { BUG_ON(bvec->bv_offset != 0); BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE); @@ -313,7 +313,8 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req) bio = &lo->lo_bio; while (*bio && (*bio)->bi_rw == rw) { CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n", - (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size, + (unsigned long long)(*bio)->bi_iter.bi_sector, + (*bio)->bi_iter.bi_size, page_count, (*bio)->bi_vcnt); if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS) break; @@ -347,7 +348,8 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio) goto err; CDEBUG(D_INFO, "submit bio sector %llu size %u\n", - (unsigned long long)old_bio->bi_sector, old_bio->bi_size); + (unsigned long long)old_bio->bi_iter.bi_sector, + old_bio->bi_iter.bi_size); spin_lock_irq(&lo->lo_lock); inactive = (lo->lo_state != LLOOP_BOUND); @@ -367,7 +369,7 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio) loop_add_bio(lo, old_bio); return; err: - cfs_bio_io_error(old_bio, old_bio->bi_size); + cfs_bio_io_error(old_bio, old_bio->bi_iter.bi_size); } @@ -378,7 +380,7 @@ static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio) while (bio) { struct bio *tmp = bio->bi_next; bio->bi_next = NULL; - cfs_bio_endio(bio, bio->bi_size, ret); + cfs_bio_endio(bio, bio->bi_iter.bi_size, ret); bio = tmp; } } diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 79ce363b2ea9..e9e6f984092b 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -171,13 +171,14 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio) u64 start, end, bound; /* unaligned request */ - if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1))) + if (unlikely(bio->bi_iter.bi_sector & + (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1))) return 0; - if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1))) + if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1))) return 0; - start = bio->bi_sector; - end = start + (bio->bi_size >> SECTOR_SHIFT); + start = bio->bi_iter.bi_sector; + end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT); bound = 
zram->disksize >> SECTOR_SHIFT; /* out of range range */ if (unlikely(start >= bound || end > bound || start > end)) @@ -684,8 +685,9 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw) break; } - index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT; - offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; + index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; + offset = (bio->bi_iter.bi_sector & + (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; bio_for_each_segment(bvec, bio, i) { int max_transfer_size = PAGE_SIZE - offset; diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index c87959f12760..2d29356d0c85 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -319,7 +319,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num) bio->bi_bdev = ib_dev->ibd_bd; bio->bi_private = cmd; bio->bi_end_io = &iblock_bio_done; - bio->bi_sector = lba; + bio->bi_iter.bi_sector = lba; return bio; } diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index fc60b31453ee..08e3d1388c65 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c @@ -215,9 +215,9 @@ unsigned int bio_integrity_tag_size(struct bio *bio) { struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); - BUG_ON(bio->bi_size == 0); + BUG_ON(bio->bi_iter.bi_size == 0); - return bi->tag_size * (bio->bi_size / bi->sector_size); + return bi->tag_size * (bio->bi_iter.bi_size / bi->sector_size); } EXPORT_SYMBOL(bio_integrity_tag_size); @@ -300,7 +300,7 @@ static void bio_integrity_generate(struct bio *bio) struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); struct blk_integrity_exchg bix; struct bio_vec *bv; - sector_t sector = bio->bi_sector; + sector_t sector = bio->bi_iter.bi_sector; unsigned int i, sectors, total; void *prot_buf = bio->bi_integrity->bip_buf; @@ -387,7 +387,7 @@ int bio_integrity_prep(struct bio *bio) bip->bip_owns_buf = 1; bip->bip_buf = buf; bip->bip_size = len; - bip->bip_sector = bio->bi_sector; + bip->bip_sector = bio->bi_iter.bi_sector; /* Map it */ offset = offset_in_page(buf); diff --git a/fs/bio.c b/fs/bio.c index 33d79a4eb92d..a402ad6e753f 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -532,13 +532,13 @@ void __bio_clone(struct bio *bio, struct bio *bio_src) * most users will be overriding ->bi_bdev with a new target, * so we don't set nor calculate new physical/hw segment counts here */ - bio->bi_sector = bio_src->bi_sector; + bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; bio->bi_bdev = bio_src->bi_bdev; bio->bi_flags |= 1 << BIO_CLONED; bio->bi_rw = bio_src->bi_rw; bio->bi_vcnt = bio_src->bi_vcnt; - bio->bi_size = bio_src->bi_size; - bio->bi_idx = bio_src->bi_idx; + bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; + bio->bi_iter.bi_idx = bio_src->bi_iter.bi_idx; } EXPORT_SYMBOL(__bio_clone); @@ -612,7 +612,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page if (unlikely(bio_flagged(bio, BIO_CLONED))) return 0; - if (((bio->bi_size + len) >> 9) > max_sectors) + if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors) return 0; /* @@ -635,8 +635,9 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page simulate merging updated prev_bvec as new bvec. 
*/ .bi_bdev = bio->bi_bdev, - .bi_sector = bio->bi_sector, - .bi_size = bio->bi_size - prev_bv_len, + .bi_sector = bio->bi_iter.bi_sector, + .bi_size = bio->bi_iter.bi_size - + prev_bv_len, .bi_rw = bio->bi_rw, }; @@ -684,8 +685,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page if (q->merge_bvec_fn) { struct bvec_merge_data bvm = { .bi_bdev = bio->bi_bdev, - .bi_sector = bio->bi_sector, - .bi_size = bio->bi_size, + .bi_sector = bio->bi_iter.bi_sector, + .bi_size = bio->bi_iter.bi_size, .bi_rw = bio->bi_rw, }; @@ -708,7 +709,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page bio->bi_vcnt++; bio->bi_phys_segments++; done: - bio->bi_size += len; + bio->bi_iter.bi_size += len; return len; } @@ -807,22 +808,22 @@ void bio_advance(struct bio *bio, unsigned bytes) if (bio_integrity(bio)) bio_integrity_advance(bio, bytes); - bio->bi_sector += bytes >> 9; - bio->bi_size -= bytes; + bio->bi_iter.bi_sector += bytes >> 9; + bio->bi_iter.bi_size -= bytes; if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK) return; while (bytes) { - if (unlikely(bio->bi_idx >= bio->bi_vcnt)) { + if (unlikely(bio->bi_iter.bi_idx >= bio->bi_vcnt)) { WARN_ONCE(1, "bio idx %d >= vcnt %d\n", - bio->bi_idx, bio->bi_vcnt); + bio->bi_iter.bi_idx, bio->bi_vcnt); break; } if (bytes >= bio_iovec(bio)->bv_len) { bytes -= bio_iovec(bio)->bv_len; - bio->bi_idx++; + bio->bi_iter.bi_idx++; } else { bio_iovec(bio)->bv_len -= bytes; bio_iovec(bio)->bv_offset += bytes; @@ -1485,7 +1486,7 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len, if (IS_ERR(bio)) return bio; - if (bio->bi_size == len) + if (bio->bi_iter.bi_size == len) return bio; /* @@ -1763,16 +1764,16 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors) return bp; trace_block_split(bdev_get_queue(bi->bi_bdev), bi, - bi->bi_sector + first_sectors); + bi->bi_iter.bi_sector + first_sectors); BUG_ON(bio_segments(bi) > 1); atomic_set(&bp->cnt, 3); bp->error = 0; bp->bio1 = *bi; bp->bio2 = *bi; - bp->bio2.bi_sector += first_sectors; - bp->bio2.bi_size -= first_sectors << 9; - bp->bio1.bi_size = first_sectors << 9; + bp->bio2.bi_iter.bi_sector += first_sectors; + bp->bio2.bi_iter.bi_size -= first_sectors << 9; + bp->bio1.bi_iter.bi_size = first_sectors << 9; if (bi->bi_vcnt != 0) { bp->bv1 = *bio_iovec(bi); @@ -1821,21 +1822,22 @@ void bio_trim(struct bio *bio, int offset, int size) int sofar = 0; size <<= 9; - if (offset == 0 && size == bio->bi_size) + if (offset == 0 && size == bio->bi_iter.bi_size) return; clear_bit(BIO_SEG_VALID, &bio->bi_flags); bio_advance(bio, offset << 9); - bio->bi_size = size; + bio->bi_iter.bi_size = size; /* avoid any complications with bi_idx being non-zero*/ - if (bio->bi_idx) { - memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx, - (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec)); - bio->bi_vcnt -= bio->bi_idx; - bio->bi_idx = 0; + if (bio->bi_iter.bi_idx) { + memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_iter.bi_idx, + (bio->bi_vcnt - bio->bi_iter.bi_idx) * + sizeof(struct bio_vec)); + bio->bi_vcnt -= bio->bi_iter.bi_idx; + bio->bi_iter.bi_idx = 0; } /* Make sure vcnt and last bv are not too big */ bio_for_each_segment(bvec, bio, i) { @@ -1871,7 +1873,7 @@ sector_t bio_sector_offset(struct bio *bio, unsigned short index, sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue); sectors = 0; - if (index >= bio->bi_idx) + if (index >= bio->bi_iter.bi_idx) index = bio->bi_vcnt - 1; bio_for_each_segment_all(bv, bio, i) { diff --git 
a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index 131d82800b3a..cb05e1c842c5 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c @@ -1695,7 +1695,7 @@ static int btrfsic_read_block(struct btrfsic_state *state, return -1; } bio->bi_bdev = block_ctx->dev->bdev; - bio->bi_sector = dev_bytenr >> 9; + bio->bi_iter.bi_sector = dev_bytenr >> 9; for (j = i; j < num_pages; j++) { ret = bio_add_page(bio, block_ctx->pagev[j], @@ -3013,7 +3013,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio) int bio_is_patched; char **mapped_datav; - dev_bytenr = 512 * bio->bi_sector; + dev_bytenr = 512 * bio->bi_iter.bi_sector; bio_is_patched = 0; if (dev_state->state->print_mask & BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) @@ -3021,8 +3021,8 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio) "submit_bio(rw=0x%x, bi_vcnt=%u," " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n", rw, bio->bi_vcnt, - (unsigned long long)bio->bi_sector, dev_bytenr, - bio->bi_bdev); + (unsigned long long)bio->bi_iter.bi_sector, + dev_bytenr, bio->bi_bdev); mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt, GFP_NOFS); diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index eac6784e43d7..f5cdeb4b5538 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -172,7 +172,8 @@ static void end_compressed_bio_read(struct bio *bio, int err) goto out; inode = cb->inode; - ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9); + ret = check_compressed_csum(inode, cb, + (u64)bio->bi_iter.bi_sector << 9); if (ret) goto csum_failed; @@ -370,7 +371,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) { page = compressed_pages[pg_index]; page->mapping = inode->i_mapping; - if (bio->bi_size) + if (bio->bi_iter.bi_size) ret = io_tree->ops->merge_bio_hook(WRITE, page, 0, PAGE_CACHE_SIZE, bio, 0); @@ -504,7 +505,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, if (!em || last_offset < em->start || (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) || - (em->block_start >> 9) != cb->orig_bio->bi_sector) { + (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) { free_extent_map(em); unlock_extent(tree, last_offset, end); unlock_page(page); @@ -550,7 +551,7 @@ next: * in it. We don't actually do IO on those pages but allocate new ones * to hold the compressed pages on disk. 
* - * bio->bi_sector points to the compressed extent on disk + * bio->bi_iter.bi_sector points to the compressed extent on disk * bio->bi_io_vec points to all of the inode pages * bio->bi_vcnt is a count of pages * @@ -571,7 +572,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, struct page *page; struct block_device *bdev; struct bio *comp_bio; - u64 cur_disk_byte = (u64)bio->bi_sector << 9; + u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9; u64 em_len; u64 em_start; struct extent_map *em; @@ -657,7 +658,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, page->mapping = inode->i_mapping; page->index = em_start >> PAGE_CACHE_SHIFT; - if (comp_bio->bi_size) + if (comp_bio->bi_iter.bi_size) ret = tree->ops->merge_bio_hook(READ, page, 0, PAGE_CACHE_SIZE, comp_bio, 0); @@ -685,8 +686,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, comp_bio, sums); BUG_ON(ret); /* -ENOMEM */ } - sums += (comp_bio->bi_size + root->sectorsize - 1) / - root->sectorsize; + sums += (comp_bio->bi_iter.bi_size + + root->sectorsize - 1) / root->sectorsize; ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 8b5f9e1d1f0e..bcb6f1b780d6 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1984,7 +1984,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start, bio = btrfs_io_bio_alloc(GFP_NOFS, 1); if (!bio) return -EIO; - bio->bi_size = 0; + bio->bi_iter.bi_size = 0; map_length = length; ret = btrfs_map_block(fs_info, WRITE, logical, @@ -1995,7 +1995,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start, } BUG_ON(mirror_num != bbio->mirror_num); sector = bbio->stripes[mirror_num-1].physical >> 9; - bio->bi_sector = sector; + bio->bi_iter.bi_sector = sector; dev = bbio->stripes[mirror_num-1].dev; kfree(bbio); if (!dev || !dev->bdev || !dev->writeable) { @@ -2268,9 +2268,9 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, return -EIO; } bio->bi_end_io = failed_bio->bi_end_io; - bio->bi_sector = failrec->logical >> 9; + bio->bi_iter.bi_sector = failrec->logical >> 9; bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; - bio->bi_size = 0; + bio->bi_iter.bi_size = 0; btrfs_failed_bio = btrfs_io_bio(failed_bio); if (btrfs_failed_bio->csum) { @@ -2412,7 +2412,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) struct inode *inode = page->mapping->host; pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, " - "mirror=%lu\n", (u64)bio->bi_sector, err, + "mirror=%lu\n", (u64)bio->bi_iter.bi_sector, err, io_bio->mirror_num); tree = &BTRFS_I(inode)->io_tree; @@ -2543,7 +2543,7 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, if (bio) { bio->bi_bdev = bdev; - bio->bi_sector = first_sector; + bio->bi_iter.bi_sector = first_sector; btrfs_bio = btrfs_io_bio(bio); btrfs_bio->csum = NULL; btrfs_bio->csum_allocated = NULL; @@ -2637,7 +2637,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, if (bio_ret && *bio_ret) { bio = *bio_ret; if (old_compressed) - contig = bio->bi_sector == sector; + contig = bio->bi_iter.bi_sector == sector; else contig = bio_end_sector(bio) == sector; diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 6f3848860283..84a46a42d262 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -182,7 +182,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, if (!path) return -ENOMEM; - nblocks = 
bio->bi_size >> inode->i_sb->s_blocksize_bits; + nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits; if (!dst) { if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) { btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size, @@ -201,7 +201,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, csum = (u8 *)dst; } - if (bio->bi_size > PAGE_CACHE_SIZE * 8) + if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8) path->reada = 2; WARN_ON(bio->bi_vcnt <= 0); @@ -217,7 +217,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, path->skip_locking = 1; } - disk_bytenr = (u64)bio->bi_sector << 9; + disk_bytenr = (u64)bio->bi_iter.bi_sector << 9; if (dio) offset = logical_offset; while (bio_index < bio->bi_vcnt) { @@ -302,7 +302,7 @@ int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, struct btrfs_dio_private *dip, struct bio *bio, u64 offset) { - int len = (bio->bi_sector << 9) - dip->disk_bytenr; + int len = (bio->bi_iter.bi_sector << 9) - dip->disk_bytenr; u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); int ret; @@ -447,11 +447,12 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, u64 offset; WARN_ON(bio->bi_vcnt <= 0); - sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS); + sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size), + GFP_NOFS); if (!sums) return -ENOMEM; - sums->len = bio->bi_size; + sums->len = bio->bi_iter.bi_size; INIT_LIST_HEAD(&sums->list); if (contig) @@ -461,7 +462,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, ordered = btrfs_lookup_ordered_extent(inode, offset); BUG_ON(!ordered); /* Logic error */ - sums->bytenr = (u64)bio->bi_sector << 9; + sums->bytenr = (u64)bio->bi_iter.bi_sector << 9; index = 0; while (bio_index < bio->bi_vcnt) { @@ -476,7 +477,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, btrfs_add_ordered_sum(inode, ordered, sums); btrfs_put_ordered_extent(ordered); - bytes_left = bio->bi_size - total_bytes; + bytes_left = bio->bi_iter.bi_size - total_bytes; sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left), GFP_NOFS); @@ -484,7 +485,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, sums->len = bytes_left; ordered = btrfs_lookup_ordered_extent(inode, offset); BUG_ON(!ordered); /* Logic error */ - sums->bytenr = ((u64)bio->bi_sector << 9) + + sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) + total_bytes; index = 0; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d6630dc130ba..7ab0e94ad492 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1577,7 +1577,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset, unsigned long bio_flags) { struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; - u64 logical = (u64)bio->bi_sector << 9; + u64 logical = (u64)bio->bi_iter.bi_sector << 9; u64 length = 0; u64 map_length; int ret; @@ -1585,7 +1585,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset, if (bio_flags & EXTENT_BIO_COMPRESSED) return 0; - length = bio->bi_size; + length = bio->bi_iter.bi_size; map_length = length; ret = btrfs_map_block(root->fs_info, rw, logical, &map_length, NULL, 0); @@ -6894,7 +6894,8 @@ static void btrfs_end_dio_bio(struct bio *bio, int err) printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu " "sector %#Lx len %u err no %d\n", btrfs_ino(dip->inode), bio->bi_rw, - (unsigned long long)bio->bi_sector, bio->bi_size, err); + (unsigned long long)bio->bi_iter.bi_sector, + 
bio->bi_iter.bi_size, err); dip->errors = 1; /* @@ -6985,7 +6986,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, struct bio *bio; struct bio *orig_bio = dip->orig_bio; struct bio_vec *bvec = orig_bio->bi_io_vec; - u64 start_sector = orig_bio->bi_sector; + u64 start_sector = orig_bio->bi_iter.bi_sector; u64 file_offset = dip->logical_offset; u64 submit_len = 0; u64 map_length; @@ -6993,7 +6994,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, int ret = 0; int async_submit = 0; - map_length = orig_bio->bi_size; + map_length = orig_bio->bi_iter.bi_size; ret = btrfs_map_block(root->fs_info, rw, start_sector << 9, &map_length, NULL, 0); if (ret) { @@ -7001,7 +7002,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, return -EIO; } - if (map_length >= orig_bio->bi_size) { + if (map_length >= orig_bio->bi_iter.bi_size) { bio = orig_bio; goto submit; } @@ -7053,7 +7054,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, bio->bi_private = dip; bio->bi_end_io = btrfs_end_dio_bio; - map_length = orig_bio->bi_size; + map_length = orig_bio->bi_iter.bi_size; ret = btrfs_map_block(root->fs_info, rw, start_sector << 9, &map_length, NULL, 0); @@ -7111,7 +7112,8 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio, if (!skip_sum && !write) { csum_size = btrfs_super_csum_size(root->fs_info->super_copy); - sum_len = dio_bio->bi_size >> inode->i_sb->s_blocksize_bits; + sum_len = dio_bio->bi_iter.bi_size >> + inode->i_sb->s_blocksize_bits; sum_len *= csum_size; } else { sum_len = 0; @@ -7126,8 +7128,8 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio, dip->private = dio_bio->bi_private; dip->inode = inode; dip->logical_offset = file_offset; - dip->bytes = dio_bio->bi_size; - dip->disk_bytenr = (u64)dio_bio->bi_sector << 9; + dip->bytes = dio_bio->bi_iter.bi_size; + dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; io_bio->bi_private = dip; dip->errors = 0; dip->orig_bio = io_bio; diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 24ac21840a9a..9af0b25d991a 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -1032,8 +1032,8 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, /* see if we can add this page onto our existing bio */ if (last) { - last_end = (u64)last->bi_sector << 9; - last_end += last->bi_size; + last_end = (u64)last->bi_iter.bi_sector << 9; + last_end += last->bi_iter.bi_size; /* * we can't merge these if they are from different @@ -1053,9 +1053,9 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, if (!bio) return -ENOMEM; - bio->bi_size = 0; + bio->bi_iter.bi_size = 0; bio->bi_bdev = stripe->dev->bdev; - bio->bi_sector = disk_start >> 9; + bio->bi_iter.bi_sector = disk_start >> 9; set_bit(BIO_UPTODATE, &bio->bi_flags); bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); @@ -1111,7 +1111,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio) spin_lock_irq(&rbio->bio_list_lock); bio_list_for_each(bio, &rbio->bio_list) { - start = (u64)bio->bi_sector << 9; + start = (u64)bio->bi_iter.bi_sector << 9; stripe_offset = start - rbio->raid_map[0]; page_index = stripe_offset >> PAGE_CACHE_SHIFT; @@ -1272,7 +1272,7 @@ cleanup: static int find_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio) { - u64 physical = bio->bi_sector; + u64 physical = bio->bi_iter.bi_sector; u64 stripe_start; int i; struct btrfs_bio_stripe *stripe; @@ -1298,7 +1298,7 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio, static int 
find_logical_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio) { - u64 logical = bio->bi_sector; + u64 logical = bio->bi_iter.bi_sector; u64 stripe_start; int i; @@ -1602,8 +1602,8 @@ static int plug_cmp(void *priv, struct list_head *a, struct list_head *b) plug_list); struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio, plug_list); - u64 a_sector = ra->bio_list.head->bi_sector; - u64 b_sector = rb->bio_list.head->bi_sector; + u64 a_sector = ra->bio_list.head->bi_iter.bi_sector; + u64 b_sector = rb->bio_list.head->bi_iter.bi_sector; if (a_sector < b_sector) return -1; @@ -1691,7 +1691,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio, if (IS_ERR(rbio)) return PTR_ERR(rbio); bio_list_add(&rbio->bio_list, bio); - rbio->bio_list_bytes = bio->bi_size; + rbio->bio_list_bytes = bio->bi_iter.bi_size; /* * don't plug on full rbios, just get them out the door @@ -2044,7 +2044,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, rbio->read_rebuild = 1; bio_list_add(&rbio->bio_list, bio); - rbio->bio_list_bytes = bio->bi_size; + rbio->bio_list_bytes = bio->bi_iter.bi_size; rbio->faila = find_logical_bio_stripe(rbio, bio); if (rbio->faila == -1) { diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 1fd3f33c330a..bb9a928fa3a8 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1308,7 +1308,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info, continue; } bio->bi_bdev = page->dev->bdev; - bio->bi_sector = page->physical >> 9; + bio->bi_iter.bi_sector = page->physical >> 9; bio_add_page(bio, page->page, PAGE_SIZE, 0); if (btrfsic_submit_bio_wait(READ, bio)) @@ -1427,7 +1427,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, if (!bio) return -EIO; bio->bi_bdev = page_bad->dev->bdev; - bio->bi_sector = page_bad->physical >> 9; + bio->bi_iter.bi_sector = page_bad->physical >> 9; ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0); if (PAGE_SIZE != ret) { @@ -1520,7 +1520,7 @@ again: bio->bi_private = sbio; bio->bi_end_io = scrub_wr_bio_end_io; bio->bi_bdev = sbio->dev->bdev; - bio->bi_sector = sbio->physical >> 9; + bio->bi_iter.bi_sector = sbio->physical >> 9; sbio->err = 0; } else if (sbio->physical + sbio->page_count * PAGE_SIZE != spage->physical_for_dev_replace || @@ -1926,7 +1926,7 @@ again: bio->bi_private = sbio; bio->bi_end_io = scrub_bio_end_io; bio->bi_bdev = sbio->dev->bdev; - bio->bi_sector = sbio->physical >> 9; + bio->bi_iter.bi_sector = sbio->physical >> 9; sbio->err = 0; } else if (sbio->physical + sbio->page_count * PAGE_SIZE != spage->physical || @@ -3371,8 +3371,8 @@ static int write_page_nocow(struct scrub_ctx *sctx, spin_unlock(&sctx->stat_lock); return -ENOMEM; } - bio->bi_size = 0; - bio->bi_sector = physical_for_dev_replace >> 9; + bio->bi_iter.bi_size = 0; + bio->bi_iter.bi_sector = physical_for_dev_replace >> 9; bio->bi_bdev = dev->bdev; ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); if (ret != PAGE_CACHE_SIZE) { diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 92303f42baaa..f2130de0ddc2 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -5411,7 +5411,7 @@ static int bio_size_ok(struct block_device *bdev, struct bio *bio, if (!q->merge_bvec_fn) return 1; - bvm.bi_size = bio->bi_size - prev->bv_len; + bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len; if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) return 0; return 1; @@ -5426,7 +5426,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio, bio->bi_private = bbio; 
btrfs_io_bio(bio)->stripe_index = dev_nr; bio->bi_end_io = btrfs_end_bio; - bio->bi_sector = physical >> 9; + bio->bi_iter.bi_sector = physical >> 9; #ifdef DEBUG { struct rcu_string *name; @@ -5464,7 +5464,7 @@ again: while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) { if (bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset) < bvec->bv_len) { - u64 len = bio->bi_size; + u64 len = bio->bi_iter.bi_size; atomic_inc(&bbio->stripes_pending); submit_stripe_bio(root, bbio, bio, physical, dev_nr, @@ -5486,7 +5486,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) bio->bi_private = bbio->private; bio->bi_end_io = bbio->end_io; btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; - bio->bi_sector = logical >> 9; + bio->bi_iter.bi_sector = logical >> 9; kfree(bbio); bio_endio(bio, -EIO); } @@ -5497,7 +5497,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, { struct btrfs_device *dev; struct bio *first_bio = bio; - u64 logical = (u64)bio->bi_sector << 9; + u64 logical = (u64)bio->bi_iter.bi_sector << 9; u64 length = 0; u64 map_length; u64 *raid_map = NULL; @@ -5506,7 +5506,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, int total_devs = 1; struct btrfs_bio *bbio = NULL; - length = bio->bi_size; + length = bio->bi_iter.bi_size; map_length = length; ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio, diff --git a/fs/buffer.c b/fs/buffer.c index 6024877335ca..1c04ec66974e 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2982,11 +2982,11 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh) * let it through, and the IO layer will turn it into * an EIO. */ - if (unlikely(bio->bi_sector >= maxsector)) + if (unlikely(bio->bi_iter.bi_sector >= maxsector)) return; - maxsector -= bio->bi_sector; - bytes = bio->bi_size; + maxsector -= bio->bi_iter.bi_sector; + bytes = bio->bi_iter.bi_size; if (likely((bytes >> 9) <= maxsector)) return; @@ -2994,7 +2994,7 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh) bytes = maxsector << 9; /* Truncate the bio.. */ - bio->bi_size = bytes; + bio->bi_iter.bi_size = bytes; bio->bi_io_vec[0].bv_len = bytes; /* ..and clear the end of the buffer for reads */ @@ -3029,14 +3029,14 @@ int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags) */ bio = bio_alloc(GFP_NOIO, 1); - bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); + bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio->bi_bdev = bh->b_bdev; bio->bi_io_vec[0].bv_page = bh->b_page; bio->bi_io_vec[0].bv_len = bh->b_size; bio->bi_io_vec[0].bv_offset = bh_offset(bh); bio->bi_vcnt = 1; - bio->bi_size = bh->b_size; + bio->bi_iter.bi_size = bh->b_size; bio->bi_end_io = end_bio_bh_io_sync; bio->bi_private = bh; diff --git a/fs/direct-io.c b/fs/direct-io.c index 0e04142d5962..160a5489a939 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -375,7 +375,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, bio = bio_alloc(GFP_KERNEL, nr_vecs); bio->bi_bdev = bdev; - bio->bi_sector = first_sector; + bio->bi_iter.bi_sector = first_sector; if (dio->is_async) bio->bi_end_io = dio_bio_end_aio; else @@ -719,7 +719,7 @@ static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio, if (sdio->bio) { loff_t cur_offset = sdio->cur_page_fs_offset; loff_t bio_next_offset = sdio->logical_offset_in_bio + - sdio->bio->bi_size; + sdio->bio->bi_iter.bi_size; /* * See whether this new request is contiguous with the old. 
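For illustration of the direct-io hunk just above: the next contiguous file offset for an in-flight bio is its starting logical offset plus the residual byte count, which after this conversion is read from bio->bi_iter.bi_size. A minimal userspace sketch of that check follows; the demo_* types and names are simplified stand-ins for the kernel structures, not kernel API.

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins for the kernel structures touched by this series. */
struct demo_bvec_iter {
	unsigned long long bi_sector;	/* device address in 512-byte sectors */
	unsigned int bi_size;		/* residual I/O count, in bytes */
};

struct demo_bio {
	struct demo_bvec_iter bi_iter;
};

/*
 * Mirrors the dio_send_cur_page() logic above: the next contiguous file
 * offset is the bio's starting logical offset plus the bytes already
 * queued in it (bi_iter.bi_size after the conversion).
 */
static bool demo_page_is_contiguous(long long logical_offset_in_bio,
				    const struct demo_bio *bio,
				    long long cur_page_offset)
{
	long long bio_next_offset = logical_offset_in_bio + bio->bi_iter.bi_size;

	return cur_page_offset == bio_next_offset;
}

int main(void)
{
	struct demo_bio bio = { .bi_iter = { .bi_sector = 2048, .bi_size = 8192 } };

	/* A page starting 8 KiB into the bio's range merges; one at 12 KiB does not. */
	printf("%d %d\n",
	       demo_page_is_contiguous(0, &bio, 8192),
	       demo_page_is_contiguous(0, &bio, 12288));
	return 0;
}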
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index a31e4da14508..ab95508e3d40 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -298,7 +298,7 @@ ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end) static void ext4_end_bio(struct bio *bio, int error) { ext4_io_end_t *io_end = bio->bi_private; - sector_t bi_sector = bio->bi_sector; + sector_t bi_sector = bio->bi_iter.bi_sector; BUG_ON(!io_end); bio->bi_end_io = NULL; @@ -366,7 +366,7 @@ static int io_submit_init_bio(struct ext4_io_submit *io, bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES)); if (!bio) return -ENOMEM; - bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); + bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio->bi_bdev = bh->b_bdev; bio->bi_end_io = ext4_end_bio; bio->bi_private = ext4_get_io_end(io->io_end); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index a4949096cf4c..a2c8de8ba6ce 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -386,7 +386,7 @@ int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page, bio = f2fs_bio_alloc(bdev, 1); /* Initialize the bio */ - bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); + bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); bio->bi_end_io = read_end_io; if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index a90c6bc0d129..36e8afd8e1e4 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -682,7 +682,7 @@ retry: bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi)); sbi->bio[type] = f2fs_bio_alloc(bdev, bio_blocks); - sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); + sbi->bio[type]->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); sbi->bio[type]->bi_private = priv; /* * The end_io will be assigned at the sumbission phase. 
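The driver and filesystem hunks above are mechanical: every read of bi_sector or bi_size now goes through bio->bi_iter. As a condensed sketch of the pattern seen in the dcssblk, xpram and zram hunks (alignment check, bounds check, page index derivation), using the same simplified stand-in types; the constants are chosen only for the example and are not the kernel's.

#include <stdio.h>
#include <stdbool.h>

struct demo_bvec_iter {
	unsigned long long bi_sector;	/* start, in 512-byte sectors */
	unsigned int bi_size;		/* length, in bytes */
};

struct demo_bio {
	struct demo_bvec_iter bi_iter;
};

#define DEMO_SECTOR_SHIFT	9
#define DEMO_SECTORS_PER_PAGE	8	/* 4 KiB pages, as in the zram hunk */

/*
 * Mirrors the post-conversion request validation pattern: alignment and
 * bounds checks read the iterator fields instead of bio->bi_sector/bi_size.
 */
static bool demo_request_ok(const struct demo_bio *bio,
			    unsigned long long disksize_sectors)
{
	unsigned long long start = bio->bi_iter.bi_sector;
	unsigned long long end = start +
		(bio->bi_iter.bi_size >> DEMO_SECTOR_SHIFT);

	if (start & (DEMO_SECTORS_PER_PAGE - 1))	/* not page-aligned */
		return false;
	return end <= disksize_sectors;
}

int main(void)
{
	struct demo_bio bio = { .bi_iter = { .bi_sector = 8, .bi_size = 4096 } };
	unsigned long long index = bio.bi_iter.bi_sector / DEMO_SECTORS_PER_PAGE;

	printf("ok=%d page_index=%llu\n",
	       demo_request_ok(&bio, 1ULL << 20), index);
	return 0;
}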
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 010b9fb9fec6..985da945f0b5 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -272,7 +272,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno) nrvecs = max(nrvecs/2, 1U); } - bio->bi_sector = blkno * (sb->s_blocksize >> 9); + bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9); bio->bi_bdev = sb->s_bdev; bio->bi_end_io = gfs2_end_log_write; bio->bi_private = sdp; diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 82303b474958..16194da91652 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -224,7 +224,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent) lock_page(page); bio = bio_alloc(GFP_NOFS, 1); - bio->bi_sector = sector * (sb->s_blocksize >> 9); + bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9); bio->bi_bdev = sb->s_bdev; bio_add_page(bio, page, PAGE_SIZE, 0); diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index e9a97a0d4314..3f999649587f 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c @@ -63,7 +63,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector, sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1); bio = bio_alloc(GFP_NOIO, 1); - bio->bi_sector = sector; + bio->bi_iter.bi_sector = sector; bio->bi_bdev = sb->s_bdev; if (!(rw & WRITE) && data) diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index 360d27c48887..8d811e02b4b9 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c @@ -1998,20 +1998,20 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp) bio = bio_alloc(GFP_NOFS, 1); - bio->bi_sector = bp->l_blkno << (log->l2bsize - 9); + bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); bio->bi_bdev = log->bdev; bio->bi_io_vec[0].bv_page = bp->l_page; bio->bi_io_vec[0].bv_len = LOGPSIZE; bio->bi_io_vec[0].bv_offset = bp->l_offset; bio->bi_vcnt = 1; - bio->bi_size = LOGPSIZE; + bio->bi_iter.bi_size = LOGPSIZE; bio->bi_end_io = lbmIODone; bio->bi_private = bp; /*check if journaling to disk has been disabled*/ if (log->no_integrity) { - bio->bi_size = 0; + bio->bi_iter.bi_size = 0; lbmIODone(bio, 0); } else { submit_bio(READ_SYNC, bio); @@ -2144,21 +2144,21 @@ static void lbmStartIO(struct lbuf * bp) jfs_info("lbmStartIO\n"); bio = bio_alloc(GFP_NOFS, 1); - bio->bi_sector = bp->l_blkno << (log->l2bsize - 9); + bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); bio->bi_bdev = log->bdev; bio->bi_io_vec[0].bv_page = bp->l_page; bio->bi_io_vec[0].bv_len = LOGPSIZE; bio->bi_io_vec[0].bv_offset = bp->l_offset; bio->bi_vcnt = 1; - bio->bi_size = LOGPSIZE; + bio->bi_iter.bi_size = LOGPSIZE; bio->bi_end_io = lbmIODone; bio->bi_private = bp; /* check if journaling to disk has been disabled */ if (log->no_integrity) { - bio->bi_size = 0; + bio->bi_iter.bi_size = 0; lbmIODone(bio, 0); } else { submit_bio(WRITE_SYNC, bio); diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index d165cde0c68d..49ba7ff1bbb9 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c @@ -416,7 +416,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) * count from hitting zero before we're through */ inc_io(page); - if (!bio->bi_size) + if (!bio->bi_iter.bi_size) goto dump_bio; submit_bio(WRITE, bio); nr_underway++; @@ -438,7 +438,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) bio = bio_alloc(GFP_NOFS, 1); bio->bi_bdev = inode->i_sb->s_bdev; - bio->bi_sector = pblock << (inode->i_blkbits - 9); + bio->bi_iter.bi_sector 
= pblock << (inode->i_blkbits - 9); bio->bi_end_io = metapage_write_end_io; bio->bi_private = page; @@ -452,7 +452,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) if (bio) { if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes) goto add_failed; - if (!bio->bi_size) + if (!bio->bi_iter.bi_size) goto dump_bio; submit_bio(WRITE, bio); @@ -517,7 +517,8 @@ static int metapage_readpage(struct file *fp, struct page *page) bio = bio_alloc(GFP_NOFS, 1); bio->bi_bdev = inode->i_sb->s_bdev; - bio->bi_sector = pblock << (inode->i_blkbits - 9); + bio->bi_iter.bi_sector = + pblock << (inode->i_blkbits - 9); bio->bi_end_io = metapage_read_end_io; bio->bi_private = page; len = xlen << inode->i_blkbits; diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c index e6df3be3b31b..76279e11982d 100644 --- a/fs/logfs/dev_bdev.c +++ b/fs/logfs/dev_bdev.c @@ -26,9 +26,9 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw) bio_vec.bv_len = PAGE_SIZE; bio_vec.bv_offset = 0; bio.bi_vcnt = 1; - bio.bi_size = PAGE_SIZE; bio.bi_bdev = bdev; - bio.bi_sector = page->index * (PAGE_SIZE >> 9); + bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9); + bio.bi_iter.bi_size = PAGE_SIZE; return submit_bio_wait(rw, &bio); } @@ -92,9 +92,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, if (i >= max_pages) { /* Block layer cannot split bios :( */ bio->bi_vcnt = i; - bio->bi_size = i * PAGE_SIZE; + bio->bi_iter.bi_size = i * PAGE_SIZE; bio->bi_bdev = super->s_bdev; - bio->bi_sector = ofs >> 9; + bio->bi_iter.bi_sector = ofs >> 9; bio->bi_private = sb; bio->bi_end_io = writeseg_end_io; atomic_inc(&super->s_pending_writes); @@ -119,9 +119,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, unlock_page(page); } bio->bi_vcnt = nr_pages; - bio->bi_size = nr_pages * PAGE_SIZE; + bio->bi_iter.bi_size = nr_pages * PAGE_SIZE; bio->bi_bdev = super->s_bdev; - bio->bi_sector = ofs >> 9; + bio->bi_iter.bi_sector = ofs >> 9; bio->bi_private = sb; bio->bi_end_io = writeseg_end_io; atomic_inc(&super->s_pending_writes); @@ -184,9 +184,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index, if (i >= max_pages) { /* Block layer cannot split bios :( */ bio->bi_vcnt = i; - bio->bi_size = i * PAGE_SIZE; + bio->bi_iter.bi_size = i * PAGE_SIZE; bio->bi_bdev = super->s_bdev; - bio->bi_sector = ofs >> 9; + bio->bi_iter.bi_sector = ofs >> 9; bio->bi_private = sb; bio->bi_end_io = erase_end_io; atomic_inc(&super->s_pending_writes); @@ -205,9 +205,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index, bio->bi_io_vec[i].bv_offset = 0; } bio->bi_vcnt = nr_pages; - bio->bi_size = nr_pages * PAGE_SIZE; + bio->bi_iter.bi_size = nr_pages * PAGE_SIZE; bio->bi_bdev = super->s_bdev; - bio->bi_sector = ofs >> 9; + bio->bi_iter.bi_sector = ofs >> 9; bio->bi_private = sb; bio->bi_end_io = erase_end_io; atomic_inc(&super->s_pending_writes); diff --git a/fs/mpage.c b/fs/mpage.c index dd6d5878f4d9..4979ffa60aaa 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -93,7 +93,7 @@ mpage_alloc(struct block_device *bdev, if (bio) { bio->bi_bdev = bdev; - bio->bi_sector = first_sector; + bio->bi_iter.bi_sector = first_sector; } return bio; } diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index da768923bf7c..56ff823ca82e 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -134,8 +134,8 @@ bl_submit_bio(int rw, struct bio *bio) if (bio) { 
get_parallel(bio->bi_private); dprintk("%s submitting %s bio %u@%llu\n", __func__, - rw == READ ? "read" : "write", - bio->bi_size, (unsigned long long)bio->bi_sector); + rw == READ ? "read" : "write", bio->bi_iter.bi_size, + (unsigned long long)bio->bi_iter.bi_sector); submit_bio(rw, bio); } return NULL; @@ -156,7 +156,8 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect, } if (bio) { - bio->bi_sector = isect - be->be_f_offset + be->be_v_offset; + bio->bi_iter.bi_sector = isect - be->be_f_offset + + be->be_v_offset; bio->bi_bdev = be->be_mdev; bio->bi_end_io = end_io; bio->bi_private = par; @@ -511,7 +512,7 @@ bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be, isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) + (offset / SECTOR_SIZE); - bio->bi_sector = isect - be->be_f_offset + be->be_v_offset; + bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset; bio->bi_bdev = be->be_mdev; bio->bi_end_io = bl_read_single_end_io; diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index 2d8be51f90dc..dc3a9efdaab8 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c @@ -416,7 +416,8 @@ static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start, } if (likely(bio)) { bio->bi_bdev = nilfs->ns_bdev; - bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9); + bio->bi_iter.bi_sector = + start << (nilfs->ns_blocksize_bits - 9); } return bio; } diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 73920ffda05b..bf482dfed14f 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -413,7 +413,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, } /* Must put everything in 512 byte sectors for the bio... */ - bio->bi_sector = (reg->hr_start_block + cs) << (bits - 9); + bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9); bio->bi_bdev = reg->hr_bdev; bio->bi_private = wc; bio->bi_end_io = o2hb_bio_end_io; diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 71c8c9d2b882..1b19b9cd692a 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -407,7 +407,7 @@ xfs_alloc_ioend_bio( struct bio *bio = bio_alloc(GFP_NOIO, nvecs); ASSERT(bio->bi_private == NULL); - bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); + bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio->bi_bdev = bh->b_bdev; return bio; } diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index c7f0b77dcb00..5f3ea443ebbe 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1255,7 +1255,7 @@ next_chunk: bio = bio_alloc(GFP_NOIO, nr_pages); bio->bi_bdev = bp->b_target->bt_bdev; - bio->bi_sector = sector; + bio->bi_iter.bi_sector = sector; bio->bi_end_io = xfs_buf_bio_end_io; bio->bi_private = bp; @@ -1277,7 +1277,7 @@ next_chunk: total_nr_pages--; } - if (likely(bio->bi_size)) { + if (likely(bio->bi_iter.bi_size)) { if (xfs_buf_is_vmapped(bp)) { flush_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); diff --git a/include/linux/bio.h b/include/linux/bio.h index 060ff695085c..e2e0bc642ed1 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -62,19 +62,19 @@ * on highmem page vectors */ #define bio_iovec_idx(bio, idx) (&((bio)->bi_io_vec[(idx)])) -#define bio_iovec(bio) bio_iovec_idx((bio), (bio)->bi_idx) +#define bio_iovec(bio) bio_iovec_idx((bio), (bio)->bi_iter.bi_idx) #define bio_page(bio) bio_iovec((bio))->bv_page #define bio_offset(bio) bio_iovec((bio))->bv_offset -#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) -#define bio_sectors(bio) ((bio)->bi_size >> 9) -#define 
bio_end_sector(bio) ((bio)->bi_sector + bio_sectors((bio))) +#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_iter.bi_idx) +#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9) +#define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio))) static inline unsigned int bio_cur_bytes(struct bio *bio) { if (bio->bi_vcnt) return bio_iovec(bio)->bv_len; else /* dataless requests such as discard */ - return bio->bi_size; + return bio->bi_iter.bi_size; } static inline void *bio_data(struct bio *bio) @@ -108,7 +108,7 @@ static inline void *bio_data(struct bio *bio) */ #define __BVEC_END(bio) bio_iovec_idx((bio), (bio)->bi_vcnt - 1) -#define __BVEC_START(bio) bio_iovec_idx((bio), (bio)->bi_idx) +#define __BVEC_START(bio) bio_iovec_idx((bio), (bio)->bi_iter.bi_idx) /* Default implementation of BIOVEC_PHYS_MERGEABLE */ #define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ @@ -150,7 +150,7 @@ static inline void *bio_data(struct bio *bio) i++) #define bio_for_each_segment(bvl, bio, i) \ - for (i = (bio)->bi_idx; \ + for (i = (bio)->bi_iter.bi_idx; \ bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \ i++) @@ -365,7 +365,7 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, #define __bio_kunmap_irq(buf, flags) bvec_kunmap_irq(buf, flags) #define bio_kmap_irq(bio, flags) \ - __bio_kmap_irq((bio), (bio)->bi_idx, (flags)) + __bio_kmap_irq((bio), (bio)->bi_iter.bi_idx, (flags)) #define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags) /* diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 238ef0ed62f8..29b5b84d8a29 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -28,13 +28,19 @@ struct bio_vec { unsigned int bv_offset; }; +struct bvec_iter { + sector_t bi_sector; /* device address in 512 byte + sectors */ + unsigned int bi_size; /* residual I/O count */ + + unsigned int bi_idx; /* current index into bvl_vec */ +}; + /* * main unit of I/O for the block layer and lower layers (ie drivers and * stacking drivers) */ struct bio { - sector_t bi_sector; /* device address in 512 byte - sectors */ struct bio *bi_next; /* request queue link */ struct block_device *bi_bdev; unsigned long bi_flags; /* status, command, etc */ @@ -42,16 +48,13 @@ struct bio { * top bits priority */ - unsigned short bi_vcnt; /* how many bio_vec's */ - unsigned short bi_idx; /* current index into bvl_vec */ + struct bvec_iter bi_iter; /* Number of segments in this BIO after * physical address coalescing is performed. */ unsigned int bi_phys_segments; - unsigned int bi_size; /* residual I/O count */ - /* * To keep track of the max segment size, we account for the * sizes of the first and last mergeable segments in this bio. 
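The blk_types.h hunk above is the heart of the change: bi_sector, bi_size and bi_idx move out of struct bio into the embedded struct bvec_iter, and the bio.h helpers are rewritten to read them through bi_iter. A self-contained sketch of the resulting accessors follows; the demo_ prefixed names are stand-ins for illustration, not kernel identifiers.

#include <stdio.h>

/* Stand-in for the new struct bvec_iter added in blk_types.h. */
struct demo_bvec_iter {
	unsigned long long bi_sector;	/* device address in 512-byte sectors */
	unsigned int bi_size;		/* residual I/O count, in bytes */
	unsigned int bi_idx;		/* current index into the bio_vec array */
};

/* Stand-in for struct bio after the fields moved under bi_iter. */
struct demo_bio {
	struct demo_bvec_iter bi_iter;
	unsigned short bi_vcnt;		/* how many bio_vecs are attached */
};

/* bio_sectors(): residual size expressed in 512-byte sectors. */
static unsigned int demo_bio_sectors(const struct demo_bio *bio)
{
	return bio->bi_iter.bi_size >> 9;
}

/* bio_end_sector(): first sector past the end of the request. */
static unsigned long long demo_bio_end_sector(const struct demo_bio *bio)
{
	return bio->bi_iter.bi_sector + demo_bio_sectors(bio);
}

/* bio_segments(): bio_vecs not yet consumed by the iterator. */
static unsigned int demo_bio_segments(const struct demo_bio *bio)
{
	return bio->bi_vcnt - bio->bi_iter.bi_idx;
}

int main(void)
{
	struct demo_bio bio = {
		.bi_iter = { .bi_sector = 1024, .bi_size = 16384, .bi_idx = 1 },
		.bi_vcnt = 4,
	};

	printf("sectors=%u end=%llu segments=%u\n",
	       demo_bio_sectors(&bio), demo_bio_end_sector(&bio),
	       demo_bio_segments(&bio));
	return 0;
}

Keeping all per-request progress in the iterator is what lets later patches in the series advance bi_iter while leaving the underlying bio_vec array untouched, which is the point of the immutable-biovec work.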
@@ -74,11 +77,13 @@ struct bio { struct bio_integrity_payload *bi_integrity; /* data integrity */ #endif + unsigned short bi_vcnt; /* how many bio_vec's */ + /* * Everything starting with bi_max_vecs will be preserved by bio_reset() */ - unsigned int bi_max_vecs; /* max bvl_vecs we can hold */ + unsigned short bi_max_vecs; /* max bvl_vecs we can hold */ atomic_t bi_cnt; /* pin count */ diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h index e2b9576d00e2..095c6e4fe1e8 100644 --- a/include/trace/events/bcache.h +++ b/include/trace/events/bcache.h @@ -24,10 +24,10 @@ DECLARE_EVENT_CLASS(bcache_request, __entry->dev = bio->bi_bdev->bd_dev; __entry->orig_major = d->disk->major; __entry->orig_minor = d->disk->first_minor; - __entry->sector = bio->bi_sector; - __entry->orig_sector = bio->bi_sector - 16; - __entry->nr_sector = bio->bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); + __entry->sector = bio->bi_iter.bi_sector; + __entry->orig_sector = bio->bi_iter.bi_sector - 16; + __entry->nr_sector = bio->bi_iter.bi_size >> 9; + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); ), TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)", @@ -99,9 +99,9 @@ DECLARE_EVENT_CLASS(bcache_bio, TP_fast_assign( __entry->dev = bio->bi_bdev->bd_dev; - __entry->sector = bio->bi_sector; - __entry->nr_sector = bio->bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); + __entry->sector = bio->bi_iter.bi_sector; + __entry->nr_sector = bio->bi_iter.bi_size >> 9; + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); ), TP_printk("%d,%d %s %llu + %u", @@ -134,9 +134,9 @@ TRACE_EVENT(bcache_read, TP_fast_assign( __entry->dev = bio->bi_bdev->bd_dev; - __entry->sector = bio->bi_sector; - __entry->nr_sector = bio->bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); + __entry->sector = bio->bi_iter.bi_sector; + __entry->nr_sector = bio->bi_iter.bi_size >> 9; + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); __entry->cache_hit = hit; __entry->bypass = bypass; ), @@ -162,9 +162,9 @@ TRACE_EVENT(bcache_write, TP_fast_assign( __entry->dev = bio->bi_bdev->bd_dev; - __entry->sector = bio->bi_sector; - __entry->nr_sector = bio->bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); + __entry->sector = bio->bi_iter.bi_sector; + __entry->nr_sector = bio->bi_iter.bi_size >> 9; + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); __entry->writeback = writeback; __entry->bypass = bypass; ), diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 4c2301d2ef1a..e76ae19a8d6f 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -243,9 +243,9 @@ TRACE_EVENT(block_bio_bounce, TP_fast_assign( __entry->dev = bio->bi_bdev ? 
bio->bi_bdev->bd_dev : 0; - __entry->sector = bio->bi_sector; + __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), @@ -280,10 +280,10 @@ TRACE_EVENT(block_bio_complete, TP_fast_assign( __entry->dev = bio->bi_bdev->bd_dev; - __entry->sector = bio->bi_sector; + __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); __entry->error = error; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); ), TP_printk("%d,%d %s %llu + %u [%d]", @@ -308,9 +308,9 @@ DECLARE_EVENT_CLASS(block_bio_merge, TP_fast_assign( __entry->dev = bio->bi_bdev->bd_dev; - __entry->sector = bio->bi_sector; + __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), @@ -375,9 +375,9 @@ TRACE_EVENT(block_bio_queue, TP_fast_assign( __entry->dev = bio->bi_bdev->bd_dev; - __entry->sector = bio->bi_sector; + __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), @@ -403,7 +403,7 @@ DECLARE_EVENT_CLASS(block_get_rq, TP_fast_assign( __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; - __entry->sector = bio ? bio->bi_sector : 0; + __entry->sector = bio ? bio->bi_iter.bi_sector : 0; __entry->nr_sector = bio ? bio_sectors(bio) : 0; blk_fill_rwbs(__entry->rwbs, bio ? 
bio->bi_rw : 0, __entry->nr_sector); @@ -538,9 +538,9 @@ TRACE_EVENT(block_split, TP_fast_assign( __entry->dev = bio->bi_bdev->bd_dev; - __entry->sector = bio->bi_sector; + __entry->sector = bio->bi_iter.bi_sector; __entry->new_sector = new_sector; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), @@ -579,11 +579,11 @@ TRACE_EVENT(block_bio_remap, TP_fast_assign( __entry->dev = bio->bi_bdev->bd_dev; - __entry->sector = bio->bi_sector; + __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); __entry->old_dev = dev; __entry->old_sector = from; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); ), TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index e0dc355fa317..bd3ee4fbe7a7 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -616,8 +616,8 @@ TRACE_EVENT(f2fs_do_submit_bio, __entry->dev = sb->s_dev; __entry->btype = btype; __entry->sync = sync; - __entry->sector = bio->bi_sector; - __entry->size = bio->bi_size; + __entry->sector = bio->bi_iter.bi_sector; + __entry->size = bio->bi_iter.bi_size; ), TP_printk("dev = (%d,%d), type = %s, io = %s, sector = %lld, size = %u", diff --git a/kernel/power/block_io.c b/kernel/power/block_io.c index d09dd10c5a5e..9a58bc258810 100644 --- a/kernel/power/block_io.c +++ b/kernel/power/block_io.c @@ -32,7 +32,7 @@ static int submit(int rw, struct block_device *bdev, sector_t sector, struct bio *bio; bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); - bio->bi_sector = sector; + bio->bi_iter.bi_sector = sector; bio->bi_bdev = bdev; bio->bi_end_io = end_swap_bio_read; diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index f785aef65799..b418cb0d7242 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -781,8 +781,8 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, if (!error && !bio_flagged(bio, BIO_UPTODATE)) error = EIO; - __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, - error, 0, NULL); + __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, + bio->bi_rw, what, error, 0, NULL); } static void blk_add_trace_bio_bounce(void *ignore, @@ -885,8 +885,9 @@ static void blk_add_trace_split(void *ignore, if (bt) { __be64 rpdu = cpu_to_be64(pdu); - __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, - BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE), + __blk_add_trace(bt, bio->bi_iter.bi_sector, + bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT, + !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu); } } @@ -918,9 +919,9 @@ static void blk_add_trace_bio_remap(void *ignore, r.device_to = cpu_to_be32(bio->bi_bdev->bd_dev); r.sector_from = cpu_to_be64(from); - __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, - BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), - sizeof(r), &r); + __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, + bio->bi_rw, BLK_TA_REMAP, + !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r); } /** diff --git a/mm/page_io.c b/mm/page_io.c index 8c79a4764be0..f14eded987fa 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -31,13 +31,13 @@ static struct bio *get_swap_bio(gfp_t gfp_flags, bio = bio_alloc(gfp_flags, 1); if (bio) { - bio->bi_sector = map_swap_page(page, &bio->bi_bdev); - bio->bi_sector <<= PAGE_SHIFT - 9; + 
bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev); + bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9; bio->bi_io_vec[0].bv_page = page; bio->bi_io_vec[0].bv_len = PAGE_SIZE; bio->bi_io_vec[0].bv_offset = 0; bio->bi_vcnt = 1; - bio->bi_size = PAGE_SIZE; + bio->bi_iter.bi_size = PAGE_SIZE; bio->bi_end_io = end_io; } return bio; @@ -62,7 +62,7 @@ void end_swap_bio_write(struct bio *bio, int err) printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n", imajor(bio->bi_bdev->bd_inode), iminor(bio->bi_bdev->bd_inode), - (unsigned long long)bio->bi_sector); + (unsigned long long)bio->bi_iter.bi_sector); ClearPageReclaim(page); } end_page_writeback(page); @@ -80,7 +80,7 @@ void end_swap_bio_read(struct bio *bio, int err) printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n", imajor(bio->bi_bdev->bd_inode), iminor(bio->bi_bdev->bd_inode), - (unsigned long long)bio->bi_sector); + (unsigned long long)bio->bi_iter.bi_sector); goto out; } -- cgit v1.2.3 From 7988613b0e5b2638caf6cd493cc78e9595eba19c Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 23 Nov 2013 17:19:00 -0800 Subject: block: Convert bio_for_each_segment() to bvec_iter More prep work for immutable biovecs - with immutable bvecs drivers won't be able to use the biovec directly, they'll need to use helpers that take into account bio->bi_iter.bi_bvec_done. This updates callers for the new usage without changing the implementation yet. Signed-off-by: Kent Overstreet Cc: Jens Axboe Cc: Geert Uytterhoeven Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: "Ed L. Cashin" Cc: Nick Piggin Cc: Lars Ellenberg Cc: Jiri Kosina Cc: Paul Clements Cc: Jim Paris Cc: Geoff Levand Cc: Yehuda Sadeh Cc: Sage Weil Cc: Alex Elder Cc: ceph-devel@vger.kernel.org Cc: Joshua Morris Cc: Philip Kelleher Cc: Konrad Rzeszutek Wilk Cc: Jeremy Fitzhardinge Cc: Neil Brown Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: linux390@de.ibm.com Cc: Nagalakshmi Nandigama Cc: Sreekanth Reddy Cc: support@lsi.com Cc: "James E.J. Bottomley" Cc: Greg Kroah-Hartman Cc: Alexander Viro Cc: Steven Whitehouse Cc: Herton Ronaldo Krzesinski Cc: Tejun Heo Cc: Andrew Morton Cc: Guo Chao Cc: Asai Thambi S P Cc: Selvan Mani Cc: Sam Bradshaw Cc: Matthew Wilcox Cc: Keith Busch Cc: Stephen Hemminger Cc: Quoc-Son Anh Cc: Sebastian Ott Cc: Nitin Gupta Cc: Minchan Kim Cc: Jerome Marchand Cc: Seth Jennings Cc: "Martin K. Petersen" Cc: Mike Snitzer Cc: Vivek Goyal Cc: "Darrick J. 
Wong" Cc: Chris Metcalf Cc: Jan Kara Cc: linux-m68k@lists.linux-m68k.org Cc: linuxppc-dev@lists.ozlabs.org Cc: drbd-user@lists.linbit.com Cc: nbd-general@lists.sourceforge.net Cc: cbe-oss-dev@lists.ozlabs.org Cc: xen-devel@lists.xensource.com Cc: virtualization@lists.linux-foundation.org Cc: linux-raid@vger.kernel.org Cc: linux-s390@vger.kernel.org Cc: DL-MPTFusionLinux@lsi.com Cc: linux-scsi@vger.kernel.org Cc: devel@driverdev.osuosl.org Cc: linux-fsdevel@vger.kernel.org Cc: cluster-devel@redhat.com Cc: linux-mm@kvack.org Acked-by: Geoff Levand --- arch/m68k/emu/nfblock.c | 11 ++--- arch/powerpc/sysdev/axonram.c | 18 ++++---- block/blk-core.c | 4 +- block/blk-merge.c | 49 ++++++++++---------- drivers/block/aoe/aoecmd.c | 16 +++---- drivers/block/brd.c | 12 ++--- drivers/block/drbd/drbd_main.c | 27 ++++++----- drivers/block/drbd/drbd_receiver.c | 13 +++--- drivers/block/drbd/drbd_worker.c | 8 ++-- drivers/block/floppy.c | 12 ++--- drivers/block/loop.c | 23 +++++----- drivers/block/mtip32xx/mtip32xx.c | 13 +++--- drivers/block/nbd.c | 12 ++--- drivers/block/nvme-core.c | 33 ++++++++------ drivers/block/ps3disk.c | 10 ++--- drivers/block/ps3vram.c | 10 ++--- drivers/block/rbd.c | 38 ++++++++-------- drivers/block/rsxx/dma.c | 11 ++--- drivers/md/bcache/btree.c | 4 +- drivers/md/bcache/debug.c | 19 ++++---- drivers/md/bcache/io.c | 69 ++++++++++++----------------- drivers/md/bcache/request.c | 26 +++++------ drivers/md/raid5.c | 12 ++--- drivers/s390/block/dasd_diag.c | 10 ++--- drivers/s390/block/dasd_eckd.c | 48 ++++++++++---------- drivers/s390/block/dasd_fba.c | 26 +++++------ drivers/s390/block/dcssblk.c | 16 +++---- drivers/s390/block/scm_blk.c | 8 ++-- drivers/s390/block/scm_blk_cluster.c | 4 +- drivers/s390/block/xpram.c | 10 ++--- drivers/scsi/mpt2sas/mpt2sas_transport.c | 31 ++++++------- drivers/scsi/mpt3sas/mpt3sas_transport.c | 31 ++++++------- drivers/staging/lustre/lustre/llite/lloop.c | 14 +++--- drivers/staging/zram/zram_drv.c | 19 ++++---- fs/bio-integrity.c | 30 +++++++------ fs/bio.c | 22 ++++----- include/linux/bio.h | 28 ++++++------ include/linux/blkdev.h | 7 +-- mm/bounce.c | 44 +++++++++--------- 39 files changed, 401 insertions(+), 397 deletions(-) (limited to 'mm') diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c index 0a9d0b3c794b..2d75ae246167 100644 --- a/arch/m68k/emu/nfblock.c +++ b/arch/m68k/emu/nfblock.c @@ -62,17 +62,18 @@ struct nfhd_device { static void nfhd_make_request(struct request_queue *queue, struct bio *bio) { struct nfhd_device *dev = queue->queuedata; - struct bio_vec *bvec; - int i, dir, len, shift; + struct bio_vec bvec; + struct bvec_iter iter; + int dir, len, shift; sector_t sec = bio->bi_iter.bi_sector; dir = bio_data_dir(bio); shift = dev->bshift; - bio_for_each_segment(bvec, bio, i) { - len = bvec->bv_len; + bio_for_each_segment(bvec, bio, iter) { + len = bvec.bv_len; len >>= 9; nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift, - bvec_to_phys(bvec)); + bvec_to_phys(&bvec)); sec += len; } bio_endio(bio, 0); diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index f33bcbaa6a07..47b6b9f81d43 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c @@ -109,28 +109,28 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio) struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data; unsigned long phys_mem, phys_end; void *user_mem; - struct bio_vec *vec; + struct bio_vec vec; unsigned int transfered; - unsigned short idx; + struct bvec_iter iter; phys_mem 
= bank->io_addr + (bio->bi_iter.bi_sector << AXON_RAM_SECTOR_SHIFT); phys_end = bank->io_addr + bank->size; transfered = 0; - bio_for_each_segment(vec, bio, idx) { - if (unlikely(phys_mem + vec->bv_len > phys_end)) { + bio_for_each_segment(vec, bio, iter) { + if (unlikely(phys_mem + vec.bv_len > phys_end)) { bio_io_error(bio); return; } - user_mem = page_address(vec->bv_page) + vec->bv_offset; + user_mem = page_address(vec.bv_page) + vec.bv_offset; if (bio_data_dir(bio) == READ) - memcpy(user_mem, (void *) phys_mem, vec->bv_len); + memcpy(user_mem, (void *) phys_mem, vec.bv_len); else - memcpy((void *) phys_mem, user_mem, vec->bv_len); + memcpy((void *) phys_mem, user_mem, vec.bv_len); - phys_mem += vec->bv_len; - transfered += vec->bv_len; + phys_mem += vec.bv_len; + transfered += vec.bv_len; } bio_endio(bio, 0); } diff --git a/block/blk-core.c b/block/blk-core.c index 5c2ab2c74066..5da8e900d3b1 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2746,10 +2746,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, void rq_flush_dcache_pages(struct request *rq) { struct req_iterator iter; - struct bio_vec *bvec; + struct bio_vec bvec; rq_for_each_segment(bvec, rq, iter) - flush_dcache_page(bvec->bv_page); + flush_dcache_page(bvec.bv_page); } EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); #endif diff --git a/block/blk-merge.c b/block/blk-merge.c index 03bc083c28cf..a1ead9049ed6 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -12,10 +12,11 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, struct bio *bio) { - struct bio_vec *bv, *bvprv = NULL; - int cluster, i, high, highprv = 1; + struct bio_vec bv, bvprv = { NULL }; + int cluster, high, highprv = 1; unsigned int seg_size, nr_phys_segs; struct bio *fbio, *bbio; + struct bvec_iter iter; if (!bio) return 0; @@ -25,25 +26,23 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, seg_size = 0; nr_phys_segs = 0; for_each_bio(bio) { - bio_for_each_segment(bv, bio, i) { + bio_for_each_segment(bv, bio, iter) { /* * the trick here is making sure that a high page is * never considered part of another segment, since that * might change with the bounce page. 
*/ - high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q); - if (high || highprv) - goto new_segment; - if (cluster) { - if (seg_size + bv->bv_len + high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q); + if (!high && !highprv && cluster) { + if (seg_size + bv.bv_len > queue_max_segment_size(q)) goto new_segment; - if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv)) + if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv)) goto new_segment; - if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv)) + if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv)) goto new_segment; - seg_size += bv->bv_len; + seg_size += bv.bv_len; bvprv = bv; continue; } @@ -54,7 +53,7 @@ new_segment: nr_phys_segs++; bvprv = bv; - seg_size = bv->bv_len; + seg_size = bv.bv_len; highprv = high; } bbio = bio; @@ -110,21 +109,21 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, return 0; } -static void +static inline void __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, - struct scatterlist *sglist, struct bio_vec **bvprv, + struct scatterlist *sglist, struct bio_vec *bvprv, struct scatterlist **sg, int *nsegs, int *cluster) { int nbytes = bvec->bv_len; - if (*bvprv && *cluster) { + if (*sg && *cluster) { if ((*sg)->length + nbytes > queue_max_segment_size(q)) goto new_segment; - if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec)) + if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) goto new_segment; - if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec)) + if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) goto new_segment; (*sg)->length += nbytes; @@ -150,7 +149,7 @@ new_segment: sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset); (*nsegs)++; } - *bvprv = bvec; + *bvprv = *bvec; } /* @@ -160,7 +159,7 @@ new_segment: int blk_rq_map_sg(struct request_queue *q, struct request *rq, struct scatterlist *sglist) { - struct bio_vec *bvec, *bvprv; + struct bio_vec bvec, bvprv; struct req_iterator iter; struct scatterlist *sg; int nsegs, cluster; @@ -171,10 +170,9 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, /* * for each bio in rq */ - bvprv = NULL; sg = NULL; rq_for_each_segment(bvec, rq, iter) { - __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg, + __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg, &nsegs, &cluster); } /* segments in rq */ @@ -223,18 +221,17 @@ EXPORT_SYMBOL(blk_rq_map_sg); int blk_bio_map_sg(struct request_queue *q, struct bio *bio, struct scatterlist *sglist) { - struct bio_vec *bvec, *bvprv; + struct bio_vec bvec, bvprv; struct scatterlist *sg; int nsegs, cluster; - unsigned long i; + struct bvec_iter iter; nsegs = 0; cluster = blk_queue_cluster(q); - bvprv = NULL; sg = NULL; - bio_for_each_segment(bvec, bio, i) { - __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg, + bio_for_each_segment(bvec, bio, iter) { + __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg, &nsegs, &cluster); } /* segments in bio */ diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 77c24ab1898a..7a06aec1dedc 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -897,15 +897,15 @@ rqbiocnt(struct request *r) static void bio_pageinc(struct bio *bio) { - struct bio_vec *bv; + struct bio_vec bv; struct page *page; - int i; + struct bvec_iter iter; - bio_for_each_segment(bv, bio, i) { + bio_for_each_segment(bv, bio, iter) { /* Non-zero page count for non-head members of * compound pages is no longer allowed by the kernel. 
*/ - page = compound_trans_head(bv->bv_page); + page = compound_trans_head(bv.bv_page); atomic_inc(&page->_count); } } @@ -913,12 +913,12 @@ bio_pageinc(struct bio *bio) static void bio_pagedec(struct bio *bio) { - struct bio_vec *bv; struct page *page; - int i; + struct bio_vec bv; + struct bvec_iter iter; - bio_for_each_segment(bv, bio, i) { - page = compound_trans_head(bv->bv_page); + bio_for_each_segment(bv, bio, iter) { + page = compound_trans_head(bv.bv_page); atomic_dec(&page->_count); } } diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 66f5aaae15a2..e73b85cf0756 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -328,9 +328,9 @@ static void brd_make_request(struct request_queue *q, struct bio *bio) struct block_device *bdev = bio->bi_bdev; struct brd_device *brd = bdev->bd_disk->private_data; int rw; - struct bio_vec *bvec; + struct bio_vec bvec; sector_t sector; - int i; + struct bvec_iter iter; int err = -EIO; sector = bio->bi_iter.bi_sector; @@ -347,10 +347,10 @@ static void brd_make_request(struct request_queue *q, struct bio *bio) if (rw == READA) rw = READ; - bio_for_each_segment(bvec, bio, i) { - unsigned int len = bvec->bv_len; - err = brd_do_bvec(brd, bvec->bv_page, len, - bvec->bv_offset, rw, sector); + bio_for_each_segment(bvec, bio, iter) { + unsigned int len = bvec.bv_len; + err = brd_do_bvec(brd, bvec.bv_page, len, + bvec.bv_offset, rw, sector); if (err) break; sector += len >> SECTOR_SHIFT; diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 9e3818b1bc83..f4e5440aba05 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1537,15 +1537,17 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page, static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio) { - struct bio_vec *bvec; - int i; + struct bio_vec bvec; + struct bvec_iter iter; + /* hint all but last page with MSG_MORE */ - bio_for_each_segment(bvec, bio, i) { + bio_for_each_segment(bvec, bio, iter) { int err; - err = _drbd_no_send_page(mdev, bvec->bv_page, - bvec->bv_offset, bvec->bv_len, - i == bio->bi_vcnt - 1 ? 0 : MSG_MORE); + err = _drbd_no_send_page(mdev, bvec.bv_page, + bvec.bv_offset, bvec.bv_len, + bio_iter_last(bio, iter) + ? 0 : MSG_MORE); if (err) return err; } @@ -1554,15 +1556,16 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio) static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio) { - struct bio_vec *bvec; - int i; + struct bio_vec bvec; + struct bvec_iter iter; + /* hint all but last page with MSG_MORE */ - bio_for_each_segment(bvec, bio, i) { + bio_for_each_segment(bvec, bio, iter) { int err; - err = _drbd_send_page(mdev, bvec->bv_page, - bvec->bv_offset, bvec->bv_len, - i == bio->bi_vcnt - 1 ? 0 : MSG_MORE); + err = _drbd_send_page(mdev, bvec.bv_page, + bvec.bv_offset, bvec.bv_len, + bio_iter_last(bio, iter) ? 
0 : MSG_MORE); if (err) return err; } diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 5326c22cdb9d..d073305ffd5e 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1595,9 +1595,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size) static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, sector_t sector, int data_size) { - struct bio_vec *bvec; + struct bio_vec bvec; + struct bvec_iter iter; struct bio *bio; - int dgs, err, i, expect; + int dgs, err, expect; void *dig_in = mdev->tconn->int_dig_in; void *dig_vv = mdev->tconn->int_dig_vv; @@ -1617,11 +1618,11 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, bio = req->master_bio; D_ASSERT(sector == bio->bi_iter.bi_sector); - bio_for_each_segment(bvec, bio, i) { - void *mapped = kmap(bvec->bv_page) + bvec->bv_offset; - expect = min_t(int, data_size, bvec->bv_len); + bio_for_each_segment(bvec, bio, iter) { + void *mapped = kmap(bvec.bv_page) + bvec.bv_offset; + expect = min_t(int, data_size, bvec.bv_len); err = drbd_recv_all_warn(mdev->tconn, mapped, expect); - kunmap(bvec->bv_page); + kunmap(bvec.bv_page); if (err) return err; data_size -= expect; diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 891c0ecaa292..84d3175d493a 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -313,8 +313,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio * { struct hash_desc desc; struct scatterlist sg; - struct bio_vec *bvec; - int i; + struct bio_vec bvec; + struct bvec_iter iter; desc.tfm = tfm; desc.flags = 0; @@ -322,8 +322,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio * sg_init_table(&sg, 1); crypto_hash_init(&desc); - bio_for_each_segment(bvec, bio, i) { - sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset); + bio_for_each_segment(bvec, bio, iter) { + sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); crypto_hash_update(&desc, &sg, sg.length); } crypto_hash_final(&desc, digest); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 6a86fe7b730f..6b29c4422828 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -2351,7 +2351,7 @@ static void rw_interrupt(void) /* Compute maximal contiguous buffer size. 
*/ static int buffer_chain_size(void) { - struct bio_vec *bv; + struct bio_vec bv; int size; struct req_iterator iter; char *base; @@ -2360,10 +2360,10 @@ static int buffer_chain_size(void) size = 0; rq_for_each_segment(bv, current_req, iter) { - if (page_address(bv->bv_page) + bv->bv_offset != base + size) + if (page_address(bv.bv_page) + bv.bv_offset != base + size) break; - size += bv->bv_len; + size += bv.bv_len; } return size >> 9; @@ -2389,7 +2389,7 @@ static int transfer_size(int ssize, int max_sector, int max_size) static void copy_buffer(int ssize, int max_sector, int max_sector_2) { int remaining; /* number of transferred 512-byte sectors */ - struct bio_vec *bv; + struct bio_vec bv; char *buffer; char *dma_buffer; int size; @@ -2427,10 +2427,10 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2) if (!remaining) break; - size = bv->bv_len; + size = bv.bv_len; SUPBOUND(size, remaining); - buffer = page_address(bv->bv_page) + bv->bv_offset; + buffer = page_address(bv.bv_page) + bv.bv_offset; if (dma_buffer + size > floppy_track_buffer + (max_buffer_sectors << 10) || dma_buffer < floppy_track_buffer) { diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f5e39989adde..33fde3a39759 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -288,9 +288,10 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos) { int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t, struct page *page); - struct bio_vec *bvec; + struct bio_vec bvec; + struct bvec_iter iter; struct page *page = NULL; - int i, ret = 0; + int ret = 0; if (lo->transfer != transfer_none) { page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); @@ -302,11 +303,11 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos) do_lo_send = do_lo_send_direct_write; } - bio_for_each_segment(bvec, bio, i) { - ret = do_lo_send(lo, bvec, pos, page); + bio_for_each_segment(bvec, bio, iter) { + ret = do_lo_send(lo, &bvec, pos, page); if (ret < 0) break; - pos += bvec->bv_len; + pos += bvec.bv_len; } if (page) { kunmap(page); @@ -392,20 +393,20 @@ do_lo_receive(struct loop_device *lo, static int lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos) { - struct bio_vec *bvec; + struct bio_vec bvec; + struct bvec_iter iter; ssize_t s; - int i; - bio_for_each_segment(bvec, bio, i) { - s = do_lo_receive(lo, bvec, bsize, pos); + bio_for_each_segment(bvec, bio, iter) { + s = do_lo_receive(lo, &bvec, bsize, pos); if (s < 0) return s; - if (s != bvec->bv_len) { + if (s != bvec.bv_len) { zero_fill_bio(bio); break; } - pos += bvec->bv_len; + pos += bvec.bv_len; } return 0; } diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 69e9eb5a6b34..52b2f2a71470 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -3962,8 +3962,9 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) { struct driver_data *dd = queue->queuedata; struct scatterlist *sg; - struct bio_vec *bvec; - int i, nents = 0; + struct bio_vec bvec; + struct bvec_iter iter; + int nents = 0; int tag = 0, unaligned = 0; if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) { @@ -4026,11 +4027,11 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) } /* Create the scatter list for this bio. 
*/ - bio_for_each_segment(bvec, bio, i) { + bio_for_each_segment(bvec, bio, iter) { sg_set_page(&sg[nents], - bvec->bv_page, - bvec->bv_len, - bvec->bv_offset); + bvec.bv_page, + bvec.bv_len, + bvec.bv_offset); nents++; } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 2dc3b5153f0d..aa362f493216 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -271,7 +271,7 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req) if (nbd_cmd(req) == NBD_CMD_WRITE) { struct req_iterator iter; - struct bio_vec *bvec; + struct bio_vec bvec; /* * we are really probing at internals to determine * whether to set MSG_MORE or not... @@ -281,8 +281,8 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req) if (!rq_iter_last(req, iter)) flags = MSG_MORE; dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n", - nbd->disk->disk_name, req, bvec->bv_len); - result = sock_send_bvec(nbd, bvec, flags); + nbd->disk->disk_name, req, bvec.bv_len); + result = sock_send_bvec(nbd, &bvec, flags); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Send data failed (result %d)\n", @@ -378,10 +378,10 @@ static struct request *nbd_read_stat(struct nbd_device *nbd) nbd->disk->disk_name, req); if (nbd_cmd(req) == NBD_CMD_READ) { struct req_iterator iter; - struct bio_vec *bvec; + struct bio_vec bvec; rq_for_each_segment(bvec, req, iter) { - result = sock_recv_bvec(nbd, bvec); + result = sock_recv_bvec(nbd, &bvec); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", result); @@ -389,7 +389,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd) return req; } dprintk(DBG_RX, "%s: request %p: got %d bytes data\n", - nbd->disk->disk_name, req, bvec->bv_len); + nbd->disk->disk_name, req, bvec.bv_len); } } return req; diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index 53d217381873..5539d2920872 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -550,9 +550,11 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq, static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod, struct bio *bio, enum dma_data_direction dma_dir, int psegs) { - struct bio_vec *bvec, *bvprv = NULL; + struct bio_vec bvec, bvprv; + struct bvec_iter iter; struct scatterlist *sg = NULL; - int i, length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size; + int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size; + int first = 1; if (nvmeq->dev->stripe_size) split_len = nvmeq->dev->stripe_size - @@ -560,25 +562,28 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod, (nvmeq->dev->stripe_size - 1)); sg_init_table(iod->sg, psegs); - bio_for_each_segment(bvec, bio, i) { - if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) { - sg->length += bvec->bv_len; + bio_for_each_segment(bvec, bio, iter) { + if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) { + sg->length += bvec.bv_len; } else { - if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec)) - return nvme_split_and_submit(bio, nvmeq, i, - length, 0); + if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec)) + return nvme_split_and_submit(bio, nvmeq, + iter.bi_idx, + length, 0); sg = sg ? 
sg + 1 : iod->sg; - sg_set_page(sg, bvec->bv_page, bvec->bv_len, - bvec->bv_offset); + sg_set_page(sg, bvec.bv_page, + bvec.bv_len, bvec.bv_offset); nsegs++; } - if (split_len - length < bvec->bv_len) - return nvme_split_and_submit(bio, nvmeq, i, split_len, - split_len - length); - length += bvec->bv_len; + if (split_len - length < bvec.bv_len) + return nvme_split_and_submit(bio, nvmeq, iter.bi_idx, + split_len, + split_len - length); + length += bvec.bv_len; bvprv = bvec; + first = 0; } iod->nents = nsegs; sg_mark_end(sg); diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index 464be78a0836..1c6edb9a9960 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -94,7 +94,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev, { unsigned int offset = 0; struct req_iterator iter; - struct bio_vec *bvec; + struct bio_vec bvec; unsigned int i = 0; size_t size; void *buf; @@ -106,14 +106,14 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev, __func__, __LINE__, i, bio_segments(iter.bio), bio_sectors(iter.bio), iter.bio->bi_iter.bi_sector); - size = bvec->bv_len; - buf = bvec_kmap_irq(bvec, &flags); + size = bvec.bv_len; + buf = bvec_kmap_irq(&bvec, &flags); if (gather) memcpy(dev->bounce_buf+offset, buf, size); else memcpy(buf, dev->bounce_buf+offset, size); offset += size; - flush_kernel_dcache_page(bvec->bv_page); + flush_kernel_dcache_page(bvec.bv_page); bvec_kunmap_irq(buf, &flags); i++; } @@ -130,7 +130,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev, #ifdef DEBUG unsigned int n = 0; - struct bio_vec *bv; + struct bio_vec bv; struct req_iterator iter; rq_for_each_segment(bv, req, iter) diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index 320bbfc9b902..ef45cfb98fd2 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c @@ -555,14 +555,14 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev, const char *op = write ? 
"write" : "read"; loff_t offset = bio->bi_iter.bi_sector << 9; int error = 0; - struct bio_vec *bvec; - unsigned int i; + struct bio_vec bvec; + struct bvec_iter iter; struct bio *next; - bio_for_each_segment(bvec, bio, i) { + bio_for_each_segment(bvec, bio, iter) { /* PS3 is ppc64, so we don't handle highmem */ - char *ptr = page_address(bvec->bv_page) + bvec->bv_offset; - size_t len = bvec->bv_len, retlen; + char *ptr = page_address(bvec.bv_page) + bvec.bv_offset; + size_t len = bvec.bv_len, retlen; dev_dbg(&dev->core, " %s %zu bytes at offset %llu\n", op, len, offset); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index a8f4fe2d4d1b..20e8ab35736b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1109,23 +1109,23 @@ static void bio_chain_put(struct bio *chain) */ static void zero_bio_chain(struct bio *chain, int start_ofs) { - struct bio_vec *bv; + struct bio_vec bv; + struct bvec_iter iter; unsigned long flags; void *buf; - int i; int pos = 0; while (chain) { - bio_for_each_segment(bv, chain, i) { - if (pos + bv->bv_len > start_ofs) { + bio_for_each_segment(bv, chain, iter) { + if (pos + bv.bv_len > start_ofs) { int remainder = max(start_ofs - pos, 0); - buf = bvec_kmap_irq(bv, &flags); + buf = bvec_kmap_irq(&bv, &flags); memset(buf + remainder, 0, - bv->bv_len - remainder); - flush_dcache_page(bv->bv_page); + bv.bv_len - remainder); + flush_dcache_page(bv.bv_page); bvec_kunmap_irq(buf, &flags); } - pos += bv->bv_len; + pos += bv.bv_len; } chain = chain->bi_next; @@ -1173,11 +1173,11 @@ static struct bio *bio_clone_range(struct bio *bio_src, unsigned int len, gfp_t gfpmask) { - struct bio_vec *bv; + struct bio_vec bv; + struct bvec_iter iter; + struct bvec_iter end_iter; unsigned int resid; - unsigned short idx; unsigned int voff; - unsigned short end_idx; unsigned short vcnt; struct bio *bio; @@ -1196,22 +1196,22 @@ static struct bio *bio_clone_range(struct bio *bio_src, /* Find first affected segment... */ resid = offset; - bio_for_each_segment(bv, bio_src, idx) { - if (resid < bv->bv_len) + bio_for_each_segment(bv, bio_src, iter) { + if (resid < bv.bv_len) break; - resid -= bv->bv_len; + resid -= bv.bv_len; } voff = resid; /* ...and the last affected segment */ resid += len; - __bio_for_each_segment(bv, bio_src, end_idx, idx) { - if (resid <= bv->bv_len) + __bio_for_each_segment(bv, bio_src, end_iter, iter) { + if (resid <= bv.bv_len) break; - resid -= bv->bv_len; + resid -= bv.bv_len; } - vcnt = end_idx - idx + 1; + vcnt = end_iter.bi_idx = iter.bi_idx + 1; /* Build the clone */ @@ -1229,7 +1229,7 @@ static struct bio *bio_clone_range(struct bio *bio_src, * Copy over our part of the bio_vec, then update the first * and last (or only) entries. 
*/ - memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx], + memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[iter.bi_idx], vcnt * sizeof (struct bio_vec)); bio->bi_io_vec[0].bv_offset += voff; if (vcnt > 1) { diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c index 3716633be3c2..cf8cd293abb5 100644 --- a/drivers/block/rsxx/dma.c +++ b/drivers/block/rsxx/dma.c @@ -684,7 +684,8 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, void *cb_data) { struct list_head dma_list[RSXX_MAX_TARGETS]; - struct bio_vec *bvec; + struct bio_vec bvec; + struct bvec_iter iter; unsigned long long addr8; unsigned int laddr; unsigned int bv_len; @@ -722,9 +723,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, bv_len -= RSXX_HW_BLK_SIZE; } } else { - bio_for_each_segment(bvec, bio, i) { - bv_len = bvec->bv_len; - bv_off = bvec->bv_offset; + bio_for_each_segment(bvec, bio, iter) { + bv_len = bvec.bv_len; + bv_off = bvec.bv_offset; while (bv_len > 0) { tgt = rsxx_get_dma_tgt(card, addr8); @@ -736,7 +737,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, st = rsxx_queue_dma(card, &dma_list[tgt], bio_data_dir(bio), dma_off, dma_len, - laddr, bvec->bv_page, + laddr, bvec.bv_page, bv_off, cb, cb_data); if (st) goto bvec_err; diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 038a6d2aced3..b62f37925374 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -362,7 +362,7 @@ static void btree_node_write_done(struct closure *cl) struct bio_vec *bv; int n; - __bio_for_each_segment(bv, b->bio, n, 0) + bio_for_each_segment_all(bv, b->bio, n) __free_page(bv->bv_page); __btree_node_write_done(cl); @@ -421,7 +421,7 @@ static void do_btree_node_write(struct btree *b) struct bio_vec *bv; void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); - bio_for_each_segment(bv, b->bio, j) + bio_for_each_segment_all(bv, b->bio, j) memcpy(page_address(bv->bv_page), base + j * PAGE_SIZE, PAGE_SIZE); diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 92b3fd468a03..03cb4d114e16 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -173,7 +173,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) { char name[BDEVNAME_SIZE]; struct bio *check; - struct bio_vec *bv; + struct bio_vec bv, *bv2; + struct bvec_iter iter; int i; check = bio_clone(bio, GFP_NOIO); @@ -185,13 +186,13 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) submit_bio_wait(READ_SYNC, check); - bio_for_each_segment(bv, bio, i) { - void *p1 = kmap_atomic(bv->bv_page); - void *p2 = page_address(check->bi_io_vec[i].bv_page); + bio_for_each_segment(bv, bio, iter) { + void *p1 = kmap_atomic(bv.bv_page); + void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page); - cache_set_err_on(memcmp(p1 + bv->bv_offset, - p2 + bv->bv_offset, - bv->bv_len), + cache_set_err_on(memcmp(p1 + bv.bv_offset, + p2 + bv.bv_offset, + bv.bv_len), dc->disk.c, "verify failed at dev %s sector %llu", bdevname(dc->bdev, name), @@ -200,8 +201,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) kunmap_atomic(p1); } - bio_for_each_segment_all(bv, check, i) - __free_page(bv->bv_page); + bio_for_each_segment_all(bv2, check, i) + __free_page(bv2->bv_page); out_put: bio_put(check); } diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index dc44f0689eb7..9b5b6a41a9b6 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -22,12 +22,12 @@ static void bch_bi_idx_hack_endio(struct bio *bio, int error) static void 
bch_generic_make_request_hack(struct bio *bio) { if (bio->bi_iter.bi_idx) { - int i; - struct bio_vec *bv; + struct bio_vec bv; + struct bvec_iter iter; struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio)); - bio_for_each_segment(bv, bio, i) - clone->bi_io_vec[clone->bi_vcnt++] = *bv; + bio_for_each_segment(bv, bio, iter) + clone->bi_io_vec[clone->bi_vcnt++] = bv; clone->bi_iter.bi_sector = bio->bi_iter.bi_sector; clone->bi_bdev = bio->bi_bdev; @@ -73,8 +73,9 @@ static void bch_generic_make_request_hack(struct bio *bio) struct bio *bch_bio_split(struct bio *bio, int sectors, gfp_t gfp, struct bio_set *bs) { - unsigned idx = bio->bi_iter.bi_idx, vcnt = 0, nbytes = sectors << 9; - struct bio_vec *bv; + unsigned vcnt = 0, nbytes = sectors << 9; + struct bio_vec bv; + struct bvec_iter iter; struct bio *ret = NULL; BUG_ON(sectors <= 0); @@ -86,49 +87,35 @@ struct bio *bch_bio_split(struct bio *bio, int sectors, ret = bio_alloc_bioset(gfp, 1, bs); if (!ret) return NULL; - idx = 0; goto out; } - bio_for_each_segment(bv, bio, idx) { - vcnt = idx - bio->bi_iter.bi_idx; + bio_for_each_segment(bv, bio, iter) { + vcnt++; - if (!nbytes) { - ret = bio_alloc_bioset(gfp, vcnt, bs); - if (!ret) - return NULL; + if (nbytes <= bv.bv_len) + break; - memcpy(ret->bi_io_vec, __bio_iovec(bio), - sizeof(struct bio_vec) * vcnt); + nbytes -= bv.bv_len; + } - break; - } else if (nbytes < bv->bv_len) { - ret = bio_alloc_bioset(gfp, ++vcnt, bs); - if (!ret) - return NULL; + ret = bio_alloc_bioset(gfp, vcnt, bs); + if (!ret) + return NULL; - memcpy(ret->bi_io_vec, __bio_iovec(bio), - sizeof(struct bio_vec) * vcnt); + bio_for_each_segment(bv, bio, iter) { + ret->bi_io_vec[ret->bi_vcnt++] = bv; - ret->bi_io_vec[vcnt - 1].bv_len = nbytes; - bv->bv_offset += nbytes; - bv->bv_len -= nbytes; + if (ret->bi_vcnt == vcnt) break; - } - - nbytes -= bv->bv_len; } + + ret->bi_io_vec[ret->bi_vcnt - 1].bv_len = nbytes; out: ret->bi_bdev = bio->bi_bdev; ret->bi_iter.bi_sector = bio->bi_iter.bi_sector; ret->bi_iter.bi_size = sectors << 9; ret->bi_rw = bio->bi_rw; - ret->bi_vcnt = vcnt; - ret->bi_max_vecs = vcnt; - - bio->bi_iter.bi_sector += sectors; - bio->bi_iter.bi_size -= sectors << 9; - bio->bi_iter.bi_idx = idx; if (bio_integrity(bio)) { if (bio_integrity_clone(ret, bio, gfp)) { @@ -137,9 +124,10 @@ out: } bio_integrity_trim(ret, 0, bio_sectors(ret)); - bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio)); } + bio_advance(bio, ret->bi_iter.bi_size); + return ret; } @@ -155,12 +143,13 @@ static unsigned bch_bio_max_sectors(struct bio *bio) if (bio_segments(bio) > max_segments || q->merge_bvec_fn) { - struct bio_vec *bv; - int i, seg = 0; + struct bio_vec bv; + struct bvec_iter iter; + unsigned seg = 0; ret = 0; - bio_for_each_segment(bv, bio, i) { + bio_for_each_segment(bv, bio, iter) { struct bvec_merge_data bvm = { .bi_bdev = bio->bi_bdev, .bi_sector = bio->bi_iter.bi_sector, @@ -172,11 +161,11 @@ static unsigned bch_bio_max_sectors(struct bio *bio) break; if (q->merge_bvec_fn && - q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len) + q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len) break; seg++; - ret += bv->bv_len >> 9; + ret += bv.bv_len >> 9; } } diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 47a9bbc75124..4c0a422fd49f 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -198,14 +198,14 @@ static bool verify(struct cached_dev *dc, struct bio *bio) static void bio_csum(struct bio *bio, struct bkey *k) { - struct bio_vec *bv; + struct bio_vec bv; + struct 
bvec_iter iter; uint64_t csum = 0; - int i; - bio_for_each_segment(bv, bio, i) { - void *d = kmap(bv->bv_page) + bv->bv_offset; - csum = bch_crc64_update(csum, d, bv->bv_len); - kunmap(bv->bv_page); + bio_for_each_segment(bv, bio, iter) { + void *d = kmap(bv.bv_page) + bv.bv_offset; + csum = bch_crc64_update(csum, d, bv.bv_len); + kunmap(bv.bv_page); } k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1); @@ -1182,17 +1182,17 @@ void bch_cached_dev_request_init(struct cached_dev *dc) static int flash_dev_cache_miss(struct btree *b, struct search *s, struct bio *bio, unsigned sectors) { - struct bio_vec *bv; - int i; + struct bio_vec bv; + struct bvec_iter iter; /* Zero fill bio */ - bio_for_each_segment(bv, bio, i) { - unsigned j = min(bv->bv_len >> 9, sectors); + bio_for_each_segment(bv, bio, iter) { + unsigned j = min(bv.bv_len >> 9, sectors); - void *p = kmap(bv->bv_page); - memset(p + bv->bv_offset, 0, j << 9); - kunmap(bv->bv_page); + void *p = kmap(bv.bv_page); + memset(p + bv.bv_offset, 0, j << 9); + kunmap(bv.bv_page); sectors -= j; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index a5d9c0ee4d60..bef353c51c04 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -937,9 +937,9 @@ static struct dma_async_tx_descriptor * async_copy_data(int frombio, struct bio *bio, struct page *page, sector_t sector, struct dma_async_tx_descriptor *tx) { - struct bio_vec *bvl; + struct bio_vec bvl; + struct bvec_iter iter; struct page *bio_page; - int i; int page_offset; struct async_submit_ctl submit; enum async_tx_flags flags = 0; @@ -953,8 +953,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page, flags |= ASYNC_TX_FENCE; init_async_submit(&submit, flags, tx, NULL, NULL, NULL); - bio_for_each_segment(bvl, bio, i) { - int len = bvl->bv_len; + bio_for_each_segment(bvl, bio, iter) { + int len = bvl.bv_len; int clen; int b_offset = 0; @@ -970,8 +970,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page, clen = len; if (clen > 0) { - b_offset += bvl->bv_offset; - bio_page = bvl->bv_page; + b_offset += bvl.bv_offset; + bio_page = bvl.bv_page; if (frombio) tx = async_memcpy(page, bio_page, page_offset, b_offset, clen, &submit); diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 92bd22ce6760..9cbc567698ce 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -504,7 +504,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, struct dasd_diag_req *dreq; struct dasd_diag_bio *dbio; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst; unsigned int count, datasize; sector_t recid, first_rec, last_rec; @@ -525,10 +525,10 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, /* Check struct bio and count the number of blocks for the request. */ count = 0; rq_for_each_segment(bv, req, iter) { - if (bv->bv_len & (blksize - 1)) + if (bv.bv_len & (blksize - 1)) /* Fba can only do full blocks. */ return ERR_PTR(-EINVAL); - count += bv->bv_len >> (block->s2b_shift + 9); + count += bv.bv_len >> (block->s2b_shift + 9); } /* Paranoia. 
*/ if (count != last_rec - first_rec + 1) @@ -545,8 +545,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, dbio = dreq->bio; recid = first_rec; rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; - for (off = 0; off < bv->bv_len; off += blksize) { + dst = page_address(bv.bv_page) + bv.bv_offset; + for (off = 0; off < bv.bv_len; off += blksize) { memset(dbio, 0, sizeof (struct dasd_diag_bio)); dbio->type = rw_cmd; dbio->block_number = recid + 1; diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index cee7e2708a1f..70d177017329 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -2551,7 +2551,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( struct dasd_ccw_req *cqr; struct ccw1 *ccw; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst; unsigned int off; int count, cidaw, cplength, datasize; @@ -2573,13 +2573,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( count = 0; cidaw = 0; rq_for_each_segment(bv, req, iter) { - if (bv->bv_len & (blksize - 1)) + if (bv.bv_len & (blksize - 1)) /* Eckd can only do full blocks. */ return ERR_PTR(-EINVAL); - count += bv->bv_len >> (block->s2b_shift + 9); + count += bv.bv_len >> (block->s2b_shift + 9); #if defined(CONFIG_64BIT) - if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) - cidaw += bv->bv_len >> (block->s2b_shift + 9); + if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) + cidaw += bv.bv_len >> (block->s2b_shift + 9); #endif } /* Paranoia. */ @@ -2650,16 +2650,16 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( last_rec - recid + 1, cmd, basedev, blksize); } rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; + dst = page_address(bv.bv_page) + bv.bv_offset; if (dasd_page_cache) { char *copy = kmem_cache_alloc(dasd_page_cache, GFP_DMA | __GFP_NOWARN); if (copy && rq_data_dir(req) == WRITE) - memcpy(copy + bv->bv_offset, dst, bv->bv_len); + memcpy(copy + bv.bv_offset, dst, bv.bv_len); if (copy) - dst = copy + bv->bv_offset; + dst = copy + bv.bv_offset; } - for (off = 0; off < bv->bv_len; off += blksize) { + for (off = 0; off < bv.bv_len; off += blksize) { sector_t trkid = recid; unsigned int recoffs = sector_div(trkid, blk_per_trk); rcmd = cmd; @@ -2735,7 +2735,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( struct dasd_ccw_req *cqr; struct ccw1 *ccw; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst, *idaw_dst; unsigned int cidaw, cplength, datasize; unsigned int tlf; @@ -2813,8 +2813,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( idaw_dst = NULL; idaw_len = 0; rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; - seg_len = bv->bv_len; + dst = page_address(bv.bv_page) + bv.bv_offset; + seg_len = bv.bv_len; while (seg_len) { if (new_track) { trkid = recid; @@ -3039,7 +3039,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( { struct dasd_ccw_req *cqr; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst; unsigned int trkcount, ctidaw; unsigned char cmd; @@ -3125,8 +3125,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( new_track = 1; recid = first_rec; rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; - seg_len = bv->bv_len; + dst = page_address(bv.bv_page) + bv.bv_offset; + seg_len = bv.bv_len; while (seg_len) { if (new_track) 
{ trkid = recid; @@ -3158,9 +3158,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( } } else { rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; + dst = page_address(bv.bv_page) + bv.bv_offset; last_tidaw = itcw_add_tidaw(itcw, 0x00, - dst, bv->bv_len); + dst, bv.bv_len); if (IS_ERR(last_tidaw)) { ret = -EINVAL; goto out_error; @@ -3276,7 +3276,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev, struct dasd_ccw_req *cqr; struct ccw1 *ccw; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst; unsigned char cmd; unsigned int trkcount; @@ -3376,8 +3376,8 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev, idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); } rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; - seg_len = bv->bv_len; + dst = page_address(bv.bv_page) + bv.bv_offset; + seg_len = bv.bv_len; if (cmd == DASD_ECKD_CCW_READ_TRACK) memset(dst, 0, seg_len); if (!len_to_track_end) { @@ -3422,7 +3422,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) struct dasd_eckd_private *private; struct ccw1 *ccw; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst, *cda; unsigned int blksize, blk_per_trk, off; sector_t recid; @@ -3440,8 +3440,8 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) if (private->uses_cdl == 0 || recid > 2*blk_per_trk) ccw++; rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; - for (off = 0; off < bv->bv_len; off += blksize) { + dst = page_address(bv.bv_page) + bv.bv_offset; + for (off = 0; off < bv.bv_len; off += blksize) { /* Skip locate record. */ if (private->uses_cdl && recid <= 2*blk_per_trk) ccw++; @@ -3452,7 +3452,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) cda = (char *)((addr_t) ccw->cda); if (dst != cda) { if (rq_data_dir(req) == READ) - memcpy(dst, cda, bv->bv_len); + memcpy(dst, cda, bv.bv_len); kmem_cache_free(dasd_page_cache, (void *)((addr_t)cda & PAGE_MASK)); } diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 9cbc8c32ba59..2c8e68bf9a1c 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -260,7 +260,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, struct dasd_ccw_req *cqr; struct ccw1 *ccw; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst; int count, cidaw, cplength, datasize; sector_t recid, first_rec, last_rec; @@ -283,13 +283,13 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, count = 0; cidaw = 0; rq_for_each_segment(bv, req, iter) { - if (bv->bv_len & (blksize - 1)) + if (bv.bv_len & (blksize - 1)) /* Fba can only do full blocks. */ return ERR_PTR(-EINVAL); - count += bv->bv_len >> (block->s2b_shift + 9); + count += bv.bv_len >> (block->s2b_shift + 9); #if defined(CONFIG_64BIT) - if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) - cidaw += bv->bv_len / blksize; + if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) + cidaw += bv.bv_len / blksize; #endif } /* Paranoia. 
*/ @@ -326,16 +326,16 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, } recid = first_rec; rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; + dst = page_address(bv.bv_page) + bv.bv_offset; if (dasd_page_cache) { char *copy = kmem_cache_alloc(dasd_page_cache, GFP_DMA | __GFP_NOWARN); if (copy && rq_data_dir(req) == WRITE) - memcpy(copy + bv->bv_offset, dst, bv->bv_len); + memcpy(copy + bv.bv_offset, dst, bv.bv_len); if (copy) - dst = copy + bv->bv_offset; + dst = copy + bv.bv_offset; } - for (off = 0; off < bv->bv_len; off += blksize) { + for (off = 0; off < bv.bv_len; off += blksize) { /* Locate record for stupid devices. */ if (private->rdc_data.mode.bits.data_chain == 0) { ccw[-1].flags |= CCW_FLAG_CC; @@ -384,7 +384,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req) struct dasd_fba_private *private; struct ccw1 *ccw; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst, *cda; unsigned int blksize, off; int status; @@ -399,8 +399,8 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req) if (private->rdc_data.mode.bits.data_chain != 0) ccw++; rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; - for (off = 0; off < bv->bv_len; off += blksize) { + dst = page_address(bv.bv_page) + bv.bv_offset; + for (off = 0; off < bv.bv_len; off += blksize) { /* Skip locate record. */ if (private->rdc_data.mode.bits.data_chain == 0) ccw++; @@ -411,7 +411,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req) cda = (char *)((addr_t) ccw->cda); if (dst != cda) { if (rq_data_dir(req) == READ) - memcpy(dst, cda, bv->bv_len); + memcpy(dst, cda, bv.bv_len); kmem_cache_free(dasd_page_cache, (void *)((addr_t)cda & PAGE_MASK)); } diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 16814a8457f8..ebf41e228e55 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -808,12 +808,12 @@ static void dcssblk_make_request(struct request_queue *q, struct bio *bio) { struct dcssblk_dev_info *dev_info; - struct bio_vec *bvec; + struct bio_vec bvec; + struct bvec_iter iter; unsigned long index; unsigned long page_addr; unsigned long source_addr; unsigned long bytes_done; - int i; bytes_done = 0; dev_info = bio->bi_bdev->bd_disk->private_data; @@ -844,21 +844,21 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio) } index = (bio->bi_iter.bi_sector >> 3); - bio_for_each_segment(bvec, bio, i) { + bio_for_each_segment(bvec, bio, iter) { page_addr = (unsigned long) - page_address(bvec->bv_page) + bvec->bv_offset; + page_address(bvec.bv_page) + bvec.bv_offset; source_addr = dev_info->start + (index<<12) + bytes_done; - if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0) + if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0) // More paranoia. 
goto fail; if (bio_data_dir(bio) == READ) { memcpy((void*)page_addr, (void*)source_addr, - bvec->bv_len); + bvec.bv_len); } else { memcpy((void*)source_addr, (void*)page_addr, - bvec->bv_len); + bvec.bv_len); } - bytes_done += bvec->bv_len; + bytes_done += bvec.bv_len; } bio_endio(bio, 0); return; diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index d0ab5019d885..76bed1743db1 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -130,7 +130,7 @@ static void scm_request_prepare(struct scm_request *scmrq) struct aidaw *aidaw = scmrq->aidaw; struct msb *msb = &scmrq->aob->msb[0]; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; msb->bs = MSB_BS_4K; scmrq->aob->request.msb_count = 1; @@ -142,9 +142,9 @@ static void scm_request_prepare(struct scm_request *scmrq) msb->data_addr = (u64) aidaw; rq_for_each_segment(bv, scmrq->request, iter) { - WARN_ON(bv->bv_offset); - msb->blk_count += bv->bv_len >> 12; - aidaw->data_addr = (u64) page_address(bv->bv_page); + WARN_ON(bv.bv_offset); + msb->blk_count += bv.bv_len >> 12; + aidaw->data_addr = (u64) page_address(bv.bv_page); aidaw++; } } diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c index 27f930cd657f..9aae909d47a5 100644 --- a/drivers/s390/block/scm_blk_cluster.c +++ b/drivers/s390/block/scm_blk_cluster.c @@ -122,7 +122,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq) struct aidaw *aidaw = scmrq->aidaw; struct msb *msb = &scmrq->aob->msb[0]; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; int i = 0; u64 addr; @@ -163,7 +163,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq) i++; } rq_for_each_segment(bv, req, iter) { - aidaw->data_addr = (u64) page_address(bv->bv_page); + aidaw->data_addr = (u64) page_address(bv.bv_page); aidaw++; i++; } diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index dd4e73fdb323..3e530f9da8c4 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -184,11 +184,11 @@ static unsigned long xpram_highest_page_index(void) static void xpram_make_request(struct request_queue *q, struct bio *bio) { xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; - struct bio_vec *bvec; + struct bio_vec bvec; + struct bvec_iter iter; unsigned int index; unsigned long page_addr; unsigned long bytes; - int i; if ((bio->bi_iter.bi_sector & 7) != 0 || (bio->bi_iter.bi_size & 4095) != 0) @@ -200,10 +200,10 @@ static void xpram_make_request(struct request_queue *q, struct bio *bio) if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset) goto fail; index = (bio->bi_iter.bi_sector >> 3) + xdev->offset; - bio_for_each_segment(bvec, bio, i) { + bio_for_each_segment(bvec, bio, iter) { page_addr = (unsigned long) - kmap(bvec->bv_page) + bvec->bv_offset; - bytes = bvec->bv_len; + kmap(bvec.bv_page) + bvec.bv_offset; + bytes = bvec.bv_len; if ((page_addr & 4095) != 0 || (bytes & 4095) != 0) /* More paranoia. 
*/ goto fail; diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index 9d26637308be..7143e86af326 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c @@ -1901,7 +1901,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); Mpi2SmpPassthroughRequest_t *mpi_request; Mpi2SmpPassthroughReply_t *mpi_reply; - int rc, i; + int rc; u16 smid; u32 ioc_state; unsigned long timeleft; @@ -1916,7 +1916,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, void *pci_addr_out = NULL; u16 wait_state_count; struct request *rsp = req->next_rq; - struct bio_vec *bvec = NULL; + struct bio_vec bvec; + struct bvec_iter iter; if (!rsp) { printk(MPT2SAS_ERR_FMT "%s: the smp response space is " @@ -1955,11 +1956,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, goto out; } - bio_for_each_segment(bvec, req->bio, i) { + bio_for_each_segment(bvec, req->bio, iter) { memcpy(pci_addr_out + offset, - page_address(bvec->bv_page) + bvec->bv_offset, - bvec->bv_len); - offset += bvec->bv_len; + page_address(bvec.bv_page) + bvec.bv_offset, + bvec.bv_len); + offset += bvec.bv_len; } } else { dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), @@ -2106,19 +2107,19 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, u32 offset = 0; u32 bytes_to_copy = le16_to_cpu(mpi_reply->ResponseDataLength); - bio_for_each_segment(bvec, rsp->bio, i) { - if (bytes_to_copy <= bvec->bv_len) { - memcpy(page_address(bvec->bv_page) + - bvec->bv_offset, pci_addr_in + + bio_for_each_segment(bvec, rsp->bio, iter) { + if (bytes_to_copy <= bvec.bv_len) { + memcpy(page_address(bvec.bv_page) + + bvec.bv_offset, pci_addr_in + offset, bytes_to_copy); break; } else { - memcpy(page_address(bvec->bv_page) + - bvec->bv_offset, pci_addr_in + - offset, bvec->bv_len); - bytes_to_copy -= bvec->bv_len; + memcpy(page_address(bvec.bv_page) + + bvec.bv_offset, pci_addr_in + + offset, bvec.bv_len); + bytes_to_copy -= bvec.bv_len; } - offset += bvec->bv_len; + offset += bvec.bv_len; } } } else { diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index e771a88c6a74..196a67f2e95f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -1884,7 +1884,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); Mpi2SmpPassthroughRequest_t *mpi_request; Mpi2SmpPassthroughReply_t *mpi_reply; - int rc, i; + int rc; u16 smid; u32 ioc_state; unsigned long timeleft; @@ -1898,7 +1898,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, void *pci_addr_out = NULL; u16 wait_state_count; struct request *rsp = req->next_rq; - struct bio_vec *bvec = NULL; + struct bio_vec bvec; + struct bvec_iter iter; if (!rsp) { pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n", @@ -1938,11 +1939,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, goto out; } - bio_for_each_segment(bvec, req->bio, i) { + bio_for_each_segment(bvec, req->bio, iter) { memcpy(pci_addr_out + offset, - page_address(bvec->bv_page) + bvec->bv_offset, - bvec->bv_len); - offset += bvec->bv_len; + page_address(bvec.bv_page) + bvec.bv_offset, + bvec.bv_len); + offset += bvec.bv_len; } } else { dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), @@ -2067,19 +2068,19 @@ 
_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, u32 offset = 0; u32 bytes_to_copy = le16_to_cpu(mpi_reply->ResponseDataLength); - bio_for_each_segment(bvec, rsp->bio, i) { - if (bytes_to_copy <= bvec->bv_len) { - memcpy(page_address(bvec->bv_page) + - bvec->bv_offset, pci_addr_in + + bio_for_each_segment(bvec, rsp->bio, iter) { + if (bytes_to_copy <= bvec.bv_len) { + memcpy(page_address(bvec.bv_page) + + bvec.bv_offset, pci_addr_in + offset, bytes_to_copy); break; } else { - memcpy(page_address(bvec->bv_page) + - bvec->bv_offset, pci_addr_in + - offset, bvec->bv_len); - bytes_to_copy -= bvec->bv_len; + memcpy(page_address(bvec.bv_page) + + bvec.bv_offset, pci_addr_in + + offset, bvec.bv_len); + bytes_to_copy -= bvec.bv_len; } - offset += bvec->bv_len; + offset += bvec.bv_len; } } } else { diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c index 53741be754b4..581ff78be1a2 100644 --- a/drivers/staging/lustre/lustre/llite/lloop.c +++ b/drivers/staging/lustre/lustre/llite/lloop.c @@ -194,10 +194,10 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) struct cl_object *obj = ll_i2info(inode)->lli_clob; pgoff_t offset; int ret; - int i; int rw; obd_count page_count = 0; - struct bio_vec *bvec; + struct bio_vec bvec; + struct bvec_iter iter; struct bio *bio; ssize_t bytes; @@ -221,14 +221,14 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) LASSERT(rw == bio->bi_rw); offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; - bio_for_each_segment(bvec, bio, i) { - BUG_ON(bvec->bv_offset != 0); - BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE); + bio_for_each_segment(bvec, bio, iter) { + BUG_ON(bvec.bv_offset != 0); + BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE); - pages[page_count] = bvec->bv_page; + pages[page_count] = bvec.bv_page; offsets[page_count] = offset; page_count++; - offset += bvec->bv_len; + offset += bvec.bv_len; } LASSERT(page_count <= LLOOP_MAX_SEGMENTS); } diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index e9e6f984092b..6f988382b174 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -672,9 +672,10 @@ static ssize_t reset_store(struct device *dev, static void __zram_make_request(struct zram *zram, struct bio *bio, int rw) { - int i, offset; + int offset; u32 index; - struct bio_vec *bvec; + struct bio_vec bvec; + struct bvec_iter iter; switch (rw) { case READ: @@ -689,33 +690,33 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw) offset = (bio->bi_iter.bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; - bio_for_each_segment(bvec, bio, i) { + bio_for_each_segment(bvec, bio, iter) { int max_transfer_size = PAGE_SIZE - offset; - if (bvec->bv_len > max_transfer_size) { + if (bvec.bv_len > max_transfer_size) { /* * zram_bvec_rw() can only make operation on a single * zram page. Split the bio vector. 
*/ struct bio_vec bv; - bv.bv_page = bvec->bv_page; + bv.bv_page = bvec.bv_page; bv.bv_len = max_transfer_size; - bv.bv_offset = bvec->bv_offset; + bv.bv_offset = bvec.bv_offset; if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0) goto out; - bv.bv_len = bvec->bv_len - max_transfer_size; + bv.bv_len = bvec.bv_len - max_transfer_size; bv.bv_offset += max_transfer_size; if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0) goto out; } else - if (zram_bvec_rw(zram, bvec, index, offset, bio, rw) + if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw) < 0) goto out; - update_position(&index, &offset, bvec); + update_position(&index, &offset, &bvec); } set_bit(BIO_UPTODATE, &bio->bi_flags); diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 08e3d1388c65..9127db86f315 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c @@ -299,25 +299,26 @@ static void bio_integrity_generate(struct bio *bio) { struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); struct blk_integrity_exchg bix; - struct bio_vec *bv; + struct bio_vec bv; + struct bvec_iter iter; sector_t sector = bio->bi_iter.bi_sector; - unsigned int i, sectors, total; + unsigned int sectors, total; void *prot_buf = bio->bi_integrity->bip_buf; total = 0; bix.disk_name = bio->bi_bdev->bd_disk->disk_name; bix.sector_size = bi->sector_size; - bio_for_each_segment(bv, bio, i) { - void *kaddr = kmap_atomic(bv->bv_page); - bix.data_buf = kaddr + bv->bv_offset; - bix.data_size = bv->bv_len; + bio_for_each_segment(bv, bio, iter) { + void *kaddr = kmap_atomic(bv.bv_page); + bix.data_buf = kaddr + bv.bv_offset; + bix.data_size = bv.bv_len; bix.prot_buf = prot_buf; bix.sector = sector; bi->generate_fn(&bix); - sectors = bv->bv_len / bi->sector_size; + sectors = bv.bv_len / bi->sector_size; sector += sectors; prot_buf += sectors * bi->tuple_size; total += sectors * bi->tuple_size; @@ -441,19 +442,20 @@ static int bio_integrity_verify(struct bio *bio) { struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); struct blk_integrity_exchg bix; - struct bio_vec *bv; + struct bio_vec bv; + struct bvec_iter iter; sector_t sector = bio->bi_integrity->bip_sector; - unsigned int i, sectors, total, ret; + unsigned int sectors, total, ret; void *prot_buf = bio->bi_integrity->bip_buf; ret = total = 0; bix.disk_name = bio->bi_bdev->bd_disk->disk_name; bix.sector_size = bi->sector_size; - bio_for_each_segment(bv, bio, i) { - void *kaddr = kmap_atomic(bv->bv_page); - bix.data_buf = kaddr + bv->bv_offset; - bix.data_size = bv->bv_len; + bio_for_each_segment(bv, bio, iter) { + void *kaddr = kmap_atomic(bv.bv_page); + bix.data_buf = kaddr + bv.bv_offset; + bix.data_size = bv.bv_len; bix.prot_buf = prot_buf; bix.sector = sector; @@ -464,7 +466,7 @@ static int bio_integrity_verify(struct bio *bio) return ret; } - sectors = bv->bv_len / bi->sector_size; + sectors = bv.bv_len / bi->sector_size; sector += sectors; prot_buf += sectors * bi->tuple_size; total += sectors * bi->tuple_size; diff --git a/fs/bio.c b/fs/bio.c index 7bb281fc3d5c..8b7f14a95503 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -473,13 +473,13 @@ EXPORT_SYMBOL(bio_alloc_bioset); void zero_fill_bio(struct bio *bio) { unsigned long flags; - struct bio_vec *bv; - int i; + struct bio_vec bv; + struct bvec_iter iter; - bio_for_each_segment(bv, bio, i) { - char *data = bvec_kmap_irq(bv, &flags); - memset(data, 0, bv->bv_len); - flush_dcache_page(bv->bv_page); + bio_for_each_segment(bv, bio, iter) { + char *data = bvec_kmap_irq(&bv, &flags); + memset(data, 0, bv.bv_len); + 
flush_dcache_page(bv.bv_page); bvec_kunmap_irq(data, &flags); } } @@ -1687,11 +1687,11 @@ void bio_check_pages_dirty(struct bio *bio) #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE void bio_flush_dcache_pages(struct bio *bi) { - int i; - struct bio_vec *bvec; + struct bio_vec bvec; + struct bvec_iter iter; - bio_for_each_segment(bvec, bi, i) - flush_dcache_page(bvec->bv_page); + bio_for_each_segment(bvec, bi, iter) + flush_dcache_page(bvec.bv_page); } EXPORT_SYMBOL(bio_flush_dcache_pages); #endif @@ -1840,7 +1840,7 @@ void bio_trim(struct bio *bio, int offset, int size) bio->bi_iter.bi_idx = 0; } /* Make sure vcnt and last bv are not too big */ - bio_for_each_segment(bvec, bio, i) { + bio_for_each_segment_all(bvec, bio, i) { if (sofar + bvec->bv_len > size) bvec->bv_len = size - sofar; if (bvec->bv_len == 0) { diff --git a/include/linux/bio.h b/include/linux/bio.h index 9f182fcbe714..c16adb5f69f8 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -63,10 +63,13 @@ */ #define bio_iovec_idx(bio, idx) (&((bio)->bi_io_vec[(idx)])) #define __bio_iovec(bio) bio_iovec_idx((bio), (bio)->bi_iter.bi_idx) -#define bio_iovec(bio) (*__bio_iovec(bio)) + +#define bio_iter_iovec(bio, iter) ((bio)->bi_io_vec[(iter).bi_idx]) #define bio_page(bio) (bio_iovec((bio)).bv_page) #define bio_offset(bio) (bio_iovec((bio)).bv_offset) +#define bio_iovec(bio) (*__bio_iovec(bio)) + #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_iter.bi_idx) #define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9) #define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio))) @@ -133,15 +136,6 @@ static inline void *bio_data(struct bio *bio) #define bio_io_error(bio) bio_endio((bio), -EIO) -/* - * drivers should not use the __ version unless they _really_ know what - * they're doing - */ -#define __bio_for_each_segment(bvl, bio, i, start_idx) \ - for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx); \ - i < (bio)->bi_vcnt; \ - bvl++, i++) - /* * drivers should _never_ use the all version - the bio may have been split * before it got to the driver and the driver won't own all of it @@ -151,10 +145,16 @@ static inline void *bio_data(struct bio *bio) bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \ i++) -#define bio_for_each_segment(bvl, bio, i) \ - for (i = (bio)->bi_iter.bi_idx; \ - bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \ - i++) +#define __bio_for_each_segment(bvl, bio, iter, start) \ + for (iter = (start); \ + bvl = bio_iter_iovec((bio), (iter)), \ + (iter).bi_idx < (bio)->bi_vcnt; \ + (iter).bi_idx++) + +#define bio_for_each_segment(bvl, bio, iter) \ + __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter) + +#define bio_iter_last(bio, iter) ((iter).bi_idx == (bio)->bi_vcnt - 1) /* * get a reference to a bio, so it won't disappear. 
the intended use is diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1b135d49b279..337b92a54658 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -735,7 +735,7 @@ struct rq_map_data { }; struct req_iterator { - int i; + struct bvec_iter iter; struct bio *bio; }; @@ -748,10 +748,11 @@ struct req_iterator { #define rq_for_each_segment(bvl, _rq, _iter) \ __rq_for_each_bio(_iter.bio, _rq) \ - bio_for_each_segment(bvl, _iter.bio, _iter.i) + bio_for_each_segment(bvl, _iter.bio, _iter.iter) #define rq_iter_last(rq, _iter) \ - (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1) + (_iter.bio->bi_next == NULL && \ + bio_iter_last(_iter.bio, _iter.iter)) #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" diff --git a/mm/bounce.c b/mm/bounce.c index 5a7d58fb883b..523918b8c6dc 100644 --- a/mm/bounce.c +++ b/mm/bounce.c @@ -98,27 +98,24 @@ int init_emergency_isa_pool(void) static void copy_to_high_bio_irq(struct bio *to, struct bio *from) { unsigned char *vfrom; - struct bio_vec *tovec, *fromvec; - int i; - - bio_for_each_segment(tovec, to, i) { - fromvec = from->bi_io_vec + i; - - /* - * not bounced - */ - if (tovec->bv_page == fromvec->bv_page) - continue; - - /* - * fromvec->bv_offset and fromvec->bv_len might have been - * modified by the block layer, so use the original copy, - * bounce_copy_vec already uses tovec->bv_len - */ - vfrom = page_address(fromvec->bv_page) + tovec->bv_offset; + struct bio_vec tovec, *fromvec = from->bi_io_vec; + struct bvec_iter iter; + + bio_for_each_segment(tovec, to, iter) { + if (tovec.bv_page != fromvec->bv_page) { + /* + * fromvec->bv_offset and fromvec->bv_len might have + * been modified by the block layer, so use the original + * copy, bounce_copy_vec already uses tovec->bv_len + */ + vfrom = page_address(fromvec->bv_page) + + tovec.bv_offset; + + bounce_copy_vec(&tovec, vfrom); + flush_dcache_page(tovec.bv_page); + } - bounce_copy_vec(tovec, vfrom); - flush_dcache_page(tovec->bv_page); + fromvec++; } } @@ -201,13 +198,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, { struct bio *bio; int rw = bio_data_dir(*bio_orig); - struct bio_vec *to, *from; + struct bio_vec *to, from; + struct bvec_iter iter; unsigned i; if (force) goto bounce; - bio_for_each_segment(from, *bio_orig, i) - if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q)) + bio_for_each_segment(from, *bio_orig, iter) + if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q)) goto bounce; return; -- cgit v1.2.3 From 791badbdb3e4fc1001ee3bcdaedc6d4f167fcbe8 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 5 Dec 2013 12:28:02 -0500 Subject: memcg: convert away from cftype->read() and ->read_map() In preparation of conversion to kernfs, cgroup file handling is being consolidated so that it can be easily mapped to the seq_file based interface of kernfs. cftype->read_map() doesn't add any value and being replaced with ->read_seq_string(), and all users of cftype->read() can be easily served, usually better, by seq_file and other methods. Update mem_cgroup_read() to return u64 instead of printing itself and rename it to mem_cgroup_read_u64(), and update mem_cgroup_oom_control_read() to use ->read_seq_string() instead of ->read_map(). This patch doesn't make any visible behavior changes. 
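For reference, the shape of this conversion can be sketched as a pair of cftype handlers. This is a minimal, illustrative sketch only: the names (example_bytes_read, example_state_read, example_files) and the placeholder values are hypothetical and not taken from the patch; the callback signatures follow the css-based ->read_u64 and ->read_seq_string forms visible in the memcg hunks below.

/*
 * Illustrative sketch (hypothetical names): a value that a cftype->read()
 * handler used to format into a user buffer is instead returned as a u64
 * and printed by the cgroup core, and a ->read_map() handler becomes a
 * ->read_seq_string() handler writing "key value" lines to a seq_file.
 */
#include <linux/cgroup.h>
#include <linux/seq_file.h>

/* ->read_u64: just return the value; the cgroup core does the formatting. */
static u64 example_bytes_read(struct cgroup_subsys_state *css,
			      struct cftype *cft)
{
	return 4096;	/* placeholder for a per-css counter */
}

/* ->read_seq_string: emit the key/value pairs directly into the seq_file. */
static int example_state_read(struct cgroup_subsys_state *css,
			      struct cftype *cft, struct seq_file *sf)
{
	seq_printf(sf, "enabled %d\n", 1);	/* placeholder values */
	seq_printf(sf, "active %d\n", 0);
	return 0;
}

static struct cftype example_files[] = {
	{
		.name = "bytes",
		.read_u64 = example_bytes_read,		/* was .read */
	},
	{
		.name = "state",
		.read_seq_string = example_state_read,	/* was .read_map */
	},
	{ }	/* terminator */
};

The memcg hunks that follow apply exactly this pattern: mem_cgroup_read() becomes mem_cgroup_read_u64() wired up through .read_u64, and mem_cgroup_oom_control_read() switches from the cgroup_map_cb fill interface to seq_printf() on a seq_file.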
Signed-off-by: Tejun Heo Acked-by: Michal Hocko Acked-by: Li Zefan Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki --- mm/memcontrol.c | 49 +++++++++++++++++++++---------------------------- 1 file changed, 21 insertions(+), 28 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7aa0d405b148..f149521a77e6 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5150,14 +5150,12 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) return val << PAGE_SHIFT; } -static ssize_t mem_cgroup_read(struct cgroup_subsys_state *css, - struct cftype *cft, struct file *file, - char __user *buf, size_t nbytes, loff_t *ppos) +static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, + struct cftype *cft) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); - char str[64]; u64 val; - int name, len; + int name; enum res_type type; type = MEMFILE_TYPE(cft->private); @@ -5183,8 +5181,7 @@ static ssize_t mem_cgroup_read(struct cgroup_subsys_state *css, BUG(); } - len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val); - return simple_read_from_buffer(buf, nbytes, ppos, str, len); + return val; } static int memcg_update_kmem_limit(struct cgroup_subsys_state *css, u64 val) @@ -5911,16 +5908,12 @@ static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, } static int mem_cgroup_oom_control_read(struct cgroup_subsys_state *css, - struct cftype *cft, struct cgroup_map_cb *cb) + struct cftype *cft, struct seq_file *sf) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); - cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable); - - if (atomic_read(&memcg->under_oom)) - cb->fill(cb, "under_oom", 1); - else - cb->fill(cb, "under_oom", 0); + seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); + seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom)); return 0; } @@ -6239,31 +6232,31 @@ static struct cftype mem_cgroup_files[] = { { .name = "usage_in_bytes", .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), - .read = mem_cgroup_read, + .read_u64 = mem_cgroup_read_u64, }, { .name = "max_usage_in_bytes", .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), .trigger = mem_cgroup_reset, - .read = mem_cgroup_read, + .read_u64 = mem_cgroup_read_u64, }, { .name = "limit_in_bytes", .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), .write_string = mem_cgroup_write, - .read = mem_cgroup_read, + .read_u64 = mem_cgroup_read_u64, }, { .name = "soft_limit_in_bytes", .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), .write_string = mem_cgroup_write, - .read = mem_cgroup_read, + .read_u64 = mem_cgroup_read_u64, }, { .name = "failcnt", .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), .trigger = mem_cgroup_reset, - .read = mem_cgroup_read, + .read_u64 = mem_cgroup_read_u64, }, { .name = "stat", @@ -6297,7 +6290,7 @@ static struct cftype mem_cgroup_files[] = { }, { .name = "oom_control", - .read_map = mem_cgroup_oom_control_read, + .read_seq_string = mem_cgroup_oom_control_read, .write_u64 = mem_cgroup_oom_control_write, .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), }, @@ -6315,24 +6308,24 @@ static struct cftype mem_cgroup_files[] = { .name = "kmem.limit_in_bytes", .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), .write_string = mem_cgroup_write, - .read = mem_cgroup_read, + .read_u64 = mem_cgroup_read_u64, }, { .name = "kmem.usage_in_bytes", .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), - .read = mem_cgroup_read, + .read_u64 = mem_cgroup_read_u64, }, { .name = "kmem.failcnt", .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 
.trigger = mem_cgroup_reset, - .read = mem_cgroup_read, + .read_u64 = mem_cgroup_read_u64, }, { .name = "kmem.max_usage_in_bytes", .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), .trigger = mem_cgroup_reset, - .read = mem_cgroup_read, + .read_u64 = mem_cgroup_read_u64, }, #ifdef CONFIG_SLABINFO { @@ -6349,25 +6342,25 @@ static struct cftype memsw_cgroup_files[] = { { .name = "memsw.usage_in_bytes", .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), - .read = mem_cgroup_read, + .read_u64 = mem_cgroup_read_u64, }, { .name = "memsw.max_usage_in_bytes", .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), .trigger = mem_cgroup_reset, - .read = mem_cgroup_read, + .read_u64 = mem_cgroup_read_u64, }, { .name = "memsw.limit_in_bytes", .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), .write_string = mem_cgroup_write, - .read = mem_cgroup_read, + .read_u64 = mem_cgroup_read_u64, }, { .name = "memsw.failcnt", .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), .trigger = mem_cgroup_reset, - .read = mem_cgroup_read, + .read_u64 = mem_cgroup_read_u64, }, { }, /* terminate */ }; -- cgit v1.2.3 From 716f479d279fb456f58be44180d7479da75e5a4e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 5 Dec 2013 12:28:03 -0500 Subject: hugetlb_cgroup: convert away from cftype->read() In preparation of conversion to kernfs, cgroup file handling is being consolidated so that it can be easily mapped to the seq_file based interface of kernfs. All users of cftype->read() can be easily served, usually better, by seq_file and other methods. Update hugetlb_cgroup_read() to return u64 instead of printing itself and rename it to hugetlb_cgroup_read_u64(). This patch doesn't make any visible behavior changes. Signed-off-by: Tejun Heo Reviewed-by: Michal Hocko Acked-by: Li Zefan Cc: Aneesh Kumar K.V Cc: Johannes Weiner --- mm/hugetlb_cgroup.c | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c index bda8e44f6fde..d747a84e09b0 100644 --- a/mm/hugetlb_cgroup.c +++ b/mm/hugetlb_cgroup.c @@ -242,22 +242,16 @@ void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, return; } -static ssize_t hugetlb_cgroup_read(struct cgroup_subsys_state *css, - struct cftype *cft, struct file *file, - char __user *buf, size_t nbytes, - loff_t *ppos) +static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css, + struct cftype *cft) { - u64 val; - char str[64]; - int idx, name, len; + int idx, name; struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css); idx = MEMFILE_IDX(cft->private); name = MEMFILE_ATTR(cft->private); - val = res_counter_read_u64(&h_cg->hugepage[idx], name); - len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val); - return simple_read_from_buffer(buf, nbytes, ppos, str, len); + return res_counter_read_u64(&h_cg->hugepage[idx], name); } static int hugetlb_cgroup_write(struct cgroup_subsys_state *css, @@ -337,28 +331,28 @@ static void __init __hugetlb_cgroup_file_init(int idx) cft = &h->cgroup_files[0]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf); cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT); - cft->read = hugetlb_cgroup_read; + cft->read_u64 = hugetlb_cgroup_read_u64; cft->write_string = hugetlb_cgroup_write; /* Add the usage file */ cft = &h->cgroup_files[1]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf); cft->private = MEMFILE_PRIVATE(idx, RES_USAGE); - cft->read = hugetlb_cgroup_read; + cft->read_u64 = hugetlb_cgroup_read_u64; /* Add the MAX usage file */ 
cft = &h->cgroup_files[2]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf); cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE); cft->trigger = hugetlb_cgroup_reset; - cft->read = hugetlb_cgroup_read; + cft->read_u64 = hugetlb_cgroup_read_u64; /* Add the failcntfile */ cft = &h->cgroup_files[3]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf); cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT); cft->trigger = hugetlb_cgroup_reset; - cft->read = hugetlb_cgroup_read; + cft->read_u64 = hugetlb_cgroup_read_u64; /* NULL terminate the last cft */ cft = &h->cgroup_files[4]; -- cgit v1.2.3 From 2da8ca822d49c8b8781800ad155aaa00e7bb5f1a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 5 Dec 2013 12:28:04 -0500 Subject: cgroup: replace cftype->read_seq_string() with cftype->seq_show() In preparation of conversion to kernfs, cgroup file handling is updated so that it can be easily mapped to kernfs. This patch replaces cftype->read_seq_string() with cftype->seq_show() which is not limited to single_open() operation and will map directcly to kernfs seq_file interface. The conversions are mechanical. As ->seq_show() doesn't have @css and @cft, the functions which make use of them are converted to use seq_css() and seq_cft() respectively. In several occassions, e.f. if it has seq_string in its name, the function name is updated to fit the new method better. This patch does not introduce any behavior changes. Signed-off-by: Tejun Heo Acked-by: Aristeu Rozanski Acked-by: Vivek Goyal Acked-by: Michal Hocko Acked-by: Daniel Wagner Acked-by: Li Zefan Cc: Jens Axboe Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Cc: Neil Horman --- block/blk-throttle.c | 35 ++++++------- block/cfq-iosched.c | 131 ++++++++++++++++++++-------------------------- include/linux/cgroup.h | 9 ++-- kernel/cgroup.c | 34 ++++++------ kernel/cgroup_freezer.c | 7 ++- kernel/cpuset.c | 12 ++--- kernel/sched/core.c | 7 ++- kernel/sched/cpuacct.c | 14 +++-- mm/memcontrol.c | 28 +++++----- net/core/netprio_cgroup.c | 8 +-- security/device_cgroup.c | 7 ++- 11 files changed, 128 insertions(+), 164 deletions(-) (limited to 'mm') diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 06534049afba..a760857e6b62 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -1303,13 +1303,10 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf, return __blkg_prfill_rwstat(sf, pd, &rwstat); } -static int tg_print_cpu_rwstat(struct cgroup_subsys_state *css, - struct cftype *cft, struct seq_file *sf) +static int tg_print_cpu_rwstat(struct seq_file *sf, void *v) { - struct blkcg *blkcg = css_to_blkcg(css); - - blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl, - cft->private, true); + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_cpu_rwstat, + &blkcg_policy_throtl, seq_cft(sf)->private, true); return 0; } @@ -1335,19 +1332,17 @@ static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd, return __blkg_prfill_u64(sf, pd, v); } -static int tg_print_conf_u64(struct cgroup_subsys_state *css, - struct cftype *cft, struct seq_file *sf) +static int tg_print_conf_u64(struct seq_file *sf, void *v) { - blkcg_print_blkgs(sf, css_to_blkcg(css), tg_prfill_conf_u64, - &blkcg_policy_throtl, cft->private, false); + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64, + &blkcg_policy_throtl, seq_cft(sf)->private, false); return 0; } -static int tg_print_conf_uint(struct cgroup_subsys_state *css, - struct cftype 
*cft, struct seq_file *sf) +static int tg_print_conf_uint(struct seq_file *sf, void *v) { - blkcg_print_blkgs(sf, css_to_blkcg(css), tg_prfill_conf_uint, - &blkcg_policy_throtl, cft->private, false); + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint, + &blkcg_policy_throtl, seq_cft(sf)->private, false); return 0; } @@ -1428,40 +1423,40 @@ static struct cftype throtl_files[] = { { .name = "throttle.read_bps_device", .private = offsetof(struct throtl_grp, bps[READ]), - .read_seq_string = tg_print_conf_u64, + .seq_show = tg_print_conf_u64, .write_string = tg_set_conf_u64, .max_write_len = 256, }, { .name = "throttle.write_bps_device", .private = offsetof(struct throtl_grp, bps[WRITE]), - .read_seq_string = tg_print_conf_u64, + .seq_show = tg_print_conf_u64, .write_string = tg_set_conf_u64, .max_write_len = 256, }, { .name = "throttle.read_iops_device", .private = offsetof(struct throtl_grp, iops[READ]), - .read_seq_string = tg_print_conf_uint, + .seq_show = tg_print_conf_uint, .write_string = tg_set_conf_uint, .max_write_len = 256, }, { .name = "throttle.write_iops_device", .private = offsetof(struct throtl_grp, iops[WRITE]), - .read_seq_string = tg_print_conf_uint, + .seq_show = tg_print_conf_uint, .write_string = tg_set_conf_uint, .max_write_len = 256, }, { .name = "throttle.io_service_bytes", .private = offsetof(struct tg_stats_cpu, service_bytes), - .read_seq_string = tg_print_cpu_rwstat, + .seq_show = tg_print_cpu_rwstat, }, { .name = "throttle.io_serviced", .private = offsetof(struct tg_stats_cpu, serviced), - .read_seq_string = tg_print_cpu_rwstat, + .seq_show = tg_print_cpu_rwstat, }, { } /* terminate */ }; diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 4d5cec1ad80d..744833b630c6 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -1632,11 +1632,11 @@ static u64 cfqg_prfill_weight_device(struct seq_file *sf, return __blkg_prfill_u64(sf, pd, cfqg->dev_weight); } -static int cfqg_print_weight_device(struct cgroup_subsys_state *css, - struct cftype *cft, struct seq_file *sf) +static int cfqg_print_weight_device(struct seq_file *sf, void *v) { - blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_weight_device, - &blkcg_policy_cfq, 0, false); + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), + cfqg_prfill_weight_device, &blkcg_policy_cfq, + 0, false); return 0; } @@ -1650,26 +1650,23 @@ static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf, return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight); } -static int cfqg_print_leaf_weight_device(struct cgroup_subsys_state *css, - struct cftype *cft, - struct seq_file *sf) +static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v) { - blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_leaf_weight_device, - &blkcg_policy_cfq, 0, false); + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), + cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq, + 0, false); return 0; } -static int cfq_print_weight(struct cgroup_subsys_state *css, struct cftype *cft, - struct seq_file *sf) +static int cfq_print_weight(struct seq_file *sf, void *v) { - seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_weight); + seq_printf(sf, "%u\n", css_to_blkcg(seq_css(sf))->cfq_weight); return 0; } -static int cfq_print_leaf_weight(struct cgroup_subsys_state *css, - struct cftype *cft, struct seq_file *sf) +static int cfq_print_leaf_weight(struct seq_file *sf, void *v) { - seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_leaf_weight); + seq_printf(sf, "%u\n", css_to_blkcg(seq_css(sf))->cfq_leaf_weight); return 0; } @@ 
-1762,23 +1759,17 @@ static int cfq_set_leaf_weight(struct cgroup_subsys_state *css, return __cfq_set_weight(css, cft, val, true); } -static int cfqg_print_stat(struct cgroup_subsys_state *css, struct cftype *cft, - struct seq_file *sf) +static int cfqg_print_stat(struct seq_file *sf, void *v) { - struct blkcg *blkcg = css_to_blkcg(css); - - blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq, - cft->private, false); + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat, + &blkcg_policy_cfq, seq_cft(sf)->private, false); return 0; } -static int cfqg_print_rwstat(struct cgroup_subsys_state *css, - struct cftype *cft, struct seq_file *sf) +static int cfqg_print_rwstat(struct seq_file *sf, void *v) { - struct blkcg *blkcg = css_to_blkcg(css); - - blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq, - cft->private, true); + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat, + &blkcg_policy_cfq, seq_cft(sf)->private, true); return 0; } @@ -1798,23 +1789,19 @@ static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf, return __blkg_prfill_rwstat(sf, pd, &sum); } -static int cfqg_print_stat_recursive(struct cgroup_subsys_state *css, - struct cftype *cft, struct seq_file *sf) +static int cfqg_print_stat_recursive(struct seq_file *sf, void *v) { - struct blkcg *blkcg = css_to_blkcg(css); - - blkcg_print_blkgs(sf, blkcg, cfqg_prfill_stat_recursive, - &blkcg_policy_cfq, cft->private, false); + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), + cfqg_prfill_stat_recursive, &blkcg_policy_cfq, + seq_cft(sf)->private, false); return 0; } -static int cfqg_print_rwstat_recursive(struct cgroup_subsys_state *css, - struct cftype *cft, struct seq_file *sf) +static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v) { - struct blkcg *blkcg = css_to_blkcg(css); - - blkcg_print_blkgs(sf, blkcg, cfqg_prfill_rwstat_recursive, - &blkcg_policy_cfq, cft->private, true); + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), + cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq, + seq_cft(sf)->private, true); return 0; } @@ -1835,13 +1822,11 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf, } /* print avg_queue_size */ -static int cfqg_print_avg_queue_size(struct cgroup_subsys_state *css, - struct cftype *cft, struct seq_file *sf) +static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v) { - struct blkcg *blkcg = css_to_blkcg(css); - - blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size, - &blkcg_policy_cfq, 0, false); + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), + cfqg_prfill_avg_queue_size, &blkcg_policy_cfq, + 0, false); return 0; } #endif /* CONFIG_DEBUG_BLK_CGROUP */ @@ -1851,14 +1836,14 @@ static struct cftype cfq_blkcg_files[] = { { .name = "weight_device", .flags = CFTYPE_ONLY_ON_ROOT, - .read_seq_string = cfqg_print_leaf_weight_device, + .seq_show = cfqg_print_leaf_weight_device, .write_string = cfqg_set_leaf_weight_device, .max_write_len = 256, }, { .name = "weight", .flags = CFTYPE_ONLY_ON_ROOT, - .read_seq_string = cfq_print_leaf_weight, + .seq_show = cfq_print_leaf_weight, .write_u64 = cfq_set_leaf_weight, }, @@ -1866,26 +1851,26 @@ static struct cftype cfq_blkcg_files[] = { { .name = "weight_device", .flags = CFTYPE_NOT_ON_ROOT, - .read_seq_string = cfqg_print_weight_device, + .seq_show = cfqg_print_weight_device, .write_string = cfqg_set_weight_device, .max_write_len = 256, }, { .name = "weight", .flags = CFTYPE_NOT_ON_ROOT, - .read_seq_string = cfq_print_weight, + .seq_show = cfq_print_weight, .write_u64 = 
cfq_set_weight, }, { .name = "leaf_weight_device", - .read_seq_string = cfqg_print_leaf_weight_device, + .seq_show = cfqg_print_leaf_weight_device, .write_string = cfqg_set_leaf_weight_device, .max_write_len = 256, }, { .name = "leaf_weight", - .read_seq_string = cfq_print_leaf_weight, + .seq_show = cfq_print_leaf_weight, .write_u64 = cfq_set_leaf_weight, }, @@ -1893,114 +1878,114 @@ static struct cftype cfq_blkcg_files[] = { { .name = "time", .private = offsetof(struct cfq_group, stats.time), - .read_seq_string = cfqg_print_stat, + .seq_show = cfqg_print_stat, }, { .name = "sectors", .private = offsetof(struct cfq_group, stats.sectors), - .read_seq_string = cfqg_print_stat, + .seq_show = cfqg_print_stat, }, { .name = "io_service_bytes", .private = offsetof(struct cfq_group, stats.service_bytes), - .read_seq_string = cfqg_print_rwstat, + .seq_show = cfqg_print_rwstat, }, { .name = "io_serviced", .private = offsetof(struct cfq_group, stats.serviced), - .read_seq_string = cfqg_print_rwstat, + .seq_show = cfqg_print_rwstat, }, { .name = "io_service_time", .private = offsetof(struct cfq_group, stats.service_time), - .read_seq_string = cfqg_print_rwstat, + .seq_show = cfqg_print_rwstat, }, { .name = "io_wait_time", .private = offsetof(struct cfq_group, stats.wait_time), - .read_seq_string = cfqg_print_rwstat, + .seq_show = cfqg_print_rwstat, }, { .name = "io_merged", .private = offsetof(struct cfq_group, stats.merged), - .read_seq_string = cfqg_print_rwstat, + .seq_show = cfqg_print_rwstat, }, { .name = "io_queued", .private = offsetof(struct cfq_group, stats.queued), - .read_seq_string = cfqg_print_rwstat, + .seq_show = cfqg_print_rwstat, }, /* the same statictics which cover the cfqg and its descendants */ { .name = "time_recursive", .private = offsetof(struct cfq_group, stats.time), - .read_seq_string = cfqg_print_stat_recursive, + .seq_show = cfqg_print_stat_recursive, }, { .name = "sectors_recursive", .private = offsetof(struct cfq_group, stats.sectors), - .read_seq_string = cfqg_print_stat_recursive, + .seq_show = cfqg_print_stat_recursive, }, { .name = "io_service_bytes_recursive", .private = offsetof(struct cfq_group, stats.service_bytes), - .read_seq_string = cfqg_print_rwstat_recursive, + .seq_show = cfqg_print_rwstat_recursive, }, { .name = "io_serviced_recursive", .private = offsetof(struct cfq_group, stats.serviced), - .read_seq_string = cfqg_print_rwstat_recursive, + .seq_show = cfqg_print_rwstat_recursive, }, { .name = "io_service_time_recursive", .private = offsetof(struct cfq_group, stats.service_time), - .read_seq_string = cfqg_print_rwstat_recursive, + .seq_show = cfqg_print_rwstat_recursive, }, { .name = "io_wait_time_recursive", .private = offsetof(struct cfq_group, stats.wait_time), - .read_seq_string = cfqg_print_rwstat_recursive, + .seq_show = cfqg_print_rwstat_recursive, }, { .name = "io_merged_recursive", .private = offsetof(struct cfq_group, stats.merged), - .read_seq_string = cfqg_print_rwstat_recursive, + .seq_show = cfqg_print_rwstat_recursive, }, { .name = "io_queued_recursive", .private = offsetof(struct cfq_group, stats.queued), - .read_seq_string = cfqg_print_rwstat_recursive, + .seq_show = cfqg_print_rwstat_recursive, }, #ifdef CONFIG_DEBUG_BLK_CGROUP { .name = "avg_queue_size", - .read_seq_string = cfqg_print_avg_queue_size, + .seq_show = cfqg_print_avg_queue_size, }, { .name = "group_wait_time", .private = offsetof(struct cfq_group, stats.group_wait_time), - .read_seq_string = cfqg_print_stat, + .seq_show = cfqg_print_stat, }, { .name = "idle_time", .private 
= offsetof(struct cfq_group, stats.idle_time), - .read_seq_string = cfqg_print_stat, + .seq_show = cfqg_print_stat, }, { .name = "empty_time", .private = offsetof(struct cfq_group, stats.empty_time), - .read_seq_string = cfqg_print_stat, + .seq_show = cfqg_print_stat, }, { .name = "dequeue", .private = offsetof(struct cfq_group, stats.dequeue), - .read_seq_string = cfqg_print_stat, + .seq_show = cfqg_print_stat, }, { .name = "unaccounted_time", .private = offsetof(struct cfq_group, stats.unaccounted_time), - .read_seq_string = cfqg_print_stat, + .seq_show = cfqg_print_stat, }, #endif /* CONFIG_DEBUG_BLK_CGROUP */ { } /* terminate */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index c3d698a72e02..b32a0f8ae9ad 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -444,12 +444,9 @@ struct cftype { * read_s64() is a signed version of read_u64() */ s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft); - /* - * read_seq_string() is used for outputting a simple sequence - * using seqfile. - */ - int (*read_seq_string)(struct cgroup_subsys_state *css, - struct cftype *cft, struct seq_file *m); + + /* generic seq_file read interface */ + int (*seq_show)(struct seq_file *sf, void *v); /* * write_u64() is a shortcut for the common case of accepting diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 036c05d8e572..c45e63328a0a 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2212,10 +2212,9 @@ static int cgroup_release_agent_write(struct cgroup_subsys_state *css, return 0; } -static int cgroup_release_agent_show(struct cgroup_subsys_state *css, - struct cftype *cft, struct seq_file *seq) +static int cgroup_release_agent_show(struct seq_file *seq, void *v) { - struct cgroup *cgrp = css->cgroup; + struct cgroup *cgrp = seq_css(seq)->cgroup; if (!cgroup_lock_live_group(cgrp)) return -ENODEV; @@ -2225,10 +2224,11 @@ static int cgroup_release_agent_show(struct cgroup_subsys_state *css, return 0; } -static int cgroup_sane_behavior_show(struct cgroup_subsys_state *css, - struct cftype *cft, struct seq_file *seq) +static int cgroup_sane_behavior_show(struct seq_file *seq, void *v) { - seq_printf(seq, "%d\n", cgroup_sane_behavior(css->cgroup)); + struct cgroup *cgrp = seq_css(seq)->cgroup; + + seq_printf(seq, "%d\n", cgroup_sane_behavior(cgrp)); return 0; } @@ -2291,8 +2291,8 @@ static int cgroup_seqfile_show(struct seq_file *m, void *arg) struct cftype *cft = seq_cft(m); struct cgroup_subsys_state *css = seq_css(m); - if (cft->read_seq_string) - return cft->read_seq_string(css, cft, m); + if (cft->seq_show) + return cft->seq_show(m, arg); if (cft->read_u64) seq_printf(m, "%llu\n", cft->read_u64(css, cft)); @@ -2559,7 +2559,7 @@ static umode_t cgroup_file_mode(const struct cftype *cft) if (cft->mode) return cft->mode; - if (cft->read_u64 || cft->read_s64 || cft->read_seq_string) + if (cft->read_u64 || cft->read_s64 || cft->seq_show) mode |= S_IRUGO; if (cft->write_u64 || cft->write_s64 || cft->write_string || @@ -3874,7 +3874,7 @@ static struct cftype cgroup_base_files[] = { { .name = "cgroup.sane_behavior", .flags = CFTYPE_ONLY_ON_ROOT, - .read_seq_string = cgroup_sane_behavior_show, + .seq_show = cgroup_sane_behavior_show, }, /* @@ -3899,7 +3899,7 @@ static struct cftype cgroup_base_files[] = { { .name = "release_agent", .flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT, - .read_seq_string = cgroup_release_agent_show, + .seq_show = cgroup_release_agent_show, .write_string = cgroup_release_agent_write, .max_write_len = PATH_MAX, }, @@ -5274,9 +5274,7 @@ static 
u64 current_css_set_refcount_read(struct cgroup_subsys_state *css, return count; } -static int current_css_set_cg_links_read(struct cgroup_subsys_state *css, - struct cftype *cft, - struct seq_file *seq) +static int current_css_set_cg_links_read(struct seq_file *seq, void *v) { struct cgrp_cset_link *link; struct css_set *cset; @@ -5301,9 +5299,9 @@ static int current_css_set_cg_links_read(struct cgroup_subsys_state *css, } #define MAX_TASKS_SHOWN_PER_CSS 25 -static int cgroup_css_links_read(struct cgroup_subsys_state *css, - struct cftype *cft, struct seq_file *seq) +static int cgroup_css_links_read(struct seq_file *seq, void *v) { + struct cgroup_subsys_state *css = seq_css(seq); struct cgrp_cset_link *link; read_lock(&css_set_lock); @@ -5349,12 +5347,12 @@ static struct cftype debug_files[] = { { .name = "current_css_set_cg_links", - .read_seq_string = current_css_set_cg_links_read, + .seq_show = current_css_set_cg_links_read, }, { .name = "cgroup_css_links", - .read_seq_string = cgroup_css_links_read, + .seq_show = cgroup_css_links_read, }, { diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index f0ff64d0ebaa..6c3154e477f6 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c @@ -301,10 +301,9 @@ out_unlock: spin_unlock_irq(&freezer->lock); } -static int freezer_read(struct cgroup_subsys_state *css, struct cftype *cft, - struct seq_file *m) +static int freezer_read(struct seq_file *m, void *v) { - struct cgroup_subsys_state *pos; + struct cgroup_subsys_state *css = seq_css(m), *pos; rcu_read_lock(); @@ -458,7 +457,7 @@ static struct cftype files[] = { { .name = "state", .flags = CFTYPE_NOT_ON_ROOT, - .read_seq_string = freezer_read, + .seq_show = freezer_read, .write_string = freezer_write, }, { diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 032929f91648..4410ac6a55f1 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1732,12 +1732,10 @@ out_unlock: * and since these maps can change value dynamically, one could read * gibberish by doing partial reads while a list was changing. 
*/ -static int cpuset_common_read_seq_string(struct cgroup_subsys_state *css, - struct cftype *cft, - struct seq_file *sf) +static int cpuset_common_seq_show(struct seq_file *sf, void *v) { - struct cpuset *cs = css_cs(css); - cpuset_filetype_t type = cft->private; + struct cpuset *cs = css_cs(seq_css(sf)); + cpuset_filetype_t type = seq_cft(sf)->private; ssize_t count; char *buf, *s; int ret = 0; @@ -1824,7 +1822,7 @@ static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) static struct cftype files[] = { { .name = "cpus", - .read_seq_string = cpuset_common_read_seq_string, + .seq_show = cpuset_common_seq_show, .write_string = cpuset_write_resmask, .max_write_len = (100U + 6 * NR_CPUS), .private = FILE_CPULIST, @@ -1832,7 +1830,7 @@ static struct cftype files[] = { { .name = "mems", - .read_seq_string = cpuset_common_read_seq_string, + .seq_show = cpuset_common_seq_show, .write_string = cpuset_write_resmask, .max_write_len = (100U + 6 * MAX_NUMNODES), .private = FILE_MEMLIST, diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f28ec6722f0b..7e8cbb9ee4d6 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7256,10 +7256,9 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) return ret; } -static int cpu_stats_show(struct cgroup_subsys_state *css, struct cftype *cft, - struct seq_file *sf) +static int cpu_stats_show(struct seq_file *sf, void *v) { - struct task_group *tg = css_tg(css); + struct task_group *tg = css_tg(seq_css(sf)); struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); @@ -7318,7 +7317,7 @@ static struct cftype cpu_files[] = { }, { .name = "stat", - .read_seq_string = cpu_stats_show, + .seq_show = cpu_stats_show, }, #endif #ifdef CONFIG_RT_GROUP_SCHED diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index dd88738cd4a9..622e0818f905 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -163,10 +163,9 @@ out: return err; } -static int cpuacct_percpu_seq_read(struct cgroup_subsys_state *css, - struct cftype *cft, struct seq_file *m) +static int cpuacct_percpu_seq_show(struct seq_file *m, void *V) { - struct cpuacct *ca = css_ca(css); + struct cpuacct *ca = css_ca(seq_css(m)); u64 percpu; int i; @@ -183,10 +182,9 @@ static const char * const cpuacct_stat_desc[] = { [CPUACCT_STAT_SYSTEM] = "system", }; -static int cpuacct_stats_show(struct cgroup_subsys_state *css, - struct cftype *cft, struct seq_file *sf) +static int cpuacct_stats_show(struct seq_file *sf, void *v) { - struct cpuacct *ca = css_ca(css); + struct cpuacct *ca = css_ca(seq_css(sf)); int cpu; s64 val = 0; @@ -220,11 +218,11 @@ static struct cftype files[] = { }, { .name = "usage_percpu", - .read_seq_string = cpuacct_percpu_seq_read, + .seq_show = cpuacct_percpu_seq_show, }, { .name = "stat", - .read_seq_string = cpuacct_stats_show, + .seq_show = cpuacct_stats_show, }, { } /* terminate */ }; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f149521a77e6..9252219376cc 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3014,10 +3014,9 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p) } #ifdef CONFIG_SLABINFO -static int mem_cgroup_slabinfo_read(struct cgroup_subsys_state *css, - struct cftype *cft, struct seq_file *m) +static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v) { - struct mem_cgroup *memcg = mem_cgroup_from_css(css); + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); struct memcg_cache_params *params; if 
(!memcg_can_account_kmem(memcg)) @@ -5418,8 +5417,7 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, #endif #ifdef CONFIG_NUMA -static int memcg_numa_stat_show(struct cgroup_subsys_state *css, - struct cftype *cft, struct seq_file *m) +static int memcg_numa_stat_show(struct seq_file *m, void *v) { struct numa_stat { const char *name; @@ -5435,7 +5433,7 @@ static int memcg_numa_stat_show(struct cgroup_subsys_state *css, const struct numa_stat *stat; int nid; unsigned long nr; - struct mem_cgroup *memcg = mem_cgroup_from_css(css); + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask); @@ -5474,10 +5472,9 @@ static inline void mem_cgroup_lru_names_not_uptodate(void) BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); } -static int memcg_stat_show(struct cgroup_subsys_state *css, struct cftype *cft, - struct seq_file *m) +static int memcg_stat_show(struct seq_file *m, void *v) { - struct mem_cgroup *memcg = mem_cgroup_from_css(css); + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); struct mem_cgroup *mi; unsigned int i; @@ -5907,10 +5904,9 @@ static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, spin_unlock(&memcg_oom_lock); } -static int mem_cgroup_oom_control_read(struct cgroup_subsys_state *css, - struct cftype *cft, struct seq_file *sf) +static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) { - struct mem_cgroup *memcg = mem_cgroup_from_css(css); + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom)); @@ -6260,7 +6256,7 @@ static struct cftype mem_cgroup_files[] = { }, { .name = "stat", - .read_seq_string = memcg_stat_show, + .seq_show = memcg_stat_show, }, { .name = "force_empty", @@ -6290,7 +6286,7 @@ static struct cftype mem_cgroup_files[] = { }, { .name = "oom_control", - .read_seq_string = mem_cgroup_oom_control_read, + .seq_show = mem_cgroup_oom_control_read, .write_u64 = mem_cgroup_oom_control_write, .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), }, @@ -6300,7 +6296,7 @@ static struct cftype mem_cgroup_files[] = { #ifdef CONFIG_NUMA { .name = "numa_stat", - .read_seq_string = memcg_numa_stat_show, + .seq_show = memcg_numa_stat_show, }, #endif #ifdef CONFIG_MEMCG_KMEM @@ -6330,7 +6326,7 @@ static struct cftype mem_cgroup_files[] = { #ifdef CONFIG_SLABINFO { .name = "kmem.slabinfo", - .read_seq_string = mem_cgroup_slabinfo_read, + .seq_show = mem_cgroup_slabinfo_read, }, #endif #endif diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index 498710dce4a5..56cbb69ba024 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -173,14 +173,14 @@ static u64 read_prioidx(struct cgroup_subsys_state *css, struct cftype *cft) return css->cgroup->id; } -static int read_priomap(struct cgroup_subsys_state *css, struct cftype *cft, - struct seq_file *sf) +static int read_priomap(struct seq_file *sf, void *v) { struct net_device *dev; rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) - seq_printf(sf, "%s %u\n", dev->name, netprio_prio(css, dev)); + seq_printf(sf, "%s %u\n", dev->name, + netprio_prio(seq_css(sf), dev)); rcu_read_unlock(); return 0; } @@ -238,7 +238,7 @@ static struct cftype ss_files[] = { }, { .name = "ifpriomap", - .read_seq_string = read_priomap, + .seq_show = read_priomap, .write_string = 
write_priomap, }, { } /* terminate */ diff --git a/security/device_cgroup.c b/security/device_cgroup.c index 7c2a0a71049e..d3b6d2cd3a06 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -274,10 +274,9 @@ static void set_majmin(char *str, unsigned m) sprintf(str, "%u", m); } -static int devcgroup_seq_read(struct cgroup_subsys_state *css, - struct cftype *cft, struct seq_file *m) +static int devcgroup_seq_show(struct seq_file *m, void *v) { - struct dev_cgroup *devcgroup = css_to_devcgroup(css); + struct dev_cgroup *devcgroup = css_to_devcgroup(seq_css(m)); struct dev_exception_item *ex; char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN]; @@ -679,7 +678,7 @@ static struct cftype dev_cgroup_files[] = { }, { .name = "list", - .read_seq_string = devcgroup_seq_read, + .seq_show = devcgroup_seq_show, .private = DEVCG_LIST, }, { } /* terminate */ -- cgit v1.2.3 From 5877231f646bbd6d1d545e7af83aaa6e6b746013 Mon Sep 17 00:00:00 2001 From: Aneesh Kumar K.V Date: Fri, 6 Dec 2013 00:08:22 +0530 Subject: mm: Move change_prot_numa outside CONFIG_ARCH_USES_NUMA_PROT_NONE change_prot_numa should work even if _PAGE_NUMA != _PAGE_PROTNONE. On archs like ppc64 that don't use _PAGE_PROTNONE and also have a separate page table outside linux pagetable, we just need to make sure that when calling change_prot_numa we flush the hardware page table entry so that next page access result in a numa fault. We still need to make sure we use the numa faulting logic only when CONFIG_NUMA_BALANCING is set. This implies the migrate-on-fault (Lazy migration) via mbind will only work if CONFIG_NUMA_BALANCING is set. Signed-off-by: Aneesh Kumar K.V Reviewed-by: Rik van Riel Acked-by: Mel Gorman Signed-off-by: Benjamin Herrenschmidt --- include/linux/mm.h | 2 +- mm/mempolicy.c | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index 1cedd000cf29..a7b4e310bf42 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1842,7 +1842,7 @@ static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) } #endif -#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE +#ifdef CONFIG_NUMA_BALANCING unsigned long change_prot_numa(struct vm_area_struct *vma, unsigned long start, unsigned long end); #endif diff --git a/mm/mempolicy.c b/mm/mempolicy.c index eca4a3129129..9f73b29d304d 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -613,7 +613,7 @@ static inline int queue_pages_pgd_range(struct vm_area_struct *vma, return 0; } -#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE +#ifdef CONFIG_NUMA_BALANCING /* * This is used to mark a range of virtual addresses to be inaccessible. * These are later cleared by a NUMA hinting fault. Depending on these @@ -627,7 +627,6 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, unsigned long addr, unsigned long end) { int nr_updated; - BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE); nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1); if (nr_updated) @@ -641,7 +640,7 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma, { return 0; } -#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */ +#endif /* CONFIG_NUMA_BALANCING */ /* * Walk through page tables and collect pages to be migrated. 
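The guard pattern this change relies on is worth spelling out, since it is what lets lazy migration degrade gracefully on builds without NUMA balancing; the sketch below is a simplified restatement under that assumption, not the literal kernel code (the real stub lives in mm/mempolicy.c rather than a header):

/*
 * Simplified restatement, not the patch itself.  With
 * CONFIG_NUMA_BALANCING the real change_prot_numa() marks the range so
 * the next access takes a NUMA hinting fault; without it, callers see a
 * stub that updates nothing, so mbind(MPOL_MF_LAZY)-style lazy
 * migration quietly does no work.
 */
#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
			       unsigned long addr, unsigned long end);
#else
static inline unsigned long change_prot_numa(struct vm_area_struct *vma,
					     unsigned long addr,
					     unsigned long end)
{
	return 0;	/* no pages updated; NUMA faulting logic unused */
}
#endif
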
-- cgit v1.2.3 From 8afb1474db4701d1ab80cd8251137a3260e6913e Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 10 Sep 2013 11:43:37 +0800 Subject: slub: Fix calculation of cpu slabs /sys/kernel/slab/:t-0000048 # cat cpu_slabs 231 N0=16 N1=215 /sys/kernel/slab/:t-0000048 # cat slabs 145 N0=36 N1=109 See, the number of slabs is smaller than that of cpu slabs. The bug was introduced by commit 49e2258586b423684f03c278149ab46d8f8b6700 ("slub: per cpu cache for partial pages"). We should use page->pages instead of page->pobjects when calculating the number of cpu partial slabs. This also fixes the mapping of slabs and nodes. As there's no variable storing the number of total/active objects in cpu partial slabs, and we don't have user interfaces requiring those statistics, I just add WARN_ON for those cases. Cc: # 3.2+ Acked-by: Christoph Lameter Reviewed-by: Wanpeng Li Signed-off-by: Li Zefan Signed-off-by: Pekka Enberg --- mm/slub.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index 545a170ebf9f..89490d9d91e0 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -4299,7 +4299,13 @@ static ssize_t show_slab_objects(struct kmem_cache *s, page = ACCESS_ONCE(c->partial); if (page) { - x = page->pobjects; + node = page_to_nid(page); + if (flags & SO_TOTAL) + WARN_ON_ONCE(1); + else if (flags & SO_OBJECTS) + WARN_ON_ONCE(1); + else + x = page->pages; total += x; nodes[node] += x; } -- cgit v1.2.3 From b3ff8a2f9569fb41b9cf8902897d787a33bac84f Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Sun, 12 Jan 2014 20:23:27 -0800 Subject: cgroup: remove stray references to css_id Trivial: remove the few stray references to css_id, which itself was removed in v3.13's 2ff2a7d03bbe "cgroup: kill css_id". Signed-off-by: Hugh Dickins Signed-off-by: Tejun Heo --- drivers/md/bcache/request.c | 1 - include/linux/cgroup.h | 3 --- mm/page_cgroup.c | 2 +- 3 files changed, 1 insertion(+), 5 deletions(-) (limited to 'mm') diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index fbcc851ed5a5..61bcfc21d2a0 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -163,7 +163,6 @@ static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup) static void bcachecg_destroy(struct cgroup *cgroup) { struct bch_cgroup *cg = cgroup_to_bcache(cgroup); - free_css_id(&bcache_subsys, &cg->css); kfree(cg); } diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index cfaf416492dd..5c097596104b 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -29,7 +29,6 @@ struct cgroupfs_root; struct cgroup_subsys; struct inode; struct cgroup; -struct css_id; extern int cgroup_init_early(void); extern int cgroup_init(void); @@ -79,8 +78,6 @@ struct cgroup_subsys_state { struct cgroup_subsys_state *parent; unsigned long flags; - /* ID for this css, if possible */ - struct css_id __rcu *id; /* percpu_ref killing and RCU release */ struct rcu_head rcu_head; diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 6d757e3a872a..3bd0b8e6ab12 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c @@ -451,7 +451,7 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) * lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry * @ent: swap entry to be looked up. * - * Returns CSS ID of mem_cgroup at success. 0 at failure. (0 is invalid ID) + * Returns ID of mem_cgroup at success. 0 at failure. 
(0 is invalid ID) */ unsigned short lookup_swap_cgroup_id(swp_entry_t ent) { -- cgit v1.2.3 From c65c1877bd6826ce0d9713d76e30a7bed8e49f38 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 10 Jan 2014 13:23:49 +0100 Subject: slub: use lockdep_assert_held Instead of using comments in an attempt at getting the locking right, use proper assertions that actively warn you if you got it wrong. Also add extra braces in a few sites to comply with coding-style. Signed-off-by: Peter Zijlstra Signed-off-by: Pekka Enberg --- mm/slub.c | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index 89490d9d91e0..367b224f2aa5 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -985,23 +985,22 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x) /* * Tracking of fully allocated slabs for debugging purposes. - * - * list_lock must be held. */ static void add_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) { + lockdep_assert_held(&n->list_lock); + if (!(s->flags & SLAB_STORE_USER)) return; list_add(&page->lru, &n->full); } -/* - * list_lock must be held. - */ -static void remove_full(struct kmem_cache *s, struct page *page) +static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) { + lockdep_assert_held(&n->list_lock); + if (!(s->flags & SLAB_STORE_USER)) return; @@ -1250,7 +1249,8 @@ static inline int check_object(struct kmem_cache *s, struct page *page, void *object, u8 val) { return 1; } static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) {} -static inline void remove_full(struct kmem_cache *s, struct page *page) {} +static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, + struct page *page) {} static inline unsigned long kmem_cache_flags(unsigned long object_size, unsigned long flags, const char *name, void (*ctor)(void *)) @@ -1504,12 +1504,12 @@ static void discard_slab(struct kmem_cache *s, struct page *page) /* * Management of partially allocated slabs. - * - * list_lock must be held. */ static inline void add_partial(struct kmem_cache_node *n, struct page *page, int tail) { + lockdep_assert_held(&n->list_lock); + n->nr_partial++; if (tail == DEACTIVATE_TO_TAIL) list_add_tail(&page->lru, &n->partial); @@ -1517,12 +1517,11 @@ static inline void add_partial(struct kmem_cache_node *n, list_add(&page->lru, &n->partial); } -/* - * list_lock must be held. - */ static inline void remove_partial(struct kmem_cache_node *n, struct page *page) { + lockdep_assert_held(&n->list_lock); + list_del(&page->lru); n->nr_partial--; } @@ -1532,8 +1531,6 @@ static inline void remove_partial(struct kmem_cache_node *n, * return the pointer to the freelist. * * Returns a list of objects or NULL if it fails. - * - * Must hold list_lock since we modify the partial list. */ static inline void *acquire_slab(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page, @@ -1543,6 +1540,8 @@ static inline void *acquire_slab(struct kmem_cache *s, unsigned long counters; struct page new; + lockdep_assert_held(&n->list_lock); + /* * Zap the freelist and set the frozen bit. 
* The old freelist is the list of objects for the @@ -1887,7 +1886,7 @@ redo: else if (l == M_FULL) - remove_full(s, page); + remove_full(s, n, page); if (m == M_PARTIAL) { @@ -2541,7 +2540,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, new.inuse--; if ((!new.inuse || !prior) && !was_frozen) { - if (kmem_cache_has_cpu_partial(s) && !prior) + if (kmem_cache_has_cpu_partial(s) && !prior) { /* * Slab was on no list before and will be @@ -2551,7 +2550,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, */ new.frozen = 1; - else { /* Needs to be taken off a list */ + } else { /* Needs to be taken off a list */ n = get_node(s, page_to_nid(page)); /* @@ -2600,7 +2599,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, */ if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { if (kmem_cache_debug(s)) - remove_full(s, page); + remove_full(s, n, page); add_partial(n, page, DEACTIVATE_TO_TAIL); stat(s, FREE_ADD_PARTIAL); } @@ -2614,9 +2613,10 @@ slab_empty: */ remove_partial(n, page); stat(s, FREE_REMOVE_PARTIAL); - } else + } else { /* Slab must be on the full list */ - remove_full(s, page); + remove_full(s, n, page); + } spin_unlock_irqrestore(&n->list_lock, flags); stat(s, FREE_SLAB); -- cgit v1.2.3 From 26e4f2057516f1c457e0e95346a00303f983ad53 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sat, 4 Jan 2014 16:32:31 +0900 Subject: slub: Fix possible format string bug. The "name" is determined at runtime and is parsed as format string. Acked-by: David Rientjes Signed-off-by: Tetsuo Handa Signed-off-by: Pekka Enberg --- mm/slub.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index 367b224f2aa5..a99e9e67c60e 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5169,7 +5169,7 @@ static int sysfs_slab_add(struct kmem_cache *s) } s->kobj.kset = slab_kset; - err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name); + err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); if (err) { kobject_put(&s->kobj); return err; -- cgit v1.2.3 From b3084f4db3aeb991c507ca774337c7e7893ed04f Mon Sep 17 00:00:00 2001 From: Aneesh Kumar K.V Date: Mon, 13 Jan 2014 11:34:24 +0530 Subject: powerpc/thp: Fix crash on mremap This patch fix the below crash NIP [c00000000004cee4] .__hash_page_thp+0x2a4/0x440 LR [c0000000000439ac] .hash_page+0x18c/0x5e0 ... Call Trace: [c000000736103c40] [00001ffffb000000] 0x1ffffb000000(unreliable) [437908.479693] [c000000736103d50] [c0000000000439ac] .hash_page+0x18c/0x5e0 [437908.479699] [c000000736103e30] [c00000000000924c] .do_hash_page+0x4c/0x58 On ppc64 we use the pgtable for storing the hpte slot information and store address to the pgtable at a constant offset (PTRS_PER_PMD) from pmd. On mremap, when we switch the pmd, we need to withdraw and deposit the pgtable again, so that we find the pgtable at PTRS_PER_PMD offset from new pmd. We also want to move the withdraw and deposit before the set_pmd so that, when page fault find the pmd as trans huge we can be sure that pgtable can be located at the offset. Signed-off-by: Aneesh Kumar K.V Acked-by: Kirill A. 
Shutemov Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/pgtable-ppc64.h | 14 ++++++++++++++ include/asm-generic/pgtable.h | 12 ++++++++++++ mm/huge_memory.c | 14 +++++--------- 3 files changed, 31 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index 4a191c472867..d27960c89a71 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h @@ -558,5 +558,19 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); #define __HAVE_ARCH_PMDP_INVALIDATE extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); + +#define pmd_move_must_withdraw pmd_move_must_withdraw +typedef struct spinlock spinlock_t; +static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, + spinlock_t *old_pmd_ptl) +{ + /* + * Archs like ppc64 use pgtable to store per pmd + * specific information. So when we switch the pmd, + * we should also withdraw and deposit the pgtable + */ + return true; +} + #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */ diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index db0923458940..8e4f41d9af4d 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -558,6 +558,18 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp) } #endif +#ifndef pmd_move_must_withdraw +static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, + spinlock_t *old_pmd_ptl) +{ + /* + * With split pmd lock we also need to move preallocated + * PTE page table if new_pmd is on different PMD page table. + */ + return new_pmd_ptl != old_pmd_ptl; +} +#endif + /* * This function is meant to be used by sites walking pagetables with * the mmap_sem hold in read mode to protect against MADV_DONTNEED and diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 95d1acb0f3d2..5d80c53b87cb 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1502,19 +1502,15 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); pmd = pmdp_get_and_clear(mm, old_addr, old_pmd); VM_BUG_ON(!pmd_none(*new_pmd)); - set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); - if (new_ptl != old_ptl) { - pgtable_t pgtable; - /* - * Move preallocated PTE page table if new_pmd is on - * different PMD page table. - */ + if (pmd_move_must_withdraw(new_ptl, old_ptl)) { + pgtable_t pgtable; pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); pgtable_trans_huge_deposit(mm, new_pmd, pgtable); - - spin_unlock(new_ptl); } + set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); + if (new_ptl != old_ptl) + spin_unlock(new_ptl); spin_unlock(old_ptl); } out: -- cgit v1.2.3 From 8a0921712ec6d00754b5d7afea78137772efee0a Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 2 Jan 2014 13:53:21 -0800 Subject: percpu: use VMALLOC_TOTAL instead of VMALLOC_END - VMALLOC_START vmalloc already gives a useful macro to calculate the total vmalloc size. Use it. 
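For reference, a minimal sketch of the simplification (the helper name is illustrative, not from the patch):

/*
 * Illustrative only: VMALLOC_TOTAL (linux/vmalloc.h) already expands to
 * VMALLOC_END - VMALLOC_START on MMU configs (and 0 on !MMU), so the
 * 75%-of-vmalloc sanity check no longer needs to open-code the
 * subtraction.
 */
#include <linux/vmalloc.h>

static bool pcpu_embed_span_too_large(size_t max_distance)
{
	/* warn/fail if the embedded first chunk would span >75% of vmalloc */
	return max_distance > VMALLOC_TOTAL * 3 / 4;
}
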
Signed-off-by: Laura Abbott Signed-off-by: Tejun Heo --- mm/percpu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/percpu.c b/mm/percpu.c index 0d10defe951e..afbf352ae580 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1686,10 +1686,10 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, max_distance += ai->unit_size; /* warn if maximum distance is further than 75% of vmalloc space */ - if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) { + if (max_distance > VMALLOC_TOTAL * 3 / 4) { pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc " "space 0x%lx\n", max_distance, - (unsigned long)(VMALLOC_END - VMALLOC_START)); + VMALLOC_TOTAL); #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK /* and fail if we have fallback */ rc = -EINVAL; -- cgit v1.2.3 From 0abdd7a81b7e3fd781d7fabcca49501852bba17e Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 21 Jan 2014 15:48:12 -0800 Subject: dma-debug: introduce debug_dma_assert_idle() Record actively mapped pages and provide an api for asserting a given page is dma inactive before execution proceeds. Placing debug_dma_assert_idle() in cow_user_page() flagged the violation of the dma-api in the NET_DMA implementation (see commit 77873803363c "net_dma: mark broken"). The implementation includes the capability to count, in a limited way, repeat mappings of the same page that occur without an intervening unmap. This 'overlap' counter is limited to the few bits of tag space in a radix tree. This mechanism is added to mitigate false negative cases where, for example, a page is dma mapped twice and debug_dma_assert_idle() is called after the page is un-mapped once. Signed-off-by: Dan Williams Cc: Joerg Roedel Cc: Vinod Koul Cc: Russell King Cc: James Bottomley Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/dma-debug.h | 6 ++ lib/Kconfig.debug | 12 ++- lib/dma-debug.c | 193 ++++++++++++++++++++++++++++++++++++++++++---- mm/memory.c | 3 + 4 files changed, 199 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h index fc0e34ce038f..fe8cb610deac 100644 --- a/include/linux/dma-debug.h +++ b/include/linux/dma-debug.h @@ -85,6 +85,8 @@ extern void debug_dma_sync_sg_for_device(struct device *dev, extern void debug_dma_dump_mappings(struct device *dev); +extern void debug_dma_assert_idle(struct page *page); + #else /* CONFIG_DMA_API_DEBUG */ static inline void dma_debug_add_bus(struct bus_type *bus) @@ -183,6 +185,10 @@ static inline void debug_dma_dump_mappings(struct device *dev) { } +static inline void debug_dma_assert_idle(struct page *page) +{ +} + #endif /* CONFIG_DMA_API_DEBUG */ #endif /* __DMA_DEBUG_H */ diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 6982094a7e74..900b63c1e899 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1584,8 +1584,16 @@ config DMA_API_DEBUG With this option you will be able to detect common bugs in device drivers like double-freeing of DMA mappings or freeing mappings that were never allocated. - This option causes a performance degredation. Use only if you want - to debug device drivers. If unsure, say N. + + This also attempts to catch cases where a page owned by DMA is + accessed by the cpu in a way that could cause data corruption. For + example, this enables cow_user_page() to check that the source page is + not undergoing DMA. + + This option causes a performance degradation. 
Use only if you want to + debug device drivers and dma interactions. + + If unsure, say N. source "samples/Kconfig" diff --git a/lib/dma-debug.c b/lib/dma-debug.c index d87a17a819d0..c38083871f11 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -53,11 +53,26 @@ enum map_err_types { #define DMA_DEBUG_STACKTRACE_ENTRIES 5 +/** + * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping + * @list: node on pre-allocated free_entries list + * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent + * @type: single, page, sg, coherent + * @pfn: page frame of the start address + * @offset: offset of mapping relative to pfn + * @size: length of the mapping + * @direction: enum dma_data_direction + * @sg_call_ents: 'nents' from dma_map_sg + * @sg_mapped_ents: 'mapped_ents' from dma_map_sg + * @map_err_type: track whether dma_mapping_error() was checked + * @stacktrace: support backtraces when a violation is detected + */ struct dma_debug_entry { struct list_head list; struct device *dev; int type; - phys_addr_t paddr; + unsigned long pfn; + size_t offset; u64 dev_addr; u64 size; int direction; @@ -372,6 +387,11 @@ static void hash_bucket_del(struct dma_debug_entry *entry) list_del(&entry->list); } +static unsigned long long phys_addr(struct dma_debug_entry *entry) +{ + return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset; +} + /* * Dump mapping entries for debugging purposes */ @@ -389,9 +409,9 @@ void debug_dma_dump_mappings(struct device *dev) list_for_each_entry(entry, &bucket->list, list) { if (!dev || dev == entry->dev) { dev_info(entry->dev, - "%s idx %d P=%Lx D=%Lx L=%Lx %s %s\n", + "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n", type2name[entry->type], idx, - (unsigned long long)entry->paddr, + phys_addr(entry), entry->pfn, entry->dev_addr, entry->size, dir2name[entry->direction], maperr2str[entry->map_err_type]); @@ -403,6 +423,133 @@ void debug_dma_dump_mappings(struct device *dev) } EXPORT_SYMBOL(debug_dma_dump_mappings); +/* + * For each page mapped (initial page in the case of + * dma_alloc_coherent/dma_map_{single|page}, or each page in a + * scatterlist) insert into this tree using the pfn as the key. At + * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If + * the pfn already exists at insertion time add a tag as a reference + * count for the overlapping mappings. For now, the overlap tracking + * just ensures that 'unmaps' balance 'maps' before marking the pfn + * idle, but we should also be flagging overlaps as an API violation. + * + * Memory usage is mostly constrained by the maximum number of available + * dma-debug entries in that we need a free dma_debug_entry before + * inserting into the tree. In the case of dma_map_{single|page} and + * dma_alloc_coherent there is only one dma_debug_entry and one pfn to + * track per event. dma_map_sg(), on the other hand, + * consumes a single dma_debug_entry, but inserts 'nents' entries into + * the tree. + * + * At any time debug_dma_assert_idle() can be called to trigger a + * warning if the given page is in the active set. 
+ */ +static RADIX_TREE(dma_active_pfn, GFP_NOWAIT); +static DEFINE_SPINLOCK(radix_lock); +#define ACTIVE_PFN_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1) + +static int active_pfn_read_overlap(unsigned long pfn) +{ + int overlap = 0, i; + + for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) + if (radix_tree_tag_get(&dma_active_pfn, pfn, i)) + overlap |= 1 << i; + return overlap; +} + +static int active_pfn_set_overlap(unsigned long pfn, int overlap) +{ + int i; + + if (overlap > ACTIVE_PFN_MAX_OVERLAP || overlap < 0) + return 0; + + for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) + if (overlap & 1 << i) + radix_tree_tag_set(&dma_active_pfn, pfn, i); + else + radix_tree_tag_clear(&dma_active_pfn, pfn, i); + + return overlap; +} + +static void active_pfn_inc_overlap(unsigned long pfn) +{ + int overlap = active_pfn_read_overlap(pfn); + + overlap = active_pfn_set_overlap(pfn, ++overlap); + + /* If we overflowed the overlap counter then we're potentially + * leaking dma-mappings. Otherwise, if maps and unmaps are + * balanced then this overflow may cause false negatives in + * debug_dma_assert_idle() as the pfn may be marked idle + * prematurely. + */ + WARN_ONCE(overlap == 0, + "DMA-API: exceeded %d overlapping mappings of pfn %lx\n", + ACTIVE_PFN_MAX_OVERLAP, pfn); +} + +static int active_pfn_dec_overlap(unsigned long pfn) +{ + int overlap = active_pfn_read_overlap(pfn); + + return active_pfn_set_overlap(pfn, --overlap); +} + +static int active_pfn_insert(struct dma_debug_entry *entry) +{ + unsigned long flags; + int rc; + + spin_lock_irqsave(&radix_lock, flags); + rc = radix_tree_insert(&dma_active_pfn, entry->pfn, entry); + if (rc == -EEXIST) + active_pfn_inc_overlap(entry->pfn); + spin_unlock_irqrestore(&radix_lock, flags); + + return rc; +} + +static void active_pfn_remove(struct dma_debug_entry *entry) +{ + unsigned long flags; + + spin_lock_irqsave(&radix_lock, flags); + if (active_pfn_dec_overlap(entry->pfn) == 0) + radix_tree_delete(&dma_active_pfn, entry->pfn); + spin_unlock_irqrestore(&radix_lock, flags); +} + +/** + * debug_dma_assert_idle() - assert that a page is not undergoing dma + * @page: page to lookup in the dma_active_pfn tree + * + * Place a call to this routine in cases where the cpu touching the page + * before the dma completes (page is dma_unmapped) will lead to data + * corruption. + */ +void debug_dma_assert_idle(struct page *page) +{ + unsigned long flags; + struct dma_debug_entry *entry; + + if (!page) + return; + + spin_lock_irqsave(&radix_lock, flags); + entry = radix_tree_lookup(&dma_active_pfn, page_to_pfn(page)); + spin_unlock_irqrestore(&radix_lock, flags); + + if (!entry) + return; + + err_printk(entry->dev, entry, + "DMA-API: cpu touching an active dma mapped page " + "[pfn=0x%lx]\n", entry->pfn); +} + /* * Wrapper function for adding an entry to the hash. * This function takes care of locking itself. 
@@ -411,10 +558,21 @@ static void add_dma_entry(struct dma_debug_entry *entry) { struct hash_bucket *bucket; unsigned long flags; + int rc; bucket = get_hash_bucket(entry, &flags); hash_bucket_add(bucket, entry); put_hash_bucket(bucket, &flags); + + rc = active_pfn_insert(entry); + if (rc == -ENOMEM) { + pr_err("DMA-API: pfn tracking ENOMEM, dma-debug disabled\n"); + global_disable = true; + } + + /* TODO: report -EEXIST errors here as overlapping mappings are + * not supported by the DMA API + */ } static struct dma_debug_entry *__dma_entry_alloc(void) @@ -469,6 +627,8 @@ static void dma_entry_free(struct dma_debug_entry *entry) { unsigned long flags; + active_pfn_remove(entry); + /* * add to beginning of the list - this way the entries are * more likely cache hot when they are reallocated. @@ -895,15 +1055,15 @@ static void check_unmap(struct dma_debug_entry *ref) ref->dev_addr, ref->size, type2name[entry->type], type2name[ref->type]); } else if ((entry->type == dma_debug_coherent) && - (ref->paddr != entry->paddr)) { + (phys_addr(ref) != phys_addr(entry))) { err_printk(ref->dev, entry, "DMA-API: device driver frees " "DMA memory with different CPU address " "[device address=0x%016llx] [size=%llu bytes] " "[cpu alloc address=0x%016llx] " "[cpu free address=0x%016llx]", ref->dev_addr, ref->size, - (unsigned long long)entry->paddr, - (unsigned long long)ref->paddr); + phys_addr(entry), + phys_addr(ref)); } if (ref->sg_call_ents && ref->type == dma_debug_sg && @@ -1052,7 +1212,8 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, entry->dev = dev; entry->type = dma_debug_page; - entry->paddr = page_to_phys(page) + offset; + entry->pfn = page_to_pfn(page); + entry->offset = offset, entry->dev_addr = dma_addr; entry->size = size; entry->direction = direction; @@ -1148,7 +1309,8 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, entry->type = dma_debug_sg; entry->dev = dev; - entry->paddr = sg_phys(s); + entry->pfn = page_to_pfn(sg_page(s)); + entry->offset = s->offset, entry->size = sg_dma_len(s); entry->dev_addr = sg_dma_address(s); entry->direction = direction; @@ -1198,7 +1360,8 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, struct dma_debug_entry ref = { .type = dma_debug_sg, .dev = dev, - .paddr = sg_phys(s), + .pfn = page_to_pfn(sg_page(s)), + .offset = s->offset, .dev_addr = sg_dma_address(s), .size = sg_dma_len(s), .direction = dir, @@ -1233,7 +1396,8 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size, entry->type = dma_debug_coherent; entry->dev = dev; - entry->paddr = virt_to_phys(virt); + entry->pfn = page_to_pfn(virt_to_page(virt)); + entry->offset = (size_t) virt & PAGE_MASK; entry->size = size; entry->dev_addr = dma_addr; entry->direction = DMA_BIDIRECTIONAL; @@ -1248,7 +1412,8 @@ void debug_dma_free_coherent(struct device *dev, size_t size, struct dma_debug_entry ref = { .type = dma_debug_coherent, .dev = dev, - .paddr = virt_to_phys(virt), + .pfn = page_to_pfn(virt_to_page(virt)), + .offset = (size_t) virt & PAGE_MASK, .dev_addr = addr, .size = size, .direction = DMA_BIDIRECTIONAL, @@ -1356,7 +1521,8 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, struct dma_debug_entry ref = { .type = dma_debug_sg, .dev = dev, - .paddr = sg_phys(s), + .pfn = page_to_pfn(sg_page(s)), + .offset = s->offset, .dev_addr = sg_dma_address(s), .size = sg_dma_len(s), .direction = direction, @@ -1388,7 +1554,8 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist 
*sg, struct dma_debug_entry ref = { .type = dma_debug_sg, .dev = dev, - .paddr = sg_phys(s), + .pfn = page_to_pfn(sg_page(s)), + .offset = s->offset, .dev_addr = sg_dma_address(s), .size = sg_dma_len(s), .direction = direction, diff --git a/mm/memory.c b/mm/memory.c index 6768ce9e57d2..e9c550484ba6 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -59,6 +59,7 @@ #include #include #include +#include #include #include @@ -2559,6 +2560,8 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) { + debug_dma_assert_idle(src); + /* * If the source page was a PFN mapping, we don't have * a "struct page" for it. We do a best-effort copy by -- cgit v1.2.3 From a0368d4e48fc9ad65a66f6819a801f3f542b4f0f Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Tue, 21 Jan 2014 15:48:49 -0800 Subject: mm: hugetlb: use get_page_foll() in follow_hugetlb_page() get_page_foll() is more optimal and is always safe to use under the PT lock. More so for hugetlbfs as there's no risk of race conditions with split_huge_page regardless of the PT lock. Signed-off-by: Andrea Arcangeli Tested-by: Khalid Aziz Cc: Pravin Shelar Cc: Greg Kroah-Hartman Cc: Ben Hutchings Cc: Christoph Lameter Cc: Johannes Weiner Cc: Mel Gorman Cc: Rik van Riel Cc: Andi Kleen Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hugetlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index dee6cf4e6d34..7596e104bffa 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3079,7 +3079,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, same_page: if (pages) { pages[i] = mem_map_offset(page, pfn_offset); - get_page(pages[i]); + get_page_foll(pages[i]); } if (vmas) -- cgit v1.2.3 From ebf360f9bb957f68e19e88f5067c015997dc26a6 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Tue, 21 Jan 2014 15:48:51 -0800 Subject: mm: hugetlbfs: move the put/get_page slab and hugetlbfs optimization in a faster path We don't actually need a reference on the head page in the slab and hugetlbfs paths, as long as we add a smp_rmb() which should be faster than get_page_unless_zero. [akpm@linux-foundation.org: fix typo in comment] Signed-off-by: Andrea Arcangeli Cc: Khalid Aziz Cc: Pravin Shelar Cc: Greg Kroah-Hartman Cc: Ben Hutchings Cc: Christoph Lameter Cc: Johannes Weiner Cc: Mel Gorman Cc: Rik van Riel Cc: Andi Kleen Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swap.c | 140 ++++++++++++++++++++++++++++++++++---------------------------- 1 file changed, 78 insertions(+), 62 deletions(-) (limited to 'mm') diff --git a/mm/swap.c b/mm/swap.c index 84b26aaabd03..e2757fbb04ea 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -86,45 +86,61 @@ static void put_compound_page(struct page *page) /* __split_huge_page_refcount can run under us */ struct page *page_head = compound_trans_head(page); + /* + * THP can not break up slab pages so avoid taking + * compound_lock(). Slab performs non-atomic bit ops + * on page->flags for better performance. In + * particular slab_unlock() in slub used to be a hot + * path. It is still hot on arches that do not support + * this_cpu_cmpxchg_double(). + * + * If "page" is part of a slab or hugetlbfs page it + * cannot be splitted and the head page cannot change + * from under us. 
And if "page" is part of a THP page + * under splitting, if the head page pointed by the + * THP tail isn't a THP head anymore, we'll find + * PageTail clear after smp_rmb() and we'll treat it + * as a single page. + */ + if (PageSlab(page_head) || PageHeadHuge(page_head)) { + /* + * If "page" is a THP tail, we must read the tail page + * flags after the head page flags. The + * split_huge_page side enforces write memory + * barriers between clearing PageTail and before the + * head page can be freed and reallocated. + */ + smp_rmb(); + if (likely(PageTail(page))) { + /* + * __split_huge_page_refcount + * cannot race here. + */ + VM_BUG_ON(!PageHead(page_head)); + VM_BUG_ON(page_mapcount(page) <= 0); + atomic_dec(&page->_mapcount); + if (put_page_testzero(page_head)) + __put_compound_page(page_head); + return; + } else + /* + * __split_huge_page_refcount + * run before us, "page" was a + * THP tail. The split + * page_head has been freed + * and reallocated as slab or + * hugetlbfs page of smaller + * order (only possible if + * reallocated as slab on + * x86). + */ + goto out_put_single; + } + if (likely(page != page_head && get_page_unless_zero(page_head))) { unsigned long flags; - /* - * THP can not break up slab pages so avoid taking - * compound_lock(). Slab performs non-atomic bit ops - * on page->flags for better performance. In particular - * slab_unlock() in slub used to be a hot path. It is - * still hot on arches that do not support - * this_cpu_cmpxchg_double(). - */ - if (PageSlab(page_head) || PageHeadHuge(page_head)) { - if (likely(PageTail(page))) { - /* - * __split_huge_page_refcount - * cannot race here. - */ - VM_BUG_ON(!PageHead(page_head)); - atomic_dec(&page->_mapcount); - if (put_page_testzero(page_head)) - VM_BUG_ON(1); - if (put_page_testzero(page_head)) - __put_compound_page(page_head); - return; - } else - /* - * __split_huge_page_refcount - * run before us, "page" was a - * THP tail. The split - * page_head has been freed - * and reallocated as slab or - * hugetlbfs page of smaller - * order (only possible if - * reallocated as slab on - * x86). - */ - goto skip_lock; - } /* * page_head wasn't a dangling pointer but it * may not be a head page anymore by the time @@ -135,7 +151,6 @@ static void put_compound_page(struct page *page) if (unlikely(!PageTail(page))) { /* __split_huge_page_refcount run before us */ compound_unlock_irqrestore(page_head, flags); -skip_lock: if (put_page_testzero(page_head)) { /* * The head page may have been @@ -221,36 +236,37 @@ bool __get_page_tail(struct page *page) * split_huge_page(). */ unsigned long flags; - bool got = false; + bool got; struct page *page_head = compound_trans_head(page); - if (likely(page != page_head && get_page_unless_zero(page_head))) { - /* Ref to put_compound_page() comment. */ - if (PageSlab(page_head) || PageHeadHuge(page_head)) { - if (likely(PageTail(page))) { - /* - * This is a hugetlbfs page or a slab - * page. __split_huge_page_refcount - * cannot race here. - */ - VM_BUG_ON(!PageHead(page_head)); - __get_page_tail_foll(page, false); - return true; - } else { - /* - * __split_huge_page_refcount run - * before us, "page" was a THP - * tail. The split page_head has been - * freed and reallocated as slab or - * hugetlbfs page of smaller order - * (only possible if reallocated as - * slab on x86). - */ - put_page(page_head); - return false; - } + /* Ref to put_compound_page() comment. 
*/ + if (PageSlab(page_head) || PageHeadHuge(page_head)) { + smp_rmb(); + if (likely(PageTail(page))) { + /* + * This is a hugetlbfs page or a slab + * page. __split_huge_page_refcount + * cannot race here. + */ + VM_BUG_ON(!PageHead(page_head)); + __get_page_tail_foll(page, true); + return true; + } else { + /* + * __split_huge_page_refcount run + * before us, "page" was a THP + * tail. The split page_head has been + * freed and reallocated as slab or + * hugetlbfs page of smaller order + * (only possible if reallocated as + * slab on x86). + */ + return false; } + } + got = false; + if (likely(page != page_head && get_page_unless_zero(page_head))) { /* * page_head wasn't a dangling pointer but it * may not be a head page anymore by the time -- cgit v1.2.3 From 44518d2b32646e37b4b7a0813bbbe98dc21c7f8f Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Tue, 21 Jan 2014 15:48:54 -0800 Subject: mm: tail page refcounting optimization for slab and hugetlbfs This skips the _mapcount mangling for slab and hugetlbfs pages. The main trouble in doing this is to guarantee that PageSlab and PageHeadHuge remains constant for all get_page/put_page run on the tail of slab or hugetlbfs compound pages. Otherwise if they're set during get_page but not set during put_page, the _mapcount of the tail page would underflow. PageHeadHuge will remain true until the compound page is released and enters the buddy allocator so it won't risk to change even if the tail page is the last reference left on the page. PG_slab instead is cleared before the slab frees the head page with put_page, so if the tail pin is released after the slab freed the page, we would have a problem. But in the slab case the tail pin cannot be the last reference left on the page. This is because the slab code is free to reuse the compound page after a kfree/kmem_cache_free without having to check if there's any tail pin left. In turn all tail pins must be always released while the head is still pinned by the slab code and so we know PG_slab will be still set too. 
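To make the pairing requirement concrete, here is a small illustrative sketch (not taken from the patch; the real logic sits in __get_page_tail_foll() and put_compound_page(), and the helper name below is the one this series introduces). If the "skip the tail refcounting" predicate could change value between the get and the put, the two sides would disagree about whether _mapcount was touched:

	/* sketch only: a conditional tail pin, paired get/put */
	static inline void tail_pin_get(struct page *head, struct page *tail)
	{
		if (compound_tail_refcounted(head))	/* false for slab and hugetlbfs */
			atomic_inc(&tail->_mapcount);
	}

	static inline void tail_pin_put(struct page *head, struct page *tail)
	{
		if (compound_tail_refcounted(head))
			atomic_dec(&tail->_mapcount);
		/*
		 * If the predicate were false at get time and true here,
		 * _mapcount would underflow; if true at get time and false
		 * here, the count would leak. Hence PageSlab/PageHeadHuge
		 * must stay constant until the last tail pin is dropped.
		 */
	}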
Signed-off-by: Andrea Arcangeli Reviewed-by: Khalid Aziz Cc: Pravin Shelar Cc: Greg Kroah-Hartman Cc: Ben Hutchings Cc: Christoph Lameter Cc: Johannes Weiner Cc: Mel Gorman Cc: Rik van Riel Cc: Andi Kleen Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 6 ------ include/linux/mm.h | 32 +++++++++++++++++++++++++++++++- mm/internal.h | 3 ++- mm/swap.c | 33 +++++++++++++++++++++++++++------ 4 files changed, 60 insertions(+), 14 deletions(-) (limited to 'mm') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 251233c1494d..d01cc972a1d9 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -31,7 +31,6 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks); void hugepage_put_subpool(struct hugepage_subpool *spool); int PageHuge(struct page *page); -int PageHeadHuge(struct page *page_head); void reset_vma_resv_huge_pages(struct vm_area_struct *vma); int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); @@ -104,11 +103,6 @@ static inline int PageHuge(struct page *page) return 0; } -static inline int PageHeadHuge(struct page *page_head) -{ - return 0; -} - static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) { } diff --git a/include/linux/mm.h b/include/linux/mm.h index 9fac6dd69b11..f95c71b7c1fd 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -414,15 +414,45 @@ static inline int page_count(struct page *page) return atomic_read(&compound_head(page)->_count); } +#ifdef CONFIG_HUGETLB_PAGE +extern int PageHeadHuge(struct page *page_head); +#else /* CONFIG_HUGETLB_PAGE */ +static inline int PageHeadHuge(struct page *page_head) +{ + return 0; +} +#endif /* CONFIG_HUGETLB_PAGE */ + +static inline bool __compound_tail_refcounted(struct page *page) +{ + return !PageSlab(page) && !PageHeadHuge(page); +} + +/* + * This takes a head page as parameter and tells if the + * tail page reference counting can be skipped. + * + * For this to be safe, PageSlab and PageHeadHuge must remain true on + * any given page where they return true here, until all tail pins + * have been released. + */ +static inline bool compound_tail_refcounted(struct page *page) +{ + VM_BUG_ON(!PageHead(page)); + return __compound_tail_refcounted(page); +} + static inline void get_huge_page_tail(struct page *page) { /* * __split_huge_page_refcount() cannot run * from under us. + * In turn no need of compound_trans_head here. */ VM_BUG_ON(page_mapcount(page) < 0); VM_BUG_ON(atomic_read(&page->_count) != 0); - atomic_inc(&page->_mapcount); + if (compound_tail_refcounted(compound_head(page))) + atomic_inc(&page->_mapcount); } extern bool __get_page_tail(struct page *page); diff --git a/mm/internal.h b/mm/internal.h index 684f7aa9692a..a85a3ab1f7ef 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -51,7 +51,8 @@ static inline void __get_page_tail_foll(struct page *page, VM_BUG_ON(page_mapcount(page) < 0); if (get_page_head) atomic_inc(&page->first_page->_count); - atomic_inc(&page->_mapcount); + if (compound_tail_refcounted(page->first_page)) + atomic_inc(&page->_mapcount); } /* diff --git a/mm/swap.c b/mm/swap.c index e2757fbb04ea..bba4aa5bf686 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -88,8 +88,9 @@ static void put_compound_page(struct page *page) /* * THP can not break up slab pages so avoid taking - * compound_lock(). Slab performs non-atomic bit ops - * on page->flags for better performance. In + * compound_lock() and skip the tail page refcounting + * (in _mapcount) too. 
Slab performs non-atomic bit + * ops on page->flags for better performance. In * particular slab_unlock() in slub used to be a hot * path. It is still hot on arches that do not support * this_cpu_cmpxchg_double(). @@ -102,7 +103,7 @@ static void put_compound_page(struct page *page) * PageTail clear after smp_rmb() and we'll treat it * as a single page. */ - if (PageSlab(page_head) || PageHeadHuge(page_head)) { + if (!__compound_tail_refcounted(page_head)) { /* * If "page" is a THP tail, we must read the tail page * flags after the head page flags. The @@ -117,10 +118,30 @@ static void put_compound_page(struct page *page) * cannot race here. */ VM_BUG_ON(!PageHead(page_head)); - VM_BUG_ON(page_mapcount(page) <= 0); - atomic_dec(&page->_mapcount); - if (put_page_testzero(page_head)) + VM_BUG_ON(page_mapcount(page) != 0); + if (put_page_testzero(page_head)) { + /* + * If this is the tail of a + * slab compound page, the + * tail pin must not be the + * last reference held on the + * page, because the PG_slab + * cannot be cleared before + * all tail pins (which skips + * the _mapcount tail + * refcounting) have been + * released. For hugetlbfs the + * tail pin may be the last + * reference on the page + * instead, because + * PageHeadHuge will not go + * away until the compound + * page enters the buddy + * allocator. + */ + VM_BUG_ON(PageSlab(page_head)); __put_compound_page(page_head); + } return; } else /* -- cgit v1.2.3 From 3bfcd13ec0b43b39b02072ba67bf197d15379387 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Tue, 21 Jan 2014 15:48:56 -0800 Subject: mm: hugetlbfs: use __compound_tail_refcounted in __get_page_tail too Also remove hugetlb.h which isn't needed anymore as PageHeadHuge is handled in mm.h. Signed-off-by: Andrea Arcangeli Cc: Khalid Aziz Cc: Pravin Shelar Cc: Greg Kroah-Hartman Cc: Ben Hutchings Cc: Christoph Lameter Cc: Johannes Weiner Cc: Mel Gorman Cc: Rik van Riel Cc: Andi Kleen Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swap.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/swap.c b/mm/swap.c index bba4aa5bf686..7434e3619c14 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -31,7 +31,6 @@ #include #include #include -#include #include "internal.h" @@ -261,7 +260,7 @@ bool __get_page_tail(struct page *page) struct page *page_head = compound_trans_head(page); /* Ref to put_compound_page() comment. 
*/ - if (PageSlab(page_head) || PageHeadHuge(page_head)) { + if (!__compound_tail_refcounted(page_head)) { smp_rmb(); if (likely(PageTail(page))) { /* -- cgit v1.2.3 From 758f66a29ccc6383353fd395aa04be15e8dea445 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 21 Jan 2014 15:48:57 -0800 Subject: mm/hugetlb.c: simplify PageHeadHuge() and PageHuge() Signed-off-by: Andrea Arcangeli Cc: Khalid Aziz Cc: Pravin Shelar Cc: Greg Kroah-Hartman Cc: Ben Hutchings Cc: Christoph Lameter Cc: Johannes Weiner Cc: Mel Gorman Cc: Rik van Riel Cc: Andi Kleen Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hugetlb.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 7596e104bffa..1d9125360bf5 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -690,15 +690,11 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order) */ int PageHuge(struct page *page) { - compound_page_dtor *dtor; - if (!PageCompound(page)) return 0; page = compound_head(page); - dtor = get_compound_page_dtor(page); - - return dtor == free_huge_page; + return get_compound_page_dtor(page) == free_huge_page; } EXPORT_SYMBOL_GPL(PageHuge); @@ -708,14 +704,10 @@ EXPORT_SYMBOL_GPL(PageHuge); */ int PageHeadHuge(struct page *page_head) { - compound_page_dtor *dtor; - if (!PageHead(page_head)) return 0; - dtor = get_compound_page_dtor(page_head); - - return dtor == free_huge_page; + return get_compound_page_dtor(page_head) == free_huge_page; } EXPORT_SYMBOL_GPL(PageHeadHuge); -- cgit v1.2.3 From 26296ad2dfb4059f840e46cd7af38d0025a9d8d7 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 21 Jan 2014 15:48:59 -0800 Subject: mm/swap.c: reorganize put_compound_page() Tweak it so save a tab stop, make code layout slightly less nutty. Signed-off-by: Andrea Arcangeli Cc: Khalid Aziz Cc: Pravin Shelar Cc: Greg Kroah-Hartman Cc: Ben Hutchings Cc: Christoph Lameter Cc: Johannes Weiner Cc: Mel Gorman Cc: Rik van Riel Cc: Andi Kleen Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swap.c | 254 +++++++++++++++++++++++++++++++------------------------------- 1 file changed, 125 insertions(+), 129 deletions(-) (limited to 'mm') diff --git a/mm/swap.c b/mm/swap.c index 7434e3619c14..d1100b619e61 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -81,154 +81,150 @@ static void __put_compound_page(struct page *page) static void put_compound_page(struct page *page) { - if (unlikely(PageTail(page))) { - /* __split_huge_page_refcount can run under us */ - struct page *page_head = compound_trans_head(page); + struct page *page_head; - /* - * THP can not break up slab pages so avoid taking - * compound_lock() and skip the tail page refcounting - * (in _mapcount) too. Slab performs non-atomic bit - * ops on page->flags for better performance. In - * particular slab_unlock() in slub used to be a hot - * path. It is still hot on arches that do not support - * this_cpu_cmpxchg_double(). - * - * If "page" is part of a slab or hugetlbfs page it - * cannot be splitted and the head page cannot change - * from under us. And if "page" is part of a THP page - * under splitting, if the head page pointed by the - * THP tail isn't a THP head anymore, we'll find - * PageTail clear after smp_rmb() and we'll treat it - * as a single page. 
- */ - if (!__compound_tail_refcounted(page_head)) { + if (likely(!PageTail(page))) { + if (put_page_testzero(page)) { /* - * If "page" is a THP tail, we must read the tail page - * flags after the head page flags. The - * split_huge_page side enforces write memory - * barriers between clearing PageTail and before the - * head page can be freed and reallocated. + * By the time all refcounts have been released + * split_huge_page cannot run anymore from under us. */ - smp_rmb(); - if (likely(PageTail(page))) { - /* - * __split_huge_page_refcount - * cannot race here. - */ - VM_BUG_ON(!PageHead(page_head)); - VM_BUG_ON(page_mapcount(page) != 0); - if (put_page_testzero(page_head)) { - /* - * If this is the tail of a - * slab compound page, the - * tail pin must not be the - * last reference held on the - * page, because the PG_slab - * cannot be cleared before - * all tail pins (which skips - * the _mapcount tail - * refcounting) have been - * released. For hugetlbfs the - * tail pin may be the last - * reference on the page - * instead, because - * PageHeadHuge will not go - * away until the compound - * page enters the buddy - * allocator. - */ - VM_BUG_ON(PageSlab(page_head)); - __put_compound_page(page_head); - } - return; - } else - /* - * __split_huge_page_refcount - * run before us, "page" was a - * THP tail. The split - * page_head has been freed - * and reallocated as slab or - * hugetlbfs page of smaller - * order (only possible if - * reallocated as slab on - * x86). - */ - goto out_put_single; + if (PageHead(page)) + __put_compound_page(page); + else + __put_single_page(page); } + return; + } - if (likely(page != page_head && - get_page_unless_zero(page_head))) { - unsigned long flags; + /* __split_huge_page_refcount can run under us */ + page_head = compound_trans_head(page); + /* + * THP can not break up slab pages so avoid taking + * compound_lock() and skip the tail page refcounting (in + * _mapcount) too. Slab performs non-atomic bit ops on + * page->flags for better performance. In particular + * slab_unlock() in slub used to be a hot path. It is still + * hot on arches that do not support + * this_cpu_cmpxchg_double(). + * + * If "page" is part of a slab or hugetlbfs page it cannot be + * splitted and the head page cannot change from under us. And + * if "page" is part of a THP page under splitting, if the + * head page pointed by the THP tail isn't a THP head anymore, + * we'll find PageTail clear after smp_rmb() and we'll treat + * it as a single page. + */ + if (!__compound_tail_refcounted(page_head)) { + /* + * If "page" is a THP tail, we must read the tail page + * flags after the head page flags. The + * split_huge_page side enforces write memory barriers + * between clearing PageTail and before the head page + * can be freed and reallocated. + */ + smp_rmb(); + if (likely(PageTail(page))) { /* - * page_head wasn't a dangling pointer but it - * may not be a head page anymore by the time - * we obtain the lock. That is ok as long as it - * can't be freed from under us. + * __split_huge_page_refcount cannot race + * here. */ - flags = compound_lock_irqsave(page_head); - if (unlikely(!PageTail(page))) { - /* __split_huge_page_refcount run before us */ - compound_unlock_irqrestore(page_head, flags); - if (put_page_testzero(page_head)) { - /* - * The head page may have been - * freed and reallocated as a - * compound page of smaller - * order and then freed again. 
- * All we know is that it - * cannot have become: a THP - * page, a compound page of - * higher order, a tail page. - * That is because we still - * hold the refcount of the - * split THP tail and - * page_head was the THP head - * before the split. - */ - if (PageHead(page_head)) - __put_compound_page(page_head); - else - __put_single_page(page_head); - } -out_put_single: - if (put_page_testzero(page)) - __put_single_page(page); - return; + VM_BUG_ON(!PageHead(page_head)); + VM_BUG_ON(page_mapcount(page) != 0); + if (put_page_testzero(page_head)) { + /* + * If this is the tail of a slab + * compound page, the tail pin must + * not be the last reference held on + * the page, because the PG_slab + * cannot be cleared before all tail + * pins (which skips the _mapcount + * tail refcounting) have been + * released. For hugetlbfs the tail + * pin may be the last reference on + * the page instead, because + * PageHeadHuge will not go away until + * the compound page enters the buddy + * allocator. + */ + VM_BUG_ON(PageSlab(page_head)); + __put_compound_page(page_head); } - VM_BUG_ON(page_head != page->first_page); + return; + } else /* - * We can release the refcount taken by - * get_page_unless_zero() now that - * __split_huge_page_refcount() is blocked on - * the compound_lock. + * __split_huge_page_refcount run before us, + * "page" was a THP tail. The split page_head + * has been freed and reallocated as slab or + * hugetlbfs page of smaller order (only + * possible if reallocated as slab on x86). */ - if (put_page_testzero(page_head)) - VM_BUG_ON(1); - /* __split_huge_page_refcount will wait now */ - VM_BUG_ON(page_mapcount(page) <= 0); - atomic_dec(&page->_mapcount); - VM_BUG_ON(atomic_read(&page_head->_count) <= 0); - VM_BUG_ON(atomic_read(&page->_count) != 0); - compound_unlock_irqrestore(page_head, flags); + goto out_put_single; + } + + if (likely(page != page_head && get_page_unless_zero(page_head))) { + unsigned long flags; + /* + * page_head wasn't a dangling pointer but it may not + * be a head page anymore by the time we obtain the + * lock. That is ok as long as it can't be freed from + * under us. + */ + flags = compound_lock_irqsave(page_head); + if (unlikely(!PageTail(page))) { + /* __split_huge_page_refcount run before us */ + compound_unlock_irqrestore(page_head, flags); if (put_page_testzero(page_head)) { + /* + * The head page may have been freed + * and reallocated as a compound page + * of smaller order and then freed + * again. All we know is that it + * cannot have become: a THP page, a + * compound page of higher order, a + * tail page. That is because we + * still hold the refcount of the + * split THP tail and page_head was + * the THP head before the split. + */ if (PageHead(page_head)) __put_compound_page(page_head); else __put_single_page(page_head); } - } else { - /* page_head is a dangling pointer */ - VM_BUG_ON(PageTail(page)); - goto out_put_single; +out_put_single: + if (put_page_testzero(page)) + __put_single_page(page); + return; } - } else if (put_page_testzero(page)) { - if (PageHead(page)) - __put_compound_page(page); - else - __put_single_page(page); + VM_BUG_ON(page_head != page->first_page); + /* + * We can release the refcount taken by + * get_page_unless_zero() now that + * __split_huge_page_refcount() is blocked on the + * compound_lock. 
+ */ + if (put_page_testzero(page_head)) + VM_BUG_ON(1); + /* __split_huge_page_refcount will wait now */ + VM_BUG_ON(page_mapcount(page) <= 0); + atomic_dec(&page->_mapcount); + VM_BUG_ON(atomic_read(&page_head->_count) <= 0); + VM_BUG_ON(atomic_read(&page->_count) != 0); + compound_unlock_irqrestore(page_head, flags); + + if (put_page_testzero(page_head)) { + if (PageHead(page_head)) + __put_compound_page(page_head); + else + __put_single_page(page_head); + } + } else { + /* page_head is a dangling pointer */ + VM_BUG_ON(PageTail(page)); + goto out_put_single; } } -- cgit v1.2.3 From 9b7ac260188ddacffdcaadd6a61e4a502238a63f Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Tue, 21 Jan 2014 15:49:01 -0800 Subject: mm/hugetlb.c: defer PageHeadHuge() symbol export No actual need of it. So keep it internal. Signed-off-by: Andrea Arcangeli Cc: Khalid Aziz Cc: Pravin Shelar Cc: Greg Kroah-Hartman Cc: Ben Hutchings Cc: Christoph Lameter Cc: Johannes Weiner Cc: Mel Gorman Cc: Rik van Riel Cc: Andi Kleen Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hugetlb.c | 1 - 1 file changed, 1 deletion(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 1d9125360bf5..f730b7a37590 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -709,7 +709,6 @@ int PageHeadHuge(struct page *page_head) return get_compound_page_dtor(page_head) == free_huge_page; } -EXPORT_SYMBOL_GPL(PageHeadHuge); pgoff_t __basepage_index(struct page *page) { -- cgit v1.2.3 From c728852f5dd41ce34e7ce0a179cf28cd5f4dc301 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 21 Jan 2014 15:49:02 -0800 Subject: mm: thp: __get_page_tail_foll() can use get_huge_page_tail() Cleanup. Change __get_page_tail_foll() to use get_huge_page_tail() to avoid the code duplication. Signed-off-by: Oleg Nesterov Cc: Thomas Gleixner Cc: Dave Jones Cc: Darren Hart Cc: Peter Zijlstra Cc: Mel Gorman Acked-by: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/internal.h | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/internal.h b/mm/internal.h index a85a3ab1f7ef..a346ba120e42 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -47,12 +47,9 @@ static inline void __get_page_tail_foll(struct page *page, * page_cache_get_speculative()) on tail pages. */ VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0); - VM_BUG_ON(atomic_read(&page->_count) != 0); - VM_BUG_ON(page_mapcount(page) < 0); if (get_page_head) atomic_inc(&page->first_page->_count); - if (compound_tail_refcounted(page->first_page)) - atomic_inc(&page->_mapcount); + get_huge_page_tail(page); } /* -- cgit v1.2.3 From 943dca1a1fcbccb58de944669b833fd38a6c809b Mon Sep 17 00:00:00 2001 From: Yasuaki Ishimatsu Date: Tue, 21 Jan 2014 15:49:06 -0800 Subject: mm: get rid of unnecessary pageblock scanning in setup_zone_migrate_reserve Yasuaki Ishimatsu reported memory hot-add spent more than 5 _hours_ on 9TB memory machine since onlining memory sections is too slow. And we found out setup_zone_migrate_reserve spent >90% of the time. The problem is, setup_zone_migrate_reserve scans all pageblocks unconditionally, but it is only necessary if the number of reserved block was reduced (i.e. memory hot remove). Moreover, maximum MIGRATE_RESERVE per zone is currently 2. It means that the number of reserved pageblocks is almost always unchanged. 
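In outline, setup_zone_migrate_reserve() now remembers how many pageblocks it reserved last time and returns immediately when nothing has changed; this is the core of the hunk quoted further down:

	reserve = min(2, reserve);
	old_reserve = zone->nr_migrate_reserve_block;

	/* When memory hot-add, we almost always need to do nothing */
	if (reserve == old_reserve)
		return;
	zone->nr_migrate_reserve_block = reserve;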
This patch adds zone->nr_migrate_reserve_block to maintain the number of MIGRATE_RESERVE pageblocks and it reduces the overhead of setup_zone_migrate_reserve dramatically. The following table shows time of onlining a memory section. Amount of memory | 128GB | 192GB | 256GB| --------------------------------------------- linux-3.12 | 23.9 | 31.4 | 44.5 | This patch | 8.3 | 8.3 | 8.6 | Mel's proposal patch | 10.9 | 19.2 | 31.3 | --------------------------------------------- (millisecond) 128GB : 4 nodes and each node has 32GB of memory 192GB : 6 nodes and each node has 32GB of memory 256GB : 8 nodes and each node has 32GB of memory (*1) Mel proposed his idea by the following threads. https://lkml.org/lkml/2013/10/30/272 [akpm@linux-foundation.org: tweak comment] Signed-off-by: KOSAKI Motohiro Signed-off-by: Yasuaki Ishimatsu Reported-by: Yasuaki Ishimatsu Tested-by: Yasuaki Ishimatsu Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 6 ++++++ mm/page_alloc.c | 13 +++++++++++++ 2 files changed, 19 insertions(+) (limited to 'mm') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index bd791e452ad7..67ab5febabf7 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -489,6 +489,12 @@ struct zone { unsigned long present_pages; unsigned long managed_pages; + /* + * Number of MIGRATE_RESEVE page block. To maintain for just + * optimization. Protected by zone->lock. + */ + int nr_migrate_reserve_block; + /* * rarely used fields: */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5248fe070aa4..89d81f4429ca 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3901,6 +3901,7 @@ static void setup_zone_migrate_reserve(struct zone *zone) struct page *page; unsigned long block_migratetype; int reserve; + int old_reserve; /* * Get the start pfn, end pfn and the number of blocks to reserve @@ -3922,6 +3923,12 @@ static void setup_zone_migrate_reserve(struct zone *zone) * future allocation of hugepages at runtime. */ reserve = min(2, reserve); + old_reserve = zone->nr_migrate_reserve_block; + + /* When memory hot-add, we almost always need to do nothing */ + if (reserve == old_reserve) + return; + zone->nr_migrate_reserve_block = reserve; for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { if (!pfn_valid(pfn)) @@ -3959,6 +3966,12 @@ static void setup_zone_migrate_reserve(struct zone *zone) reserve--; continue; } + } else if (!old_reserve) { + /* + * At boot time we don't need to scan the whole zone + * for turning off MIGRATE_RESERVE. + */ + break; } /* -- cgit v1.2.3 From b35f1819acd9243a3ff7ad25b1fa8bd6bfe80fb2 Mon Sep 17 00:00:00 2001 From: Kirill A. Shutemov Date: Tue, 21 Jan 2014 15:49:07 -0800 Subject: mm: create a separate slab for page->ptl allocation If DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC are enabled spinlock_t on x86_64 is 72 bytes. For page->ptl they will be allocated from kmalloc-96 slab, so we loose 24 on each. An average system can easily allocate few tens thousands of page->ptl and overhead is significant. Let's create a separate slab for page->ptl allocation to solve this. 
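As a rough back-of-the-envelope figure using the sizes above: each lock wastes 96 - 72 = 24 bytes when placed in kmalloc-96, so the ~32000 page->ptl objects in the slabinfo sample below waste about 32000 * 24 bytes, roughly 750 KB, which a dedicated 72-byte cache gives back.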
To make sure that it really works this time, some numbers from my test machine (just booted, no load): Before: # grep '^\(kmalloc-96\|page->ptl\)' /proc/slabinfo kmalloc-96 31987 32190 128 30 1 : tunables 120 60 8 : slabdata 1073 1073 92 After: # grep '^\(kmalloc-96\|page->ptl\)' /proc/slabinfo page->ptl 27516 28143 72 53 1 : tunables 120 60 8 : slabdata 531 531 9 kmalloc-96 3853 5280 128 30 1 : tunables 120 60 8 : slabdata 176 176 0 Note that the patch is useful not only for debug case, but also for PREEMPT_RT, where spinlock_t is always bloated. Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 12 ++++++++++++ init/main.c | 2 +- mm/memory.c | 13 +++++++++++-- 3 files changed, 24 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index 58202c26c559..fc4415256ec3 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1350,6 +1350,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a #if USE_SPLIT_PTE_PTLOCKS #if ALLOC_SPLIT_PTLOCKS +void __init ptlock_cache_init(void); extern bool ptlock_alloc(struct page *page); extern void ptlock_free(struct page *page); @@ -1358,6 +1359,10 @@ static inline spinlock_t *ptlock_ptr(struct page *page) return page->ptl; } #else /* ALLOC_SPLIT_PTLOCKS */ +static inline void ptlock_cache_init(void) +{ +} + static inline bool ptlock_alloc(struct page *page) { return true; @@ -1410,10 +1415,17 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) { return &mm->page_table_lock; } +static inline void ptlock_cache_init(void) {} static inline bool ptlock_init(struct page *page) { return true; } static inline void pte_lock_deinit(struct page *page) {} #endif /* USE_SPLIT_PTE_PTLOCKS */ +static inline void pgtable_init(void) +{ + ptlock_cache_init(); + pgtable_cache_init(); +} + static inline bool pgtable_page_ctor(struct page *page) { inc_zone_page_state(page, NR_PAGETABLE); diff --git a/init/main.c b/init/main.c index febc511e078a..01573fdfa186 100644 --- a/init/main.c +++ b/init/main.c @@ -476,7 +476,7 @@ static void __init mm_init(void) mem_init(); kmem_cache_init(); percpu_init_late(); - pgtable_cache_init(); + pgtable_init(); vmalloc_init(); } diff --git a/mm/memory.c b/mm/memory.c index e9c550484ba6..86487dfa5e59 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4275,11 +4275,20 @@ void copy_user_huge_page(struct page *dst, struct page *src, #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS + +static struct kmem_cache *page_ptl_cachep; + +void __init ptlock_cache_init(void) +{ + page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0, + SLAB_PANIC, NULL); +} + bool ptlock_alloc(struct page *page) { spinlock_t *ptl; - ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL); + ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL); if (!ptl) return false; page->ptl = ptl; @@ -4288,6 +4297,6 @@ bool ptlock_alloc(struct page *page) void ptlock_free(struct page *page) { - kfree(page->ptl); + kmem_cache_free(page_ptl_cachep, page->ptl); } #endif -- cgit v1.2.3 From 549543dff797ae1081f61a69f8511c61806c3735 Mon Sep 17 00:00:00 2001 From: Zhi Yong Wu Date: Tue, 21 Jan 2014 15:49:08 -0800 Subject: mm, memory-failure: fix typo in me_pagecache_dirty() [akpm@linux-foundation.org: s/cache/pagecache/] Signed-off-by: Zhi Yong Wu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory-failure.c | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memory-failure.c b/mm/memory-failure.c index fabe55046c1d..9fa6586d5275 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -611,7 +611,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn) } /* - * Dirty cache page page + * Dirty pagecache page * Issues: when the error hit a hole page the error is not properly * propagated. */ -- cgit v1.2.3 From e8569dd299dbc7bac878325c0bdc7aa449eae479 Mon Sep 17 00:00:00 2001 From: Andreas Sandberg Date: Tue, 21 Jan 2014 15:49:09 -0800 Subject: mm/hugetlb.c: call MMU notifiers when copying a hugetlb page range When copy_hugetlb_page_range() is called to copy a range of hugetlb mappings, the secondary MMUs are not notified if there is a protection downgrade, which breaks COW semantics in KVM. This patch adds the necessary MMU notifier calls. Signed-off-by: Andreas Sandberg Acked-by: Steve Capper Acked-by: Marc Zyngier Cc: Mel Gorman Cc: Rik van Riel Cc: Hugh Dickins Cc: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hugetlb.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index f730b7a37590..1697ff0cc53a 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2346,17 +2346,27 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, int cow; struct hstate *h = hstate_vma(vma); unsigned long sz = huge_page_size(h); + unsigned long mmun_start; /* For mmu_notifiers */ + unsigned long mmun_end; /* For mmu_notifiers */ + int ret = 0; cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; + mmun_start = vma->vm_start; + mmun_end = vma->vm_end; + if (cow) + mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end); + for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { spinlock_t *src_ptl, *dst_ptl; src_pte = huge_pte_offset(src, addr); if (!src_pte) continue; dst_pte = huge_pte_alloc(dst, addr, sz); - if (!dst_pte) - goto nomem; + if (!dst_pte) { + ret = -ENOMEM; + break; + } /* If the pagetables are shared don't copy or take references */ if (dst_pte == src_pte) @@ -2377,10 +2387,11 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, spin_unlock(src_ptl); spin_unlock(dst_ptl); } - return 0; -nomem: - return -ENOMEM; + if (cow) + mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end); + + return ret; } static int is_hugetlb_entry_migration(pte_t pte) -- cgit v1.2.3 From ece86e222db48d04bda218a2be70e384518bb08c Mon Sep 17 00:00:00 2001 From: Jianyu Zhan Date: Tue, 21 Jan 2014 15:49:12 -0800 Subject: mm/vmalloc: interchage the implementation of vmalloc_to_{pfn,page} Currently we are implementing vmalloc_to_pfn() as a wrapper around vmalloc_to_page(), which is implemented as follow: 1. walks the page talbes to generates the corresponding pfn, 2. then converts the pfn to struct page, 3. returns it. And vmalloc_to_pfn() re-wraps vmalloc_to_page() to get the pfn. This seems too circuitous, so this patch reverses the way: implement vmalloc_to_page() as a wrapper around vmalloc_to_pfn(). This makes vmalloc_to_pfn() and vmalloc_to_page() slightly more efficient. No functional change. 
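Both helpers keep their existing signatures, so callers do not change. A typical use (illustrative only, not from this patch; sgl and nr_pages are assumed to be set up by the caller, and linux/vmalloc.h plus linux/scatterlist.h are assumed included) is walking a vmalloc()ed buffer page by page, for example to populate a preallocated scatterlist:

	/* sketch: map each page of a vmalloc area into a scatterlist */
	static void fill_sg_from_vmalloc(struct scatterlist *sgl, void *buf, int nr_pages)
	{
		int i;

		for (i = 0; i < nr_pages; i++) {
			sg_set_page(&sgl[i], vmalloc_to_page(buf + i * PAGE_SIZE),
				    PAGE_SIZE, 0);
			pr_debug("vmalloc page %d -> pfn %#lx\n", i,
				 vmalloc_to_pfn(buf + i * PAGE_SIZE));
		}
	}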
Signed-off-by: Jianyu Zhan Cc: Vladimir Murzin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmalloc.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 0fdf96803c5b..e4f0db2a3eae 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -220,12 +220,12 @@ int is_vmalloc_or_module_addr(const void *x) } /* - * Walk a vmap address to the struct page it maps. + * Walk a vmap address to the physical pfn it maps to. */ -struct page *vmalloc_to_page(const void *vmalloc_addr) +unsigned long vmalloc_to_pfn(const void *vmalloc_addr) { unsigned long addr = (unsigned long) vmalloc_addr; - struct page *page = NULL; + unsigned long pfn = 0; pgd_t *pgd = pgd_offset_k(addr); /* @@ -244,23 +244,23 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) ptep = pte_offset_map(pmd, addr); pte = *ptep; if (pte_present(pte)) - page = pte_page(pte); + pfn = pte_pfn(pte); pte_unmap(ptep); } } } - return page; + return pfn; } -EXPORT_SYMBOL(vmalloc_to_page); +EXPORT_SYMBOL(vmalloc_to_pfn); /* - * Map a vmalloc()-space virtual address to the physical page frame number. + * Map a vmalloc()-space virtual address to the struct page. */ -unsigned long vmalloc_to_pfn(const void *vmalloc_addr) +struct page *vmalloc_to_page(const void *vmalloc_addr) { - return page_to_pfn(vmalloc_to_page(vmalloc_addr)); + return pfn_to_page(vmalloc_to_pfn(vmalloc_addr)); } -EXPORT_SYMBOL(vmalloc_to_pfn); +EXPORT_SYMBOL(vmalloc_to_page); /*** Global kva allocator ***/ -- cgit v1.2.3 From aec6a8889a98a0cd58357cd0937a25189908f191 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 21 Jan 2014 15:49:13 -0800 Subject: mm, show_mem: remove SHOW_MEM_FILTER_PAGE_COUNT Commit 4b59e6c47309 ("mm, show_mem: suppress page counts in non-blockable contexts") introduced SHOW_MEM_FILTER_PAGE_COUNT to suppress PFN walks on large memory machines. Commit c78e93630d15 ("mm: do not walk all of system memory during show_mem") avoided a PFN walk in the generic show_mem helper which removes the requirement for SHOW_MEM_FILTER_PAGE_COUNT in that case. This patch removes PFN walkers from the arch-specific implementations that report on a per-node or per-zone granularity. ARM and unicore32 still do a PFN walk as they report memory usage on each bank which is a much finer granularity where the debugging information may still be of use. As the remaining arches doing PFN walks have relatively small amounts of memory, this patch simply removes SHOW_MEM_FILTER_PAGE_COUNT. 
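The replacement pattern in those per-node reports is a straight walk over the node's zones using counters that are already maintained, excerpted from the ia64 and parisc hunks below:

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];

		if (!populated_zone(zone))
			continue;

		reserved += zone->present_pages - zone->managed_pages;
	}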
[akpm@linux-foundation.org: fix parisc] Signed-off-by: Mel Gorman Acked-by: David Rientjes Cc: Tony Luck Cc: Russell King Cc: James Bottomley Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/mm/init.c | 3 --- arch/ia64/mm/contig.c | 68 ------------------------------------------------ arch/ia64/mm/discontig.c | 63 -------------------------------------------- arch/ia64/mm/init.c | 48 ++++++++++++++++++++++++++++++++++ arch/parisc/mm/init.c | 59 ++++++++++++----------------------------- arch/unicore32/mm/init.c | 3 --- include/linux/mm.h | 1 - lib/show_mem.c | 3 --- mm/page_alloc.c | 7 ----- 9 files changed, 65 insertions(+), 190 deletions(-) (limited to 'mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 3e8f106ee5fe..2e71e245df90 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -92,9 +92,6 @@ void show_mem(unsigned int filter) printk("Mem-info:\n"); show_free_areas(filter); - if (filter & SHOW_MEM_FILTER_PAGE_COUNT) - return; - for_each_bank (i, mi) { struct membank *bank = &mi->bank[i]; unsigned int pfn1, pfn2; diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index da5237d636d6..52715a71aede 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -31,74 +31,6 @@ static unsigned long max_gap; #endif -/** - * show_mem - give short summary of memory stats - * - * Shows a simple page count of reserved and used pages in the system. - * For discontig machines, it does this on a per-pgdat basis. - */ -void show_mem(unsigned int filter) -{ - int i, total_reserved = 0; - int total_shared = 0, total_cached = 0; - unsigned long total_present = 0; - pg_data_t *pgdat; - - printk(KERN_INFO "Mem-info:\n"); - show_free_areas(filter); - printk(KERN_INFO "Node memory in pages:\n"); - if (filter & SHOW_MEM_FILTER_PAGE_COUNT) - return; - for_each_online_pgdat(pgdat) { - unsigned long present; - unsigned long flags; - int shared = 0, cached = 0, reserved = 0; - int nid = pgdat->node_id; - - if (skip_free_areas_node(filter, nid)) - continue; - pgdat_resize_lock(pgdat, &flags); - present = pgdat->node_present_pages; - for(i = 0; i < pgdat->node_spanned_pages; i++) { - struct page *page; - if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) - touch_nmi_watchdog(); - if (pfn_valid(pgdat->node_start_pfn + i)) - page = pfn_to_page(pgdat->node_start_pfn + i); - else { -#ifdef CONFIG_VIRTUAL_MEM_MAP - if (max_gap < LARGE_GAP) - continue; -#endif - i = vmemmap_find_next_valid_pfn(nid, i) - 1; - continue; - } - if (PageReserved(page)) - reserved++; - else if (PageSwapCache(page)) - cached++; - else if (page_count(page)) - shared += page_count(page)-1; - } - pgdat_resize_unlock(pgdat, &flags); - total_present += present; - total_reserved += reserved; - total_cached += cached; - total_shared += shared; - printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, " - "shrd: %10d, swpd: %10d\n", nid, - present, reserved, shared, cached); - } - printk(KERN_INFO "%ld pages of RAM\n", total_present); - printk(KERN_INFO "%d reserved pages\n", total_reserved); - printk(KERN_INFO "%d pages shared\n", total_shared); - printk(KERN_INFO "%d pages swap cached\n", total_cached); - printk(KERN_INFO "Total of %ld pages in page table cache\n", - quicklist_total_size()); - printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages()); -} - - /* physical address where the bootmem map is located */ unsigned long bootmap_start; diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 2de08f4d9930..878626805369 100644 --- a/arch/ia64/mm/discontig.c +++ 
b/arch/ia64/mm/discontig.c @@ -607,69 +607,6 @@ void *per_cpu_init(void) } #endif /* CONFIG_SMP */ -/** - * show_mem - give short summary of memory stats - * - * Shows a simple page count of reserved and used pages in the system. - * For discontig machines, it does this on a per-pgdat basis. - */ -void show_mem(unsigned int filter) -{ - int i, total_reserved = 0; - int total_shared = 0, total_cached = 0; - unsigned long total_present = 0; - pg_data_t *pgdat; - - printk(KERN_INFO "Mem-info:\n"); - show_free_areas(filter); - if (filter & SHOW_MEM_FILTER_PAGE_COUNT) - return; - printk(KERN_INFO "Node memory in pages:\n"); - for_each_online_pgdat(pgdat) { - unsigned long present; - unsigned long flags; - int shared = 0, cached = 0, reserved = 0; - int nid = pgdat->node_id; - - if (skip_free_areas_node(filter, nid)) - continue; - pgdat_resize_lock(pgdat, &flags); - present = pgdat->node_present_pages; - for(i = 0; i < pgdat->node_spanned_pages; i++) { - struct page *page; - if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) - touch_nmi_watchdog(); - if (pfn_valid(pgdat->node_start_pfn + i)) - page = pfn_to_page(pgdat->node_start_pfn + i); - else { - i = vmemmap_find_next_valid_pfn(nid, i) - 1; - continue; - } - if (PageReserved(page)) - reserved++; - else if (PageSwapCache(page)) - cached++; - else if (page_count(page)) - shared += page_count(page)-1; - } - pgdat_resize_unlock(pgdat, &flags); - total_present += present; - total_reserved += reserved; - total_cached += cached; - total_shared += shared; - printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, " - "shrd: %10d, swpd: %10d\n", nid, - present, reserved, shared, cached); - } - printk(KERN_INFO "%ld pages of RAM\n", total_present); - printk(KERN_INFO "%d reserved pages\n", total_reserved); - printk(KERN_INFO "%d pages shared\n", total_shared); - printk(KERN_INFO "%d pages swap cached\n", total_cached); - printk(KERN_INFO "Total of %ld pages in page table cache\n", - quicklist_total_size()); - printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages()); -} - /** * call_pernode_memory - use SRAT to call callback functions with node info * @start: physical start of range diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 88504abf5704..25c350264a41 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -684,3 +684,51 @@ per_linux32_init(void) } __initcall(per_linux32_init); + +/** + * show_mem - give short summary of memory stats + * + * Shows a simple page count of reserved and used pages in the system. + * For discontig machines, it does this on a per-pgdat basis. 
+ */ +void show_mem(unsigned int filter) +{ + int total_reserved = 0; + unsigned long total_present = 0; + pg_data_t *pgdat; + + printk(KERN_INFO "Mem-info:\n"); + show_free_areas(filter); + printk(KERN_INFO "Node memory in pages:\n"); + for_each_online_pgdat(pgdat) { + unsigned long present; + unsigned long flags; + int reserved = 0; + int nid = pgdat->node_id; + int zoneid; + + if (skip_free_areas_node(filter, nid)) + continue; + pgdat_resize_lock(pgdat, &flags); + + for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { + struct zone *zone = &pgdat->node_zones[zoneid]; + if (!populated_zone(zone)) + continue; + + reserved += zone->present_pages - zone->managed_pages; + } + present = pgdat->node_present_pages; + + pgdat_resize_unlock(pgdat, &flags); + total_present += present; + total_reserved += reserved; + printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, ", + nid, present, reserved); + } + printk(KERN_INFO "%ld pages of RAM\n", total_present); + printk(KERN_INFO "%d reserved pages\n", total_reserved); + printk(KERN_INFO "Total of %ld pages in page table cache\n", + quicklist_total_size()); + printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages()); +} diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 96f8168cf4ec..ae085ad0fba0 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -645,55 +645,30 @@ EXPORT_SYMBOL(empty_zero_page); void show_mem(unsigned int filter) { - int i,free = 0,total = 0,reserved = 0; - int shared = 0, cached = 0; + int total = 0,reserved = 0; + pg_data_t *pgdat; printk(KERN_INFO "Mem-info:\n"); show_free_areas(filter); - if (filter & SHOW_MEM_FILTER_PAGE_COUNT) - return; -#ifndef CONFIG_DISCONTIGMEM - i = max_mapnr; - while (i-- > 0) { - total++; - if (PageReserved(mem_map+i)) - reserved++; - else if (PageSwapCache(mem_map+i)) - cached++; - else if (!page_count(&mem_map[i])) - free++; - else - shared += page_count(&mem_map[i]) - 1; - } -#else - for (i = 0; i < npmem_ranges; i++) { - int j; - for (j = node_start_pfn(i); j < node_end_pfn(i); j++) { - struct page *p; - unsigned long flags; - - pgdat_resize_lock(NODE_DATA(i), &flags); - p = nid_page_nr(i, j) - node_start_pfn(i); - - total++; - if (PageReserved(p)) - reserved++; - else if (PageSwapCache(p)) - cached++; - else if (!page_count(p)) - free++; - else - shared += page_count(p) - 1; - pgdat_resize_unlock(NODE_DATA(i), &flags); - } + for_each_online_pgdat(pgdat) { + unsigned long flags; + int zoneid; + + pgdat_resize_lock(pgdat, &flags); + for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { + struct zone *zone = &pgdat->node_zones[zoneid]; + if (!populated_zone(zone)) + continue; + + total += zone->present_pages; + reserved = zone->present_pages - zone->managed_pages; + } + pgdat_resize_unlock(pgdat, &flags); } -#endif + printk(KERN_INFO "%d pages of RAM\n", total); printk(KERN_INFO "%d reserved pages\n", reserved); - printk(KERN_INFO "%d pages shared\n", shared); - printk(KERN_INFO "%d pages swap cached\n", cached); - #ifdef CONFIG_DISCONTIGMEM { diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c index ae6bc036db92..be2bde9b07cf 100644 --- a/arch/unicore32/mm/init.c +++ b/arch/unicore32/mm/init.c @@ -66,9 +66,6 @@ void show_mem(unsigned int filter) printk(KERN_DEFAULT "Mem-info:\n"); show_free_areas(filter); - if (filter & SHOW_MEM_FILTER_PAGE_COUNT) - return; - for_each_bank(i, mi) { struct membank *bank = &mi->bank[i]; unsigned int pfn1, pfn2; diff --git a/include/linux/mm.h b/include/linux/mm.h index fc4415256ec3..4c0c01afc19b 100644 --- 
a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1016,7 +1016,6 @@ extern void pagefault_out_of_memory(void); * various contexts. */ #define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */ -#define SHOW_MEM_FILTER_PAGE_COUNT (0x0002u) /* page type count */ extern void show_free_areas(unsigned int flags); extern bool skip_free_areas_node(unsigned int flags, int nid); diff --git a/lib/show_mem.c b/lib/show_mem.c index 5847a4921b8e..f58689f5a24e 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c @@ -17,9 +17,6 @@ void show_mem(unsigned int filter) printk("Mem-Info:\n"); show_free_areas(filter); - if (filter & SHOW_MEM_FILTER_PAGE_COUNT) - return; - for_each_online_pgdat(pgdat) { unsigned long flags; int zoneid; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 89d81f4429ca..ec4417cb458a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2071,13 +2071,6 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...) debug_guardpage_minorder() > 0) return; - /* - * Walking all memory to count page types is very expensive and should - * be inhibited in non-blockable contexts. - */ - if (!(gfp_mask & __GFP_WAIT)) - filter |= SHOW_MEM_FILTER_PAGE_COUNT; - /* * This documents exceptions given to allocations in certain * contexts that are allowed to allocate outside current's set -- cgit v1.2.3 From 49f0ce5f92321cdcf741e35f385669a421013cb7 Mon Sep 17 00:00:00 2001 From: Jerome Marchand Date: Tue, 21 Jan 2014 15:49:14 -0800 Subject: mm: add overcommit_kbytes sysctl variable Some applications that run on HPC clusters are designed around the availability of RAM and the overcommit ratio is fine tuned to get the maximum usage of memory without swapping. With growing memory, the 1%-of-all-RAM grain provided by overcommit_ratio has become too coarse for these workload (on a 2TB machine it represents no less than 20GB). This patch adds the new overcommit_kbytes sysctl variable that allow a much finer grain. [akpm@linux-foundation.org: coding-style fixes] [akpm@linux-foundation.org: fix nommu build] Signed-off-by: Jerome Marchand Cc: Dave Hansen Cc: Alan Cox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/sysctl/vm.txt | 12 ++++++++++++ Documentation/vm/overcommit-accounting | 7 ++++--- include/linux/mm.h | 9 +++++++++ include/linux/mman.h | 1 + kernel/sysctl.c | 11 ++++++++--- mm/mmap.c | 1 + mm/nommu.c | 1 + mm/util.c | 36 ++++++++++++++++++++++++++++++++-- 8 files changed, 70 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index 1fbd4eb7b64a..9f5481bdc5a4 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -47,6 +47,7 @@ Currently, these files are in /proc/sys/vm: - numa_zonelist_order - oom_dump_tasks - oom_kill_allocating_task +- overcommit_kbytes - overcommit_memory - overcommit_ratio - page-cluster @@ -574,6 +575,17 @@ The default value is 0. ============================================================== +overcommit_kbytes: + +When overcommit_memory is set to 2, the committed address space is not +permitted to exceed swap plus this amount of physical RAM. See below. + +Note: overcommit_kbytes is the counterpart of overcommit_ratio. Only one +of them may be specified at a time. Setting one disables the other (which +then appears as 0 when read). + +============================================================== + overcommit_memory: This value contains a flag that enables memory overcommitment. 
diff --git a/Documentation/vm/overcommit-accounting b/Documentation/vm/overcommit-accounting index 8eaa2fc4b8fa..cbfaaa674118 100644 --- a/Documentation/vm/overcommit-accounting +++ b/Documentation/vm/overcommit-accounting @@ -14,8 +14,8 @@ The Linux kernel supports the following overcommit handling modes 2 - Don't overcommit. The total address space commit for the system is not permitted to exceed swap + a - configurable percentage (default is 50) of physical RAM. - Depending on the percentage you use, in most situations + configurable amount (default is 50%) of physical RAM. + Depending on the amount you use, in most situations this means a process will not be killed while accessing pages but will receive errors on memory allocation as appropriate. @@ -26,7 +26,8 @@ The Linux kernel supports the following overcommit handling modes The overcommit policy is set via the sysctl `vm.overcommit_memory'. -The overcommit percentage is set via `vm.overcommit_ratio'. +The overcommit amount can be set via `vm.overcommit_ratio' (percentage) +or `vm.overcommit_kbytes' (absolute value). The current overcommit limit and amount committed are viewable in /proc/meminfo as CommitLimit and Committed_AS respectively. diff --git a/include/linux/mm.h b/include/linux/mm.h index 4c0c01afc19b..a512dd836931 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -57,6 +57,15 @@ extern int sysctl_legacy_va_layout; extern unsigned long sysctl_user_reserve_kbytes; extern unsigned long sysctl_admin_reserve_kbytes; +extern int sysctl_overcommit_memory; +extern int sysctl_overcommit_ratio; +extern unsigned long sysctl_overcommit_kbytes; + +extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *, + size_t *, loff_t *); +extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *, + size_t *, loff_t *); + #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) /* to align the pointer to the (next) page boundary */ diff --git a/include/linux/mman.h b/include/linux/mman.h index 7f7f8dae4b1d..16373c8f5f57 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -9,6 +9,7 @@ extern int sysctl_overcommit_memory; extern int sysctl_overcommit_ratio; +extern unsigned long sysctl_overcommit_kbytes; extern struct percpu_counter vm_committed_as; #ifdef CONFIG_SMP diff --git a/kernel/sysctl.c b/kernel/sysctl.c index c8da99f905cf..332cefcdb04b 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -95,8 +95,6 @@ #if defined(CONFIG_SYSCTL) /* External variables not in a header file. 
*/ -extern int sysctl_overcommit_memory; -extern int sysctl_overcommit_ratio; extern int max_threads; extern int suid_dumpable; #ifdef CONFIG_COREDUMP @@ -1121,7 +1119,14 @@ static struct ctl_table vm_table[] = { .data = &sysctl_overcommit_ratio, .maxlen = sizeof(sysctl_overcommit_ratio), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = overcommit_ratio_handler, + }, + { + .procname = "overcommit_kbytes", + .data = &sysctl_overcommit_kbytes, + .maxlen = sizeof(sysctl_overcommit_kbytes), + .mode = 0644, + .proc_handler = overcommit_kbytes_handler, }, { .procname = "page-cluster", diff --git a/mm/mmap.c b/mm/mmap.c index 834b2d785f1e..39552de6e1db 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -86,6 +86,7 @@ EXPORT_SYMBOL(vm_get_page_prot); int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */ int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */ +unsigned long sysctl_overcommit_kbytes __read_mostly; int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */ unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */ diff --git a/mm/nommu.c b/mm/nommu.c index fec093adad9c..8740213b1647 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -60,6 +60,7 @@ unsigned long highest_memmap_pfn; struct percpu_counter vm_committed_as; int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ int sysctl_overcommit_ratio = 50; /* default is 50% */ +unsigned long sysctl_overcommit_kbytes __read_mostly; int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS; unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */ diff --git a/mm/util.c b/mm/util.c index 808f375648e7..a24aa22f2473 100644 --- a/mm/util.c +++ b/mm/util.c @@ -404,13 +404,45 @@ struct address_space *page_mapping(struct page *page) return mapping; } +int overcommit_ratio_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (ret == 0 && write) + sysctl_overcommit_kbytes = 0; + return ret; +} + +int overcommit_kbytes_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + + ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); + if (ret == 0 && write) + sysctl_overcommit_ratio = 0; + return ret; +} + /* * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used */ unsigned long vm_commit_limit(void) { - return ((totalram_pages - hugetlb_total_pages()) - * sysctl_overcommit_ratio / 100) + total_swap_pages; + unsigned long allowed; + + if (sysctl_overcommit_kbytes) + allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10); + else + allowed = ((totalram_pages - hugetlb_total_pages()) + * sysctl_overcommit_ratio / 100); + allowed += total_swap_pages; + + return allowed; } -- cgit v1.2.3 From 363ee17f0f405faff74d9aaf93d21d5f41d5102d Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Tue, 21 Jan 2014 15:49:15 -0800 Subject: mm/mmap.c: add mlock_future_check() helper Both do_brk and do_mmap_pgoff verify that we are actually capable of locking future pages if the corresponding VM_LOCKED flags are used. Encapsulate this logic into a single mlock_future_check() helper function. 
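With the helper in place, both call sites reduce to a single call each; a condensed sketch of the intended usage, using only names taken from the diff below:

	/* in do_mmap_pgoff(): vm_flags carries VM_LOCKED when requested */
	if (mlock_future_check(mm, vm_flags, len))
		return -EAGAIN;

	/* in do_brk(): the anonymous brk mapping inherits mm->def_flags */
	error = mlock_future_check(mm, mm->def_flags, len);
	if (error)
		return error;
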
Signed-off-by: Davidlohr Bueso Cc: Rik van Riel Reviewed-by: Michel Lespinasse Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mmap.c | 45 +++++++++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 22 deletions(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index 39552de6e1db..a0e7153a79e6 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1191,6 +1191,24 @@ static inline unsigned long round_hint_to_min(unsigned long hint) return hint; } +static inline int mlock_future_check(struct mm_struct *mm, + unsigned long flags, + unsigned long len) +{ + unsigned long locked, lock_limit; + + /* mlock MCL_FUTURE? */ + if (flags & VM_LOCKED) { + locked = len >> PAGE_SHIFT; + locked += mm->locked_vm; + lock_limit = rlimit(RLIMIT_MEMLOCK); + lock_limit >>= PAGE_SHIFT; + if (locked > lock_limit && !capable(CAP_IPC_LOCK)) + return -EAGAIN; + } + return 0; +} + /* * The caller must hold down_write(¤t->mm->mmap_sem). */ @@ -1252,16 +1270,8 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, if (!can_do_mlock()) return -EPERM; - /* mlock MCL_FUTURE? */ - if (vm_flags & VM_LOCKED) { - unsigned long locked, lock_limit; - locked = len >> PAGE_SHIFT; - locked += mm->locked_vm; - lock_limit = rlimit(RLIMIT_MEMLOCK); - lock_limit >>= PAGE_SHIFT; - if (locked > lock_limit && !capable(CAP_IPC_LOCK)) - return -EAGAIN; - } + if (mlock_future_check(mm, vm_flags, len)) + return -EAGAIN; if (file) { struct inode *inode = file_inode(file); @@ -2592,18 +2602,9 @@ static unsigned long do_brk(unsigned long addr, unsigned long len) if (error & ~PAGE_MASK) return error; - /* - * mlock MCL_FUTURE? - */ - if (mm->def_flags & VM_LOCKED) { - unsigned long locked, lock_limit; - locked = len >> PAGE_SHIFT; - locked += mm->locked_vm; - lock_limit = rlimit(RLIMIT_MEMLOCK); - lock_limit >>= PAGE_SHIFT; - if (locked > lock_limit && !capable(CAP_IPC_LOCK)) - return -EAGAIN; - } + error = mlock_future_check(mm, mm->def_flags, len); + if (error) + return error; /* * mm->mmap_sem is required to protect against another thread -- cgit v1.2.3 From 1f1cd7054fe7f45e65dd4963d0a38e5ab7a57cae Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Tue, 21 Jan 2014 15:49:16 -0800 Subject: mm/mlock: prepare params outside critical region All mlock related syscalls prepare lock limits, lengths and start parameters with the mmap_sem held. Move this logic outside of the critical region. For the case of mlock, continue incrementing the amount already locked by mm->locked_vm with the rwsem taken. 
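For the mlock() path the reordering looks roughly as follows (a condensed sketch of the hunk below; only the mm->locked_vm accumulation and the limit check still run under the semaphore):

	/* prepared without mmap_sem held */
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);
	locked += current->mm->locked_vm;	/* needs the rwsem */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
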
Signed-off-by: Davidlohr Bueso Cc: Rik van Riel Reviewed-by: Michel Lespinasse Acked-by: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mlock.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/mlock.c b/mm/mlock.c index 192e6eebe4f2..10819ed4df3e 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -709,19 +709,21 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) lru_add_drain_all(); /* flush pagevec */ - down_write(¤t->mm->mmap_sem); len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); start &= PAGE_MASK; - locked = len >> PAGE_SHIFT; - locked += current->mm->locked_vm; - lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; + locked = len >> PAGE_SHIFT; + + down_write(¤t->mm->mmap_sem); + + locked += current->mm->locked_vm; /* check against resource limits */ if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) error = do_mlock(start, len, 1); + up_write(¤t->mm->mmap_sem); if (!error) error = __mm_populate(start, len, 0); @@ -732,11 +734,13 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) { int ret; - down_write(¤t->mm->mmap_sem); len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); start &= PAGE_MASK; + + down_write(¤t->mm->mmap_sem); ret = do_mlock(start, len, 0); up_write(¤t->mm->mmap_sem); + return ret; } @@ -781,12 +785,12 @@ SYSCALL_DEFINE1(mlockall, int, flags) if (flags & MCL_CURRENT) lru_add_drain_all(); /* flush pagevec */ - down_write(¤t->mm->mmap_sem); - lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; ret = -ENOMEM; + down_write(¤t->mm->mmap_sem); + if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || capable(CAP_IPC_LOCK)) ret = do_mlockall(flags); -- cgit v1.2.3 From 931d13f534a9bb39539f0a851209ca18013ba0c2 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 21 Jan 2014 15:49:17 -0800 Subject: mm/memblock: debug: correct displaying of upper memory boundary Current memblock APIs don't work on 32 PAE or LPAE extension arches where the physical memory start address beyond 4GB. The problem was discussed here [3] where Tejun, Yinghai(thanks) proposed a way forward with memblock interfaces. Based on the proposal, this series adds necessary memblock interfaces and convert the core kernel code to use them. Architectures already converted to NO_BOOTMEM use these new interfaces and other which still uses bootmem, these new interfaces just fallback to exiting bootmem APIs. So no functional change in behavior. In long run, once all the architectures moves to NO_BOOTMEM, we can get rid of bootmem layer completely. This is one step to remove the core code dependency with bootmem and also gives path for architectures to move away from bootmem. Testing is done on ARM architecture with 32 bit ARM LAPE machines with normal as well sparse(faked) memory model. This patch (of 23): When debugging is enabled (cmdline has "memblock=debug") the memblock will display upper memory boundary per each allocated/freed memory range wrongly. For example: memblock_reserve: [0x0000009e7e8000-0x0000009e7ed000] _memblock_early_alloc_try_nid_nopanic+0xfc/0x12c The 0x0000009e7ed000 is displayed instead of 0x0000009e7ecfff Hence, correct this by changing formula used to calculate upper memory boundary to (u64)base + size - 1 instead of (u64)base + size everywhere in the debug messages. Signed-off-by: Grygorii Strashko Signed-off-by: Santosh Shilimkar Cc: Yinghai Lu Acked-by: Tejun Heo Cc: H. Peter Anvin Cc: Russell King Cc: "Rafael J. 
Wysocki" Cc: Arnd Bergmann Cc: Christoph Lameter Cc: Greg Kroah-Hartman Cc: Johannes Weiner Cc: KAMEZAWA Hiroyuki Cc: Konrad Rzeszutek Wilk Cc: Michal Hocko Cc: Paul Walmsley Cc: Pavel Machek Cc: Tony Lindgren Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memblock.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 53e477bb5558..aab566998b61 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -643,7 +643,7 @@ int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) { memblock_dbg(" memblock_free: [%#016llx-%#016llx] %pF\n", (unsigned long long)base, - (unsigned long long)base + size, + (unsigned long long)base + size - 1, (void *)_RET_IP_); return __memblock_remove(&memblock.reserved, base, size); @@ -655,7 +655,7 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n", (unsigned long long)base, - (unsigned long long)base + size, + (unsigned long long)base + size - 1, (void *)_RET_IP_); return memblock_add_region(_rgn, base, size, MAX_NUMNODES); -- cgit v1.2.3 From 66a20757214d94b915f2d2aada1384dead9ab18d Mon Sep 17 00:00:00 2001 From: Tang Chen Date: Tue, 21 Jan 2014 15:49:20 -0800 Subject: memblock, numa: introduce flags field into memblock There is no flag in memblock to describe what type the memory is. Sometimes, we may use memblock to reserve some memory for special usage. And we want to know what kind of memory it is. So we need a way to In hotplug environment, we want to reserve hotpluggable memory so the kernel won't be able to use it. And when the system is up, we have to free these hotpluggable memory to buddy. So we need to mark these memory first. In order to do so, we need to mark out these special memory in memblock. In this patch, we introduce a new "flags" member into memblock_region: struct memblock_region { phys_addr_t base; phys_addr_t size; unsigned long flags; /* This is new. */ #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int nid; #endif }; This patch does the following things: 1) Add "flags" member to memblock_region. 2) Modify the following APIs' prototype: memblock_add_region() memblock_insert_region() 3) Add memblock_reserve_region() to support reserve memory with flags, and keep memblock_reserve()'s prototype unmodified. 4) Modify other APIs to support flags, but keep their prototype unmodified. The idea is from Wen Congyang and Liu Jiang . Suggested-by: Wen Congyang Suggested-by: Liu Jiang Signed-off-by: Tang Chen Reviewed-by: Zhang Yanfei Cc: "H. Peter Anvin" Cc: "Rafael J . 
Wysocki" Cc: Chen Tang Cc: Gong Chen Cc: Ingo Molnar Cc: Jiang Liu Cc: Johannes Weiner Cc: Lai Jiangshan Cc: Larry Woodman Cc: Len Brown Cc: Mel Gorman Cc: Michal Nazarewicz Cc: Minchan Kim Cc: Prarit Bhargava Cc: Rik van Riel Cc: Taku Izumi Cc: Tejun Heo Cc: Thomas Gleixner Cc: Thomas Renninger Cc: Toshi Kani Cc: Vasilis Liaskovitis Cc: Wanpeng Li Cc: Yasuaki Ishimatsu Cc: Yinghai Lu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memblock.h | 1 + mm/memblock.c | 53 ++++++++++++++++++++++++++++++++++-------------- 2 files changed, 39 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 77c60e52939d..9a805ec6e794 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -22,6 +22,7 @@ struct memblock_region { phys_addr_t base; phys_addr_t size; + unsigned long flags; #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int nid; #endif diff --git a/mm/memblock.c b/mm/memblock.c index aab566998b61..270b005ca964 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -255,6 +255,7 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u type->cnt = 1; type->regions[0].base = 0; type->regions[0].size = 0; + type->regions[0].flags = 0; memblock_set_region_node(&type->regions[0], MAX_NUMNODES); } } @@ -405,7 +406,8 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type) if (this->base + this->size != next->base || memblock_get_region_node(this) != - memblock_get_region_node(next)) { + memblock_get_region_node(next) || + this->flags != next->flags) { BUG_ON(this->base + this->size > next->base); i++; continue; @@ -425,13 +427,15 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type) * @base: base address of the new region * @size: size of the new region * @nid: node id of the new region + * @flags: flags of the new region * * Insert new memblock region [@base,@base+@size) into @type at @idx. * @type must already have extra room to accomodate the new region. */ static void __init_memblock memblock_insert_region(struct memblock_type *type, int idx, phys_addr_t base, - phys_addr_t size, int nid) + phys_addr_t size, + int nid, unsigned long flags) { struct memblock_region *rgn = &type->regions[idx]; @@ -439,6 +443,7 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type, memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn)); rgn->base = base; rgn->size = size; + rgn->flags = flags; memblock_set_region_node(rgn, nid); type->cnt++; type->total_size += size; @@ -450,6 +455,7 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type, * @base: base address of the new region * @size: size of the new region * @nid: nid of the new region + * @flags: flags of the new region * * Add new memblock region [@base,@base+@size) into @type. The new region * is allowed to overlap with existing ones - overlaps don't affect already @@ -460,7 +466,8 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type, * 0 on success, -errno on failure. 
*/ static int __init_memblock memblock_add_region(struct memblock_type *type, - phys_addr_t base, phys_addr_t size, int nid) + phys_addr_t base, phys_addr_t size, + int nid, unsigned long flags) { bool insert = false; phys_addr_t obase = base; @@ -475,6 +482,7 @@ static int __init_memblock memblock_add_region(struct memblock_type *type, WARN_ON(type->cnt != 1 || type->total_size); type->regions[0].base = base; type->regions[0].size = size; + type->regions[0].flags = flags; memblock_set_region_node(&type->regions[0], nid); type->total_size = size; return 0; @@ -505,7 +513,8 @@ repeat: nr_new++; if (insert) memblock_insert_region(type, i++, base, - rbase - base, nid); + rbase - base, nid, + flags); } /* area below @rend is dealt with, forget about it */ base = min(rend, end); @@ -515,7 +524,8 @@ repeat: if (base < end) { nr_new++; if (insert) - memblock_insert_region(type, i, base, end - base, nid); + memblock_insert_region(type, i, base, end - base, + nid, flags); } /* @@ -537,12 +547,13 @@ repeat: int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size, int nid) { - return memblock_add_region(&memblock.memory, base, size, nid); + return memblock_add_region(&memblock.memory, base, size, nid, 0); } int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) { - return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES); + return memblock_add_region(&memblock.memory, base, size, + MAX_NUMNODES, 0); } /** @@ -597,7 +608,8 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type, rgn->size -= base - rbase; type->total_size -= base - rbase; memblock_insert_region(type, i, rbase, base - rbase, - memblock_get_region_node(rgn)); + memblock_get_region_node(rgn), + rgn->flags); } else if (rend > end) { /* * @rgn intersects from above. 
Split and redo the @@ -607,7 +619,8 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type, rgn->size -= end - rbase; type->total_size -= end - rbase; memblock_insert_region(type, i--, rbase, end - rbase, - memblock_get_region_node(rgn)); + memblock_get_region_node(rgn), + rgn->flags); } else { /* @rgn is fully contained, record it */ if (!*end_rgn) @@ -649,16 +662,24 @@ int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) return __memblock_remove(&memblock.reserved, base, size); } -int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) +static int __init_memblock memblock_reserve_region(phys_addr_t base, + phys_addr_t size, + int nid, + unsigned long flags) { struct memblock_type *_rgn = &memblock.reserved; - memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n", + memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n", (unsigned long long)base, (unsigned long long)base + size - 1, - (void *)_RET_IP_); + flags, (void *)_RET_IP_); + + return memblock_add_region(_rgn, base, size, nid, flags); +} - return memblock_add_region(_rgn, base, size, MAX_NUMNODES); +int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) +{ + return memblock_reserve_region(base, size, MAX_NUMNODES, 0); } /** @@ -1101,6 +1122,7 @@ void __init_memblock memblock_set_current_limit(phys_addr_t limit) static void __init_memblock memblock_dump(struct memblock_type *type, char *name) { unsigned long long base, size; + unsigned long flags; int i; pr_info(" %s.cnt = 0x%lx\n", name, type->cnt); @@ -1111,13 +1133,14 @@ static void __init_memblock memblock_dump(struct memblock_type *type, char *name base = rgn->base; size = rgn->size; + flags = rgn->flags; #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP if (memblock_get_region_node(rgn) != MAX_NUMNODES) snprintf(nid_buf, sizeof(nid_buf), " on node %d", memblock_get_region_node(rgn)); #endif - pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n", - name, i, base, base + size - 1, size, nid_buf); + pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n", + name, i, base, base + size - 1, size, nid_buf, flags); } } -- cgit v1.2.3 From 66b16edf9eafc3291cabb2253d0f342a847656b7 Mon Sep 17 00:00:00 2001 From: Tang Chen Date: Tue, 21 Jan 2014 15:49:23 -0800 Subject: memblock, mem_hotplug: introduce MEMBLOCK_HOTPLUG flag to mark hotpluggable regions In find_hotpluggable_memory, once we find out a memory region which is hotpluggable, we want to mark them in memblock.memory. So that we could control memblock allocator not to allocte hotpluggable memory for the kernel later. To achieve this goal, we introduce MEMBLOCK_HOTPLUG flag to indicate the hotpluggable memory regions in memblock and a function memblock_mark_hotplug() to mark hotpluggable memory if we find one. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Tang Chen Reviewed-by: Zhang Yanfei Cc: "H. Peter Anvin" Cc: "Rafael J . 
Wysocki" Cc: Chen Tang Cc: Gong Chen Cc: Ingo Molnar Cc: Jiang Liu Cc: Johannes Weiner Cc: Lai Jiangshan Cc: Larry Woodman Cc: Len Brown Cc: Liu Jiang Cc: Mel Gorman Cc: Michal Nazarewicz Cc: Minchan Kim Cc: Prarit Bhargava Cc: Rik van Riel Cc: Taku Izumi Cc: Tejun Heo Cc: Thomas Gleixner Cc: Thomas Renninger Cc: Toshi Kani Cc: Vasilis Liaskovitis Cc: Wanpeng Li Cc: Wen Congyang Cc: Yasuaki Ishimatsu Cc: Yinghai Lu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memblock.h | 17 ++++++++++++++++ mm/memblock.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+) (limited to 'mm') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 9a805ec6e794..b788faa71563 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -19,6 +19,9 @@ #define INIT_MEMBLOCK_REGIONS 128 +/* Definition of memblock flags. */ +#define MEMBLOCK_HOTPLUG 0x1 /* hotpluggable region */ + struct memblock_region { phys_addr_t base; phys_addr_t size; @@ -60,6 +63,8 @@ int memblock_remove(phys_addr_t base, phys_addr_t size); int memblock_free(phys_addr_t base, phys_addr_t size); int memblock_reserve(phys_addr_t base, phys_addr_t size); void memblock_trim_memory(phys_addr_t align); +int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); +int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, @@ -122,6 +127,18 @@ void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start, i != (u64)ULLONG_MAX; \ __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid)) +static inline void memblock_set_region_flags(struct memblock_region *r, + unsigned long flags) +{ + r->flags |= flags; +} + +static inline void memblock_clear_region_flags(struct memblock_region *r, + unsigned long flags) +{ + r->flags &= ~flags; +} + #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int memblock_set_node(phys_addr_t base, phys_addr_t size, int nid); diff --git a/mm/memblock.c b/mm/memblock.c index 270b005ca964..2121ec4c7fa0 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -682,6 +682,59 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) return memblock_reserve_region(base, size, MAX_NUMNODES, 0); } +/** + * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG. + * @base: the base phys addr of the region + * @size: the size of the region + * + * This function isolates region [@base, @base + @size), and mark it with flag + * MEMBLOCK_HOTPLUG. + * + * Return 0 on succees, -errno on failure. + */ +int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size) +{ + struct memblock_type *type = &memblock.memory; + int i, ret, start_rgn, end_rgn; + + ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); + if (ret) + return ret; + + for (i = start_rgn; i < end_rgn; i++) + memblock_set_region_flags(&type->regions[i], MEMBLOCK_HOTPLUG); + + memblock_merge_regions(type); + return 0; +} + +/** + * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region. + * @base: the base phys addr of the region + * @size: the size of the region + * + * This function isolates region [@base, @base + @size), and clear flag + * MEMBLOCK_HOTPLUG for the isolated regions. + * + * Return 0 on succees, -errno on failure. 
+ */ +int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) +{ + struct memblock_type *type = &memblock.memory; + int i, ret, start_rgn, end_rgn; + + ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); + if (ret) + return ret; + + for (i = start_rgn; i < end_rgn; i++) + memblock_clear_region_flags(&type->regions[i], + MEMBLOCK_HOTPLUG); + + memblock_merge_regions(type); + return 0; +} + /** * __next_free_mem_range - next function for for_each_free_mem_range() * @idx: pointer to u64 loop variable -- cgit v1.2.3 From e7e8de5918dd6a07cbddae559600d7765ad6a56e Mon Sep 17 00:00:00 2001 From: Tang Chen Date: Tue, 21 Jan 2014 15:49:26 -0800 Subject: memblock: make memblock_set_node() support different memblock_type [sfr@canb.auug.org.au: fix powerpc build] Signed-off-by: Tang Chen Reviewed-by: Zhang Yanfei Cc: "H. Peter Anvin" Cc: "Rafael J . Wysocki" Cc: Chen Tang Cc: Gong Chen Cc: Ingo Molnar Cc: Jiang Liu Cc: Johannes Weiner Cc: Lai Jiangshan Cc: Larry Woodman Cc: Len Brown Cc: Liu Jiang Cc: Mel Gorman Cc: Michal Nazarewicz Cc: Minchan Kim Cc: Prarit Bhargava Cc: Rik van Riel Cc: Taku Izumi Cc: Tejun Heo Cc: Thomas Gleixner Cc: Thomas Renninger Cc: Toshi Kani Cc: Vasilis Liaskovitis Cc: Wanpeng Li Cc: Wen Congyang Cc: Yasuaki Ishimatsu Cc: Yinghai Lu Signed-off-by: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/metag/mm/init.c | 3 ++- arch/metag/mm/numa.c | 3 ++- arch/microblaze/mm/init.c | 3 ++- arch/powerpc/mm/mem.c | 2 +- arch/powerpc/mm/numa.c | 8 +++++--- arch/sh/kernel/setup.c | 4 ++-- arch/sparc/mm/init_64.c | 5 +++-- arch/x86/mm/init_32.c | 2 +- arch/x86/mm/init_64.c | 2 +- arch/x86/mm/numa.c | 6 ++++-- include/linux/memblock.h | 3 ++- mm/memblock.c | 6 +++--- 12 files changed, 28 insertions(+), 19 deletions(-) (limited to 'mm') diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c index 3cd6288f65c2..11fa51c89617 100644 --- a/arch/metag/mm/init.c +++ b/arch/metag/mm/init.c @@ -204,7 +204,8 @@ static void __init do_init_bootmem(void) start_pfn = memblock_region_memory_base_pfn(reg); end_pfn = memblock_region_memory_end_pfn(reg); memblock_set_node(PFN_PHYS(start_pfn), - PFN_PHYS(end_pfn - start_pfn), 0); + PFN_PHYS(end_pfn - start_pfn), + &memblock.memory, 0); } /* All of system RAM sits in node 0 for the non-NUMA case */ diff --git a/arch/metag/mm/numa.c b/arch/metag/mm/numa.c index b172aa45fcf8..67b46c295072 100644 --- a/arch/metag/mm/numa.c +++ b/arch/metag/mm/numa.c @@ -42,7 +42,8 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) memblock_add(start, end - start); memblock_set_node(PFN_PHYS(start_pfn), - PFN_PHYS(end_pfn - start_pfn), nid); + PFN_PHYS(end_pfn - start_pfn), + &memblock.memory, nid); /* Node-local pgdat */ pgdat_paddr = memblock_alloc_base(sizeof(struct pglist_data), diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 74c7bcc1e82d..89077d346714 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -192,7 +192,8 @@ void __init setup_memory(void) start_pfn = memblock_region_memory_base_pfn(reg); end_pfn = memblock_region_memory_end_pfn(reg); memblock_set_node(start_pfn << PAGE_SHIFT, - (end_pfn - start_pfn) << PAGE_SHIFT, 0); + (end_pfn - start_pfn) << PAGE_SHIFT, + &memblock.memory, 0); } /* free bootmem is whole main memory */ diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 3fa93dc7fe75..8c1dd23652a1 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -209,7 +209,7 @@ void __init 
do_init_bootmem(void) /* Place all memblock_regions in the same node and merge contiguous * memblock_regions */ - memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); + memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0); /* Add all physical memory to the bootmem map, mark each area * present. diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 078d3e00a616..5a944f25e94f 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -670,7 +670,8 @@ static void __init parse_drconf_memory(struct device_node *memory) node_set_online(nid); sz = numa_enforce_memory_limit(base, size); if (sz) - memblock_set_node(base, sz, nid); + memblock_set_node(base, sz, + &memblock.memory, nid); } while (--ranges); } } @@ -760,7 +761,7 @@ new_range: continue; } - memblock_set_node(start, size, nid); + memblock_set_node(start, size, &memblock.memory, nid); if (--ranges) goto new_range; @@ -797,7 +798,8 @@ static void __init setup_nonnuma(void) fake_numa_create_new_node(end_pfn, &nid); memblock_set_node(PFN_PHYS(start_pfn), - PFN_PHYS(end_pfn - start_pfn), nid); + PFN_PHYS(end_pfn - start_pfn), + &memblock.memory, nid); node_set_online(nid); } } diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 1cf90e947dbf..de19cfa768f2 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -230,8 +230,8 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn, pmb_bolt_mapping((unsigned long)__va(start), start, end - start, PAGE_KERNEL); - memblock_set_node(PFN_PHYS(start_pfn), - PFN_PHYS(end_pfn - start_pfn), nid); + memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn), + &memblock.memory, nid); } void __init __weak plat_early_device_setup(void) diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 5322e530d09c..eafbc65c9c47 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1021,7 +1021,8 @@ static void __init add_node_ranges(void) "start[%lx] end[%lx]\n", nid, start, this_end); - memblock_set_node(start, this_end - start, nid); + memblock_set_node(start, this_end - start, + &memblock.memory, nid); start = this_end; } } @@ -1325,7 +1326,7 @@ static void __init bootmem_init_nonnuma(void) (top_of_ram - total_ram) >> 20); init_node_masks_nonnuma(); - memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); + memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0); allocate_node_data(0); node_set_online(0); } diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 5bdc5430597c..e39504878aec 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -665,7 +665,7 @@ void __init initmem_init(void) high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; #endif - memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); + memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0); sparse_memory_present_with_active_regions(0); #ifdef CONFIG_FLATMEM diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 104d56a9245f..f35c66c5959a 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -643,7 +643,7 @@ kernel_physical_mapping_init(unsigned long start, #ifndef CONFIG_NUMA void __init initmem_init(void) { - memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); + memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0); } #endif diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index c85da7bb6b60..82e079a0d363 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -491,7 +491,8 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) 
for (i = 0; i < mi->nr_blks; i++) { struct numa_memblk *mb = &mi->blk[i]; - memblock_set_node(mb->start, mb->end - mb->start, mb->nid); + memblock_set_node(mb->start, mb->end - mb->start, + &memblock.memory, mb->nid); } /* @@ -565,7 +566,8 @@ static int __init numa_init(int (*init_func)(void)) nodes_clear(node_possible_map); nodes_clear(node_online_map); memset(&numa_meminfo, 0, sizeof(numa_meminfo)); - WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES)); + WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory, + MAX_NUMNODES)); numa_reset_distance(); ret = init_func(); diff --git a/include/linux/memblock.h b/include/linux/memblock.h index b788faa71563..97480d392e40 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -140,7 +140,8 @@ static inline void memblock_clear_region_flags(struct memblock_region *r, } #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP -int memblock_set_node(phys_addr_t base, phys_addr_t size, int nid); +int memblock_set_node(phys_addr_t base, phys_addr_t size, + struct memblock_type *type, int nid); static inline void memblock_set_region_node(struct memblock_region *r, int nid) { diff --git a/mm/memblock.c b/mm/memblock.c index 2121ec4c7fa0..d5681008dce1 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -911,18 +911,18 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid, * memblock_set_node - set node ID on memblock regions * @base: base of area to set node ID for * @size: size of area to set node ID for + * @type: memblock type to set node ID for * @nid: node ID to set * - * Set the nid of memblock memory regions in [@base,@base+@size) to @nid. + * Set the nid of memblock @type regions in [@base,@base+@size) to @nid. * Regions which cross the area boundaries are split as necessary. * * RETURNS: * 0 on success, -errno on failure. */ int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size, - int nid) + struct memblock_type *type, int nid) { - struct memblock_type *type = &memblock.memory; int start_rgn, end_rgn; int i, ret; -- cgit v1.2.3 From 55ac590c2fadad785d60dd70c12d62823bc2cd39 Mon Sep 17 00:00:00 2001 From: Tang Chen Date: Tue, 21 Jan 2014 15:49:35 -0800 Subject: memblock, mem_hotplug: make memblock skip hotpluggable regions if needed Linux kernel cannot migrate pages used by the kernel. As a result, hotpluggable memory used by the kernel won't be able to be hot-removed. To solve this problem, the basic idea is to prevent memblock from allocating hotpluggable memory for the kernel at early time, and arrange all hotpluggable memory in ACPI SRAT(System Resource Affinity Table) as ZONE_MOVABLE when initializing zones. In the previous patches, we have marked hotpluggable memory regions with MEMBLOCK_HOTPLUG flag in memblock.memory. In this patch, we make memblock skip these hotpluggable memory regions in the default top-down allocation function if movable_node boot option is specified. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Tang Chen Signed-off-by: Zhang Yanfei Cc: "H. Peter Anvin" Cc: "Rafael J . 
Wysocki" Cc: Chen Tang Cc: Gong Chen Cc: Ingo Molnar Cc: Jiang Liu Cc: Johannes Weiner Cc: Lai Jiangshan Cc: Larry Woodman Cc: Len Brown Cc: Liu Jiang Cc: Mel Gorman Cc: Michal Nazarewicz Cc: Minchan Kim Cc: Prarit Bhargava Cc: Rik van Riel Cc: Taku Izumi Cc: Tejun Heo Cc: Thomas Gleixner Cc: Thomas Renninger Cc: Toshi Kani Cc: Vasilis Liaskovitis Cc: Wanpeng Li Cc: Wen Congyang Cc: Yasuaki Ishimatsu Cc: Yinghai Lu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memblock.h | 24 ++++++++++++++++++++++++ mm/memblock.c | 12 ++++++++++++ mm/memory_hotplug.c | 1 + 3 files changed, 37 insertions(+) (limited to 'mm') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 97480d392e40..2f52c8c492bd 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -47,6 +47,10 @@ struct memblock { extern struct memblock memblock; extern int memblock_debug; +#ifdef CONFIG_MOVABLE_NODE +/* If movable_node boot option specified */ +extern bool movable_node_enabled; +#endif /* CONFIG_MOVABLE_NODE */ #define memblock_dbg(fmt, ...) \ if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) @@ -65,6 +69,26 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size); void memblock_trim_memory(phys_addr_t align); int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); +#ifdef CONFIG_MOVABLE_NODE +static inline bool memblock_is_hotpluggable(struct memblock_region *m) +{ + return m->flags & MEMBLOCK_HOTPLUG; +} + +static inline bool movable_node_is_enabled(void) +{ + return movable_node_enabled; +} +#else +static inline bool memblock_is_hotpluggable(struct memblock_region *m) +{ + return false; +} +static inline bool movable_node_is_enabled(void) +{ + return false; +} +#endif #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, diff --git a/mm/memblock.c b/mm/memblock.c index d5681008dce1..6a2a48a122a9 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -39,6 +39,9 @@ struct memblock memblock __initdata_memblock = { }; int memblock_debug __initdata_memblock; +#ifdef CONFIG_MOVABLE_NODE +bool movable_node_enabled __initdata_memblock = false; +#endif static int memblock_can_resize __initdata_memblock; static int memblock_memory_in_slab __initdata_memblock = 0; static int memblock_reserved_in_slab __initdata_memblock = 0; @@ -820,6 +823,11 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid, * @out_nid: ptr to int for nid of the range, can be %NULL * * Reverse of __next_free_mem_range(). + * + * Linux kernel cannot migrate pages used by itself. Memory hotplug users won't + * be able to hot-remove hotpluggable memory used by the kernel. So this + * function skip hotpluggable regions if needed when allocating memory for the + * kernel. 
*/ void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start, @@ -844,6 +852,10 @@ void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid, if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m)) continue; + /* skip hotpluggable memory regions if needed */ + if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) + continue; + /* scan areas before each reservation for intersection */ for ( ; ri >= 0; ri--) { struct memblock_region *r = &rsv->regions[ri]; diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 489f235502db..01e39afde1cb 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1446,6 +1446,7 @@ static int __init cmdline_parse_movable_node(char *p) * the kernel away from hotpluggable memory. */ memblock_set_bottom_up(true); + movable_node_enabled = true; #else pr_warn("movable_node option not supported\n"); #endif -- cgit v1.2.3 From b2f3eebe7a8ef6cd4e2ea088ac7f613793f6cad6 Mon Sep 17 00:00:00 2001 From: Tang Chen Date: Tue, 21 Jan 2014 15:49:38 -0800 Subject: x86, numa, acpi, memory-hotplug: make movable_node have higher priority If users specify the original movablecore=nn@ss boot option, the kernel will arrange [ss, ss+nn) as ZONE_MOVABLE. The kernelcore=nn@ss boot option is similar except it specifies ZONE_NORMAL ranges. Now, if users specify "movable_node" in kernel commandline, the kernel will arrange hotpluggable memory in SRAT as ZONE_MOVABLE. And if users do this, all the other movablecore=nn@ss and kernelcore=nn@ss options should be ignored. For those who don't want this, just specify nothing. The kernel will act as before. Signed-off-by: Tang Chen Signed-off-by: Zhang Yanfei Reviewed-by: Wanpeng Li Cc: "H. Peter Anvin" Cc: "Rafael J . Wysocki" Cc: Chen Tang Cc: Gong Chen Cc: Ingo Molnar Cc: Jiang Liu Cc: Johannes Weiner Cc: Lai Jiangshan Cc: Larry Woodman Cc: Len Brown Cc: Liu Jiang Cc: Mel Gorman Cc: Michal Nazarewicz Cc: Minchan Kim Cc: Prarit Bhargava Cc: Rik van Riel Cc: Taku Izumi Cc: Tejun Heo Cc: Thomas Gleixner Cc: Thomas Renninger Cc: Toshi Kani Cc: Vasilis Liaskovitis Cc: Wen Congyang Cc: Yasuaki Ishimatsu Cc: Yinghai Lu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ec4417cb458a..4f59d1986018 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5018,9 +5018,33 @@ static void __init find_zone_movable_pfns_for_nodes(void) nodemask_t saved_node_state = node_states[N_MEMORY]; unsigned long totalpages = early_calculate_totalpages(); int usable_nodes = nodes_weight(node_states[N_MEMORY]); + struct memblock_type *type = &memblock.memory; + + /* Need to find movable_zone earlier when movable_node is specified. */ + find_usable_zone_for_movable(); + + /* + * If movable_node is specified, ignore kernelcore and movablecore + * options. + */ + if (movable_node_is_enabled()) { + for (i = 0; i < type->cnt; i++) { + if (!memblock_is_hotpluggable(&type->regions[i])) + continue; + + nid = type->regions[i].nid; + + usable_startpfn = PFN_DOWN(type->regions[i].base); + zone_movable_pfn[nid] = zone_movable_pfn[nid] ? + min(usable_startpfn, zone_movable_pfn[nid]) : + usable_startpfn; + } + + goto out2; + } /* - * If movablecore was specified, calculate what size of + * If movablecore=nn[KMG] was specified, calculate what size of * kernelcore that corresponds so that memory usable for * any allocation type is evenly spread. 
If both kernelcore * and movablecore are specified, then the value of kernelcore @@ -5046,7 +5070,6 @@ static void __init find_zone_movable_pfns_for_nodes(void) goto out; /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ - find_usable_zone_for_movable(); usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; restart: @@ -5137,6 +5160,7 @@ restart: if (usable_nodes && required_kernelcore > usable_nodes) goto restart; +out2: /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ for (nid = 0; nid < MAX_NUMNODES; nid++) zone_movable_pfn[nid] = -- cgit v1.2.3 From 1c98dd905ddb7552f13a3f06aa0bd9ef6affeeb7 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Tue, 21 Jan 2014 15:49:41 -0800 Subject: memcg: fix kmem_account_flags check in memcg_can_account_kmem() We should start kmem accounting for a memory cgroup only after both its kmem limit is set (KMEM_ACCOUNTED_ACTIVE) and related call sites are patched (KMEM_ACCOUNTED_ACTIVATED). Currently memcg_can_account_kmem() allows kmem accounting even if only one of the conditions is true. Fix it. This means that a page might get charged by memcg_kmem_newpage_charge which would see its static key patched already but memcg_kmem_commit_charge would still see it unpatched and so the charge won't be committed. The result would be charge inconsistency (page_cgroup not marked as PageCgroupUsed) and the charge would leak because __memcg_kmem_uncharge_pages would ignore it. [mhocko@suse.cz: augment changelog] Signed-off-by: Vladimir Davydov Cc: Johannes Weiner Acked-by: Michal Hocko Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Cc: Glauber Costa Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7f1a356153c0..3065fa80251d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2959,7 +2959,8 @@ static DEFINE_MUTEX(set_limit_mutex); static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg) { return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) && - (memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK); + (memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK) == + KMEM_ACCOUNTED_MASK; } /* -- cgit v1.2.3 From 2753b35bcd3156727138cd2305b825611a571a3f Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Tue, 21 Jan 2014 15:49:42 -0800 Subject: memcg: make memcg_update_cache_sizes() static This function is not used outside of memcontrol.c so make it static. Signed-off-by: Vladimir Davydov Cc: Johannes Weiner Acked-by: Michal Hocko Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3065fa80251d..08541f680d90 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3087,7 +3087,7 @@ int memcg_cache_id(struct mem_cgroup *memcg) * But when we create a new cache, we can call this as well if its parent * is kmem-limited. That will have to hold set_limit_mutex as well. 
*/ -int memcg_update_cache_sizes(struct mem_cgroup *memcg) +static int memcg_update_cache_sizes(struct mem_cgroup *memcg) { int num, ret; -- cgit v1.2.3 From b854f711f6b8b49674d494c5e6d706096dd38301 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 21 Jan 2014 15:49:43 -0800 Subject: mm/rmap: recompute pgoff for huge page Rmap traversing is used in five different cases, try_to_unmap(), try_to_munlock(), page_referenced(), page_mkclean() and remove_migration_ptes(). Each one implements its own traversing functions for the cases, anon, file, ksm, respectively. These cause lots of duplications and cause maintenance overhead. They also make codes being hard to understand and error-prone. One example is hugepage handling. There is a code to compute hugepage offset correctly in try_to_unmap_file(), but, there isn't a code to compute hugepage offset in rmap_walk_file(). These are used pairwise in migration context, but we missed to modify pairwise. To overcome these drawbacks, we should unify these through one unified function. I decide rmap_walk() as main function since it has no unnecessity. And to control behavior of rmap_walk(), I introduce struct rmap_walk_control having some function pointers. These makes rmap_walk() working for their specific needs. This patchset remove a lot of duplicated code as you can see in below short-stat and kernel text size also decrease slightly. text data bss dec hex filename 10640 1 16 10657 29a1 mm/rmap.o 10047 1 16 10064 2750 mm/rmap.o 13823 705 8288 22816 5920 mm/ksm.o 13199 705 8288 22192 56b0 mm/ksm.o This patch (of 9): We have to recompute pgoff if the given page is huge, since result based on HPAGE_SIZE is not approapriate for scanning the vma interval tree, as shown by commit 36e4f20af833 ("hugetlb: do not use vma_hugecache_offset() for vma_prio_tree_foreach") and commit 369a713e ("rmap: recompute pgoff for unmapping huge page"). To handle both the cases, normal page for page cache and hugetlb page, by same way, we can use compound_page(). It returns 0 on non-compound page and it also returns proper value on compound page. 
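The helper actually used in the hunks below is compound_order(): it returns 0 for a non-compound page (so the shift is a no-op) and the huge page order otherwise, which converts a hugetlb page->index into the small-page units used by vm_pgoff and the i_mmap interval tree. A minimal sketch of the common pattern:

	/* works for both normal page cache pages and hugetlb pages */
	pgoff_t pgoff = page->index << compound_order(page);

	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		/* ... per-vma unmap or walk work ... */
	}
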
Signed-off-by: Joonsoo Kim Cc: Naoya Horiguchi Cc: Mel Gorman Cc: Hugh Dickins Cc: Rik van Riel Cc: Ingo Molnar Cc: Hillf Danton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/rmap.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/rmap.c b/mm/rmap.c index 068522d8502a..edc0aea2c4e3 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1512,7 +1512,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags) static int try_to_unmap_file(struct page *page, enum ttu_flags flags) { struct address_space *mapping = page->mapping; - pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + pgoff_t pgoff = page->index << compound_order(page); struct vm_area_struct *vma; int ret = SWAP_AGAIN; unsigned long cursor; @@ -1520,9 +1520,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) unsigned long max_nl_size = 0; unsigned int mapcount; - if (PageHuge(page)) - pgoff = page->index << compound_order(page); - mutex_lock(&mapping->i_mmap_mutex); vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { unsigned long address = vma_address(page, vma); @@ -1712,7 +1709,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *, struct vm_area_struct *, unsigned long, void *), void *arg) { struct address_space *mapping = page->mapping; - pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + pgoff_t pgoff = page->index << compound_order(page); struct vm_area_struct *vma; int ret = SWAP_AGAIN; -- cgit v1.2.3 From 0f843c6ac318bb3ea7b63437b66dd39d8f01b088 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 21 Jan 2014 15:49:45 -0800 Subject: mm/rmap: factor nonlinear handling out of try_to_unmap_file() To merge all kinds of rmap traverse functions, try_to_unmap(), try_to_munlock(), page_referenced() and page_mkclean(), we need to extract common parts and separate out non-common parts. Nonlinear handling is handled just in try_to_unmap_file() and other rmap traverse functions doesn't care of it. Therfore it is better to factor nonlinear handling out of try_to_unmap_file() in order to merge all kinds of rmap traverse functions easily. Signed-off-by: Joonsoo Kim Reviewed-by: Naoya Horiguchi Cc: Mel Gorman Cc: Hugh Dickins Cc: Rik van Riel Cc: Ingo Molnar Cc: Hillf Danton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/rmap.c | 136 ++++++++++++++++++++++++++++++++++---------------------------- 1 file changed, 74 insertions(+), 62 deletions(-) (limited to 'mm') diff --git a/mm/rmap.c b/mm/rmap.c index edc0aea2c4e3..7eab4ed304c1 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1426,6 +1426,79 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, return ret; } +static int try_to_unmap_nonlinear(struct page *page, + struct address_space *mapping, struct vm_area_struct *vma) +{ + int ret = SWAP_AGAIN; + unsigned long cursor; + unsigned long max_nl_cursor = 0; + unsigned long max_nl_size = 0; + unsigned int mapcount; + + list_for_each_entry(vma, + &mapping->i_mmap_nonlinear, shared.nonlinear) { + + cursor = (unsigned long) vma->vm_private_data; + if (cursor > max_nl_cursor) + max_nl_cursor = cursor; + cursor = vma->vm_end - vma->vm_start; + if (cursor > max_nl_size) + max_nl_size = cursor; + } + + if (max_nl_size == 0) { /* all nonlinears locked or reserved ? */ + return SWAP_FAIL; + } + + /* + * We don't try to search for this page in the nonlinear vmas, + * and page_referenced wouldn't have found it anyway. 
Instead + * just walk the nonlinear vmas trying to age and unmap some. + * The mapcount of the page we came in with is irrelevant, + * but even so use it as a guide to how hard we should try? + */ + mapcount = page_mapcount(page); + if (!mapcount) + return ret; + + cond_resched(); + + max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK; + if (max_nl_cursor == 0) + max_nl_cursor = CLUSTER_SIZE; + + do { + list_for_each_entry(vma, + &mapping->i_mmap_nonlinear, shared.nonlinear) { + + cursor = (unsigned long) vma->vm_private_data; + while (cursor < max_nl_cursor && + cursor < vma->vm_end - vma->vm_start) { + if (try_to_unmap_cluster(cursor, &mapcount, + vma, page) == SWAP_MLOCK) + ret = SWAP_MLOCK; + cursor += CLUSTER_SIZE; + vma->vm_private_data = (void *) cursor; + if ((int)mapcount <= 0) + return ret; + } + vma->vm_private_data = (void *) max_nl_cursor; + } + cond_resched(); + max_nl_cursor += CLUSTER_SIZE; + } while (max_nl_cursor <= max_nl_size); + + /* + * Don't loop forever (perhaps all the remaining pages are + * in locked vmas). Reset cursor on all unreserved nonlinear + * vmas, now forgetting on which ones it had fallen behind. + */ + list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear) + vma->vm_private_data = NULL; + + return ret; +} + bool is_vma_temporary_stack(struct vm_area_struct *vma) { int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); @@ -1515,10 +1588,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) pgoff_t pgoff = page->index << compound_order(page); struct vm_area_struct *vma; int ret = SWAP_AGAIN; - unsigned long cursor; - unsigned long max_nl_cursor = 0; - unsigned long max_nl_size = 0; - unsigned int mapcount; mutex_lock(&mapping->i_mmap_mutex); vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { @@ -1539,64 +1608,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) if (TTU_ACTION(flags) == TTU_MUNLOCK) goto out; - list_for_each_entry(vma, &mapping->i_mmap_nonlinear, - shared.nonlinear) { - cursor = (unsigned long) vma->vm_private_data; - if (cursor > max_nl_cursor) - max_nl_cursor = cursor; - cursor = vma->vm_end - vma->vm_start; - if (cursor > max_nl_size) - max_nl_size = cursor; - } - - if (max_nl_size == 0) { /* all nonlinears locked or reserved ? */ - ret = SWAP_FAIL; - goto out; - } - - /* - * We don't try to search for this page in the nonlinear vmas, - * and page_referenced wouldn't have found it anyway. Instead - * just walk the nonlinear vmas trying to age and unmap some. - * The mapcount of the page we came in with is irrelevant, - * but even so use it as a guide to how hard we should try? - */ - mapcount = page_mapcount(page); - if (!mapcount) - goto out; - cond_resched(); - - max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK; - if (max_nl_cursor == 0) - max_nl_cursor = CLUSTER_SIZE; - - do { - list_for_each_entry(vma, &mapping->i_mmap_nonlinear, - shared.nonlinear) { - cursor = (unsigned long) vma->vm_private_data; - while ( cursor < max_nl_cursor && - cursor < vma->vm_end - vma->vm_start) { - if (try_to_unmap_cluster(cursor, &mapcount, - vma, page) == SWAP_MLOCK) - ret = SWAP_MLOCK; - cursor += CLUSTER_SIZE; - vma->vm_private_data = (void *) cursor; - if ((int)mapcount <= 0) - goto out; - } - vma->vm_private_data = (void *) max_nl_cursor; - } - cond_resched(); - max_nl_cursor += CLUSTER_SIZE; - } while (max_nl_cursor <= max_nl_size); - - /* - * Don't loop forever (perhaps all the remaining pages are - * in locked vmas). 
Reset cursor on all unreserved nonlinear - * vmas, now forgetting on which ones it had fallen behind. - */ - list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear) - vma->vm_private_data = NULL; + ret = try_to_unmap_nonlinear(page, mapping, vma); out: mutex_unlock(&mapping->i_mmap_mutex); return ret; -- cgit v1.2.3 From faecd8dd852d4e4a63a1b8ad43e5df8e41ee0336 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 21 Jan 2014 15:49:46 -0800 Subject: mm/rmap: factor lock function out of rmap_walk_anon() When we traverse anon_vma, we need to take a read-side anon_lock. But there is subtle difference in the situation so that we can't use same method to take a lock in each cases. Therefore, we need to make rmap_walk_anon() taking difference lock function. This patch is the first step, factoring lock function for anon_lock out of rmap_walk_anon(). It will be used in case of removing migration entry and in default of rmap_walk_anon(). Signed-off-by: Joonsoo Kim Reviewed-by: Naoya Horiguchi Cc: Mel Gorman Cc: Hugh Dickins Cc: Rik van Riel Cc: Ingo Molnar Cc: Hillf Danton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/rmap.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/rmap.c b/mm/rmap.c index 7eab4ed304c1..5a79bf585e27 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1684,6 +1684,24 @@ void __put_anon_vma(struct anon_vma *anon_vma) } #ifdef CONFIG_MIGRATION +static struct anon_vma *rmap_walk_anon_lock(struct page *page) +{ + struct anon_vma *anon_vma; + + /* + * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read() + * because that depends on page_mapped(); but not all its usages + * are holding mmap_sem. Users without mmap_sem are required to + * take a reference count to prevent the anon_vma disappearing + */ + anon_vma = page_anon_vma(page); + if (!anon_vma) + return NULL; + + anon_vma_lock_read(anon_vma); + return anon_vma; +} + /* * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file(): * Called by migrate.c to remove migration ptes, but might be used more later. @@ -1696,16 +1714,10 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *, struct anon_vma_chain *avc; int ret = SWAP_AGAIN; - /* - * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read() - * because that depends on page_mapped(); but not all its usages - * are holding mmap_sem. Users without mmap_sem are required to - * take a reference count to prevent the anon_vma disappearing - */ - anon_vma = page_anon_vma(page); + anon_vma = rmap_walk_anon_lock(page); if (!anon_vma) return ret; - anon_vma_lock_read(anon_vma); + anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { struct vm_area_struct *vma = avc->vma; unsigned long address = vma_address(page, vma); -- cgit v1.2.3 From 051ac83adf69eea4f57a97356e4282e395a5fa6d Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 21 Jan 2014 15:49:48 -0800 Subject: mm/rmap: make rmap_walk to get the rmap_walk_control argument In each rmap traverse case, there is some difference so that we need function pointers and arguments to them in order to handle these For this purpose, struct rmap_walk_control is introduced in this patch, and will be extended in following patch. Introducing and extending are separate, because it clarify changes. 
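The calling convention this introduces can be seen in the migrate.c hunk below; a condensed sketch of how a walker is wired up, using only names from this patch:

	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,	/* callback invoked per vma */
		.arg = old,				/* opaque data passed through */
	};

	rmap_walk(new, &rwc);	/* dispatches to the anon, file or ksm walker */
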
Signed-off-by: Joonsoo Kim Reviewed-by: Naoya Horiguchi Cc: Mel Gorman Cc: Hugh Dickins Cc: Rik van Riel Cc: Ingo Molnar Cc: Hillf Danton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ksm.h | 7 +++---- include/linux/rmap.h | 9 +++++++-- mm/ksm.c | 6 +++--- mm/migrate.c | 7 ++++++- mm/rmap.c | 19 ++++++++----------- 5 files changed, 27 insertions(+), 21 deletions(-) (limited to 'mm') diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 45c9b6a17bcb..0eef8cb0baf7 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -76,8 +76,7 @@ struct page *ksm_might_need_to_copy(struct page *page, int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, unsigned long *vm_flags); int try_to_unmap_ksm(struct page *page, enum ttu_flags flags); -int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, - struct vm_area_struct *, unsigned long, void *), void *arg); +int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); void ksm_migrate_page(struct page *newpage, struct page *oldpage); #else /* !CONFIG_KSM */ @@ -120,8 +119,8 @@ static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) return 0; } -static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*, - struct vm_area_struct *, unsigned long, void *), void *arg) +static inline int rmap_walk_ksm(struct page *page, + struct rmap_walk_control *rwc) { return 0; } diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 6dacb93a6d94..6a456ce6de20 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -235,11 +235,16 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page); void page_unlock_anon_vma_read(struct anon_vma *anon_vma); int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); +struct rmap_walk_control { + void *arg; + int (*rmap_one)(struct page *page, struct vm_area_struct *vma, + unsigned long addr, void *arg); +}; + /* * Called by migrate.c to remove migration ptes, but might be used more later. 
*/ -int rmap_walk(struct page *page, int (*rmap_one)(struct page *, - struct vm_area_struct *, unsigned long, void *), void *arg); +int rmap_walk(struct page *page, struct rmap_walk_control *rwc); #else /* !CONFIG_MMU */ diff --git a/mm/ksm.c b/mm/ksm.c index 175fff79dc95..c3035fee8080 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1997,8 +1997,7 @@ out: } #ifdef CONFIG_MIGRATION -int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, - struct vm_area_struct *, unsigned long, void *), void *arg) +int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) { struct stable_node *stable_node; struct rmap_item *rmap_item; @@ -2033,7 +2032,8 @@ again: if ((rmap_item->mm == vma->vm_mm) == search_new_forks) continue; - ret = rmap_one(page, vma, rmap_item->address, arg); + ret = rwc->rmap_one(page, vma, + rmap_item->address, rwc->arg); if (ret != SWAP_AGAIN) { anon_vma_unlock_read(anon_vma); goto out; diff --git a/mm/migrate.c b/mm/migrate.c index 9194375b2307..11d89dc0574c 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -199,7 +199,12 @@ out: */ static void remove_migration_ptes(struct page *old, struct page *new) { - rmap_walk(new, remove_migration_pte, old); + struct rmap_walk_control rwc = { + .rmap_one = remove_migration_pte, + .arg = old, + }; + + rmap_walk(new, &rwc); } /* diff --git a/mm/rmap.c b/mm/rmap.c index 5a79bf585e27..f8f10ad5d359 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1706,8 +1706,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page) * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file(): * Called by migrate.c to remove migration ptes, but might be used more later. */ -static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *, - struct vm_area_struct *, unsigned long, void *), void *arg) +static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc) { struct anon_vma *anon_vma; pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); @@ -1721,7 +1720,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *, anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { struct vm_area_struct *vma = avc->vma; unsigned long address = vma_address(page, vma); - ret = rmap_one(page, vma, address, arg); + ret = rwc->rmap_one(page, vma, address, rwc->arg); if (ret != SWAP_AGAIN) break; } @@ -1729,8 +1728,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *, return ret; } -static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *, - struct vm_area_struct *, unsigned long, void *), void *arg) +static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) { struct address_space *mapping = page->mapping; pgoff_t pgoff = page->index << compound_order(page); @@ -1742,7 +1740,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *, mutex_lock(&mapping->i_mmap_mutex); vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { unsigned long address = vma_address(page, vma); - ret = rmap_one(page, vma, address, arg); + ret = rwc->rmap_one(page, vma, address, rwc->arg); if (ret != SWAP_AGAIN) break; } @@ -1755,17 +1753,16 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *, return ret; } -int rmap_walk(struct page *page, int (*rmap_one)(struct page *, - struct vm_area_struct *, unsigned long, void *), void *arg) +int rmap_walk(struct page *page, struct rmap_walk_control *rwc) { VM_BUG_ON(!PageLocked(page)); if (unlikely(PageKsm(page))) - return rmap_walk_ksm(page, rmap_one, 
arg); + return rmap_walk_ksm(page, rwc); else if (PageAnon(page)) - return rmap_walk_anon(page, rmap_one, arg); + return rmap_walk_anon(page, rwc); else - return rmap_walk_file(page, rmap_one, arg); + return rmap_walk_file(page, rwc); } #endif /* CONFIG_MIGRATION */ -- cgit v1.2.3 From 0dd1c7bbce8d1d142bb25aefaa50262dfd77cb78 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 21 Jan 2014 15:49:49 -0800 Subject: mm/rmap: extend rmap_walk_xxx() to cope with different cases There are a lot of common parts in traversing functions, but there are also a little of uncommon parts in it. By assigning proper function pointer on each rmap_walker_control, we can handle these difference correctly. Following are differences we should handle. 1. difference of lock function in anon mapping case 2. nonlinear handling in file mapping case 3. prechecked condition: checking memcg in page_referenced(), checking VM_SHARE in page_mkclean() checking temporary vma in try_to_unmap() 4. exit condition: checking page_mapped() in try_to_unmap() So, in this patch, I introduce 4 function pointers to handle above differences. Signed-off-by: Joonsoo Kim Cc: Naoya Horiguchi Cc: Mel Gorman Cc: Hugh Dickins Cc: Rik van Riel Cc: Ingo Molnar Cc: Hillf Danton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rmap.h | 15 +++++++++++++++ mm/ksm.c | 7 +++++++ mm/rmap.c | 37 +++++++++++++++++++++++++++++-------- 3 files changed, 51 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 6a456ce6de20..616aa4d05f0a 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -235,10 +235,25 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page); void page_unlock_anon_vma_read(struct anon_vma *anon_vma); int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); +/* + * rmap_walk_control: To control rmap traversing for specific needs + * + * arg: passed to rmap_one() and invalid_vma() + * rmap_one: executed on each vma where page is mapped + * done: for checking traversing termination condition + * file_nonlinear: for handling file nonlinear mapping + * anon_lock: for getting anon_lock by optimized way rather than default + * invalid_vma: for skipping uninterested vma + */ struct rmap_walk_control { void *arg; int (*rmap_one)(struct page *page, struct vm_area_struct *vma, unsigned long addr, void *arg); + int (*done)(struct page *page); + int (*file_nonlinear)(struct page *, struct address_space *, + struct vm_area_struct *vma); + struct anon_vma *(*anon_lock)(struct page *page); + bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); }; /* diff --git a/mm/ksm.c b/mm/ksm.c index c3035fee8080..91b8cb35f7cc 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2032,12 +2032,19 @@ again: if ((rmap_item->mm == vma->vm_mm) == search_new_forks) continue; + if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) + continue; + ret = rwc->rmap_one(page, vma, rmap_item->address, rwc->arg); if (ret != SWAP_AGAIN) { anon_vma_unlock_read(anon_vma); goto out; } + if (rwc->done && rwc->done(page)) { + anon_vma_unlock_read(anon_vma); + goto out; + } } anon_vma_unlock_read(anon_vma); } diff --git a/mm/rmap.c b/mm/rmap.c index f8f10ad5d359..97bf8f0396f8 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1684,10 +1684,14 @@ void __put_anon_vma(struct anon_vma *anon_vma) } #ifdef CONFIG_MIGRATION -static struct anon_vma *rmap_walk_anon_lock(struct page *page) +static struct anon_vma *rmap_walk_anon_lock(struct page *page, + struct rmap_walk_control *rwc) { 
struct anon_vma *anon_vma; + if (rwc->anon_lock) + return rwc->anon_lock(page); + /* * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read() * because that depends on page_mapped(); but not all its usages @@ -1713,16 +1717,22 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc) struct anon_vma_chain *avc; int ret = SWAP_AGAIN; - anon_vma = rmap_walk_anon_lock(page); + anon_vma = rmap_walk_anon_lock(page, rwc); if (!anon_vma) return ret; anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { struct vm_area_struct *vma = avc->vma; unsigned long address = vma_address(page, vma); + + if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) + continue; + ret = rwc->rmap_one(page, vma, address, rwc->arg); if (ret != SWAP_AGAIN) break; + if (rwc->done && rwc->done(page)) + break; } anon_vma_unlock_read(anon_vma); return ret; @@ -1740,15 +1750,26 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) mutex_lock(&mapping->i_mmap_mutex); vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { unsigned long address = vma_address(page, vma); + + if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) + continue; + ret = rwc->rmap_one(page, vma, address, rwc->arg); if (ret != SWAP_AGAIN) - break; + goto done; + if (rwc->done && rwc->done(page)) + goto done; } - /* - * No nonlinear handling: being always shared, nonlinear vmas - * never contain migration ptes. Decide what to do about this - * limitation to linear when we need rmap_walk() on nonlinear. - */ + + if (!rwc->file_nonlinear) + goto done; + + if (list_empty(&mapping->i_mmap_nonlinear)) + goto done; + + ret = rwc->file_nonlinear(page, mapping, vma); + +done: mutex_unlock(&mapping->i_mmap_mutex); return ret; } -- cgit v1.2.3 From 52629506420ce32997f1fba0a1ab2f1aaa9a4f79 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 21 Jan 2014 15:49:50 -0800 Subject: mm/rmap: use rmap_walk() in try_to_unmap() Now, we have an infrastructure in rmap_walk() to handle difference from variants of rmap traversing functions. So, just use it in try_to_unmap(). In this patch, I change following things. 1. enable rmap_walk() if !CONFIG_MIGRATION. 2. mechanical change to use rmap_walk() in try_to_unmap(). Signed-off-by: Joonsoo Kim Reviewed-by: Naoya Horiguchi Cc: Mel Gorman Cc: Hugh Dickins Cc: Rik van Riel Cc: Ingo Molnar Cc: Hillf Danton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rmap.h | 5 +---- mm/ksm.c | 4 ++-- mm/rmap.c | 48 ++++++++++++++++++++++++++++++++++++------------ 3 files changed, 39 insertions(+), 18 deletions(-) (limited to 'mm') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 616aa4d05f0a..2462458708cd 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -190,7 +190,7 @@ int page_referenced_one(struct page *, struct vm_area_struct *, int try_to_unmap(struct page *, enum ttu_flags flags); int try_to_unmap_one(struct page *, struct vm_area_struct *, - unsigned long address, enum ttu_flags flags); + unsigned long address, void *arg); /* * Called from mm/filemap_xip.c to unmap empty zero page @@ -256,9 +256,6 @@ struct rmap_walk_control { bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); }; -/* - * Called by migrate.c to remove migration ptes, but might be used more later. 
- */ int rmap_walk(struct page *page, struct rmap_walk_control *rwc); #else /* !CONFIG_MMU */ diff --git a/mm/ksm.c b/mm/ksm.c index 91b8cb35f7cc..6b4baa97f4c0 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1982,7 +1982,7 @@ again: continue; ret = try_to_unmap_one(page, vma, - rmap_item->address, flags); + rmap_item->address, (void *)flags); if (ret != SWAP_AGAIN || !page_mapped(page)) { anon_vma_unlock_read(anon_vma); goto out; @@ -1996,7 +1996,6 @@ out: return ret; } -#ifdef CONFIG_MIGRATION int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) { struct stable_node *stable_node; @@ -2054,6 +2053,7 @@ out: return ret; } +#ifdef CONFIG_MIGRATION void ksm_migrate_page(struct page *newpage, struct page *oldpage) { struct stable_node *stable_node; diff --git a/mm/rmap.c b/mm/rmap.c index 97bf8f0396f8..b3263cb32361 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1179,15 +1179,18 @@ out: /* * Subfunctions of try_to_unmap: try_to_unmap_one called * repeatedly from try_to_unmap_ksm, try_to_unmap_anon or try_to_unmap_file. + * + * @arg: enum ttu_flags will be passed to this argument */ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, - unsigned long address, enum ttu_flags flags) + unsigned long address, void *arg) { struct mm_struct *mm = vma->vm_mm; pte_t *pte; pte_t pteval; spinlock_t *ptl; int ret = SWAP_AGAIN; + enum ttu_flags flags = (enum ttu_flags)arg; pte = page_check_address(page, mm, address, &ptl, 0); if (!pte) @@ -1513,6 +1516,11 @@ bool is_vma_temporary_stack(struct vm_area_struct *vma) return false; } +static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) +{ + return is_vma_temporary_stack(vma); +} + /** * try_to_unmap_anon - unmap or unlock anonymous page using the object-based * rmap method @@ -1558,7 +1566,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags) continue; address = vma_address(page, vma); - ret = try_to_unmap_one(page, vma, address, flags); + ret = try_to_unmap_one(page, vma, address, (void *)flags); if (ret != SWAP_AGAIN || !page_mapped(page)) break; } @@ -1592,7 +1600,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) mutex_lock(&mapping->i_mmap_mutex); vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { unsigned long address = vma_address(page, vma); - ret = try_to_unmap_one(page, vma, address, flags); + ret = try_to_unmap_one(page, vma, address, (void *)flags); if (ret != SWAP_AGAIN || !page_mapped(page)) goto out; } @@ -1614,6 +1622,11 @@ out: return ret; } +static int page_not_mapped(struct page *page) +{ + return !page_mapped(page); +}; + /** * try_to_unmap - try to remove all page table mappings to a page * @page: the page to get unmapped @@ -1631,16 +1644,29 @@ out: int try_to_unmap(struct page *page, enum ttu_flags flags) { int ret; + struct rmap_walk_control rwc = { + .rmap_one = try_to_unmap_one, + .arg = (void *)flags, + .done = page_not_mapped, + .file_nonlinear = try_to_unmap_nonlinear, + .anon_lock = page_lock_anon_vma_read, + }; - BUG_ON(!PageLocked(page)); VM_BUG_ON(!PageHuge(page) && PageTransHuge(page)); - if (unlikely(PageKsm(page))) - ret = try_to_unmap_ksm(page, flags); - else if (PageAnon(page)) - ret = try_to_unmap_anon(page, flags); - else - ret = try_to_unmap_file(page, flags); + /* + * During exec, a temporary VMA is setup and later moved. + * The VMA is moved under the anon_vma lock but not the + * page tables leading to a race where migration cannot + * find the migration ptes. 
Rather than increasing the + * locking requirements of exec(), migration skips + * temporary VMAs until after exec() completes. + */ + if (flags & TTU_MIGRATION && !PageKsm(page) && PageAnon(page)) + rwc.invalid_vma = invalid_migration_vma; + + ret = rmap_walk(page, &rwc); + if (ret != SWAP_MLOCK && !page_mapped(page)) ret = SWAP_SUCCESS; return ret; @@ -1683,7 +1709,6 @@ void __put_anon_vma(struct anon_vma *anon_vma) anon_vma_free(anon_vma); } -#ifdef CONFIG_MIGRATION static struct anon_vma *rmap_walk_anon_lock(struct page *page, struct rmap_walk_control *rwc) { @@ -1785,7 +1810,6 @@ int rmap_walk(struct page *page, struct rmap_walk_control *rwc) else return rmap_walk_file(page, rwc); } -#endif /* CONFIG_MIGRATION */ #ifdef CONFIG_HUGETLB_PAGE /* -- cgit v1.2.3 From e8351ac9bfa7f4412d5d196b6742309473ca506d Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 21 Jan 2014 15:49:52 -0800 Subject: mm/rmap: use rmap_walk() in try_to_munlock() Now, we have an infrastructure in rmap_walk() to handle difference from variants of rmap traversing functions. So, just use it in try_to_munlock(). In this patch, I change following things. 1. remove some variants of rmap traversing functions. cf> try_to_unmap_ksm, try_to_unmap_anon, try_to_unmap_file 2. mechanical change to use rmap_walk() in try_to_munlock(). 3. copy and paste comments. Signed-off-by: Joonsoo Kim Reviewed-by: Naoya Horiguchi Cc: Mel Gorman Cc: Hugh Dickins Cc: Rik van Riel Cc: Ingo Molnar Cc: Hillf Danton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ksm.h | 6 -- mm/ksm.c | 50 ----------------- mm/rmap.c | 154 ++++++++++++++-------------------------------------- 3 files changed, 42 insertions(+), 168 deletions(-) (limited to 'mm') diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 0eef8cb0baf7..91b9719722c3 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -75,7 +75,6 @@ struct page *ksm_might_need_to_copy(struct page *page, int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, unsigned long *vm_flags); -int try_to_unmap_ksm(struct page *page, enum ttu_flags flags); int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); void ksm_migrate_page(struct page *newpage, struct page *oldpage); @@ -114,11 +113,6 @@ static inline int page_referenced_ksm(struct page *page, return 0; } -static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) -{ - return 0; -} - static inline int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) { diff --git a/mm/ksm.c b/mm/ksm.c index 6b4baa97f4c0..646d45a6b6c8 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1946,56 +1946,6 @@ out: return referenced; } -int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) -{ - struct stable_node *stable_node; - struct rmap_item *rmap_item; - int ret = SWAP_AGAIN; - int search_new_forks = 0; - - VM_BUG_ON(!PageKsm(page)); - VM_BUG_ON(!PageLocked(page)); - - stable_node = page_stable_node(page); - if (!stable_node) - return SWAP_FAIL; -again: - hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { - struct anon_vma *anon_vma = rmap_item->anon_vma; - struct anon_vma_chain *vmac; - struct vm_area_struct *vma; - - anon_vma_lock_read(anon_vma); - anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, - 0, ULONG_MAX) { - vma = vmac->vma; - if (rmap_item->address < vma->vm_start || - rmap_item->address >= vma->vm_end) - continue; - /* - * Initially we examine only the vma which covers this - * rmap_item; but later, if there is still work to do, - * we examine 
covering vmas in other mms: in case they - * were forked from the original since ksmd passed. - */ - if ((rmap_item->mm == vma->vm_mm) == search_new_forks) - continue; - - ret = try_to_unmap_one(page, vma, - rmap_item->address, (void *)flags); - if (ret != SWAP_AGAIN || !page_mapped(page)) { - anon_vma_unlock_read(anon_vma); - goto out; - } - } - anon_vma_unlock_read(anon_vma); - } - if (!search_new_forks++) - goto again; -out: - return ret; -} - int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) { struct stable_node *stable_node; diff --git a/mm/rmap.c b/mm/rmap.c index b3263cb32361..c73e0c645d09 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1177,9 +1177,6 @@ out: } /* - * Subfunctions of try_to_unmap: try_to_unmap_one called - * repeatedly from try_to_unmap_ksm, try_to_unmap_anon or try_to_unmap_file. - * * @arg: enum ttu_flags will be passed to this argument */ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, @@ -1521,107 +1518,6 @@ static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) return is_vma_temporary_stack(vma); } -/** - * try_to_unmap_anon - unmap or unlock anonymous page using the object-based - * rmap method - * @page: the page to unmap/unlock - * @flags: action and flags - * - * Find all the mappings of a page using the mapping pointer and the vma chains - * contained in the anon_vma struct it points to. - * - * This function is only called from try_to_unmap/try_to_munlock for - * anonymous pages. - * When called from try_to_munlock(), the mmap_sem of the mm containing the vma - * where the page was found will be held for write. So, we won't recheck - * vm_flags for that VMA. That should be OK, because that vma shouldn't be - * 'LOCKED. - */ -static int try_to_unmap_anon(struct page *page, enum ttu_flags flags) -{ - struct anon_vma *anon_vma; - pgoff_t pgoff; - struct anon_vma_chain *avc; - int ret = SWAP_AGAIN; - - anon_vma = page_lock_anon_vma_read(page); - if (!anon_vma) - return ret; - - pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); - anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { - struct vm_area_struct *vma = avc->vma; - unsigned long address; - - /* - * During exec, a temporary VMA is setup and later moved. - * The VMA is moved under the anon_vma lock but not the - * page tables leading to a race where migration cannot - * find the migration ptes. Rather than increasing the - * locking requirements of exec(), migration skips - * temporary VMAs until after exec() completes. - */ - if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) && - is_vma_temporary_stack(vma)) - continue; - - address = vma_address(page, vma); - ret = try_to_unmap_one(page, vma, address, (void *)flags); - if (ret != SWAP_AGAIN || !page_mapped(page)) - break; - } - - page_unlock_anon_vma_read(anon_vma); - return ret; -} - -/** - * try_to_unmap_file - unmap/unlock file page using the object-based rmap method - * @page: the page to unmap/unlock - * @flags: action and flags - * - * Find all the mappings of a page using the mapping pointer and the vma chains - * contained in the address_space struct it points to. - * - * This function is only called from try_to_unmap/try_to_munlock for - * object-based pages. - * When called from try_to_munlock(), the mmap_sem of the mm containing the vma - * where the page was found will be held for write. So, we won't recheck - * vm_flags for that VMA. That should be OK, because that vma shouldn't be - * 'LOCKED. 
- */ -static int try_to_unmap_file(struct page *page, enum ttu_flags flags) -{ - struct address_space *mapping = page->mapping; - pgoff_t pgoff = page->index << compound_order(page); - struct vm_area_struct *vma; - int ret = SWAP_AGAIN; - - mutex_lock(&mapping->i_mmap_mutex); - vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { - unsigned long address = vma_address(page, vma); - ret = try_to_unmap_one(page, vma, address, (void *)flags); - if (ret != SWAP_AGAIN || !page_mapped(page)) - goto out; - } - - if (list_empty(&mapping->i_mmap_nonlinear)) - goto out; - - /* - * We don't bother to try to find the munlocked page in nonlinears. - * It's costly. Instead, later, page reclaim logic may call - * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily. - */ - if (TTU_ACTION(flags) == TTU_MUNLOCK) - goto out; - - ret = try_to_unmap_nonlinear(page, mapping, vma); -out: - mutex_unlock(&mapping->i_mmap_mutex); - return ret; -} - static int page_not_mapped(struct page *page) { return !page_mapped(page); @@ -1689,14 +1585,25 @@ int try_to_unmap(struct page *page, enum ttu_flags flags) */ int try_to_munlock(struct page *page) { + int ret; + struct rmap_walk_control rwc = { + .rmap_one = try_to_unmap_one, + .arg = (void *)TTU_MUNLOCK, + .done = page_not_mapped, + /* + * We don't bother to try to find the munlocked page in + * nonlinears. It's costly. Instead, later, page reclaim logic + * may call try_to_unmap() and recover PG_mlocked lazily. + */ + .file_nonlinear = NULL, + .anon_lock = page_lock_anon_vma_read, + + }; + VM_BUG_ON(!PageLocked(page) || PageLRU(page)); - if (unlikely(PageKsm(page))) - return try_to_unmap_ksm(page, TTU_MUNLOCK); - else if (PageAnon(page)) - return try_to_unmap_anon(page, TTU_MUNLOCK); - else - return try_to_unmap_file(page, TTU_MUNLOCK); + ret = rmap_walk(page, &rwc); + return ret; } void __put_anon_vma(struct anon_vma *anon_vma) @@ -1732,8 +1639,18 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page, } /* - * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file(): - * Called by migrate.c to remove migration ptes, but might be used more later. + * rmap_walk_anon - do something to anonymous page using the object-based + * rmap method + * @page: the page to be handled + * @rwc: control variable according to each walk type + * + * Find all the mappings of a page using the mapping pointer and the vma chains + * contained in the anon_vma struct it points to. + * + * When called from try_to_munlock(), the mmap_sem of the mm containing the vma + * where the page was found will be held for write. So, we won't recheck + * vm_flags for that VMA. That should be OK, because that vma shouldn't be + * LOCKED. */ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc) { @@ -1763,6 +1680,19 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc) return ret; } +/* + * rmap_walk_file - do something to file page using the object-based rmap method + * @page: the page to be handled + * @rwc: control variable according to each walk type + * + * Find all the mappings of a page using the mapping pointer and the vma chains + * contained in the address_space struct it points to. + * + * When called from try_to_munlock(), the mmap_sem of the mm containing the vma + * where the page was found will be held for write. So, we won't recheck + * vm_flags for that VMA. That should be OK, because that vma shouldn't be + * LOCKED. 
+ */ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) { struct address_space *mapping = page->mapping; -- cgit v1.2.3 From 9f32624be943538983eb0f18b73a9052d1493c80 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 21 Jan 2014 15:49:53 -0800 Subject: mm/rmap: use rmap_walk() in page_referenced() Now, we have an infrastructure in rmap_walk() to handle difference from variants of rmap traversing functions. So, just use it in page_referenced(). In this patch, I change following things. 1. remove some variants of rmap traversing functions. cf> page_referenced_ksm, page_referenced_anon, page_referenced_file 2. introduce new struct page_referenced_arg and pass it to page_referenced_one(), main function of rmap_walk, in order to count reference, to store vm_flags and to check finish condition. 3. mechanical change to use rmap_walk() in page_referenced(). [liwanp@linux.vnet.ibm.com: fix BUG at rmap_walk] Signed-off-by: Joonsoo Kim Reviewed-by: Naoya Horiguchi Cc: Mel Gorman Cc: Hugh Dickins Cc: Rik van Riel Cc: Ingo Molnar Cc: Hillf Danton Signed-off-by: Wanpeng Li Cc: Sasha Levin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ksm.h | 2 - include/linux/rmap.h | 2 +- mm/ksm.c | 60 ++------------- mm/rmap.c | 210 ++++++++++++++++++--------------------------------- 4 files changed, 80 insertions(+), 194 deletions(-) (limited to 'mm') diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 91b9719722c3..3be6bb18562d 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -73,8 +73,6 @@ static inline void set_page_stable_node(struct page *page, struct page *ksm_might_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address); -int page_referenced_ksm(struct page *page, - struct mem_cgroup *memcg, unsigned long *vm_flags); int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); void ksm_migrate_page(struct page *newpage, struct page *oldpage); diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 2462458708cd..1da693d51255 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -184,7 +184,7 @@ static inline void page_dup_rmap(struct page *page) int page_referenced(struct page *, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags); int page_referenced_one(struct page *, struct vm_area_struct *, - unsigned long address, unsigned int *mapcount, unsigned long *vm_flags); + unsigned long address, void *arg); #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK) diff --git a/mm/ksm.c b/mm/ksm.c index 646d45a6b6c8..3df141e5f3e0 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1891,61 +1891,6 @@ struct page *ksm_might_need_to_copy(struct page *page, return new_page; } -int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, - unsigned long *vm_flags) -{ - struct stable_node *stable_node; - struct rmap_item *rmap_item; - unsigned int mapcount = page_mapcount(page); - int referenced = 0; - int search_new_forks = 0; - - VM_BUG_ON(!PageKsm(page)); - VM_BUG_ON(!PageLocked(page)); - - stable_node = page_stable_node(page); - if (!stable_node) - return 0; -again: - hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { - struct anon_vma *anon_vma = rmap_item->anon_vma; - struct anon_vma_chain *vmac; - struct vm_area_struct *vma; - - anon_vma_lock_read(anon_vma); - anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, - 0, ULONG_MAX) { - vma = vmac->vma; - if (rmap_item->address < vma->vm_start || - rmap_item->address >= vma->vm_end) - continue; - /* - * Initially we examine 
only the vma which covers this - * rmap_item; but later, if there is still work to do, - * we examine covering vmas in other mms: in case they - * were forked from the original since ksmd passed. - */ - if ((rmap_item->mm == vma->vm_mm) == search_new_forks) - continue; - - if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) - continue; - - referenced += page_referenced_one(page, vma, - rmap_item->address, &mapcount, vm_flags); - if (!search_new_forks || !mapcount) - break; - } - anon_vma_unlock_read(anon_vma); - if (!mapcount) - goto out; - } - if (!search_new_forks++) - goto again; -out: - return referenced; -} - int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) { struct stable_node *stable_node; @@ -1954,6 +1899,11 @@ int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) int search_new_forks = 0; VM_BUG_ON(!PageKsm(page)); + + /* + * Rely on the page lock to protect against concurrent modifications + * to that page's node of the stable tree. + */ VM_BUG_ON(!PageLocked(page)); stable_node = page_stable_node(page); diff --git a/mm/rmap.c b/mm/rmap.c index c73e0c645d09..080413036406 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -660,17 +660,22 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) return 1; } +struct page_referenced_arg { + int mapcount; + int referenced; + unsigned long vm_flags; + struct mem_cgroup *memcg; +}; /* - * Subfunctions of page_referenced: page_referenced_one called - * repeatedly from either page_referenced_anon or page_referenced_file. + * arg: page_referenced_arg will be passed */ int page_referenced_one(struct page *page, struct vm_area_struct *vma, - unsigned long address, unsigned int *mapcount, - unsigned long *vm_flags) + unsigned long address, void *arg) { struct mm_struct *mm = vma->vm_mm; spinlock_t *ptl; int referenced = 0; + struct page_referenced_arg *pra = arg; if (unlikely(PageTransHuge(page))) { pmd_t *pmd; @@ -682,13 +687,12 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma, pmd = page_check_address_pmd(page, mm, address, PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl); if (!pmd) - goto out; + return SWAP_AGAIN; if (vma->vm_flags & VM_LOCKED) { spin_unlock(ptl); - *mapcount = 0; /* break early from loop */ - *vm_flags |= VM_LOCKED; - goto out; + pra->vm_flags |= VM_LOCKED; + return SWAP_FAIL; /* To break the loop */ } /* go ahead even if the pmd is pmd_trans_splitting() */ @@ -704,13 +708,12 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma, */ pte = page_check_address(page, mm, address, &ptl, 0); if (!pte) - goto out; + return SWAP_AGAIN; if (vma->vm_flags & VM_LOCKED) { pte_unmap_unlock(pte, ptl); - *mapcount = 0; /* break early from loop */ - *vm_flags |= VM_LOCKED; - goto out; + pra->vm_flags |= VM_LOCKED; + return SWAP_FAIL; /* To break the loop */ } if (ptep_clear_flush_young_notify(vma, address, pte)) { @@ -727,113 +730,27 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma, pte_unmap_unlock(pte, ptl); } - (*mapcount)--; - - if (referenced) - *vm_flags |= vma->vm_flags; -out: - return referenced; -} - -static int page_referenced_anon(struct page *page, - struct mem_cgroup *memcg, - unsigned long *vm_flags) -{ - unsigned int mapcount; - struct anon_vma *anon_vma; - pgoff_t pgoff; - struct anon_vma_chain *avc; - int referenced = 0; - - anon_vma = page_lock_anon_vma_read(page); - if (!anon_vma) - return referenced; - - mapcount = page_mapcount(page); - pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); - anon_vma_interval_tree_foreach(avc, 
&anon_vma->rb_root, pgoff, pgoff) { - struct vm_area_struct *vma = avc->vma; - unsigned long address = vma_address(page, vma); - /* - * If we are reclaiming on behalf of a cgroup, skip - * counting on behalf of references from different - * cgroups - */ - if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) - continue; - referenced += page_referenced_one(page, vma, address, - &mapcount, vm_flags); - if (!mapcount) - break; + if (referenced) { + pra->referenced++; + pra->vm_flags |= vma->vm_flags; } - page_unlock_anon_vma_read(anon_vma); - return referenced; + pra->mapcount--; + if (!pra->mapcount) + return SWAP_SUCCESS; /* To break the loop */ + + return SWAP_AGAIN; } -/** - * page_referenced_file - referenced check for object-based rmap - * @page: the page we're checking references on. - * @memcg: target memory control group - * @vm_flags: collect encountered vma->vm_flags who actually referenced the page - * - * For an object-based mapped page, find all the places it is mapped and - * check/clear the referenced flag. This is done by following the page->mapping - * pointer, then walking the chain of vmas it holds. It returns the number - * of references it found. - * - * This function is only called from page_referenced for object-based pages. - */ -static int page_referenced_file(struct page *page, - struct mem_cgroup *memcg, - unsigned long *vm_flags) +static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg) { - unsigned int mapcount; - struct address_space *mapping = page->mapping; - pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); - struct vm_area_struct *vma; - int referenced = 0; - - /* - * The caller's checks on page->mapping and !PageAnon have made - * sure that this is a file page: the check for page->mapping - * excludes the case just before it gets set on an anon page. - */ - BUG_ON(PageAnon(page)); + struct page_referenced_arg *pra = arg; + struct mem_cgroup *memcg = pra->memcg; - /* - * The page lock not only makes sure that page->mapping cannot - * suddenly be NULLified by truncation, it makes sure that the - * structure at mapping cannot be freed and reused yet, - * so we can safely take mapping->i_mmap_mutex. - */ - BUG_ON(!PageLocked(page)); - - mutex_lock(&mapping->i_mmap_mutex); - - /* - * i_mmap_mutex does not stabilize mapcount at all, but mapcount - * is more likely to be accurate if we note it after spinning. 
- */ - mapcount = page_mapcount(page); - - vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { - unsigned long address = vma_address(page, vma); - /* - * If we are reclaiming on behalf of a cgroup, skip - * counting on behalf of references from different - * cgroups - */ - if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) - continue; - referenced += page_referenced_one(page, vma, address, - &mapcount, vm_flags); - if (!mapcount) - break; - } + if (!mm_match_cgroup(vma->vm_mm, memcg)) + return true; - mutex_unlock(&mapping->i_mmap_mutex); - return referenced; + return false; } /** @@ -851,32 +768,47 @@ int page_referenced(struct page *page, struct mem_cgroup *memcg, unsigned long *vm_flags) { - int referenced = 0; + int ret; int we_locked = 0; + struct page_referenced_arg pra = { + .mapcount = page_mapcount(page), + .memcg = memcg, + }; + struct rmap_walk_control rwc = { + .rmap_one = page_referenced_one, + .arg = (void *)&pra, + .anon_lock = page_lock_anon_vma_read, + }; *vm_flags = 0; - if (page_mapped(page) && page_rmapping(page)) { - if (!is_locked && (!PageAnon(page) || PageKsm(page))) { - we_locked = trylock_page(page); - if (!we_locked) { - referenced++; - goto out; - } - } - if (unlikely(PageKsm(page))) - referenced += page_referenced_ksm(page, memcg, - vm_flags); - else if (PageAnon(page)) - referenced += page_referenced_anon(page, memcg, - vm_flags); - else if (page->mapping) - referenced += page_referenced_file(page, memcg, - vm_flags); - if (we_locked) - unlock_page(page); + if (!page_mapped(page)) + return 0; + + if (!page_rmapping(page)) + return 0; + + if (!is_locked && (!PageAnon(page) || PageKsm(page))) { + we_locked = trylock_page(page); + if (!we_locked) + return 1; } -out: - return referenced; + + /* + * If we are reclaiming on behalf of a cgroup, skip + * counting on behalf of references from different + * cgroups + */ + if (memcg) { + rwc.invalid_vma = invalid_page_referenced_vma; + } + + ret = rmap_walk(page, &rwc); + *vm_flags = pra.vm_flags; + + if (we_locked) + unlock_page(page); + + return pra.referenced; } static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, @@ -1700,6 +1632,14 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) struct vm_area_struct *vma; int ret = SWAP_AGAIN; + /* + * The page lock not only makes sure that page->mapping cannot + * suddenly be NULLified by truncation, it makes sure that the + * structure at mapping cannot be freed and reused yet, + * so we can safely take mapping->i_mmap_mutex. + */ + VM_BUG_ON(!PageLocked(page)); + if (!mapping) return ret; mutex_lock(&mapping->i_mmap_mutex); @@ -1731,8 +1671,6 @@ done: int rmap_walk(struct page *page, struct rmap_walk_control *rwc) { - VM_BUG_ON(!PageLocked(page)); - if (unlikely(PageKsm(page))) return rmap_walk_ksm(page, rwc); else if (PageAnon(page)) -- cgit v1.2.3 From 9853a407b97d8d066b5a865173a4859a3e69fd8a Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 21 Jan 2014 15:49:55 -0800 Subject: mm/rmap: use rmap_walk() in page_mkclean() Now, we have an infrastructure in rmap_walk() to handle difference from variants of rmap traversing functions. So, just use it in page_mkclean(). In this patch, I change following things. 1. remove some variants of rmap traversing functions. cf> page_mkclean_file 2. mechanical change to use rmap_walk() in page_mkclean(). 
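The page_mkclean() conversion exercises two of the rmap_walk_control hooks at once: invalid_vma() skips VMAs that are not VM_SHARED, and a counter is threaded through .arg so page_mkclean_one() can report how many ptes it cleaned. Below is a compact, userspace-style sketch of that idiom, with the same caveat as before: the types and the walk loop are simplified stand-ins, not the kernel implementation; the real hunk follows.

/* Sketch of the invalid_vma filter + counter-through-arg idiom.
 * Types and the walk loop are illustrative stand-ins. */
#include <stdbool.h>
#include <stdio.h>

#define VM_SHARED  0x1
#define SWAP_AGAIN 1

struct page { int id; };
struct vm_area_struct { unsigned long vm_flags; };

struct rmap_walk_control {
	void *arg;
	int (*rmap_one)(struct page *, struct vm_area_struct *,
			unsigned long, void *);
	bool (*invalid_vma)(struct vm_area_struct *, void *);
};

static int mkclean_one(struct page *page, struct vm_area_struct *vma,
		       unsigned long addr, void *arg)
{
	int *cleaned = arg;

	(*cleaned)++;		/* stand-in for write-protecting one pte */
	return SWAP_AGAIN;	/* keep walking */
}

static bool skip_unshared(struct vm_area_struct *vma, void *arg)
{
	return !(vma->vm_flags & VM_SHARED);	/* only shared mappings matter */
}

static void rmap_walk(struct page *page, struct rmap_walk_control *rwc,
		      struct vm_area_struct *vmas, int nr)
{
	for (int i = 0; i < nr; i++) {
		if (rwc->invalid_vma && rwc->invalid_vma(&vmas[i], rwc->arg))
			continue;
		if (rwc->rmap_one(page, &vmas[i], 0, rwc->arg) != SWAP_AGAIN)
			break;
	}
}

int main(void)
{
	struct page pg = { 0 };
	struct vm_area_struct vmas[] = { { VM_SHARED }, { 0 }, { VM_SHARED } };
	int cleaned = 0;
	struct rmap_walk_control rwc = {
		.arg = &cleaned,
		.rmap_one = mkclean_one,
		.invalid_vma = skip_unshared,
	};

	rmap_walk(&pg, &rwc, vmas, 3);
	printf("cleaned %d pte(s)\n", cleaned);	/* prints 2: the unshared VMA is skipped */
	return 0;
}

Returning SWAP_AGAIN from the callback keeps the walk going, and a filtered VMA is simply skipped, so the final count reflects shared mappings only.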
Signed-off-by: Joonsoo Kim Reviewed-by: Naoya Horiguchi Cc: Mel Gorman Cc: Hugh Dickins Cc: Rik van Riel Cc: Ingo Molnar Cc: Hillf Danton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/rmap.c | 51 ++++++++++++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 25 deletions(-) (limited to 'mm') diff --git a/mm/rmap.c b/mm/rmap.c index 080413036406..962e2a1e13a0 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -812,12 +812,13 @@ int page_referenced(struct page *page, } static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, - unsigned long address) + unsigned long address, void *arg) { struct mm_struct *mm = vma->vm_mm; pte_t *pte; spinlock_t *ptl; int ret = 0; + int *cleaned = arg; pte = page_check_address(page, mm, address, &ptl, 1); if (!pte) @@ -836,44 +837,44 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, pte_unmap_unlock(pte, ptl); - if (ret) + if (ret) { mmu_notifier_invalidate_page(mm, address); + (*cleaned)++; + } out: - return ret; + return SWAP_AGAIN; } -static int page_mkclean_file(struct address_space *mapping, struct page *page) +static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) { - pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); - struct vm_area_struct *vma; - int ret = 0; - - BUG_ON(PageAnon(page)); + if (vma->vm_flags & VM_SHARED) + return 0; - mutex_lock(&mapping->i_mmap_mutex); - vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { - if (vma->vm_flags & VM_SHARED) { - unsigned long address = vma_address(page, vma); - ret += page_mkclean_one(page, vma, address); - } - } - mutex_unlock(&mapping->i_mmap_mutex); - return ret; + return 1; } int page_mkclean(struct page *page) { - int ret = 0; + int cleaned = 0; + struct address_space *mapping; + struct rmap_walk_control rwc = { + .arg = (void *)&cleaned, + .rmap_one = page_mkclean_one, + .invalid_vma = invalid_mkclean_vma, + }; BUG_ON(!PageLocked(page)); - if (page_mapped(page)) { - struct address_space *mapping = page_mapping(page); - if (mapping) - ret = page_mkclean_file(mapping, page); - } + if (!page_mapped(page)) + return 0; - return ret; + mapping = page_mapping(page); + if (!mapping) + return 0; + + rmap_walk(page, &rwc); + + return cleaned; } EXPORT_SYMBOL_GPL(page_mkclean); -- cgit v1.2.3 From 1da4db0cd5c8a31d4468ec906b413e75e604b465 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 21 Jan 2014 15:49:58 -0800 Subject: oom_kill: change oom_kill.c to use for_each_thread() Change oom_kill.c to use for_each_thread() rather than the racy while_each_thread() which can loop forever if we race with exit. Note also that most users were buggy even if while_each_thread() was fine, the task can exit even _before_ rcu_read_lock(). Fortunately the new for_each_thread() only requires the stable task_struct, so this change fixes both problems. Signed-off-by: Oleg Nesterov Reviewed-by: Sergey Dyasly Tested-by: Sergey Dyasly Reviewed-by: Sameer Nanda Cc: "Eric W. 
Biederman" Cc: Frederic Weisbecker Cc: Mandeep Singh Baines Cc: "Ma, Xindong" Reviewed-by: Michal Hocko Cc: "Tu, Xiaobing" Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/oom_kill.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 1e4a600a6163..96d7945f75a6 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -59,7 +59,7 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk, { struct task_struct *start = tsk; - do { + for_each_thread(start, tsk) { if (mask) { /* * If this is a mempolicy constrained oom, tsk's @@ -77,7 +77,7 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk, if (cpuset_mems_allowed_intersects(current, tsk)) return true; } - } while_each_thread(start, tsk); + } return false; } @@ -97,14 +97,14 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk, */ struct task_struct *find_lock_task_mm(struct task_struct *p) { - struct task_struct *t = p; + struct task_struct *t; - do { + for_each_thread(p, t) { task_lock(t); if (likely(t->mm)) return t; task_unlock(t); - } while_each_thread(p, t); + } return NULL; } @@ -301,7 +301,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, unsigned long chosen_points = 0; rcu_read_lock(); - do_each_thread(g, p) { + for_each_process_thread(g, p) { unsigned int points; switch (oom_scan_process_thread(p, totalpages, nodemask, @@ -323,7 +323,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, chosen = p; chosen_points = points; } - } while_each_thread(g, p); + } if (chosen) get_task_struct(chosen); rcu_read_unlock(); @@ -406,7 +406,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, { struct task_struct *victim = p; struct task_struct *child; - struct task_struct *t = p; + struct task_struct *t; struct mm_struct *mm; unsigned int victim_points = 0; static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL, @@ -437,7 +437,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, * still freeing memory. */ read_lock(&tasklist_lock); - do { + for_each_thread(p, t) { list_for_each_entry(child, &t->children, sibling) { unsigned int child_points; @@ -455,7 +455,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, get_task_struct(victim); } } - } while_each_thread(p, t); + } read_unlock(&tasklist_lock); rcu_read_lock(); -- cgit v1.2.3 From ad96244179fbd55b40c00f10f399bc04739b8e1f Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 21 Jan 2014 15:50:00 -0800 Subject: oom_kill: has_intersects_mems_allowed() needs rcu_read_lock() At least out_of_memory() calls has_intersects_mems_allowed() without even rcu_read_lock(), this is obviously buggy. Add the necessary rcu_read_lock(). This means that we can not simply return from the loop, we need "bool ret" and "break". While at it, swap the names of task_struct's (the argument and the local). This cleans up the code a little bit and avoids the unnecessary initialization. Signed-off-by: Oleg Nesterov Reviewed-by: Sergey Dyasly Tested-by: Sergey Dyasly Reviewed-by: Sameer Nanda Cc: "Eric W. 
Biederman" Cc: Frederic Weisbecker Cc: Mandeep Singh Baines Cc: "Ma, Xindong" Reviewed-by: Michal Hocko Cc: "Tu, Xiaobing" Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/oom_kill.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 96d7945f75a6..0d8ad1ebd1d1 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -47,18 +47,20 @@ static DEFINE_SPINLOCK(zone_scan_lock); #ifdef CONFIG_NUMA /** * has_intersects_mems_allowed() - check task eligiblity for kill - * @tsk: task struct of which task to consider + * @start: task struct of which task to consider * @mask: nodemask passed to page allocator for mempolicy ooms * * Task eligibility is determined by whether or not a candidate task, @tsk, * shares the same mempolicy nodes as current if it is bound by such a policy * and whether or not it has the same set of allowed cpuset nodes. */ -static bool has_intersects_mems_allowed(struct task_struct *tsk, +static bool has_intersects_mems_allowed(struct task_struct *start, const nodemask_t *mask) { - struct task_struct *start = tsk; + struct task_struct *tsk; + bool ret = false; + rcu_read_lock(); for_each_thread(start, tsk) { if (mask) { /* @@ -67,19 +69,20 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk, * mempolicy intersects current, otherwise it may be * needlessly killed. */ - if (mempolicy_nodemask_intersects(tsk, mask)) - return true; + ret = mempolicy_nodemask_intersects(tsk, mask); } else { /* * This is not a mempolicy constrained oom, so only * check the mems of tsk's cpuset. */ - if (cpuset_mems_allowed_intersects(current, tsk)) - return true; + ret = cpuset_mems_allowed_intersects(current, tsk); } + if (ret) + break; } + rcu_read_unlock(); - return false; + return ret; } #else static bool has_intersects_mems_allowed(struct task_struct *tsk, -- cgit v1.2.3 From 4d4048be8a93769350efa31d2482a038b7de73d0 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 21 Jan 2014 15:50:01 -0800 Subject: oom_kill: add rcu_read_lock() into find_lock_task_mm() find_lock_task_mm() expects it is called under rcu or tasklist lock, but it seems that at least oom_unkillable_task()->task_in_mem_cgroup() and mem_cgroup_out_of_memory()->oom_badness() can call it lockless. Perhaps we could fix the callers, but this patch simply adds rcu lock into find_lock_task_mm(). This also allows to simplify a bit one of its callers, oom_kill_process(). Signed-off-by: Oleg Nesterov Cc: Sergey Dyasly Cc: Sameer Nanda Cc: "Eric W. Biederman" Cc: Frederic Weisbecker Cc: Mandeep Singh Baines Cc: "Ma, Xindong" Reviewed-by: Michal Hocko Cc: "Tu, Xiaobing" Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/oom_kill.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 0d8ad1ebd1d1..054ff47c4478 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -102,14 +102,19 @@ struct task_struct *find_lock_task_mm(struct task_struct *p) { struct task_struct *t; + rcu_read_lock(); + for_each_thread(p, t) { task_lock(t); if (likely(t->mm)) - return t; + goto found; task_unlock(t); } + t = NULL; +found: + rcu_read_unlock(); - return NULL; + return t; } /* return true if the task is not adequate as candidate victim task. 
*/ @@ -461,10 +466,8 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, } read_unlock(&tasklist_lock); - rcu_read_lock(); p = find_lock_task_mm(victim); if (!p) { - rcu_read_unlock(); put_task_struct(victim); return; } else if (victim != p) { @@ -490,6 +493,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, * That thread will now get access to memory reserves since it has a * pending fatal signal. */ + rcu_read_lock(); for_each_process(p) if (p->mm == mm && !same_thread_group(p, victim) && !(p->flags & PF_KTHREAD)) { -- cgit v1.2.3 From fd615c4e671979e3e362df537d6be38f8d27aa80 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 21 Jan 2014 15:50:05 -0800 Subject: mm/memblock: debug: don't free reserved array if !ARCH_DISCARD_MEMBLOCK Now the Nobootmem allocator will always try to free memory allocated for reserved memory regions (free_low_memory_core_early()) without taking into to account current memblock debugging configuration (CONFIG_ARCH_DISCARD_MEMBLOCK and CONFIG_DEBUG_FS state). As result if: - CONFIG_DEBUG_FS defined - CONFIG_ARCH_DISCARD_MEMBLOCK not defined; - reserved memory regions array have been resized during boot then: - memory allocated for reserved memory regions array will be freed to buddy allocator; - debug_fs entry "sys/kernel/debug/memblock/reserved" will show garbage instead of state of memory reservations. like: 0: 0x98393bc0..0x9a393bbf 1: 0xff120000..0xff11ffff 2: 0x00000000..0xffffffff Hence, do not free memory allocated for reserved memory regions if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK). Signed-off-by: Grygorii Strashko Signed-off-by: Santosh Shilimkar Reviewed-by: Tejun Heo Cc: Yinghai Lu Cc: "Rafael J. Wysocki" Cc: Arnd Bergmann Cc: Christoph Lameter Cc: Greg Kroah-Hartman Cc: H. Peter Anvin Cc: Johannes Weiner Cc: KAMEZAWA Hiroyuki Cc: Konrad Rzeszutek Wilk Cc: Michal Hocko Cc: Paul Walmsley Cc: Pavel Machek Cc: Russell King Cc: Tony Lindgren Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memblock.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 6a2a48a122a9..de4d9c352fd6 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -269,6 +269,19 @@ phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info( if (memblock.reserved.regions == memblock_reserved_init_regions) return 0; + /* + * Don't allow nobootmem allocator to free reserved memory regions + * array if + * - CONFIG_DEBUG_FS is enabled; + * - CONFIG_ARCH_DISCARD_MEMBLOCK is not enabled; + * - reserved memory regions array have been resized during boot. + * Otherwise debug_fs entry "sys/kernel/debug/memblock/reserved" + * will show garbage instead of state of memory reservations. + */ + if (IS_ENABLED(CONFIG_DEBUG_FS) && + !IS_ENABLED(CONFIG_ARCH_DISCARD_MEMBLOCK)) + return 0; + *addr = __pa(memblock.reserved.regions); return PAGE_ALIGN(sizeof(struct memblock_region) * -- cgit v1.2.3 From 869a84e1ca163b737236dae997db4a6a1e230b9b Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 21 Jan 2014 15:50:10 -0800 Subject: mm/memblock: remove unnecessary inclusions of bootmem.h Clean-up to remove depedency with bootmem headers. Signed-off-by: Grygorii Strashko Signed-off-by: Santosh Shilimkar Reviewed-by: Tejun Heo Cc: Yinghai Lu Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Cc: "Rafael J. Wysocki" Cc: Christoph Lameter Cc: H. 
Peter Anvin Cc: Johannes Weiner Cc: KAMEZAWA Hiroyuki Cc: Konrad Rzeszutek Wilk Cc: Michal Hocko Cc: Paul Walmsley Cc: Pavel Machek Cc: Russell King Cc: Tony Lindgren Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/mem.c | 1 - mm/memory_hotplug.c | 1 - 2 files changed, 2 deletions(-) (limited to 'mm') diff --git a/drivers/char/mem.c b/drivers/char/mem.c index f895a8c8a244..92c5937f80c3 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 01e39afde1cb..af4935ee444f 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From 79f40fab0b3a78e0e41fac79a65a9870f4b05652 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 21 Jan 2014 15:50:12 -0800 Subject: mm/memblock: drop WARN and use SMP_CACHE_BYTES as a default alignment Don't produce warning and interpret 0 as "default align" equal to SMP_CACHE_BYTES in case if caller of memblock_alloc_base_nid() doesn't specify alignment for the block (align == 0). This is done in preparation of introducing common memblock alloc interface to make code behavior consistent. More details are in below thread : https://lkml.org/lkml/2013/10/13/117. Signed-off-by: Grygorii Strashko Signed-off-by: Santosh Shilimkar Cc: Yinghai Lu Cc: Tejun Heo Cc: "Rafael J. Wysocki" Cc: Arnd Bergmann Cc: Christoph Lameter Cc: Greg Kroah-Hartman Cc: H. Peter Anvin Cc: Johannes Weiner Cc: KAMEZAWA Hiroyuki Cc: Konrad Rzeszutek Wilk Cc: Michal Hocko Cc: Paul Walmsley Cc: Pavel Machek Cc: Russell King Cc: Tony Lindgren Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memblock.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index de4d9c352fd6..6aca54812db0 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -969,8 +969,8 @@ static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size, { phys_addr_t found; - if (WARN_ON(!align)) - align = __alignof__(long long); + if (!align) + align = SMP_CACHE_BYTES; /* align @size to avoid excessive fragmentation on reserved array */ size = round_up(size, align); -- cgit v1.2.3 From 87029ee9390b2297dae699d5fb135b77992116e5 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 21 Jan 2014 15:50:14 -0800 Subject: mm/memblock: reorder parameters of memblock_find_in_range_node Reorder parameters of memblock_find_in_range_node to be consistent with other memblock APIs. The change was suggested by Tejun Heo . Signed-off-by: Grygorii Strashko Signed-off-by: Santosh Shilimkar Cc: Yinghai Lu Cc: Tejun Heo Cc: "Rafael J. Wysocki" Cc: Arnd Bergmann Cc: Christoph Lameter Cc: Greg Kroah-Hartman Cc: H. Peter Anvin Cc: Johannes Weiner Cc: KAMEZAWA Hiroyuki Cc: Konrad Rzeszutek Wilk Cc: Michal Hocko Cc: Paul Walmsley Cc: Pavel Machek Cc: Russell King Cc: Tony Lindgren Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memblock.h | 5 +++-- mm/memblock.c | 16 ++++++++-------- mm/nobootmem.c | 2 +- 3 files changed, 12 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 2f52c8c492bd..11c31590cc49 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -55,8 +55,9 @@ extern bool movable_node_enabled; #define memblock_dbg(fmt, ...) 
\ if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) -phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end, - phys_addr_t size, phys_addr_t align, int nid); +phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, + phys_addr_t start, phys_addr_t end, + int nid); phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align); phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr); diff --git a/mm/memblock.c b/mm/memblock.c index 6aca54812db0..a95d6dc066d5 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -157,10 +157,10 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, /** * memblock_find_in_range_node - find free area in given range and node - * @start: start of candidate range - * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} * @size: size of free area to find * @align: alignment of free area to find + * @start: start of candidate range + * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} * @nid: nid of the free area to find, %MAX_NUMNODES for any node * * Find @size free area aligned to @align in the specified range and node. @@ -176,9 +176,9 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, * RETURNS: * Found address on success, 0 on failure. */ -phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start, - phys_addr_t end, phys_addr_t size, - phys_addr_t align, int nid) +phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, + phys_addr_t align, phys_addr_t start, + phys_addr_t end, int nid) { int ret; phys_addr_t kernel_end; @@ -241,8 +241,8 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align) { - return memblock_find_in_range_node(start, end, size, align, - MAX_NUMNODES); + return memblock_find_in_range_node(size, align, start, end, + MAX_NUMNODES); } static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) @@ -975,7 +975,7 @@ static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size, /* align @size to avoid excessive fragmentation on reserved array */ size = round_up(size, align); - found = memblock_find_in_range_node(0, max_addr, size, align, nid); + found = memblock_find_in_range_node(size, align, 0, max_addr, nid); if (found && !memblock_reserve(found, size)) return found; diff --git a/mm/nobootmem.c b/mm/nobootmem.c index 2c254d374655..59777e050d09 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c @@ -41,7 +41,7 @@ static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align, if (limit > memblock.current_limit) limit = memblock.current_limit; - addr = memblock_find_in_range_node(goal, limit, size, align, nid); + addr = memblock_find_in_range_node(size, align, goal, limit, nid); if (!addr) return NULL; -- cgit v1.2.3 From b115423357e0cda6d8f45d0c81df537d7b004020 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 21 Jan 2014 15:50:16 -0800 Subject: mm/memblock: switch to use NUMA_NO_NODE instead of MAX_NUMNODES It's recommended to use NUMA_NO_NODE everywhere to select "process any node" behavior or to indicate that "no node id specified". Hence, update __next_free_mem_range*() API's to accept both NUMA_NO_NODE and MAX_NUMNODES, but emit warning once on MAX_NUMNODES, and correct corresponding API's documentation to describe new behavior. 
Also, update other memblock/nobootmem APIs where MAX_NUMNODES is used directly. The change was suggested by Tejun Heo. Signed-off-by: Grygorii Strashko Signed-off-by: Santosh Shilimkar Cc: Yinghai Lu Cc: Tejun Heo Cc: "Rafael J. Wysocki" Cc: Arnd Bergmann Cc: Christoph Lameter Cc: Greg Kroah-Hartman Cc: H. Peter Anvin Cc: Johannes Weiner Cc: KAMEZAWA Hiroyuki Cc: Konrad Rzeszutek Wilk Cc: Michal Hocko Cc: Paul Walmsley Cc: Pavel Machek Cc: Russell King Cc: Tony Lindgren Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memblock.h | 4 ++-- mm/memblock.c | 28 +++++++++++++++++++--------- mm/nobootmem.c | 8 ++++---- 3 files changed, 25 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 11c31590cc49..cd0274bebd4c 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -118,7 +118,7 @@ void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start, /** * for_each_free_mem_range - iterate through free memblock areas * @i: u64 used as loop variable - * @nid: node selector, %MAX_NUMNODES for all nodes + * @nid: node selector, %NUMA_NO_NODE for all nodes * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL * @p_nid: ptr to int for nid of the range, can be %NULL @@ -138,7 +138,7 @@ void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start, /** * for_each_free_mem_range_reverse - rev-iterate through free memblock areas * @i: u64 used as loop variable - * @nid: node selector, %MAX_NUMNODES for all nodes + * @nid: node selector, %NUMA_NO_NODE for all nodes * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL * @p_nid: ptr to int for nid of the range, can be %NULL diff --git a/mm/memblock.c b/mm/memblock.c index a95d6dc066d5..03f1dc7b663c 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -94,7 +94,7 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type, * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} * @size: size of free area to find * @align: alignment of free area to find - * @nid: nid of the free area to find, %MAX_NUMNODES for any node + * @nid: nid of the free area to find, %NUMA_NO_NODE for any node * * Utility called from memblock_find_in_range_node(), find free area bottom-up. * @@ -126,7 +126,7 @@ __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} * @size: size of free area to find * @align: alignment of free area to find - * @nid: nid of the free area to find, %MAX_NUMNODES for any node + * @nid: nid of the free area to find, %NUMA_NO_NODE for any node * * Utility called from memblock_find_in_range_node(), find free area top-down. * @@ -161,7 +161,7 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, * @align: alignment of free area to find * @start: start of candidate range * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} - * @nid: nid of the free area to find, %MAX_NUMNODES for any node + * @nid: nid of the free area to find, %NUMA_NO_NODE for any node * * Find @size free area aligned to @align in the specified range and node. 
* @@ -242,7 +242,7 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, phys_addr_t align) { return memblock_find_in_range_node(size, align, start, end, - MAX_NUMNODES); + NUMA_NO_NODE); } static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) @@ -754,7 +754,7 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) /** * __next_free_mem_range - next function for for_each_free_mem_range() * @idx: pointer to u64 loop variable - * @nid: node selector, %MAX_NUMNODES for all nodes + * @nid: node selector, %NUMA_NO_NODE for all nodes * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL * @out_nid: ptr to int for nid of the range, can be %NULL @@ -782,6 +782,11 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid, struct memblock_type *rsv = &memblock.reserved; int mi = *idx & 0xffffffff; int ri = *idx >> 32; + bool check_node = (nid != NUMA_NO_NODE) && (nid != MAX_NUMNODES); + + if (nid == MAX_NUMNODES) + pr_warn_once("%s: Usage of MAX_NUMNODES is depricated. Use NUMA_NO_NODE instead\n", + __func__); for ( ; mi < mem->cnt; mi++) { struct memblock_region *m = &mem->regions[mi]; @@ -789,7 +794,7 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid, phys_addr_t m_end = m->base + m->size; /* only memory regions are associated with nodes, check it */ - if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m)) + if (check_node && nid != memblock_get_region_node(m)) continue; /* scan areas before each reservation for intersection */ @@ -830,7 +835,7 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid, /** * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse() * @idx: pointer to u64 loop variable - * @nid: nid: node selector, %MAX_NUMNODES for all nodes + * @nid: nid: node selector, %NUMA_NO_NODE for all nodes * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL * @out_nid: ptr to int for nid of the range, can be %NULL @@ -850,6 +855,11 @@ void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid, struct memblock_type *rsv = &memblock.reserved; int mi = *idx & 0xffffffff; int ri = *idx >> 32; + bool check_node = (nid != NUMA_NO_NODE) && (nid != MAX_NUMNODES); + + if (nid == MAX_NUMNODES) + pr_warn_once("%s: Usage of MAX_NUMNODES is depricated. 
Use NUMA_NO_NODE instead\n", + __func__); if (*idx == (u64)ULLONG_MAX) { mi = mem->cnt - 1; @@ -862,7 +872,7 @@ void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t m_end = m->base + m->size; /* only memory regions are associated with nodes, check it */ - if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m)) + if (check_node && nid != memblock_get_region_node(m)) continue; /* skip hotpluggable memory regions if needed */ @@ -989,7 +999,7 @@ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int n phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) { - return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES); + return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE); } phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) diff --git a/mm/nobootmem.c b/mm/nobootmem.c index 59777e050d09..19121ceb8874 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c @@ -117,7 +117,7 @@ static unsigned long __init free_low_memory_core_early(void) phys_addr_t start, end, size; u64 i; - for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) + for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL) count += __free_memory_core(start, end); /* free range that is used for reserved array if we allocate it */ @@ -161,7 +161,7 @@ unsigned long __init free_all_bootmem(void) reset_all_zones_managed_pages(); /* - * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id + * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id * because in some case like Node0 doesn't have RAM installed * low ram will be on Node1 */ @@ -215,7 +215,7 @@ static void * __init ___alloc_bootmem_nopanic(unsigned long size, restart: - ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit); + ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align, goal, limit); if (ptr) return ptr; @@ -299,7 +299,7 @@ again: if (ptr) return ptr; - ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, + ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align, goal, limit); if (ptr) return ptr; -- cgit v1.2.3 From 26f09e9b3a0696f6fe20b021901300fba26fb579 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Tue, 21 Jan 2014 15:50:19 -0800 Subject: mm/memblock: add memblock memory allocation apis Introduce memblock memory allocation APIs which allow to support PAE or LPAE extension on 32 bits archs where the physical memory start address can be beyond 4GB. In such cases, existing bootmem APIs which operate on 32 bit addresses won't work and needs memblock layer which operates on 64 bit addresses. So we add equivalent APIs so that we can replace usage of bootmem with memblock interfaces. Architectures already converted to NO_BOOTMEM use these new memblock interfaces. The architectures which are still not converted to NO_BOOTMEM continue to function as is because we still maintain the fal lback option of bootmem back-end supporting these new interfaces. So no functional change as such. In long run, once all the architectures moves to NO_BOOTMEM, we can get rid of bootmem layer completely. This is one step to remove the core code dependency with bootmem and also gives path for architectures to move away from bootmem. The proposed interface will became active if both CONFIG_HAVE_MEMBLOCK and CONFIG_NO_BOOTMEM are specified by arch. 
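As a rough sketch of how early-boot code is expected to call the new wrappers once this lands (kernel context assumed, not standalone; the function and variable names here are hypothetical, and only the memblock_virt_alloc* and memblock_free_early calls come from this patch; memblock_virt_alloc(size, 0) is the simplest, panicking form with align 0 meaning SMP_CACHE_BYTES):

/* Hypothetical early-init caller, for illustration only. */
static void __init example_early_setup(int nid, phys_addr_t nbytes)
{
	void *tbl;

	/* node-local and non-panicking: returns NULL if it cannot be satisfied */
	tbl = memblock_virt_alloc_node_nopanic(nbytes, nid);
	if (!tbl)
		return;

	/* ... populate tbl during early boot ... */

	/* an early free returns the range to memblock, not to the buddy allocator */
	memblock_free_early(__pa(tbl), nbytes);
}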
In case !CONFIG_NO_BOOTMEM, the memblock() wrappers will fallback to the existing bootmem apis so that arch's not converted to NO_BOOTMEM continue to work as is. The meaning of MEMBLOCK_ALLOC_ACCESSIBLE and MEMBLOCK_ALLOC_ANYWHERE is kept same. [akpm@linux-foundation.org: s/depricated/deprecated/] Signed-off-by: Grygorii Strashko Signed-off-by: Santosh Shilimkar Cc: Yinghai Lu Cc: Tejun Heo Cc: "Rafael J. Wysocki" Cc: Arnd Bergmann Cc: Christoph Lameter Cc: Greg Kroah-Hartman Cc: H. Peter Anvin Cc: Johannes Weiner Cc: KAMEZAWA Hiroyuki Cc: Konrad Rzeszutek Wilk Cc: Michal Hocko Cc: Paul Walmsley Cc: Pavel Machek Cc: Russell King Cc: Tony Lindgren Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/include/asm/dma.h | 4 +- include/linux/bootmem.h | 152 +++++++++++++++++++++++++++++++++ mm/memblock.c | 209 ++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 361 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h index 58b8c6a0ab1f..99084431d6ae 100644 --- a/arch/arm/include/asm/dma.h +++ b/arch/arm/include/asm/dma.h @@ -8,8 +8,8 @@ #define MAX_DMA_ADDRESS 0xffffffffUL #else #define MAX_DMA_ADDRESS ({ \ - extern unsigned long arm_dma_zone_size; \ - arm_dma_zone_size ? \ + extern phys_addr_t arm_dma_zone_size; \ + arm_dma_zone_size && arm_dma_zone_size < (0x10000000 - PAGE_OFFSET) ? \ (PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; }) #endif diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 55d52fb7ac1d..2fae55def608 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -5,6 +5,7 @@ #define _LINUX_BOOTMEM_H #include +#include #include /* @@ -141,6 +142,157 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, #define alloc_bootmem_low_pages_node(pgdat, x) \ __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0) + +#if defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) + +/* FIXME: use MEMBLOCK_ALLOC_* variants here */ +#define BOOTMEM_ALLOC_ACCESSIBLE 0 +#define BOOTMEM_ALLOC_ANYWHERE (~(phys_addr_t)0) + +/* FIXME: Move to memblock.h at a point where we remove nobootmem.c */ +void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size, + phys_addr_t align, phys_addr_t min_addr, + phys_addr_t max_addr, int nid); +void *memblock_virt_alloc_try_nid(phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, int nid); +void __memblock_free_early(phys_addr_t base, phys_addr_t size); +void __memblock_free_late(phys_addr_t base, phys_addr_t size); + +static inline void * __init memblock_virt_alloc( + phys_addr_t size, phys_addr_t align) +{ + return memblock_virt_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT, + BOOTMEM_ALLOC_ACCESSIBLE, + NUMA_NO_NODE); +} + +static inline void * __init memblock_virt_alloc_nopanic( + phys_addr_t size, phys_addr_t align) +{ + return memblock_virt_alloc_try_nid_nopanic(size, align, + BOOTMEM_LOW_LIMIT, + BOOTMEM_ALLOC_ACCESSIBLE, + NUMA_NO_NODE); +} + +static inline void * __init memblock_virt_alloc_from_nopanic( + phys_addr_t size, phys_addr_t align, phys_addr_t min_addr) +{ + return memblock_virt_alloc_try_nid_nopanic(size, align, min_addr, + BOOTMEM_ALLOC_ACCESSIBLE, + NUMA_NO_NODE); +} + +static inline void * __init memblock_virt_alloc_node( + phys_addr_t size, int nid) +{ + return memblock_virt_alloc_try_nid(size, 0, BOOTMEM_LOW_LIMIT, + BOOTMEM_ALLOC_ACCESSIBLE, nid); +} + +static inline void * __init memblock_virt_alloc_node_nopanic( + phys_addr_t size, int nid) +{ + return 
memblock_virt_alloc_try_nid_nopanic(size, 0, BOOTMEM_LOW_LIMIT, + BOOTMEM_ALLOC_ACCESSIBLE, + nid); +} + +static inline void __init memblock_free_early( + phys_addr_t base, phys_addr_t size) +{ + __memblock_free_early(base, size); +} + +static inline void __init memblock_free_early_nid( + phys_addr_t base, phys_addr_t size, int nid) +{ + __memblock_free_early(base, size); +} + +static inline void __init memblock_free_late( + phys_addr_t base, phys_addr_t size) +{ + __memblock_free_late(base, size); +} + +#else + +#define BOOTMEM_ALLOC_ACCESSIBLE 0 + + +/* Fall back to all the existing bootmem APIs */ +static inline void * __init memblock_virt_alloc( + phys_addr_t size, phys_addr_t align) +{ + if (!align) + align = SMP_CACHE_BYTES; + return __alloc_bootmem(size, align, BOOTMEM_LOW_LIMIT); +} + +static inline void * __init memblock_virt_alloc_nopanic( + phys_addr_t size, phys_addr_t align) +{ + if (!align) + align = SMP_CACHE_BYTES; + return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT); +} + +static inline void * __init memblock_virt_alloc_from_nopanic( + phys_addr_t size, phys_addr_t align, phys_addr_t min_addr) +{ + return __alloc_bootmem_nopanic(size, align, min_addr); +} + +static inline void * __init memblock_virt_alloc_node( + phys_addr_t size, int nid) +{ + return __alloc_bootmem_node(NODE_DATA(nid), size, SMP_CACHE_BYTES, + BOOTMEM_LOW_LIMIT); +} + +static inline void * __init memblock_virt_alloc_node_nopanic( + phys_addr_t size, int nid) +{ + return __alloc_bootmem_node_nopanic(NODE_DATA(nid), size, + SMP_CACHE_BYTES, + BOOTMEM_LOW_LIMIT); +} + +static inline void * __init memblock_virt_alloc_try_nid(phys_addr_t size, + phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid) +{ + return __alloc_bootmem_node_high(NODE_DATA(nid), size, align, + min_addr); +} + +static inline void * __init memblock_virt_alloc_try_nid_nopanic( + phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, int nid) +{ + return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align, + min_addr, max_addr); +} + +static inline void __init memblock_free_early( + phys_addr_t base, phys_addr_t size) +{ + free_bootmem(base, size); +} + +static inline void __init memblock_free_early_nid( + phys_addr_t base, phys_addr_t size, int nid) +{ + free_bootmem_node(NODE_DATA(nid), base, size); +} + +static inline void __init memblock_free_late( + phys_addr_t base, phys_addr_t size) +{ + free_bootmem_late(base, size); +} +#endif /* defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) */ + #ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP extern void *alloc_remap(int nid, unsigned long size); #else diff --git a/mm/memblock.c b/mm/memblock.c index 03f1dc7b663c..018e55dc004d 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -21,6 +21,9 @@ #include #include +#include + +#include "internal.h" static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; @@ -785,7 +788,7 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid, bool check_node = (nid != NUMA_NO_NODE) && (nid != MAX_NUMNODES); if (nid == MAX_NUMNODES) - pr_warn_once("%s: Usage of MAX_NUMNODES is depricated. Use NUMA_NO_NODE instead\n", + pr_warn_once("%s: Usage of MAX_NUMNODES is deprecated. 
Use NUMA_NO_NODE instead\n", __func__); for ( ; mi < mem->cnt; mi++) { @@ -858,7 +861,7 @@ void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid, bool check_node = (nid != NUMA_NO_NODE) && (nid != MAX_NUMNODES); if (nid == MAX_NUMNODES) - pr_warn_once("%s: Usage of MAX_NUMNODES is depricated. Use NUMA_NO_NODE instead\n", + pr_warn_once("%s: Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n", __func__); if (*idx == (u64)ULLONG_MAX) { @@ -1029,6 +1032,208 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); } +/** + * memblock_virt_alloc_internal - allocate boot memory block + * @size: size of memory block to be allocated in bytes + * @align: alignment of the region and block's size + * @min_addr: the lower bound of the memory region to allocate (phys address) + * @max_addr: the upper bound of the memory region to allocate (phys address) + * @nid: nid of the free area to find, %NUMA_NO_NODE for any node + * + * The @min_addr limit is dropped if it can not be satisfied and the allocation + * will fall back to memory below @min_addr. Also, allocation may fall back + * to any node in the system if the specified node can not + * hold the requested memory. + * + * The allocation is performed from memory region limited by + * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE. + * + * The memory block is aligned on SMP_CACHE_BYTES if @align == 0. + * + * The phys address of allocated boot memory block is converted to virtual and + * allocated memory is reset to 0. + * + * In addition, function sets the min_count to 0 using kmemleak_alloc for + * allocated boot memory block, so that it is never reported as leaks. + * + * RETURNS: + * Virtual address of allocated memory block on success, NULL on failure. + */ +static void * __init memblock_virt_alloc_internal( + phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, + int nid) +{ + phys_addr_t alloc; + void *ptr; + + if (nid == MAX_NUMNODES) + pr_warn("%s: usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE\n", + __func__); + + /* + * Detect any accidental use of these APIs after slab is ready, as at + * this moment memblock may be deinitialized already and its + * internal data may be destroyed (after execution of free_all_bootmem) + */ + if (WARN_ON_ONCE(slab_is_available())) + return kzalloc_node(size, GFP_NOWAIT, nid); + + if (!align) + align = SMP_CACHE_BYTES; + + /* align @size to avoid excessive fragmentation on reserved array */ + size = round_up(size, align); + +again: + alloc = memblock_find_in_range_node(size, align, min_addr, max_addr, + nid); + if (alloc) + goto done; + + if (nid != NUMA_NO_NODE) { + alloc = memblock_find_in_range_node(size, align, min_addr, + max_addr, NUMA_NO_NODE); + if (alloc) + goto done; + } + + if (min_addr) { + min_addr = 0; + goto again; + } else { + goto error; + } + +done: + memblock_reserve(alloc, size); + ptr = phys_to_virt(alloc); + memset(ptr, 0, size); + + /* + * The min_count is set to 0 so that bootmem allocated blocks + * are never reported as leaks. This is because many of these blocks + * are only referred via the physical address which is not + * looked up by kmemleak. 
+ */ + kmemleak_alloc(ptr, size, 0, 0); + + return ptr; + +error: + return NULL; +} + +/** + * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block + * @size: size of memory block to be allocated in bytes + * @align: alignment of the region and block's size + * @min_addr: the lower bound of the memory region from where the allocation + * is preferred (phys address) + * @max_addr: the upper bound of the memory region from where the allocation + * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to + * allocate only from memory limited by memblock.current_limit value + * @nid: nid of the free area to find, %NUMA_NO_NODE for any node + * + * Public version of _memblock_virt_alloc_try_nid_nopanic() which provides + * additional debug information (including caller info), if enabled. + * + * RETURNS: + * Virtual address of allocated memory block on success, NULL on failure. + */ +void * __init memblock_virt_alloc_try_nid_nopanic( + phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, + int nid) +{ + memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n", + __func__, (u64)size, (u64)align, nid, (u64)min_addr, + (u64)max_addr, (void *)_RET_IP_); + return memblock_virt_alloc_internal(size, align, min_addr, + max_addr, nid); +} + +/** + * memblock_virt_alloc_try_nid - allocate boot memory block with panicking + * @size: size of memory block to be allocated in bytes + * @align: alignment of the region and block's size + * @min_addr: the lower bound of the memory region from where the allocation + * is preferred (phys address) + * @max_addr: the upper bound of the memory region from where the allocation + * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to + * allocate only from memory limited by memblock.current_limit value + * @nid: nid of the free area to find, %NUMA_NO_NODE for any node + * + * Public panicking version of _memblock_virt_alloc_try_nid_nopanic() + * which provides debug information (including caller info), if enabled, + * and panics if the request can not be satisfied. + * + * RETURNS: + * Virtual address of allocated memory block on success, NULL on failure. + */ +void * __init memblock_virt_alloc_try_nid( + phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, + int nid) +{ + void *ptr; + + memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n", + __func__, (u64)size, (u64)align, nid, (u64)min_addr, + (u64)max_addr, (void *)_RET_IP_); + ptr = memblock_virt_alloc_internal(size, align, + min_addr, max_addr, nid); + if (ptr) + return ptr; + + panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n", + __func__, (u64)size, (u64)align, nid, (u64)min_addr, + (u64)max_addr); + return NULL; +} + +/** + * __memblock_free_early - free boot memory block + * @base: phys starting address of the boot memory block + * @size: size of the boot memory block in bytes + * + * Free boot memory block previously allocated by memblock_virt_alloc_xx() API. + * The freeing memory will not be released to the buddy allocator. 
+ */ +void __init __memblock_free_early(phys_addr_t base, phys_addr_t size) +{ + memblock_dbg("%s: [%#016llx-%#016llx] %pF\n", + __func__, (u64)base, (u64)base + size - 1, + (void *)_RET_IP_); + kmemleak_free_part(__va(base), size); + __memblock_remove(&memblock.reserved, base, size); +} + +/* + * __memblock_free_late - free bootmem block pages directly to buddy allocator + * @addr: phys starting address of the boot memory block + * @size: size of the boot memory block in bytes + * + * This is only useful when the bootmem allocator has already been torn + * down, but we are still initializing the system. Pages are released directly + * to the buddy allocator, no bootmem metadata is updated because it is gone. + */ +void __init __memblock_free_late(phys_addr_t base, phys_addr_t size) +{ + u64 cursor, end; + + memblock_dbg("%s: [%#016llx-%#016llx] %pF\n", + __func__, (u64)base, (u64)base + size - 1, + (void *)_RET_IP_); + kmemleak_free_part(__va(base), size); + cursor = PFN_UP(base); + end = PFN_DOWN(base + size); + + for (; cursor < end; cursor++) { + __free_pages_bootmem(pfn_to_page(cursor), 0); + totalram_pages++; + } +} /* * Remaining API functions -- cgit v1.2.3 From 6782832eba5e8c87a749a41da8deda1c3ef67ba0 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Tue, 21 Jan 2014 15:50:25 -0800 Subject: mm/page_alloc.c: use memblock apis for early memory allocations Switch to memblock interfaces for early memory allocator instead of bootmem allocator. No functional change in beahvior than what it is in current code from bootmem users points of view. Archs already converted to NO_BOOTMEM now directly use memblock interfaces instead of bootmem wrappers build on top of memblock. And the archs which still uses bootmem, these new apis just fallback to exiting bootmem APIs. Signed-off-by: Grygorii Strashko Signed-off-by: Santosh Shilimkar Cc: Yinghai Lu Cc: Tejun Heo Cc: "Rafael J. Wysocki" Cc: Arnd Bergmann Cc: Christoph Lameter Cc: Greg Kroah-Hartman Cc: H. Peter Anvin Cc: Johannes Weiner Cc: KAMEZAWA Hiroyuki Cc: Konrad Rzeszutek Wilk Cc: Michal Hocko Cc: Paul Walmsley Cc: Pavel Machek Cc: Russell King Cc: Tony Lindgren Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4f59d1986018..b230e838883d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4215,7 +4215,6 @@ static noinline __init_refok int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) { int i; - struct pglist_data *pgdat = zone->zone_pgdat; size_t alloc_size; /* @@ -4231,7 +4230,8 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) if (!slab_is_available()) { zone->wait_table = (wait_queue_head_t *) - alloc_bootmem_node_nopanic(pgdat, alloc_size); + memblock_virt_alloc_node_nopanic( + alloc_size, zone->zone_pgdat->node_id); } else { /* * This case means that a zone whose size was 0 gets new memory @@ -4351,13 +4351,14 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node) #endif /** - * free_bootmem_with_active_regions - Call free_bootmem_node for each active range + * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. 
- * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node + * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid * * If an architecture guarantees that all ranges registered with * add_active_ranges() contain no holes and may be freed, this - * this function may be used instead of calling free_bootmem() manually. + * this function may be used instead of calling memblock_free_early_nid() + * manually. */ void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn) { @@ -4369,9 +4370,9 @@ void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn) end_pfn = min(end_pfn, max_low_pfn); if (start_pfn < end_pfn) - free_bootmem_node(NODE_DATA(this_nid), - PFN_PHYS(start_pfn), - (end_pfn - start_pfn) << PAGE_SHIFT); + memblock_free_early_nid(PFN_PHYS(start_pfn), + (end_pfn - start_pfn) << PAGE_SHIFT, + this_nid); } } @@ -4642,8 +4643,9 @@ static void __init setup_usemap(struct pglist_data *pgdat, unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize); zone->pageblock_flags = NULL; if (usemapsize) - zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat, - usemapsize); + zone->pageblock_flags = + memblock_virt_alloc_node_nopanic(usemapsize, + pgdat->node_id); } #else static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, @@ -4837,7 +4839,8 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) size = (end - start) * sizeof(struct page); map = alloc_remap(pgdat->node_id, size); if (!map) - map = alloc_bootmem_node_nopanic(pgdat, size); + map = memblock_virt_alloc_node_nopanic(size, + pgdat->node_id); pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); } #ifndef CONFIG_NEED_MULTIPLE_NODES @@ -5887,7 +5890,7 @@ void *__init alloc_large_system_hash(const char *tablename, do { size = bucketsize << log2qty; if (flags & HASH_EARLY) - table = alloc_bootmem_nopanic(size); + table = memblock_virt_alloc_nopanic(size, 0); else if (hashdist) table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); else { -- cgit v1.2.3 From bb016b84164554725899aef544331085e08cb402 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Tue, 21 Jan 2014 15:50:34 -0800 Subject: mm/sparse: use memblock apis for early memory allocations Switch to memblock interfaces for early memory allocator instead of bootmem allocator. No functional change in beahvior than what it is in current code from bootmem users points of view. Archs already converted to NO_BOOTMEM now directly use memblock interfaces instead of bootmem wrappers build on top of memblock. And the archs which still uses bootmem, these new apis just fallback to exiting bootmem APIs. Signed-off-by: Santosh Shilimkar Cc: "Rafael J. Wysocki" Cc: Arnd Bergmann Cc: Christoph Lameter Cc: Greg Kroah-Hartman Cc: Grygorii Strashko Cc: H. 
Peter Anvin Cc: Johannes Weiner Cc: KAMEZAWA Hiroyuki Cc: Konrad Rzeszutek Wilk Cc: Michal Hocko Cc: Paul Walmsley Cc: Pavel Machek Cc: Russell King Cc: Tejun Heo Cc: Tony Lindgren Cc: Yinghai Lu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/sparse-vmemmap.c | 6 ++++-- mm/sparse.c | 27 +++++++++++++++------------ 2 files changed, 19 insertions(+), 14 deletions(-) (limited to 'mm') diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 27eeab3be757..4cba9c2783a1 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -40,7 +40,8 @@ static void * __init_refok __earlyonly_bootmem_alloc(int node, unsigned long align, unsigned long goal) { - return __alloc_bootmem_node_high(NODE_DATA(node), size, align, goal); + return memblock_virt_alloc_try_nid(size, align, goal, + BOOTMEM_ALLOC_ACCESSIBLE, node); } static void *vmemmap_buf; @@ -226,7 +227,8 @@ void __init sparse_mem_maps_populate_node(struct page **map_map, if (vmemmap_buf_start) { /* need to free left buf */ - free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf); + memblock_free_early(__pa(vmemmap_buf), + vmemmap_buf_end - vmemmap_buf); vmemmap_buf = NULL; vmemmap_buf_end = NULL; } diff --git a/mm/sparse.c b/mm/sparse.c index 8cc7be0e9590..63c3ea5c119c 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -69,7 +69,7 @@ static struct mem_section noinline __init_refok *sparse_index_alloc(int nid) else section = kzalloc(array_size, GFP_KERNEL); } else { - section = alloc_bootmem_node(NODE_DATA(nid), array_size); + section = memblock_virt_alloc_node(array_size, nid); } return section; @@ -279,8 +279,9 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, limit = goal + (1UL << PA_SECTION_SHIFT); nid = early_pfn_to_nid(goal >> PAGE_SHIFT); again: - p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, - SMP_CACHE_BYTES, goal, limit); + p = memblock_virt_alloc_try_nid_nopanic(size, + SMP_CACHE_BYTES, goal, limit, + nid); if (!p && limit) { limit = 0; goto again; @@ -331,7 +332,7 @@ static unsigned long * __init sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, unsigned long size) { - return alloc_bootmem_node_nopanic(pgdat, size); + return memblock_virt_alloc_node_nopanic(size, pgdat->node_id); } static void __init check_usemap_section_nr(int nid, unsigned long *usemap) @@ -376,8 +377,9 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid) return map; size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION); - map = __alloc_bootmem_node_high(NODE_DATA(nid), size, - PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); + map = memblock_virt_alloc_try_nid(size, + PAGE_SIZE, __pa(MAX_DMA_ADDRESS), + BOOTMEM_ALLOC_ACCESSIBLE, nid); return map; } void __init sparse_mem_maps_populate_node(struct page **map_map, @@ -401,8 +403,9 @@ void __init sparse_mem_maps_populate_node(struct page **map_map, } size = PAGE_ALIGN(size); - map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count, - PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); + map = memblock_virt_alloc_try_nid(size * map_count, + PAGE_SIZE, __pa(MAX_DMA_ADDRESS), + BOOTMEM_ALLOC_ACCESSIBLE, nodeid); if (map) { for (pnum = pnum_begin; pnum < pnum_end; pnum++) { if (!present_section_nr(pnum)) @@ -545,7 +548,7 @@ void __init sparse_init(void) * sparse_early_mem_map_alloc, so allocate usemap_map at first. 
*/ size = sizeof(unsigned long *) * NR_MEM_SECTIONS; - usemap_map = alloc_bootmem(size); + usemap_map = memblock_virt_alloc(size, 0); if (!usemap_map) panic("can not allocate usemap_map\n"); alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node, @@ -553,7 +556,7 @@ void __init sparse_init(void) #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER size2 = sizeof(struct page *) * NR_MEM_SECTIONS; - map_map = alloc_bootmem(size2); + map_map = memblock_virt_alloc(size2, 0); if (!map_map) panic("can not allocate map_map\n"); alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node, @@ -583,9 +586,9 @@ void __init sparse_init(void) vmemmap_populate_print_last(); #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER - free_bootmem(__pa(map_map), size2); + memblock_free_early(__pa(map_map), size2); #endif - free_bootmem(__pa(usemap_map), size); + memblock_free_early(__pa(usemap_map), size); } #ifdef CONFIG_MEMORY_HOTPLUG -- cgit v1.2.3 From 8b89a1169437541a2a9b62c8f7b1a5c0ceb0fbde Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 21 Jan 2014 15:50:36 -0800 Subject: mm/hugetlb.c: use memblock apis for early memory allocations Switch to memblock interfaces for early memory allocator instead of bootmem allocator. No functional change in beahvior than what it is in current code from bootmem users points of view. Archs already converted to NO_BOOTMEM now directly use memblock interfaces instead of bootmem wrappers build on top of memblock. And the archs which still uses bootmem, these new apis just fallback to exiting bootmem APIs. Signed-off-by: Grygorii Strashko Signed-off-by: Santosh Shilimkar Cc: "Rafael J. Wysocki" Cc: Arnd Bergmann Cc: Christoph Lameter Cc: Greg Kroah-Hartman Cc: H. Peter Anvin Cc: Johannes Weiner Cc: KAMEZAWA Hiroyuki Cc: Konrad Rzeszutek Wilk Cc: Michal Hocko Cc: Paul Walmsley Cc: Pavel Machek Cc: Russell King Cc: Tejun Heo Cc: Tony Lindgren Cc: Yinghai Lu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hugetlb.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 1697ff0cc53a..04306b9de90d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1271,9 +1271,9 @@ int __weak alloc_bootmem_huge_page(struct hstate *h) for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { void *addr; - addr = __alloc_bootmem_node_nopanic(NODE_DATA(node), - huge_page_size(h), huge_page_size(h), 0); - + addr = memblock_virt_alloc_try_nid_nopanic( + huge_page_size(h), huge_page_size(h), + 0, BOOTMEM_ALLOC_ACCESSIBLE, node); if (addr) { /* * Use the beginning of the huge page to store the @@ -1313,8 +1313,8 @@ static void __init gather_bootmem_prealloc(void) #ifdef CONFIG_HIGHMEM page = pfn_to_page(m->phys >> PAGE_SHIFT); - free_bootmem_late((unsigned long)m, - sizeof(struct huge_bootmem_page)); + memblock_free_late(__pa(m), + sizeof(struct huge_bootmem_page)); #else page = virt_to_page(m); #endif -- cgit v1.2.3 From 0d036e9e33df8befa9348683ba68258fee7f0a00 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 21 Jan 2014 15:50:38 -0800 Subject: mm/page_cgroup.c: use memblock apis for early memory allocations Switch to memblock interfaces for early memory allocator instead of bootmem allocator. No functional change in beahvior than what it is in current code from bootmem users points of view. Archs already converted to NO_BOOTMEM now directly use memblock interfaces instead of bootmem wrappers build on top of memblock. 
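The conversion applied at each call site is mechanical; a hypothetical before/after of the pattern, sketched under the assumption of the in-kernel context (the wrapper function below is illustrative, the two calls it contrasts are the real ones touched by this series):

/* Hypothetical helper showing the old call and its memblock replacement. */
static void * __init example_convert(int nid, unsigned long size,
				     unsigned long align)
{
	/*
	 * old: __alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
	 *				     __pa(MAX_DMA_ADDRESS));
	 */
	return memblock_virt_alloc_try_nid_nopanic(size, align,
						   __pa(MAX_DMA_ADDRESS),
						   BOOTMEM_ALLOC_ACCESSIBLE,
						   nid);
}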
And the archs which still uses bootmem, these new apis just fallback to exiting bootmem APIs. Signed-off-by: Grygorii Strashko Signed-off-by: Santosh Shilimkar Cc: "Rafael J. Wysocki" Cc: Arnd Bergmann Cc: Christoph Lameter Cc: Greg Kroah-Hartman Cc: Grygorii Strashko Cc: H. Peter Anvin Cc: Johannes Weiner Cc: KAMEZAWA Hiroyuki Cc: Konrad Rzeszutek Wilk Cc: Michal Hocko Cc: Paul Walmsley Cc: Pavel Machek Cc: Russell King Cc: Tejun Heo Cc: Tony Lindgren Cc: Yinghai Lu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_cgroup.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 6d757e3a872a..d8bd2c500aa4 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c @@ -54,8 +54,9 @@ static int __init alloc_node_page_cgroup(int nid) table_size = sizeof(struct page_cgroup) * nr_pages; - base = __alloc_bootmem_node_nopanic(NODE_DATA(nid), - table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); + base = memblock_virt_alloc_try_nid_nopanic( + table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), + BOOTMEM_ALLOC_ACCESSIBLE, nid); if (!base) return -ENOMEM; NODE_DATA(nid)->node_page_cgroup = base; -- cgit v1.2.3 From 999c17e3de4855af4e829c0871ad32fc76a93991 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Tue, 21 Jan 2014 15:50:40 -0800 Subject: mm/percpu.c: use memblock apis for early memory allocations Switch to memblock interfaces for early memory allocator instead of bootmem allocator. No functional change in beahvior than what it is in current code from bootmem users points of view. Archs already converted to NO_BOOTMEM now directly use memblock interfaces instead of bootmem wrappers build on top of memblock. And the archs which still uses bootmem, these new apis just fallback to exiting bootmem APIs. Signed-off-by: Santosh Shilimkar Cc: "Rafael J. Wysocki" Cc: Arnd Bergmann Cc: Christoph Lameter Cc: Greg Kroah-Hartman Cc: Grygorii Strashko Cc: H. 
Peter Anvin Cc: Johannes Weiner Cc: KAMEZAWA Hiroyuki Cc: Konrad Rzeszutek Wilk Cc: Michal Hocko Cc: Paul Walmsley Cc: Pavel Machek Cc: Russell King Cc: Tejun Heo Cc: Tony Lindgren Cc: Yinghai Lu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/percpu.c | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/percpu.c b/mm/percpu.c index 0d10defe951e..65fd8a749712 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1063,7 +1063,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, __alignof__(ai->groups[0].cpu_map[0])); ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); - ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size)); + ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0); if (!ptr) return NULL; ai = ptr; @@ -1088,7 +1088,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, */ void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) { - free_bootmem(__pa(ai), ai->__ai_size); + memblock_free_early(__pa(ai), ai->__ai_size); } /** @@ -1246,10 +1246,12 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); /* process group information and build config tables accordingly */ - group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0])); - group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0])); - unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0])); - unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0])); + group_offsets = memblock_virt_alloc(ai->nr_groups * + sizeof(group_offsets[0]), 0); + group_sizes = memblock_virt_alloc(ai->nr_groups * + sizeof(group_sizes[0]), 0); + unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0); + unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0); for (cpu = 0; cpu < nr_cpu_ids; cpu++) unit_map[cpu] = UINT_MAX; @@ -1311,7 +1313,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, * empty chunks. */ pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; - pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0])); + pcpu_slot = memblock_virt_alloc( + pcpu_nr_slots * sizeof(pcpu_slot[0]), 0); for (i = 0; i < pcpu_nr_slots; i++) INIT_LIST_HEAD(&pcpu_slot[i]); @@ -1322,7 +1325,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, * covers static area + reserved area (mostly used for module * static percpu allocation). 
*/ - schunk = alloc_bootmem(pcpu_chunk_struct_size); + schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); INIT_LIST_HEAD(&schunk->list); schunk->base_addr = base_addr; schunk->map = smap; @@ -1346,7 +1349,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, /* init dynamic chunk if necessary */ if (dyn_size) { - dchunk = alloc_bootmem(pcpu_chunk_struct_size); + dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); INIT_LIST_HEAD(&dchunk->list); dchunk->base_addr = base_addr; dchunk->map = dmap; @@ -1626,7 +1629,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); - areas = alloc_bootmem_nopanic(areas_size); + areas = memblock_virt_alloc_nopanic(areas_size, 0); if (!areas) { rc = -ENOMEM; goto out_free; @@ -1712,7 +1715,7 @@ out_free_areas: out_free: pcpu_free_alloc_info(ai); if (areas) - free_bootmem(__pa(areas), areas_size); + memblock_free_early(__pa(areas), areas_size); return rc; } #endif /* BUILD_EMBED_FIRST_CHUNK */ @@ -1760,7 +1763,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size, /* unaligned allocations can't be freed, round up to page size */ pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * sizeof(pages[0])); - pages = alloc_bootmem(pages_size); + pages = memblock_virt_alloc(pages_size, 0); /* allocate pages */ j = 0; @@ -1823,7 +1826,7 @@ enomem: free_fn(page_address(pages[j]), PAGE_SIZE); rc = -ENOMEM; out_free_ar: - free_bootmem(__pa(pages), pages_size); + memblock_free_early(__pa(pages), pages_size); pcpu_free_alloc_info(ai); return rc; } @@ -1848,12 +1851,13 @@ EXPORT_SYMBOL(__per_cpu_offset); static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, size_t align) { - return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS)); + return memblock_virt_alloc_from_nopanic( + size, align, __pa(MAX_DMA_ADDRESS)); } static void __init pcpu_dfl_fc_free(void *ptr, size_t size) { - free_bootmem(__pa(ptr), size); + memblock_free_early(__pa(ptr), size); } void __init setup_per_cpu_areas(void) @@ -1896,7 +1900,9 @@ void __init setup_per_cpu_areas(void) void *fc; ai = pcpu_alloc_alloc_info(1, 1); - fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); + fc = memblock_virt_alloc_from_nopanic(unit_size, + PAGE_SIZE, + __pa(MAX_DMA_ADDRESS)); if (!ai || !fc) panic("Failed to allocate memory for percpu areas."); /* kmemleak tracks the percpu allocations separately */ -- cgit v1.2.3 From 9e43aa2b8d1cb3137bd7e60d5fead83d0569de2b Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Tue, 21 Jan 2014 15:50:43 -0800 Subject: mm/memory_hotplug.c: use memblock apis for early memory allocations Correct ensure_zone_is_initialized() function description according to the introduced memblock APIs for early memory allocations. Signed-off-by: Grygorii Strashko Signed-off-by: Santosh Shilimkar Cc: "Rafael J. Wysocki" Cc: Arnd Bergmann Cc: Christoph Lameter Cc: Greg Kroah-Hartman Cc: H. 
Peter Anvin Cc: Johannes Weiner Cc: KAMEZAWA Hiroyuki Cc: Konrad Rzeszutek Wilk Cc: Michal Hocko Cc: Paul Walmsley Cc: Pavel Machek Cc: Russell King Cc: Tejun Heo Cc: Tony Lindgren Cc: Yinghai Lu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory_hotplug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index af4935ee444f..cc2ab37220b7 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -268,7 +268,7 @@ static void fix_zone_id(struct zone *zone, unsigned long start_pfn, } /* Can fail with -ENOMEM from allocating a wait table with vmalloc() or - * alloc_bootmem_node_nopanic() */ + * alloc_bootmem_node_nopanic()/memblock_virt_alloc_node_nopanic() */ static int __ref ensure_zone_is_initialized(struct zone *zone, unsigned long start_pfn, unsigned long num_pages) { -- cgit v1.2.3 From 560dca27a6b36015e4f69a4ceba0ee5be0707c17 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 21 Jan 2014 15:50:55 -0800 Subject: mm/memblock: use WARN_ONCE when MAX_NUMNODES passed as input parameter Check nid parameter and produce warning if it has deprecated MAX_NUMNODES value. Also re-assign NUMA_NO_NODE value to the nid parameter in this case. These will help to identify the wrong API usage (the caller) and make code simpler. Signed-off-by: Grygorii Strashko Signed-off-by: Santosh Shilimkar Cc: Yinghai Lu Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memblock.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 018e55dc004d..1c2ef2c7edab 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -785,11 +785,9 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid, struct memblock_type *rsv = &memblock.reserved; int mi = *idx & 0xffffffff; int ri = *idx >> 32; - bool check_node = (nid != NUMA_NO_NODE) && (nid != MAX_NUMNODES); - if (nid == MAX_NUMNODES) - pr_warn_once("%s: Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n", - __func__); + if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) + nid = NUMA_NO_NODE; for ( ; mi < mem->cnt; mi++) { struct memblock_region *m = &mem->regions[mi]; @@ -797,7 +795,7 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid, phys_addr_t m_end = m->base + m->size; /* only memory regions are associated with nodes, check it */ - if (check_node && nid != memblock_get_region_node(m)) + if (nid != NUMA_NO_NODE && nid != memblock_get_region_node(m)) continue; /* scan areas before each reservation for intersection */ @@ -858,11 +856,9 @@ void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid, struct memblock_type *rsv = &memblock.reserved; int mi = *idx & 0xffffffff; int ri = *idx >> 32; - bool check_node = (nid != NUMA_NO_NODE) && (nid != MAX_NUMNODES); - if (nid == MAX_NUMNODES) - pr_warn_once("%s: Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n", - __func__); + if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. 
Use NUMA_NO_NODE instead\n")) + nid = NUMA_NO_NODE; if (*idx == (u64)ULLONG_MAX) { mi = mem->cnt - 1; @@ -875,7 +871,7 @@ void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t m_end = m->base + m->size; /* only memory regions are associated with nodes, check it */ - if (check_node && nid != memblock_get_region_node(m)) + if (nid != NUMA_NO_NODE && nid != memblock_get_region_node(m)) continue; /* skip hotpluggable memory regions if needed */ @@ -1067,9 +1063,8 @@ static void * __init memblock_virt_alloc_internal( phys_addr_t alloc; void *ptr; - if (nid == MAX_NUMNODES) - pr_warn("%s: usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE\n", - __func__); + if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) + nid = NUMA_NO_NODE; /* * Detect any accidental use of these APIs after slab is ready, as at -- cgit v1.2.3 From 4883e997b26ed857da8dae6a6e6aeb12830b978d Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Tue, 21 Jan 2014 15:50:56 -0800 Subject: mm/hwpoison: add '#' to hwpoison_inject Add '#' to hwpoison_inject just as done in madvise_hwpoison. Signed-off-by: Wanpeng Li Reviewed-by: Naoya Horiguchi Reviewed-by: Vladimir Murzin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hwpoison-inject.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c index 4c84678371eb..95487c71cad5 100644 --- a/mm/hwpoison-inject.c +++ b/mm/hwpoison-inject.c @@ -55,7 +55,7 @@ static int hwpoison_inject(void *data, u64 val) return 0; inject: - printk(KERN_INFO "Injecting memory failure at pfn %lx\n", pfn); + pr_info("Injecting memory failure at pfn %#lx\n", pfn); return memory_failure(pfn, 18, MF_COUNT_INCREASED); } -- cgit v1.2.3 From 1c30e0177e4f41a11cb88b0f1f056ccebfe0fff4 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 21 Jan 2014 15:50:58 -0800 Subject: mm: numa: make NUMA-migrate related functions static numamigrate_update_ratelimit and numamigrate_isolate_page only have callers in mm/migrate.c. This patch makes them static. Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Cc: Alex Thorlton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/migrate.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/migrate.c b/mm/migrate.c index 11d89dc0574c..41eba21f10ba 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1599,7 +1599,8 @@ bool migrate_ratelimited(int node) } /* Returns true if the node is migrate rate-limited after the update */ -bool numamigrate_update_ratelimit(pg_data_t *pgdat, unsigned long nr_pages) +static bool numamigrate_update_ratelimit(pg_data_t *pgdat, + unsigned long nr_pages) { bool rate_limited = false; @@ -1623,7 +1624,7 @@ bool numamigrate_update_ratelimit(pg_data_t *pgdat, unsigned long nr_pages) return rate_limited; } -int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) +static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) { int page_lru; -- cgit v1.2.3 From 1c5e9c27cbd966c7f0038698d5dcd5ada3574f47 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 21 Jan 2014 15:50:59 -0800 Subject: mm: numa: limit scope of lock for NUMA migrate rate limiting NUMA migrate rate limiting protects a migration counter and window using a lock but in some cases this can be a contended lock. It is not critical that the number of pages be perfect, lost updates are acceptable. Reduce the importance of this lock. 
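A userspace analog of the relaxed pattern (serialize only the window roll-over, leave the counter update unlocked and tolerate lost increments); the names and the one-second window below are illustrative, not the kernel code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t window_lock = PTHREAD_MUTEX_INITIALIZER;
static time_t window_end;
static unsigned long window_pages;

static bool rate_limited(unsigned long nr_pages, unsigned long limit)
{
	if (time(NULL) >= window_end) {
		/* only the window reset is serialized; a double reset is harmless */
		pthread_mutex_lock(&window_lock);
		window_pages = 0;
		window_end = time(NULL) + 1;	/* 1s window */
		pthread_mutex_unlock(&window_lock);
	}

	if (window_pages > limit)
		return true;

	/* unlocked, racy update: lost increments merely under-count */
	window_pages += nr_pages;
	return false;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("batch %d limited=%d\n", i, rate_limited(64, 128));
	return 0;
}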
Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Cc: Alex Thorlton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 5 +---- mm/migrate.c | 21 ++++++++++++--------- 2 files changed, 13 insertions(+), 13 deletions(-) (limited to 'mm') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 67ab5febabf7..5f2052c83154 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -764,10 +764,7 @@ typedef struct pglist_data { int kswapd_max_order; enum zone_type classzone_idx; #ifdef CONFIG_NUMA_BALANCING - /* - * Lock serializing the per destination node AutoNUMA memory - * migration rate limiting data. - */ + /* Lock serializing the migrate rate limiting window */ spinlock_t numabalancing_migrate_lock; /* Rate limiting time interval */ diff --git a/mm/migrate.c b/mm/migrate.c index 41eba21f10ba..4612bb2e3677 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1602,26 +1602,29 @@ bool migrate_ratelimited(int node) static bool numamigrate_update_ratelimit(pg_data_t *pgdat, unsigned long nr_pages) { - bool rate_limited = false; - /* * Rate-limit the amount of data that is being migrated to a node. * Optimal placement is no good if the memory bus is saturated and * all the time is being spent migrating! */ - spin_lock(&pgdat->numabalancing_migrate_lock); if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) { + spin_lock(&pgdat->numabalancing_migrate_lock); pgdat->numabalancing_migrate_nr_pages = 0; pgdat->numabalancing_migrate_next_window = jiffies + msecs_to_jiffies(migrate_interval_millisecs); + spin_unlock(&pgdat->numabalancing_migrate_lock); } if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) - rate_limited = true; - else - pgdat->numabalancing_migrate_nr_pages += nr_pages; - spin_unlock(&pgdat->numabalancing_migrate_lock); - - return rate_limited; + return true; + + /* + * This is an unlocked non-atomic update so errors are possible. + * The consequences are failing to migrate when we potentiall should + * have which is not severe enough to warrant locking. If it is ever + * a problem, it can be converted to a per-cpu counter. + */ + pgdat->numabalancing_migrate_nr_pages += nr_pages; + return false; } static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) -- cgit v1.2.3 From af1839d722c986ffeaae1e70a6ef1c75ff38dcd5 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 21 Jan 2014 15:51:01 -0800 Subject: mm: numa: trace tasks that fail migration due to rate limiting A low local/remote numa hinting fault ratio is potentially explained by failed migrations. This patch adds a tracepoint that fires when migration fails due to migration rate limitation. 
Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Cc: Alex Thorlton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/trace/events/migrate.h | 26 ++++++++++++++++++++++++++ mm/migrate.c | 5 ++++- 2 files changed, 30 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h index ec2a6ccfd7e5..3075ffbb9a83 100644 --- a/include/trace/events/migrate.h +++ b/include/trace/events/migrate.h @@ -45,6 +45,32 @@ TRACE_EVENT(mm_migrate_pages, __print_symbolic(__entry->reason, MIGRATE_REASON)) ); +TRACE_EVENT(mm_numa_migrate_ratelimit, + + TP_PROTO(struct task_struct *p, int dst_nid, unsigned long nr_pages), + + TP_ARGS(p, dst_nid, nr_pages), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN) + __field( pid_t, pid) + __field( int, dst_nid) + __field( unsigned long, nr_pages) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->dst_nid = dst_nid; + __entry->nr_pages = nr_pages; + ), + + TP_printk("comm=%s pid=%d dst_nid=%d nr_pages=%lu", + __entry->comm, + __entry->pid, + __entry->dst_nid, + __entry->nr_pages) +); #endif /* _TRACE_MIGRATE_H */ /* This part must be outside protection */ diff --git a/mm/migrate.c b/mm/migrate.c index 4612bb2e3677..f9e16350d09c 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1614,8 +1614,11 @@ static bool numamigrate_update_ratelimit(pg_data_t *pgdat, msecs_to_jiffies(migrate_interval_millisecs); spin_unlock(&pgdat->numabalancing_migrate_lock); } - if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) + if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) { + trace_mm_numa_migrate_ratelimit(current, pgdat->node_id, + nr_pages); return true; + } /* * This is an unlocked non-atomic update so errors are possible. -- cgit v1.2.3 From 64a9a34e22896dad430e21a28ad8cb00a756fefc Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 21 Jan 2014 15:51:02 -0800 Subject: mm: numa: do not automatically migrate KSM pages KSM pages can be shared between tasks that are not necessarily related to each other from a NUMA perspective. This patch causes those pages to be ignored by automatic NUMA balancing so they do not migrate and do not cause unrelated tasks to be grouped together. Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Cc: Alex Thorlton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mprotect.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/mprotect.c b/mm/mprotect.c index bb53a6591aea..7332c1785744 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -63,7 +64,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, ptent = *pte; page = vm_normal_page(vma, addr, oldpte); - if (page) { + if (page && !PageKsm(page)) { if (!pte_numa(oldpte)) { ptent = pte_mknuma(ptent); set_pte_at(mm, addr, pte, ptent); -- cgit v1.2.3 From 947b3dd1a84ba3fcb7163688fdc36671941786f4 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Tue, 21 Jan 2014 15:51:04 -0800 Subject: memcg, oom: lock mem_cgroup_print_oom_info mem_cgroup_print_oom_info uses a static buffer (memcg_name) to store the name of the cgroup. This is not safe as pointed out by David Rientjes because memcg oom is locked only for its hierarchy and nothing prevents another parallel hierarchy to trigger oom as well and overwrite the already in-use buffer. 
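The shape of the fix, described next, is a function-local static lock guarding the function-local static buffer; a userspace sketch of that shape, with illustrative names only:

#include <pthread.h>
#include <stdio.h>

/* A static scratch buffer shared by all callers, made safe by a static lock. */
static void print_report(const char *name)
{
	static pthread_mutex_t report_lock = PTHREAD_MUTEX_INITIALIZER;
	static char scratch[256];

	pthread_mutex_lock(&report_lock);
	snprintf(scratch, sizeof(scratch), "report for %s", name);
	/* holding the lock also keeps concurrent reports from interleaving */
	printf("%s\n", scratch);
	pthread_mutex_unlock(&report_lock);
}

int main(void)
{
	print_report("example");
	return 0;
}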
This patch introduces oom_info_lock hidden inside mem_cgroup_print_oom_info which is held throughout the function. It makes access to memcg_name safe and as a bonus it also prevents parallel memcg ooms to interleave their statistics which would make the printed data hard to analyze otherwise. Signed-off-by: Michal Hocko Cc: Johannes Weiner Cc: KOSAKI Motohiro Cc: KAMEZAWA Hiroyuki Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 08541f680d90..57b16083f046 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1647,13 +1647,13 @@ static void move_unlock_mem_cgroup(struct mem_cgroup *memcg, */ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) { - struct cgroup *task_cgrp; - struct cgroup *mem_cgrp; /* - * Need a buffer in BSS, can't rely on allocations. The code relies - * on the assumption that OOM is serialized for memory controller. - * If this assumption is broken, revisit this code. + * protects memcg_name and makes sure that parallel ooms do not + * interleave */ + static DEFINE_SPINLOCK(oom_info_lock); + struct cgroup *task_cgrp; + struct cgroup *mem_cgrp; static char memcg_name[PATH_MAX]; int ret; struct mem_cgroup *iter; @@ -1662,6 +1662,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) if (!p) return; + spin_lock(&oom_info_lock); rcu_read_lock(); mem_cgrp = memcg->css.cgroup; @@ -1730,6 +1731,7 @@ done: pr_cont("\n"); } + spin_unlock(&oom_info_lock); } /* -- cgit v1.2.3 From 0eb927c0ab789d3d7d69f68acb850f69d4e7c36f Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 21 Jan 2014 15:51:05 -0800 Subject: mm: compaction: trace compaction begin and end The broad goal of the series is to improve allocation success rates for huge pages through memory compaction, while trying not to increase the compaction overhead. The original objective was to reintroduce capturing of high-order pages freed by the compaction, before they are split by concurrent activity. However, several bugs and opportunities for simple improvements were found in the current implementation, mostly through extra tracepoints (which are however too ugly for now to be considered for sending). The patches mostly deal with two mechanisms that reduce compaction overhead, which is caching the progress of migrate and free scanners, and marking pageblocks where isolation failed to be skipped during further scans. Patch 1 (from mgorman) adds tracepoints that allow calculate time spent in compaction and potentially debug scanner pfn values. Patch 2 encapsulates the some functionality for handling deferred compactions for better maintainability, without a functional change type is not determined without being actually needed. Patch 3 fixes a bug where cached scanner pfn's are sometimes reset only after they have been read to initialize a compaction run. Patch 4 fixes a bug where scanners meeting is sometimes not properly detected and can lead to multiple compaction attempts quitting early without doing any work. Patch 5 improves the chances of sync compaction to process pageblocks that async compaction has skipped due to being !MIGRATE_MOVABLE. Patch 6 improves the chances of sync direct compaction to actually do anything when called after async compaction fails during allocation slowpath. 
The impact of patches were validated using mmtests's stress-highalloc benchmark with mmtests's stress-highalloc benchmark on a x86_64 machine with 4GB memory. Due to instability of the results (mostly related to the bugs fixed by patches 2 and 3), 10 iterations were performed, taking min,mean,max values for success rates and mean values for time and vmstat-based metrics. First, the default GFP_HIGHUSER_MOVABLE allocations were tested with the patches stacked on top of v3.13-rc2. Patch 2 is OK to serve as baseline due to no functional changes in 1 and 2. Comments below. stress-highalloc 3.13-rc2 3.13-rc2 3.13-rc2 3.13-rc2 3.13-rc2 2-nothp 3-nothp 4-nothp 5-nothp 6-nothp Success 1 Min 9.00 ( 0.00%) 10.00 (-11.11%) 43.00 (-377.78%) 43.00 (-377.78%) 33.00 (-266.67%) Success 1 Mean 27.50 ( 0.00%) 25.30 ( 8.00%) 45.50 (-65.45%) 45.90 (-66.91%) 46.30 (-68.36%) Success 1 Max 36.00 ( 0.00%) 36.00 ( 0.00%) 47.00 (-30.56%) 48.00 (-33.33%) 52.00 (-44.44%) Success 2 Min 10.00 ( 0.00%) 8.00 ( 20.00%) 46.00 (-360.00%) 45.00 (-350.00%) 35.00 (-250.00%) Success 2 Mean 26.40 ( 0.00%) 23.50 ( 10.98%) 47.30 (-79.17%) 47.60 (-80.30%) 48.10 (-82.20%) Success 2 Max 34.00 ( 0.00%) 33.00 ( 2.94%) 48.00 (-41.18%) 50.00 (-47.06%) 54.00 (-58.82%) Success 3 Min 65.00 ( 0.00%) 63.00 ( 3.08%) 85.00 (-30.77%) 84.00 (-29.23%) 85.00 (-30.77%) Success 3 Mean 76.70 ( 0.00%) 70.50 ( 8.08%) 86.20 (-12.39%) 85.50 (-11.47%) 86.00 (-12.13%) Success 3 Max 87.00 ( 0.00%) 86.00 ( 1.15%) 88.00 ( -1.15%) 87.00 ( 0.00%) 87.00 ( 0.00%) 3.13-rc2 3.13-rc2 3.13-rc2 3.13-rc2 3.13-rc2 2-nothp 3-nothp 4-nothp 5-nothp 6-nothp User 6437.72 6459.76 5960.32 5974.55 6019.67 System 1049.65 1049.09 1029.32 1031.47 1032.31 Elapsed 1856.77 1874.48 1949.97 1994.22 1983.15 3.13-rc2 3.13-rc2 3.13-rc2 3.13-rc2 3.13-rc2 2-nothp 3-nothp 4-nothp 5-nothp 6-nothp Minor Faults 253952267 254581900 250030122 250507333 250157829 Major Faults 420 407 506 530 530 Swap Ins 4 9 9 6 6 Swap Outs 398 375 345 346 333 Direct pages scanned 197538 189017 298574 287019 299063 Kswapd pages scanned 1809843 1801308 1846674 1873184 1861089 Kswapd pages reclaimed 1806972 1798684 1844219 1870509 1858622 Direct pages reclaimed 197227 188829 298380 286822 298835 Kswapd efficiency 99% 99% 99% 99% 99% Kswapd velocity 953.382 970.449 952.243 934.569 922.286 Direct efficiency 99% 99% 99% 99% 99% Direct velocity 104.058 101.832 153.961 143.200 148.205 Percentage direct scans 9% 9% 13% 13% 13% Zone normal velocity 347.289 359.676 348.063 339.933 332.983 Zone dma32 velocity 710.151 712.605 758.140 737.835 737.507 Zone dma velocity 0.000 0.000 0.000 0.000 0.000 Page writes by reclaim 557.600 429.000 353.600 426.400 381.800 Page writes file 159 53 7 79 48 Page writes anon 398 375 345 346 333 Page reclaim immediate 825 644 411 575 420 Sector Reads 2781750 2769780 2878547 2939128 2910483 Sector Writes 12080843 12083351 12012892 12002132 12010745 Page rescued immediate 0 0 0 0 0 Slabs scanned 1575654 1545344 1778406 1786700 1794073 Direct inode steals 9657 10037 15795 14104 14645 Kswapd inode steals 46857 46335 50543 50716 51796 Kswapd skipped wait 0 0 0 0 0 THP fault alloc 97 91 81 71 77 THP collapse alloc 456 506 546 544 565 THP splits 6 5 5 4 4 THP fault fallback 0 1 0 0 0 THP collapse fail 14 14 12 13 12 Compaction stalls 1006 980 1537 1536 1548 Compaction success 303 284 562 559 578 Compaction failures 702 696 974 976 969 Page migrate success 1177325 1070077 3927538 3781870 3877057 Page migrate failure 0 0 0 0 0 Compaction pages isolated 2547248 2306457 8301218 8008500 8200674 Compaction 
migrate scanned 42290478 38832618 153961130 154143900 159141197 Compaction free scanned 89199429 79189151 356529027 351943166 356326727 Compaction cost 1566 1426 5312 5156 5294 NUMA PTE updates 0 0 0 0 0 NUMA hint faults 0 0 0 0 0 NUMA hint local faults 0 0 0 0 0 NUMA hint local percent 100 100 100 100 100 NUMA pages migrated 0 0 0 0 0 AutoNUMA cost 0 0 0 0 0 Observations: - The "Success 3" line is allocation success rate with system idle (phases 1 and 2 are with background interference). I used to get stable values around 85% with vanilla 3.11. The lower min and mean values came with 3.12. This was bisected to commit 81c0a2bb ("mm: page_alloc: fair zone allocator policy") As explained in comment for patch 3, I don't think the commit is wrong, but that it makes the effect of compaction bugs worse. From patch 3 onwards, the results are OK and match the 3.11 results. - Patch 4 also clearly helps phases 1 and 2, and exceeds any results I've seen with 3.11 (I didn't measure it that thoroughly then, but it was never above 40%). - Compaction cost and number of scanned pages is higher, especially due to patch 4. However, keep in mind that patches 3 and 4 fix existing bugs in the current design of compaction overhead mitigation, they do not change it. If overhead is found unacceptable, then it should be decreased differently (and consistently, not due to random conditions) than the current implementation does. In contrast, patches 5 and 6 (which are not strictly bug fixes) do not increase the overhead (but also not success rates). This might be a limitation of the stress-highalloc benchmark as it's quite uniform. Another set of results is when configuring stress-highalloc t allocate with similar flags as THP uses: (GFP_HIGHUSER_MOVABLE|__GFP_NOMEMALLOC|__GFP_NORETRY|__GFP_NO_KSWAPD) stress-highalloc 3.13-rc2 3.13-rc2 3.13-rc2 3.13-rc2 3.13-rc2 2-thp 3-thp 4-thp 5-thp 6-thp Success 1 Min 2.00 ( 0.00%) 7.00 (-250.00%) 18.00 (-800.00%) 19.00 (-850.00%) 26.00 (-1200.00%) Success 1 Mean 19.20 ( 0.00%) 17.80 ( 7.29%) 29.20 (-52.08%) 29.90 (-55.73%) 32.80 (-70.83%) Success 1 Max 27.00 ( 0.00%) 29.00 ( -7.41%) 35.00 (-29.63%) 36.00 (-33.33%) 37.00 (-37.04%) Success 2 Min 3.00 ( 0.00%) 8.00 (-166.67%) 21.00 (-600.00%) 21.00 (-600.00%) 32.00 (-966.67%) Success 2 Mean 19.30 ( 0.00%) 17.90 ( 7.25%) 32.20 (-66.84%) 32.60 (-68.91%) 35.70 (-84.97%) Success 2 Max 27.00 ( 0.00%) 30.00 (-11.11%) 36.00 (-33.33%) 37.00 (-37.04%) 39.00 (-44.44%) Success 3 Min 62.00 ( 0.00%) 62.00 ( 0.00%) 85.00 (-37.10%) 75.00 (-20.97%) 64.00 ( -3.23%) Success 3 Mean 66.30 ( 0.00%) 65.50 ( 1.21%) 85.60 (-29.11%) 83.40 (-25.79%) 83.50 (-25.94%) Success 3 Max 70.00 ( 0.00%) 69.00 ( 1.43%) 87.00 (-24.29%) 86.00 (-22.86%) 87.00 (-24.29%) 3.13-rc2 3.13-rc2 3.13-rc2 3.13-rc2 3.13-rc2 2-thp 3-thp 4-thp 5-thp 6-thp User 6547.93 6475.85 6265.54 6289.46 6189.96 System 1053.42 1047.28 1043.23 1042.73 1038.73 Elapsed 1835.43 1821.96 1908.67 1912.74 1956.38 3.13-rc2 3.13-rc2 3.13-rc2 3.13-rc2 3.13-rc2 2-thp 3-thp 4-thp 5-thp 6-thp Minor Faults 256805673 253106328 253222299 249830289 251184418 Major Faults 395 375 423 434 448 Swap Ins 12 10 10 12 9 Swap Outs 530 537 487 455 415 Direct pages scanned 71859 86046 153244 152764 190713 Kswapd pages scanned 1900994 1870240 1898012 1892864 1880520 Kswapd pages reclaimed 1897814 1867428 1894939 1890125 1877924 Direct pages reclaimed 71766 85908 153167 152643 190600 Kswapd efficiency 99% 99% 99% 99% 99% Kswapd velocity 1029.000 1067.782 1000.091 991.049 951.218 Direct efficiency 99% 99% 99% 99% 99% Direct 
velocity 38.897 49.127 80.747 79.983 96.468 Percentage direct scans 3% 4% 7% 7% 9% Zone normal velocity 351.377 372.494 348.910 341.689 335.310 Zone dma32 velocity 716.520 744.414 731.928 729.343 712.377 Zone dma velocity 0.000 0.000 0.000 0.000 0.000 Page writes by reclaim 669.300 604.000 545.700 538.900 429.900 Page writes file 138 66 58 83 14 Page writes anon 530 537 487 455 415 Page reclaim immediate 806 655 772 548 517 Sector Reads 2711956 2703239 2811602 2818248 2839459 Sector Writes 12163238 12018662 12038248 11954736 11994892 Page rescued immediate 0 0 0 0 0 Slabs scanned 1385088 1388364 1507968 1513292 1558656 Direct inode steals 1739 2564 4622 5496 6007 Kswapd inode steals 47461 46406 47804 48013 48466 Kswapd skipped wait 0 0 0 0 0 THP fault alloc 110 82 84 69 70 THP collapse alloc 445 482 467 462 539 THP splits 6 5 4 5 3 THP fault fallback 3 0 0 0 0 THP collapse fail 15 14 14 14 13 Compaction stalls 659 685 1033 1073 1111 Compaction success 222 225 410 427 456 Compaction failures 436 460 622 646 655 Page migrate success 446594 439978 1085640 1095062 1131716 Page migrate failure 0 0 0 0 0 Compaction pages isolated 1029475 1013490 2453074 2482698 2565400 Compaction migrate scanned 9955461 11344259 24375202 27978356 30494204 Compaction free scanned 27715272 28544654 80150615 82898631 85756132 Compaction cost 552 555 1344 1379 1436 NUMA PTE updates 0 0 0 0 0 NUMA hint faults 0 0 0 0 0 NUMA hint local faults 0 0 0 0 0 NUMA hint local percent 100 100 100 100 100 NUMA pages migrated 0 0 0 0 0 AutoNUMA cost 0 0 0 0 0 There are some differences from the previous results for THP-like allocations: - Here, the bad result for unpatched kernel in phase 3 is much more consistent to be between 65-70% and not related to the "regression" in 3.12. Still there is the improvement from patch 4 onwards, which brings it on par with simple GFP_HIGHUSER_MOVABLE allocations. - Compaction costs have increased, but nowhere near as much as the non-THP case. Again, the patches should be worth the gained determininsm. - Patches 5 and 6 somewhat increase the number of migrate-scanned pages. This is most likely due to __GFP_NO_KSWAPD flag, which means the cached pfn's and pageblock skip bits are not reset by kswapd that often (at least in phase 3 where no concurrent activity would wake up kswapd) and the patches thus help the sync-after-async compaction. It doesn't however show that the sync compaction would help so much with success rates, which can be again seen as a limitation of the benchmark scenario. This patch (of 6): Add two tracepoints for compaction begin and end of a zone. Using this it is possible to calculate how much time a workload is spending within compaction and potentially debug problems related to cached pfns for scanning. In combination with the direct reclaim and slab trace points it should be possible to estimate most allocation-related overhead for a workload. 
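For completeness, one rough way to consume the two events from userspace is through the usual tracefs control files. The sketch below is not part of the patch; it assumes the conventional layout under /sys/kernel/debug/tracing (newer kernels also expose /sys/kernel/tracing), needs root, and leaves the pairing of begin/end timestamps to the reader:

/*
 * Enable the new compaction events and stream the raw trace (sketch only).
 * Assumes the conventional tracefs layout and root privileges.
 */
#include <stdio.h>
#include <stdlib.h>

#define TRACING "/sys/kernel/debug/tracing"

static void enable_event(const char *event)
{
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), TRACING "/events/compaction/%s/enable", event);
        f = fopen(path, "w");
        if (!f) {
                perror(path);
                exit(1);
        }
        fputs("1\n", f);
        fclose(f);
}

int main(void)
{
        char line[1024];
        FILE *pipe;

        enable_event("mm_compaction_begin");
        enable_event("mm_compaction_end");

        pipe = fopen(TRACING "/trace_pipe", "r");
        if (!pipe) {
                perror("trace_pipe");
                return 1;
        }
        /*
         * Every trace line carries a timestamp; the difference between a
         * begin/end pair is the time spent in one compact_zone() run.
         */
        while (fgets(line, sizeof(line), pipe))
                fputs(line, stdout);
        return 0;
}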
Signed-off-by: Mel Gorman Signed-off-by: Vlastimil Babka Cc: Rik van Riel Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/trace/events/compaction.h | 42 +++++++++++++++++++++++++++++++++++++++ mm/compaction.c | 4 ++++ 2 files changed, 46 insertions(+) (limited to 'mm') diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h index fde1b3e94c7d..06f544ef2f6f 100644 --- a/include/trace/events/compaction.h +++ b/include/trace/events/compaction.h @@ -67,6 +67,48 @@ TRACE_EVENT(mm_compaction_migratepages, __entry->nr_failed) ); +TRACE_EVENT(mm_compaction_begin, + TP_PROTO(unsigned long zone_start, unsigned long migrate_start, + unsigned long free_start, unsigned long zone_end), + + TP_ARGS(zone_start, migrate_start, free_start, zone_end), + + TP_STRUCT__entry( + __field(unsigned long, zone_start) + __field(unsigned long, migrate_start) + __field(unsigned long, free_start) + __field(unsigned long, zone_end) + ), + + TP_fast_assign( + __entry->zone_start = zone_start; + __entry->migrate_start = migrate_start; + __entry->free_start = free_start; + __entry->zone_end = zone_end; + ), + + TP_printk("zone_start=%lu migrate_start=%lu free_start=%lu zone_end=%lu", + __entry->zone_start, + __entry->migrate_start, + __entry->free_start, + __entry->zone_end) +); + +TRACE_EVENT(mm_compaction_end, + TP_PROTO(int status), + + TP_ARGS(status), + + TP_STRUCT__entry( + __field(int, status) + ), + + TP_fast_assign( + __entry->status = status; + ), + + TP_printk("status=%d", __entry->status) +); #endif /* _TRACE_COMPACTION_H */ diff --git a/mm/compaction.c b/mm/compaction.c index f58bcd016f43..a03995eddedb 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -970,6 +970,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) if (compaction_restarting(zone, cc->order) && !current_is_kswapd()) __reset_isolation_suitable(zone); + trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn); + migrate_prep_local(); while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) { @@ -1015,6 +1017,8 @@ out: cc->nr_freepages -= release_freepages(&cc->freepages); VM_BUG_ON(cc->nr_freepages != 0); + trace_mm_compaction_end(ret); + return ret; } -- cgit v1.2.3 From de6c60a6c115acaa721cfd499e028a413d1fcbf3 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 21 Jan 2014 15:51:07 -0800 Subject: mm: compaction: encapsulate defer reset logic Currently there are several functions to manipulate the deferred compaction state variables. The remaining case where the variables are touched directly is when a successful allocation occurs in direct compaction, or is expected to be successful in the future by kswapd. Here, the lowest order that is expected to fail is updated, and in the case of successful allocation, the deferred status and counter is reset completely. Create a new function compaction_defer_reset() to encapsulate this functionality and make it easier to understand the code. No functional change. 
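To make the bookkeeping concrete, the following standalone model shows how the deferral state backs off after failures and how compaction_defer_reset() clears it. Only compaction_defer_reset() mirrors the helper this patch adds; defer_compaction() and compaction_deferred() here are simplified stand-ins with arbitrary constants, not the kernel implementations:

/* Standalone model of the deferral bookkeeping; constants are arbitrary. */
#include <stdbool.h>
#include <stdio.h>

struct zone_model {
        unsigned int considered;        /* attempts seen while backing off */
        unsigned int defer_shift;       /* defer the next 1 << defer_shift attempts */
        int order_failed;               /* lowest order expected to fail */
};

/* Compaction failed: lengthen the backoff window (simplified stand-in). */
static void defer_compaction(struct zone_model *z, int order)
{
        z->considered = 0;
        if (z->defer_shift < 6)
                z->defer_shift++;
        if (order < z->order_failed)
                z->order_failed = order;
}

/* Should this attempt be skipped? (simplified stand-in) */
static bool compaction_deferred(struct zone_model *z, int order)
{
        if (order < z->order_failed)
                return false;
        return z->considered++ < (1u << z->defer_shift);
}

/* Success (or expected success) clears the backoff; mirrors the new helper. */
static void compaction_defer_reset(struct zone_model *z, int order,
                                   bool alloc_success)
{
        if (alloc_success) {
                z->considered = 0;
                z->defer_shift = 0;
        }
        if (order >= z->order_failed)
                z->order_failed = order + 1;
}

int main(void)
{
        struct zone_model z = { .order_failed = 9 };

        defer_compaction(&z, 9);
        printf("deferred after a failure: %d\n", compaction_deferred(&z, 9));
        compaction_defer_reset(&z, 9, true);
        printf("deferred after a success: %d\n", compaction_deferred(&z, 9));
        return 0;
}

Running the model prints that an attempt right after a failure is deferred, while an attempt right after a successful allocation is not.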
Signed-off-by: Vlastimil Babka Acked-by: Mel Gorman Reviewed-by: Rik van Riel Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compaction.h | 16 ++++++++++++++++ mm/compaction.c | 9 ++++----- mm/page_alloc.c | 5 +---- 3 files changed, 21 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 091d72e70d8a..7e1c76e3cd68 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -62,6 +62,22 @@ static inline bool compaction_deferred(struct zone *zone, int order) return zone->compact_considered < defer_limit; } +/* + * Update defer tracking counters after successful compaction of given order, + * which means an allocation either succeeded (alloc_success == true) or is + * expected to succeed. + */ +static inline void compaction_defer_reset(struct zone *zone, int order, + bool alloc_success) +{ + if (alloc_success) { + zone->compact_considered = 0; + zone->compact_defer_shift = 0; + } + if (order >= zone->compact_order_failed) + zone->compact_order_failed = order + 1; +} + /* Returns true if restarting compaction after many failures */ static inline bool compaction_restarting(struct zone *zone, int order) { diff --git a/mm/compaction.c b/mm/compaction.c index a03995eddedb..927de97cab8d 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1124,12 +1124,11 @@ static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) compact_zone(zone, cc); if (cc->order > 0) { - int ok = zone_watermark_ok(zone, cc->order, - low_wmark_pages(zone), 0, 0); - if (ok && cc->order >= zone->compact_order_failed) - zone->compact_order_failed = cc->order + 1; + if (zone_watermark_ok(zone, cc->order, + low_wmark_pages(zone), 0, 0)) + compaction_defer_reset(zone, cc->order, false); /* Currently async compaction is never deferred. */ - else if (!ok && cc->sync) + else if (cc->sync) defer_compaction(zone, cc->order); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index b230e838883d..84da0e3bc886 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2235,10 +2235,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, preferred_zone, migratetype); if (page) { preferred_zone->compact_blockskip_flush = false; - preferred_zone->compact_considered = 0; - preferred_zone->compact_defer_shift = 0; - if (order >= preferred_zone->compact_order_failed) - preferred_zone->compact_order_failed = order + 1; + compaction_defer_reset(preferred_zone, order, true); count_vm_event(COMPACTSUCCESS); return page; } -- cgit v1.2.3 From d3132e4b83e6bd383c74d716f7281d7c3136089c Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 21 Jan 2014 15:51:08 -0800 Subject: mm: compaction: reset cached scanner pfn's before reading them Compaction caches pfn's for its migrate and free scanners to avoid scanning the whole zone each time. In compact_zone(), the cached values are read to set up initial values for the scanners. There are several situations when these cached pfn's are reset to the first and last pfn of the zone, respectively. One of these situations is when a compaction has been deferred for a zone and is now being restarted during a direct compaction, which is also done in compact_zone(). However, compact_zone() currently reads the cached pfn's *before* resetting them. 
This means the reset doesn't affect the compaction that performs it, and with good chance also subsequent compactions, as update_pageblock_skip() is likely to be called and update the cached pfn's to those being processed. Another chance for a successful reset is when a direct compaction detects that migration and free scanners meet (which has its own problems addressed by another patch) and sets update_pageblock_skip flag which kswapd uses to do the reset because it goes to sleep. This is clearly a bug that results in non-deterministic behavior, so this patch moves the cached pfn reset to be performed *before* the values are read. Signed-off-by: Vlastimil Babka Acked-by: Mel Gorman Acked-by: Rik van Riel Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 927de97cab8d..f4e2c166880b 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -946,6 +946,14 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) ; } + /* + * Clear pageblock skip if there were failures recently and compaction + * is about to be retried after being deferred. kswapd does not do + * this reset as it'll reset the cached information when going to sleep. + */ + if (compaction_restarting(zone, cc->order) && !current_is_kswapd()) + __reset_isolation_suitable(zone); + /* * Setup to move all movable pages to the end of the zone. Used cached * information on where the scanners should start but check that it @@ -962,14 +970,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) zone->compact_cached_migrate_pfn = cc->migrate_pfn; } - /* - * Clear pageblock skip if there were failures recently and compaction - * is about to be retried after being deferred. kswapd does not do - * this reset as it'll reset the cached information when going to sleep. - */ - if (compaction_restarting(zone, cc->order) && !current_is_kswapd()) - __reset_isolation_suitable(zone); - trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn); migrate_prep_local(); -- cgit v1.2.3 From 7ed695e069c3cbea5e1fd08f84a04536da91f584 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 21 Jan 2014 15:51:09 -0800 Subject: mm: compaction: detect when scanners meet in isolate_freepages Compaction of a zone is finished when the migrate scanner (which begins at the zone's lowest pfn) meets the free page scanner (which begins at the zone's highest pfn). This is detected in compact_zone() and in the case of direct compaction, the compact_blockskip_flush flag is set so that kswapd later resets the cached scanner pfn's, and a new compaction may again start at the zone's borders. The meeting of the scanners can happen during either scanner's activity. However, it may currently fail to be detected when it occurs in the free page scanner, due to two problems. First, isolate_freepages() keeps free_pfn at the highest block where it isolated pages from, for the purposes of not missing the pages that are returned back to allocator when migration fails. Second, failing to isolate enough free pages due to scanners meeting results in -ENOMEM being returned by migrate_pages(), which makes compact_zone() bail out immediately without calling compact_finished() that would detect scanners meeting. 
This failure to detect scanners meeting might result in repeated attempts at compaction of a zone that keep starting from the cached pfn's close to the meeting point, and quickly failing through the -ENOMEM path, without the cached pfns being reset, over and over. This has been observed (through additional tracepoints) in the third phase of the mmtests stress-highalloc benchmark, where the allocator runs on an otherwise idle system. The problem was observed in the DMA32 zone, which was used as a fallback to the preferred Normal zone, but on the 4GB system it was actually the largest zone. The problem is even amplified for such fallback zone - the deferred compaction logic, which could (after being fixed by a previous patch) reset the cached scanner pfn's, is only applied to the preferred zone and not for the fallbacks. The problem in the third phase of the benchmark was further amplified by commit 81c0a2bb515f ("mm: page_alloc: fair zone allocator policy") which resulted in a non-deterministic regression of the allocation success rate from ~85% to ~65%. This occurs in about half of benchmark runs, making bisection problematic. It is unlikely that the commit itself is buggy, but it should put more pressure on the DMA32 zone during phases 1 and 2, which may leave it more fragmented in phase 3 and expose the bugs that this patch fixes. The fix is to make scanners meeting in isolate_freepage() stay that way, and to check in compact_zone() for scanners meeting when migrate_pages() returns -ENOMEM. The result is that compact_finished() also detects scanners meeting and sets the compact_blockskip_flush flag to make kswapd reset the scanner pfn's. The results in stress-highalloc benchmark show that the "regression" by commit 81c0a2bb515f in phase 3 no longer occurs, and phase 1 and 2 allocation success rates are also significantly improved. Signed-off-by: Vlastimil Babka Cc: Mel Gorman Cc: Rik van Riel Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index f4e2c166880b..cc46db36e708 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -660,7 +660,7 @@ static void isolate_freepages(struct zone *zone, * is the end of the pageblock the migration scanner is using. */ pfn = cc->free_pfn; - low_pfn = cc->migrate_pfn + pageblock_nr_pages; + low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages); /* * Take care that if the migration scanner is at the end of the zone @@ -676,7 +676,7 @@ static void isolate_freepages(struct zone *zone, * pages on cc->migratepages. We stop searching if the migrate * and free page scanners meet or enough free pages are isolated. 
*/ - for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages; + for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages; pfn -= pageblock_nr_pages) { unsigned long isolated; @@ -738,7 +738,14 @@ static void isolate_freepages(struct zone *zone, /* split_free_page does not map the pages */ map_pages(freelist); - cc->free_pfn = high_pfn; + /* + * If we crossed the migrate scanner, we want to keep it that way + * so that compact_finished() may detect this + */ + if (pfn < low_pfn) + cc->free_pfn = max(pfn, zone->zone_start_pfn); + else + cc->free_pfn = high_pfn; cc->nr_freepages = nr_freepages; } @@ -1005,7 +1012,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) if (err) { putback_movable_pages(&cc->migratepages); cc->nr_migratepages = 0; - if (err == -ENOMEM) { + /* + * migrate_pages() may return -ENOMEM when scanners meet + * and we want compact_finished() to detect it + */ + if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) { ret = COMPACT_PARTIAL; goto out; } -- cgit v1.2.3 From 50b5b094e683f8e51e82c6dfe97b1608cf97e6c0 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 21 Jan 2014 15:51:10 -0800 Subject: mm: compaction: do not mark unmovable pageblocks as skipped in async compaction Compaction temporarily marks pageblocks where it fails to isolate pages as to-be-skipped in further compactions, in order to improve efficiency. One of the reasons to fail isolating pages is that isolation is not attempted in pageblocks that are not of MIGRATE_MOVABLE (or CMA) type. The problem is that blocks skipped due to not being MIGRATE_MOVABLE in async compaction become skipped due to the temporary mark also in future sync compaction. Moreover, this may follow quite soon during __alloc_page_slowpath, without much time for kswapd to clear the pageblock skip marks. This goes against the idea that sync compaction should try to scan these blocks more thoroughly than the async compaction. The fix is to ensure in async compaction that these !MIGRATE_MOVABLE blocks are not marked to be skipped. Note this should not affect performance or locking impact of further async compactions, as skipping a block due to being !MIGRATE_MOVABLE is done soon after skipping a block marked to be skipped, both without locking. 
Signed-off-by: Vlastimil Babka Cc: Rik van Riel Acked-by: Mel Gorman Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index cc46db36e708..32a033cb5c65 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -459,6 +459,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, unsigned long flags; bool locked = false; struct page *page = NULL, *valid_page = NULL; + bool skipped_async_unsuitable = false; /* * Ensure that there are not too many pages isolated from the LRU @@ -534,6 +535,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, if (!cc->sync && last_pageblock_nr != pageblock_nr && !migrate_async_suitable(get_pageblock_migratetype(page))) { cc->finished_update_migrate = true; + skipped_async_unsuitable = true; goto next_pageblock; } @@ -627,8 +629,13 @@ next_pageblock: if (locked) spin_unlock_irqrestore(&zone->lru_lock, flags); - /* Update the pageblock-skip if the whole pageblock was scanned */ - if (low_pfn == end_pfn) + /* + * Update the pageblock-skip information and cached scanner pfn, + * if the whole pageblock was scanned without isolating any page. + * This is not done when pageblock was skipped due to being unsuitable + * for async compaction, so that eventual sync compaction can try. + */ + if (low_pfn == end_pfn && !skipped_async_unsuitable) update_pageblock_skip(cc, valid_page, nr_isolated, true); trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated); -- cgit v1.2.3 From 55b7c4c99f6a448f72179297fe6432544f220063 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 21 Jan 2014 15:51:11 -0800 Subject: mm: compaction: reset scanner positions immediately when they meet Compaction used to start its migrate and free page scaners at the zone's lowest and highest pfn, respectively. Later, caching was introduced to remember the scanners' progress across compaction attempts so that pageblocks are not re-scanned uselessly. Additionally, pageblocks where isolation failed are marked to be quickly skipped when encountered again in future compactions. Currently, both the reset of cached pfn's and clearing of the pageblock skip information for a zone is done in __reset_isolation_suitable(). This function gets called when: - compaction is restarting after being deferred - compact_blockskip_flush flag is set in compact_finished() when the scanners meet (and not again cleared when direct compaction succeeds in allocation) and kswapd acts upon this flag before going to sleep This behavior is suboptimal for several reasons: - when direct sync compaction is called after async compaction fails (in the allocation slowpath), it will effectively do nothing, unless kswapd happens to process the compact_blockskip_flush flag meanwhile. This is racy and goes against the purpose of sync compaction to more thoroughly retry the compaction of a zone where async compaction has failed. The restart-after-deferring path cannot help here as deferring happens only after the sync compaction fails. It is also done only for the preferred zone, while the compaction might be done for a fallback zone. - the mechanism of marking pageblock to be skipped has little value since the cached pfn's are reset only together with the pageblock skip flags. This effectively limits pageblock skip usage to parallel compactions. 
This patch changes compact_finished() so that cached pfn's are reset immediately when the scanners meet. Clearing pageblock skip flags is unchanged, as well as the other situations where cached pfn's are reset. This allows the sync-after-async compaction to retry pageblocks not marked as skipped, such as blocks !MIGRATE_MOVABLE blocks that async compactions now skips without marking them. Signed-off-by: Vlastimil Babka Cc: Rik van Riel Acked-by: Mel Gorman Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 32a033cb5c65..3a91a2ea3d34 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -851,6 +851,10 @@ static int compact_finished(struct zone *zone, /* Compaction run completes if the migrate and free scanner meet */ if (cc->free_pfn <= cc->migrate_pfn) { + /* Let the next compaction start anew. */ + zone->compact_cached_migrate_pfn = zone->zone_start_pfn; + zone->compact_cached_free_pfn = zone_end_pfn(zone); + /* * Mark that the PG_migrate_skip information should be cleared * by kswapd when it goes to sleep. kswapd does not set the -- cgit v1.2.3 From aed0a0e32de387da831284fda25021de32477195 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Tue, 21 Jan 2014 15:51:12 -0800 Subject: mm, page_alloc: warn for non-blockable __GFP_NOFAIL allocation failure __GFP_NOFAIL may return NULL when coupled with GFP_NOWAIT or GFP_ATOMIC. Luckily, nothing currently does such craziness. So instead of causing such allocations to loop (potentially forever), we maintain the current behavior and also warn about the new users of the deprecated flag. Suggested-by: Andrew Morton Signed-off-by: David Rientjes Cc: Mel Gorman Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 84da0e3bc886..533e2147d14f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2525,8 +2525,15 @@ rebalance: } /* Atomic allocations - we can't balance anything */ - if (!wait) + if (!wait) { + /* + * All existing users of the deprecated __GFP_NOFAIL are + * blockable, so warn of any new users that actually allow this + * type of allocation to fail. + */ + WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL); goto nopage; + } /* Avoid recursion of direct reclaim */ if (current->flags & PF_MEMALLOC) -- cgit v1.2.3 From 354a3363363724c21ea2e4b28370e27983c2452e Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Tue, 21 Jan 2014 15:51:14 -0800 Subject: mm/migrate: add comment about permanent failure path Let's add a comment about where the failed page goes to, which makes code more readable. 
Signed-off-by: Naoya Horiguchi Signed-off-by: Joonsoo Kim Acked-by: Christoph Lameter Reviewed-by: Wanpeng Li Acked-by: Rafael Aquini Cc: Vlastimil Babka Cc: Wanpeng Li Cc: Mel Gorman Cc: Rik van Riel Cc: Vlastimil Babka Cc: Zhang Yanfei Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/migrate.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/migrate.c b/mm/migrate.c index f9e16350d09c..626ca3c5d07b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1125,7 +1125,12 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page, nr_succeeded++; break; default: - /* Permanent failure */ + /* + * Permanent failure (-EBUSY, -ENOSYS, etc.): + * unlike -EAGAIN case, the failed page is + * removed from migration page list and not + * retried in the next outer loop. + */ nr_failed++; break; } -- cgit v1.2.3 From 32665f2bbfed2e325d37236d9b0071a11a69124e Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 21 Jan 2014 15:51:15 -0800 Subject: mm/migrate: correct failure handling if !hugepage_migration_support() We should remove the page from the list if we fail with ENOSYS, since migrate_pages() consider error cases except -ENOMEM and -EAGAIN as permanent failure and it assumes that the page would be removed from the list. Without this patch, we could overcount number of failure. In addition, we should put back the new hugepage if !hugepage_migration_support(). If not, we would leak hugepage memory. Signed-off-by: Joonsoo Kim Acked-by: Christoph Lameter Reviewed-by: Wanpeng Li Reviewed-by: Naoya Horiguchi Cc: Rafael Aquini Cc: Vlastimil Babka Cc: Wanpeng Li Cc: Mel Gorman Cc: Rik van Riel Cc: Vlastimil Babka Cc: Zhang Yanfei Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/migrate.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/migrate.c b/mm/migrate.c index 626ca3c5d07b..13bedcc4656b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1013,7 +1013,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, { int rc = 0; int *result = NULL; - struct page *new_hpage = get_new_page(hpage, private, &result); + struct page *new_hpage; struct anon_vma *anon_vma = NULL; /* @@ -1023,9 +1023,12 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, * tables or check whether the hugepage is pmd-based or not before * kicking migration. */ - if (!hugepage_migration_support(page_hstate(hpage))) + if (!hugepage_migration_support(page_hstate(hpage))) { + putback_active_hugepage(hpage); return -ENOSYS; + } + new_hpage = get_new_page(hpage, private, &result); if (!new_hpage) return -ENOMEM; -- cgit v1.2.3 From 59c82b70dcd9cc273c21fae5abc29e41fc732a17 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 21 Jan 2014 15:51:17 -0800 Subject: mm/migrate: remove putback_lru_pages, fix comment on putback_movable_pages Some part of putback_lru_pages() and putback_movable_pages() is duplicated, so it could confuse us what we should use. We can remove putback_lru_pages() since it is not really needed now. This makes us undestand and maintain the code more easily. And comment on putback_movable_pages() is stale now, so fix it. 
Signed-off-by: Joonsoo Kim Reviewed-by: Wanpeng Li Cc: Christoph Lameter Cc: Naoya Horiguchi Cc: Rafael Aquini Cc: Vlastimil Babka Cc: Wanpeng Li Cc: Mel Gorman Cc: Rik van Riel Cc: Vlastimil Babka Cc: Zhang Yanfei Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/migrate.h | 2 -- mm/memory-failure.c | 8 +++++++- mm/migrate.c | 29 +++++++++-------------------- 3 files changed, 16 insertions(+), 23 deletions(-) (limited to 'mm') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index f015c059e159..2a411bc0097f 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -35,7 +35,6 @@ enum migrate_reason { #ifdef CONFIG_MIGRATION -extern void putback_lru_pages(struct list_head *l); extern void putback_movable_pages(struct list_head *l); extern int migrate_page(struct address_space *, struct page *, struct page *, enum migrate_mode); @@ -59,7 +58,6 @@ extern int migrate_page_move_mapping(struct address_space *mapping, int extra_count); #else -static inline void putback_lru_pages(struct list_head *l) {} static inline void putback_movable_pages(struct list_head *l) {} static inline int migrate_pages(struct list_head *l, new_page_t x, unsigned long private, enum migrate_mode mode, int reason) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 9fa6586d5275..b25ed321e667 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1585,7 +1585,13 @@ static int __soft_offline_page(struct page *page, int flags) ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, MIGRATE_SYNC, MR_MEMORY_FAILURE); if (ret) { - putback_lru_pages(&pagelist); + if (!list_empty(&pagelist)) { + list_del(&page->lru); + dec_zone_page_state(page, NR_ISOLATED_ANON + + page_is_file_cache(page)); + putback_lru_page(page); + } + pr_info("soft offline: %#lx: migration failed %d, type %lx\n", pfn, ret, page->flags); if (ret > 0) diff --git a/mm/migrate.c b/mm/migrate.c index 13bedcc4656b..8a73d66be102 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -71,29 +71,13 @@ int migrate_prep_local(void) return 0; } -/* - * Add isolated pages on the list back to the LRU under page lock - * to avoid leaking evictable pages back onto unevictable list. - */ -void putback_lru_pages(struct list_head *l) -{ - struct page *page; - struct page *page2; - - list_for_each_entry_safe(page, page2, l, lru) { - list_del(&page->lru); - dec_zone_page_state(page, NR_ISOLATED_ANON + - page_is_file_cache(page)); - putback_lru_page(page); - } -} - /* * Put previously isolated pages back onto the appropriate lists * from where they were once taken off for compaction/migration. * - * This function shall be used instead of putback_lru_pages(), - * whenever the isolated pageset has been built by isolate_migratepages_range() + * This function shall be used whenever the isolated pageset has been + * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range() + * and isolate_huge_page(). 
*/ void putback_movable_pages(struct list_head *l) { @@ -1725,7 +1709,12 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page, node, MIGRATE_ASYNC, MR_NUMA_MISPLACED); if (nr_remaining) { - putback_lru_pages(&migratepages); + if (!list_empty(&migratepages)) { + list_del(&page->lru); + dec_zone_page_state(page, NR_ISOLATED_ANON + + page_is_file_cache(page)); + putback_lru_page(page); + } isolated = 0; } else count_vm_numa_event(NUMA_PAGE_MIGRATE); -- cgit v1.2.3 From 78d5506e82b21a1a1de68c24182db2c2fe521422 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 21 Jan 2014 15:51:18 -0800 Subject: mm/migrate: remove unused function, fail_migrate_page() fail_migrate_page() isn't used anywhere, so remove it. Signed-off-by: Joonsoo Kim Acked-by: Christoph Lameter Reviewed-by: Naoya Horiguchi Reviewed-by: Wanpeng Li Cc: Rafael Aquini Cc: Vlastimil Babka Cc: Wanpeng Li Cc: Mel Gorman Cc: Rik van Riel Cc: Zhang Yanfei Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/migrate.h | 4 ---- mm/migrate.c | 8 -------- 2 files changed, 12 deletions(-) (limited to 'mm') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 2a411bc0097f..84a31ad0b791 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -41,9 +41,6 @@ extern int migrate_page(struct address_space *, extern int migrate_pages(struct list_head *l, new_page_t x, unsigned long private, enum migrate_mode mode, int reason); -extern int fail_migrate_page(struct address_space *, - struct page *, struct page *); - extern int migrate_prep(void); extern int migrate_prep_local(void); extern int migrate_vmas(struct mm_struct *mm, @@ -84,7 +81,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, /* Possible settings for the migrate_page() method in address_operations */ #define migrate_page NULL -#define fail_migrate_page NULL #endif /* CONFIG_MIGRATION */ diff --git a/mm/migrate.c b/mm/migrate.c index 8a73d66be102..a8025befc323 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -552,14 +552,6 @@ void migrate_page_copy(struct page *newpage, struct page *page) * Migration functions ***********************************************************/ -/* Always fail migration. Used for mappings that are not movable */ -int fail_migrate_page(struct address_space *mapping, - struct page *newpage, struct page *page) -{ - return -EIO; -} -EXPORT_SYMBOL(fail_migrate_page); - /* * Common logic to directly migrate a single page suitable for * pages that do not use PagePrivate/PagePrivate2. -- cgit v1.2.3 From 12ab028be0008640de712ca890dc1a9ae224934d Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Thu, 23 Jan 2014 15:52:48 -0800 Subject: mm/zswap.c: change params from hidden to ro The "compressor" and "enabled" params are currently hidden, this changes them to read-only, so userspace can tell if zswap is enabled or not and see what compressor is in use. 
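With the permission changed to 0444 the values become readable through the standard module parameter files. A minimal userspace check, assuming the usual /sys/module/zswap/parameters layout, could look like this:

/* Read the now-visible zswap parameters (assumes the standard sysfs layout). */
#include <stdio.h>

static void show(const char *name)
{
        char path[128], buf[64];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/module/zswap/parameters/%s", name);
        f = fopen(path, "r");
        if (!f) {
                perror(path);
                return;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("zswap.%s = %s", name, buf);     /* buf keeps its newline */
        fclose(f);
}

int main(void)
{
        show("enabled");        /* bool parameters read back as "Y" or "N" */
        show("compressor");     /* e.g. "lzo", the default set above */
        return 0;
}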
Signed-off-by: Dan Streetman Cc: Vladimir Murzin Cc: Bob Liu Cc: Minchan Kim Cc: Weijie Yang Acked-by: Seth Jennings Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zswap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/zswap.c b/mm/zswap.c index 5a63f78a5601..e55bab9dc41f 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -77,12 +77,12 @@ static u64 zswap_duplicate_entry; **********************************/ /* Enable/disable zswap (disabled by default, fixed at boot for now) */ static bool zswap_enabled __read_mostly; -module_param_named(enabled, zswap_enabled, bool, 0); +module_param_named(enabled, zswap_enabled, bool, 0444); /* Compressor to be used by zswap (fixed at boot for now) */ #define ZSWAP_COMPRESSOR_DEFAULT "lzo" static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT; -module_param_named(compressor, zswap_compressor, charp, 0); +module_param_named(compressor, zswap_compressor, charp, 0444); /* The maximum percentage of memory that the compressed pool can occupy */ static unsigned int zswap_max_pool_percent = 20; -- cgit v1.2.3 From f0b791a34cb3cffd2bbc3ca4365c9b719fa2c9f3 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Thu, 23 Jan 2014 15:52:49 -0800 Subject: mm: print more details for bad_page() bad_page() is cool in that it prints out a bunch of data about the page. But, I can never remember which page flags are good and which are bad, or whether ->index or ->mapping is required to be NULL. This patch allows bad/dump_page() callers to specify a string about why they are dumping the page and adds explanation strings to a number of places. It also adds a 'bad_flags' argument to bad_page(), which it then dumps out separately from the flags which are actually set. This way, the messages will show specifically why the page was bad, *specifically* which flags it is complaining about, if it was a page flag combination which was the problem. 
[akpm@linux-foundation.org: switch to pr_alert] Signed-off-by: Dave Hansen Reviewed-by: Christoph Lameter Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 4 ++- mm/balloon_compaction.c | 4 +-- mm/memory.c | 2 +- mm/memory_hotplug.c | 2 +- mm/page_alloc.c | 72 ++++++++++++++++++++++++++++++++++++------------- 5 files changed, 61 insertions(+), 23 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index a512dd836931..03bbcb84d96e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2029,7 +2029,9 @@ extern void shake_page(struct page *p, int access); extern atomic_long_t num_poisoned_pages; extern int soft_offline_page(struct page *page, int flags); -extern void dump_page(struct page *page); +extern void dump_page(struct page *page, char *reason); +extern void dump_page_badflags(struct page *page, char *reason, + unsigned long badflags); #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) extern void clear_huge_page(struct page *page, diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 07dbc8ec46cf..6e45a5074bf0 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -267,7 +267,7 @@ void balloon_page_putback(struct page *page) put_page(page); } else { WARN_ON(1); - dump_page(page); + dump_page(page, "not movable balloon page"); } unlock_page(page); } @@ -287,7 +287,7 @@ int balloon_page_migrate(struct page *newpage, BUG_ON(!trylock_page(newpage)); if (WARN_ON(!__is_movable_balloon_page(page))) { - dump_page(page); + dump_page(page, "not movable balloon page"); unlock_page(newpage); return rc; } diff --git a/mm/memory.c b/mm/memory.c index 86487dfa5e59..71d70c082b98 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -671,7 +671,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, current->comm, (long long)pte_val(pte), (long long)pmd_val(*pmd)); if (page) - dump_page(page); + dump_page(page, "bad pte"); printk(KERN_ALERT "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index cc2ab37220b7..a512a47241a4 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1309,7 +1309,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) #ifdef CONFIG_DEBUG_VM printk(KERN_ALERT "removing pfn %lx from LRU failed\n", pfn); - dump_page(page); + dump_page(page, "failed to remove from LRU"); #endif put_page(page); /* Because we don't have big zone->lock. 
we should diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 533e2147d14f..1939f4446a36 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -295,7 +295,7 @@ static inline int bad_range(struct zone *zone, struct page *page) } #endif -static void bad_page(struct page *page) +static void bad_page(struct page *page, char *reason, unsigned long bad_flags) { static unsigned long resume; static unsigned long nr_shown; @@ -329,7 +329,7 @@ static void bad_page(struct page *page) printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n", current->comm, page_to_pfn(page)); - dump_page(page); + dump_page_badflags(page, reason, bad_flags); print_modules(); dump_stack(); @@ -383,7 +383,7 @@ static int destroy_compound_page(struct page *page, unsigned long order) int bad = 0; if (unlikely(compound_order(page) != order)) { - bad_page(page); + bad_page(page, "wrong compound order", 0); bad++; } @@ -392,8 +392,11 @@ static int destroy_compound_page(struct page *page, unsigned long order) for (i = 1; i < nr_pages; i++) { struct page *p = page + i; - if (unlikely(!PageTail(p) || (p->first_page != page))) { - bad_page(page); + if (unlikely(!PageTail(p))) { + bad_page(page, "PageTail not set", 0); + bad++; + } else if (unlikely(p->first_page != page)) { + bad_page(page, "first_page not consistent", 0); bad++; } __ClearPageTail(p); @@ -618,12 +621,23 @@ out: static inline int free_pages_check(struct page *page) { - if (unlikely(page_mapcount(page) | - (page->mapping != NULL) | - (atomic_read(&page->_count) != 0) | - (page->flags & PAGE_FLAGS_CHECK_AT_FREE) | - (mem_cgroup_bad_page_check(page)))) { - bad_page(page); + char *bad_reason = NULL; + unsigned long bad_flags = 0; + + if (unlikely(page_mapcount(page))) + bad_reason = "nonzero mapcount"; + if (unlikely(page->mapping != NULL)) + bad_reason = "non-NULL mapping"; + if (unlikely(atomic_read(&page->_count) != 0)) + bad_reason = "nonzero _count"; + if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) { + bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; + bad_flags = PAGE_FLAGS_CHECK_AT_FREE; + } + if (unlikely(mem_cgroup_bad_page_check(page))) + bad_reason = "cgroup check failed"; + if (unlikely(bad_reason)) { + bad_page(page, bad_reason, bad_flags); return 1; } page_cpupid_reset_last(page); @@ -843,12 +857,23 @@ static inline void expand(struct zone *zone, struct page *page, */ static inline int check_new_page(struct page *page) { - if (unlikely(page_mapcount(page) | - (page->mapping != NULL) | - (atomic_read(&page->_count) != 0) | - (page->flags & PAGE_FLAGS_CHECK_AT_PREP) | - (mem_cgroup_bad_page_check(page)))) { - bad_page(page); + char *bad_reason = NULL; + unsigned long bad_flags = 0; + + if (unlikely(page_mapcount(page))) + bad_reason = "nonzero mapcount"; + if (unlikely(page->mapping != NULL)) + bad_reason = "non-NULL mapping"; + if (unlikely(atomic_read(&page->_count) != 0)) + bad_reason = "nonzero _count"; + if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) { + bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set"; + bad_flags = PAGE_FLAGS_CHECK_AT_PREP; + } + if (unlikely(mem_cgroup_bad_page_check(page))) + bad_reason = "cgroup check failed"; + if (unlikely(bad_reason)) { + bad_page(page, bad_reason, bad_flags); return 1; } return 0; @@ -6494,12 +6519,23 @@ static void dump_page_flags(unsigned long flags) printk(")\n"); } -void dump_page(struct page *page) +void dump_page_badflags(struct page *page, char *reason, unsigned long badflags) { printk(KERN_ALERT "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n", page, 
atomic_read(&page->_count), page_mapcount(page), page->mapping, page->index); dump_page_flags(page->flags); + if (reason) + pr_alert("page dumped because: %s\n", reason); + if (page->flags & badflags) { + pr_alert("bad because of flags:\n"); + dump_page_flags(page->flags & badflags); + } mem_cgroup_print_bad_page(page); } + +void dump_page(struct page *page, char *reason) +{ + dump_page_badflags(page, reason, 0); +} -- cgit v1.2.3 From 01cc2e58697e34c6ee9a40fb6cebc18bf5a1923f Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Thu, 23 Jan 2014 15:52:50 -0800 Subject: mm: munlock: fix potential race with THP page split Since commit ff6a6da60b89 ("mm: accelerate munlock() treatment of THP pages") munlock skips tail pages of a munlocked THP page. There is some attempt to prevent bad consequences of racing with a THP page split, but code inspection indicates that there are two problems that may lead to a non-fatal, yet wrong outcome. First, __split_huge_page_refcount() copies flags including PageMlocked from the head page to the tail pages. Clearing PageMlocked by munlock_vma_page() in the middle of this operation might result in part of tail pages left with PageMlocked flag. As the head page still appears to be a THP page until all tail pages are processed, munlock_vma_page() might think it munlocked the whole THP page and skip all the former tail pages. Before ff6a6da60, those pages would be cleared in further iterations of munlock_vma_pages_range(), but NR_MLOCK would still become undercounted (related the next point). Second, NR_MLOCK accounting is based on call to hpage_nr_pages() after the PageMlocked is cleared. The accounting might also become inconsistent due to race with __split_huge_page_refcount() - undercount when HUGE_PMD_NR is subtracted, but some tail pages are left with PageMlocked set and counted again (only possible before ff6a6da60) - overcount when hpage_nr_pages() sees a normal page (split has already finished), but the parallel split has meanwhile cleared PageMlocked from additional tail pages This patch prevents both problems via extending the scope of lru_lock in munlock_vma_page(). This is convenient because: - __split_huge_page_refcount() takes lru_lock for its whole operation - munlock_vma_page() typically takes lru_lock anyway for page isolation As this becomes a second function where page isolation is done with lru_lock already held, factor this out to a new __munlock_isolate_lru_page() function and clean up the code around. [akpm@linux-foundation.org: avoid a coding-style ugly] Signed-off-by: Vlastimil Babka Cc: Sasha Levin Cc: Michel Lespinasse Cc: Andrea Arcangeli Cc: Rik van Riel Cc: Mel Gorman Cc: Hugh Dickins Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mlock.c | 104 +++++++++++++++++++++++++++++++++++-------------------------- 1 file changed, 60 insertions(+), 44 deletions(-) (limited to 'mm') diff --git a/mm/mlock.c b/mm/mlock.c index 10819ed4df3e..b30adbe62034 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -90,6 +90,26 @@ void mlock_vma_page(struct page *page) } } +/* + * Isolate a page from LRU with optional get_page() pin. + * Assumes lru_lock already held and page already pinned. 
+ */ +static bool __munlock_isolate_lru_page(struct page *page, bool getpage) +{ + if (PageLRU(page)) { + struct lruvec *lruvec; + + lruvec = mem_cgroup_page_lruvec(page, page_zone(page)); + if (getpage) + get_page(page); + ClearPageLRU(page); + del_page_from_lru_list(page, lruvec, page_lru(page)); + return true; + } + + return false; +} + /* * Finish munlock after successful page isolation * @@ -126,9 +146,9 @@ static void __munlock_isolated_page(struct page *page) static void __munlock_isolation_failed(struct page *page) { if (PageUnevictable(page)) - count_vm_event(UNEVICTABLE_PGSTRANDED); + __count_vm_event(UNEVICTABLE_PGSTRANDED); else - count_vm_event(UNEVICTABLE_PGMUNLOCKED); + __count_vm_event(UNEVICTABLE_PGMUNLOCKED); } /** @@ -152,28 +172,34 @@ static void __munlock_isolation_failed(struct page *page) unsigned int munlock_vma_page(struct page *page) { unsigned int nr_pages; + struct zone *zone = page_zone(page); BUG_ON(!PageLocked(page)); - if (TestClearPageMlocked(page)) { - nr_pages = hpage_nr_pages(page); - mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); - if (!isolate_lru_page(page)) - __munlock_isolated_page(page); - else - __munlock_isolation_failed(page); - } else { - nr_pages = hpage_nr_pages(page); - } - /* - * Regardless of the original PageMlocked flag, we determine nr_pages - * after touching the flag. This leaves a possible race with a THP page - * split, such that a whole THP page was munlocked, but nr_pages == 1. - * Returning a smaller mask due to that is OK, the worst that can - * happen is subsequent useless scanning of the former tail pages. - * The NR_MLOCK accounting can however become broken. + * Serialize with any parallel __split_huge_page_refcount() which + * might otherwise copy PageMlocked to part of the tail pages before + * we clear it in the head page. It also stabilizes hpage_nr_pages(). */ + spin_lock_irq(&zone->lru_lock); + + nr_pages = hpage_nr_pages(page); + if (!TestClearPageMlocked(page)) + goto unlock_out; + + __mod_zone_page_state(zone, NR_MLOCK, -nr_pages); + + if (__munlock_isolate_lru_page(page, true)) { + spin_unlock_irq(&zone->lru_lock); + __munlock_isolated_page(page); + goto out; + } + __munlock_isolation_failed(page); + +unlock_out: + spin_unlock_irq(&zone->lru_lock); + +out: return nr_pages - 1; } @@ -310,34 +336,24 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) struct page *page = pvec->pages[i]; if (TestClearPageMlocked(page)) { - struct lruvec *lruvec; - int lru; - - if (PageLRU(page)) { - lruvec = mem_cgroup_page_lruvec(page, zone); - lru = page_lru(page); - /* - * We already have pin from follow_page_mask() - * so we can spare the get_page() here. - */ - ClearPageLRU(page); - del_page_from_lru_list(page, lruvec, lru); - } else { - __munlock_isolation_failed(page); - goto skip_munlock; - } - - } else { -skip_munlock: /* - * We won't be munlocking this page in the next phase - * but we still need to release the follow_page_mask() - * pin. We cannot do it under lru_lock however. If it's - * the last pin, __page_cache_release would deadlock. + * We already have pin from follow_page_mask() + * so we can spare the get_page() here. */ - pagevec_add(&pvec_putback, pvec->pages[i]); - pvec->pages[i] = NULL; + if (__munlock_isolate_lru_page(page, false)) + continue; + else + __munlock_isolation_failed(page); } + + /* + * We won't be munlocking this page in the next phase + * but we still need to release the follow_page_mask() + * pin. We cannot do it under lru_lock however. 
If it's + * the last pin, __page_cache_release() would deadlock. + */ + pagevec_add(&pvec_putback, pvec->pages[i]); + pvec->pages[i] = NULL; } delta_munlocked = -nr + pagevec_count(&pvec_putback); __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); -- cgit v1.2.3 From 8ff69e2c85f84b6b371e3c1d01207e73c0500125 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 23 Jan 2014 15:52:52 -0800 Subject: memcg: do not use vmalloc for mem_cgroup allocations The vmalloc was introduced by 33327948782b ("memcgroup: use vmalloc for mem_cgroup allocation"), because at that time MAX_NUMNODES was used for defining the per-node array in the mem_cgroup structure so that the structure could be huge even if the system had the only NUMA node. The situation was significantly improved by commit 45cf7ebd5a03 ("memcg: reduce the size of struct memcg 244-fold"), which made the size of the mem_cgroup structure calculated dynamically depending on the real number of NUMA nodes installed on the system (nr_node_ids), so now there is no point in using vmalloc here: the structure is allocated rarely and on most systems its size is about 1K. Signed-off-by: Vladimir Davydov Acked-by: Michal Hocko Cc: Glauber Costa Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 28 ++++++---------------------- 1 file changed, 6 insertions(+), 22 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 67dd2a881433..7890ce9d6bd1 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -49,7 +49,6 @@ #include #include #include -#include #include #include #include @@ -381,12 +380,6 @@ struct mem_cgroup { /* WARNING: nodeinfo must be the last member here */ }; -static size_t memcg_size(void) -{ - return sizeof(struct mem_cgroup) + - nr_node_ids * sizeof(struct mem_cgroup_per_node *); -} - /* internal only representation about the status of kmem accounting. */ enum { KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */ @@ -6405,14 +6398,12 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) static struct mem_cgroup *mem_cgroup_alloc(void) { struct mem_cgroup *memcg; - size_t size = memcg_size(); + size_t size; - /* Can be very big if nr_node_ids is very big */ - if (size < PAGE_SIZE) - memcg = kzalloc(size, GFP_KERNEL); - else - memcg = vzalloc(size); + size = sizeof(struct mem_cgroup); + size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); + memcg = kzalloc(size, GFP_KERNEL); if (!memcg) return NULL; @@ -6423,10 +6414,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void) return memcg; out_free: - if (size < PAGE_SIZE) - kfree(memcg); - else - vfree(memcg); + kfree(memcg); return NULL; } @@ -6444,7 +6432,6 @@ out_free: static void __mem_cgroup_free(struct mem_cgroup *memcg) { int node; - size_t size = memcg_size(); mem_cgroup_remove_from_trees(memcg); @@ -6465,10 +6452,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg) * the cgroup_lock. */ disarm_static_keys(memcg); - if (size < PAGE_SIZE) - kfree(memcg); - else - vfree(memcg); + kfree(memcg); } /* -- cgit v1.2.3 From 309381feaee564281c3d9e90fbca8963bb7428ad Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 23 Jan 2014 15:52:54 -0800 Subject: mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE Most of the VM_BUG_ON assertions are performed on a page. Usually, when one of these assertions fails we'll get a BUG_ON with a call stack and the registers. 
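The allocation being kept is just a fixed header plus an nr_node_ids sized trailing array of per-node pointers. A plain userspace illustration of the same sizing arithmetic (the structure names here are invented) is:

/* Userspace illustration of the header-plus-per-node-array sizing (names invented). */
#include <stdio.h>
#include <stdlib.h>

struct per_node_info {
        long counters[8];
};

struct group {
        long header[32];                        /* fixed part of the structure */
        struct per_node_info *nodeinfo[];       /* one pointer slot per node */
};

static struct group *group_alloc(int nr_node_ids)
{
        size_t size = sizeof(struct group) +
                      nr_node_ids * sizeof(struct per_node_info *);

        /* calloc() plays the role of kzalloc(): zeroed, ordinary heap memory. */
        return calloc(1, size);
}

int main(void)
{
        int nr_node_ids = 2;                    /* e.g. a two-node machine */
        struct group *g = group_alloc(nr_node_ids);

        printf("allocated %zu bytes for %d nodes\n",
               sizeof(struct group) + nr_node_ids * sizeof(struct per_node_info *),
               nr_node_ids);
        free(g);
        return 0;
}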
I've recently noticed based on the requests to add a small piece of code that dumps the page to various VM_BUG_ON sites that the page dump is quite useful to people debugging issues in mm. This patch adds a VM_BUG_ON_PAGE(cond, page) which beyond doing what VM_BUG_ON() does, also dumps the page before executing the actual BUG_ON. [akpm@linux-foundation.org: fix up includes] Signed-off-by: Sasha Levin Cc: "Kirill A. Shutemov" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/gup.c | 8 ++++---- include/linux/gfp.h | 1 + include/linux/hugetlb.h | 3 ++- include/linux/hugetlb_cgroup.h | 5 +++-- include/linux/mm.h | 29 +++++++++++++---------------- include/linux/mmdebug.h | 9 +++++++++ include/linux/page-flags.h | 10 +++++----- include/linux/pagemap.h | 10 +++++----- include/linux/percpu.h | 1 + mm/cleancache.c | 6 +++--- mm/compaction.c | 2 +- mm/filemap.c | 16 ++++++++-------- mm/huge_memory.c | 36 ++++++++++++++++++------------------ mm/hugetlb.c | 10 +++++----- mm/hugetlb_cgroup.c | 2 +- mm/internal.h | 10 +++++----- mm/ksm.c | 12 ++++++------ mm/memcontrol.c | 28 ++++++++++++++-------------- mm/memory.c | 8 ++++---- mm/migrate.c | 6 +++--- mm/mlock.c | 4 ++-- mm/page_alloc.c | 21 +++++++++++---------- mm/page_io.c | 4 ++-- mm/rmap.c | 10 +++++----- mm/shmem.c | 8 ++++---- mm/slub.c | 12 ++++++------ mm/swap.c | 36 ++++++++++++++++++------------------ mm/swap_state.c | 16 ++++++++-------- mm/swapfile.c | 8 ++++---- mm/vmscan.c | 20 ++++++++++---------- 30 files changed, 181 insertions(+), 170 deletions(-) (limited to 'mm') diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 0596e8e0cc19..207d9aef662d 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -108,8 +108,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, static inline void get_head_page_multiple(struct page *page, int nr) { - VM_BUG_ON(page != compound_head(page)); - VM_BUG_ON(page_count(page) == 0); + VM_BUG_ON_PAGE(page != compound_head(page), page); + VM_BUG_ON_PAGE(page_count(page) == 0, page); atomic_add(nr, &page->_count); SetPageReferenced(page); } @@ -135,7 +135,7 @@ static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr, head = pte_page(pte); page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); do { - VM_BUG_ON(compound_head(page) != head); + VM_BUG_ON_PAGE(compound_head(page) != head, page); pages[*nr] = page; if (PageTail(page)) get_huge_page_tail(page); @@ -212,7 +212,7 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr, head = pte_page(pte); page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT); do { - VM_BUG_ON(compound_head(page) != head); + VM_BUG_ON_PAGE(compound_head(page) != head, page); pages[*nr] = page; if (PageTail(page)) get_huge_page_tail(page); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 9b4dd491f7e8..0437439bc047 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -1,6 +1,7 @@ #ifndef __LINUX_GFP_H #define __LINUX_GFP_H +#include #include #include #include diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index d01cc972a1d9..8c43cc469d78 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -2,6 +2,7 @@ #define _LINUX_HUGETLB_H #include +#include #include #include #include @@ -354,7 +355,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, static inline struct hstate *page_hstate(struct page *page) { - VM_BUG_ON(!PageHuge(page)); + VM_BUG_ON_PAGE(!PageHuge(page), page); return size_to_hstate(PAGE_SIZE << compound_order(page)); } diff --git 
a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index ce8217f7b5c2..787bba3bf552 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -15,6 +15,7 @@ #ifndef _LINUX_HUGETLB_CGROUP_H #define _LINUX_HUGETLB_CGROUP_H +#include #include struct hugetlb_cgroup; @@ -28,7 +29,7 @@ struct hugetlb_cgroup; static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) { - VM_BUG_ON(!PageHuge(page)); + VM_BUG_ON_PAGE(!PageHuge(page), page); if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) return NULL; @@ -38,7 +39,7 @@ static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) static inline int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) { - VM_BUG_ON(!PageHuge(page)); + VM_BUG_ON_PAGE(!PageHuge(page), page); if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) return -1; diff --git a/include/linux/mm.h b/include/linux/mm.h index 03bbcb84d96e..d9992fc128ca 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -5,6 +5,7 @@ #ifdef __KERNEL__ +#include #include #include #include @@ -303,7 +304,7 @@ static inline int get_freepage_migratetype(struct page *page) */ static inline int put_page_testzero(struct page *page) { - VM_BUG_ON(atomic_read(&page->_count) == 0); + VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page); return atomic_dec_and_test(&page->_count); } @@ -364,7 +365,7 @@ static inline int is_vmalloc_or_module_addr(const void *x) static inline void compound_lock(struct page *page) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - VM_BUG_ON(PageSlab(page)); + VM_BUG_ON_PAGE(PageSlab(page), page); bit_spin_lock(PG_compound_lock, &page->flags); #endif } @@ -372,7 +373,7 @@ static inline void compound_lock(struct page *page) static inline void compound_unlock(struct page *page) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - VM_BUG_ON(PageSlab(page)); + VM_BUG_ON_PAGE(PageSlab(page), page); bit_spin_unlock(PG_compound_lock, &page->flags); #endif } @@ -447,7 +448,7 @@ static inline bool __compound_tail_refcounted(struct page *page) */ static inline bool compound_tail_refcounted(struct page *page) { - VM_BUG_ON(!PageHead(page)); + VM_BUG_ON_PAGE(!PageHead(page), page); return __compound_tail_refcounted(page); } @@ -456,9 +457,9 @@ static inline void get_huge_page_tail(struct page *page) /* * __split_huge_page_refcount() cannot run from under us. */ - VM_BUG_ON(!PageTail(page)); - VM_BUG_ON(page_mapcount(page) < 0); - VM_BUG_ON(atomic_read(&page->_count) != 0); + VM_BUG_ON_PAGE(!PageTail(page), page); + VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); + VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page); if (compound_tail_refcounted(page->first_page)) atomic_inc(&page->_mapcount); } @@ -474,7 +475,7 @@ static inline void get_page(struct page *page) * Getting a normal page or the head of a compound page * requires to already have an elevated page->_count. 
*/ - VM_BUG_ON(atomic_read(&page->_count) <= 0); + VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page); atomic_inc(&page->_count); } @@ -511,13 +512,13 @@ static inline int PageBuddy(struct page *page) static inline void __SetPageBuddy(struct page *page) { - VM_BUG_ON(atomic_read(&page->_mapcount) != -1); + VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); } static inline void __ClearPageBuddy(struct page *page) { - VM_BUG_ON(!PageBuddy(page)); + VM_BUG_ON_PAGE(!PageBuddy(page), page); atomic_set(&page->_mapcount, -1); } @@ -1401,7 +1402,7 @@ static inline bool ptlock_init(struct page *page) * slab code uses page->slab_cache and page->first_page (for tail * pages), which share storage with page->ptl. */ - VM_BUG_ON(*(unsigned long *)&page->ptl); + VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); if (!ptlock_alloc(page)) return false; spin_lock_init(ptlock_ptr(page)); @@ -1492,7 +1493,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page) static inline void pgtable_pmd_page_dtor(struct page *page) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - VM_BUG_ON(page->pmd_huge_pte); + VM_BUG_ON_PAGE(page->pmd_huge_pte, page); #endif ptlock_free(page); } @@ -2029,10 +2030,6 @@ extern void shake_page(struct page *p, int access); extern atomic_long_t num_poisoned_pages; extern int soft_offline_page(struct page *page, int flags); -extern void dump_page(struct page *page, char *reason); -extern void dump_page_badflags(struct page *page, char *reason, - unsigned long badflags); - #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) extern void clear_huge_page(struct page *page, unsigned long addr, diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 580bd587d916..5042c036dda9 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -1,10 +1,19 @@ #ifndef LINUX_MM_DEBUG_H #define LINUX_MM_DEBUG_H 1 +struct page; + +extern void dump_page(struct page *page, char *reason); +extern void dump_page_badflags(struct page *page, char *reason, + unsigned long badflags); + #ifdef CONFIG_DEBUG_VM #define VM_BUG_ON(cond) BUG_ON(cond) +#define VM_BUG_ON_PAGE(cond, page) \ + do { if (unlikely(cond)) { dump_page(page, NULL); BUG(); } } while (0) #else #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) +#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) #endif #ifdef CONFIG_DEBUG_VIRTUAL diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 98ada58f9942..e464b4e987e8 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -412,7 +412,7 @@ static inline void ClearPageCompound(struct page *page) */ static inline int PageTransHuge(struct page *page) { - VM_BUG_ON(PageTail(page)); + VM_BUG_ON_PAGE(PageTail(page), page); return PageHead(page); } @@ -460,25 +460,25 @@ static inline int PageTransTail(struct page *page) */ static inline int PageSlabPfmemalloc(struct page *page) { - VM_BUG_ON(!PageSlab(page)); + VM_BUG_ON_PAGE(!PageSlab(page), page); return PageActive(page); } static inline void SetPageSlabPfmemalloc(struct page *page) { - VM_BUG_ON(!PageSlab(page)); + VM_BUG_ON_PAGE(!PageSlab(page), page); SetPageActive(page); } static inline void __ClearPageSlabPfmemalloc(struct page *page) { - VM_BUG_ON(!PageSlab(page)); + VM_BUG_ON_PAGE(!PageSlab(page), page); __ClearPageActive(page); } static inline void ClearPageSlabPfmemalloc(struct page *page) { - VM_BUG_ON(!PageSlab(page)); + VM_BUG_ON_PAGE(!PageSlab(page), page); ClearPageActive(page); } diff --git 
a/include/linux/pagemap.h b/include/linux/pagemap.h index e3dea75a078b..1710d1b060ba 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -162,7 +162,7 @@ static inline int page_cache_get_speculative(struct page *page) * disabling preempt, and hence no need for the "speculative get" that * SMP requires. */ - VM_BUG_ON(page_count(page) == 0); + VM_BUG_ON_PAGE(page_count(page) == 0, page); atomic_inc(&page->_count); #else @@ -175,7 +175,7 @@ static inline int page_cache_get_speculative(struct page *page) return 0; } #endif - VM_BUG_ON(PageTail(page)); + VM_BUG_ON_PAGE(PageTail(page), page); return 1; } @@ -191,14 +191,14 @@ static inline int page_cache_add_speculative(struct page *page, int count) # ifdef CONFIG_PREEMPT_COUNT VM_BUG_ON(!in_atomic()); # endif - VM_BUG_ON(page_count(page) == 0); + VM_BUG_ON_PAGE(page_count(page) == 0, page); atomic_add(count, &page->_count); #else if (unlikely(!atomic_add_unless(&page->_count, count, 0))) return 0; #endif - VM_BUG_ON(PageCompound(page) && page != compound_head(page)); + VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page); return 1; } @@ -210,7 +210,7 @@ static inline int page_freeze_refs(struct page *page, int count) static inline void page_unfreeze_refs(struct page *page, int count) { - VM_BUG_ON(page_count(page) != 0); + VM_BUG_ON_PAGE(page_count(page) != 0, page); VM_BUG_ON(count == 0); atomic_set(&page->_count, count); diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 9e4761caa80c..e3817d2441b6 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -1,6 +1,7 @@ #ifndef __LINUX_PERCPU_H #define __LINUX_PERCPU_H +#include #include #include #include diff --git a/mm/cleancache.c b/mm/cleancache.c index 5875f48ce279..d0eac4350403 100644 --- a/mm/cleancache.c +++ b/mm/cleancache.c @@ -237,7 +237,7 @@ int __cleancache_get_page(struct page *page) goto out; } - VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); fake_pool_id = page->mapping->host->i_sb->cleancache_poolid; if (fake_pool_id < 0) goto out; @@ -279,7 +279,7 @@ void __cleancache_put_page(struct page *page) return; } - VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); fake_pool_id = page->mapping->host->i_sb->cleancache_poolid; if (fake_pool_id < 0) return; @@ -318,7 +318,7 @@ void __cleancache_invalidate_page(struct address_space *mapping, if (pool_id < 0) return; - VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); if (cleancache_get_key(mapping->host, &key) >= 0) { cleancache_ops->invalidate_page(pool_id, key, page->index); diff --git a/mm/compaction.c b/mm/compaction.c index 3a91a2ea3d34..e0ab02d70f13 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -601,7 +601,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, if (__isolate_lru_page(page, mode) != 0) continue; - VM_BUG_ON(PageTransCompound(page)); + VM_BUG_ON_PAGE(PageTransCompound(page), page); /* Successfully isolated */ cc->finished_update_migrate = true; diff --git a/mm/filemap.c b/mm/filemap.c index b7749a92021c..7a7f3e0db738 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -409,9 +409,9 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) { int error; - VM_BUG_ON(!PageLocked(old)); - VM_BUG_ON(!PageLocked(new)); - VM_BUG_ON(new->mapping); + VM_BUG_ON_PAGE(!PageLocked(old), old); + VM_BUG_ON_PAGE(!PageLocked(new), new); + VM_BUG_ON_PAGE(new->mapping, new); error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); if (!error) { @@ -461,8 
+461,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping, { int error; - VM_BUG_ON(!PageLocked(page)); - VM_BUG_ON(PageSwapBacked(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(PageSwapBacked(page), page); error = mem_cgroup_cache_charge(page, current->mm, gfp_mask & GFP_RECLAIM_MASK); @@ -607,7 +607,7 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue); */ void unlock_page(struct page *page) { - VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); clear_bit_unlock(PG_locked, &page->flags); smp_mb__after_clear_bit(); wake_up_page(page, PG_locked); @@ -760,7 +760,7 @@ repeat: page_cache_release(page); goto repeat; } - VM_BUG_ON(page->index != offset); + VM_BUG_ON_PAGE(page->index != offset, page); } return page; } @@ -1656,7 +1656,7 @@ retry_find: put_page(page); goto retry_find; } - VM_BUG_ON(page->index != offset); + VM_BUG_ON_PAGE(page->index != offset, page); /* * We have a locked page in the page cache, now we need to check diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 95d1acb0f3d2..25fab7150fa0 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -712,7 +712,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, pgtable_t pgtable; spinlock_t *ptl; - VM_BUG_ON(!PageCompound(page)); + VM_BUG_ON_PAGE(!PageCompound(page), page); pgtable = pte_alloc_one(mm, haddr); if (unlikely(!pgtable)) return VM_FAULT_OOM; @@ -893,7 +893,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, goto out; } src_page = pmd_page(pmd); - VM_BUG_ON(!PageHead(src_page)); + VM_BUG_ON_PAGE(!PageHead(src_page), src_page); get_page(src_page); page_dup_rmap(src_page); add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); @@ -1067,7 +1067,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, ptl = pmd_lock(mm, pmd); if (unlikely(!pmd_same(*pmd, orig_pmd))) goto out_free_pages; - VM_BUG_ON(!PageHead(page)); + VM_BUG_ON_PAGE(!PageHead(page), page); pmdp_clear_flush(vma, haddr, pmd); /* leave pmd empty until pte is filled */ @@ -1133,7 +1133,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, goto out_unlock; page = pmd_page(orig_pmd); - VM_BUG_ON(!PageCompound(page) || !PageHead(page)); + VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); if (page_mapcount(page) == 1) { pmd_t entry; entry = pmd_mkyoung(orig_pmd); @@ -1211,7 +1211,7 @@ alloc: add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); put_huge_zero_page(); } else { - VM_BUG_ON(!PageHead(page)); + VM_BUG_ON_PAGE(!PageHead(page), page); page_remove_rmap(page); put_page(page); } @@ -1249,7 +1249,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, goto out; page = pmd_page(*pmd); - VM_BUG_ON(!PageHead(page)); + VM_BUG_ON_PAGE(!PageHead(page), page); if (flags & FOLL_TOUCH) { pmd_t _pmd; /* @@ -1274,7 +1274,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, } } page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; - VM_BUG_ON(!PageCompound(page)); + VM_BUG_ON_PAGE(!PageCompound(page), page); if (flags & FOLL_GET) get_page_foll(page); @@ -1432,9 +1432,9 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, } else { page = pmd_page(orig_pmd); page_remove_rmap(page); - VM_BUG_ON(page_mapcount(page) < 0); + VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); - VM_BUG_ON(!PageHead(page)); + VM_BUG_ON_PAGE(!PageHead(page), page); atomic_long_dec(&tlb->mm->nr_ptes); spin_unlock(ptl); tlb_remove_page(tlb, page); @@ -2176,9 +2176,9 @@ 
static int __collapse_huge_page_isolate(struct vm_area_struct *vma, if (unlikely(!page)) goto out; - VM_BUG_ON(PageCompound(page)); - BUG_ON(!PageAnon(page)); - VM_BUG_ON(!PageSwapBacked(page)); + VM_BUG_ON_PAGE(PageCompound(page), page); + VM_BUG_ON_PAGE(!PageAnon(page), page); + VM_BUG_ON_PAGE(!PageSwapBacked(page), page); /* cannot use mapcount: can't collapse if there's a gup pin */ if (page_count(page) != 1) @@ -2201,8 +2201,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, } /* 0 stands for page_is_file_cache(page) == false */ inc_zone_page_state(page, NR_ISOLATED_ANON + 0); - VM_BUG_ON(!PageLocked(page)); - VM_BUG_ON(PageLRU(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(PageLRU(page), page); /* If there is no mapped pte young don't collapse the page */ if (pte_young(pteval) || PageReferenced(page) || @@ -2232,7 +2232,7 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page, } else { src_page = pte_page(pteval); copy_user_highpage(page, src_page, address, vma); - VM_BUG_ON(page_mapcount(src_page) != 1); + VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page); release_pte_page(src_page); /* * ptl mostly unnecessary, but preempt has to @@ -2311,7 +2311,7 @@ static struct page struct vm_area_struct *vma, unsigned long address, int node) { - VM_BUG_ON(*hpage); + VM_BUG_ON_PAGE(*hpage, *hpage); /* * Allocate the page while the vma is still valid and under * the mmap_sem read mode so there is no memory allocation @@ -2580,7 +2580,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, */ node = page_to_nid(page); khugepaged_node_load[node]++; - VM_BUG_ON(PageCompound(page)); + VM_BUG_ON_PAGE(PageCompound(page), page); if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) goto out_unmap; /* cannot use mapcount: can't collapse if there's a gup pin */ @@ -2876,7 +2876,7 @@ again: return; } page = pmd_page(*pmd); - VM_BUG_ON(!page_count(page)); + VM_BUG_ON_PAGE(!page_count(page), page); get_page(page); spin_unlock(ptl); mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 04306b9de90d..c01cb9fedb18 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -584,7 +584,7 @@ static void update_and_free_page(struct hstate *h, struct page *page) 1 << PG_active | 1 << PG_reserved | 1 << PG_private | 1 << PG_writeback); } - VM_BUG_ON(hugetlb_cgroup_from_page(page)); + VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page); set_compound_page_dtor(page, NULL); set_page_refcounted(page); arch_release_hugepage(page); @@ -1089,7 +1089,7 @@ retry: * no users -- drop the buddy allocator's reference. 
*/ put_page_testzero(page); - VM_BUG_ON(page_count(page)); + VM_BUG_ON_PAGE(page_count(page), page); enqueue_huge_page(h, page); } free: @@ -3503,7 +3503,7 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage) bool isolate_huge_page(struct page *page, struct list_head *list) { - VM_BUG_ON(!PageHead(page)); + VM_BUG_ON_PAGE(!PageHead(page), page); if (!get_page_unless_zero(page)) return false; spin_lock(&hugetlb_lock); @@ -3514,7 +3514,7 @@ bool isolate_huge_page(struct page *page, struct list_head *list) void putback_active_hugepage(struct page *page) { - VM_BUG_ON(!PageHead(page)); + VM_BUG_ON_PAGE(!PageHead(page), page); spin_lock(&hugetlb_lock); list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); spin_unlock(&hugetlb_lock); @@ -3523,7 +3523,7 @@ void putback_active_hugepage(struct page *page) bool is_hugepage_active(struct page *page) { - VM_BUG_ON(!PageHuge(page)); + VM_BUG_ON_PAGE(!PageHuge(page), page); /* * This function can be called for a tail page because the caller, * scan_movable_pages, scans through a given pfn-range which typically diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c index d747a84e09b0..cb00829bb466 100644 --- a/mm/hugetlb_cgroup.c +++ b/mm/hugetlb_cgroup.c @@ -390,7 +390,7 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage) if (hugetlb_cgroup_disabled()) return; - VM_BUG_ON(!PageHuge(oldhpage)); + VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage); spin_lock(&hugetlb_lock); h_cg = hugetlb_cgroup_from_page(oldhpage); set_hugetlb_cgroup(oldhpage, NULL); diff --git a/mm/internal.h b/mm/internal.h index a346ba120e42..dc95e979ae56 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -27,8 +27,8 @@ static inline void set_page_count(struct page *page, int v) */ static inline void set_page_refcounted(struct page *page) { - VM_BUG_ON(PageTail(page)); - VM_BUG_ON(atomic_read(&page->_count)); + VM_BUG_ON_PAGE(PageTail(page), page); + VM_BUG_ON_PAGE(atomic_read(&page->_count), page); set_page_count(page, 1); } @@ -46,7 +46,7 @@ static inline void __get_page_tail_foll(struct page *page, * speculative page access (like in * page_cache_get_speculative()) on tail pages. */ - VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0); + VM_BUG_ON_PAGE(atomic_read(&page->first_page->_count) <= 0, page); if (get_page_head) atomic_inc(&page->first_page->_count); get_huge_page_tail(page); @@ -71,7 +71,7 @@ static inline void get_page_foll(struct page *page) * Getting a normal page or the head of a compound page * requires to already have an elevated page->_count. */ - VM_BUG_ON(atomic_read(&page->_count) <= 0); + VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page); atomic_inc(&page->_count); } } @@ -173,7 +173,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma) static inline int mlocked_vma_newpage(struct vm_area_struct *vma, struct page *page) { - VM_BUG_ON(PageLRU(page)); + VM_BUG_ON_PAGE(PageLRU(page), page); if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) return 0; diff --git a/mm/ksm.c b/mm/ksm.c index 3df141e5f3e0..f91ddf5c3688 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1898,13 +1898,13 @@ int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) int ret = SWAP_AGAIN; int search_new_forks = 0; - VM_BUG_ON(!PageKsm(page)); + VM_BUG_ON_PAGE(!PageKsm(page), page); /* * Rely on the page lock to protect against concurrent modifications * to that page's node of the stable tree. 
*/ - VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); stable_node = page_stable_node(page); if (!stable_node) @@ -1958,13 +1958,13 @@ void ksm_migrate_page(struct page *newpage, struct page *oldpage) { struct stable_node *stable_node; - VM_BUG_ON(!PageLocked(oldpage)); - VM_BUG_ON(!PageLocked(newpage)); - VM_BUG_ON(newpage->mapping != oldpage->mapping); + VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); + VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); + VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage); stable_node = page_stable_node(newpage); if (stable_node) { - VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage)); + VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage); stable_node->kpfn = page_to_pfn(newpage); /* * newpage->mapping was set in advance; now we need smp_wmb() diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7890ce9d6bd1..72f2d90e7ef6 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2897,7 +2897,7 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) unsigned short id; swp_entry_t ent; - VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); pc = lookup_page_cgroup(page); lock_page_cgroup(pc); @@ -2931,7 +2931,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg, bool anon; lock_page_cgroup(pc); - VM_BUG_ON(PageCgroupUsed(pc)); + VM_BUG_ON_PAGE(PageCgroupUsed(pc), page); /* * we don't need page_cgroup_lock about tail pages, becase they are not * accessed by any other context at this point. @@ -2966,7 +2966,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg, if (lrucare) { if (was_on_lru) { lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup); - VM_BUG_ON(PageLRU(page)); + VM_BUG_ON_PAGE(PageLRU(page), page); SetPageLRU(page); add_page_to_lru_list(page, lruvec, page_lru(page)); } @@ -3780,7 +3780,7 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order) if (!memcg) return; - VM_BUG_ON(mem_cgroup_is_root(memcg)); + VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); memcg_uncharge_kmem(memcg, PAGE_SIZE << order); } #else @@ -3859,7 +3859,7 @@ static int mem_cgroup_move_account(struct page *page, bool anon = PageAnon(page); VM_BUG_ON(from == to); - VM_BUG_ON(PageLRU(page)); + VM_BUG_ON_PAGE(PageLRU(page), page); /* * The page is isolated from LRU. So, collapse function * will not handle this page. But page splitting can happen. @@ -3952,7 +3952,7 @@ static int mem_cgroup_move_parent(struct page *page, parent = root_mem_cgroup; if (nr_pages > 1) { - VM_BUG_ON(!PageTransHuge(page)); + VM_BUG_ON_PAGE(!PageTransHuge(page), page); flags = compound_lock_irqsave(page); } @@ -3986,7 +3986,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, if (PageTransHuge(page)) { nr_pages <<= compound_order(page); - VM_BUG_ON(!PageTransHuge(page)); + VM_BUG_ON_PAGE(!PageTransHuge(page), page); /* * Never OOM-kill a process for a huge page. The * fault handler will fall back to regular pages. 
@@ -4006,8 +4006,8 @@ int mem_cgroup_newpage_charge(struct page *page, { if (mem_cgroup_disabled()) return 0; - VM_BUG_ON(page_mapped(page)); - VM_BUG_ON(page->mapping && !PageAnon(page)); + VM_BUG_ON_PAGE(page_mapped(page), page); + VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page); VM_BUG_ON(!mm); return mem_cgroup_charge_common(page, mm, gfp_mask, MEM_CGROUP_CHARGE_TYPE_ANON); @@ -4211,7 +4211,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype, if (PageTransHuge(page)) { nr_pages <<= compound_order(page); - VM_BUG_ON(!PageTransHuge(page)); + VM_BUG_ON_PAGE(!PageTransHuge(page), page); } /* * Check if our page_cgroup is valid @@ -4303,7 +4303,7 @@ void mem_cgroup_uncharge_page(struct page *page) /* early check. */ if (page_mapped(page)) return; - VM_BUG_ON(page->mapping && !PageAnon(page)); + VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page); /* * If the page is in swap cache, uncharge should be deferred * to the swap path, which also properly accounts swap usage @@ -4323,8 +4323,8 @@ void mem_cgroup_uncharge_page(struct page *page) void mem_cgroup_uncharge_cache_page(struct page *page) { - VM_BUG_ON(page_mapped(page)); - VM_BUG_ON(page->mapping); + VM_BUG_ON_PAGE(page_mapped(page), page); + VM_BUG_ON_PAGE(page->mapping, page); __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false); } @@ -6880,7 +6880,7 @@ static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, enum mc_target_type ret = MC_TARGET_NONE; page = pmd_page(pmd); - VM_BUG_ON(!page || !PageHead(page)); + VM_BUG_ON_PAGE(!page || !PageHead(page), page); if (!move_anon()) return ret; pc = lookup_page_cgroup(page); diff --git a/mm/memory.c b/mm/memory.c index 71d70c082b98..be6a0c0d4ae0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -289,7 +289,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) return 0; batch = tlb->active; } - VM_BUG_ON(batch->nr > batch->max); + VM_BUG_ON_PAGE(batch->nr > batch->max, page); return batch->max - batch->nr; } @@ -2702,7 +2702,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, goto unwritable_page; } } else - VM_BUG_ON(!PageLocked(old_page)); + VM_BUG_ON_PAGE(!PageLocked(old_page), old_page); /* * Since we dropped the lock we need to revalidate @@ -3358,7 +3358,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (unlikely(!(ret & VM_FAULT_LOCKED))) lock_page(vmf.page); else - VM_BUG_ON(!PageLocked(vmf.page)); + VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page); /* * Should we do an early C-O-W break? @@ -3395,7 +3395,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, goto unwritable_page; } } else - VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); page_mkwrite = 1; } } diff --git a/mm/migrate.c b/mm/migrate.c index a8025befc323..4b3996eb7f0f 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -499,7 +499,7 @@ void migrate_page_copy(struct page *newpage, struct page *page) if (PageUptodate(page)) SetPageUptodate(newpage); if (TestClearPageActive(page)) { - VM_BUG_ON(PageUnevictable(page)); + VM_BUG_ON_PAGE(PageUnevictable(page), page); SetPageActive(newpage); } else if (TestClearPageUnevictable(page)) SetPageUnevictable(newpage); @@ -871,7 +871,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage, * free the metadata, so the page can be freed. 
*/ if (!page->mapping) { - VM_BUG_ON(PageAnon(page)); + VM_BUG_ON_PAGE(PageAnon(page), page); if (page_has_private(page)) { try_to_free_buffers(page); goto uncharge; @@ -1618,7 +1618,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) { int page_lru; - VM_BUG_ON(compound_order(page) && !PageTransHuge(page)); + VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); /* Avoid migrating to a node that is nearly full */ if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page))) diff --git a/mm/mlock.c b/mm/mlock.c index b30adbe62034..4e1a68162285 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -279,8 +279,8 @@ static int __mlock_posix_error_return(long retval) static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec, int *pgrescued) { - VM_BUG_ON(PageLRU(page)); - VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON_PAGE(PageLRU(page), page); + VM_BUG_ON_PAGE(!PageLocked(page), page); if (page_mapcount(page) <= 1 && page_evictable(page)) { pagevec_add(pvec, page); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1939f4446a36..f18f016cca80 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -509,12 +509,12 @@ static inline int page_is_buddy(struct page *page, struct page *buddy, return 0; if (page_is_guard(buddy) && page_order(buddy) == order) { - VM_BUG_ON(page_count(buddy) != 0); + VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); return 1; } if (PageBuddy(buddy) && page_order(buddy) == order) { - VM_BUG_ON(page_count(buddy) != 0); + VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); return 1; } return 0; @@ -564,8 +564,8 @@ static inline void __free_one_page(struct page *page, page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); - VM_BUG_ON(page_idx & ((1 << order) - 1)); - VM_BUG_ON(bad_range(zone, page)); + VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page); + VM_BUG_ON_PAGE(bad_range(zone, page), page); while (order < MAX_ORDER-1) { buddy_idx = __find_buddy_index(page_idx, order); @@ -827,7 +827,7 @@ static inline void expand(struct zone *zone, struct page *page, area--; high--; size >>= 1; - VM_BUG_ON(bad_range(zone, &page[size])); + VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); #ifdef CONFIG_DEBUG_PAGEALLOC if (high < debug_guardpage_minorder()) { @@ -980,7 +980,7 @@ int move_freepages(struct zone *zone, for (page = start_page; page <= end_page;) { /* Make sure we are not inadvertently changing nodes */ - VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone)); + VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); if (!pfn_valid_within(page_to_pfn(page))) { page++; @@ -1429,8 +1429,8 @@ void split_page(struct page *page, unsigned int order) { int i; - VM_BUG_ON(PageCompound(page)); - VM_BUG_ON(!page_count(page)); + VM_BUG_ON_PAGE(PageCompound(page), page); + VM_BUG_ON_PAGE(!page_count(page), page); #ifdef CONFIG_KMEMCHECK /* @@ -1577,7 +1577,7 @@ again: zone_statistics(preferred_zone, zone, gfp_flags); local_irq_restore(flags); - VM_BUG_ON(bad_range(zone, page)); + VM_BUG_ON_PAGE(bad_range(zone, page), page); if (prep_new_page(page, order, gfp_flags)) goto again; return page; @@ -6021,7 +6021,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags, pfn = page_to_pfn(page); bitmap = get_pageblock_bitmap(zone, pfn); bitidx = pfn_to_bitidx(zone, pfn); - VM_BUG_ON(!zone_spans_pfn(zone, pfn)); + VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page); for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) if (flags & value) @@ -6539,3 +6539,4 @@ void dump_page(struct page *page, char *reason) { 
dump_page_badflags(page, reason, 0); } +EXPORT_SYMBOL_GPL(dump_page); diff --git a/mm/page_io.c b/mm/page_io.c index 8c79a4764be0..7247be6114ac 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -320,8 +320,8 @@ int swap_readpage(struct page *page) int ret = 0; struct swap_info_struct *sis = page_swap_info(page); - VM_BUG_ON(!PageLocked(page)); - VM_BUG_ON(PageUptodate(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(PageUptodate(page), page); if (frontswap_load(page) == 0) { SetPageUptodate(page); unlock_page(page); diff --git a/mm/rmap.c b/mm/rmap.c index 962e2a1e13a0..2dcd3353c3f6 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -894,9 +894,9 @@ void page_move_anon_rmap(struct page *page, { struct anon_vma *anon_vma = vma->anon_vma; - VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON(!anon_vma); - VM_BUG_ON(page->index != linear_page_index(vma, address)); + VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page); anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; page->mapping = (struct address_space *) anon_vma; @@ -995,7 +995,7 @@ void do_page_add_anon_rmap(struct page *page, if (unlikely(PageKsm(page))) return; - VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); /* address might be in next vma when migration races vma_adjust */ if (first) __page_set_anon_rmap(page, vma, address, exclusive); @@ -1481,7 +1481,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags) .anon_lock = page_lock_anon_vma_read, }; - VM_BUG_ON(!PageHuge(page) && PageTransHuge(page)); + VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page); /* * During exec, a temporary VMA is setup and later moved. @@ -1533,7 +1533,7 @@ int try_to_munlock(struct page *page) }; - VM_BUG_ON(!PageLocked(page) || PageLRU(page)); + VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); ret = rmap_walk(page, &rwc); return ret; diff --git a/mm/shmem.c b/mm/shmem.c index 902a14842b74..8156f95ec0cf 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -285,8 +285,8 @@ static int shmem_add_to_page_cache(struct page *page, { int error; - VM_BUG_ON(!PageLocked(page)); - VM_BUG_ON(!PageSwapBacked(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(!PageSwapBacked(page), page); page_cache_get(page); page->mapping = mapping; @@ -491,7 +491,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, continue; if (!unfalloc || !PageUptodate(page)) { if (page->mapping == mapping) { - VM_BUG_ON(PageWriteback(page)); + VM_BUG_ON_PAGE(PageWriteback(page), page); truncate_inode_page(mapping, page); } } @@ -568,7 +568,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, lock_page(page); if (!unfalloc || !PageUptodate(page)) { if (page->mapping == mapping) { - VM_BUG_ON(PageWriteback(page)); + VM_BUG_ON_PAGE(PageWriteback(page), page); truncate_inode_page(mapping, page); } } diff --git a/mm/slub.c b/mm/slub.c index 545a170ebf9f..34bb8c65a2d8 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1559,7 +1559,7 @@ static inline void *acquire_slab(struct kmem_cache *s, new.freelist = freelist; } - VM_BUG_ON(new.frozen); + VM_BUG_ON_PAGE(new.frozen, &new); new.frozen = 1; if (!__cmpxchg_double_slab(s, page, @@ -1812,7 +1812,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, set_freepointer(s, freelist, prior); new.counters = counters; new.inuse--; - VM_BUG_ON(!new.frozen); + VM_BUG_ON_PAGE(!new.frozen, &new); } while (!__cmpxchg_double_slab(s, page, prior, counters, @@ -1840,7 +1840,7 @@ redo: 
old.freelist = page->freelist; old.counters = page->counters; - VM_BUG_ON(!old.frozen); + VM_BUG_ON_PAGE(!old.frozen, &old); /* Determine target state of the slab */ new.counters = old.counters; @@ -1952,7 +1952,7 @@ static void unfreeze_partials(struct kmem_cache *s, old.freelist = page->freelist; old.counters = page->counters; - VM_BUG_ON(!old.frozen); + VM_BUG_ON_PAGE(!old.frozen, &old); new.counters = old.counters; new.freelist = old.freelist; @@ -2225,7 +2225,7 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page) counters = page->counters; new.counters = counters; - VM_BUG_ON(!new.frozen); + VM_BUG_ON_PAGE(!new.frozen, &new); new.inuse = page->objects; new.frozen = freelist != NULL; @@ -2319,7 +2319,7 @@ load_freelist: * page is pointing to the page from which the objects are obtained. * That page must be frozen for per cpu allocations to work. */ - VM_BUG_ON(!c->page->frozen); + VM_BUG_ON_PAGE(!c->page->frozen, c->page); c->freelist = get_freepointer(s, freelist); c->tid = next_tid(c->tid); local_irq_restore(flags); diff --git a/mm/swap.c b/mm/swap.c index d1100b619e61..b31ba67d440a 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -57,7 +57,7 @@ static void __page_cache_release(struct page *page) spin_lock_irqsave(&zone->lru_lock, flags); lruvec = mem_cgroup_page_lruvec(page, zone); - VM_BUG_ON(!PageLRU(page)); + VM_BUG_ON_PAGE(!PageLRU(page), page); __ClearPageLRU(page); del_page_from_lru_list(page, lruvec, page_off_lru(page)); spin_unlock_irqrestore(&zone->lru_lock, flags); @@ -130,8 +130,8 @@ static void put_compound_page(struct page *page) * __split_huge_page_refcount cannot race * here. */ - VM_BUG_ON(!PageHead(page_head)); - VM_BUG_ON(page_mapcount(page) != 0); + VM_BUG_ON_PAGE(!PageHead(page_head), page_head); + VM_BUG_ON_PAGE(page_mapcount(page) != 0, page); if (put_page_testzero(page_head)) { /* * If this is the tail of a slab @@ -148,7 +148,7 @@ static void put_compound_page(struct page *page) * the compound page enters the buddy * allocator. */ - VM_BUG_ON(PageSlab(page_head)); + VM_BUG_ON_PAGE(PageSlab(page_head), page_head); __put_compound_page(page_head); } return; @@ -199,7 +199,7 @@ out_put_single: __put_single_page(page); return; } - VM_BUG_ON(page_head != page->first_page); + VM_BUG_ON_PAGE(page_head != page->first_page, page); /* * We can release the refcount taken by * get_page_unless_zero() now that @@ -207,12 +207,12 @@ out_put_single: * compound_lock. */ if (put_page_testzero(page_head)) - VM_BUG_ON(1); + VM_BUG_ON_PAGE(1, page_head); /* __split_huge_page_refcount will wait now */ - VM_BUG_ON(page_mapcount(page) <= 0); + VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page); atomic_dec(&page->_mapcount); - VM_BUG_ON(atomic_read(&page_head->_count) <= 0); - VM_BUG_ON(atomic_read(&page->_count) != 0); + VM_BUG_ON_PAGE(atomic_read(&page_head->_count) <= 0, page_head); + VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page); compound_unlock_irqrestore(page_head, flags); if (put_page_testzero(page_head)) { @@ -223,7 +223,7 @@ out_put_single: } } else { /* page_head is a dangling pointer */ - VM_BUG_ON(PageTail(page)); + VM_BUG_ON_PAGE(PageTail(page), page); goto out_put_single; } } @@ -264,7 +264,7 @@ bool __get_page_tail(struct page *page) * page. __split_huge_page_refcount * cannot race here. 
*/ - VM_BUG_ON(!PageHead(page_head)); + VM_BUG_ON_PAGE(!PageHead(page_head), page_head); __get_page_tail_foll(page, true); return true; } else { @@ -604,8 +604,8 @@ EXPORT_SYMBOL(__lru_cache_add); */ void lru_cache_add(struct page *page) { - VM_BUG_ON(PageActive(page) && PageUnevictable(page)); - VM_BUG_ON(PageLRU(page)); + VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page); + VM_BUG_ON_PAGE(PageLRU(page), page); __lru_cache_add(page); } @@ -846,7 +846,7 @@ void release_pages(struct page **pages, int nr, int cold) } lruvec = mem_cgroup_page_lruvec(page, zone); - VM_BUG_ON(!PageLRU(page)); + VM_BUG_ON_PAGE(!PageLRU(page), page); __ClearPageLRU(page); del_page_from_lru_list(page, lruvec, page_off_lru(page)); } @@ -888,9 +888,9 @@ void lru_add_page_tail(struct page *page, struct page *page_tail, { const int file = 0; - VM_BUG_ON(!PageHead(page)); - VM_BUG_ON(PageCompound(page_tail)); - VM_BUG_ON(PageLRU(page_tail)); + VM_BUG_ON_PAGE(!PageHead(page), page); + VM_BUG_ON_PAGE(PageCompound(page_tail), page); + VM_BUG_ON_PAGE(PageLRU(page_tail), page); VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&lruvec_zone(lruvec)->lru_lock)); @@ -929,7 +929,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, int active = PageActive(page); enum lru_list lru = page_lru(page); - VM_BUG_ON(PageLRU(page)); + VM_BUG_ON_PAGE(PageLRU(page), page); SetPageLRU(page); add_page_to_lru_list(page, lruvec, lru); diff --git a/mm/swap_state.c b/mm/swap_state.c index e6f15f8ca2af..98e85e9c2b2d 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -83,9 +83,9 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry) int error; struct address_space *address_space; - VM_BUG_ON(!PageLocked(page)); - VM_BUG_ON(PageSwapCache(page)); - VM_BUG_ON(!PageSwapBacked(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(PageSwapCache(page), page); + VM_BUG_ON_PAGE(!PageSwapBacked(page), page); page_cache_get(page); SetPageSwapCache(page); @@ -139,9 +139,9 @@ void __delete_from_swap_cache(struct page *page) swp_entry_t entry; struct address_space *address_space; - VM_BUG_ON(!PageLocked(page)); - VM_BUG_ON(!PageSwapCache(page)); - VM_BUG_ON(PageWriteback(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(!PageSwapCache(page), page); + VM_BUG_ON_PAGE(PageWriteback(page), page); entry.val = page_private(page); address_space = swap_address_space(entry); @@ -165,8 +165,8 @@ int add_to_swap(struct page *page, struct list_head *list) swp_entry_t entry; int err; - VM_BUG_ON(!PageLocked(page)); - VM_BUG_ON(!PageUptodate(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(!PageUptodate(page), page); entry = get_swap_page(); if (!entry.val) diff --git a/mm/swapfile.c b/mm/swapfile.c index 612a7c9795f6..d443dea95c27 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -906,7 +906,7 @@ int reuse_swap_page(struct page *page) { int count; - VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); if (unlikely(PageKsm(page))) return 0; count = page_mapcount(page); @@ -926,7 +926,7 @@ int reuse_swap_page(struct page *page) */ int try_to_free_swap(struct page *page) { - VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); if (!PageSwapCache(page)) return 0; @@ -2714,7 +2714,7 @@ struct swap_info_struct *page_swap_info(struct page *page) */ struct address_space *__page_file_mapping(struct page *page) { - VM_BUG_ON(!PageSwapCache(page)); + VM_BUG_ON_PAGE(!PageSwapCache(page), page); return page_swap_info(page)->swap_file->f_mapping; } 
EXPORT_SYMBOL_GPL(__page_file_mapping); @@ -2722,7 +2722,7 @@ EXPORT_SYMBOL_GPL(__page_file_mapping); pgoff_t __page_file_index(struct page *page) { swp_entry_t swap = { .val = page_private(page) }; - VM_BUG_ON(!PageSwapCache(page)); + VM_BUG_ON_PAGE(!PageSwapCache(page), page); return swp_offset(swap); } EXPORT_SYMBOL_GPL(__page_file_index); diff --git a/mm/vmscan.c b/mm/vmscan.c index eea668d9cff6..2254f36b74b8 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -603,7 +603,7 @@ void putback_lru_page(struct page *page) bool is_unevictable; int was_unevictable = PageUnevictable(page); - VM_BUG_ON(PageLRU(page)); + VM_BUG_ON_PAGE(PageLRU(page), page); redo: ClearPageUnevictable(page); @@ -794,8 +794,8 @@ static unsigned long shrink_page_list(struct list_head *page_list, if (!trylock_page(page)) goto keep; - VM_BUG_ON(PageActive(page)); - VM_BUG_ON(page_zone(page) != zone); + VM_BUG_ON_PAGE(PageActive(page), page); + VM_BUG_ON_PAGE(page_zone(page) != zone, page); sc->nr_scanned++; @@ -1079,14 +1079,14 @@ activate_locked: /* Not a candidate for swapping, so reclaim swap space. */ if (PageSwapCache(page) && vm_swap_full()) try_to_free_swap(page); - VM_BUG_ON(PageActive(page)); + VM_BUG_ON_PAGE(PageActive(page), page); SetPageActive(page); pgactivate++; keep_locked: unlock_page(page); keep: list_add(&page->lru, &ret_pages); - VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); + VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page); } free_hot_cold_page_list(&free_pages, 1); @@ -1240,7 +1240,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, page = lru_to_page(src); prefetchw_prev_lru_page(page, src, flags); - VM_BUG_ON(!PageLRU(page)); + VM_BUG_ON_PAGE(!PageLRU(page), page); switch (__isolate_lru_page(page, mode)) { case 0: @@ -1295,7 +1295,7 @@ int isolate_lru_page(struct page *page) { int ret = -EBUSY; - VM_BUG_ON(!page_count(page)); + VM_BUG_ON_PAGE(!page_count(page), page); if (PageLRU(page)) { struct zone *zone = page_zone(page); @@ -1366,7 +1366,7 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list) struct page *page = lru_to_page(page_list); int lru; - VM_BUG_ON(PageLRU(page)); + VM_BUG_ON_PAGE(PageLRU(page), page); list_del(&page->lru); if (unlikely(!page_evictable(page))) { spin_unlock_irq(&zone->lru_lock); @@ -1586,7 +1586,7 @@ static void move_active_pages_to_lru(struct lruvec *lruvec, page = lru_to_page(list); lruvec = mem_cgroup_page_lruvec(page, zone); - VM_BUG_ON(PageLRU(page)); + VM_BUG_ON_PAGE(PageLRU(page), page); SetPageLRU(page); nr_pages = hpage_nr_pages(page); @@ -3701,7 +3701,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages) if (page_evictable(page)) { enum lru_list lru = page_lru_base_type(page); - VM_BUG_ON(PageActive(page)); + VM_BUG_ON_PAGE(PageActive(page), page); ClearPageUnevictable(page); del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE); add_page_to_lru_list(page, lruvec, lru); -- cgit v1.2.3 From 3965fc3652244651006ebb31c8c45318ce84818f Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 23 Jan 2014 15:52:55 -0800 Subject: slab: clean up kmem_cache_create_memcg() error handling Currently kmem_cache_create_memcg() backoffs on failure inside conditionals, without using gotos. This results in the rollback code duplication, which makes the function look cumbersome even though on error we should only free the allocated cache. 
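As a reference for the idiom (a minimal userspace sketch with hypothetical names, not the actual mm/slab_common.c code; the real conversion follows in the diff below), goto-based unwinding keeps a single rollback path instead of repeating the cleanup inside each conditional:

	/* Illustrative only: hypothetical cache_create(), not kernel code. */
	#include <stdlib.h>
	#include <string.h>

	struct cache {
		char *name;
		void *data;
	};

	static struct cache *cache_create(const char *name, size_t size)
	{
		struct cache *c = calloc(1, sizeof(*c));

		if (!c)
			return NULL;

		c->name = strdup(name);
		if (!c->name)
			goto out_free_cache;

		c->data = malloc(size);
		if (!c->data)
			goto out_free_name;

		return c;

	out_free_name:
		free(c->name);	/* undo completed steps once, in reverse order */
	out_free_cache:
		free(c);
		return NULL;
	}

The patch itself funnels all failures through a single out_free_cache label, as the hunks below show.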
Since in the next patch I am going to add yet another rollback function call on error path there, let's employ labels instead of conditionals for undoing any changes on failure to keep things clean. Signed-off-by: Vladimir Davydov Reviewed-by: Pekka Enberg Cc: Michal Hocko Cc: Glauber Costa Cc: Johannes Weiner Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab_common.c | 65 +++++++++++++++++++++++++++----------------------------- 1 file changed, 31 insertions(+), 34 deletions(-) (limited to 'mm') diff --git a/mm/slab_common.c b/mm/slab_common.c index 0b7bb399b0e4..f70df3ef6f1a 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -171,13 +171,14 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size, struct kmem_cache *parent_cache) { struct kmem_cache *s = NULL; - int err = 0; + int err; get_online_cpus(); mutex_lock(&slab_mutex); - if (!kmem_cache_sanity_check(memcg, name, size) == 0) - goto out_locked; + err = kmem_cache_sanity_check(memcg, name, size); + if (err) + goto out_unlock; /* * Some allocators will constraint the set of valid flags to a subset @@ -189,45 +190,38 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size, s = __kmem_cache_alias(memcg, name, size, align, flags, ctor); if (s) - goto out_locked; + goto out_unlock; + err = -ENOMEM; s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); - if (s) { - s->object_size = s->size = size; - s->align = calculate_alignment(flags, align, size); - s->ctor = ctor; + if (!s) + goto out_unlock; - if (memcg_register_cache(memcg, s, parent_cache)) { - kmem_cache_free(kmem_cache, s); - err = -ENOMEM; - goto out_locked; - } + s->object_size = s->size = size; + s->align = calculate_alignment(flags, align, size); + s->ctor = ctor; - s->name = kstrdup(name, GFP_KERNEL); - if (!s->name) { - kmem_cache_free(kmem_cache, s); - err = -ENOMEM; - goto out_locked; - } + s->name = kstrdup(name, GFP_KERNEL); + if (!s->name) + goto out_free_cache; - err = __kmem_cache_create(s, flags); - if (!err) { - s->refcount = 1; - list_add(&s->list, &slab_caches); - memcg_cache_list_add(memcg, s); - } else { - kfree(s->name); - kmem_cache_free(kmem_cache, s); - } - } else - err = -ENOMEM; + err = memcg_register_cache(memcg, s, parent_cache); + if (err) + goto out_free_cache; + + err = __kmem_cache_create(s, flags); + if (err) + goto out_free_cache; + + s->refcount = 1; + list_add(&s->list, &slab_caches); + memcg_cache_list_add(memcg, s); -out_locked: +out_unlock: mutex_unlock(&slab_mutex); put_online_cpus(); if (err) { - if (flags & SLAB_PANIC) panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n", name, err); @@ -236,11 +230,14 @@ out_locked: name, err); dump_stack(); } - return NULL; } - return s; + +out_free_cache: + kfree(s->name); + kmem_cache_free(kmem_cache, s); + goto out_unlock; } struct kmem_cache * -- cgit v1.2.3 From 363a044f739b0f07a8c063b838c5528d10720e02 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 23 Jan 2014 15:52:56 -0800 Subject: memcg, slab: kmem_cache_create_memcg(): fix memleak on fail path We do not free the cache's memcg_params if __kmem_cache_create fails. Fix this. Plus, rename memcg_register_cache() to memcg_alloc_cache_params(), because it actually does not register the cache anywhere, but simply initialize kmem_cache::memcg_params. 
[akpm@linux-foundation.org: fix build] Signed-off-by: Vladimir Davydov Cc: Michal Hocko Cc: Glauber Costa Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Cc: Pekka Enberg Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 14 +++++++++----- mm/memcontrol.c | 11 ++++++++--- mm/slab_common.c | 3 ++- 3 files changed, 19 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index b3e7a667e03c..284daff507fb 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -497,8 +497,9 @@ void __memcg_kmem_commit_charge(struct page *page, void __memcg_kmem_uncharge_pages(struct page *page, int order); int memcg_cache_id(struct mem_cgroup *memcg); -int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, - struct kmem_cache *root_cache); +int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, + struct kmem_cache *root_cache); +void memcg_free_cache_params(struct kmem_cache *s); void memcg_release_cache(struct kmem_cache *cachep); void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep); @@ -640,13 +641,16 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) return -1; } -static inline int -memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, - struct kmem_cache *root_cache) +static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg, + struct kmem_cache *s, struct kmem_cache *root_cache) { return 0; } +static inline void memcg_free_cache_params(struct kmem_cache *s) +{ +} + static inline void memcg_release_cache(struct kmem_cache *cachep) { } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 72f2d90e7ef6..b8ebe71f872d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3231,8 +3231,8 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups) return 0; } -int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, - struct kmem_cache *root_cache) +int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, + struct kmem_cache *root_cache) { size_t size; @@ -3260,6 +3260,11 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, return 0; } +void memcg_free_cache_params(struct kmem_cache *s) +{ + kfree(s->memcg_params); +} + void memcg_release_cache(struct kmem_cache *s) { struct kmem_cache *root; @@ -3288,7 +3293,7 @@ void memcg_release_cache(struct kmem_cache *s) css_put(&memcg->css); out: - kfree(s->memcg_params); + memcg_free_cache_params(s); } /* diff --git a/mm/slab_common.c b/mm/slab_common.c index f70df3ef6f1a..70f9e249ac30 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -205,7 +205,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size, if (!s->name) goto out_free_cache; - err = memcg_register_cache(memcg, s, parent_cache); + err = memcg_alloc_cache_params(memcg, s, parent_cache); if (err) goto out_free_cache; @@ -235,6 +235,7 @@ out_unlock: return s; out_free_cache: + memcg_free_cache_params(s); kfree(s->name); kmem_cache_free(kmem_cache, s); goto out_unlock; -- cgit v1.2.3 From 1aa13254259bdef0bca723849ab3bab308d2f0c3 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 23 Jan 2014 15:52:58 -0800 Subject: memcg, slab: clean up memcg cache initialization/destruction Currently, we have rather a messy function set relating to per-memcg kmem cache initialization/destruction. Per-memcg caches are created in memcg_create_kmem_cache(). 
This function calls kmem_cache_create_memcg() to allocate and initialize a kmem cache and then "registers" the new cache in the memcg_params::memcg_caches array of the parent cache. During its work-flow, kmem_cache_create_memcg() executes the following memcg-related functions: - memcg_alloc_cache_params(), to initialize memcg_params of the newly created cache; - memcg_cache_list_add(), to add the new cache to the memcg_slab_caches list. On the other hand, kmem_cache_destroy() called on a cache destruction only calls memcg_release_cache(), which does all the work: it cleans the reference to the cache in its parent's memcg_params::memcg_caches, removes the cache from the memcg_slab_caches list, and frees memcg_params. Such an inconsistency between destruction and initialization paths make the code difficult to read, so let's clean this up a bit. This patch moves all the code relating to registration of per-memcg caches (adding to memcg list, setting the pointer to a cache from its parent) to the newly created memcg_register_cache() and memcg_unregister_cache() functions making the initialization and destruction paths look symmetrical. Signed-off-by: Vladimir Davydov Cc: Michal Hocko Cc: Glauber Costa Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Cc: Pekka Enberg Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 9 +++---- mm/memcontrol.c | 64 ++++++++++++++++++++++------------------------ mm/slab_common.c | 5 ++-- 3 files changed, 37 insertions(+), 41 deletions(-) (limited to 'mm') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 284daff507fb..abd0113b6620 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -500,8 +500,8 @@ int memcg_cache_id(struct mem_cgroup *memcg); int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, struct kmem_cache *root_cache); void memcg_free_cache_params(struct kmem_cache *s); -void memcg_release_cache(struct kmem_cache *cachep); -void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep); +void memcg_register_cache(struct kmem_cache *s); +void memcg_unregister_cache(struct kmem_cache *s); int memcg_update_cache_size(struct kmem_cache *s, int num_groups); void memcg_update_array_size(int num_groups); @@ -651,12 +651,11 @@ static inline void memcg_free_cache_params(struct kmem_cache *s) { } -static inline void memcg_release_cache(struct kmem_cache *cachep) +static inline void memcg_register_cache(struct kmem_cache *s) { } -static inline void memcg_cache_list_add(struct mem_cgroup *memcg, - struct kmem_cache *s) +static inline void memcg_unregister_cache(struct kmem_cache *s) { } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b8ebe71f872d..739383cd3f70 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3095,16 +3095,6 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size) css_put(&memcg->css); } -void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep) -{ - if (!memcg) - return; - - mutex_lock(&memcg->slab_caches_mutex); - list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches); - mutex_unlock(&memcg->slab_caches_mutex); -} - /* * helper for acessing a memcg's index. It will be used as an index in the * child cache array in kmem_cache, and also to derive its name. 
This function @@ -3265,21 +3255,41 @@ void memcg_free_cache_params(struct kmem_cache *s) kfree(s->memcg_params); } -void memcg_release_cache(struct kmem_cache *s) +void memcg_register_cache(struct kmem_cache *s) { struct kmem_cache *root; struct mem_cgroup *memcg; int id; + if (is_root_cache(s)) + return; + + root = s->memcg_params->root_cache; + memcg = s->memcg_params->memcg; + id = memcg_cache_id(memcg); + + css_get(&memcg->css); + + mutex_lock(&memcg->slab_caches_mutex); + list_add(&s->memcg_params->list, &memcg->memcg_slab_caches); + mutex_unlock(&memcg->slab_caches_mutex); + + root->memcg_params->memcg_caches[id] = s; /* - * This happens, for instance, when a root cache goes away before we - * add any memcg. + * the readers won't lock, make sure everybody sees the updated value, + * so they won't put stuff in the queue again for no reason */ - if (!s->memcg_params) - return; + wmb(); +} - if (s->memcg_params->is_root_cache) - goto out; +void memcg_unregister_cache(struct kmem_cache *s) +{ + struct kmem_cache *root; + struct mem_cgroup *memcg; + int id; + + if (is_root_cache(s)) + return; memcg = s->memcg_params->memcg; id = memcg_cache_id(memcg); @@ -3292,8 +3302,6 @@ void memcg_release_cache(struct kmem_cache *s) mutex_unlock(&memcg->slab_caches_mutex); css_put(&memcg->css); -out: - memcg_free_cache_params(s); } /* @@ -3451,26 +3459,13 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, mutex_lock(&memcg_cache_mutex); new_cachep = cache_from_memcg_idx(cachep, idx); - if (new_cachep) { - css_put(&memcg->css); + if (new_cachep) goto out; - } new_cachep = kmem_cache_dup(memcg, cachep); - if (new_cachep == NULL) { + if (new_cachep == NULL) new_cachep = cachep; - css_put(&memcg->css); - goto out; - } - - atomic_set(&new_cachep->memcg_params->nr_pages , 0); - cachep->memcg_params->memcg_caches[idx] = new_cachep; - /* - * the readers won't lock, make sure everybody sees the updated value, - * so they won't put stuff in the queue again for no reason - */ - wmb(); out: mutex_unlock(&memcg_cache_mutex); return new_cachep; @@ -3550,6 +3545,7 @@ static void memcg_create_cache_work_func(struct work_struct *w) cw = container_of(w, struct create_work, work); memcg_create_kmem_cache(cw->memcg, cw->cachep); + css_put(&cw->memcg->css); kfree(cw); } diff --git a/mm/slab_common.c b/mm/slab_common.c index 70f9e249ac30..db24ec48b946 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -215,7 +215,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size, s->refcount = 1; list_add(&s->list, &slab_caches); - memcg_cache_list_add(memcg, s); + memcg_register_cache(s); out_unlock: mutex_unlock(&slab_mutex); @@ -265,7 +265,8 @@ void kmem_cache_destroy(struct kmem_cache *s) if (s->flags & SLAB_DESTROY_BY_RCU) rcu_barrier(); - memcg_release_cache(s); + memcg_unregister_cache(s); + memcg_free_cache_params(s); kfree(s->name); kmem_cache_free(kmem_cache, s); } else { -- cgit v1.2.3 From 959c8963fc6c8c9b97e80c55ce77105247040e7d Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 23 Jan 2014 15:52:59 -0800 Subject: memcg, slab: fix barrier usage when accessing memcg_caches Each root kmem_cache has pointers to per-memcg caches stored in its memcg_params::memcg_caches array. Whenever we want to allocate a slab for a memcg, we access this array to get per-memcg cache to allocate from (see memcg_kmem_get_cache()). The access must be lock-free for performance reasons, so we should use barriers to assert the kmem_cache is up-to-date. 
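Concretely, the pairing needed here is the standard lock-free publish/consume idiom. A minimal sketch of that idiom, with made-up helper names (publish_slot() and lookup_slot() are illustrative stand-ins, not functions from mm/memcontrol.c):

static void publish_slot(struct kmem_cache **slots, int idx,
			 struct kmem_cache *new)
{
	/* ... fully initialize *new here ... */

	/*
	 * Order the initializing stores before the store that makes the
	 * object visible to lock-free readers.
	 */
	smp_wmb();
	slots[idx] = new;
}

static struct kmem_cache *lookup_slot(struct kmem_cache **slots, int idx)
{
	struct kmem_cache *s = slots[idx];

	/* Pairs with the smp_wmb() in publish_slot(). */
	smp_read_barrier_depends();
	return s;
}

The two barriers are only useful as a pair, which is what the rest of this changelog and the patch below arrange.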
First, we should place a write barrier immediately before setting the pointer to it in the memcg_caches array in order to make sure nobody will see a partially initialized object. Second, we should issue a read barrier before dereferencing the pointer to conform to the write barrier. However, currently the barrier usage looks rather strange. We have a write barrier *after* setting the pointer and a read barrier *before* reading the pointer, which is incorrect. This patch fixes this. Signed-off-by: Vladimir Davydov Cc: Michal Hocko Cc: Glauber Costa Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Cc: Pekka Enberg Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 24 ++++++++++-------------- mm/slab.h | 12 +++++++++++- 2 files changed, 21 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 739383cd3f70..322d18dc17f0 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3274,12 +3274,14 @@ void memcg_register_cache(struct kmem_cache *s) list_add(&s->memcg_params->list, &memcg->memcg_slab_caches); mutex_unlock(&memcg->slab_caches_mutex); - root->memcg_params->memcg_caches[id] = s; /* - * the readers won't lock, make sure everybody sees the updated value, - * so they won't put stuff in the queue again for no reason + * Since readers won't lock (see cache_from_memcg_idx()), we need a + * barrier here to ensure nobody will see the kmem_cache partially + * initialized. */ - wmb(); + smp_wmb(); + + root->memcg_params->memcg_caches[id] = s; } void memcg_unregister_cache(struct kmem_cache *s) @@ -3605,7 +3607,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) { struct mem_cgroup *memcg; - int idx; + struct kmem_cache *memcg_cachep; VM_BUG_ON(!cachep->memcg_params); VM_BUG_ON(!cachep->memcg_params->is_root_cache); @@ -3619,15 +3621,9 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, if (!memcg_can_account_kmem(memcg)) goto out; - idx = memcg_cache_id(memcg); - - /* - * barrier to mare sure we're always seeing the up to date value. The - * code updating memcg_caches will issue a write barrier to match this. - */ - read_barrier_depends(); - if (likely(cache_from_memcg_idx(cachep, idx))) { - cachep = cache_from_memcg_idx(cachep, idx); + memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg)); + if (likely(memcg_cachep)) { + cachep = memcg_cachep; goto out; } diff --git a/mm/slab.h b/mm/slab.h index 0859c4241ba1..72d1f9df71bd 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -163,9 +163,19 @@ static inline const char *cache_name(struct kmem_cache *s) static inline struct kmem_cache * cache_from_memcg_idx(struct kmem_cache *s, int idx) { + struct kmem_cache *cachep; + if (!s->memcg_params) return NULL; - return s->memcg_params->memcg_caches[idx]; + cachep = s->memcg_params->memcg_caches[idx]; + + /* + * Make sure we will access the up-to-date value. The code updating + * memcg_caches issues a write barrier to match this (see + * memcg_register_cache()). + */ + smp_read_barrier_depends(); + return cachep; } static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) -- cgit v1.2.3 From 96403da244443d9842dbf290c2a02390b78a158e Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 23 Jan 2014 15:53:01 -0800 Subject: memcg: fix possible NULL deref while traversing memcg_slab_caches list All caches of the same memory cgroup are linked in the memcg_slab_caches list via kmem_cache::memcg_params::list. 
This list is traversed, for example, when we read memory.kmem.slabinfo. Since the list actually consists of memcg_cache_params objects, we have to convert an element of the list to a kmem_cache object using memcg_params_to_cache(), which obtains the pointer to the cache from the memcg_params::memcg_caches array of the corresponding root cache. That said the pointer to a kmem_cache in its parent's memcg_params must be initialized before adding the cache to the list, and cleared only after it has been unlinked. Currently it is vice-versa, which can result in a NULL ptr dereference while traversing the memcg_slab_caches list. This patch restores the correct order. Signed-off-by: Vladimir Davydov Cc: Michal Hocko Cc: Glauber Costa Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 322d18dc17f0..014a4f1acf1c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3270,9 +3270,6 @@ void memcg_register_cache(struct kmem_cache *s) css_get(&memcg->css); - mutex_lock(&memcg->slab_caches_mutex); - list_add(&s->memcg_params->list, &memcg->memcg_slab_caches); - mutex_unlock(&memcg->slab_caches_mutex); /* * Since readers won't lock (see cache_from_memcg_idx()), we need a @@ -3281,7 +3278,16 @@ void memcg_register_cache(struct kmem_cache *s) */ smp_wmb(); + /* + * Initialize the pointer to this cache in its parent's memcg_params + * before adding it to the memcg_slab_caches list, otherwise we can + * fail to convert memcg_params_to_cache() while traversing the list. + */ root->memcg_params->memcg_caches[id] = s; + + mutex_lock(&memcg->slab_caches_mutex); + list_add(&s->memcg_params->list, &memcg->memcg_slab_caches); + mutex_unlock(&memcg->slab_caches_mutex); } void memcg_unregister_cache(struct kmem_cache *s) @@ -3293,16 +3299,21 @@ void memcg_unregister_cache(struct kmem_cache *s) if (is_root_cache(s)) return; - memcg = s->memcg_params->memcg; - id = memcg_cache_id(memcg); - root = s->memcg_params->root_cache; - root->memcg_params->memcg_caches[id] = NULL; + memcg = s->memcg_params->memcg; + id = memcg_cache_id(memcg); mutex_lock(&memcg->slab_caches_mutex); list_del(&s->memcg_params->list); mutex_unlock(&memcg->slab_caches_mutex); + /* + * Clear the pointer to this cache in its parent's memcg_params only + * after removing it from the memcg_slab_caches list, otherwise we can + * fail to convert memcg_params_to_cache() while traversing the list. + */ + root->memcg_params->memcg_caches[id] = NULL; + css_put(&memcg->css); } -- cgit v1.2.3 From 2edefe1155b3ad3dc92065f6e1018d363525296e Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 23 Jan 2014 15:53:02 -0800 Subject: memcg, slab: fix races in per-memcg cache creation/destruction We obtain a per-memcg cache from a root kmem_cache by dereferencing an entry of the root cache's memcg_params::memcg_caches array. If we find no cache for a memcg there on allocation, we initiate the memcg cache creation (see memcg_kmem_get_cache()). The cache creation proceeds asynchronously in memcg_create_kmem_cache() in order to avoid lock clashes, so there can be several threads trying to create the same kmem_cache concurrently, but only one of them may succeed. However, due to a race in the code, it is not always true. 
The point is that the memcg_caches array can be relocated when we activate kmem accounting for a memcg (see memcg_update_all_caches(), memcg_update_cache_size()). If memcg_update_cache_size() and memcg_create_kmem_cache() proceed concurrently as described below, we can leak a kmem_cache. Asume two threads schedule creation of the same kmem_cache. One of them successfully creates it. Another one should fail then, but if memcg_create_kmem_cache() interleaves with memcg_update_cache_size() as follows, it won't: memcg_create_kmem_cache() memcg_update_cache_size() (called w/o mutexes held) (called with slab_mutex, set_limit_mutex held) ------------------------- ------------------------- mutex_lock(&memcg_cache_mutex) s->memcg_params=kzalloc(...) new_cachep=cache_from_memcg_idx(cachep,idx) // new_cachep==NULL => proceed to creation s->memcg_params->memcg_caches[i] =cur_params->memcg_caches[i] // kmem_cache_create_memcg takes slab_mutex // so we will hang around until // memcg_update_cache_size finishes, but // nothing will prevent it from succeeding so // memcg_caches[idx] will be overwritten in // memcg_register_cache! new_cachep = kmem_cache_create_memcg(...) mutex_unlock(&memcg_cache_mutex) Let's fix this by moving the check for existence of the memcg cache to kmem_cache_create_memcg() to be called under the slab_mutex and make it return NULL if so. A similar race is possible when destroying a memcg cache (see kmem_cache_destroy()). Since memcg_unregister_cache(), which clears the pointer in the memcg_caches array, is called w/o protection, we can race with memcg_update_cache_size() and omit clearing the pointer. Therefore memcg_unregister_cache() should be moved before we release the slab_mutex. Signed-off-by: Vladimir Davydov Cc: Michal Hocko Cc: Glauber Costa Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Cc: Pekka Enberg Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 23 ++++++++++++++--------- mm/slab_common.c | 14 +++++++++++++- 2 files changed, 27 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 014a4f1acf1c..d2da65c4cd84 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3264,6 +3264,12 @@ void memcg_register_cache(struct kmem_cache *s) if (is_root_cache(s)) return; + /* + * Holding the slab_mutex assures nobody will touch the memcg_caches + * array while we are modifying it. + */ + lockdep_assert_held(&slab_mutex); + root = s->memcg_params->root_cache; memcg = s->memcg_params->memcg; id = memcg_cache_id(memcg); @@ -3283,6 +3289,7 @@ void memcg_register_cache(struct kmem_cache *s) * before adding it to the memcg_slab_caches list, otherwise we can * fail to convert memcg_params_to_cache() while traversing the list. */ + VM_BUG_ON(root->memcg_params->memcg_caches[id]); root->memcg_params->memcg_caches[id] = s; mutex_lock(&memcg->slab_caches_mutex); @@ -3299,6 +3306,12 @@ void memcg_unregister_cache(struct kmem_cache *s) if (is_root_cache(s)) return; + /* + * Holding the slab_mutex assures nobody will touch the memcg_caches + * array while we are modifying it. + */ + lockdep_assert_held(&slab_mutex); + root = s->memcg_params->root_cache; memcg = s->memcg_params->memcg; id = memcg_cache_id(memcg); @@ -3312,6 +3325,7 @@ void memcg_unregister_cache(struct kmem_cache *s) * after removing it from the memcg_slab_caches list, otherwise we can * fail to convert memcg_params_to_cache() while traversing the list. 
*/ + VM_BUG_ON(!root->memcg_params->memcg_caches[id]); root->memcg_params->memcg_caches[id] = NULL; css_put(&memcg->css); @@ -3464,22 +3478,13 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, struct kmem_cache *cachep) { struct kmem_cache *new_cachep; - int idx; BUG_ON(!memcg_can_account_kmem(memcg)); - idx = memcg_cache_id(memcg); - mutex_lock(&memcg_cache_mutex); - new_cachep = cache_from_memcg_idx(cachep, idx); - if (new_cachep) - goto out; - new_cachep = kmem_cache_dup(memcg, cachep); if (new_cachep == NULL) new_cachep = cachep; - -out: mutex_unlock(&memcg_cache_mutex); return new_cachep; } diff --git a/mm/slab_common.c b/mm/slab_common.c index db24ec48b946..f34707eeacc7 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -180,6 +180,18 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size, if (err) goto out_unlock; + if (memcg) { + /* + * Since per-memcg caches are created asynchronously on first + * allocation (see memcg_kmem_get_cache()), several threads can + * try to create the same cache, but only one of them may + * succeed. Therefore if we get here and see the cache has + * already been created, we silently return NULL. + */ + if (cache_from_memcg_idx(parent_cache, memcg_cache_id(memcg))) + goto out_unlock; + } + /* * Some allocators will constraint the set of valid flags to a subset * of all flags. We expect them to define CACHE_CREATE_MASK in this @@ -261,11 +273,11 @@ void kmem_cache_destroy(struct kmem_cache *s) list_del(&s->list); if (!__kmem_cache_shutdown(s)) { + memcg_unregister_cache(s); mutex_unlock(&slab_mutex); if (s->flags & SLAB_DESTROY_BY_RCU) rcu_barrier(); - memcg_unregister_cache(s); memcg_free_cache_params(s); kfree(s->name); kmem_cache_free(kmem_cache, s); -- cgit v1.2.3 From 842e2873697e023d140a8905a41fcf39d4e546f1 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 23 Jan 2014 15:53:03 -0800 Subject: memcg: get rid of kmem_cache_dup() kmem_cache_dup() is only called from memcg_create_kmem_cache(). The latter, in fact, does nothing besides this, so let's fold kmem_cache_dup() into memcg_create_kmem_cache(). This patch also makes the memcg_cache_mutex private to memcg_create_kmem_cache(), because it is not used anywhere else. Signed-off-by: Vladimir Davydov Cc: Michal Hocko Cc: Glauber Costa Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 39 ++++++++------------------------------- 1 file changed, 8 insertions(+), 31 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d2da65c4cd84..80197e544764 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3427,27 +3427,16 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep) schedule_work(&cachep->memcg_params->destroy); } -/* - * This lock protects updaters, not readers. We want readers to be as fast as - * they can, and they will either see NULL or a valid cache value. Our model - * allow them to see NULL, in which case the root memcg will be selected. - * - * We need this lock because multiple allocations to the same cache from a non - * will span more than one worker. Only one of them can create the cache. 
- */ -static DEFINE_MUTEX(memcg_cache_mutex); - -/* - * Called with memcg_cache_mutex held - */ -static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg, - struct kmem_cache *s) +static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, + struct kmem_cache *s) { struct kmem_cache *new; static char *tmp_name = NULL; + static DEFINE_MUTEX(mutex); /* protects tmp_name */ - lockdep_assert_held(&memcg_cache_mutex); + BUG_ON(!memcg_can_account_kmem(memcg)); + mutex_lock(&mutex); /* * kmem_cache_create_memcg duplicates the given name and * cgroup_name for this name requires RCU context. @@ -3470,25 +3459,13 @@ static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg, if (new) new->allocflags |= __GFP_KMEMCG; + else + new = s; + mutex_unlock(&mutex); return new; } -static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, - struct kmem_cache *cachep) -{ - struct kmem_cache *new_cachep; - - BUG_ON(!memcg_can_account_kmem(memcg)); - - mutex_lock(&memcg_cache_mutex); - new_cachep = kmem_cache_dup(memcg, cachep); - if (new_cachep == NULL) - new_cachep = cachep; - mutex_unlock(&memcg_cache_mutex); - return new_cachep; -} - void kmem_cache_destroy_memcg_children(struct kmem_cache *s) { struct kmem_cache *c; -- cgit v1.2.3 From f717eb3abb5ea38f60e671dbfdbf512c2c93d22e Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 23 Jan 2014 15:53:05 -0800 Subject: slab: do not panic if we fail to create memcg cache There is no point in flooding logs with warnings or especially crashing the system if we fail to create a cache for a memcg. In this case we will be accounting the memcg allocation to the root cgroup until we succeed to create its own cache, but it isn't that critical. Signed-off-by: Vladimir Davydov Cc: Michal Hocko Cc: Glauber Costa Cc: Johannes Weiner Cc: Pekka Enberg Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab_common.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/slab_common.c b/mm/slab_common.c index f34707eeacc7..8e40321da091 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -233,7 +233,14 @@ out_unlock: mutex_unlock(&slab_mutex); put_online_cpus(); - if (err) { + /* + * There is no point in flooding logs with warnings or especially + * crashing the system if we fail to create a cache for a memcg. In + * this case we will be accounting the memcg allocation to the root + * cgroup until we succeed to create its own cache, but it isn't that + * critical. + */ + if (err && !memcg) { if (flags & SLAB_PANIC) panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n", name, err); -- cgit v1.2.3 From f8570263ee16eb1d5038b8e20d7db3a68bbb2b49 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 23 Jan 2014 15:53:06 -0800 Subject: memcg, slab: RCU protect memcg_params for root caches We relocate root cache's memcg_params whenever we need to grow the memcg_caches array to accommodate all kmem-active memory cgroups. Currently on relocation we free the old version immediately, which can lead to use-after-free, because the memcg_caches array is accessed lock-free (see cache_from_memcg_idx()). This patch fixes this by making memcg_params RCU-protected for root caches. 
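The fix is the usual RCU publish-and-retire pattern for a lock-free array. A minimal, self-contained sketch of that pattern (illustrative only: struct params, grow_array() and lookup() are made-up names, not the memcg_cache_params code itself):

struct params {
	struct rcu_head rcu_head;
	void *entries[0];
};

struct holder {
	struct params *params;	/* updated under a mutex, read under RCU */
};

/* Writer side: called with the update-side mutex held. */
static int grow_array(struct holder *h, int old_n, int new_n)
{
	struct params *old = h->params;
	struct params *new;
	int i;

	new = kzalloc(sizeof(*new) + new_n * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	for (i = 0; old && i < old_n; i++)
		new->entries[i] = old->entries[i];

	rcu_assign_pointer(h->params, new);	/* publish the new array */
	if (old)
		kfree_rcu(old, rcu_head);	/* freed only after a grace period */
	return 0;
}

/* Reader side: lock-free. */
static void *lookup(struct holder *h, int idx)
{
	struct params *p;
	void *entry;

	rcu_read_lock();
	p = rcu_dereference(h->params);
	entry = p ? p->entries[idx] : NULL;
	rcu_read_unlock();
	return entry;
}

The grace period is what closes the use-after-free window described above: a reader that fetched the old pointer just before the switch may keep dereferencing it safely until it leaves its rcu_read_lock() section.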
Signed-off-by: Vladimir Davydov Cc: Michal Hocko Cc: Glauber Costa Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Cc: Pekka Enberg Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab.h | 9 +++++++-- mm/memcontrol.c | 15 ++++++++------- mm/slab.h | 16 +++++++++++++++- 3 files changed, 30 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/include/linux/slab.h b/include/linux/slab.h index 1e2f4fe12773..a060142aa5f5 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -513,7 +513,9 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) * * Both the root cache and the child caches will have it. For the root cache, * this will hold a dynamically allocated array large enough to hold - * information about the currently limited memcgs in the system. + * information about the currently limited memcgs in the system. To allow the + * array to be accessed without taking any locks, on relocation we free the old + * version only after a grace period. * * Child caches will hold extra metadata needed for its operation. Fields are: * @@ -528,7 +530,10 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) struct memcg_cache_params { bool is_root_cache; union { - struct kmem_cache *memcg_caches[0]; + struct { + struct rcu_head rcu_head; + struct kmem_cache *memcg_caches[0]; + }; struct { struct mem_cgroup *memcg; struct list_head list; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 80197e544764..216659d4441a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3178,18 +3178,17 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups) if (num_groups > memcg_limited_groups_array_size) { int i; + struct memcg_cache_params *new_params; ssize_t size = memcg_caches_array_size(num_groups); size *= sizeof(void *); size += offsetof(struct memcg_cache_params, memcg_caches); - s->memcg_params = kzalloc(size, GFP_KERNEL); - if (!s->memcg_params) { - s->memcg_params = cur_params; + new_params = kzalloc(size, GFP_KERNEL); + if (!new_params) return -ENOMEM; - } - s->memcg_params->is_root_cache = true; + new_params->is_root_cache = true; /* * There is the chance it will be bigger than @@ -3203,7 +3202,7 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups) for (i = 0; i < memcg_limited_groups_array_size; i++) { if (!cur_params->memcg_caches[i]) continue; - s->memcg_params->memcg_caches[i] = + new_params->memcg_caches[i] = cur_params->memcg_caches[i]; } @@ -3216,7 +3215,9 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups) * bigger than the others. And all updates will reset this * anyway. */ - kfree(cur_params); + rcu_assign_pointer(s->memcg_params, new_params); + if (cur_params) + kfree_rcu(cur_params, rcu_head); } return 0; } diff --git a/mm/slab.h b/mm/slab.h index 72d1f9df71bd..8184a7cde272 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -160,14 +160,28 @@ static inline const char *cache_name(struct kmem_cache *s) return s->name; } +/* + * Note, we protect with RCU only the memcg_caches array, not per-memcg caches. + * That said the caller must assure the memcg's cache won't go away. Since once + * created a memcg's cache is destroyed only along with the root cache, it is + * true if we are going to allocate from the cache or hold a reference to the + * root cache by other means. Otherwise, we should hold either the slab_mutex + * or the memcg's slab_caches_mutex while calling this function and accessing + * the returned value. 
+ */ static inline struct kmem_cache * cache_from_memcg_idx(struct kmem_cache *s, int idx) { struct kmem_cache *cachep; + struct memcg_cache_params *params; if (!s->memcg_params) return NULL; - cachep = s->memcg_params->memcg_caches[idx]; + + rcu_read_lock(); + params = rcu_dereference(s->memcg_params); + cachep = params->memcg_caches[idx]; + rcu_read_unlock(); /* * Make sure we will access the up-to-date value. The code updating -- cgit v1.2.3 From 6de64beb3435ab8f2ac1428dd7dddad5dc679c4b Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 23 Jan 2014 15:53:08 -0800 Subject: memcg: remove KMEM_ACCOUNTED_ACTIVATED flag Currently we have two state bits in mem_cgroup::kmem_account_flags regarding kmem accounting activation, ACTIVATED and ACTIVE. We start kmem accounting only if both flags are set (memcg_can_account_kmem()), plus throughout the code there are several places where we check only the ACTIVE flag, but we never check the ACTIVATED flag alone. These flags are both set from memcg_update_kmem_limit() under the set_limit_mutex, the ACTIVE flag always being set after ACTIVATED, and they never get cleared. That said checking if both flags are set is equivalent to checking only for the ACTIVE flag, and since there is no ACTIVATED flag checks, we can safely remove the ACTIVATED flag, and nothing will change. Let's try to understand what was the reason for introducing these flags. The purpose of the ACTIVE flag is clear - it states that kmem should be accounting to the cgroup. The only requirement for it is that it should be set after we have fully initialized kmem accounting bits for the cgroup and patched all static branches relating to kmem accounting. Since we always check if static branch is enabled before actually considering if we should account (otherwise we wouldn't benefit from static branching), this guarantees us that we won't skip a commit or uncharge after a charge due to an unpatched static branch. Now let's move on to the ACTIVATED bit. As I proved in the beginning of this message, it is absolutely useless, and removing it will change nothing. So what was the reason introducing it? The ACTIVATED flag was introduced by commit a8964b9b84f9 ("memcg: use static branches when code not in use") in order to guarantee that static_key_slow_inc(&memcg_kmem_enabled_key) would be called only once for each memory cgroup when its kmem accounting was activated. The point was that at that time the memcg_update_kmem_limit() function's work-flow looked like this: bool must_inc_static_branch = false; cgroup_lock(); mutex_lock(&set_limit_mutex); if (!memcg->kmem_account_flags && val != RESOURCE_MAX) { /* The kmem limit is set for the first time */ ret = res_counter_set_limit(&memcg->kmem, val); memcg_kmem_set_activated(memcg); must_inc_static_branch = true; } else ret = res_counter_set_limit(&memcg->kmem, val); mutex_unlock(&set_limit_mutex); cgroup_unlock(); if (must_inc_static_branch) { /* We can't do this under cgroup_lock */ static_key_slow_inc(&memcg_kmem_enabled_key); memcg_kmem_set_active(memcg); } So that without the ACTIVATED flag we could race with other threads trying to set the limit and increment the static branching ref-counter more than once. Today we call the whole memcg_update_kmem_limit() function under the set_limit_mutex and this race is impossible. As now we understand why the ACTIVATED bit was introduced and why we don't need it now, and know that removing it will change nothing anyway, let's get rid of it. 
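The ordering argument above - patch the static branch first, set the ACTIVE bit second, and test them in that same order on the hot path - can be condensed into a short sketch (illustrative stand-ins only, not the real memcontrol.c symbols):

static struct static_key kmem_enabled_key = STATIC_KEY_INIT_FALSE;

enum { ACTIVE_BIT };

struct memcg_stub {
	unsigned long flags;
};

/* Slow path: enable kmem accounting for one group. */
static void activate(struct memcg_stub *memcg)
{
	static_key_slow_inc(&kmem_enabled_key);	/* patch call sites first */
	set_bit(ACTIVE_BIT, &memcg->flags);	/* ...then allow charging */
}

/* Hot path: decide whether to account at all. */
static bool should_account(struct memcg_stub *memcg)
{
	if (!static_key_false(&kmem_enabled_key))
		return false;			/* branch not patched yet */
	return test_bit(ACTIVE_BIT, &memcg->flags);
}

Because the ACTIVE bit only becomes visible after the key has been incremented, a charge can never happen through an unpatched call site, so no commit or uncharge after a charge can be skipped - exactly the guarantee that makes a second ACTIVATED bit redundant.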
Signed-off-by: Vladimir Davydov Cc: Michal Hocko Cc: Glauber Costa Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 29 ++--------------------------- 1 file changed, 2 insertions(+), 27 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 216659d4441a..706f7bc16db2 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -382,15 +382,10 @@ struct mem_cgroup { /* internal only representation about the status of kmem accounting. */ enum { - KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */ - KMEM_ACCOUNTED_ACTIVATED, /* static key enabled. */ + KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */ KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */ }; -/* We account when limit is on, but only after call sites are patched */ -#define KMEM_ACCOUNTED_MASK \ - ((1 << KMEM_ACCOUNTED_ACTIVE) | (1 << KMEM_ACCOUNTED_ACTIVATED)) - #ifdef CONFIG_MEMCG_KMEM static inline void memcg_kmem_set_active(struct mem_cgroup *memcg) { @@ -402,16 +397,6 @@ static bool memcg_kmem_is_active(struct mem_cgroup *memcg) return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags); } -static void memcg_kmem_set_activated(struct mem_cgroup *memcg) -{ - set_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags); -} - -static void memcg_kmem_clear_activated(struct mem_cgroup *memcg) -{ - clear_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags); -} - static void memcg_kmem_mark_dead(struct mem_cgroup *memcg) { /* @@ -2995,8 +2980,7 @@ static DEFINE_MUTEX(set_limit_mutex); static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg) { return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) && - (memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK) == - KMEM_ACCOUNTED_MASK; + memcg_kmem_is_active(memcg); } /* @@ -3120,19 +3104,10 @@ static int memcg_update_cache_sizes(struct mem_cgroup *memcg) 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); if (num < 0) return num; - /* - * After this point, kmem_accounted (that we test atomically in - * the beginning of this conditional), is no longer 0. This - * guarantees only one process will set the following boolean - * to true. We don't need test_and_set because we're protected - * by the set_limit_mutex anyway. - */ - memcg_kmem_set_activated(memcg); ret = memcg_update_all_caches(num+1); if (ret) { ida_simple_remove(&kmem_limited_groups, num); - memcg_kmem_clear_activated(memcg); return ret; } -- cgit v1.2.3 From d6441637709ba302905f1076f2afcb6d4ea3a901 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 23 Jan 2014 15:53:09 -0800 Subject: memcg: rework memcg_update_kmem_limit synchronization Currently we take both the memcg_create_mutex and the set_limit_mutex when we enable kmem accounting for a memory cgroup, which makes kmem activation events serialize with both memcg creations and other memcg limit updates (memory.limit, memory.memsw.limit). However, there is no point in such strict synchronization rules there. First, the set_limit_mutex was introduced to keep the memory.limit and memory.memsw.limit values in sync. Since memory.kmem.limit can be set independently of them, it is better to introduce a separate mutex to synchronize against concurrent kmem limit updates. Second, we take the memcg_create_mutex in order to make sure all children of this memcg will be kmem-active as well. For achieving that, it is enough to hold this mutex only while checking if memcg_has_children() though. 
This guarantees that if a child is added after we checked that the memcg has no children, the newly added cgroup will see its parent kmem-active (of course if the latter succeeded), and call kmem activation for itself. This patch simplifies the locking rules of memcg_update_kmem_limit() according to these considerations. [vdavydov@parallels.com: fix unintialized var warning] Signed-off-by: Vladimir Davydov Cc: Michal Hocko Cc: Glauber Costa Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 196 ++++++++++++++++++++++++++++++-------------------------- 1 file changed, 105 insertions(+), 91 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 706f7bc16db2..c8715056e1ef 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2977,6 +2977,8 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg, static DEFINE_MUTEX(set_limit_mutex); #ifdef CONFIG_MEMCG_KMEM +static DEFINE_MUTEX(activate_kmem_mutex); + static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg) { return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) && @@ -3089,34 +3091,6 @@ int memcg_cache_id(struct mem_cgroup *memcg) return memcg ? memcg->kmemcg_id : -1; } -/* - * This ends up being protected by the set_limit mutex, during normal - * operation, because that is its main call site. - * - * But when we create a new cache, we can call this as well if its parent - * is kmem-limited. That will have to hold set_limit_mutex as well. - */ -static int memcg_update_cache_sizes(struct mem_cgroup *memcg) -{ - int num, ret; - - num = ida_simple_get(&kmem_limited_groups, - 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); - if (num < 0) - return num; - - ret = memcg_update_all_caches(num+1); - if (ret) { - ida_simple_remove(&kmem_limited_groups, num); - return ret; - } - - memcg->kmemcg_id = num; - INIT_LIST_HEAD(&memcg->memcg_slab_caches); - mutex_init(&memcg->slab_caches_mutex); - return 0; -} - static size_t memcg_caches_array_size(int num_groups) { ssize_t size; @@ -3459,9 +3433,10 @@ void kmem_cache_destroy_memcg_children(struct kmem_cache *s) * * Still, we don't want anyone else freeing memcg_caches under our * noses, which can happen if a new memcg comes to life. As usual, - * we'll take the set_limit_mutex to protect ourselves against this. + * we'll take the activate_kmem_mutex to protect ourselves against + * this. */ - mutex_lock(&set_limit_mutex); + mutex_lock(&activate_kmem_mutex); for_each_memcg_cache_index(i) { c = cache_from_memcg_idx(s, i); if (!c) @@ -3484,7 +3459,7 @@ void kmem_cache_destroy_memcg_children(struct kmem_cache *s) cancel_work_sync(&c->memcg_params->destroy); kmem_cache_destroy(c); } - mutex_unlock(&set_limit_mutex); + mutex_unlock(&activate_kmem_mutex); } struct create_work { @@ -5148,11 +5123,23 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, return val; } -static int memcg_update_kmem_limit(struct cgroup_subsys_state *css, u64 val) -{ - int ret = -EINVAL; #ifdef CONFIG_MEMCG_KMEM - struct mem_cgroup *memcg = mem_cgroup_from_css(css); +/* should be called with activate_kmem_mutex held */ +static int __memcg_activate_kmem(struct mem_cgroup *memcg, + unsigned long long limit) +{ + int err = 0; + int memcg_id; + + if (memcg_kmem_is_active(memcg)) + return 0; + + /* + * We are going to allocate memory for data shared by all memory + * cgroups so let's stop accounting here. 
+ */ + memcg_stop_kmem_account(); + /* * For simplicity, we won't allow this to be disabled. It also can't * be changed if the cgroup has children already, or if tasks had @@ -5166,72 +5153,101 @@ static int memcg_update_kmem_limit(struct cgroup_subsys_state *css, u64 val) * of course permitted. */ mutex_lock(&memcg_create_mutex); - mutex_lock(&set_limit_mutex); - if (!memcg->kmem_account_flags && val != RES_COUNTER_MAX) { - if (cgroup_task_count(css->cgroup) || memcg_has_children(memcg)) { - ret = -EBUSY; - goto out; - } - ret = res_counter_set_limit(&memcg->kmem, val); - VM_BUG_ON(ret); + if (cgroup_task_count(memcg->css.cgroup) || memcg_has_children(memcg)) + err = -EBUSY; + mutex_unlock(&memcg_create_mutex); + if (err) + goto out; - ret = memcg_update_cache_sizes(memcg); - if (ret) { - res_counter_set_limit(&memcg->kmem, RES_COUNTER_MAX); - goto out; - } - static_key_slow_inc(&memcg_kmem_enabled_key); - /* - * setting the active bit after the inc will guarantee no one - * starts accounting before all call sites are patched - */ - memcg_kmem_set_active(memcg); - } else - ret = res_counter_set_limit(&memcg->kmem, val); + memcg_id = ida_simple_get(&kmem_limited_groups, + 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); + if (memcg_id < 0) { + err = memcg_id; + goto out; + } + + /* + * Make sure we have enough space for this cgroup in each root cache's + * memcg_params. + */ + err = memcg_update_all_caches(memcg_id + 1); + if (err) + goto out_rmid; + + memcg->kmemcg_id = memcg_id; + INIT_LIST_HEAD(&memcg->memcg_slab_caches); + mutex_init(&memcg->slab_caches_mutex); + + /* + * We couldn't have accounted to this cgroup, because it hasn't got the + * active bit set yet, so this should succeed. + */ + err = res_counter_set_limit(&memcg->kmem, limit); + VM_BUG_ON(err); + + static_key_slow_inc(&memcg_kmem_enabled_key); + /* + * Setting the active bit after enabling static branching will + * guarantee no one starts accounting before all call sites are + * patched. + */ + memcg_kmem_set_active(memcg); out: - mutex_unlock(&set_limit_mutex); - mutex_unlock(&memcg_create_mutex); -#endif + memcg_resume_kmem_account(); + return err; + +out_rmid: + ida_simple_remove(&kmem_limited_groups, memcg_id); + goto out; +} + +static int memcg_activate_kmem(struct mem_cgroup *memcg, + unsigned long long limit) +{ + int ret; + + mutex_lock(&activate_kmem_mutex); + ret = __memcg_activate_kmem(memcg, limit); + mutex_unlock(&activate_kmem_mutex); + return ret; +} + +static int memcg_update_kmem_limit(struct mem_cgroup *memcg, + unsigned long long val) +{ + int ret; + + if (!memcg_kmem_is_active(memcg)) + ret = memcg_activate_kmem(memcg, val); + else + ret = res_counter_set_limit(&memcg->kmem, val); return ret; } -#ifdef CONFIG_MEMCG_KMEM static int memcg_propagate_kmem(struct mem_cgroup *memcg) { int ret = 0; struct mem_cgroup *parent = parent_mem_cgroup(memcg); - if (!parent) - goto out; - memcg->kmem_account_flags = parent->kmem_account_flags; - /* - * When that happen, we need to disable the static branch only on those - * memcgs that enabled it. To achieve this, we would be forced to - * complicate the code by keeping track of which memcgs were the ones - * that actually enabled limits, and which ones got it from its - * parents. - * - * It is a lot simpler just to do static_key_slow_inc() on every child - * that is accounted. 
- */ - if (!memcg_kmem_is_active(memcg)) - goto out; + if (!parent) + return 0; + mutex_lock(&activate_kmem_mutex); /* - * __mem_cgroup_free() will issue static_key_slow_dec() because this - * memcg is active already. If the later initialization fails then the - * cgroup core triggers the cleanup so we do not have to do it here. + * If the parent cgroup is not kmem-active now, it cannot be activated + * after this point, because it has at least one child already. */ - static_key_slow_inc(&memcg_kmem_enabled_key); - - mutex_lock(&set_limit_mutex); - memcg_stop_kmem_account(); - ret = memcg_update_cache_sizes(memcg); - memcg_resume_kmem_account(); - mutex_unlock(&set_limit_mutex); -out: + if (memcg_kmem_is_active(parent)) + ret = __memcg_activate_kmem(memcg, RES_COUNTER_MAX); + mutex_unlock(&activate_kmem_mutex); return ret; } +#else +static int memcg_update_kmem_limit(struct mem_cgroup *memcg, + unsigned long long val) +{ + return -EINVAL; +} #endif /* CONFIG_MEMCG_KMEM */ /* @@ -5265,7 +5281,7 @@ static int mem_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft, else if (type == _MEMSWAP) ret = mem_cgroup_resize_memsw_limit(memcg, val); else if (type == _KMEM) - ret = memcg_update_kmem_limit(css, val); + ret = memcg_update_kmem_limit(memcg, val); else return -EINVAL; break; @@ -6499,7 +6515,6 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css)); - int error = 0; if (css->cgroup->id > MEM_CGROUP_ID_MAX) return -ENOSPC; @@ -6534,10 +6549,9 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) if (parent != root_mem_cgroup) mem_cgroup_subsys.broken_hierarchy = true; } - - error = memcg_init_kmem(memcg, &mem_cgroup_subsys); mutex_unlock(&memcg_create_mutex); - return error; + + return memcg_init_kmem(memcg, &mem_cgroup_subsys); } /* -- cgit v1.2.3 From 87379ec8c2b8ae0acd526b87d2240afca92a7505 Mon Sep 17 00:00:00 2001 From: Philipp Hachtmann Date: Thu, 23 Jan 2014 15:53:10 -0800 Subject: mm/nobootmem.c: add return value check in __alloc_memory_core_early() When memblock_reserve() fails because memblock.reserved.regions cannot be resized, the caller (e.g. alloc_bootmem()) is not informed of the failed allocation. Therefore alloc_bootmem() silently returns the same pointer again and again. This patch adds a check for the return value of memblock_reserve() in __alloc_memory_core(). Signed-off-by: Philipp Hachtmann Reviewed-by: Tejun Heo Cc: Joonsoo Kim Cc: Johannes Weiner Cc: Tang Chen Cc: Toshi Kani Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/nobootmem.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/nobootmem.c b/mm/nobootmem.c index 19121ceb8874..bb1a70cc97a7 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c @@ -45,7 +45,9 @@ static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align, if (!addr) return NULL; - memblock_reserve(addr, size); + if (memblock_reserve(addr, size)) + return NULL; + ptr = phys_to_virt(addr); memset(ptr, 0, size); /* -- cgit v1.2.3 From 5e270e254885893f8c82ab9b91caa648af3690df Mon Sep 17 00:00:00 2001 From: Philipp Hachtmann Date: Thu, 23 Jan 2014 15:53:11 -0800 Subject: mm: free memblock.memory in free_all_bootmem When calling free_all_bootmem() the free areas under memblock's control are released to the buddy allocator. Additionally the reserved list is freed if it was reallocated by memblock. The same should apply for the memory list. 
Signed-off-by: Philipp Hachtmann Reviewed-by: Tejun Heo Cc: Joonsoo Kim Cc: Johannes Weiner Cc: Tang Chen Cc: Toshi Kani Cc: Jianguo Wu Cc: Yinghai Lu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memblock.h | 1 + mm/memblock.c | 16 ++++++++++++++++ mm/nobootmem.c | 10 +++++++++- 3 files changed, 26 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index cd0274bebd4c..1ef66360f0b0 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -61,6 +61,7 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align); phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr); +phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr); void memblock_allow_resize(void); int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid); int memblock_add(phys_addr_t base, phys_addr_t size); diff --git a/mm/memblock.c b/mm/memblock.c index 1c2ef2c7edab..64ed2439cf75 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -291,6 +291,22 @@ phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info( memblock.reserved.max); } +#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK + +phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info( + phys_addr_t *addr) +{ + if (memblock.memory.regions == memblock_memory_init_regions) + return 0; + + *addr = __pa(memblock.memory.regions); + + return PAGE_ALIGN(sizeof(struct memblock_region) * + memblock.memory.max); +} + +#endif + /** * memblock_double_array - double the size of the memblock regions array * @type: memblock type of the regions array being doubled diff --git a/mm/nobootmem.c b/mm/nobootmem.c index bb1a70cc97a7..17c89023184f 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c @@ -122,11 +122,19 @@ static unsigned long __init free_low_memory_core_early(void) for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL) count += __free_memory_core(start, end); - /* free range that is used for reserved array if we allocate it */ + /* Free memblock.reserved array if it was allocated */ size = get_allocated_memblock_reserved_regions_info(&start); if (size) count += __free_memory_core(start, start + size); +#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK + + /* Free memblock.memory array if it was allocated */ + size = get_allocated_memblock_memory_regions_info(&start); + if (size) + count += __free_memory_core(start, start + size); +#endif + return count; } -- cgit v1.2.3 From 54a43d54988a3731d644fdeb7a1d6f46b4ac64c7 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 23 Jan 2014 15:53:13 -0800 Subject: numa: add a sysctl for numa_balancing Add a working sysctl to enable/disable automatic numa memory balancing at runtime. This allows us to track down performance problems with this feature and is generally a good idea. This was possible earlier through debugfs, but only with special debugging options set. Also fix the boot message. 
[akpm@linux-foundation.org: s/sched_numa_balancing/sysctl_numa_balancing/] Signed-off-by: Andi Kleen Acked-by: Mel Gorman Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched/sysctl.h | 4 ++++ kernel/sched/core.c | 24 +++++++++++++++++++++++- kernel/sysctl.c | 9 +++++++++ mm/mempolicy.c | 2 +- 4 files changed, 37 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 31e0193cb0c5..b13cf430764f 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -99,4 +99,8 @@ extern int sched_rt_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); +extern int sysctl_numa_balancing(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + #endif /* _SCHED_SYSCTL_H */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4d6964e49711..7fea865a810d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1770,7 +1770,29 @@ void set_numabalancing_state(bool enabled) numabalancing_enabled = enabled; } #endif /* CONFIG_SCHED_DEBUG */ -#endif /* CONFIG_NUMA_BALANCING */ + +#ifdef CONFIG_PROC_SYSCTL +int sysctl_numa_balancing(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct ctl_table t; + int err; + int state = numabalancing_enabled; + + if (write && !capable(CAP_SYS_ADMIN)) + return -EPERM; + + t = *table; + t.data = &state; + err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); + if (err < 0) + return err; + if (write) + set_numabalancing_state(state); + return err; +} +#endif +#endif /* * fork()/clone()-time setup: diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 332cefcdb04b..693eac39c202 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -389,6 +389,15 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "numa_balancing", + .data = NULL, /* filled in by handler */ + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sysctl_numa_balancing, + .extra1 = &zero, + .extra2 = &one, + }, #endif /* CONFIG_NUMA_BALANCING */ #endif /* CONFIG_SCHED_DEBUG */ { diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 0cd2c4d4e270..947293e76533 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2668,7 +2668,7 @@ static void __init check_numabalancing_enable(void) if (nr_node_ids > 1 && !numabalancing_override) { printk(KERN_INFO "Enabling automatic NUMA balancing. " - "Configure with numa_balancing= or sysctl"); + "Configure with numa_balancing= or the kernel.numa_balancing sysctl"); set_numabalancing_state(numabalancing_default); } } -- cgit v1.2.3 From 54b9dd14d09f24927285359a227aa363ce46089e Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Thu, 23 Jan 2014 15:53:14 -0800 Subject: mm/memory-failure.c: shift page lock from head page to tail page after thp split After thp split in hwpoison_user_mappings(), we hold page lock on the raw error page only between try_to_unmap, hence we are in danger of race condition. 
I found in the RHEL7 MCE-relay testing that we have "bad page" error when a memory error happens on a thp tail page used by qemu-kvm: Triggering MCE exception on CPU 10 mce: [Hardware Error]: Machine check events logged MCE exception done on CPU 10 MCE 0x38c535: Killing qemu-kvm:8418 due to hardware memory corruption MCE 0x38c535: dirty LRU page recovery: Recovered qemu-kvm[8418]: segfault at 20 ip 00007ffb0f0f229a sp 00007fffd6bc5240 error 4 in qemu-kvm[7ffb0ef14000+420000] BUG: Bad page state in process qemu-kvm pfn:38c400 page:ffffea000e310000 count:0 mapcount:0 mapping: (null) index:0x7ffae3c00 page flags: 0x2fffff0008001d(locked|referenced|uptodate|dirty|swapbacked) Modules linked in: hwpoison_inject mce_inject vhost_net macvtap macvlan ... CPU: 0 PID: 8418 Comm: qemu-kvm Tainted: G M -------------- 3.10.0-54.0.1.el7.mce_test_fixed.x86_64 #1 Hardware name: NEC NEC Express5800/R120b-1 [N8100-1719F]/MS-91E7-001, BIOS 4.6.3C19 02/10/2011 Call Trace: dump_stack+0x19/0x1b bad_page.part.59+0xcf/0xe8 free_pages_prepare+0x148/0x160 free_hot_cold_page+0x31/0x140 free_hot_cold_page_list+0x46/0xa0 release_pages+0x1c1/0x200 free_pages_and_swap_cache+0xad/0xd0 tlb_flush_mmu.part.46+0x4c/0x90 tlb_finish_mmu+0x55/0x60 exit_mmap+0xcb/0x170 mmput+0x67/0xf0 vhost_dev_cleanup+0x231/0x260 [vhost_net] vhost_net_release+0x3f/0x90 [vhost_net] __fput+0xe9/0x270 ____fput+0xe/0x10 task_work_run+0xc4/0xe0 do_exit+0x2bb/0xa40 do_group_exit+0x3f/0xa0 get_signal_to_deliver+0x1d0/0x6e0 do_signal+0x48/0x5e0 do_notify_resume+0x71/0xc0 retint_signal+0x48/0x8c The reason of this bug is that a page fault happens before unlocking the head page at the end of memory_failure(). This strange page fault is trying to access to address 0x20 and I'm not sure why qemu-kvm does this, but anyway as a result the SIGSEGV makes qemu-kvm exit and on the way we catch the bad page bug/warning because we try to free a locked page (which was the former head page.) To fix this, this patch suggests to shift page lock from head page to tail page just after thp split. SIGSEGV still happens, but it affects only error affected VMs, not a whole system. Signed-off-by: Naoya Horiguchi Cc: Andi Kleen Cc: Wanpeng Li Cc: [3.9+] # a3e0f9e47d5ef "mm/memory-failure.c: transfer page count from head page to tail page after split thp" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory-failure.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/memory-failure.c b/mm/memory-failure.c index b25ed321e667..4f08a2d61487 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -856,14 +856,14 @@ static int page_action(struct page_state *ps, struct page *p, * the pages and send SIGBUS to the processes if the data was dirty. */ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, - int trapno, int flags) + int trapno, int flags, struct page **hpagep) { enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; struct address_space *mapping; LIST_HEAD(tokill); int ret; int kill = 1, forcekill; - struct page *hpage = compound_head(p); + struct page *hpage = *hpagep; struct page *ppage; if (PageReserved(p) || PageSlab(p)) @@ -942,11 +942,14 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, * We pinned the head page for hwpoison handling, * now we split the thp and we are interested in * the hwpoisoned raw page, so move the refcount - * to it. + * to it. Similarly, page lock is shifted. 
*/ if (hpage != p) { put_page(hpage); get_page(p); + lock_page(p); + unlock_page(hpage); + *hpagep = p; } /* THP is split, so ppage should be the real poisoned page. */ ppage = p; @@ -964,17 +967,11 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, if (kill) collect_procs(ppage, &tokill); - if (hpage != ppage) - lock_page(ppage); - ret = try_to_unmap(ppage, ttu); if (ret != SWAP_SUCCESS) printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n", pfn, page_mapcount(ppage)); - if (hpage != ppage) - unlock_page(ppage); - /* * Now that the dirty bit has been propagated to the * struct page and all unmaps done we can decide if @@ -1193,8 +1190,12 @@ int memory_failure(unsigned long pfn, int trapno, int flags) /* * Now take care of user space mappings. * Abort on fail: __delete_from_page_cache() assumes unmapped page. + * + * When the raw error page is thp tail page, hpage points to the raw + * page after thp split. */ - if (hwpoison_user_mappings(p, pfn, trapno, flags) != SWAP_SUCCESS) { + if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage) + != SWAP_SUCCESS) { printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn); res = -EBUSY; goto out; -- cgit v1.2.3 From cc81717ed3bc6d4f3738d13a1e097437caada0e9 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Thu, 23 Jan 2014 15:53:15 -0800 Subject: mm: new_vma_page() cannot see NULL vma for hugetlb pages Commit 11c731e81bb0 ("mm/mempolicy: fix !vma in new_vma_page()") has removed BUG_ON(!vma) from new_vma_page which is partially correct because page_address_in_vma will return EFAULT for non-linear mappings and at least shared shmem might be mapped this way. The patch also tried to prevent NULL ptr for hugetlb pages which is not correct AFAICS because hugetlb pages cannot be mapped as VM_NONLINEAR and other conditions in page_address_in_vma seem to be legit and catch real bugs. This patch restores BUG_ON for PageHuge to catch potential issues when the to-be-migrated page is not setup properly. Signed-off-by: Michal Hocko Reviewed-by: Bob Liu Cc: Sasha Levin Cc: Wanpeng Li Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 947293e76533..463b7fbf0d1d 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1199,10 +1199,8 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int * } if (PageHuge(page)) { - if (vma) - return alloc_huge_page_noerr(vma, address, 1); - else - return NULL; + BUG_ON(!vma); + return alloc_huge_page_noerr(vma, address, 1); } /* * if !vma, alloc_page_vma() will use task or system default policy -- cgit v1.2.3 From da8c757b080ee84f219fa2368cb5dd23ac304fc0 Mon Sep 17 00:00:00 2001 From: Han Pingtian Date: Thu, 23 Jan 2014 15:53:17 -0800 Subject: mm: prevent setting of a value less than 0 to min_free_kbytes If echo -1 > /proc/vm/sys/min_free_kbytes, the system will hang. Changing proc_dointvec() to proc_dointvec_minmax() in the min_free_kbytes_sysctl_handler() can prevent this to happen. 
mhocko said: : You can still do echo $BIG_VALUE > /proc/vm/sys/min_free_kbytes and make : your machine unusable but I agree that proc_dointvec_minmax is more : suitable here as we already have: : : .proc_handler = min_free_kbytes_sysctl_handler, : .extra1 = &zero, : : It used to work properly but then 6fce56ec91b5 ("sysctl: Remove references : to ctl_name and strategy from the generic sysctl table") has removed : sysctl_intvec strategy and so extra1 is ignored. Signed-off-by: Han Pingtian Acked-by: Michal Hocko Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f18f016cca80..a818d568ddf3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5754,7 +5754,12 @@ module_init(init_per_zone_wmark_min) int min_free_kbytes_sysctl_handler(ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { - proc_dointvec(table, write, buffer, length, ppos); + int rc; + + rc = proc_dointvec_minmax(table, write, buffer, length, ppos); + if (rc) + return rc; + if (write) { user_min_free_kbytes = min_free_kbytes; setup_per_zone_wmarks(); -- cgit v1.2.3 From c980e66a556659f14da2294e1fc696e1352b5d00 Mon Sep 17 00:00:00 2001 From: Jianguo Wu Date: Thu, 23 Jan 2014 15:53:19 -0800 Subject: mm: do_mincore() cleanup Two cleanups: 1. remove redundant codes for hugetlb pages. 2. end = pmd_addr_end(addr, end) restricts [addr, end) within PMD_SIZE, this may increase do_mincore() calls, remove it. Signed-off-by: Jianguo Wu Acked-by: Johannes Weiner Cc: Minchan Kim Cc: qiuxishi Reviewed-by: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mincore.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'mm') diff --git a/mm/mincore.c b/mm/mincore.c index da2be56a7b8f..101623378fbf 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -225,13 +225,6 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v end = min(vma->vm_end, addr + (pages << PAGE_SHIFT)); - if (is_vm_hugetlb_page(vma)) { - mincore_hugetlb_page_range(vma, addr, end, vec); - return (end - addr) >> PAGE_SHIFT; - } - - end = pmd_addr_end(addr, end); - if (is_vm_hugetlb_page(vma)) mincore_hugetlb_page_range(vma, addr, end, vec); else -- cgit v1.2.3 From baae911b27b8dbee6830f4e3ef0fcf4dc8e9c07b Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Thu, 23 Jan 2014 15:53:21 -0800 Subject: sched/numa: fix setting of cpupid on page migration twice Commit 7851a45cd3f6 ("mm: numa: Copy cpupid on page migration") copiess over the cpupid at page migration time. It is unnecessary to set it again in migrate_misplaced_transhuge_page(). 
Signed-off-by: Wanpeng Li Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Mel Gorman Cc: Rik van Riel Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/migrate.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'mm') diff --git a/mm/migrate.c b/mm/migrate.c index 4b3996eb7f0f..734704f6f29b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1753,8 +1753,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, if (!new_page) goto out_fail; - page_cpupid_xchg_last(new_page, page_cpupid_last(page)); - isolated = numamigrate_isolate_page(pgdat, page); if (!isolated) { put_page(new_page); -- cgit v1.2.3 From 0b1fb40a3b1291f2f12f13f644ac95cf756a00e6 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 23 Jan 2014 15:53:22 -0800 Subject: mm: vmscan: shrink all slab objects if tight on memory When reclaiming kmem, we currently don't scan slabs that have less than batch_size objects (see shrink_slab_node()): while (total_scan >= batch_size) { shrinkctl->nr_to_scan = batch_size; shrinker->scan_objects(shrinker, shrinkctl); total_scan -= batch_size; } If there are only a few shrinkers available, such a behavior won't cause any problems, because the batch_size is usually small, but if we have a lot of slab shrinkers, which is perfectly possible since FS shrinkers are now per-superblock, we can end up with hundreds of megabytes of practically unreclaimable kmem objects. For instance, mounting a thousand of ext2 FS images with a hundred of files in each and iterating over all the files using du(1) will result in about 200 Mb of FS caches that cannot be dropped even with the aid of the vm.drop_caches sysctl! This problem was initially pointed out by Glauber Costa [*]. Glauber proposed to fix it by making the shrink_slab() always take at least one pass, to put it simply, turning the scan loop above to a do{}while() loop. However, this proposal was rejected, because it could result in more aggressive and frequent slab shrinking even under low memory pressure when total_scan is naturally very small. This patch is a slightly modified version of Glauber's approach. Similarly to Glauber's patch, it makes shrink_slab() scan less than batch_size objects, but only if the total number of objects we want to scan (total_scan) is greater than the total number of objects available (max_pass). Since total_scan is biased as half max_pass if the current delta change is small: if (delta < max_pass / 4) total_scan = min(total_scan, max_pass / 2); this is only possible if we are scanning at high prio. That said, this patch shouldn't change the vmscan behaviour if the memory pressure is low, but if we are tight on memory, we will do our best by trying to reclaim all available objects, which sounds reasonable. 
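Distilled from the hunk below, the scan loop now becomes (a sketch, with the vmstat accounting and cond_resched() elided):

        while (total_scan >= batch_size ||
               total_scan >= max_pass) {
                unsigned long nr_to_scan = min(batch_size, total_scan);

                shrinkctl->nr_to_scan = nr_to_scan;
                ret = shrinker->scan_objects(shrinker, shrinkctl);
                if (ret == SHRINK_STOP)
                        break;
                freed += ret;
                total_scan -= nr_to_scan;
        }

so a slab with fewer than batch_size objects is still scanned once the amount we want to scan covers everything it holds.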
[*] http://www.spinics.net/lists/cgroups/msg06913.html Signed-off-by: Vladimir Davydov Cc: Mel Gorman Cc: Michal Hocko Cc: Johannes Weiner Cc: Rik van Riel Cc: Dave Chinner Cc: Glauber Costa Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 2254f36b74b8..45c1cf61cbed 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -281,17 +281,34 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker, nr_pages_scanned, lru_pages, max_pass, delta, total_scan); - while (total_scan >= batch_size) { + /* + * Normally, we should not scan less than batch_size objects in one + * pass to avoid too frequent shrinker calls, but if the slab has less + * than batch_size objects in total and we are really tight on memory, + * we will try to reclaim all available objects, otherwise we can end + * up failing allocations although there are plenty of reclaimable + * objects spread over several slabs with usage less than the + * batch_size. + * + * We detect the "tight on memory" situations by looking at the total + * number of objects we want to scan (total_scan). If it is greater + * than the total number of objects on slab (max_pass), we must be + * scanning at high prio and therefore should try to reclaim as much as + * possible. + */ + while (total_scan >= batch_size || + total_scan >= max_pass) { unsigned long ret; + unsigned long nr_to_scan = min(batch_size, total_scan); - shrinkctl->nr_to_scan = batch_size; + shrinkctl->nr_to_scan = nr_to_scan; ret = shrinker->scan_objects(shrinker, shrinkctl); if (ret == SHRINK_STOP) break; freed += ret; - count_vm_events(SLABS_SCANNED, batch_size); - total_scan -= batch_size; + count_vm_events(SLABS_SCANNED, nr_to_scan); + total_scan -= nr_to_scan; cond_resched(); } -- cgit v1.2.3 From ec97097bca147d5718a5d2c024d1ec740b10096d Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 23 Jan 2014 15:53:23 -0800 Subject: mm: vmscan: call NUMA-unaware shrinkers irrespective of nodemask If a shrinker is not NUMA-aware, shrink_slab() should call it exactly once with nid=0, but currently it is not true: if node 0 is not set in the nodemask or if it is not online, we will not call such shrinkers at all. As a result some slabs will be left untouched under some circumstances. Let us fix it. 
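Sketching the hunk below, the per-shrinker dispatch in shrink_slab() turns into:

        list_for_each_entry(shrinker, &shrinker_list, list) {
                if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) {
                        /* exactly one call, with a fixed nid */
                        shrinkctl->nid = 0;
                        freed += shrink_slab_node(shrinkctl, shrinker,
                                                  nr_pages_scanned, lru_pages);
                        continue;
                }

                for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan)
                        if (node_online(shrinkctl->nid))
                                freed += shrink_slab_node(shrinkctl, shrinker,
                                                          nr_pages_scanned, lru_pages);
        }

so a NUMA-unaware shrinker no longer depends on node 0 being set in the nodemask and online.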
Signed-off-by: Vladimir Davydov Reported-by: Dave Chinner Cc: Mel Gorman Cc: Michal Hocko Cc: Johannes Weiner Cc: Rik van Riel Cc: Glauber Costa Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 45c1cf61cbed..90c4075d8d75 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -369,16 +369,17 @@ unsigned long shrink_slab(struct shrink_control *shrinkctl, } list_for_each_entry(shrinker, &shrinker_list, list) { - for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) { - if (!node_online(shrinkctl->nid)) - continue; - - if (!(shrinker->flags & SHRINKER_NUMA_AWARE) && - (shrinkctl->nid != 0)) - break; - + if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) { + shrinkctl->nid = 0; freed += shrink_slab_node(shrinkctl, shrinker, - nr_pages_scanned, lru_pages); + nr_pages_scanned, lru_pages); + continue; + } + + for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) { + if (node_online(shrinkctl->nid)) + freed += shrink_slab_node(shrinkctl, shrinker, + nr_pages_scanned, lru_pages); } } -- cgit v1.2.3 From 354f17e1e2512018a603793cc133e2f296f6ebc6 Mon Sep 17 00:00:00 2001 From: Philipp Hachtmann Date: Thu, 23 Jan 2014 15:53:24 -0800 Subject: mm/nobootmem: free_all_bootmem again get_allocated_memblock_reserved_regions_info() should work if it is compiled in. Extended the ifdef around get_allocated_memblock_memory_regions_info() to include get_allocated_memblock_reserved_regions_info() as well. Similar changes in nobootmem.c/free_low_memory_core_early() where the two functions are called. [akpm@linux-foundation.org: cleanup] Signed-off-by: Philipp Hachtmann Cc: qiuxishi Cc: David Howells Cc: Daeseok Youn Cc: Jiang Liu Acked-by: Yinghai Lu Cc: Zhang Yanfei Cc: Santosh Shilimkar Cc: Grygorii Strashko Cc: Tang Chen Cc: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memblock.c | 17 ++--------------- mm/nobootmem.c | 25 ++++++++++++++----------- 2 files changed, 16 insertions(+), 26 deletions(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 64ed2439cf75..9c0aeef19440 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -266,33 +266,20 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u } } +#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK + phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info( phys_addr_t *addr) { if (memblock.reserved.regions == memblock_reserved_init_regions) return 0; - /* - * Don't allow nobootmem allocator to free reserved memory regions - * array if - * - CONFIG_DEBUG_FS is enabled; - * - CONFIG_ARCH_DISCARD_MEMBLOCK is not enabled; - * - reserved memory regions array have been resized during boot. - * Otherwise debug_fs entry "sys/kernel/debug/memblock/reserved" - * will show garbage instead of state of memory reservations. 
- */ - if (IS_ENABLED(CONFIG_DEBUG_FS) && - !IS_ENABLED(CONFIG_ARCH_DISCARD_MEMBLOCK)) - return 0; - *addr = __pa(memblock.reserved.regions); return PAGE_ALIGN(sizeof(struct memblock_region) * memblock.reserved.max); } -#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK - phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info( phys_addr_t *addr) { diff --git a/mm/nobootmem.c b/mm/nobootmem.c index 17c89023184f..f73f2987a852 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c @@ -116,23 +116,26 @@ static unsigned long __init __free_memory_core(phys_addr_t start, static unsigned long __init free_low_memory_core_early(void) { unsigned long count = 0; - phys_addr_t start, end, size; + phys_addr_t start, end; u64 i; for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL) count += __free_memory_core(start, end); - /* Free memblock.reserved array if it was allocated */ - size = get_allocated_memblock_reserved_regions_info(&start); - if (size) - count += __free_memory_core(start, start + size); - #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK - - /* Free memblock.memory array if it was allocated */ - size = get_allocated_memblock_memory_regions_info(&start); - if (size) - count += __free_memory_core(start, start + size); + { + phys_addr_t size; + + /* Free memblock.reserved array if it was allocated */ + size = get_allocated_memblock_reserved_regions_info(&start); + if (size) + count += __free_memory_core(start, start + size); + + /* Free memblock.memory array if it was allocated */ + size = get_allocated_memblock_memory_regions_info(&start); + if (size) + count += __free_memory_core(start, start + size); + } #endif return count; -- cgit v1.2.3 From ac13c4622bda2a9ff8f57bbbfeff48b2a42d0963 Mon Sep 17 00:00:00 2001 From: Nathan Zimmer Date: Thu, 23 Jan 2014 15:53:26 -0800 Subject: mm/memory_hotplug.c: move register_memory_resource out of the lock_memory_hotplug We don't need to do register_memory_resource() under lock_memory_hotplug() since it has its own lock and doesn't make any callbacks. Also register_memory_resource return NULL on failure so we don't have anything to cleanup at this point. The reason for this rfc is I was doing some experiments with hotplugging of memory on some of our larger systems. While it seems to work, it can be quite slow. With some preliminary digging I found that lock_memory_hotplug is clearly ripe for breakup. It could be broken up per nid or something but it also covers the online_page_callback. The online_page_callback shouldn't be very hard to break out. Also there is the issue of various structures(wmarks come to mind) that are only updated under the lock_memory_hotplug that would need to be dealt with. Cc: Tang Chen Cc: Wen Congyang Cc: Kamezawa Hiroyuki Reviewed-by: Yasuaki Ishimatsu Cc: "Rafael J. 
Wysocki" Cc: Hedi Cc: Mike Travis Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory_hotplug.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index a512a47241a4..a650db29606f 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1107,17 +1107,18 @@ int __ref add_memory(int nid, u64 start, u64 size) if (ret) return ret; - lock_memory_hotplug(); - res = register_memory_resource(start, size); ret = -EEXIST; if (!res) - goto out; + return ret; { /* Stupid hack to suppress address-never-null warning */ void *p = NODE_DATA(nid); new_pgdat = !p; } + + lock_memory_hotplug(); + new_node = !node_online(nid); if (new_node) { pgdat = hotadd_new_pgdat(nid, start); -- cgit v1.2.3 From 42aa83cb6757800f4e2b499f5db3127761517a6a Mon Sep 17 00:00:00 2001 From: Han Pingtian Date: Thu, 23 Jan 2014 15:53:28 -0800 Subject: mm: show message when updating min_free_kbytes in thp min_free_kbytes may be raised during THP's initialization. Sometimes, this will change the value which was set by the user. Showing this message will clarify this confusion. Only show this message when changing a value which was set by the user according to Michal Hocko's suggestion. Show the old value of min_free_kbytes according to Dave Hansen's suggestion. This will give user the chance to restore old value of min_free_kbytes. Signed-off-by: Han Pingtian Reviewed-by: Michal Hocko Cc: David Rientjes Cc: Mel Gorman Cc: Dave Hansen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 8 +++++++- mm/internal.h | 1 + mm/page_alloc.c | 2 +- 3 files changed, 9 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 25fab7150fa0..afe738358370 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -130,8 +130,14 @@ static int set_recommended_min_free_kbytes(void) (unsigned long) nr_free_buffer_pages() / 20); recommended_min <<= (PAGE_SHIFT-10); - if (recommended_min > min_free_kbytes) + if (recommended_min > min_free_kbytes) { + if (user_min_free_kbytes >= 0) + pr_info("raising min_free_kbytes from %d to %lu " + "to help transparent hugepage allocations\n", + min_free_kbytes, recommended_min); + min_free_kbytes = recommended_min; + } setup_per_zone_wmarks(); return 0; } diff --git a/mm/internal.h b/mm/internal.h index dc95e979ae56..7e145e8cd1e6 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -99,6 +99,7 @@ extern void prep_compound_page(struct page *page, unsigned long order); #ifdef CONFIG_MEMORY_FAILURE extern bool is_free_buddy_page(struct page *page); #endif +extern int user_min_free_kbytes; #if defined CONFIG_COMPACTION || defined CONFIG_CMA diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a818d568ddf3..e3758a09a009 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -205,7 +205,7 @@ static char * const zone_names[MAX_NR_ZONES] = { }; int min_free_kbytes = 1024; -int user_min_free_kbytes; +int user_min_free_kbytes = -1; static unsigned long __meminitdata nr_kernel_pages; static unsigned long __meminitdata nr_all_pages; -- cgit v1.2.3 From da29bd36224bfa008df5d83df496c07e31a0da6d Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Thu, 23 Jan 2014 15:53:29 -0800 Subject: mm/mm_init.c: make creation of the mm_kobj happen earlier than device_initcall The use of __initcall is to be eventually replaced by choosing one from the prioritized groupings laid out in init.h header: pure_initcall 0 core_initcall 1 postcore_initcall 2 arch_initcall 3 
subsys_initcall 4 fs_initcall 5 device_initcall 6 late_initcall 7 In the interim, all __initcall are mapped onto device_initcall, which as can be seen above, comes quite late in the ordering. Currently the mm_kobj is created with __initcall in mm_sysfs_init(). This means that any other initcalls that want to reference the mm_kobj have to be device_initcall (or later), otherwise we will for example, trip the BUG_ON(!kobj) in sysfs's internal_create_group(). This unfairly restricts those users; for example something that clearly makes sense to be an arch_initcall will not be able to choose that. However, upon examination, it is only this way for historical reasons (i.e. simply not reprioritized yet). We see that sysfs is ready quite earlier in init/main.c via: vfs_caches_init |_ mnt_init |_ sysfs_init well ahead of the processing of the prioritized calls listed above. So we can recategorize mm_sysfs_init to be a pure_initcall, which in turn allows any mm_kobj initcall users a wider range (1 --> 7) of initcall priorities to choose from. Signed-off-by: Paul Gortmaker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mm_init.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/mm_init.c b/mm/mm_init.c index 68562e92d50c..857a6434e3a5 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -202,5 +202,4 @@ static int __init mm_sysfs_init(void) return 0; } - -__initcall(mm_sysfs_init); +pure_initcall(mm_sysfs_init); -- cgit v1.2.3 From a64fb3cd610c8e6806512dbac63f3fc45812d8fd Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Thu, 23 Jan 2014 15:53:30 -0800 Subject: mm: audit/fix non-modular users of module_init in core code Code that is obj-y (always built-in) or dependent on a bool Kconfig (built-in or absent) can never be modular. So using module_init as an alias for __initcall can be somewhat misleading. Fix these up now, so that we can relocate module_init from init.h into module.h in the future. If we don't do this, we'd have to add module.h to obviously non-modular code, and that would be a worse thing. The audit targets the following module_init users for change: mm/ksm.c bool KSM mm/mmap.c bool MMU mm/huge_memory.c bool TRANSPARENT_HUGEPAGE mm/mmu_notifier.c bool MMU_NOTIFIER Note that direct use of __initcall is discouraged, vs. one of the priority categorized subgroups. As __initcall gets mapped onto device_initcall, our use of subsys_initcall (which makes sense for these files) will thus change this registration from level 6-device to level 4-subsys (i.e. slightly earlier). However no observable impact of that difference has been observed during testing. One might think that core_initcall (l2) or postcore_initcall (l3) would be more appropriate for anything in mm/ but if we look at some actual init functions themselves, we see things like: mm/huge_memory.c --> hugepage_init --> hugepage_init_sysfs mm/mmap.c --> init_user_reserve --> sysctl_user_reserve_kbytes mm/ksm.c --> ksm_init --> sysfs_create_group and hence the choice of subsys_initcall (l4) seems reasonable, and at the same time minimizes the risk of changing the priority too drastically all at once. We can adjust further in the future. Also, several instances of missing ";" at EOL are fixed. 
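With a made-up example_mm_init() standing in for the four init functions touched here, each conversion is just:

        /* before: built-in only code registered via the module_init alias,
         * i.e. device_initcall (level 6), and missing the trailing ';' */
        module_init(example_mm_init)

        /* after: explicitly subsys_initcall (level 4) */
        subsys_initcall(example_mm_init);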
Signed-off-by: Paul Gortmaker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 2 +- mm/ksm.c | 2 +- mm/mmap.c | 6 +++--- mm/mmu_notifier.c | 3 +-- 4 files changed, 6 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/huge_memory.c b/mm/huge_memory.c index afe738358370..65c98eb5483c 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -661,7 +661,7 @@ out: hugepage_exit_sysfs(hugepage_kobj); return err; } -module_init(hugepage_init) +subsys_initcall(hugepage_init); static int __init setup_transparent_hugepage(char *str) { diff --git a/mm/ksm.c b/mm/ksm.c index f91ddf5c3688..aa4c7c7250c1 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2345,4 +2345,4 @@ out_free: out: return err; } -module_init(ksm_init) +subsys_initcall(ksm_init); diff --git a/mm/mmap.c b/mm/mmap.c index a0e7153a79e6..126d8b976bfd 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3142,7 +3142,7 @@ static int init_user_reserve(void) sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17); return 0; } -module_init(init_user_reserve) +subsys_initcall(init_user_reserve); /* * Initialise sysctl_admin_reserve_kbytes. @@ -3163,7 +3163,7 @@ static int init_admin_reserve(void) sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13); return 0; } -module_init(init_admin_reserve) +subsys_initcall(init_admin_reserve); /* * Reinititalise user and admin reserves if memory is added or removed. @@ -3233,4 +3233,4 @@ static int __meminit init_reserve_notifier(void) return 0; } -module_init(init_reserve_notifier) +subsys_initcall(init_reserve_notifier); diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index 93e6089cb456..41cefdf0aadd 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -329,5 +329,4 @@ static int __init mmu_notifier_init(void) { return init_srcu_struct(&srcu); } - -module_init(mmu_notifier_init); +subsys_initcall(mmu_notifier_init); -- cgit v1.2.3 From d2ab70aaae74456ed608740915dc82ef52291f69 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 23 Jan 2014 15:53:30 -0800 Subject: mm/memcg: fix last_dead_count memory wastage Shorten mem_cgroup_reclaim_iter.last_dead_count from unsigned long to int: it's assigned from an int and compared with an int, and adjacent to an unsigned int: so there's no point to it being unsigned long, which wasted 104 bytes in every mem_cgroup_per_zone. Signed-off-by: Hugh Dickins Acked-by: Michal Hocko Cc: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c8715056e1ef..aa66cc4c9e79 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -149,7 +149,7 @@ struct mem_cgroup_reclaim_iter { * matches memcg->dead_count of the hierarchy root group. */ struct mem_cgroup *last_visited; - unsigned long last_dead_count; + int last_dead_count; /* scan generation, increased every round-trip */ unsigned int generation; -- cgit v1.2.3 From d8ad30559715ce97afb7d1a93a12fd90e8fff312 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 23 Jan 2014 15:53:32 -0800 Subject: mm/memcg: iteration skip memcgs not yet fully initialized It is surprising that the mem_cgroup iterator can return memcgs which have not yet been fully initialized. By accident (or trial and error?) this appears not to present an actual problem; but it may be better to prevent such surprises, by skipping memcgs not yet online. 
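Condensed from the hunk below, the iterator now refuses to hand back a css that has not been marked online:

        if (next_css) {
                if ((next_css->flags & CSS_ONLINE) && css_tryget(next_css))
                        return mem_cgroup_from_css(next_css);

                /* not yet online, or the reference could not be taken:
                 * keep walking the tree */
                prev_css = next_css;
                goto skip_node;
        }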
Signed-off-by: Hugh Dickins Cc: Tejun Heo Acked-by: Michal Hocko Cc: Johannes Weiner Cc: [3.12+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index aa66cc4c9e79..9537e1389ee6 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1119,10 +1119,8 @@ skip_node: * protected by css_get and the tree walk is rcu safe. */ if (next_css) { - struct mem_cgroup *mem = mem_cgroup_from_css(next_css); - - if (css_tryget(&mem->css)) - return mem; + if ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)) + return mem_cgroup_from_css(next_css); else { prev_css = next_css; goto skip_node; -- cgit v1.2.3 From d49ad9355420c743c736bfd1dee9eaa5b1a7722a Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Thu, 23 Jan 2014 15:53:34 -0800 Subject: mm, oom: prefer thread group leaders for display purposes When two threads have the same badness score, it's preferable to kill the thread group leader so that the actual process name is printed to the kernel log rather than the thread group name which may be shared amongst several processes. This was the behavior when select_bad_process() used to do for_each_process(), but it now iterates threads instead and leads to ambiguity. Signed-off-by: David Rientjes Cc: Johannes Weiner Cc: Michal Hocko Cc: KAMEZAWA Hiroyuki Cc: Greg Thelen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 19 ++++++++++++------- mm/oom_kill.c | 12 ++++++++---- 2 files changed, 20 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9537e1389ee6..c8336e8f8df0 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1841,13 +1841,18 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, break; }; points = oom_badness(task, memcg, NULL, totalpages); - if (points > chosen_points) { - if (chosen) - put_task_struct(chosen); - chosen = task; - chosen_points = points; - get_task_struct(chosen); - } + if (!points || points < chosen_points) + continue; + /* Prefer thread group leaders for display purposes */ + if (points == chosen_points && + thread_group_leader(chosen)) + continue; + + if (chosen) + put_task_struct(chosen); + chosen = task; + chosen_points = points; + get_task_struct(chosen); } css_task_iter_end(&it); } diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 054ff47c4478..37b1b1903fb2 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -327,10 +327,14 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, break; }; points = oom_badness(p, NULL, nodemask, totalpages); - if (points > chosen_points) { - chosen = p; - chosen_points = points; - } + if (!points || points < chosen_points) + continue; + /* Prefer thread group leaders for display purposes */ + if (points == chosen_points && thread_group_leader(chosen)) + continue; + + chosen = p; + chosen_points = points; } if (chosen) get_task_struct(chosen); -- cgit v1.2.3 From ecc736fc3c71c411a9d201d8588c9e7e049e5d8c Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Thu, 23 Jan 2014 15:53:35 -0800 Subject: memcg: fix endless loop caused by mem_cgroup_iter Hugh has reported an endless loop when the hardlimit reclaim sees the same group all the time. This might happen when the reclaim races with the memcg removal. 
shrink_zone [rmdir root] mem_cgroup_iter(root, NULL, reclaim) // prev = NULL rcu_read_lock() mem_cgroup_iter_load last_visited = iter->last_visited // gets root || NULL css_tryget(last_visited) // failed last_visited = NULL [1] memcg = root = __mem_cgroup_iter_next(root, NULL) mem_cgroup_iter_update iter->last_visited = root; reclaim->generation = iter->generation mem_cgroup_iter(root, root, reclaim) // prev = root rcu_read_lock mem_cgroup_iter_load last_visited = iter->last_visited // gets root css_tryget(last_visited) // failed [1] The issue seemed to be introduced by commit 5f5781619718 ("memcg: relax memcg iter caching") which has replaced unconditional css_get/css_put by css_tryget/css_put for the cached iterator. This patch fixes the issue by skipping css_tryget on the root of the tree walk in mem_cgroup_iter_load and symmetrically doesn't release it in mem_cgroup_iter_update. Signed-off-by: Michal Hocko Reported-by: Hugh Dickins Tested-by: Hugh Dickins Cc: Johannes Weiner Cc: Greg Thelen Cc: [3.10+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c8336e8f8df0..da07784dde87 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1158,7 +1158,15 @@ mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter, if (iter->last_dead_count == *sequence) { smp_rmb(); position = iter->last_visited; - if (position && !css_tryget(&position->css)) + + /* + * We cannot take a reference to root because we might race + * with root removal and returning NULL would end up in + * an endless loop on the iterator user level when root + * would be returned all the time. + */ + if (position && position != root && + !css_tryget(&position->css)) position = NULL; } return position; @@ -1167,9 +1175,11 @@ mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter, static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter, struct mem_cgroup *last_visited, struct mem_cgroup *new_position, + struct mem_cgroup *root, int sequence) { - if (last_visited) + /* root reference counting symmetric to mem_cgroup_iter_load */ + if (last_visited && last_visited != root) css_put(&last_visited->css); /* * We store the sequence count from the time @last_visited was @@ -1244,7 +1254,8 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, memcg = __mem_cgroup_iter_next(root, last_visited); if (reclaim) { - mem_cgroup_iter_update(iter, last_visited, memcg, seq); + mem_cgroup_iter_update(iter, last_visited, memcg, root, + seq); if (!memcg) iter->generation++; -- cgit v1.2.3 From 0eef615665ede1e0d603ea9ecca88c1da6f02234 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Thu, 23 Jan 2014 15:53:37 -0800 Subject: memcg: fix css reference leak and endless loop in mem_cgroup_iter Commit 19f39402864e ("memcg: simplify mem_cgroup_iter") has reorganized mem_cgroup_iter code in order to simplify it. A part of that change was dropping an optimization which didn't call css_tryget on the root of the walked tree. The patch however didn't change the css_put part in mem_cgroup_iter which excludes root. This wasn't an issue at the time because __mem_cgroup_iter_next bailed out for root early without taking a reference as cgroup iterators (css_next_descendant_pre) didn't visit root themselves. 
Nevertheless cgroup iterators have been reworked to visit root by commit bd8815a6d802 ("cgroup: make css_for_each_descendant() and friends include the origin css in the iteration") when the root bypass have been dropped in __mem_cgroup_iter_next. This means that css_put is not called for root and so css along with mem_cgroup and other cgroup internal object tied by css lifetime are never freed. Fix the issue by reintroducing root check in __mem_cgroup_iter_next and do not take css reference for it. This reference counting magic protects us also from another issue, an endless loop reported by Hugh Dickins when reclaim races with root removal and css_tryget called by iterator internally would fail. There would be no other nodes to visit so __mem_cgroup_iter_next would return NULL and mem_cgroup_iter would interpret it as "start looping from root again" and so mem_cgroup_iter would loop forever internally. Signed-off-by: Michal Hocko Reported-by: Hugh Dickins Tested-by: Hugh Dickins Cc: Johannes Weiner Cc: Greg Thelen Cc: [3.12+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index da07784dde87..98f80beeac7f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1117,14 +1117,22 @@ skip_node: * skipped and we should continue the tree walk. * last_visited css is safe to use because it is * protected by css_get and the tree walk is rcu safe. + * + * We do not take a reference on the root of the tree walk + * because we might race with the root removal when it would + * be the only node in the iterated hierarchy and mem_cgroup_iter + * would end up in an endless loop because it expects that at + * least one valid node will be returned. Root cannot disappear + * because caller of the iterator should hold it already so + * skipping css reference should be safe. */ if (next_css) { - if ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)) + if ((next_css->flags & CSS_ONLINE) && + (next_css == &root->css || css_tryget(next_css))) return mem_cgroup_from_css(next_css); - else { - prev_css = next_css; - goto skip_node; - } + + prev_css = next_css; + goto skip_node; } return NULL; -- cgit v1.2.3 From 6c14466cc00ff13121ae782d33d9df0fde20b124 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Thu, 23 Jan 2014 15:53:38 -0800 Subject: mm: improve documentation of page_order Developers occasionally try and optimise PFN scanners by using page_order but miss that in general it requires zone->lock. This has happened twice for compaction.c and rejected both times. This patch clarifies the documentation of page_order and adds a note to compaction.c why page_order is not used. [akpm@linux-foundation.org: tweaks] [lauraa@codeaurora.org: Corrected a page_zone(page)->lock reference] Signed-off-by: Mel Gorman Acked-by: Rafael Aquini Acked-by: Minchan Kim Cc: Laura Abbott Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 5 ++++- mm/internal.h | 8 +++++--- 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index e0ab02d70f13..b48c5259ea33 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -523,7 +523,10 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, if (!isolation_suitable(cc, page)) goto next_pageblock; - /* Skip if free */ + /* + * Skip if free. 
page_order cannot be used without zone->lock + * as nothing prevents parallel allocations or buddy merging. + */ if (PageBuddy(page)) continue; diff --git a/mm/internal.h b/mm/internal.h index 7e145e8cd1e6..612c14f5e0f5 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -143,9 +143,11 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, #endif /* - * function for dealing with page's order in buddy system. - * zone->lock is already acquired when we use these. - * So, we don't need atomic page->flags operations here. + * This function returns the order of a free page in the buddy system. In + * general, page_zone(page)->lock must be held by the caller to prevent the + * page from being allocated in parallel and returning garbage as the order. + * If a caller does not hold page_zone(page)->lock, it must guarantee that the + * page cannot be allocated or merged in parallel. */ static inline unsigned long page_order(struct page *page) { -- cgit v1.2.3 From 0d8a4a3799ab007b7a5e50aff9da9558925e0c15 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 23 Jan 2014 15:53:39 -0800 Subject: memcg: remove unused code from kmem_cache_destroy_work_func Signed-off-by: Vladimir Davydov Reviewed-by: Michal Hocko Cc: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 98f80beeac7f..19d5d4274e22 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3359,11 +3359,9 @@ static void kmem_cache_destroy_work_func(struct work_struct *w) * So if we aren't down to zero, we'll just schedule a worker and try * again */ - if (atomic_read(&cachep->memcg_params->nr_pages) != 0) { + if (atomic_read(&cachep->memcg_params->nr_pages) != 0) kmem_cache_shrink(cachep); - if (atomic_read(&cachep->memcg_params->nr_pages) == 0) - return; - } else + else kmem_cache_destroy(cachep); } -- cgit v1.2.3 From a5998061daab27802c418debe662be98a6e42874 Mon Sep 17 00:00:00 2001 From: Jamie Liu Date: Thu, 23 Jan 2014 15:53:40 -0800 Subject: mm/swapfile.c: do not skip lowest_bit in scan_swap_map() scan loop In the second half of scan_swap_map()'s scan loop, offset is set to si->lowest_bit and then incremented before entering the loop for the first time, causing si->swap_map[si->lowest_bit] to be skipped. Signed-off-by: Jamie Liu Cc: Shaohua Li Acked-by: Hugh Dickins Cc: Minchan Kim Cc: Akinobu Mita Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/swapfile.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index d443dea95c27..c6c13b050a58 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -616,7 +616,7 @@ scan: } } offset = si->lowest_bit; - while (++offset < scan_base) { + while (offset < scan_base) { if (!si->swap_map[offset]) { spin_lock(&si->lock); goto checks; @@ -629,6 +629,7 @@ scan: cond_resched(); latency_ration = LATENCY_LIMIT; } + offset++; } spin_lock(&si->lock); -- cgit v1.2.3 From 871beb8c313ab270242022d314e37db5044e2bab Mon Sep 17 00:00:00 2001 From: Fengguang Wu Date: Thu, 23 Jan 2014 15:53:41 -0800 Subject: mm/rmap: fix coccinelle warnings mm/rmap.c:851:9-10: WARNING: return of 0/1 in function 'invalid_mkclean_vma' with return type bool Return statements in functions returning bool should use true/false instead of 1/0. 
Generated by: coccinelle/misc/boolreturn.cocci Signed-off-by: Fengguang Wu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/rmap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/rmap.c b/mm/rmap.c index 2dcd3353c3f6..d9d42316a99a 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -848,9 +848,9 @@ out: static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) { if (vma->vm_flags & VM_SHARED) - return 0; + return false; - return 1; + return true; } int page_mkclean(struct page *page) -- cgit v1.2.3 From 34228d473efe764d4db7c0536375f0c993e6e06a Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Thu, 23 Jan 2014 15:53:42 -0800 Subject: mm: ignore VM_SOFTDIRTY on VMA merging The VM_SOFTDIRTY bit affects vma merge routine: if two VMAs has all bits in vm_flags matched except dirty bit the kernel can't longer merge them and this forces the kernel to generate new VMAs instead. It finally may lead to the situation when userspace application reaches vm.max_map_count limit and get crashed in worse case | (gimp:11768): GLib-ERROR **: gmem.c:110: failed to allocate 4096 bytes | | (file-tiff-load:12038): LibGimpBase-WARNING **: file-tiff-load: gimp_wire_read(): error | xinit: connection to X server lost | | waiting for X server to shut down | /usr/lib64/gimp/2.0/plug-ins/file-tiff-load terminated: Hangup | /usr/lib64/gimp/2.0/plug-ins/script-fu terminated: Hangup | /usr/lib64/gimp/2.0/plug-ins/script-fu terminated: Hangup https://bugzilla.kernel.org/show_bug.cgi?id=67651 https://bugzilla.gnome.org/show_bug.cgi?id=719619#c0 Initial problem came from missed VM_SOFTDIRTY in do_brk() routine but even if we would set up VM_SOFTDIRTY here, there is still a way to prevent VMAs from merging: one can call | echo 4 > /proc/$PID/clear_refs and clear all VM_SOFTDIRTY over all VMAs presented in memory map, then new do_brk() will try to extend old VMA and finds that dirty bit doesn't match thus new VMA will be generated. As discussed with Pavel, the right approach should be to ignore VM_SOFTDIRTY bit when we're trying to merge VMAs and if merge successed we mark extended VMA with dirty bit where needed. Signed-off-by: Cyrill Gorcunov Reported-by: Bastian Hougaard Reported-by: Mel Gorman Cc: Pavel Emelyanov Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mmap.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index 126d8b976bfd..20ff0c33274c 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -894,7 +894,15 @@ again: remove_next = 1 + (end > next->vm_end); static inline int is_mergeable_vma(struct vm_area_struct *vma, struct file *file, unsigned long vm_flags) { - if (vma->vm_flags ^ vm_flags) + /* + * VM_SOFTDIRTY should not prevent from VMA merging, if we + * match the flags but dirty bit -- the caller should mark + * merged VMA as dirty. If dirty bit won't be excluded from + * comparison, we increase pressue on the memory system forcing + * the kernel to generate new VMAs when old one could be + * extended instead. 
+ */ + if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) return 0; if (vma->vm_file != file) return 0; @@ -1083,7 +1091,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct * return a->vm_end == b->vm_start && mpol_equal(vma_policy(a), vma_policy(b)) && a->vm_file == b->vm_file && - !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) && + !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) && b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); } -- cgit v1.2.3 From feda821e76f3bbbba4bd54d30b4d4005a7848aa5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 20 Dec 2013 05:16:54 -0800 Subject: fs: remove generic_acl And instead convert tmpfs to use the new generic ACL code, with two stub methods provided for in-memory filesystems. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/Kconfig | 6 +- fs/Makefile | 1 - fs/generic_acl.c | 184 -------------------------------------------- fs/posix_acl.c | 36 +++++++++ include/linux/generic_acl.h | 14 ---- include/linux/posix_acl.h | 9 +++ mm/shmem.c | 57 ++++++-------- 7 files changed, 69 insertions(+), 238 deletions(-) delete mode 100644 fs/generic_acl.c delete mode 100644 include/linux/generic_acl.h (limited to 'mm') diff --git a/fs/Kconfig b/fs/Kconfig index c229f828eb01..7385e54be4b9 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -68,10 +68,6 @@ source "fs/quota/Kconfig" source "fs/autofs4/Kconfig" source "fs/fuse/Kconfig" -config GENERIC_ACL - bool - select FS_POSIX_ACL - menu "Caches" source "fs/fscache/Kconfig" @@ -119,7 +115,7 @@ config TMPFS_POSIX_ACL bool "Tmpfs POSIX Access Control Lists" depends on TMPFS select TMPFS_XATTR - select GENERIC_ACL + select FS_POSIX_ACL help POSIX Access Control Lists (ACLs) support additional access rights for users and groups beyond the standard owner/group/world scheme, diff --git a/fs/Makefile b/fs/Makefile index f2c1843820e3..5bebad4b01c6 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -44,7 +44,6 @@ obj-$(CONFIG_BINFMT_FLAT) += binfmt_flat.o obj-$(CONFIG_FS_MBCACHE) += mbcache.o obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o obj-$(CONFIG_NFS_COMMON) += nfs_common/ -obj-$(CONFIG_GENERIC_ACL) += generic_acl.o obj-$(CONFIG_COREDUMP) += coredump.o obj-$(CONFIG_SYSCTL) += drop_caches.o diff --git a/fs/generic_acl.c b/fs/generic_acl.c deleted file mode 100644 index 4357f39c8441..000000000000 --- a/fs/generic_acl.c +++ /dev/null @@ -1,184 +0,0 @@ -/* - * (C) 2005 Andreas Gruenbacher - * - * This file is released under the GPL. - * - * Generic ACL support for in-memory filesystems. 
- */ - -#include -#include -#include -#include -#include -#include - - -static size_t -generic_acl_list(struct dentry *dentry, char *list, size_t list_size, - const char *name, size_t name_len, int type) -{ - struct posix_acl *acl; - const char *xname; - size_t size; - - acl = get_cached_acl(dentry->d_inode, type); - if (!acl) - return 0; - posix_acl_release(acl); - - switch (type) { - case ACL_TYPE_ACCESS: - xname = POSIX_ACL_XATTR_ACCESS; - break; - case ACL_TYPE_DEFAULT: - xname = POSIX_ACL_XATTR_DEFAULT; - break; - default: - return 0; - } - size = strlen(xname) + 1; - if (list && size <= list_size) - memcpy(list, xname, size); - return size; -} - -static int -generic_acl_get(struct dentry *dentry, const char *name, void *buffer, - size_t size, int type) -{ - struct posix_acl *acl; - int error; - - if (strcmp(name, "") != 0) - return -EINVAL; - - acl = get_cached_acl(dentry->d_inode, type); - if (!acl) - return -ENODATA; - error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); - posix_acl_release(acl); - - return error; -} - -static int -generic_acl_set(struct dentry *dentry, const char *name, const void *value, - size_t size, int flags, int type) -{ - struct inode *inode = dentry->d_inode; - struct posix_acl *acl = NULL; - int error; - - if (strcmp(name, "") != 0) - return -EINVAL; - if (S_ISLNK(inode->i_mode)) - return -EOPNOTSUPP; - if (!inode_owner_or_capable(inode)) - return -EPERM; - if (value) { - acl = posix_acl_from_xattr(&init_user_ns, value, size); - if (IS_ERR(acl)) - return PTR_ERR(acl); - } - if (acl) { - error = posix_acl_valid(acl); - if (error) - goto failed; - switch (type) { - case ACL_TYPE_ACCESS: - error = posix_acl_equiv_mode(acl, &inode->i_mode); - if (error < 0) - goto failed; - inode->i_ctime = CURRENT_TIME; - if (error == 0) { - posix_acl_release(acl); - acl = NULL; - } - break; - case ACL_TYPE_DEFAULT: - if (!S_ISDIR(inode->i_mode)) { - error = -EINVAL; - goto failed; - } - break; - } - } - set_cached_acl(inode, type, acl); - error = 0; -failed: - posix_acl_release(acl); - return error; -} - -/** - * generic_acl_init - Take care of acl inheritance at @inode create time - * - * Files created inside a directory with a default ACL inherit the - * directory's default ACL. - */ -int -generic_acl_init(struct inode *inode, struct inode *dir) -{ - struct posix_acl *acl = NULL; - int error; - - if (!S_ISLNK(inode->i_mode)) - acl = get_cached_acl(dir, ACL_TYPE_DEFAULT); - if (acl) { - if (S_ISDIR(inode->i_mode)) - set_cached_acl(inode, ACL_TYPE_DEFAULT, acl); - error = __posix_acl_create(&acl, GFP_KERNEL, &inode->i_mode); - if (error < 0) - return error; - if (error > 0) - set_cached_acl(inode, ACL_TYPE_ACCESS, acl); - } else { - inode->i_mode &= ~current_umask(); - } - error = 0; - - posix_acl_release(acl); - return error; -} - -/** - * generic_acl_chmod - change the access acl of @inode upon chmod() - * - * A chmod also changes the permissions of the owner, group/mask, and - * other ACL entries. 
- */ -int -generic_acl_chmod(struct inode *inode) -{ - struct posix_acl *acl; - int error = 0; - - if (S_ISLNK(inode->i_mode)) - return -EOPNOTSUPP; - acl = get_cached_acl(inode, ACL_TYPE_ACCESS); - if (acl) { - error = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); - if (error) - return error; - set_cached_acl(inode, ACL_TYPE_ACCESS, acl); - posix_acl_release(acl); - } - return error; -} - -const struct xattr_handler generic_acl_access_handler = { - .prefix = POSIX_ACL_XATTR_ACCESS, - .flags = ACL_TYPE_ACCESS, - .list = generic_acl_list, - .get = generic_acl_get, - .set = generic_acl_set, -}; - -const struct xattr_handler generic_acl_default_handler = { - .prefix = POSIX_ACL_XATTR_DEFAULT, - .flags = ACL_TYPE_DEFAULT, - .list = generic_acl_list, - .get = generic_acl_get, - .set = generic_acl_set, -}; diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 8f245ab20143..f40df9b665fb 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -786,3 +786,39 @@ const struct xattr_handler posix_acl_default_xattr_handler = { .set = posix_acl_xattr_set, }; EXPORT_SYMBOL_GPL(posix_acl_default_xattr_handler); + +int simple_set_acl(struct inode *inode, struct posix_acl *acl, int type) +{ + int error; + + if (type == ACL_TYPE_ACCESS) { + error = posix_acl_equiv_mode(acl, &inode->i_mode); + if (error < 0) + return 0; + if (error == 0) + acl = NULL; + } + + inode->i_ctime = CURRENT_TIME; + set_cached_acl(inode, type, acl); + return 0; +} + +int simple_acl_create(struct inode *dir, struct inode *inode) +{ + struct posix_acl *default_acl, *acl; + int error; + + error = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl); + if (error) + return error; + + set_cached_acl(inode, ACL_TYPE_DEFAULT, default_acl); + set_cached_acl(inode, ACL_TYPE_ACCESS, acl); + + if (default_acl) + posix_acl_release(default_acl); + if (acl) + posix_acl_release(acl); + return 0; +} diff --git a/include/linux/generic_acl.h b/include/linux/generic_acl.h deleted file mode 100644 index b6d657544ef1..000000000000 --- a/include/linux/generic_acl.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef LINUX_GENERIC_ACL_H -#define LINUX_GENERIC_ACL_H - -#include - -struct inode; - -extern const struct xattr_handler generic_acl_access_handler; -extern const struct xattr_handler generic_acl_default_handler; - -int generic_acl_init(struct inode *, struct inode *); -int generic_acl_chmod(struct inode *); - -#endif /* LINUX_GENERIC_ACL_H */ diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index 3d14be8e14db..6b12b3d57e90 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -99,6 +99,9 @@ extern int posix_acl_chmod(struct inode *, umode_t); extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **, struct posix_acl **); +extern int simple_set_acl(struct inode *, struct posix_acl *, int); +extern int simple_acl_create(struct inode *, struct inode *); + static inline struct posix_acl **acl_by_type(struct inode *inode, int type) { switch (type) { @@ -181,6 +184,12 @@ static inline int posix_acl_chmod(struct inode *inode, umode_t mode) return 0; } +#define simple_set_acl NULL + +static inline int simple_acl_create(struct inode *dir, struct inode *inode) +{ + return 0; +} static inline void cache_no_acl(struct inode *inode) { } diff --git a/mm/shmem.c b/mm/shmem.c index 902a14842b74..b21ca543458c 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -45,7 +45,7 @@ static struct vfsmount *shm_mnt; #include #include #include -#include +#include #include #include #include @@ -620,10 +620,8 @@ static int 
shmem_setattr(struct dentry *dentry, struct iattr *attr) } setattr_copy(inode, attr); -#ifdef CONFIG_TMPFS_POSIX_ACL if (attr->ia_valid & ATTR_MODE) - error = generic_acl_chmod(inode); -#endif + error = posix_acl_chmod(inode, inode->i_mode); return error; } @@ -1937,22 +1935,14 @@ shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); if (inode) { -#ifdef CONFIG_TMPFS_POSIX_ACL - error = generic_acl_init(inode, dir); - if (error) { - iput(inode); - return error; - } -#endif + error = simple_acl_create(dir, inode); + if (error) + goto out_iput; error = security_inode_init_security(inode, dir, &dentry->d_name, shmem_initxattrs, NULL); - if (error) { - if (error != -EOPNOTSUPP) { - iput(inode); - return error; - } - } + if (error && error != -EOPNOTSUPP) + goto out_iput; error = 0; dir->i_size += BOGO_DIRENT_SIZE; @@ -1961,6 +1951,9 @@ shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) dget(dentry); /* Extra count - pin the dentry in core */ } return error; +out_iput: + iput(inode); + return error; } static int @@ -1974,24 +1967,17 @@ shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) error = security_inode_init_security(inode, dir, NULL, shmem_initxattrs, NULL); - if (error) { - if (error != -EOPNOTSUPP) { - iput(inode); - return error; - } - } -#ifdef CONFIG_TMPFS_POSIX_ACL - error = generic_acl_init(inode, dir); - if (error) { - iput(inode); - return error; - } -#else - error = 0; -#endif + if (error && error != -EOPNOTSUPP) + goto out_iput; + error = simple_acl_create(dir, inode); + if (error) + goto out_iput; d_tmpfile(dentry, inode); } return error; +out_iput: + iput(inode); + return error; } static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) @@ -2223,8 +2209,8 @@ static int shmem_initxattrs(struct inode *inode, static const struct xattr_handler *shmem_xattr_handlers[] = { #ifdef CONFIG_TMPFS_POSIX_ACL - &generic_acl_access_handler, - &generic_acl_default_handler, + &posix_acl_access_xattr_handler, + &posix_acl_default_xattr_handler, #endif NULL }; @@ -2740,6 +2726,7 @@ static const struct inode_operations shmem_inode_operations = { .getxattr = shmem_getxattr, .listxattr = shmem_listxattr, .removexattr = shmem_removexattr, + .set_acl = simple_set_acl, #endif }; @@ -2764,6 +2751,7 @@ static const struct inode_operations shmem_dir_inode_operations = { #endif #ifdef CONFIG_TMPFS_POSIX_ACL .setattr = shmem_setattr, + .set_acl = simple_set_acl, #endif }; @@ -2776,6 +2764,7 @@ static const struct inode_operations shmem_special_inode_operations = { #endif #ifdef CONFIG_TMPFS_POSIX_ACL .setattr = shmem_setattr, + .set_acl = simple_set_acl, #endif }; -- cgit v1.2.3 From 9fe55eea7e4b444bafc42fa0000cc2d1d2847275 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Fri, 24 Jan 2014 14:42:22 +0000 Subject: Fix race when checking i_size on direct i/o read So far I've had one ACK for this, and no other comments. So I think it is probably time to send this via some suitable tree. I'm guessing that the vfs tree would be the most appropriate route, but not sure that there is one at the moment (don't see anything recent at kernel.org) so in that case I think -mm is the "back up plan". Al, please let me know if you will take this? Steve. 
--------------------- Following on from the "Re: [PATCH v3] vfs: fix a bug when we do some dio reads with append dio writes" thread on linux-fsdevel, this patch is my current version of the fix proposed as option (b) in that thread. Removing the i_size test from the direct i/o read path at vfs level means that filesystems now have to deal with requests which are beyond i_size themselves. These I've divided into three sets: a) Those with "no op" ->direct_IO (9p, cifs, ceph) These are obviously not going to be an issue b) Those with "home brew" ->direct_IO (nfs, fuse) I've been told that NFS should not have any problem with the larger i_size, however I've added an extra test to FUSE to duplicate the original behaviour just to be on the safe side. c) Those using __blockdev_direct_IO() These call through to ->get_block() which should deal with the EOF condition correctly. I've verified that with GFS2 and I believe that Zheng has verified it for ext4. I've also run the test on XFS and it passes both before and after this change. The part of the patch in filemap.c looks a lot larger than it really is - there are only two lines of real change. The rest is just indentation of the contained code. There remains a test of i_size though, which was added for btrfs. It doesn't cause the other filesystems a problem as the test is performed after ->direct_IO has been called. It is possible that there is a race that does matter to btrfs, however this patch doesn't change that, so its still an overall improvement. Signed-off-by: Steven Whitehouse Reported-by: Zheng Liu Cc: Jan Kara Cc: Dave Chinner Acked-by: Miklos Szeredi Cc: Chris Mason Cc: Josef Bacik Cc: Christoph Hellwig Cc: Alexander Viro Signed-off-by: Al Viro --- fs/fuse/file.c | 3 +++ mm/filemap.c | 42 ++++++++++++++++++++---------------------- 2 files changed, 23 insertions(+), 22 deletions(-) (limited to 'mm') diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 7e70506297bc..89fdfd1919af 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2710,6 +2710,9 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, inode = file->f_mapping->host; i_size = i_size_read(inode); + if ((rw == READ) && (offset > i_size)) + return 0; + /* optimization for short read */ if (async_dio && rw != WRITE && offset + count > i_size) { if (offset >= i_size) diff --git a/mm/filemap.c b/mm/filemap.c index b7749a92021c..01842867c9d2 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1428,30 +1428,28 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov, if (!count) goto out; /* skip atime */ size = i_size_read(inode); - if (pos < size) { - retval = filemap_write_and_wait_range(mapping, pos, + retval = filemap_write_and_wait_range(mapping, pos, pos + iov_length(iov, nr_segs) - 1); - if (!retval) { - retval = mapping->a_ops->direct_IO(READ, iocb, - iov, pos, nr_segs); - } - if (retval > 0) { - *ppos = pos + retval; - count -= retval; - } + if (!retval) { + retval = mapping->a_ops->direct_IO(READ, iocb, + iov, pos, nr_segs); + } + if (retval > 0) { + *ppos = pos + retval; + count -= retval; + } - /* - * Btrfs can have a short DIO read if we encounter - * compressed extents, so if there was an error, or if - * we've already read everything we wanted to, or if - * there was a short read because we hit EOF, go ahead - * and return. Otherwise fallthrough to buffered io for - * the rest of the read. 
- */ - if (retval < 0 || !count || *ppos >= size) { - file_accessed(filp); - goto out; - } + /* + * Btrfs can have a short DIO read if we encounter + * compressed extents, so if there was an error, or if + * we've already read everything we wanted to, or if + * there was a short read because we hit EOF, go ahead + * and return. Otherwise fallthrough to buffered io for + * the rest of the read. + */ + if (retval < 0 || !count || *ppos >= size) { + file_accessed(filp); + goto out; } } -- cgit v1.2.3 From fb5bb60cd004a00c1d11db680a37942ecdedb1c5 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 27 Jan 2014 17:06:52 -0800 Subject: memblock: don't silently align size in memblock_virt_alloc() In original __alloc_memory_core_early() for bootmem wrapper, we do not align size silently. We should not do that, as later free with old size will leave some range not freed. It's obvious that code is copied from memblock_base_nid(), and that code is wrong for the same reason. Also remove that in memblock_alloc_base. Signed-off-by: Yinghai Lu Acked-by: Santosh Shilimkar Cc: Dave Hansen Cc: Russell King Cc: Konrad Rzeszutek Wilk Cc: Ingo Molnar Cc: "H. Peter Anvin" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memblock.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 9c0aeef19440..87d21a6ff63c 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -984,9 +984,6 @@ static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size, if (!align) align = SMP_CACHE_BYTES; - /* align @size to avoid excessive fragmentation on reserved array */ - size = round_up(size, align); - found = memblock_find_in_range_node(size, align, 0, max_addr, nid); if (found && !memblock_reserve(found, size)) return found; @@ -1080,9 +1077,6 @@ static void * __init memblock_virt_alloc_internal( if (!align) align = SMP_CACHE_BYTES; - /* align @size to avoid excessive fragmentation on reserved array */ - size = round_up(size, align); - again: alloc = memblock_find_in_range_node(size, align, min_addr, max_addr, nid); -- cgit v1.2.3 From add688fbd32158440dbe62c07269a39ed969c059 Mon Sep 17 00:00:00 2001 From: malc Date: Mon, 27 Jan 2014 17:06:53 -0800 Subject: Revert "mm/vmalloc: interchage the implementation of vmalloc_to_{pfn,page}" Revert commit ece86e222db4, which was intended as a small performance improvement. Despite the claim that the patch doesn't introduce any functional changes in fact it does. The "no page" path behaves different now. Originally, vmalloc_to_page might return NULL under some conditions, with new implementation it returns pfn_to_page(0) which is not the same as NULL. Simple test shows the difference. 
test.c #include #include #include #include int __init myi(void) { struct page *p; void *v; v = vmalloc(PAGE_SIZE); /* trigger the "no page" path in vmalloc_to_page*/ vfree(v); p = vmalloc_to_page(v); pr_err("expected val = NULL, returned val = %p", p); return -EBUSY; } void __exit mye(void) { } module_init(myi) module_exit(mye) Before interchange: expected val = NULL, returned val = (null) After interchange: expected val = NULL, returned val = c7ebe000 Signed-off-by: Vladimir Murzin Cc: Jianyu Zhan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmalloc.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index e4f0db2a3eae..0fdf96803c5b 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -220,12 +220,12 @@ int is_vmalloc_or_module_addr(const void *x) } /* - * Walk a vmap address to the physical pfn it maps to. + * Walk a vmap address to the struct page it maps. */ -unsigned long vmalloc_to_pfn(const void *vmalloc_addr) +struct page *vmalloc_to_page(const void *vmalloc_addr) { unsigned long addr = (unsigned long) vmalloc_addr; - unsigned long pfn = 0; + struct page *page = NULL; pgd_t *pgd = pgd_offset_k(addr); /* @@ -244,23 +244,23 @@ unsigned long vmalloc_to_pfn(const void *vmalloc_addr) ptep = pte_offset_map(pmd, addr); pte = *ptep; if (pte_present(pte)) - pfn = pte_pfn(pte); + page = pte_page(pte); pte_unmap(ptep); } } } - return pfn; + return page; } -EXPORT_SYMBOL(vmalloc_to_pfn); +EXPORT_SYMBOL(vmalloc_to_page); /* - * Map a vmalloc()-space virtual address to the struct page. + * Map a vmalloc()-space virtual address to the physical page frame number. */ -struct page *vmalloc_to_page(const void *vmalloc_addr) +unsigned long vmalloc_to_pfn(const void *vmalloc_addr) { - return pfn_to_page(vmalloc_to_pfn(vmalloc_addr)); + return page_to_pfn(vmalloc_to_page(vmalloc_addr)); } -EXPORT_SYMBOL(vmalloc_to_page); +EXPORT_SYMBOL(vmalloc_to_pfn); /*** Global kva allocator ***/ -- cgit v1.2.3 From e82cb95d626a6bb0e4fe7db1f311dc22039c2ed3 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 27 Jan 2014 17:06:55 -0800 Subject: mm: bring back /sys/kernel/mm Commit da29bd36224b ("mm/mm_init.c: make creation of the mm_kobj happen earlier than device_initcall") changed to pure_initcall(mm_sysfs_init). That's too early: mm_sysfs_init() depends on core_initcall(ksysfs_init) to have made the kernel_kobj directory "kernel" in which to create "mm". Make it postcore_initcall(mm_sysfs_init). We could use core_initcall(), and depend upon Makefile link order kernel/ mm/ fs/ ipc/ security/ ... as core_initcall(debugfs_init) and core_initcall(securityfs_init) do; but better not. Signed-off-by: Hugh Dickins Acked-by: Paul Gortmaker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mm_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/mm_init.c b/mm/mm_init.c index 857a6434e3a5..4074caf9936b 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -202,4 +202,4 @@ static int __init mm_sysfs_init(void) return 0; } -pure_initcall(mm_sysfs_init); +postcore_initcall(mm_sysfs_init); -- cgit v1.2.3 From a3978a519461b095b776f44a86079f5448c96963 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Mon, 27 Jan 2014 17:07:17 -0800 Subject: mm/migrate.c: fix setting of cpupid on page migration twice against normal page Commit 7851a45cd3f6 ("mm: numa: Copy cpupid on page migration") copies over the cpupid at page migration time. 
It is unnecessary to set it again in alloc_misplaced_dst_page(). Signed-off-by: Wanpeng Li Reviewed-by: Naoya Horiguchi Acked-by: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/migrate.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'mm') diff --git a/mm/migrate.c b/mm/migrate.c index 734704f6f29b..482a33d89134 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1548,8 +1548,6 @@ static struct page *alloc_misplaced_dst_page(struct page *page, __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & ~GFP_IOFS, 0); - if (newpage) - page_cpupid_xchg_last(newpage, page_cpupid_last(page)); return newpage; } -- cgit v1.2.3 From a804552b9a15c931cfc2a92a2e0aed1add8b580a Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 29 Jan 2014 14:05:39 -0800 Subject: mm/page-writeback.c: fix dirty_balance_reserve subtraction from dirtyable memory Tejun reported stuttering and latency spikes on a system where random tasks would enter direct reclaim and get stuck on dirty pages. Around 50% of memory was occupied by tmpfs backed by an SSD, and another disk (rotating) was reading and writing at max speed to shrink a partition. : The problem was pretty ridiculous. It's a 8gig machine w/ one ssd and 10k : rpm harddrive and I could reliably reproduce constant stuttering every : several seconds for as long as buffered IO was going on on the hard drive : either with tmpfs occupying somewhere above 4gig or a test program which : allocates about the same amount of anon memory. Although swap usage was : zero, turning off swap also made the problem go away too. : : The trigger conditions seem quite plausible - high anon memory usage w/ : heavy buffered IO and swap configured - and it's highly likely that this : is happening in the wild too. (this can happen with copying large files : to usb sticks too, right?) This patch (of 2): The dirty_balance_reserve is an approximation of the fraction of free pages that the page allocator does not make available for page cache allocations. As a result, it has to be taken into account when calculating the amount of "dirtyable memory", the baseline to which dirty_background_ratio and dirty_ratio are applied. However, currently the reserve is subtracted from the sum of free and reclaimable pages, which is non-sensical and leads to erroneous results when the system is dominated by unreclaimable pages and the dirty_balance_reserve is bigger than free+reclaimable. In that case, at least the already allocated cache should be considered dirtyable. Fix the calculation by subtracting the reserve from the amount of free pages, then adding the reclaimable pages on top. [akpm@linux-foundation.org: fix CONFIG_HIGHMEM build] Signed-off-by: Johannes Weiner Reported-by: Tejun Heo Tested-by: Tejun Heo Reviewed-by: Rik van Riel Cc: Mel Gorman Cc: Wu Fengguang Reviewed-by: Michal Hocko Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page-writeback.c | 55 +++++++++++++++++++++++------------------------------ 1 file changed, 24 insertions(+), 31 deletions(-) (limited to 'mm') diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 63807583d8e8..61119b8a11e6 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -191,6 +191,25 @@ static unsigned long writeout_period_time = 0; * global dirtyable memory first. */ +/** + * zone_dirtyable_memory - number of dirtyable pages in a zone + * @zone: the zone + * + * Returns the zone's number of pages potentially available for dirty + * page cache. This is the base value for the per-zone dirty limits. 
+ */ +static unsigned long zone_dirtyable_memory(struct zone *zone) +{ + unsigned long nr_pages; + + nr_pages = zone_page_state(zone, NR_FREE_PAGES); + nr_pages -= min(nr_pages, zone->dirty_balance_reserve); + + nr_pages += zone_reclaimable_pages(zone); + + return nr_pages; +} + static unsigned long highmem_dirtyable_memory(unsigned long total) { #ifdef CONFIG_HIGHMEM @@ -198,11 +217,9 @@ static unsigned long highmem_dirtyable_memory(unsigned long total) unsigned long x = 0; for_each_node_state(node, N_HIGH_MEMORY) { - struct zone *z = - &NODE_DATA(node)->node_zones[ZONE_HIGHMEM]; + struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM]; - x += zone_page_state(z, NR_FREE_PAGES) + - zone_reclaimable_pages(z) - z->dirty_balance_reserve; + x += zone_dirtyable_memory(z); } /* * Unreclaimable memory (kernel memory or anonymous memory @@ -238,9 +255,11 @@ static unsigned long global_dirtyable_memory(void) { unsigned long x; - x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages(); + x = global_page_state(NR_FREE_PAGES); x -= min(x, dirty_balance_reserve); + x += global_reclaimable_pages(); + if (!vm_highmem_is_dirtyable) x -= highmem_dirtyable_memory(x); @@ -288,32 +307,6 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty) trace_global_dirty_state(background, dirty); } -/** - * zone_dirtyable_memory - number of dirtyable pages in a zone - * @zone: the zone - * - * Returns the zone's number of pages potentially available for dirty - * page cache. This is the base value for the per-zone dirty limits. - */ -static unsigned long zone_dirtyable_memory(struct zone *zone) -{ - /* - * The effective global number of dirtyable pages may exclude - * highmem as a big-picture measure to keep the ratio between - * dirty memory and lowmem reasonable. - * - * But this function is purely about the individual zone and a - * highmem zone can hold its share of dirty pages, so we don't - * care about vm_highmem_is_dirtyable here. - */ - unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) + - zone_reclaimable_pages(zone); - - /* don't allow this to underflow */ - nr_pages -= min(nr_pages, zone->dirty_balance_reserve); - return nr_pages; -} - /** * zone_dirty_limit - maximum number of dirty pages allowed in a zone * @zone: the zone -- cgit v1.2.3 From a1c3bfb2f67ef766de03f1f56bdfff9c8595ab14 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 29 Jan 2014 14:05:41 -0800 Subject: mm/page-writeback.c: do not count anon pages as dirtyable memory The VM is currently heavily tuned to avoid swapping. Whether that is good or bad is a separate discussion, but as long as the VM won't swap to make room for dirty cache, we can not consider anonymous pages when calculating the amount of dirtyable memory, the baseline to which dirty_background_ratio and dirty_ratio are applied. A simple workload that occupies a significant size (40+%, depending on memory layout, storage speeds etc.) of memory with anon/tmpfs pages and uses the remainder for a streaming writer demonstrates this problem. In that case, the actual cache pages are a small fraction of what is considered dirtyable overall, which results in an relatively large portion of the cache pages to be dirtied. As kswapd starts rotating these, random tasks enter direct reclaim and stall on IO. Only consider free pages and file pages dirtyable. 
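To make the arithmetic of these two page-writeback patches concrete, here is a minimal user-space sketch with made-up page counts (illustration only, not kernel code): the old ordering subtracted the reserve from free plus reclaimable pages and counted anonymous memory as reclaimable, while the new ordering subtracts the reserve from the free pages only and then adds just the file LRU pages.

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/*
 * Old calculation: reserve subtracted from free + reclaimable, where
 * "reclaimable" included anonymous pages whenever swap was configured.
 */
static unsigned long dirtyable_old(unsigned long free, unsigned long reserve,
				   unsigned long file, unsigned long anon)
{
	unsigned long x = free + file + anon;

	x -= min_ul(x, reserve);
	return x;
}

/*
 * New calculation: subtract the reserve from the free pages only, then
 * add the file LRU pages on top; anonymous pages are not counted at all.
 */
static unsigned long dirtyable_new(unsigned long free, unsigned long reserve,
				   unsigned long file)
{
	unsigned long x = free;

	x -= min_ul(x, reserve);
	return x + file;
}

int main(void)
{
	/* hypothetical page counts for a tmpfs-heavy machine */
	unsigned long free = 1000, reserve = 4000;
	unsigned long file = 30000, anon = 500000;

	printf("old dirtyable estimate: %lu pages\n",
	       dirtyable_old(free, reserve, file, anon));	/* 527000 */
	printf("new dirtyable estimate: %lu pages\n",
	       dirtyable_new(free, reserve, file));		/* 30000 */
	return 0;
}

With these numbers the old estimate is dominated by anonymous memory that the VM will not actually write back, while the new estimate bottoms out at the size of the page cache itself.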
Signed-off-by: Johannes Weiner Reported-by: Tejun Heo Tested-by: Tejun Heo Reviewed-by: Rik van Riel Cc: Mel Gorman Cc: Wu Fengguang Reviewed-by: Michal Hocko Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vmstat.h | 2 -- mm/internal.h | 1 - mm/page-writeback.c | 6 ++++-- mm/vmscan.c | 23 +---------------------- 4 files changed, 5 insertions(+), 27 deletions(-) (limited to 'mm') diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index e4b948080d20..a67b38415768 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -142,8 +142,6 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone, return x; } -extern unsigned long global_reclaimable_pages(void); - #ifdef CONFIG_NUMA /* * Determine the per node value of a stat item. This function diff --git a/mm/internal.h b/mm/internal.h index 612c14f5e0f5..29e1e761f9eb 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -83,7 +83,6 @@ extern unsigned long highest_memmap_pfn; */ extern int isolate_lru_page(struct page *page); extern void putback_lru_page(struct page *page); -extern unsigned long zone_reclaimable_pages(struct zone *zone); extern bool zone_reclaimable(struct zone *zone); /* diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 61119b8a11e6..2d30e2cfe804 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -205,7 +205,8 @@ static unsigned long zone_dirtyable_memory(struct zone *zone) nr_pages = zone_page_state(zone, NR_FREE_PAGES); nr_pages -= min(nr_pages, zone->dirty_balance_reserve); - nr_pages += zone_reclaimable_pages(zone); + nr_pages += zone_page_state(zone, NR_INACTIVE_FILE); + nr_pages += zone_page_state(zone, NR_ACTIVE_FILE); return nr_pages; } @@ -258,7 +259,8 @@ static unsigned long global_dirtyable_memory(void) x = global_page_state(NR_FREE_PAGES); x -= min(x, dirty_balance_reserve); - x += global_reclaimable_pages(); + x += global_page_state(NR_INACTIVE_FILE); + x += global_page_state(NR_ACTIVE_FILE); if (!vm_highmem_is_dirtyable) x -= highmem_dirtyable_memory(x); diff --git a/mm/vmscan.c b/mm/vmscan.c index 90c4075d8d75..a9c74b409681 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -147,7 +147,7 @@ static bool global_reclaim(struct scan_control *sc) } #endif -unsigned long zone_reclaimable_pages(struct zone *zone) +static unsigned long zone_reclaimable_pages(struct zone *zone) { int nr; @@ -3315,27 +3315,6 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) wake_up_interruptible(&pgdat->kswapd_wait); } -/* - * The reclaimable count would be mostly accurate. 
- * The less reclaimable pages may be - * - mlocked pages, which will be moved to unevictable list when encountered - * - mapped pages, which may require several travels to be reclaimed - * - dirty pages, which is not "instantly" reclaimable - */ -unsigned long global_reclaimable_pages(void) -{ - int nr; - - nr = global_page_state(NR_ACTIVE_FILE) + - global_page_state(NR_INACTIVE_FILE); - - if (get_nr_swap_pages() > 0) - nr += global_page_state(NR_ACTIVE_ANON) + - global_page_state(NR_INACTIVE_ANON); - - return nr; -} - #ifdef CONFIG_HIBERNATION /* * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of -- cgit v1.2.3 From c297663c0b3930491a3cb2aba4b6e5a7159c3503 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 29 Jan 2014 14:05:42 -0800 Subject: mm: numa: initialise numa balancing after jump label initialisation The command line parsing takes place before jump labels are initialised which generates a warning if numa_balancing= is specified and CONFIG_JUMP_LABEL is set. On older kernels before commit c4b2c0c5f647 ("static_key: WARN on usage before jump_label_init was called") the kernel would have crashed. This patch enables automatic numa balancing later in the initialisation process if numa_balancing= is specified. Signed-off-by: Mel Gorman Acked-by: Rik van Riel Cc: stable Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 36cb46cddf61..79cea01f9f78 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2654,7 +2654,7 @@ void mpol_free_shared_policy(struct shared_policy *p) } #ifdef CONFIG_NUMA_BALANCING -static bool __initdata numabalancing_override; +static int __initdata numabalancing_override; static void __init check_numabalancing_enable(void) { @@ -2663,9 +2663,15 @@ static void __init check_numabalancing_enable(void) if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) numabalancing_default = true; + /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ + if (numabalancing_override) + set_numabalancing_state(numabalancing_override == 1); + if (nr_node_ids > 1 && !numabalancing_override) { - printk(KERN_INFO "Enabling automatic NUMA balancing. " - "Configure with numa_balancing= or the kernel.numa_balancing sysctl"); + printk(KERN_INFO "%s automatic NUMA balancing. " + "Configure with numa_balancing= or the " + "kernel.numa_balancing sysctl", + numabalancing_default ? "Enabling" : "Disabling"); set_numabalancing_state(numabalancing_default); } } @@ -2675,13 +2681,12 @@ static int __init setup_numabalancing(char *str) int ret = 0; if (!str) goto out; - numabalancing_override = true; if (!strcmp(str, "enable")) { - set_numabalancing_state(true); + numabalancing_override = 1; ret = 1; } else if (!strcmp(str, "disable")) { - set_numabalancing_state(false); + numabalancing_override = -1; ret = 1; } out: -- cgit v1.2.3 From 4a404bea941ac3c62e11b88c9d16197334eee2f1 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 29 Jan 2014 14:05:43 -0800 Subject: mm/mempolicy.c: convert to pr_foo() A few printk(KERN_*'s have snuck in there. 
Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 79cea01f9f78..873de7e542bc 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2668,7 +2668,7 @@ static void __init check_numabalancing_enable(void) set_numabalancing_state(numabalancing_override == 1); if (nr_node_ids > 1 && !numabalancing_override) { - printk(KERN_INFO "%s automatic NUMA balancing. " + pr_info("%s automatic NUMA balancing. " "Configure with numa_balancing= or the " "kernel.numa_balancing sysctl", numabalancing_default ? "Enabling" : "Disabling"); @@ -2691,7 +2691,7 @@ static int __init setup_numabalancing(char *str) } out: if (!ret) - printk(KERN_WARNING "Unable to parse numa_balancing=\n"); + pr_warn("Unable to parse numa_balancing=\n"); return ret; } -- cgit v1.2.3 From ba3253c78d7443d2c80c544b1e7aec9f39938395 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Wed, 29 Jan 2014 14:05:48 -0800 Subject: slab: fix wrong retval on kmem_cache_create_memcg error path On kmem_cache_create_memcg() error path we set 'err', but leave 's' (the new cache ptr) undefined. The latter can be NULL if we could not allocate the cache, or pointing to a freed area if we failed somewhere later while trying to initialize it. Initially we checked 'err' immediately before exiting the function and returned NULL if it was set ignoring the value of 's': out_unlock: ... if (err) { /* report error */ return NULL; } return s; Recently this check was, in fact, broken by commit f717eb3abb5e ("slab: do not panic if we fail to create memcg cache"), which turned it to: out_unlock: ... if (err && !memcg) { /* report error */ return NULL; } return s; As a result, if we are failing creating a cache for a memcg, we will skip the check and return 's' that can contain crap. Obviously, commit f717eb3abb5e intended not to return crap on error allocating a cache for a memcg, but only to remove the error reporting in this case, so the check should look like this: out_unlock: ... if (err) { if (!memcg) return NULL; /* report error */ return NULL; } return s; [rientjes@google.com: despaghettification] [vdavydov@parallels.com: patch monkeying] Signed-off-by: David Rientjes Signed-off-by: Vladimir Davydov Signed-off-by: Dave Jones Reported-by: Dave Jones Acked-by: Pekka Enberg Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab_common.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/slab_common.c b/mm/slab_common.c index 8e40321da091..1ec3c619ba04 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -233,14 +233,17 @@ out_unlock: mutex_unlock(&slab_mutex); put_online_cpus(); - /* - * There is no point in flooding logs with warnings or especially - * crashing the system if we fail to create a cache for a memcg. In - * this case we will be accounting the memcg allocation to the root - * cgroup until we succeed to create its own cache, but it isn't that - * critical. - */ - if (err && !memcg) { + if (err) { + /* + * There is no point in flooding logs with warnings or + * especially crashing the system if we fail to create a cache + * for a memcg. In this case we will be accounting the memcg + * allocation to the root cgroup until we succeed to create its + * own cache, but it isn't that critical. 
+ */ + if (!memcg) + return NULL; + if (flags & SLAB_PANIC) panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n", name, err); -- cgit v1.2.3 From a0132ac0f275434db32111b8cf7372d991899da3 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Wed, 29 Jan 2014 14:05:50 -0800 Subject: mm/slub.c: do not VM_BUG_ON_PAGE() for temporary on-stack pages Commit 309381feaee5 ("mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE") added a bunch of VM_BUG_ON_PAGE() calls. But, most of the ones in the slub code are for _temporary_ 'struct page's which are declared on the stack and likely have lots of gunk in them. Dumping their contents out will just confuse folks looking at bad_page() output. Plus, if we try to page_to_pfn() on them or soemthing, we'll probably oops anyway. Turn them back in to VM_BUG_ON()s. Signed-off-by: Dave Hansen Cc: Sasha Levin Cc: "Kirill A. Shutemov" Cc: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slub.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index 34bb8c65a2d8..545a170ebf9f 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1559,7 +1559,7 @@ static inline void *acquire_slab(struct kmem_cache *s, new.freelist = freelist; } - VM_BUG_ON_PAGE(new.frozen, &new); + VM_BUG_ON(new.frozen); new.frozen = 1; if (!__cmpxchg_double_slab(s, page, @@ -1812,7 +1812,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, set_freepointer(s, freelist, prior); new.counters = counters; new.inuse--; - VM_BUG_ON_PAGE(!new.frozen, &new); + VM_BUG_ON(!new.frozen); } while (!__cmpxchg_double_slab(s, page, prior, counters, @@ -1840,7 +1840,7 @@ redo: old.freelist = page->freelist; old.counters = page->counters; - VM_BUG_ON_PAGE(!old.frozen, &old); + VM_BUG_ON(!old.frozen); /* Determine target state of the slab */ new.counters = old.counters; @@ -1952,7 +1952,7 @@ static void unfreeze_partials(struct kmem_cache *s, old.freelist = page->freelist; old.counters = page->counters; - VM_BUG_ON_PAGE(!old.frozen, &old); + VM_BUG_ON(!old.frozen); new.counters = old.counters; new.freelist = old.freelist; @@ -2225,7 +2225,7 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page) counters = page->counters; new.counters = counters; - VM_BUG_ON_PAGE(!new.frozen, &new); + VM_BUG_ON(!new.frozen); new.inuse = page->objects; new.frozen = freelist != NULL; @@ -2319,7 +2319,7 @@ load_freelist: * page is pointing to the page from which the objects are obtained. * That page must be frozen for per cpu allocations to work. */ - VM_BUG_ON_PAGE(!c->page->frozen, c->page); + VM_BUG_ON(!c->page->frozen); c->freelist = get_freepointer(s, freelist); c->tid = next_tid(c->tid); local_irq_restore(flags); -- cgit v1.2.3 From 58d5640ebdb273cc817b0d0cda7bcf2efbbc2ff7 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 29 Jan 2014 14:05:51 -0800 Subject: mm/readahead.c: fix do_readahead() for no readpage(s) Commit 63d0f0a3c7e1 ("mm/readahead.c:do_readhead(): don't check for ->readpage") unintentionally made do_readahead return 0 for all valid files regardless of whether readahead was supported, rather than the expected -EINVAL. This gets forwarded on to userspace, and results in sys_readahead appearing to succeed in cases that don't make sense (e.g. when called on pipes or sockets). This issue is detected by the LTP readahead01 testcase. 
As the exact return value of force_page_cache_readahead is currently never used, we can simplify it to return only 0 or -EINVAL (when readpage or readpages is missing). With that in place we can simply forward on the return value of force_page_cache_readahead in do_readahead. This patch performs said change, restoring the expected semantics. Signed-off-by: Mark Rutland Acked-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/readahead.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/readahead.c b/mm/readahead.c index 7cdbb44aa90b..0de2360d65f3 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -211,8 +211,6 @@ out: int force_page_cache_readahead(struct address_space *mapping, struct file *filp, pgoff_t offset, unsigned long nr_to_read) { - int ret = 0; - if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages)) return -EINVAL; @@ -226,15 +224,13 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp, this_chunk = nr_to_read; err = __do_page_cache_readahead(mapping, filp, offset, this_chunk, 0); - if (err < 0) { - ret = err; - break; - } - ret += err; + if (err < 0) + return err; + offset += this_chunk; nr_to_read -= this_chunk; } - return ret; + return 0; } /* @@ -576,8 +572,7 @@ do_readahead(struct address_space *mapping, struct file *filp, if (!mapping || !mapping->a_ops) return -EINVAL; - force_page_cache_readahead(mapping, filp, index, nr); - return 0; + return force_page_cache_readahead(mapping, filp, index, nr); } SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count) -- cgit v1.2.3 From f544e14f3e765b5241d7f234fee677506b8ce07f Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 29 Jan 2014 14:05:52 -0800 Subject: memblock: add limit checking to memblock_virt_alloc In original bootmem wrapper for memblock, we have limit checking. Add it to memblock_virt_alloc, to address arm and x86 booting crash. Signed-off-by: Yinghai Lu Cc: Ingo Molnar Cc: "H. Peter Anvin" Reported-by: Kevin Hilman Tested-by: Kevin Hilman Reported-by: Olof Johansson Tested-by: Olof Johansson Reported-by: Konrad Rzeszutek Wilk Tested-by: Konrad Rzeszutek Wilk Cc: Dave Hansen Cc: Santosh Shilimkar Cc: "Strashko, Grygorii" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memblock.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 87d21a6ff63c..39a31e7f0045 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1077,6 +1077,9 @@ static void * __init memblock_virt_alloc_internal( if (!align) align = SMP_CACHE_BYTES; + if (max_addr > memblock.current_limit) + max_addr = memblock.current_limit; + again: alloc = memblock_find_in_range_node(size, align, min_addr, max_addr, nid); -- cgit v1.2.3 From bcf1647d0899666f0fb90d176abf63bae22abb7c Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Thu, 30 Jan 2014 15:45:50 -0800 Subject: zsmalloc: move it under mm This patch moves zsmalloc under mm directory. Before that, description will explain why we have needed custom allocator. Zsmalloc is a new slab-based memory allocator for storing compressed pages. It is designed for low fragmentation and high allocation success rate on large object, but <= PAGE_SIZE allocations. zsmalloc differs from the kernel slab allocator in two primary ways to achieve these design goals. zsmalloc never requires high order page allocations to back slabs, or "size classes" in zsmalloc terms. 
Instead it allows multiple single-order pages to be stitched together into a "zspage" which backs the slab. This allows for higher allocation success rate under memory pressure. Also, zsmalloc allows objects to span page boundaries within the zspage. This allows for lower fragmentation than could be had with the kernel slab allocator for objects between PAGE_SIZE/2 and PAGE_SIZE. With the kernel slab allocator, if a page compresses to 60% of it original size, the memory savings gained through compression is lost in fragmentation because another object of the same size can't be stored in the leftover space. This ability to span pages results in zsmalloc allocations not being directly addressable by the user. The user is given an non-dereferencable handle in response to an allocation request. That handle must be mapped, using zs_map_object(), which returns a pointer to the mapped region that can be used. The mapping is necessary since the object data may reside in two different noncontigious pages. The zsmalloc fulfills the allocation needs for zram perfectly [sjenning@linux.vnet.ibm.com: borrow Seth's quote] Signed-off-by: Minchan Kim Acked-by: Nitin Gupta Reviewed-by: Konrad Rzeszutek Wilk Cc: Bob Liu Cc: Greg Kroah-Hartman Cc: Hugh Dickins Cc: Jens Axboe Cc: Luigi Semenzato Cc: Mel Gorman Cc: Pekka Enberg Cc: Rik van Riel Cc: Seth Jennings Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/staging/Kconfig | 2 - drivers/staging/Makefile | 1 - drivers/staging/zram/zram_drv.h | 3 +- drivers/staging/zsmalloc/Kconfig | 24 - drivers/staging/zsmalloc/Makefile | 3 - drivers/staging/zsmalloc/zsmalloc-main.c | 1106 ------------------------------ drivers/staging/zsmalloc/zsmalloc.h | 50 -- include/linux/zsmalloc.h | 50 ++ mm/Kconfig | 25 + mm/Makefile | 1 + mm/zsmalloc.c | 1105 +++++++++++++++++++++++++++++ 11 files changed, 1182 insertions(+), 1188 deletions(-) delete mode 100644 drivers/staging/zsmalloc/Kconfig delete mode 100644 drivers/staging/zsmalloc/Makefile delete mode 100644 drivers/staging/zsmalloc/zsmalloc-main.c delete mode 100644 drivers/staging/zsmalloc/zsmalloc.h create mode 100644 include/linux/zsmalloc.h create mode 100644 mm/zsmalloc.c (limited to 'mm') diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 4bb6b11166b3..120d2fa9e531 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -76,8 +76,6 @@ source "drivers/staging/sep/Kconfig" source "drivers/staging/iio/Kconfig" -source "drivers/staging/zsmalloc/Kconfig" - source "drivers/staging/zram/Kconfig" source "drivers/staging/wlags49_h2/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 9f07e5e16094..cb19d0afa0da 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -33,7 +33,6 @@ obj-$(CONFIG_VME_BUS) += vme/ obj-$(CONFIG_DX_SEP) += sep/ obj-$(CONFIG_IIO) += iio/ obj-$(CONFIG_ZRAM) += zram/ -obj-$(CONFIG_ZSMALLOC) += zsmalloc/ obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/ obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/ obj-$(CONFIG_FB_SM7XX) += sm7xxfb/ diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h index 97a3acf6ab76..d8f6596513c3 100644 --- a/drivers/staging/zram/zram_drv.h +++ b/drivers/staging/zram/zram_drv.h @@ -17,8 +17,7 @@ #include #include - -#include "../zsmalloc/zsmalloc.h" +#include /* * Some arbitrary value. 
This is just to catch diff --git a/drivers/staging/zsmalloc/Kconfig b/drivers/staging/zsmalloc/Kconfig deleted file mode 100644 index 9d1f2a24ad62..000000000000 --- a/drivers/staging/zsmalloc/Kconfig +++ /dev/null @@ -1,24 +0,0 @@ -config ZSMALLOC - bool "Memory allocator for compressed pages" - depends on MMU - default n - help - zsmalloc is a slab-based memory allocator designed to store - compressed RAM pages. zsmalloc uses virtual memory mapping - in order to reduce fragmentation. However, this results in a - non-standard allocator interface where a handle, not a pointer, is - returned by an alloc(). This handle must be mapped in order to - access the allocated space. - -config PGTABLE_MAPPING - bool "Use page table mapping to access object in zsmalloc" - depends on ZSMALLOC - help - By default, zsmalloc uses a copy-based object mapping method to - access allocations that span two pages. However, if a particular - architecture (ex, ARM) performs VM mapping faster than copying, - then you should select this. This causes zsmalloc to use page table - mapping rather than copying for object mapping. - - You can check speed with zsmalloc benchmark[1]. - [1] https://github.com/spartacus06/zsmalloc diff --git a/drivers/staging/zsmalloc/Makefile b/drivers/staging/zsmalloc/Makefile deleted file mode 100644 index b134848a590d..000000000000 --- a/drivers/staging/zsmalloc/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -zsmalloc-y := zsmalloc-main.o - -obj-$(CONFIG_ZSMALLOC) += zsmalloc.o diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c deleted file mode 100644 index 7660c87d8b2a..000000000000 --- a/drivers/staging/zsmalloc/zsmalloc-main.c +++ /dev/null @@ -1,1106 +0,0 @@ -/* - * zsmalloc memory allocator - * - * Copyright (C) 2011 Nitin Gupta - * - * This code is released using a dual license strategy: BSD/GPL - * You can choose the license that better fits your requirements. - * - * Released under the terms of 3-clause BSD License - * Released under the terms of GNU General Public License Version 2.0 - */ - -/* - * This allocator is designed for use with zram. Thus, the allocator is - * supposed to work well under low memory conditions. In particular, it - * never attempts higher order page allocation which is very likely to - * fail under memory pressure. On the other hand, if we just use single - * (0-order) pages, it would suffer from very high fragmentation -- - * any object of size PAGE_SIZE/2 or larger would occupy an entire page. - * This was one of the major issues with its predecessor (xvmalloc). - * - * To overcome these issues, zsmalloc allocates a bunch of 0-order pages - * and links them together using various 'struct page' fields. These linked - * pages act as a single higher-order page i.e. an object can span 0-order - * page boundaries. The code refers to these linked pages as a single entity - * called zspage. - * - * For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE - * since this satisfies the requirements of all its current users (in the - * worst case, page is incompressible and is thus stored "as-is" i.e. in - * uncompressed form). For allocation requests larger than this size, failure - * is returned (see zs_malloc). - * - * Additionally, zs_malloc() does not return a dereferenceable pointer. - * Instead, it returns an opaque handle (unsigned long) which encodes actual - * location of the allocated object. 
The reason for this indirection is that - * zsmalloc does not keep zspages permanently mapped since that would cause - * issues on 32-bit systems where the VA region for kernel space mappings - * is very small. So, before using the allocating memory, the object has to - * be mapped using zs_map_object() to get a usable pointer and subsequently - * unmapped using zs_unmap_object(). - * - * Following is how we use various fields and flags of underlying - * struct page(s) to form a zspage. - * - * Usage of struct page fields: - * page->first_page: points to the first component (0-order) page - * page->index (union with page->freelist): offset of the first object - * starting in this page. For the first page, this is - * always 0, so we use this field (aka freelist) to point - * to the first free object in zspage. - * page->lru: links together all component pages (except the first page) - * of a zspage - * - * For _first_ page only: - * - * page->private (union with page->first_page): refers to the - * component page after the first page - * page->freelist: points to the first free object in zspage. - * Free objects are linked together using in-place - * metadata. - * page->objects: maximum number of objects we can store in this - * zspage (class->zspage_order * PAGE_SIZE / class->size) - * page->lru: links together first pages of various zspages. - * Basically forming list of zspages in a fullness group. - * page->mapping: class index and fullness group of the zspage - * - * Usage of struct page flags: - * PG_private: identifies the first component page - * PG_private2: identifies the last component page - * - */ - -#ifdef CONFIG_ZSMALLOC_DEBUG -#define DEBUG -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "zsmalloc.h" - -/* - * This must be power of 2 and greater than of equal to sizeof(link_free). - * These two conditions ensure that any 'struct link_free' itself doesn't - * span more than 1 page which avoids complex case of mapping 2 pages simply - * to restore link_free pointer values. - */ -#define ZS_ALIGN 8 - -/* - * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single) - * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N. - */ -#define ZS_MAX_ZSPAGE_ORDER 2 -#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER) - -/* - * Object location (, ) is encoded as - * as single (unsigned long) handle value. - * - * Note that object index is relative to system - * page it is stored in, so for each sub-page belonging - * to a zspage, obj_idx starts with 0. - * - * This is made more complicated by various memory models and PAE. - */ - -#ifndef MAX_PHYSMEM_BITS -#ifdef CONFIG_HIGHMEM64G -#define MAX_PHYSMEM_BITS 36 -#else /* !CONFIG_HIGHMEM64G */ -/* - * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just - * be PAGE_SHIFT - */ -#define MAX_PHYSMEM_BITS BITS_PER_LONG -#endif -#endif -#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT) -#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS) -#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1) - -#define MAX(a, b) ((a) >= (b) ? (a) : (b)) -/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */ -#define ZS_MIN_ALLOC_SIZE \ - MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS)) -#define ZS_MAX_ALLOC_SIZE PAGE_SIZE - -/* - * On systems with 4K page size, this gives 254 size classes! 
There is a - * trader-off here: - * - Large number of size classes is potentially wasteful as free page are - * spread across these classes - * - Small number of size classes causes large internal fragmentation - * - Probably its better to use specific size classes (empirically - * determined). NOTE: all those class sizes must be set as multiple of - * ZS_ALIGN to make sure link_free itself never has to span 2 pages. - * - * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN - * (reason above) - */ -#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> 8) -#define ZS_SIZE_CLASSES ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \ - ZS_SIZE_CLASS_DELTA + 1) - -/* - * We do not maintain any list for completely empty or full pages - */ -enum fullness_group { - ZS_ALMOST_FULL, - ZS_ALMOST_EMPTY, - _ZS_NR_FULLNESS_GROUPS, - - ZS_EMPTY, - ZS_FULL -}; - -/* - * We assign a page to ZS_ALMOST_EMPTY fullness group when: - * n <= N / f, where - * n = number of allocated objects - * N = total number of objects zspage can store - * f = 1/fullness_threshold_frac - * - * Similarly, we assign zspage to: - * ZS_ALMOST_FULL when n > N / f - * ZS_EMPTY when n == 0 - * ZS_FULL when n == N - * - * (see: fix_fullness_group()) - */ -static const int fullness_threshold_frac = 4; - -struct size_class { - /* - * Size of objects stored in this class. Must be multiple - * of ZS_ALIGN. - */ - int size; - unsigned int index; - - /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */ - int pages_per_zspage; - - spinlock_t lock; - - /* stats */ - u64 pages_allocated; - - struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS]; -}; - -/* - * Placed within free objects to form a singly linked list. - * For every zspage, first_page->freelist gives head of this list. - * - * This must be power of 2 and less than or equal to ZS_ALIGN - */ -struct link_free { - /* Handle of next free chunk (encodes ) */ - void *next; -}; - -struct zs_pool { - struct size_class size_class[ZS_SIZE_CLASSES]; - - gfp_t flags; /* allocation flags used when growing pool */ -}; - -/* - * A zspage's class index and fullness group - * are encoded in its (first)page->mapping - */ -#define CLASS_IDX_BITS 28 -#define FULLNESS_BITS 4 -#define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1) -#define FULLNESS_MASK ((1 << FULLNESS_BITS) - 1) - -struct mapping_area { -#ifdef CONFIG_PGTABLE_MAPPING - struct vm_struct *vm; /* vm area for mapping object that span pages */ -#else - char *vm_buf; /* copy buffer for objects that span pages */ -#endif - char *vm_addr; /* address of kmap_atomic()'ed pages */ - enum zs_mapmode vm_mm; /* mapping mode */ -}; - - -/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */ -static DEFINE_PER_CPU(struct mapping_area, zs_map_area); - -static int is_first_page(struct page *page) -{ - return PagePrivate(page); -} - -static int is_last_page(struct page *page) -{ - return PagePrivate2(page); -} - -static void get_zspage_mapping(struct page *page, unsigned int *class_idx, - enum fullness_group *fullness) -{ - unsigned long m; - BUG_ON(!is_first_page(page)); - - m = (unsigned long)page->mapping; - *fullness = m & FULLNESS_MASK; - *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK; -} - -static void set_zspage_mapping(struct page *page, unsigned int class_idx, - enum fullness_group fullness) -{ - unsigned long m; - BUG_ON(!is_first_page(page)); - - m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) | - (fullness & FULLNESS_MASK); - page->mapping = (struct address_space *)m; -} - -/* - * zsmalloc divides the 
pool into various size classes where each - * class maintains a list of zspages where each zspage is divided - * into equal sized chunks. Each allocation falls into one of these - * classes depending on its size. This function returns index of the - * size class which has chunk size big enough to hold the give size. - */ -static int get_size_class_index(int size) -{ - int idx = 0; - - if (likely(size > ZS_MIN_ALLOC_SIZE)) - idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, - ZS_SIZE_CLASS_DELTA); - - return idx; -} - -/* - * For each size class, zspages are divided into different groups - * depending on how "full" they are. This was done so that we could - * easily find empty or nearly empty zspages when we try to shrink - * the pool (not yet implemented). This function returns fullness - * status of the given page. - */ -static enum fullness_group get_fullness_group(struct page *page) -{ - int inuse, max_objects; - enum fullness_group fg; - BUG_ON(!is_first_page(page)); - - inuse = page->inuse; - max_objects = page->objects; - - if (inuse == 0) - fg = ZS_EMPTY; - else if (inuse == max_objects) - fg = ZS_FULL; - else if (inuse <= max_objects / fullness_threshold_frac) - fg = ZS_ALMOST_EMPTY; - else - fg = ZS_ALMOST_FULL; - - return fg; -} - -/* - * Each size class maintains various freelists and zspages are assigned - * to one of these freelists based on the number of live objects they - * have. This functions inserts the given zspage into the freelist - * identified by . - */ -static void insert_zspage(struct page *page, struct size_class *class, - enum fullness_group fullness) -{ - struct page **head; - - BUG_ON(!is_first_page(page)); - - if (fullness >= _ZS_NR_FULLNESS_GROUPS) - return; - - head = &class->fullness_list[fullness]; - if (*head) - list_add_tail(&page->lru, &(*head)->lru); - - *head = page; -} - -/* - * This function removes the given zspage from the freelist identified - * by . - */ -static void remove_zspage(struct page *page, struct size_class *class, - enum fullness_group fullness) -{ - struct page **head; - - BUG_ON(!is_first_page(page)); - - if (fullness >= _ZS_NR_FULLNESS_GROUPS) - return; - - head = &class->fullness_list[fullness]; - BUG_ON(!*head); - if (list_empty(&(*head)->lru)) - *head = NULL; - else if (*head == page) - *head = (struct page *)list_entry((*head)->lru.next, - struct page, lru); - - list_del_init(&page->lru); -} - -/* - * Each size class maintains zspages in different fullness groups depending - * on the number of live objects they contain. When allocating or freeing - * objects, the fullness status of the page can change, say, from ALMOST_FULL - * to ALMOST_EMPTY when freeing an object. This function checks if such - * a status change has occurred for the given page and accordingly moves the - * page from the freelist of the old fullness group to that of the new - * fullness group. - */ -static enum fullness_group fix_fullness_group(struct zs_pool *pool, - struct page *page) -{ - int class_idx; - struct size_class *class; - enum fullness_group currfg, newfg; - - BUG_ON(!is_first_page(page)); - - get_zspage_mapping(page, &class_idx, &currfg); - newfg = get_fullness_group(page); - if (newfg == currfg) - goto out; - - class = &pool->size_class[class_idx]; - remove_zspage(page, class, currfg); - insert_zspage(page, class, newfg); - set_zspage_mapping(page, class_idx, newfg); - -out: - return newfg; -} - -/* - * We have to decide on how many pages to link together - * to form a zspage for each size class. 
This is important - * to reduce wastage due to unusable space left at end of - * each zspage which is given as: - * wastage = Zp - Zp % size_class - * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ... - * - * For example, for size class of 3/8 * PAGE_SIZE, we should - * link together 3 PAGE_SIZE sized pages to form a zspage - * since then we can perfectly fit in 8 such objects. - */ -static int get_pages_per_zspage(int class_size) -{ - int i, max_usedpc = 0; - /* zspage order which gives maximum used size per KB */ - int max_usedpc_order = 1; - - for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) { - int zspage_size; - int waste, usedpc; - - zspage_size = i * PAGE_SIZE; - waste = zspage_size % class_size; - usedpc = (zspage_size - waste) * 100 / zspage_size; - - if (usedpc > max_usedpc) { - max_usedpc = usedpc; - max_usedpc_order = i; - } - } - - return max_usedpc_order; -} - -/* - * A single 'zspage' is composed of many system pages which are - * linked together using fields in struct page. This function finds - * the first/head page, given any component page of a zspage. - */ -static struct page *get_first_page(struct page *page) -{ - if (is_first_page(page)) - return page; - else - return page->first_page; -} - -static struct page *get_next_page(struct page *page) -{ - struct page *next; - - if (is_last_page(page)) - next = NULL; - else if (is_first_page(page)) - next = (struct page *)page_private(page); - else - next = list_entry(page->lru.next, struct page, lru); - - return next; -} - -/* - * Encode as a single handle value. - * On hardware platforms with physical memory starting at 0x0 the pfn - * could be 0 so we ensure that the handle will never be 0 by adjusting the - * encoded obj_idx value before encoding. - */ -static void *obj_location_to_handle(struct page *page, unsigned long obj_idx) -{ - unsigned long handle; - - if (!page) { - BUG_ON(obj_idx); - return NULL; - } - - handle = page_to_pfn(page) << OBJ_INDEX_BITS; - handle |= ((obj_idx + 1) & OBJ_INDEX_MASK); - - return (void *)handle; -} - -/* - * Decode pair from the given object handle. We adjust the - * decoded obj_idx back to its original value since it was adjusted in - * obj_location_to_handle(). 
- */ -static void obj_handle_to_location(unsigned long handle, struct page **page, - unsigned long *obj_idx) -{ - *page = pfn_to_page(handle >> OBJ_INDEX_BITS); - *obj_idx = (handle & OBJ_INDEX_MASK) - 1; -} - -static unsigned long obj_idx_to_offset(struct page *page, - unsigned long obj_idx, int class_size) -{ - unsigned long off = 0; - - if (!is_first_page(page)) - off = page->index; - - return off + obj_idx * class_size; -} - -static void reset_page(struct page *page) -{ - clear_bit(PG_private, &page->flags); - clear_bit(PG_private_2, &page->flags); - set_page_private(page, 0); - page->mapping = NULL; - page->freelist = NULL; - page_mapcount_reset(page); -} - -static void free_zspage(struct page *first_page) -{ - struct page *nextp, *tmp, *head_extra; - - BUG_ON(!is_first_page(first_page)); - BUG_ON(first_page->inuse); - - head_extra = (struct page *)page_private(first_page); - - reset_page(first_page); - __free_page(first_page); - - /* zspage with only 1 system page */ - if (!head_extra) - return; - - list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) { - list_del(&nextp->lru); - reset_page(nextp); - __free_page(nextp); - } - reset_page(head_extra); - __free_page(head_extra); -} - -/* Initialize a newly allocated zspage */ -static void init_zspage(struct page *first_page, struct size_class *class) -{ - unsigned long off = 0; - struct page *page = first_page; - - BUG_ON(!is_first_page(first_page)); - while (page) { - struct page *next_page; - struct link_free *link; - unsigned int i, objs_on_page; - - /* - * page->index stores offset of first object starting - * in the page. For the first page, this is always 0, - * so we use first_page->index (aka ->freelist) to store - * head of corresponding zspage's freelist. - */ - if (page != first_page) - page->index = off; - - link = (struct link_free *)kmap_atomic(page) + - off / sizeof(*link); - objs_on_page = (PAGE_SIZE - off) / class->size; - - for (i = 1; i <= objs_on_page; i++) { - off += class->size; - if (off < PAGE_SIZE) { - link->next = obj_location_to_handle(page, i); - link += class->size / sizeof(*link); - } - } - - /* - * We now come to the last (full or partial) object on this - * page, which must point to the first object on the next - * page (if present) - */ - next_page = get_next_page(page); - link->next = obj_location_to_handle(next_page, 0); - kunmap_atomic(link); - page = next_page; - off = (off + class->size) % PAGE_SIZE; - } -} - -/* - * Allocate a zspage for the given size class - */ -static struct page *alloc_zspage(struct size_class *class, gfp_t flags) -{ - int i, error; - struct page *first_page = NULL, *uninitialized_var(prev_page); - - /* - * Allocate individual pages and link them together as: - * 1. first page->private = first sub-page - * 2. all sub-pages are linked together using page->lru - * 3. each sub-page is linked to the first page using page->first_page - * - * For each size class, First/Head pages are linked together using - * page->lru. Also, we set PG_private to identify the first page - * (i.e. no other sub-page has this flag set) and PG_private_2 to - * identify the last page. 
- */ - error = -ENOMEM; - for (i = 0; i < class->pages_per_zspage; i++) { - struct page *page; - - page = alloc_page(flags); - if (!page) - goto cleanup; - - INIT_LIST_HEAD(&page->lru); - if (i == 0) { /* first page */ - SetPagePrivate(page); - set_page_private(page, 0); - first_page = page; - first_page->inuse = 0; - } - if (i == 1) - set_page_private(first_page, (unsigned long)page); - if (i >= 1) - page->first_page = first_page; - if (i >= 2) - list_add(&page->lru, &prev_page->lru); - if (i == class->pages_per_zspage - 1) /* last page */ - SetPagePrivate2(page); - prev_page = page; - } - - init_zspage(first_page, class); - - first_page->freelist = obj_location_to_handle(first_page, 0); - /* Maximum number of objects we can store in this zspage */ - first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size; - - error = 0; /* Success */ - -cleanup: - if (unlikely(error) && first_page) { - free_zspage(first_page); - first_page = NULL; - } - - return first_page; -} - -static struct page *find_get_zspage(struct size_class *class) -{ - int i; - struct page *page; - - for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) { - page = class->fullness_list[i]; - if (page) - break; - } - - return page; -} - -#ifdef CONFIG_PGTABLE_MAPPING -static inline int __zs_cpu_up(struct mapping_area *area) -{ - /* - * Make sure we don't leak memory if a cpu UP notification - * and zs_init() race and both call zs_cpu_up() on the same cpu - */ - if (area->vm) - return 0; - area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL); - if (!area->vm) - return -ENOMEM; - return 0; -} - -static inline void __zs_cpu_down(struct mapping_area *area) -{ - if (area->vm) - free_vm_area(area->vm); - area->vm = NULL; -} - -static inline void *__zs_map_object(struct mapping_area *area, - struct page *pages[2], int off, int size) -{ - BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, &pages)); - area->vm_addr = area->vm->addr; - return area->vm_addr + off; -} - -static inline void __zs_unmap_object(struct mapping_area *area, - struct page *pages[2], int off, int size) -{ - unsigned long addr = (unsigned long)area->vm_addr; - - unmap_kernel_range(addr, PAGE_SIZE * 2); -} - -#else /* CONFIG_PGTABLE_MAPPING */ - -static inline int __zs_cpu_up(struct mapping_area *area) -{ - /* - * Make sure we don't leak memory if a cpu UP notification - * and zs_init() race and both call zs_cpu_up() on the same cpu - */ - if (area->vm_buf) - return 0; - area->vm_buf = (char *)__get_free_page(GFP_KERNEL); - if (!area->vm_buf) - return -ENOMEM; - return 0; -} - -static inline void __zs_cpu_down(struct mapping_area *area) -{ - if (area->vm_buf) - free_page((unsigned long)area->vm_buf); - area->vm_buf = NULL; -} - -static void *__zs_map_object(struct mapping_area *area, - struct page *pages[2], int off, int size) -{ - int sizes[2]; - void *addr; - char *buf = area->vm_buf; - - /* disable page faults to match kmap_atomic() return conditions */ - pagefault_disable(); - - /* no read fastpath */ - if (area->vm_mm == ZS_MM_WO) - goto out; - - sizes[0] = PAGE_SIZE - off; - sizes[1] = size - sizes[0]; - - /* copy object to per-cpu buffer */ - addr = kmap_atomic(pages[0]); - memcpy(buf, addr + off, sizes[0]); - kunmap_atomic(addr); - addr = kmap_atomic(pages[1]); - memcpy(buf + sizes[0], addr, sizes[1]); - kunmap_atomic(addr); -out: - return area->vm_buf; -} - -static void __zs_unmap_object(struct mapping_area *area, - struct page *pages[2], int off, int size) -{ - int sizes[2]; - void *addr; - char *buf = area->vm_buf; - - /* no write fastpath */ - if (area->vm_mm == 
ZS_MM_RO) - goto out; - - sizes[0] = PAGE_SIZE - off; - sizes[1] = size - sizes[0]; - - /* copy per-cpu buffer to object */ - addr = kmap_atomic(pages[0]); - memcpy(addr + off, buf, sizes[0]); - kunmap_atomic(addr); - addr = kmap_atomic(pages[1]); - memcpy(addr, buf + sizes[0], sizes[1]); - kunmap_atomic(addr); - -out: - /* enable page faults to match kunmap_atomic() return conditions */ - pagefault_enable(); -} - -#endif /* CONFIG_PGTABLE_MAPPING */ - -static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action, - void *pcpu) -{ - int ret, cpu = (long)pcpu; - struct mapping_area *area; - - switch (action) { - case CPU_UP_PREPARE: - area = &per_cpu(zs_map_area, cpu); - ret = __zs_cpu_up(area); - if (ret) - return notifier_from_errno(ret); - break; - case CPU_DEAD: - case CPU_UP_CANCELED: - area = &per_cpu(zs_map_area, cpu); - __zs_cpu_down(area); - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block zs_cpu_nb = { - .notifier_call = zs_cpu_notifier -}; - -static void zs_exit(void) -{ - int cpu; - - for_each_online_cpu(cpu) - zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu); - unregister_cpu_notifier(&zs_cpu_nb); -} - -static int zs_init(void) -{ - int cpu, ret; - - register_cpu_notifier(&zs_cpu_nb); - for_each_online_cpu(cpu) { - ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu); - if (notifier_to_errno(ret)) - goto fail; - } - return 0; -fail: - zs_exit(); - return notifier_to_errno(ret); -} - -/** - * zs_create_pool - Creates an allocation pool to work from. - * @flags: allocation flags used to allocate pool metadata - * - * This function must be called before anything when using - * the zsmalloc allocator. - * - * On success, a pointer to the newly created pool is returned, - * otherwise NULL. - */ -struct zs_pool *zs_create_pool(gfp_t flags) -{ - int i, ovhd_size; - struct zs_pool *pool; - - ovhd_size = roundup(sizeof(*pool), PAGE_SIZE); - pool = kzalloc(ovhd_size, GFP_KERNEL); - if (!pool) - return NULL; - - for (i = 0; i < ZS_SIZE_CLASSES; i++) { - int size; - struct size_class *class; - - size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA; - if (size > ZS_MAX_ALLOC_SIZE) - size = ZS_MAX_ALLOC_SIZE; - - class = &pool->size_class[i]; - class->size = size; - class->index = i; - spin_lock_init(&class->lock); - class->pages_per_zspage = get_pages_per_zspage(size); - - } - - pool->flags = flags; - - return pool; -} -EXPORT_SYMBOL_GPL(zs_create_pool); - -void zs_destroy_pool(struct zs_pool *pool) -{ - int i; - - for (i = 0; i < ZS_SIZE_CLASSES; i++) { - int fg; - struct size_class *class = &pool->size_class[i]; - - for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) { - if (class->fullness_list[fg]) { - pr_info("Freeing non-empty class with size %db, fullness group %d\n", - class->size, fg); - } - } - } - kfree(pool); -} -EXPORT_SYMBOL_GPL(zs_destroy_pool); - -/** - * zs_malloc - Allocate block of given size from pool. - * @pool: pool to allocate from - * @size: size of block to allocate - * - * On success, handle to the allocated object is returned, - * otherwise 0. - * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail. 
- */ -unsigned long zs_malloc(struct zs_pool *pool, size_t size) -{ - unsigned long obj; - struct link_free *link; - int class_idx; - struct size_class *class; - - struct page *first_page, *m_page; - unsigned long m_objidx, m_offset; - - if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE)) - return 0; - - class_idx = get_size_class_index(size); - class = &pool->size_class[class_idx]; - BUG_ON(class_idx != class->index); - - spin_lock(&class->lock); - first_page = find_get_zspage(class); - - if (!first_page) { - spin_unlock(&class->lock); - first_page = alloc_zspage(class, pool->flags); - if (unlikely(!first_page)) - return 0; - - set_zspage_mapping(first_page, class->index, ZS_EMPTY); - spin_lock(&class->lock); - class->pages_allocated += class->pages_per_zspage; - } - - obj = (unsigned long)first_page->freelist; - obj_handle_to_location(obj, &m_page, &m_objidx); - m_offset = obj_idx_to_offset(m_page, m_objidx, class->size); - - link = (struct link_free *)kmap_atomic(m_page) + - m_offset / sizeof(*link); - first_page->freelist = link->next; - memset(link, POISON_INUSE, sizeof(*link)); - kunmap_atomic(link); - - first_page->inuse++; - /* Now move the zspage to another fullness group, if required */ - fix_fullness_group(pool, first_page); - spin_unlock(&class->lock); - - return obj; -} -EXPORT_SYMBOL_GPL(zs_malloc); - -void zs_free(struct zs_pool *pool, unsigned long obj) -{ - struct link_free *link; - struct page *first_page, *f_page; - unsigned long f_objidx, f_offset; - - int class_idx; - struct size_class *class; - enum fullness_group fullness; - - if (unlikely(!obj)) - return; - - obj_handle_to_location(obj, &f_page, &f_objidx); - first_page = get_first_page(f_page); - - get_zspage_mapping(first_page, &class_idx, &fullness); - class = &pool->size_class[class_idx]; - f_offset = obj_idx_to_offset(f_page, f_objidx, class->size); - - spin_lock(&class->lock); - - /* Insert this object in containing zspage's freelist */ - link = (struct link_free *)((unsigned char *)kmap_atomic(f_page) - + f_offset); - link->next = first_page->freelist; - kunmap_atomic(link); - first_page->freelist = (void *)obj; - - first_page->inuse--; - fullness = fix_fullness_group(pool, first_page); - - if (fullness == ZS_EMPTY) - class->pages_allocated -= class->pages_per_zspage; - - spin_unlock(&class->lock); - - if (fullness == ZS_EMPTY) - free_zspage(first_page); -} -EXPORT_SYMBOL_GPL(zs_free); - -/** - * zs_map_object - get address of allocated object from handle. - * @pool: pool from which the object was allocated - * @handle: handle returned from zs_malloc - * - * Before using an object allocated from zs_malloc, it must be mapped using - * this function. When done with the object, it must be unmapped using - * zs_unmap_object. - * - * Only one object can be mapped per cpu at a time. There is no protection - * against nested mappings. - * - * This function returns with preemption and page faults disabled. - */ -void *zs_map_object(struct zs_pool *pool, unsigned long handle, - enum zs_mapmode mm) -{ - struct page *page; - unsigned long obj_idx, off; - - unsigned int class_idx; - enum fullness_group fg; - struct size_class *class; - struct mapping_area *area; - struct page *pages[2]; - - BUG_ON(!handle); - - /* - * Because we use per-cpu mapping areas shared among the - * pools/users, we can't allow mapping in interrupt context - * because it can corrupt another users mappings. 
- */ - BUG_ON(in_interrupt()); - - obj_handle_to_location(handle, &page, &obj_idx); - get_zspage_mapping(get_first_page(page), &class_idx, &fg); - class = &pool->size_class[class_idx]; - off = obj_idx_to_offset(page, obj_idx, class->size); - - area = &get_cpu_var(zs_map_area); - area->vm_mm = mm; - if (off + class->size <= PAGE_SIZE) { - /* this object is contained entirely within a page */ - area->vm_addr = kmap_atomic(page); - return area->vm_addr + off; - } - - /* this object spans two pages */ - pages[0] = page; - pages[1] = get_next_page(page); - BUG_ON(!pages[1]); - - return __zs_map_object(area, pages, off, class->size); -} -EXPORT_SYMBOL_GPL(zs_map_object); - -void zs_unmap_object(struct zs_pool *pool, unsigned long handle) -{ - struct page *page; - unsigned long obj_idx, off; - - unsigned int class_idx; - enum fullness_group fg; - struct size_class *class; - struct mapping_area *area; - - BUG_ON(!handle); - - obj_handle_to_location(handle, &page, &obj_idx); - get_zspage_mapping(get_first_page(page), &class_idx, &fg); - class = &pool->size_class[class_idx]; - off = obj_idx_to_offset(page, obj_idx, class->size); - - area = &__get_cpu_var(zs_map_area); - if (off + class->size <= PAGE_SIZE) - kunmap_atomic(area->vm_addr); - else { - struct page *pages[2]; - - pages[0] = page; - pages[1] = get_next_page(page); - BUG_ON(!pages[1]); - - __zs_unmap_object(area, pages, off, class->size); - } - put_cpu_var(zs_map_area); -} -EXPORT_SYMBOL_GPL(zs_unmap_object); - -u64 zs_get_total_size_bytes(struct zs_pool *pool) -{ - int i; - u64 npages = 0; - - for (i = 0; i < ZS_SIZE_CLASSES; i++) - npages += pool->size_class[i].pages_allocated; - - return npages << PAGE_SHIFT; -} -EXPORT_SYMBOL_GPL(zs_get_total_size_bytes); - -module_init(zs_init); -module_exit(zs_exit); - -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("Nitin Gupta "); diff --git a/drivers/staging/zsmalloc/zsmalloc.h b/drivers/staging/zsmalloc/zsmalloc.h deleted file mode 100644 index c2eb174b97ee..000000000000 --- a/drivers/staging/zsmalloc/zsmalloc.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * zsmalloc memory allocator - * - * Copyright (C) 2011 Nitin Gupta - * - * This code is released using a dual license strategy: BSD/GPL - * You can choose the license that better fits your requirements. - * - * Released under the terms of 3-clause BSD License - * Released under the terms of GNU General Public License Version 2.0 - */ - -#ifndef _ZS_MALLOC_H_ -#define _ZS_MALLOC_H_ - -#include - -/* - * zsmalloc mapping modes - * - * NOTE: These only make a difference when a mapped object spans pages. - * They also have no effect when PGTABLE_MAPPING is selected. - */ -enum zs_mapmode { - ZS_MM_RW, /* normal read-write mapping */ - ZS_MM_RO, /* read-only (no copy-out at unmap time) */ - ZS_MM_WO /* write-only (no copy-in at map time) */ - /* - * NOTE: ZS_MM_WO should only be used for initializing new - * (uninitialized) allocations. Partial writes to already - * initialized allocations should use ZS_MM_RW to preserve the - * existing data. 
- */ -}; - -struct zs_pool; - -struct zs_pool *zs_create_pool(gfp_t flags); -void zs_destroy_pool(struct zs_pool *pool); - -unsigned long zs_malloc(struct zs_pool *pool, size_t size); -void zs_free(struct zs_pool *pool, unsigned long obj); - -void *zs_map_object(struct zs_pool *pool, unsigned long handle, - enum zs_mapmode mm); -void zs_unmap_object(struct zs_pool *pool, unsigned long handle); - -u64 zs_get_total_size_bytes(struct zs_pool *pool); - -#endif diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h new file mode 100644 index 000000000000..c2eb174b97ee --- /dev/null +++ b/include/linux/zsmalloc.h @@ -0,0 +1,50 @@ +/* + * zsmalloc memory allocator + * + * Copyright (C) 2011 Nitin Gupta + * + * This code is released using a dual license strategy: BSD/GPL + * You can choose the license that better fits your requirements. + * + * Released under the terms of 3-clause BSD License + * Released under the terms of GNU General Public License Version 2.0 + */ + +#ifndef _ZS_MALLOC_H_ +#define _ZS_MALLOC_H_ + +#include + +/* + * zsmalloc mapping modes + * + * NOTE: These only make a difference when a mapped object spans pages. + * They also have no effect when PGTABLE_MAPPING is selected. + */ +enum zs_mapmode { + ZS_MM_RW, /* normal read-write mapping */ + ZS_MM_RO, /* read-only (no copy-out at unmap time) */ + ZS_MM_WO /* write-only (no copy-in at map time) */ + /* + * NOTE: ZS_MM_WO should only be used for initializing new + * (uninitialized) allocations. Partial writes to already + * initialized allocations should use ZS_MM_RW to preserve the + * existing data. + */ +}; + +struct zs_pool; + +struct zs_pool *zs_create_pool(gfp_t flags); +void zs_destroy_pool(struct zs_pool *pool); + +unsigned long zs_malloc(struct zs_pool *pool, size_t size); +void zs_free(struct zs_pool *pool, unsigned long obj); + +void *zs_map_object(struct zs_pool *pool, unsigned long handle, + enum zs_mapmode mm); +void zs_unmap_object(struct zs_pool *pool, unsigned long handle); + +u64 zs_get_total_size_bytes(struct zs_pool *pool); + +#endif diff --git a/mm/Kconfig b/mm/Kconfig index 723bbe04a0b0..2d9f1504d75e 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -552,3 +552,28 @@ config MEM_SOFT_DIRTY it can be cleared by hands. See Documentation/vm/soft-dirty.txt for more details. + +config ZSMALLOC + bool "Memory allocator for compressed pages" + depends on MMU + default n + help + zsmalloc is a slab-based memory allocator designed to store + compressed RAM pages. zsmalloc uses virtual memory mapping + in order to reduce fragmentation. However, this results in a + non-standard allocator interface where a handle, not a pointer, is + returned by an alloc(). This handle must be mapped in order to + access the allocated space. + +config PGTABLE_MAPPING + bool "Use page table mapping to access object in zsmalloc" + depends on ZSMALLOC + help + By default, zsmalloc uses a copy-based object mapping method to + access allocations that span two pages. However, if a particular + architecture (ex, ARM) performs VM mapping faster than copying, + then you should select this. This causes zsmalloc to use page table + mapping rather than copying for object mapping. + + You can check speed with zsmalloc benchmark[1]. 
+ [1] https://github.com/spartacus06/zsmalloc diff --git a/mm/Makefile b/mm/Makefile index 305d10acd081..310c90a09264 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -60,3 +60,4 @@ obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o obj-$(CONFIG_CLEANCACHE) += cleancache.o obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o obj-$(CONFIG_ZBUD) += zbud.o +obj-$(CONFIG_ZSMALLOC) += zsmalloc.o diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c new file mode 100644 index 000000000000..5d42adfcb67b --- /dev/null +++ b/mm/zsmalloc.c @@ -0,0 +1,1105 @@ +/* + * zsmalloc memory allocator + * + * Copyright (C) 2011 Nitin Gupta + * + * This code is released using a dual license strategy: BSD/GPL + * You can choose the license that better fits your requirements. + * + * Released under the terms of 3-clause BSD License + * Released under the terms of GNU General Public License Version 2.0 + */ + +/* + * This allocator is designed for use with zram. Thus, the allocator is + * supposed to work well under low memory conditions. In particular, it + * never attempts higher order page allocation which is very likely to + * fail under memory pressure. On the other hand, if we just use single + * (0-order) pages, it would suffer from very high fragmentation -- + * any object of size PAGE_SIZE/2 or larger would occupy an entire page. + * This was one of the major issues with its predecessor (xvmalloc). + * + * To overcome these issues, zsmalloc allocates a bunch of 0-order pages + * and links them together using various 'struct page' fields. These linked + * pages act as a single higher-order page i.e. an object can span 0-order + * page boundaries. The code refers to these linked pages as a single entity + * called zspage. + * + * For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE + * since this satisfies the requirements of all its current users (in the + * worst case, page is incompressible and is thus stored "as-is" i.e. in + * uncompressed form). For allocation requests larger than this size, failure + * is returned (see zs_malloc). + * + * Additionally, zs_malloc() does not return a dereferenceable pointer. + * Instead, it returns an opaque handle (unsigned long) which encodes actual + * location of the allocated object. The reason for this indirection is that + * zsmalloc does not keep zspages permanently mapped since that would cause + * issues on 32-bit systems where the VA region for kernel space mappings + * is very small. So, before using the allocating memory, the object has to + * be mapped using zs_map_object() to get a usable pointer and subsequently + * unmapped using zs_unmap_object(). + * + * Following is how we use various fields and flags of underlying + * struct page(s) to form a zspage. + * + * Usage of struct page fields: + * page->first_page: points to the first component (0-order) page + * page->index (union with page->freelist): offset of the first object + * starting in this page. For the first page, this is + * always 0, so we use this field (aka freelist) to point + * to the first free object in zspage. + * page->lru: links together all component pages (except the first page) + * of a zspage + * + * For _first_ page only: + * + * page->private (union with page->first_page): refers to the + * component page after the first page + * page->freelist: points to the first free object in zspage. + * Free objects are linked together using in-place + * metadata. 
+ * page->objects: maximum number of objects we can store in this + * zspage (class->zspage_order * PAGE_SIZE / class->size) + * page->lru: links together first pages of various zspages. + * Basically forming list of zspages in a fullness group. + * page->mapping: class index and fullness group of the zspage + * + * Usage of struct page flags: + * PG_private: identifies the first component page + * PG_private2: identifies the last component page + * + */ + +#ifdef CONFIG_ZSMALLOC_DEBUG +#define DEBUG +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * This must be power of 2 and greater than of equal to sizeof(link_free). + * These two conditions ensure that any 'struct link_free' itself doesn't + * span more than 1 page which avoids complex case of mapping 2 pages simply + * to restore link_free pointer values. + */ +#define ZS_ALIGN 8 + +/* + * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single) + * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N. + */ +#define ZS_MAX_ZSPAGE_ORDER 2 +#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER) + +/* + * Object location (, ) is encoded as + * as single (unsigned long) handle value. + * + * Note that object index is relative to system + * page it is stored in, so for each sub-page belonging + * to a zspage, obj_idx starts with 0. + * + * This is made more complicated by various memory models and PAE. + */ + +#ifndef MAX_PHYSMEM_BITS +#ifdef CONFIG_HIGHMEM64G +#define MAX_PHYSMEM_BITS 36 +#else /* !CONFIG_HIGHMEM64G */ +/* + * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just + * be PAGE_SHIFT + */ +#define MAX_PHYSMEM_BITS BITS_PER_LONG +#endif +#endif +#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT) +#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS) +#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1) + +#define MAX(a, b) ((a) >= (b) ? (a) : (b)) +/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */ +#define ZS_MIN_ALLOC_SIZE \ + MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS)) +#define ZS_MAX_ALLOC_SIZE PAGE_SIZE + +/* + * On systems with 4K page size, this gives 254 size classes! There is a + * trader-off here: + * - Large number of size classes is potentially wasteful as free page are + * spread across these classes + * - Small number of size classes causes large internal fragmentation + * - Probably its better to use specific size classes (empirically + * determined). NOTE: all those class sizes must be set as multiple of + * ZS_ALIGN to make sure link_free itself never has to span 2 pages. 
+ * + * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN + * (reason above) + */ +#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> 8) +#define ZS_SIZE_CLASSES ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \ + ZS_SIZE_CLASS_DELTA + 1) + +/* + * We do not maintain any list for completely empty or full pages + */ +enum fullness_group { + ZS_ALMOST_FULL, + ZS_ALMOST_EMPTY, + _ZS_NR_FULLNESS_GROUPS, + + ZS_EMPTY, + ZS_FULL +}; + +/* + * We assign a page to ZS_ALMOST_EMPTY fullness group when: + * n <= N / f, where + * n = number of allocated objects + * N = total number of objects zspage can store + * f = 1/fullness_threshold_frac + * + * Similarly, we assign zspage to: + * ZS_ALMOST_FULL when n > N / f + * ZS_EMPTY when n == 0 + * ZS_FULL when n == N + * + * (see: fix_fullness_group()) + */ +static const int fullness_threshold_frac = 4; + +struct size_class { + /* + * Size of objects stored in this class. Must be multiple + * of ZS_ALIGN. + */ + int size; + unsigned int index; + + /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */ + int pages_per_zspage; + + spinlock_t lock; + + /* stats */ + u64 pages_allocated; + + struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS]; +}; + +/* + * Placed within free objects to form a singly linked list. + * For every zspage, first_page->freelist gives head of this list. + * + * This must be power of 2 and less than or equal to ZS_ALIGN + */ +struct link_free { + /* Handle of next free chunk (encodes ) */ + void *next; +}; + +struct zs_pool { + struct size_class size_class[ZS_SIZE_CLASSES]; + + gfp_t flags; /* allocation flags used when growing pool */ +}; + +/* + * A zspage's class index and fullness group + * are encoded in its (first)page->mapping + */ +#define CLASS_IDX_BITS 28 +#define FULLNESS_BITS 4 +#define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1) +#define FULLNESS_MASK ((1 << FULLNESS_BITS) - 1) + +struct mapping_area { +#ifdef CONFIG_PGTABLE_MAPPING + struct vm_struct *vm; /* vm area for mapping object that span pages */ +#else + char *vm_buf; /* copy buffer for objects that span pages */ +#endif + char *vm_addr; /* address of kmap_atomic()'ed pages */ + enum zs_mapmode vm_mm; /* mapping mode */ +}; + + +/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */ +static DEFINE_PER_CPU(struct mapping_area, zs_map_area); + +static int is_first_page(struct page *page) +{ + return PagePrivate(page); +} + +static int is_last_page(struct page *page) +{ + return PagePrivate2(page); +} + +static void get_zspage_mapping(struct page *page, unsigned int *class_idx, + enum fullness_group *fullness) +{ + unsigned long m; + BUG_ON(!is_first_page(page)); + + m = (unsigned long)page->mapping; + *fullness = m & FULLNESS_MASK; + *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK; +} + +static void set_zspage_mapping(struct page *page, unsigned int class_idx, + enum fullness_group fullness) +{ + unsigned long m; + BUG_ON(!is_first_page(page)); + + m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) | + (fullness & FULLNESS_MASK); + page->mapping = (struct address_space *)m; +} + +/* + * zsmalloc divides the pool into various size classes where each + * class maintains a list of zspages where each zspage is divided + * into equal sized chunks. Each allocation falls into one of these + * classes depending on its size. This function returns index of the + * size class which has chunk size big enough to hold the give size. 
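As a rough worked example (not part of the patch): on a typical 64-bit build with 4K pages, ZS_MIN_ALLOC_SIZE works out to 32 and ZS_SIZE_CLASS_DELTA to PAGE_SIZE >> 8 = 16, so a 100-byte request is steered to the 112-byte class:

	/* illustrative only: how a 100-byte allocation picks its class */
	int size  = 100;
	int idx   = DIV_ROUND_UP(size - 32 /* ZS_MIN_ALLOC_SIZE */,
				 16 /* ZS_SIZE_CLASS_DELTA */);	/* = 5 */
	int chunk = 32 + idx * 16;	/* = 112, smallest class that fits 100 */
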
+ */ +static int get_size_class_index(int size) +{ + int idx = 0; + + if (likely(size > ZS_MIN_ALLOC_SIZE)) + idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, + ZS_SIZE_CLASS_DELTA); + + return idx; +} + +/* + * For each size class, zspages are divided into different groups + * depending on how "full" they are. This was done so that we could + * easily find empty or nearly empty zspages when we try to shrink + * the pool (not yet implemented). This function returns fullness + * status of the given page. + */ +static enum fullness_group get_fullness_group(struct page *page) +{ + int inuse, max_objects; + enum fullness_group fg; + BUG_ON(!is_first_page(page)); + + inuse = page->inuse; + max_objects = page->objects; + + if (inuse == 0) + fg = ZS_EMPTY; + else if (inuse == max_objects) + fg = ZS_FULL; + else if (inuse <= max_objects / fullness_threshold_frac) + fg = ZS_ALMOST_EMPTY; + else + fg = ZS_ALMOST_FULL; + + return fg; +} + +/* + * Each size class maintains various freelists and zspages are assigned + * to one of these freelists based on the number of live objects they + * have. This functions inserts the given zspage into the freelist + * identified by . + */ +static void insert_zspage(struct page *page, struct size_class *class, + enum fullness_group fullness) +{ + struct page **head; + + BUG_ON(!is_first_page(page)); + + if (fullness >= _ZS_NR_FULLNESS_GROUPS) + return; + + head = &class->fullness_list[fullness]; + if (*head) + list_add_tail(&page->lru, &(*head)->lru); + + *head = page; +} + +/* + * This function removes the given zspage from the freelist identified + * by . + */ +static void remove_zspage(struct page *page, struct size_class *class, + enum fullness_group fullness) +{ + struct page **head; + + BUG_ON(!is_first_page(page)); + + if (fullness >= _ZS_NR_FULLNESS_GROUPS) + return; + + head = &class->fullness_list[fullness]; + BUG_ON(!*head); + if (list_empty(&(*head)->lru)) + *head = NULL; + else if (*head == page) + *head = (struct page *)list_entry((*head)->lru.next, + struct page, lru); + + list_del_init(&page->lru); +} + +/* + * Each size class maintains zspages in different fullness groups depending + * on the number of live objects they contain. When allocating or freeing + * objects, the fullness status of the page can change, say, from ALMOST_FULL + * to ALMOST_EMPTY when freeing an object. This function checks if such + * a status change has occurred for the given page and accordingly moves the + * page from the freelist of the old fullness group to that of the new + * fullness group. + */ +static enum fullness_group fix_fullness_group(struct zs_pool *pool, + struct page *page) +{ + int class_idx; + struct size_class *class; + enum fullness_group currfg, newfg; + + BUG_ON(!is_first_page(page)); + + get_zspage_mapping(page, &class_idx, &currfg); + newfg = get_fullness_group(page); + if (newfg == currfg) + goto out; + + class = &pool->size_class[class_idx]; + remove_zspage(page, class, currfg); + insert_zspage(page, class, newfg); + set_zspage_mapping(page, class_idx, newfg); + +out: + return newfg; +} + +/* + * We have to decide on how many pages to link together + * to form a zspage for each size class. This is important + * to reduce wastage due to unusable space left at end of + * each zspage which is given as: + * wastage = Zp - Zp % size_class + * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ... 
+ * + * For example, for size class of 3/8 * PAGE_SIZE, we should + * link together 3 PAGE_SIZE sized pages to form a zspage + * since then we can perfectly fit in 8 such objects. + */ +static int get_pages_per_zspage(int class_size) +{ + int i, max_usedpc = 0; + /* zspage order which gives maximum used size per KB */ + int max_usedpc_order = 1; + + for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) { + int zspage_size; + int waste, usedpc; + + zspage_size = i * PAGE_SIZE; + waste = zspage_size % class_size; + usedpc = (zspage_size - waste) * 100 / zspage_size; + + if (usedpc > max_usedpc) { + max_usedpc = usedpc; + max_usedpc_order = i; + } + } + + return max_usedpc_order; +} + +/* + * A single 'zspage' is composed of many system pages which are + * linked together using fields in struct page. This function finds + * the first/head page, given any component page of a zspage. + */ +static struct page *get_first_page(struct page *page) +{ + if (is_first_page(page)) + return page; + else + return page->first_page; +} + +static struct page *get_next_page(struct page *page) +{ + struct page *next; + + if (is_last_page(page)) + next = NULL; + else if (is_first_page(page)) + next = (struct page *)page_private(page); + else + next = list_entry(page->lru.next, struct page, lru); + + return next; +} + +/* + * Encode as a single handle value. + * On hardware platforms with physical memory starting at 0x0 the pfn + * could be 0 so we ensure that the handle will never be 0 by adjusting the + * encoded obj_idx value before encoding. + */ +static void *obj_location_to_handle(struct page *page, unsigned long obj_idx) +{ + unsigned long handle; + + if (!page) { + BUG_ON(obj_idx); + return NULL; + } + + handle = page_to_pfn(page) << OBJ_INDEX_BITS; + handle |= ((obj_idx + 1) & OBJ_INDEX_MASK); + + return (void *)handle; +} + +/* + * Decode pair from the given object handle. We adjust the + * decoded obj_idx back to its original value since it was adjusted in + * obj_location_to_handle(). 
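Condensed into one place, the round trip performed by the encode/decode helpers is the following (same shifts and masks as in the patch, shown only as a sketch; the +1 on obj_idx is what keeps a valid handle from ever being 0):

	/* encode: pack <pfn, obj_idx + 1> into one unsigned long */
	unsigned long handle = (page_to_pfn(page) << OBJ_INDEX_BITS) |
			       ((obj_idx + 1) & OBJ_INDEX_MASK);

	/* decode: recover the page and the original obj_idx */
	struct page *p    = pfn_to_page(handle >> OBJ_INDEX_BITS);
	unsigned long idx = (handle & OBJ_INDEX_MASK) - 1;
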
+ */ +static void obj_handle_to_location(unsigned long handle, struct page **page, + unsigned long *obj_idx) +{ + *page = pfn_to_page(handle >> OBJ_INDEX_BITS); + *obj_idx = (handle & OBJ_INDEX_MASK) - 1; +} + +static unsigned long obj_idx_to_offset(struct page *page, + unsigned long obj_idx, int class_size) +{ + unsigned long off = 0; + + if (!is_first_page(page)) + off = page->index; + + return off + obj_idx * class_size; +} + +static void reset_page(struct page *page) +{ + clear_bit(PG_private, &page->flags); + clear_bit(PG_private_2, &page->flags); + set_page_private(page, 0); + page->mapping = NULL; + page->freelist = NULL; + page_mapcount_reset(page); +} + +static void free_zspage(struct page *first_page) +{ + struct page *nextp, *tmp, *head_extra; + + BUG_ON(!is_first_page(first_page)); + BUG_ON(first_page->inuse); + + head_extra = (struct page *)page_private(first_page); + + reset_page(first_page); + __free_page(first_page); + + /* zspage with only 1 system page */ + if (!head_extra) + return; + + list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) { + list_del(&nextp->lru); + reset_page(nextp); + __free_page(nextp); + } + reset_page(head_extra); + __free_page(head_extra); +} + +/* Initialize a newly allocated zspage */ +static void init_zspage(struct page *first_page, struct size_class *class) +{ + unsigned long off = 0; + struct page *page = first_page; + + BUG_ON(!is_first_page(first_page)); + while (page) { + struct page *next_page; + struct link_free *link; + unsigned int i, objs_on_page; + + /* + * page->index stores offset of first object starting + * in the page. For the first page, this is always 0, + * so we use first_page->index (aka ->freelist) to store + * head of corresponding zspage's freelist. + */ + if (page != first_page) + page->index = off; + + link = (struct link_free *)kmap_atomic(page) + + off / sizeof(*link); + objs_on_page = (PAGE_SIZE - off) / class->size; + + for (i = 1; i <= objs_on_page; i++) { + off += class->size; + if (off < PAGE_SIZE) { + link->next = obj_location_to_handle(page, i); + link += class->size / sizeof(*link); + } + } + + /* + * We now come to the last (full or partial) object on this + * page, which must point to the first object on the next + * page (if present) + */ + next_page = get_next_page(page); + link->next = obj_location_to_handle(next_page, 0); + kunmap_atomic(link); + page = next_page; + off = (off + class->size) % PAGE_SIZE; + } +} + +/* + * Allocate a zspage for the given size class + */ +static struct page *alloc_zspage(struct size_class *class, gfp_t flags) +{ + int i, error; + struct page *first_page = NULL, *uninitialized_var(prev_page); + + /* + * Allocate individual pages and link them together as: + * 1. first page->private = first sub-page + * 2. all sub-pages are linked together using page->lru + * 3. each sub-page is linked to the first page using page->first_page + * + * For each size class, First/Head pages are linked together using + * page->lru. Also, we set PG_private to identify the first page + * (i.e. no other sub-page has this flag set) and PG_private_2 to + * identify the last page. 
+ */ + error = -ENOMEM; + for (i = 0; i < class->pages_per_zspage; i++) { + struct page *page; + + page = alloc_page(flags); + if (!page) + goto cleanup; + + INIT_LIST_HEAD(&page->lru); + if (i == 0) { /* first page */ + SetPagePrivate(page); + set_page_private(page, 0); + first_page = page; + first_page->inuse = 0; + } + if (i == 1) + set_page_private(first_page, (unsigned long)page); + if (i >= 1) + page->first_page = first_page; + if (i >= 2) + list_add(&page->lru, &prev_page->lru); + if (i == class->pages_per_zspage - 1) /* last page */ + SetPagePrivate2(page); + prev_page = page; + } + + init_zspage(first_page, class); + + first_page->freelist = obj_location_to_handle(first_page, 0); + /* Maximum number of objects we can store in this zspage */ + first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size; + + error = 0; /* Success */ + +cleanup: + if (unlikely(error) && first_page) { + free_zspage(first_page); + first_page = NULL; + } + + return first_page; +} + +static struct page *find_get_zspage(struct size_class *class) +{ + int i; + struct page *page; + + for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) { + page = class->fullness_list[i]; + if (page) + break; + } + + return page; +} + +#ifdef CONFIG_PGTABLE_MAPPING +static inline int __zs_cpu_up(struct mapping_area *area) +{ + /* + * Make sure we don't leak memory if a cpu UP notification + * and zs_init() race and both call zs_cpu_up() on the same cpu + */ + if (area->vm) + return 0; + area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL); + if (!area->vm) + return -ENOMEM; + return 0; +} + +static inline void __zs_cpu_down(struct mapping_area *area) +{ + if (area->vm) + free_vm_area(area->vm); + area->vm = NULL; +} + +static inline void *__zs_map_object(struct mapping_area *area, + struct page *pages[2], int off, int size) +{ + BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, &pages)); + area->vm_addr = area->vm->addr; + return area->vm_addr + off; +} + +static inline void __zs_unmap_object(struct mapping_area *area, + struct page *pages[2], int off, int size) +{ + unsigned long addr = (unsigned long)area->vm_addr; + + unmap_kernel_range(addr, PAGE_SIZE * 2); +} + +#else /* CONFIG_PGTABLE_MAPPING */ + +static inline int __zs_cpu_up(struct mapping_area *area) +{ + /* + * Make sure we don't leak memory if a cpu UP notification + * and zs_init() race and both call zs_cpu_up() on the same cpu + */ + if (area->vm_buf) + return 0; + area->vm_buf = (char *)__get_free_page(GFP_KERNEL); + if (!area->vm_buf) + return -ENOMEM; + return 0; +} + +static inline void __zs_cpu_down(struct mapping_area *area) +{ + if (area->vm_buf) + free_page((unsigned long)area->vm_buf); + area->vm_buf = NULL; +} + +static void *__zs_map_object(struct mapping_area *area, + struct page *pages[2], int off, int size) +{ + int sizes[2]; + void *addr; + char *buf = area->vm_buf; + + /* disable page faults to match kmap_atomic() return conditions */ + pagefault_disable(); + + /* no read fastpath */ + if (area->vm_mm == ZS_MM_WO) + goto out; + + sizes[0] = PAGE_SIZE - off; + sizes[1] = size - sizes[0]; + + /* copy object to per-cpu buffer */ + addr = kmap_atomic(pages[0]); + memcpy(buf, addr + off, sizes[0]); + kunmap_atomic(addr); + addr = kmap_atomic(pages[1]); + memcpy(buf + sizes[0], addr, sizes[1]); + kunmap_atomic(addr); +out: + return area->vm_buf; +} + +static void __zs_unmap_object(struct mapping_area *area, + struct page *pages[2], int off, int size) +{ + int sizes[2]; + void *addr; + char *buf = area->vm_buf; + + /* no write fastpath */ + if (area->vm_mm == 
ZS_MM_RO) + goto out; + + sizes[0] = PAGE_SIZE - off; + sizes[1] = size - sizes[0]; + + /* copy per-cpu buffer to object */ + addr = kmap_atomic(pages[0]); + memcpy(addr + off, buf, sizes[0]); + kunmap_atomic(addr); + addr = kmap_atomic(pages[1]); + memcpy(addr, buf + sizes[0], sizes[1]); + kunmap_atomic(addr); + +out: + /* enable page faults to match kunmap_atomic() return conditions */ + pagefault_enable(); +} + +#endif /* CONFIG_PGTABLE_MAPPING */ + +static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action, + void *pcpu) +{ + int ret, cpu = (long)pcpu; + struct mapping_area *area; + + switch (action) { + case CPU_UP_PREPARE: + area = &per_cpu(zs_map_area, cpu); + ret = __zs_cpu_up(area); + if (ret) + return notifier_from_errno(ret); + break; + case CPU_DEAD: + case CPU_UP_CANCELED: + area = &per_cpu(zs_map_area, cpu); + __zs_cpu_down(area); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block zs_cpu_nb = { + .notifier_call = zs_cpu_notifier +}; + +static void zs_exit(void) +{ + int cpu; + + for_each_online_cpu(cpu) + zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu); + unregister_cpu_notifier(&zs_cpu_nb); +} + +static int zs_init(void) +{ + int cpu, ret; + + register_cpu_notifier(&zs_cpu_nb); + for_each_online_cpu(cpu) { + ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu); + if (notifier_to_errno(ret)) + goto fail; + } + return 0; +fail: + zs_exit(); + return notifier_to_errno(ret); +} + +/** + * zs_create_pool - Creates an allocation pool to work from. + * @flags: allocation flags used to allocate pool metadata + * + * This function must be called before anything when using + * the zsmalloc allocator. + * + * On success, a pointer to the newly created pool is returned, + * otherwise NULL. + */ +struct zs_pool *zs_create_pool(gfp_t flags) +{ + int i, ovhd_size; + struct zs_pool *pool; + + ovhd_size = roundup(sizeof(*pool), PAGE_SIZE); + pool = kzalloc(ovhd_size, GFP_KERNEL); + if (!pool) + return NULL; + + for (i = 0; i < ZS_SIZE_CLASSES; i++) { + int size; + struct size_class *class; + + size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA; + if (size > ZS_MAX_ALLOC_SIZE) + size = ZS_MAX_ALLOC_SIZE; + + class = &pool->size_class[i]; + class->size = size; + class->index = i; + spin_lock_init(&class->lock); + class->pages_per_zspage = get_pages_per_zspage(size); + + } + + pool->flags = flags; + + return pool; +} +EXPORT_SYMBOL_GPL(zs_create_pool); + +void zs_destroy_pool(struct zs_pool *pool) +{ + int i; + + for (i = 0; i < ZS_SIZE_CLASSES; i++) { + int fg; + struct size_class *class = &pool->size_class[i]; + + for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) { + if (class->fullness_list[fg]) { + pr_info("Freeing non-empty class with size %db, fullness group %d\n", + class->size, fg); + } + } + } + kfree(pool); +} +EXPORT_SYMBOL_GPL(zs_destroy_pool); + +/** + * zs_malloc - Allocate block of given size from pool. + * @pool: pool to allocate from + * @size: size of block to allocate + * + * On success, handle to the allocated object is returned, + * otherwise 0. + * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail. 
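Taken together with zs_map_object()/zs_unmap_object() further down, the intended calling sequence for the exported API looks roughly like the sketch below (process context assumed; the helper name, GFP_KERNEL and the error handling are illustrative, not taken from the patch):

	/* illustrative only: store a buffer in a zsmalloc pool, then free it */
	static int zs_store_example(const void *src, size_t len)
	{
		struct zs_pool *pool;
		unsigned long handle;
		void *dst;

		pool = zs_create_pool(GFP_KERNEL);	/* flag choice is illustrative */
		if (!pool)
			return -ENOMEM;

		handle = zs_malloc(pool, len);	/* returns 0 if len > ZS_MAX_ALLOC_SIZE */
		if (!handle) {
			zs_destroy_pool(pool);
			return -ENOMEM;
		}

		/* a handle is not a pointer: map it, copy, then unmap promptly */
		dst = zs_map_object(pool, handle, ZS_MM_WO);
		memcpy(dst, src, len);
		zs_unmap_object(pool, handle);	/* re-enables preemption/page faults */

		zs_free(pool, handle);
		zs_destroy_pool(pool);
		return 0;
	}
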
+ */ +unsigned long zs_malloc(struct zs_pool *pool, size_t size) +{ + unsigned long obj; + struct link_free *link; + int class_idx; + struct size_class *class; + + struct page *first_page, *m_page; + unsigned long m_objidx, m_offset; + + if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE)) + return 0; + + class_idx = get_size_class_index(size); + class = &pool->size_class[class_idx]; + BUG_ON(class_idx != class->index); + + spin_lock(&class->lock); + first_page = find_get_zspage(class); + + if (!first_page) { + spin_unlock(&class->lock); + first_page = alloc_zspage(class, pool->flags); + if (unlikely(!first_page)) + return 0; + + set_zspage_mapping(first_page, class->index, ZS_EMPTY); + spin_lock(&class->lock); + class->pages_allocated += class->pages_per_zspage; + } + + obj = (unsigned long)first_page->freelist; + obj_handle_to_location(obj, &m_page, &m_objidx); + m_offset = obj_idx_to_offset(m_page, m_objidx, class->size); + + link = (struct link_free *)kmap_atomic(m_page) + + m_offset / sizeof(*link); + first_page->freelist = link->next; + memset(link, POISON_INUSE, sizeof(*link)); + kunmap_atomic(link); + + first_page->inuse++; + /* Now move the zspage to another fullness group, if required */ + fix_fullness_group(pool, first_page); + spin_unlock(&class->lock); + + return obj; +} +EXPORT_SYMBOL_GPL(zs_malloc); + +void zs_free(struct zs_pool *pool, unsigned long obj) +{ + struct link_free *link; + struct page *first_page, *f_page; + unsigned long f_objidx, f_offset; + + int class_idx; + struct size_class *class; + enum fullness_group fullness; + + if (unlikely(!obj)) + return; + + obj_handle_to_location(obj, &f_page, &f_objidx); + first_page = get_first_page(f_page); + + get_zspage_mapping(first_page, &class_idx, &fullness); + class = &pool->size_class[class_idx]; + f_offset = obj_idx_to_offset(f_page, f_objidx, class->size); + + spin_lock(&class->lock); + + /* Insert this object in containing zspage's freelist */ + link = (struct link_free *)((unsigned char *)kmap_atomic(f_page) + + f_offset); + link->next = first_page->freelist; + kunmap_atomic(link); + first_page->freelist = (void *)obj; + + first_page->inuse--; + fullness = fix_fullness_group(pool, first_page); + + if (fullness == ZS_EMPTY) + class->pages_allocated -= class->pages_per_zspage; + + spin_unlock(&class->lock); + + if (fullness == ZS_EMPTY) + free_zspage(first_page); +} +EXPORT_SYMBOL_GPL(zs_free); + +/** + * zs_map_object - get address of allocated object from handle. + * @pool: pool from which the object was allocated + * @handle: handle returned from zs_malloc + * + * Before using an object allocated from zs_malloc, it must be mapped using + * this function. When done with the object, it must be unmapped using + * zs_unmap_object. + * + * Only one object can be mapped per cpu at a time. There is no protection + * against nested mappings. + * + * This function returns with preemption and page faults disabled. + */ +void *zs_map_object(struct zs_pool *pool, unsigned long handle, + enum zs_mapmode mm) +{ + struct page *page; + unsigned long obj_idx, off; + + unsigned int class_idx; + enum fullness_group fg; + struct size_class *class; + struct mapping_area *area; + struct page *pages[2]; + + BUG_ON(!handle); + + /* + * Because we use per-cpu mapping areas shared among the + * pools/users, we can't allow mapping in interrupt context + * because it can corrupt another users mappings. 
+ */ + BUG_ON(in_interrupt()); + + obj_handle_to_location(handle, &page, &obj_idx); + get_zspage_mapping(get_first_page(page), &class_idx, &fg); + class = &pool->size_class[class_idx]; + off = obj_idx_to_offset(page, obj_idx, class->size); + + area = &get_cpu_var(zs_map_area); + area->vm_mm = mm; + if (off + class->size <= PAGE_SIZE) { + /* this object is contained entirely within a page */ + area->vm_addr = kmap_atomic(page); + return area->vm_addr + off; + } + + /* this object spans two pages */ + pages[0] = page; + pages[1] = get_next_page(page); + BUG_ON(!pages[1]); + + return __zs_map_object(area, pages, off, class->size); +} +EXPORT_SYMBOL_GPL(zs_map_object); + +void zs_unmap_object(struct zs_pool *pool, unsigned long handle) +{ + struct page *page; + unsigned long obj_idx, off; + + unsigned int class_idx; + enum fullness_group fg; + struct size_class *class; + struct mapping_area *area; + + BUG_ON(!handle); + + obj_handle_to_location(handle, &page, &obj_idx); + get_zspage_mapping(get_first_page(page), &class_idx, &fg); + class = &pool->size_class[class_idx]; + off = obj_idx_to_offset(page, obj_idx, class->size); + + area = &__get_cpu_var(zs_map_area); + if (off + class->size <= PAGE_SIZE) + kunmap_atomic(area->vm_addr); + else { + struct page *pages[2]; + + pages[0] = page; + pages[1] = get_next_page(page); + BUG_ON(!pages[1]); + + __zs_unmap_object(area, pages, off, class->size); + } + put_cpu_var(zs_map_area); +} +EXPORT_SYMBOL_GPL(zs_unmap_object); + +u64 zs_get_total_size_bytes(struct zs_pool *pool) +{ + int i; + u64 npages = 0; + + for (i = 0; i < ZS_SIZE_CLASSES; i++) + npages += pool->size_class[i].pages_allocated; + + return npages << PAGE_SHIFT; +} +EXPORT_SYMBOL_GPL(zs_get_total_size_bytes); + +module_init(zs_init); +module_exit(zs_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Nitin Gupta "); -- cgit v1.2.3 From 31fc00bb788ffde7d8d861d8b2bba798ab445992 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Thu, 30 Jan 2014 15:45:55 -0800 Subject: zsmalloc: add copyright Add my copyright to the zsmalloc source code which I maintain. Signed-off-by: Minchan Kim Cc: Nitin Gupta Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/zsmalloc.h | 1 + mm/zsmalloc.c | 1 + 2 files changed, 2 insertions(+) (limited to 'mm') diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index c2eb174b97ee..e44d634e7fb7 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -2,6 +2,7 @@ * zsmalloc memory allocator * * Copyright (C) 2011 Nitin Gupta + * Copyright (C) 2012, 2013 Minchan Kim * * This code is released using a dual license strategy: BSD/GPL * You can choose the license that better fits your requirements. diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 5d42adfcb67b..c03ca5e9fe15 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -2,6 +2,7 @@ * zsmalloc memory allocator * * Copyright (C) 2011 Nitin Gupta + * Copyright (C) 2012, 2013 Minchan Kim * * This code is released using a dual license strategy: BSD/GPL * You can choose the license that better fits your requirements. -- cgit v1.2.3 From 8790c71a18e5d2d93532ae250bcf5eddbba729cd Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Thu, 30 Jan 2014 15:46:08 -0800 Subject: mm/mempolicy.c: fix mempolicy printing in numa_maps As a result of commit 5606e3877ad8 ("mm: numa: Migrate on reference policy"), /proc//numa_maps prints the mempolicy for any as "prefer:N" for the local node, N, of the process reading the file. 
This should only be printed when the mempolicy of is MPOL_PREFERRED for node N. If the process is actually only using the default mempolicy for local node allocation, make sure "default" is printed as expected. Signed-off-by: David Rientjes Reported-by: Robert Lippert Cc: Peter Zijlstra Acked-by: Mel Gorman Cc: Ingo Molnar Cc: [3.7+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mempolicy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 873de7e542bc..ae3c8f3595d4 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2930,7 +2930,7 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) unsigned short mode = MPOL_DEFAULT; unsigned short flags = 0; - if (pol && pol != &default_policy) { + if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { mode = pol->mode; flags = pol->flags; } -- cgit v1.2.3 From a03208652dad18232e9ec3432df69f9c19c856ec Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Thu, 30 Jan 2014 15:46:09 -0800 Subject: mm/slub.c: fix page->_count corruption (again) Commit abca7c496584 ("mm: fix slab->page _count corruption when using slub") notes that we can not _set_ a page->counters directly, except when using a real double-cmpxchg. Doing so can lose updates to ->_count. That is an absolute rule: You may not *set* page->counters except via a cmpxchg. Commit abca7c496584 fixed this for the folks who have the slub cmpxchg_double code turned off at compile time, but it left the bad case alone. It can still be reached, and the same bug triggered in two cases: 1. Turning on slub debugging at runtime, which is available on the distro kernels that I looked at. 2. On 64-bit CPUs with no CMPXCHG16B (some early AMD x86-64 cpus, evidently) There are at least 3 ways we could fix this: 1. Take all of the exising calls to cmpxchg_double_slab() and __cmpxchg_double_slab() and convert them to take an old, new and target 'struct page'. 2. Do (1), but with the newly-introduced 'slub_data'. 3. Do some magic inside the two cmpxchg...slab() functions to pull the counters out of new_counters and only set those fields in page->{inuse,frozen,objects}. I've done (2) as well, but it's a bunch more code. This patch is an attempt at (3). This was the most straightforward and foolproof way that I could think to do this. This would also technically allow us to get rid of the ugly #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) in 'struct page', but leaving it alone has the added benefit that 'counters' stays 'unsigned' instead of 'unsigned long', so all the copies that the slub code does stay a bit smaller. Signed-off-by: Dave Hansen Cc: Christoph Lameter Cc: Pekka Enberg Cc: Matt Mackall Cc: Pravin B Shelar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slub.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index 545a170ebf9f..2b1a6970e46f 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -355,6 +355,21 @@ static __always_inline void slab_unlock(struct page *page) __bit_spin_unlock(PG_locked, &page->flags); } +static inline void set_page_slub_counters(struct page *page, unsigned long counters_new) +{ + struct page tmp; + tmp.counters = counters_new; + /* + * page->counters can cover frozen/inuse/objects as well + * as page->_count. 
If we assign to ->counters directly + * we run the risk of losing updates to page->_count, so + * be careful and only assign to the fields we need. + */ + page->frozen = tmp.frozen; + page->inuse = tmp.inuse; + page->objects = tmp.objects; +} + /* Interrupts must be disabled (for the fallback code to work right) */ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page, void *freelist_old, unsigned long counters_old, @@ -376,7 +391,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page if (page->freelist == freelist_old && page->counters == counters_old) { page->freelist = freelist_new; - page->counters = counters_new; + set_page_slub_counters(page, counters_new); slab_unlock(page); return 1; } @@ -415,7 +430,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, if (page->freelist == freelist_old && page->counters == counters_old) { page->freelist = freelist_new; - page->counters = counters_new; + set_page_slub_counters(page, counters_new); slab_unlock(page); local_irq_restore(flags); return 1; -- cgit v1.2.3 From 778c14affaf94a9e4953179d3e13a544ccce7707 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Thu, 30 Jan 2014 15:46:11 -0800 Subject: mm, oom: base root bonus on current usage A 3% of system memory bonus is sometimes too excessive in comparison to other processes. With commit a63d83f427fb ("oom: badness heuristic rewrite"), the OOM killer tries to avoid killing privileged tasks by subtracting 3% of overall memory (system or cgroup) from their per-task consumption. But as a result, all root tasks that consume less than 3% of overall memory are considered equal, and so it only takes 33+ privileged tasks pushing the system out of memory for the OOM killer to do something stupid and kill dhclient or other root-owned processes. For example, on a 32G machine it can't tell the difference between the 1M agetty and the 10G fork bomb member. The changelog describes this 3% boost as the equivalent to the global overcommit limit being 3% higher for privileged tasks, but this is not the same as discounting 3% of overall memory from _every privileged task individually_ during OOM selection. Replace the 3% of system memory bonus with a 3% of current memory usage bonus. By giving root tasks a bonus that is proportional to their actual size, they remain comparable even when relatively small. In the example above, the OOM killer will discount the 1M agetty's 256 badness points down to 179, and the 10G fork bomb's 262144 points down to 183500 points and make the right choice, instead of discounting both to 0 and killing agetty because it's first in the task list. Signed-off-by: David Rientjes Reported-by: Johannes Weiner Acked-by: Johannes Weiner Cc: Michal Hocko Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/proc.txt | 4 ++-- mm/oom_kill.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 31f76178c987..f00bee144add 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -1386,8 +1386,8 @@ may allocate from based on an estimation of its current memory and swap use. For example, if a task is using all allowed memory, its badness score will be 1000. If it is using half of its allowed memory, its score will be 500. 
-There is an additional factor included in the badness score: root -processes are given 3% extra memory over other tasks. +There is an additional factor included in the badness score: the current memory +and swap usage is discounted by 3% for root processes. The amount of "allowed" memory depends on the context in which the oom killer was called. If it is due to the memory assigned to the allocating task's cpuset diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 37b1b1903fb2..3291e82d4352 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -178,7 +178,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, * implementation used by LSMs. */ if (has_capability_noaudit(p, CAP_SYS_ADMIN)) - adj -= 30; + points -= (points * 3) / 100; /* Normalize to oom_score_adj units */ adj *= totalpages / 1000; -- cgit v1.2.3 From 7c094fd698de2f333fa39b6da213f880d40b9bfe Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 30 Jan 2014 15:46:14 -0800 Subject: memcg: fix mutex not unlocked on memcg_create_kmem_cache fail path Commit 842e2873697e ("memcg: get rid of kmem_cache_dup()") introduced a mutex for memcg_create_kmem_cache() to protect the tmp_name buffer that holds the memcg name. It failed to unlock the mutex if this buffer could not be allocated. This patch fixes the issue by appropriately unlocking the mutex if the allocation fails. Signed-off-by: Vladimir Davydov Cc: Michal Hocko Cc: Johannes Weiner Cc: Glauber Costa Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 19d5d4274e22..53385cd4e6f0 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3400,7 +3400,7 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep) static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, struct kmem_cache *s) { - struct kmem_cache *new; + struct kmem_cache *new = NULL; static char *tmp_name = NULL; static DEFINE_MUTEX(mutex); /* protects tmp_name */ @@ -3416,7 +3416,7 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, if (!tmp_name) { tmp_name = kmalloc(PATH_MAX, GFP_KERNEL); if (!tmp_name) - return NULL; + goto out; } rcu_read_lock(); @@ -3426,12 +3426,11 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align, (s->flags & ~SLAB_PANIC), s->ctor, s); - if (new) new->allocflags |= __GFP_KMEMCG; else new = s; - +out: mutex_unlock(&mutex); return new; } -- cgit v1.2.3 From 67b6c900dc6dce65478d6fe37b60cd1e65bb80c2 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 24 Jan 2014 07:20:23 -0800 Subject: mm: slub: work around unneeded lockdep warning The slub code does some setup during early boot in early_kmem_cache_node_alloc() with some local data. There is no possible way that another CPU can see this data, so the slub code doesn't unnecessarily lock it. However, some new lockdep asserts check to make sure that add_partial() _always_ has the list_lock held. Just add the locking, even though it is technically unnecessary. 
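For context, the assertion that makes the unlocked early-boot call trip has this general shape (a sketch; the exact check added to add_partial() is assumed here, not quoted in this log):

	static void add_partial(struct kmem_cache_node *n, struct page *page, int tail)
	{
		lockdep_assert_held(&n->list_lock);	/* fires if called without list_lock */
		/* ... queue the slab on n->partial ... */
	}
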
Cc: Peter Zijlstra Cc: Russell King Acked-by: David Rientjes Signed-off-by: Dave Hansen Signed-off-by: Pekka Enberg --- mm/slub.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index a99e9e67c60e..432bddf484bb 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2890,7 +2890,13 @@ static void early_kmem_cache_node_alloc(int node) init_kmem_cache_node(n); inc_slabs_node(kmem_cache_node, node, page->objects); + /* + * the lock is for lockdep's sake, not for any actual + * race protection + */ + spin_lock(&n->list_lock); add_partial(n, page, DEACTIVATE_TO_HEAD); + spin_unlock(&n->list_lock); } static void free_kmem_cache_nodes(struct kmem_cache *s) -- cgit v1.2.3 From cb8ee1a3d429f8898972c869dd4792afb04e961a Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Tue, 28 Jan 2014 02:57:08 +0900 Subject: mm: Fix warning on make htmldocs caused by slab.c This patch fixed following errors while make htmldocs Warning(/mm/slab.c:1956): No description found for parameter 'page' Warning(/mm/slab.c:1956): Excess function parameter 'slabp' description in 'slab_destroy' Incorrect function parameter "slabp" was set instead of "page" Acked-by: Christoph Lameter Signed-off-by: Masanari Iida Signed-off-by: Pekka Enberg --- mm/slab.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index eb043bf05f4c..b264214c77ea 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1946,7 +1946,7 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, /** * slab_destroy - destroy and release all objects in a slab * @cachep: cache pointer being destroyed - * @slabp: slab pointer being destroyed + * @page: page pointer being destroyed * * Destroy all the objs in a slab, and release the mem back to the system. * Before calling the slab must have been unlinked from the cache. The -- cgit v1.2.3
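The rule behind that last fix is simply that every @param line in a kernel-doc comment must name an actual parameter of the function it documents; the matched pair after the change looks roughly like this (signature paraphrased for illustration, not quoted from the tree):

	/**
	 * slab_destroy - destroy and release all objects in a slab
	 * @cachep: cache pointer being destroyed
	 * @page: page pointer being destroyed
	 */
	static void slab_destroy(struct kmem_cache *cachep, struct page *page);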