author		Reza Arbab	2016-07-26 15:22:23 -0700
committer	Linus Torvalds	2016-07-26 16:19:19 -0700
commit		df429ac039360005299d56247647ca77098d660e
tree		b2e77a3454ffbf68030b7caa4f9eb508057eb072 /mm
parent		e51e6c8f80731d723ada126c029301cee2827fac
memory-hotplug: more general validation of zone during online
When memory is onlined, we are only able to rezone from ZONE_MOVABLE to
ZONE_KERNEL, or from (ZONE_MOVABLE - 1) to ZONE_MOVABLE.
To be more flexible, use the following criteria instead. To online
memory from zone X into zone Y:
* Any zones between X and Y must be unused.
* If X is lower than Y, the onlined memory must lie at the end of X.
* If X is higher than Y, the onlined memory must lie at the start of X.
Add zone_can_shift() to make this determination.
Link: http://lkml.kernel.org/r/1462816419-4479-3-git-send-email-arbab@linux.vnet.ibm.com
Signed-off-by: Reza Arbab <arbab@linux.vnet.ibm.com>
Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Daniel Kiper <daniel.kiper@oracle.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Tang Chen <tangchen@cn.fujitsu.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Andrew Banman <abanman@sgi.com>
Cc: Chen Yucong <slaoub@gmail.com>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Zhang Zhen <zhenzhang.zhang@huawei.com>
Cc: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory_hotplug.c	| 42 +++++++++++++++++++++++++++++++++++-------
1 file changed, 35 insertions(+), 7 deletions(-)
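Before the full diff, a condensed caller-side sketch of the new helper, based on the online_pages() hunk below (locking, the can_online_high_movable() check, and the surrounding function body are omitted; this is an illustration of the criteria above, not separate code):

	/*
	 * zone_can_shift() returns the signed distance from the pages'
	 * current zone index to the requested target zone, or 0 when the
	 * criteria above are not met, so a rejected rezone simply leaves
	 * the memory in its current zone.
	 */
	int zone_shift = 0;

	if (online_type == MMOP_ONLINE_KERNEL)
		zone_shift = zone_can_shift(pfn, nr_pages, ZONE_NORMAL);
	else if (online_type == MMOP_ONLINE_MOVABLE)
		zone_shift = zone_can_shift(pfn, nr_pages, ZONE_MOVABLE);

	/* move_pfn_range() applies the shift; the hunk below checks !zone. */
	zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);

Because the helper returns target - idx, the same call site handles shifting a range either up (e.g. ZONE_NORMAL to ZONE_MOVABLE) or down, without the per-case special handling that is removed below.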
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a86a66cbef77..82d0b98d27f8 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1047,6 +1047,37 @@ static void node_states_set_node(int node, struct memory_notify *arg)
 	node_set_state(node, N_MEMORY);
 }
 
+int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+		   enum zone_type target)
+{
+	struct zone *zone = page_zone(pfn_to_page(pfn));
+	enum zone_type idx = zone_idx(zone);
+	int i;
+
+	if (idx < target) {
+		/* pages must be at end of current zone */
+		if (pfn + nr_pages != zone_end_pfn(zone))
+			return 0;
+
+		/* no zones in use between current zone and target */
+		for (i = idx + 1; i < target; i++)
+			if (zone_is_initialized(zone - idx + i))
+				return 0;
+	}
+
+	if (target < idx) {
+		/* pages must be at beginning of current zone */
+		if (pfn != zone->zone_start_pfn)
+			return 0;
+
+		/* no zones in use between current zone and target */
+		for (i = target + 1; i < idx; i++)
+			if (zone_is_initialized(zone - idx + i))
+				return 0;
+	}
+
+	return target - idx;
+}
+
 /* Must be protected by mem_hotplug_begin() */
 int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
 {
@@ -1072,13 +1103,10 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
 	    !can_online_high_movable(zone))
 		return -EINVAL;
 
-	if (online_type == MMOP_ONLINE_KERNEL &&
-	    zone_idx(zone) == ZONE_MOVABLE)
-		zone_shift = -1;
-
-	if (online_type == MMOP_ONLINE_MOVABLE &&
-	    zone_idx(zone) == ZONE_MOVABLE - 1)
-		zone_shift = 1;
+	if (online_type == MMOP_ONLINE_KERNEL)
+		zone_shift = zone_can_shift(pfn, nr_pages, ZONE_NORMAL);
+	else if (online_type == MMOP_ONLINE_MOVABLE)
+		zone_shift = zone_can_shift(pfn, nr_pages, ZONE_MOVABLE);
 
 	zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
 	if (!zone)