author		Lai Jiangshan <laijs@cn.fujitsu.com>	2012-12-11 16:03:23 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-11 17:22:28 -0800
commit		74d42d8fe146e870c52bde3b1c692f86cc8ff844
tree		0b17084ef171572fb9f1c6f01bcb667e638607ed
parent		e455a9b92d6e19a3f0f7eb6f6241efa566a7e81a
memory_hotplug: ensure every online node has NORMAL memory
The old memory hotplug code and the new online_movable interface can produce
an online node that has no normal memory, but memory management behaves badly
on such nodes: a task bound to one fails every kernel allocation and therefore
cannot create new tasks or other kernel objects.

So disallow nodes without normal memory here for now; this will be enabled
again once the rest of the code is prepared for it.
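To make the failure mode concrete: GFP_KERNEL allocations are never served
from ZONE_MOVABLE or ZONE_HIGHMEM, so a kernel allocation pinned to a
movable-only node has nowhere to go, and a task whose cpuset or memory policy
binds it to such a node fails every kernel allocation made on its behalf.
A hypothetical kernel-style sketch (not part of this patch; alloc_on_node()
is an invented helper):

#include <linux/gfp.h>
#include <linux/slab.h>

/*
 * Hypothetical helper, for illustration only: force a GFP_KERNEL
 * allocation onto a single node.  If that node was onlined with only
 * ZONE_MOVABLE memory, no eligible zone exists and this returns NULL.
 */
static void *alloc_on_node(int nid, size_t size)
{
	return kmalloc_node(size, GFP_KERNEL | __GFP_THISNODE, nid);
}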
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Wen Congyang <wency@cn.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Greg KH <greg@kroah.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | mm/memory_hotplug.c | 40
1 file changed, 40 insertions, 0 deletions
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index c370491bdb97..de9cb14ae753 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -581,6 +581,12 @@ static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
 	return 0;
 }
 
+/* ensure every online node has NORMAL memory */
+static bool can_online_high_movable(struct zone *zone)
+{
+	return node_state(zone_to_nid(zone), N_NORMAL_MEMORY);
+}
+
 /* check which state of node_states will be changed when online memory */
 static void node_states_check_changes_online(unsigned long nr_pages,
 	struct zone *zone, struct memory_notify *arg)
@@ -646,6 +652,12 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
 	 */
 	zone = page_zone(pfn_to_page(pfn));
 
+	if ((zone_idx(zone) > ZONE_NORMAL || online_type == ONLINE_MOVABLE) &&
+	    !can_online_high_movable(zone)) {
+		unlock_memory_hotplug();
+		return -1;
+	}
+
 	if (online_type == ONLINE_KERNEL && zone_idx(zone) == ZONE_MOVABLE) {
 		if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages)) {
 			unlock_memory_hotplug();
@@ -1054,6 +1066,30 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
 	return offlined;
 }
 
+/* ensure the node has NORMAL memory if it is still online */
+static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
+{
+	struct pglist_data *pgdat = zone->zone_pgdat;
+	unsigned long present_pages = 0;
+	enum zone_type zt;
+
+	for (zt = 0; zt <= ZONE_NORMAL; zt++)
+		present_pages += pgdat->node_zones[zt].present_pages;
+
+	if (present_pages > nr_pages)
+		return true;
+
+	present_pages = 0;
+	for (; zt <= ZONE_MOVABLE; zt++)
+		present_pages += pgdat->node_zones[zt].present_pages;
+
+	/*
+	 * we can't offline the last normal memory until all
+	 * higher memory is offlined.
+	 */
+	return present_pages == 0;
+}
+
 /* check which state of node_states will be changed when offline memory */
 static void node_states_check_changes_offline(unsigned long nr_pages,
 		struct zone *zone, struct memory_notify *arg)
@@ -1141,6 +1177,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
 	node = zone_to_nid(zone);
 	nr_pages = end_pfn - start_pfn;
 
+	ret = -EINVAL;
+	if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages))
+		goto out;
+
 	/* set above range as isolated */
 	ret = start_isolate_page_range(start_pfn, end_pfn,
 				       MIGRATE_MOVABLE, true);
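Reading the offline-side check another way: offlining nr_pages of lowmem is
allowed either if the node still has some normal memory left afterwards, or if
all of its highmem/movable memory is already gone, so the node goes fully
offline instead of lingering online without normal memory. Below is a small
user-space sketch of the same arithmetic, assuming an illustrative zone layout
and made-up page counts; nothing here is taken from the patch beyond the
decision logic.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative zone layout; indexes mirror the kernel's low-to-high ordering. */
enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, ZONE_MOVABLE, MAX_NR_ZONES };

/* Same decision as can_offline_normal(), but over a plain array of counts. */
static bool can_offline_normal(const unsigned long present[MAX_NR_ZONES],
			       unsigned long nr_pages)
{
	unsigned long pages = 0;
	enum zone_type zt;

	for (zt = 0; zt <= ZONE_NORMAL; zt++)	/* lowmem on the node */
		pages += present[zt];
	if (pages > nr_pages)			/* some normal memory survives */
		return true;

	pages = 0;
	for (; zt <= ZONE_MOVABLE; zt++)	/* highmem + movable on the node */
		pages += present[zt];
	return pages == 0;			/* only OK if nothing higher is left */
}

int main(void)
{
	/* Hypothetical node: 4096 normal pages, 65536 movable pages. */
	unsigned long present[MAX_NR_ZONES] = { 0, 4096, 0, 65536 };

	printf("offline 2048 normal pages: %s\n",
	       can_offline_normal(present, 2048) ? "allowed" : "refused");
	printf("offline 4096 normal pages: %s\n",
	       can_offline_normal(present, 4096) ? "allowed" : "refused");
	return 0;
}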