-rw-r--r--  mm/page_alloc.c  15
1 file changed, 10 insertions, 5 deletions
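What the patch does, in short: unreserve_highatomic_pageblock() used to return void, so the allocator's retry logic could not tell whether releasing the high-atomic reserve freed anything. It now returns whether move_freepages_block() actually moved pages, and should_reclaim_retry() uses that as a last resort: once the retry budget is exhausted, the allocation is retried one more time only if unreserving did real work; otherwise the path falls through to OOM. The following user-space sketch mirrors that pattern; every name in it (emergency_pool, drain_emergency_pool, should_retry) is a hypothetical stand-in, not a kernel API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Pages held back for atomic allocations (stand-in for the reserve). */
static size_t emergency_pool = 4;

/*
 * Release one reserved page and report whether anything was actually
 * freed -- the analogue of unreserve_highatomic_pageblock() returning
 * the result of move_freepages_block() instead of void.
 */
static bool drain_emergency_pool(void)
{
        if (emergency_pool == 0)
                return false;
        emergency_pool--;
        return true;
}

/*
 * The analogue of the should_reclaim_retry() change: once ordinary
 * retries are exhausted, tap the reserve one last time.  true means
 * "retry the allocation"; false means the reserve is gone too, so
 * give up (the kernel would go to OOM here).
 */
static bool should_retry(int no_progress_loops, int max_retries)
{
        if (no_progress_loops > max_retries)
                return drain_emergency_pool();
        return true;    /* still within the normal retry budget */
}

int main(void)
{
        int loops = 0;

        /* Simulate an allocation that never makes progress. */
        while (should_retry(++loops, 3))
                ;
        printf("gave up after %d loops, reserve left: %zu\n",
               loops, emergency_pool);
        return 0;
}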
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8cbc38f923aa..085de0442dd4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2059,7 +2059,7 @@ out_unlock:
  * intense memory pressure but failed atomic allocations should be easier
  * to recover from than an OOM.
  */
-static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
+static bool unreserve_highatomic_pageblock(const struct alloc_context *ac)
 {
         struct zonelist *zonelist = ac->zonelist;
         unsigned long flags;
@@ -2067,6 +2067,7 @@ static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
         struct zone *zone;
         struct page *page;
         int order;
+        bool ret;
 
         for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
                                                                 ac->nodemask) {
@@ -2115,12 +2116,14 @@ static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
                          * may increase.
                          */
                         set_pageblock_migratetype(page, ac->migratetype);
-                        move_freepages_block(zone, page, ac->migratetype);
+                        ret = move_freepages_block(zone, page, ac->migratetype);
                         spin_unlock_irqrestore(&zone->lock, flags);
-                        return;
+                        return ret;
                 }
                 spin_unlock_irqrestore(&zone->lock, flags);
         }
+
+        return false;
 }
 
 /* Remove an element from the buddy allocator from the fallback list */
@@ -3436,8 +3439,10 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
          * Make sure we converge to OOM if we cannot make any progress
          * several times in the row.
          */
-        if (*no_progress_loops > MAX_RECLAIM_RETRIES)
-                return false;
+        if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
+                /* Before OOM, exhaust highatomic_reserve */
+                return unreserve_highatomic_pageblock(ac);
+        }
 
         /*
          * Keep reclaiming pages while there is a chance this will lead
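A note on the last hunk, as I read the diff: the retry decision keys off the value propagated from move_freepages_block(), not off merely finding a reserved pageblock. If no high-atomic pageblock exists, or its pages cannot be moved back onto the usable free lists, unreserve_highatomic_pageblock() returns false and should_reclaim_retry() converges to OOM rather than looping on a reserve that cannot help.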