author    Oscar Salvador <osalvador@suse.de>              2020-10-15 20:07:05 -0700
committer Linus Torvalds <torvalds@linux-foundation.org> 2020-10-16 11:11:16 -0700
commit    06be6ff3d2ec8be806b859fc054a1909b16d2473 (patch)
tree      e679355cd59c86024cbcfca207fc089f9800cc1e /mm
parent    694bf0b0cdf91be50e6f037ca93733ed83ca1187 (diff)
mm,hwpoison: rework soft offline for free pages
When trying to soft-offline a free page, we need to first take it off
the buddy allocator.  Once we know it is out of reach, we can safely
flag it as poisoned.

take_page_off_buddy will be used to take a page meant to be poisoned
off the buddy allocator.  take_page_off_buddy calls
break_down_buddy_pages, which splits a higher-order page in case our
page belongs to one.

Once the page is under our control, we call page_handle_poison to set
it as poisoned and grab a refcount on it.

Signed-off-by: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Aristeu Rozanski <aris@ruivo.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dmitry Yakunin <zeil@yandex-team.ru>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Oscar Salvador <osalvador@suse.com>
Cc: Qian Cai <cai@lca.pw>
Cc: Tony Luck <tony.luck@intel.com>
Link: https://lkml.kernel.org/r/20200922135650.1634-9-osalvador@suse.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
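[Editor's illustration] To make the splitting step concrete, here is a
minimal, self-contained userspace sketch of the idea behind
break_down_buddy_pages; this is not the kernel code itself, and the pfn
values and order used are arbitrary examples. Starting from the
high-order block that contains the target, the block is halved
repeatedly, and at each step the half that does not contain the target
is handed back (here: printed) as a free block of the next lower order,
until only the target page remains captured:

    /*
     * Userspace sketch of the buddy splitting walk (low fixed at 0,
     * as in the soft-offline caller). Not kernel code.
     */
    #include <stdio.h>

    static void split_around_target(unsigned long base, unsigned int high,
                                    unsigned long target)
    {
            unsigned long size = 1UL << high;       /* pages in the block */

            while (high > 0) {
                    high--;
                    size >>= 1;

                    if (target >= base + size) {
                            /* Target is in the upper half: release the lower half. */
                            printf("free pfn %lu..%lu (order %u)\n",
                                   base, base + size - 1, high);
                            base += size;
                    } else {
                            /* Target is in the lower half: release the upper half. */
                            printf("free pfn %lu..%lu (order %u)\n",
                                   base + size, base + 2 * size - 1, high);
                    }
            }
            printf("pfn %lu kept out of the allocator\n", target);
    }

    int main(void)
    {
            split_around_target(0, 4, 5);   /* order-4 block, poison pfn 5 */
            return 0;
    }

Each freed half is exactly one buddy of the shrinking block that still
holds the target, which is why the kernel version can put it straight
back on the free list of the corresponding order.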
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory-failure.c  18
-rw-r--r--  mm/page_alloc.c      68
2 files changed, 80 insertions(+), 6 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 2ff4ba7fa99f..af7fb23b93ae 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -65,6 +65,13 @@ int sysctl_memory_failure_recovery __read_mostly = 1;
 atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
 
+static void page_handle_poison(struct page *page)
+{
+	SetPageHWPoison(page);
+	page_ref_inc(page);
+	num_poisoned_pages_inc();
+}
+
 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
 
 u32 hwpoison_filter_enable = 0;
@@ -1884,14 +1891,13 @@ static int soft_offline_in_use_page(struct page *page, int flags)
 static int soft_offline_free_page(struct page *page)
 {
-	int rc = dissolve_free_huge_page(page);
+	int rc = -EBUSY;
 
-	if (!rc) {
-		if (set_hwpoison_free_buddy_page(page))
-			num_poisoned_pages_inc();
-		else
-			rc = -EBUSY;
+	if (!dissolve_free_huge_page(page) && take_page_off_buddy(page)) {
+		page_handle_poison(page);
+		rc = 0;
 	}
+
 	return rc;
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index febb2d731f9e..12dcfb3dc588 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8778,6 +8778,74 @@ bool is_free_buddy_page(struct page *page)
 #ifdef CONFIG_MEMORY_FAILURE
 /*
+ * Break down a higher-order page in sub-pages, and keep our target out of
+ * buddy allocator.
+ */
+static void break_down_buddy_pages(struct zone *zone, struct page *page,
+				   struct page *target, int low, int high,
+				   int migratetype)
+{
+	unsigned long size = 1 << high;
+	struct page *current_buddy, *next_page;
+
+	while (high > low) {
+		high--;
+		size >>= 1;
+
+		if (target >= &page[size]) {
+			next_page = page + size;
+			current_buddy = page;
+		} else {
+			next_page = page;
+			current_buddy = page + size;
+		}
+
+		if (set_page_guard(zone, current_buddy, high, migratetype))
+			continue;
+
+		if (current_buddy != target) {
+			add_to_free_list(current_buddy, zone, high, migratetype);
+			set_page_order(current_buddy, high);
+			page = next_page;
+		}
+	}
+}
+
+/*
+ * Take a page that will be marked as poisoned off the buddy allocator.
+ */
+bool take_page_off_buddy(struct page *page)
+{
+	struct zone *zone = page_zone(page);
+	unsigned long pfn = page_to_pfn(page);
+	unsigned long flags;
+	unsigned int order;
+	bool ret = false;
+
+	spin_lock_irqsave(&zone->lock, flags);
+	for (order = 0; order < MAX_ORDER; order++) {
+		struct page *page_head = page - (pfn & ((1 << order) - 1));
+		int buddy_order = page_order(page_head);
+
+		if (PageBuddy(page_head) && buddy_order >= order) {
+			unsigned long pfn_head = page_to_pfn(page_head);
+			int migratetype = get_pfnblock_migratetype(page_head,
+								   pfn_head);
+
+			del_page_from_free_list(page_head, zone, buddy_order);
+			break_down_buddy_pages(zone, page_head, page, 0,
+					       buddy_order, migratetype);
+			ret = true;
+			break;
+		}
+		if (page_count(page_head) > 0)
+			break;
+	}
+	spin_unlock_irqrestore(&zone->lock, flags);
+	return ret;
+}
+
+/*
  * Set PG_hwpoison flag if a given page is confirmed to be a free page.  This
  * test is performed under the zone lock to prevent a race against page
  * allocation.
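[Editor's illustration] take_page_off_buddy scans upward through the
orders to find the buddy block that currently contains the target page.
Because an order-N buddy block is naturally aligned to 2^N pages, the
candidate head at each order is found by clearing the low `order` bits
of the target's pfn; `page - (pfn & ((1 << order) - 1))` in the hunk
above is exactly that. A standalone userspace sketch of just this
arithmetic (the pfn is an arbitrary example, and the bound of 11
assumes the default MAX_ORDER of kernels from this era):

    /* Userspace sketch of the buddy-head lookup. Not kernel code. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long pfn = 0x12345;    /* arbitrary example pfn */

            for (unsigned int order = 0; order < 11; order++)
                    printf("order %2u: candidate head pfn 0x%lx\n",
                           order, pfn & ~((1UL << order) - 1));
            return 0;
    }

The first order whose candidate head is PageBuddy with at least that
order identifies the block to unlink and split; if the candidate
instead has a positive refcount, it is allocated rather than free, so
the scan stops early and the function reports failure.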