From 226ab561075f6f8f3cd5f7b3b7544f3997aab51f Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Fri, 13 Jul 2018 21:49:34 -0700
Subject: device-dax: Convert to vmf_insert_mixed and vm_fault_t

Use the new return type vm_fault_t for the fault and huge_fault
handlers. For now, this is just documenting that the function returns a
VM_FAULT value rather than an errno. Once all instances are converted,
vm_fault_t will become a distinct type. See commit 1c8f422059ae ("mm:
change return type to vm_fault_t").

Previously vm_insert_mixed() returned an error code which the driver
mapped into a VM_FAULT_* type. The new function vmf_insert_mixed()
removes this inefficiency by returning a VM_FAULT_* value directly.

Signed-off-by: Souptick Joarder
Reviewed-by: Matthew Wilcox
Reviewed-by: Ross Zwisler
Signed-off-by: Dan Williams
Signed-off-by: Dave Jiang
---
 drivers/dax/device.c | 26 +++++++++++---------------
 1 file changed, 11 insertions(+), 15 deletions(-)

diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index de2f8297a210..ad5e7b4a15dc 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -244,11 +244,11 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
 	return -1;
 }
 
-static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
+static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
+				struct vm_fault *vmf)
 {
 	struct device *dev = &dev_dax->dev;
 	struct dax_region *dax_region;
-	int rc = VM_FAULT_SIGBUS;
 	phys_addr_t phys;
 	pfn_t pfn;
 	unsigned int fault_size = PAGE_SIZE;
@@ -274,17 +274,11 @@ static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
 
 	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-	rc = vm_insert_mixed(vmf->vma, vmf->address, pfn);
-
-	if (rc == -ENOMEM)
-		return VM_FAULT_OOM;
-	if (rc < 0 && rc != -EBUSY)
-		return VM_FAULT_SIGBUS;
-
-	return VM_FAULT_NOPAGE;
+	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
 }
 
-static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
+static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
+				struct vm_fault *vmf)
 {
 	unsigned long pmd_addr = vmf->address & PMD_MASK;
 	struct device *dev = &dev_dax->dev;
@@ -334,7 +328,8 @@ static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
 }
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
+static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
+				struct vm_fault *vmf)
 {
 	unsigned long pud_addr = vmf->address & PUD_MASK;
 	struct device *dev = &dev_dax->dev;
@@ -384,13 +379,14 @@ static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
 			vmf->flags & FAULT_FLAG_WRITE);
 }
 #else
-static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
+static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
+				struct vm_fault *vmf)
 {
 	return VM_FAULT_FALLBACK;
 }
 #endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
-static int dev_dax_huge_fault(struct vm_fault *vmf,
+static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 		enum page_entry_size pe_size)
 {
 	int rc, id;
@@ -420,7 +416,7 @@ static int dev_dax_huge_fault(struct vm_fault *vmf,
 	return rc;
 }
 
-static int dev_dax_fault(struct vm_fault *vmf)
+static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
 {
 	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
 }
-- 
cgit v1.2.3
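
The conversion above is easiest to read side by side. Below is a
minimal sketch, not part of the patch, contrasting the old open-coded
errno translation with the new helper; the example_* function names are
illustrative only:

#include <linux/mm.h>
#include <linux/pfn_t.h>

/* Before: the driver translated vm_insert_mixed()'s errno by hand. */
static vm_fault_t example_fault_old(struct vm_fault *vmf, pfn_t pfn)
{
	int rc = vm_insert_mixed(vmf->vma, vmf->address, pfn);

	if (rc == -ENOMEM)
		return VM_FAULT_OOM;
	/* -EBUSY means a racing fault already mapped it: success */
	if (rc < 0 && rc != -EBUSY)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}

/* After: vmf_insert_mixed() performs the same translation internally. */
static vm_fault_t example_fault_new(struct vm_fault *vmf, pfn_t pfn)
{
	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}

Centralizing the translation inside vmf_insert_mixed() keeps the
-EBUSY-counts-as-success convention in one place rather than copied
into every driver.
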
From 2232c6382a453db73d2e723df1b52030066e135e Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Fri, 13 Jul 2018 21:49:40 -0700
Subject: device-dax: Enable page_mapping()

In support of enabling memory_failure() handling for device-dax
mappings, set the ->mapping association of pages backing device-dax
mappings. The rmap implementation requires page_mapping() to return the
address_space hosting the vmas that map the page.

The ->mapping pointer is never cleared. There is no possibility for the
page to become associated with another address_space while the device
is enabled. When the device is disabled, the 'struct page' array for
the device is destroyed and later reinitialized to zero.

Reviewed-by: Jan Kara
Signed-off-by: Dan Williams
Signed-off-by: Dave Jiang
---
 drivers/dax/device.c | 55 ++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 38 insertions(+), 17 deletions(-)

diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index ad5e7b4a15dc..95cfcfd612df 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -245,12 +245,11 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
 }
 
 static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
-				struct vm_fault *vmf)
+				struct vm_fault *vmf, pfn_t *pfn)
 {
 	struct device *dev = &dev_dax->dev;
 	struct dax_region *dax_region;
 	phys_addr_t phys;
-	pfn_t pfn;
 	unsigned int fault_size = PAGE_SIZE;
 
 	if (check_vma(dev_dax, vmf->vma, __func__))
@@ -272,20 +271,19 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
 		return VM_FAULT_SIGBUS;
 	}
 
-	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
+	return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
 }
 
 static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
-				struct vm_fault *vmf)
+				struct vm_fault *vmf, pfn_t *pfn)
 {
 	unsigned long pmd_addr = vmf->address & PMD_MASK;
 	struct device *dev = &dev_dax->dev;
 	struct dax_region *dax_region;
 	phys_addr_t phys;
 	pgoff_t pgoff;
-	pfn_t pfn;
 	unsigned int fault_size = PMD_SIZE;
 
 	if (check_vma(dev_dax, vmf->vma, __func__))
@@ -321,22 +319,21 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
 		return VM_FAULT_SIGBUS;
 	}
 
-	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, pfn,
+	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn,
 			vmf->flags & FAULT_FLAG_WRITE);
 }
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
-				struct vm_fault *vmf)
+				struct vm_fault *vmf, pfn_t *pfn)
 {
 	unsigned long pud_addr = vmf->address & PUD_MASK;
 	struct device *dev = &dev_dax->dev;
 	struct dax_region *dax_region;
 	phys_addr_t phys;
 	pgoff_t pgoff;
-	pfn_t pfn;
 	unsigned int fault_size = PUD_SIZE;
 
 
@@ -373,14 +370,14 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
 		return VM_FAULT_SIGBUS;
 	}
 
-	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-	return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, pfn,
+	return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn,
 			vmf->flags & FAULT_FLAG_WRITE);
 }
 #else
 static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
-				struct vm_fault *vmf)
+				struct vm_fault *vmf, pfn_t *pfn)
 {
 	return VM_FAULT_FALLBACK;
 }
@@ -389,8 +386,10 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
 static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 		enum page_entry_size pe_size)
 {
-	int rc, id;
 	struct file *filp = vmf->vma->vm_file;
+	unsigned long fault_size;
+	int rc, id;
+	pfn_t pfn;
 	struct dev_dax *dev_dax = filp->private_data;
 
 	dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
@@ -400,17 +399,39 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 	id = dax_read_lock();
 	switch (pe_size) {
 	case PE_SIZE_PTE:
-		rc = __dev_dax_pte_fault(dev_dax, vmf);
+		fault_size = PAGE_SIZE;
+		rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
 		break;
 	case PE_SIZE_PMD:
-		rc = __dev_dax_pmd_fault(dev_dax, vmf);
+		fault_size = PMD_SIZE;
+		rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
 		break;
 	case PE_SIZE_PUD:
-		rc = __dev_dax_pud_fault(dev_dax, vmf);
+		fault_size = PUD_SIZE;
+		rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
 		break;
 	default:
 		rc = VM_FAULT_SIGBUS;
 	}
+
+	if (rc == VM_FAULT_NOPAGE) {
+		unsigned long i;
+
+		/*
+		 * In the device-dax case the only possibility for a
+		 * VM_FAULT_NOPAGE result is when device-dax capacity is
+		 * mapped. No need to consider the zero page, or racing
+		 * conflicting mappings.
+		 */
+		for (i = 0; i < fault_size / PAGE_SIZE; i++) {
+			struct page *page;
+
+			page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
+			if (page->mapping)
+				continue;
+			page->mapping = filp->f_mapping;
+		}
+	}
 	dax_read_unlock(id);
 
 	return rc;
-- 
cgit v1.2.3
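
To see what the ->mapping association buys, here is a hedged sketch of
the kind of reverse-mapping walk memory_failure() performs, modeled on
the collect_procs_file()-style walk in mm/memory-failure.c; the
function name and body are illustrative, not from this series:

#include <linux/fs.h>
#include <linux/mm.h>

static void example_walk_mappers(struct page *page, pgoff_t pgoff)
{
	/* Before this patch, page_mapping() was NULL for device-dax pages */
	struct address_space *mapping = page_mapping(page);
	struct vm_area_struct *vma;

	if (!mapping)
		return;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/* e.g. queue a SIGBUS for the task owning this vma */
	}
	i_mmap_unlock_read(mapping);
}

Without a non-NULL ->mapping there is no interval tree to walk, so a
poisoned device-dax page could not be traced back to the processes
mapping it.
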
From 35de299547d1c3300e078f9f7c6eb01dadae47f9 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Fri, 13 Jul 2018 21:49:45 -0700
Subject: device-dax: Set page->index

In support of enabling memory_failure() handling for device-dax
mappings, set ->index to the pgoff of the page. The rmap implementation
requires ->index to bound the search through the vma interval tree.

The ->index value is never cleared. There is no possibility for the
page to become associated with another pgoff while the device is
enabled. When the device is disabled, the 'struct page' array for the
device is destroyed and ->index is reinitialized to zero.

Reviewed-by: Jan Kara
Signed-off-by: Dan Williams
Signed-off-by: Dave Jiang
---
 drivers/dax/device.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 95cfcfd612df..361a11089591 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -416,6 +416,7 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 
 	if (rc == VM_FAULT_NOPAGE) {
 		unsigned long i;
+		pgoff_t pgoff;
 
 		/*
 		 * In the device-dax case the only possibility for a
@@ -423,6 +424,8 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 		 * mapped. No need to consider the zero page, or racing
 		 * conflicting mappings.
 		 */
+		pgoff = linear_page_index(vmf->vma, vmf->address
+				& ~(fault_size - 1));
 		for (i = 0; i < fault_size / PAGE_SIZE; i++) {
 			struct page *page;
 
@@ -430,6 +433,7 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 			if (page->mapping)
 				continue;
 			page->mapping = filp->f_mapping;
+			page->index = pgoff + i;
 		}
 	}
 	dax_read_unlock(id);
-- 
cgit v1.2.3
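
The ->index value is what lets rmap translate a page back to a user
virtual address inside each vma it finds. A short sketch of that
arithmetic, mirroring vma_address() in mm/internal.h (the function name
here is illustrative):

#include <linux/mm.h>

static unsigned long example_vma_address(struct page *page,
		struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index;	/* set at fault time by this patch */

	/* inverse of the linear_page_index() computation used above */
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}
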
From c953cc987ab87d180e1d5de2f1c217abe33aac77 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Fri, 13 Jul 2018 21:50:37 -0700
Subject: libnvdimm, pmem: Restore page attributes when clearing errors

Use clear_mce_nospec() to restore WB mode for the kernel linear mapping
of a pmem page that was marked 'HWPoison'. A page with 'HWPoison' set
has also been marked UC in PAT (page attribute table) via
set_mce_nospec() to prevent speculative retrievals of poison.

The 'HWPoison' flag is only cleared when overwriting an entire page.

Signed-off-by: Dan Williams
Signed-off-by: Dave Jiang
---
 drivers/nvdimm/pmem.c | 26 ++++++++++++++++++++++++++
 drivers/nvdimm/pmem.h | 13 +++++++++++++
 2 files changed, 39 insertions(+)

diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 8b1fd7f1a224..55c7a69751d3 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -51,6 +52,30 @@ static struct nd_region *to_region(struct pmem_device *pmem)
 	return to_nd_region(to_dev(pmem)->parent);
 }
 
+static void hwpoison_clear(struct pmem_device *pmem,
+		phys_addr_t phys, unsigned int len)
+{
+	unsigned long pfn_start, pfn_end, pfn;
+
+	/* only pmem in the linear map supports HWPoison */
+	if (is_vmalloc_addr(pmem->virt_addr))
+		return;
+
+	pfn_start = PHYS_PFN(phys);
+	pfn_end = pfn_start + PHYS_PFN(len);
+	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
+		struct page *page = pfn_to_page(pfn);
+
+		/*
+		 * Note, no need to hold a get_dev_pagemap() reference
+		 * here since we're in the driver I/O path and
+		 * outstanding I/O requests pin the dev_pagemap.
+		 */
+		if (test_and_clear_pmem_poison(page))
+			clear_mce_nospec(pfn);
+	}
+}
+
 static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
 		phys_addr_t offset, unsigned int len)
 {
@@ -65,6 +90,7 @@ static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
 	if (cleared < len)
 		rc = BLK_STS_IOERR;
 	if (cleared > 0 && cleared / 512) {
+		hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
 		cleared /= 512;
 		dev_dbg(dev, "%#llx clear %ld sector%s\n",
 				(unsigned long long) sector, cleared,
diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h
index a64ebc78b5df..59cfe13ea8a8 100644
--- a/drivers/nvdimm/pmem.h
+++ b/drivers/nvdimm/pmem.h
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __NVDIMM_PMEM_H__
 #define __NVDIMM_PMEM_H__
+#include
 #include
 #include
 #include
@@ -27,4 +28,16 @@ struct pmem_device {
 
 long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
 		long nr_pages, void **kaddr, pfn_t *pfn);
+
+#ifdef CONFIG_MEMORY_FAILURE
+static inline bool test_and_clear_pmem_poison(struct page *page)
+{
+	return TestClearPageHWPoison(page);
+}
+#else
+static inline bool test_and_clear_pmem_poison(struct page *page)
+{
+	return false;
+}
+#endif
 #endif /* __NVDIMM_PMEM_H__ */
-- 
cgit v1.2.3
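
A quick worked example of the range math in hwpoison_clear(), written
as a standalone userspace sketch with hypothetical values and 4KiB
pages assumed: because PHYS_PFN() truncates, a partially cleared
trailing page keeps its 'HWPoison' flag, consistent with the note above
that the flag is only cleared when an entire page is overwritten.

#include <stdio.h>

#define PAGE_SHIFT 12
/* mirrors the kernel's PHYS_PFN() in include/linux/pfn.h */
#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))

int main(void)
{
	unsigned long long phys = 0x200000; /* start of the cleared range */
	unsigned int len = 0x2800;          /* 10KiB cleared: 2.5 pages */

	unsigned long pfn_start = PHYS_PFN(phys);          /* 512 */
	unsigned long pfn_end = pfn_start + PHYS_PFN(len); /* 514 */

	/* clear_mce_nospec() candidates: pfns 512 and 513 only;
	 * the half-cleared third page stays poisoned. */
	printf("un-poison pfns %lu..%lu\n", pfn_start, pfn_end - 1);
	return 0;
}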