Diffstat (limited to 'drivers/dax')
-rw-r--r--   drivers/dax/Kconfig  |  5
-rw-r--r--   drivers/dax/device.c | 38
-rw-r--r--   drivers/dax/pmem.c   | 18
-rw-r--r--   drivers/dax/super.c  | 15
4 files changed, 37 insertions, 39 deletions
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index b79aa8f7a497..e0700bf4893a 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -1,3 +1,7 @@
+config DAX_DRIVER
+	select DAX
+	bool
+
 menuconfig DAX
 	tristate "DAX: direct access to differentiated memory"
 	select SRCU
@@ -16,7 +20,6 @@ config DEV_DAX
 	  baseline memory pool.  Mappings of a /dev/daxX.Y device impose
 	  restrictions that make the mapping behavior deterministic.

-
 config DEV_DAX_PMEM
 	tristate "PMEM DAX: direct access to persistent memory"
 	depends on LIBNVDIMM && NVDIMM_DAX && DEV_DAX
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 0b61f48f21a6..be8606457f27 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -257,8 +257,8 @@ static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)

 	dax_region = dev_dax->region;
 	if (dax_region->align > PAGE_SIZE) {
-		dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n",
-				__func__, dax_region->align, fault_size);
+		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
+				dax_region->align, fault_size);
 		return VM_FAULT_SIGBUS;
 	}

@@ -267,8 +267,7 @@ static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)

 	phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
 	if (phys == -1) {
-		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
-				vmf->pgoff);
+		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
 		return VM_FAULT_SIGBUS;
 	}

@@ -299,14 +298,14 @@ static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)

 	dax_region = dev_dax->region;
 	if (dax_region->align > PMD_SIZE) {
-		dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n",
-				__func__, dax_region->align, fault_size);
+		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
+				dax_region->align, fault_size);
 		return VM_FAULT_SIGBUS;
 	}

 	/* dax pmd mappings require pfn_t_devmap() */
 	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
-		dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
+		dev_dbg(dev, "region lacks devmap flags\n");
 		return VM_FAULT_SIGBUS;
 	}

@@ -323,8 +322,7 @@ static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
 	pgoff = linear_page_index(vmf->vma, pmd_addr);
 	phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
 	if (phys == -1) {
-		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
-				pgoff);
+		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
 		return VM_FAULT_SIGBUS;
 	}

@@ -351,14 +349,14 @@ static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)

 	dax_region = dev_dax->region;
 	if (dax_region->align > PUD_SIZE) {
-		dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n",
-				__func__, dax_region->align, fault_size);
+		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
+				dax_region->align, fault_size);
 		return VM_FAULT_SIGBUS;
 	}

 	/* dax pud mappings require pfn_t_devmap() */
 	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
-		dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
+		dev_dbg(dev, "region lacks devmap flags\n");
 		return VM_FAULT_SIGBUS;
 	}

@@ -375,8 +373,7 @@ static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
 	pgoff = linear_page_index(vmf->vma, pud_addr);
 	phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
 	if (phys == -1) {
-		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
-				pgoff);
+		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
 		return VM_FAULT_SIGBUS;
 	}

@@ -399,9 +396,8 @@ static int dev_dax_huge_fault(struct vm_fault *vmf,
 	struct file *filp = vmf->vma->vm_file;
 	struct dev_dax *dev_dax = filp->private_data;

-	dev_dbg(&dev_dax->dev, "%s: %s: %s (%#lx - %#lx) size = %d\n", __func__,
-			current->comm, (vmf->flags & FAULT_FLAG_WRITE)
-			? "write" : "read",
+	dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
+			(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
 			vmf->vma->vm_start, vmf->vma->vm_end, pe_size);

 	id = dax_read_lock();
@@ -460,7 +456,7 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
 	struct dev_dax *dev_dax = filp->private_data;
 	int rc, id;

-	dev_dbg(&dev_dax->dev, "%s\n", __func__);
+	dev_dbg(&dev_dax->dev, "trace\n");

 	/*
 	 * We lock to check dax_dev liveness and will re-check at
@@ -518,7 +514,7 @@ static int dax_open(struct inode *inode, struct file *filp)
 	struct inode *__dax_inode = dax_inode(dax_dev);
 	struct dev_dax *dev_dax = dax_get_private(dax_dev);

-	dev_dbg(&dev_dax->dev, "%s\n", __func__);
+	dev_dbg(&dev_dax->dev, "trace\n");
 	inode->i_mapping = __dax_inode->i_mapping;
 	inode->i_mapping->host = __dax_inode;
 	filp->f_mapping = inode->i_mapping;
@@ -533,7 +529,7 @@ static int dax_release(struct inode *inode, struct file *filp)
 {
 	struct dev_dax *dev_dax = filp->private_data;

-	dev_dbg(&dev_dax->dev, "%s\n", __func__);
+	dev_dbg(&dev_dax->dev, "trace\n");
 	return 0;
 }

@@ -575,7 +571,7 @@ static void unregister_dev_dax(void *dev)
 	struct inode *inode = dax_inode(dax_dev);
 	struct cdev *cdev = inode->i_cdev;

-	dev_dbg(dev, "%s\n", __func__);
+	dev_dbg(dev, "trace\n");

 	kill_dev_dax(dev_dax);
 	cdev_device_del(cdev, dev);
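
The device.c hunks above drop __func__ (and the bare "%s\n" function-trace messages) from the dev_dbg() format strings. A minimal sketch of why that is safe, assuming CONFIG_DYNAMIC_DEBUG is enabled: dynamic debug can prepend the calling function name to any dev_dbg() callsite at runtime via the 'f' decorator flag, so hard-coding it in every format string is redundant. The helper below is illustrative only and not part of the patch.

#include <linux/device.h>

/*
 * Illustrative sketch: with dynamic debug, the message below can be enabled
 * and decorated with the calling function name at runtime, e.g.
 *
 *   echo 'file drivers/dax/device.c +pf' > \
 *           /sys/kernel/debug/dynamic_debug/control
 *
 * ('p' turns the message on, 'f' prepends the function name), which is why
 * the format strings above no longer carry an explicit __func__.
 */
static void example_alignment_dbg(struct device *dev, unsigned int align,
		unsigned int fault_size)
{
	dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n", align, fault_size);
}
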
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 31b6ecce4c64..fd49b24fd6af 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -34,7 +34,7 @@ static void dax_pmem_percpu_release(struct percpu_ref *ref)
 {
 	struct dax_pmem *dax_pmem = to_dax_pmem(ref);

-	dev_dbg(dax_pmem->dev, "%s\n", __func__);
+	dev_dbg(dax_pmem->dev, "trace\n");
 	complete(&dax_pmem->cmp);
 }

@@ -43,7 +43,7 @@ static void dax_pmem_percpu_exit(void *data)
 	struct percpu_ref *ref = data;
 	struct dax_pmem *dax_pmem = to_dax_pmem(ref);

-	dev_dbg(dax_pmem->dev, "%s\n", __func__);
+	dev_dbg(dax_pmem->dev, "trace\n");
 	wait_for_completion(&dax_pmem->cmp);
 	percpu_ref_exit(ref);
 }
@@ -53,7 +53,7 @@ static void dax_pmem_percpu_kill(void *data)
 	struct percpu_ref *ref = data;
 	struct dax_pmem *dax_pmem = to_dax_pmem(ref);

-	dev_dbg(dax_pmem->dev, "%s\n", __func__);
+	dev_dbg(dax_pmem->dev, "trace\n");
 	percpu_ref_kill(ref);
 }

@@ -150,17 +150,7 @@ static struct nd_device_driver dax_pmem_driver = {
 	.type = ND_DRIVER_DAX_PMEM,
 };

-static int __init dax_pmem_init(void)
-{
-	return nd_driver_register(&dax_pmem_driver);
-}
-module_init(dax_pmem_init);
-
-static void __exit dax_pmem_exit(void)
-{
-	driver_unregister(&dax_pmem_driver.drv);
-}
-module_exit(dax_pmem_exit);
+module_nd_driver(dax_pmem_driver);

 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Intel Corporation");
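
The final pmem.c hunk folds the hand-rolled dax_pmem_init()/dax_pmem_exit() pair into the module_nd_driver() helper (defined in include/linux/nd.h). Assuming the helper follows the usual module_driver() convention, it generates an init/exit pair roughly equivalent to the code it removes; the function names below are hypothetical and dax_pmem_driver is the nd_device_driver defined in pmem.c:

/*
 * Assumed expansion, for illustration only: the removed boilerplate that
 * module_nd_driver(dax_pmem_driver) is expected to regenerate.
 */
static int __init dax_pmem_driver_init(void)
{
	return nd_driver_register(&dax_pmem_driver);
}
module_init(dax_pmem_driver_init);

static void __exit dax_pmem_driver_exit(void)
{
	/* the helper's unregister hook is assumed to boil down to this call */
	driver_unregister(&dax_pmem_driver.drv);
}
module_exit(dax_pmem_driver_exit);
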
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index ecdc292aa4e4..2b2332b605e4 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -124,10 +124,19 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize)
 		return len < 0 ? len : -EIO;
 	}

-	if ((IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn))
-			|| pfn_t_devmap(pfn))
+	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
+		/*
+		 * An arch that has enabled the pmem api should also
+		 * have its drivers support pfn_t_devmap()
+		 *
+		 * This is a developer warning and should not trigger in
+		 * production. dax_flush() will crash since it depends
+		 * on being able to do (page_address(pfn_to_page())).
+		 */
+		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
+	} else if (pfn_t_devmap(pfn)) {
 		/* pass */;
-	else {
+	} else {
 		pr_debug("VFS (%s): error: dax support not enabled\n",
 				sb->s_id);
 		return -EOPNOTSUPP;
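
Pulling the super.c change together: after this hunk, __bdev_dax_supported() accepts a pfn_t_special() pfn only under CONFIG_FS_DAX_LIMITED, and warns when the architecture also advertises CONFIG_ARCH_HAS_PMEM_API, because dax_flush() depends on page_address(pfn_to_page()). A consolidated sketch of just that check, with a made-up wrapper name and the sb->s_id string passed in for the pr_debug():

#include <linux/kernel.h>
#include <linux/pfn_t.h>
#include <linux/errno.h>

/* Hypothetical standalone wrapper around the check added above. */
static int dax_pfn_supported(pfn_t pfn, const char *sb_id)
{
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
		/*
		 * Developer warning: an arch that enables the pmem API is
		 * expected to back dax with pfn_t_devmap() pages, because
		 * dax_flush() relies on page_address(pfn_to_page()).
		 */
		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
	} else if (pfn_t_devmap(pfn)) {
		/* pass */;
	} else {
		pr_debug("VFS (%s): error: dax support not enabled\n", sb_id);
		return -EOPNOTSUPP;
	}

	return 0;
}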