From 7a9eb20666317794d0279843fbd091af93907780 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Fri, 3 Jun 2016 18:06:47 -0700
Subject: pmem: kill __pmem address space

The __pmem address space was meant to annotate codepaths that touch
persistent memory and need to coordinate a call to wmb_pmem(). Now that
wmb_pmem() is gone, there is little need to keep this annotation.

Cc: Christoph Hellwig
Cc: Ross Zwisler
Signed-off-by: Dan Williams
---
 include/linux/blkdev.h   |  6 ++---
 include/linux/compiler.h |  2 --
 include/linux/nd.h       |  2 +-
 include/linux/pmem.h     | 70 ++++++++++++------------------------------------
 4 files changed, 21 insertions(+), 59 deletions(-)

(limited to 'include')

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3d9cf326574f..fde908b2836b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1659,7 +1659,7 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
  */
 struct blk_dax_ctl {
 	sector_t sector;
-	void __pmem *addr;
+	void *addr;
 	long size;
 	pfn_t pfn;
 };
@@ -1670,8 +1670,8 @@ struct block_device_operations {
 	int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
-	long (*direct_access)(struct block_device *, sector_t, void __pmem **,
-			pfn_t *, long);
+	long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
+			long);
 	unsigned int (*check_events) (struct gendisk *disk,
 				      unsigned int clearing);
 	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 793c0829e3a3..b966974938ed 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -17,7 +17,6 @@
 # define __release(x)	__context__(x,-1)
 # define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
 # define __percpu	__attribute__((noderef, address_space(3)))
-# define __pmem	__attribute__((noderef, address_space(5)))
 #ifdef CONFIG_SPARSE_RCU_POINTER
 # define __rcu		__attribute__((noderef, address_space(4)))
 #else /* CONFIG_SPARSE_RCU_POINTER */
@@ -45,7 +44,6 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 # define __cond_lock(x,c) (c)
 # define __percpu
 # define __rcu
-# define __pmem
 # define __private
 # define ACCESS_PRIVATE(p, member) ((p)->member)
 #endif /* __CHECKER__ */
diff --git a/include/linux/nd.h b/include/linux/nd.h
index 1ecd64643512..f1ea426d6a5e 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -68,7 +68,7 @@ struct nd_namespace_io {
 	struct nd_namespace_common common;
 	struct resource res;
 	resource_size_t size;
-	void __pmem *addr;
+	void *addr;
 	struct badblocks bb;
 };

diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index 9e3ea94b8157..e856c2cb0fe8 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -26,37 +26,35 @@
  * calling these symbols with arch_has_pmem_api() and redirect to the
  * implementation in asm/pmem.h.
  */
-static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
-		size_t n)
+static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
 {
 	BUG();
 }

-static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
-		size_t n)
+static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
 {
 	BUG();
 	return -EFAULT;
 }

-static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
 		struct iov_iter *i)
 {
 	BUG();
 	return 0;
 }

-static inline void arch_clear_pmem(void __pmem *addr, size_t size)
+static inline void arch_clear_pmem(void *addr, size_t size)
 {
 	BUG();
 }

-static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size)
+static inline void arch_wb_cache_pmem(void *addr, size_t size)
 {
 	BUG();
 }

-static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
+static inline void arch_invalidate_pmem(void *addr, size_t size)
 {
 	BUG();
 }
@@ -67,13 +65,6 @@ static inline bool arch_has_pmem_api(void)
 	return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
 }

-static inline int default_memcpy_from_pmem(void *dst, void __pmem const *src,
-		size_t size)
-{
-	memcpy(dst, (void __force *) src, size);
-	return 0;
-}
-
 /*
  * memcpy_from_pmem - read from persistent memory with error handling
  * @dst: destination buffer
@@ -82,40 +73,13 @@ static inline int default_memcpy_from_pmem(void *dst, void __pmem const *src,
  *
  * Returns 0 on success negative error code on failure.
  */
-static inline int memcpy_from_pmem(void *dst, void __pmem const *src,
-		size_t size)
+static inline int memcpy_from_pmem(void *dst, void const *src, size_t size)
 {
 	if (arch_has_pmem_api())
 		return arch_memcpy_from_pmem(dst, src, size);
 	else
-		return default_memcpy_from_pmem(dst, src, size);
-}
-
-/*
- * These defaults seek to offer decent performance and minimize the
- * window between i/o completion and writes being durable on media.
- * However, it is undefined / architecture specific whether
- * ARCH_MEMREMAP_PMEM + default_memcpy_to_pmem is sufficient for
- * making data durable relative to i/o completion.
- */
-static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
-		size_t size)
-{
-	memcpy((void __force *) dst, src, size);
-}
-
-static inline size_t default_copy_from_iter_pmem(void __pmem *addr,
-		size_t bytes, struct iov_iter *i)
-{
-	return copy_from_iter_nocache((void __force *)addr, bytes, i);
-}
-
-static inline void default_clear_pmem(void __pmem *addr, size_t size)
-{
-	if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0)
-		clear_page((void __force *)addr);
-	else
-		memset((void __force *)addr, 0, size);
+		memcpy(dst, src, size);
+	return 0;
 }

 /**
@@ -130,12 +94,12 @@ static inline void default_clear_pmem(void __pmem *addr, size_t size)
 * data may still reside in cpu or platform buffers, so this operation
 * must be followed by a blkdev_issue_flush() on the pmem block device.
 */
-static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
+static inline void memcpy_to_pmem(void *dst, const void *src, size_t n)
 {
 	if (arch_has_pmem_api())
 		arch_memcpy_to_pmem(dst, src, n);
 	else
-		default_memcpy_to_pmem(dst, src, n);
+		memcpy(dst, src, n);
 }

 /**
@@ -147,12 +111,12 @@ static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
 * See blkdev_issue_flush() note for memcpy_to_pmem().
 */
-static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+static inline size_t copy_from_iter_pmem(void *addr, size_t bytes,
 		struct iov_iter *i)
 {
 	if (arch_has_pmem_api())
 		return arch_copy_from_iter_pmem(addr, bytes, i);
-	return default_copy_from_iter_pmem(addr, bytes, i);
+	return copy_from_iter_nocache(addr, bytes, i);
 }

 /**
@@ -163,12 +127,12 @@ static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
 * See blkdev_issue_flush() note for memcpy_to_pmem().
 */
-static inline void clear_pmem(void __pmem *addr, size_t size)
+static inline void clear_pmem(void *addr, size_t size)
 {
 	if (arch_has_pmem_api())
 		arch_clear_pmem(addr, size);
 	else
-		default_clear_pmem(addr, size);
+		memset(addr, 0, size);
 }

 /**
@@ -179,7 +143,7 @@ static inline void clear_pmem(void __pmem *addr, size_t size)
 * For platforms that support clearing poison this flushes any poisoned
 * ranges out of the cache
 */
-static inline void invalidate_pmem(void __pmem *addr, size_t size)
+static inline void invalidate_pmem(void *addr, size_t size)
 {
 	if (arch_has_pmem_api())
 		arch_invalidate_pmem(addr, size);
@@ -193,7 +157,7 @@ static inline void invalidate_pmem(void __pmem *addr, size_t size)
 * Write back the processor cache range starting at 'addr' for 'size' bytes.
 * See blkdev_issue_flush() note for memcpy_to_pmem().
 */
-static inline void wb_cache_pmem(void __pmem *addr, size_t size)
+static inline void wb_cache_pmem(void *addr, size_t size)
 {
 	if (arch_has_pmem_api())
 		arch_wb_cache_pmem(addr, size);
--
cgit v1.2.3
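For context, a minimal self-contained sketch (not part of the patch; the helper
names are illustrative only) of what the removed sparse annotation did: under
sparse (__CHECKER__), __pmem placed pointers in a separate, noderef address
space, so unannotated dereferences or casts were flagged and callers had to use
__force casts. Outside a sparse run the attributes expand to nothing, so the
snippet compiles with a regular C compiler, and the "after" variant shows why a
plain void * and an ordinary memcpy() suffice once the annotation is gone.

#include <stddef.h>
#include <string.h>

#ifdef __CHECKER__
# define __pmem		__attribute__((noderef, address_space(5)))
# define __force	__attribute__((force))
#else
# define __pmem
# define __force
#endif

/* Before the patch: the __pmem-annotated destination cannot be handed to
 * memcpy() directly; sparse requires an explicit __force cast. */
static inline void example_copy_to_pmem_old(void __pmem *dst, const void *src,
		size_t n)
{
	memcpy((void __force *) dst, src, n);
}

/* After the patch: a plain void *, no annotation and no cast needed. */
static inline void example_copy_to_pmem_new(void *dst, const void *src,
		size_t n)
{
	memcpy(dst, src, n);
}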