author		Vineet Gupta <vgupta@synopsys.com>	2016-05-05 13:32:34 +0530
committer	Vineet Gupta <vgupta@synopsys.com>	2016-05-05 16:35:28 +0530
commit		e5bc0478ab6cf565619224536d75ecb2aedca43b (patch)
tree		03e3e24bd50b01e178bb10e9aad22fee0b7f5704 /arch/arc
parent		1b10cb21d888c021bedbe678f7c26aee1bf04ffa (diff)
ARC: Add missing io barriers to io{read,write}{16,32}be()
While reviewing a different change to asm-generic/io.h, Arnd spotted that
ARC ioread32() and ioread32be(), both of which come from the asm-generic
versions, are not symmetrical in terms of calling the io barriers:

  generic ioread32    -> ARC readl()                  [ has barriers ]
  generic ioread32be  -> __be32_to_cpu(__raw_readl()) [ lacks barriers ]

While generic ioread32be() is being remediated to call readl(), that
involves a swab32(), causing double swaps on ioread32be() on Big Endian
systems.

So provide our own versions of the big endian IO accessors to ensure the
io barrier calls while also keeping them optimal.

Suggested-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Cc: stable@vger.kernel.org [4.2+]
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
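For illustration only (not part of the commit): a minimal standalone C
sketch of the double swap the message describes, assuming a big-endian
CPU. The *_model names are hypothetical stand-ins for the kernel
accessors, purely to show where each swab32() lands.

/*
 * User-space sketch of the accessor layering; in the kernel the
 * barrier calls marked in comments are rmb()/__iormb().
 */
#include <stdint.h>

#define swab32_model(x)		__builtin_bswap32(x)

/* raw MMIO load: no byte swap, no barrier */
static inline uint32_t raw_readl_model(const volatile uint32_t *p)
{
	return *p;
}

/*
 * readl(): little-endian register -> CPU order; on a big-endian CPU
 * that is one swab32(), followed by the read barrier.
 */
static inline uint32_t readl_model(const volatile uint32_t *p)
{
	uint32_t v = swab32_model(raw_readl_model(p));
	/* rmb() here in the kernel */
	return v;
}

/*
 * generic ioread32be() built on readl(): be32_to_cpu() is a second
 * swab32() on big-endian, so the value is swapped twice for nothing.
 */
static inline uint32_t ioread32be_via_readl_model(const volatile uint32_t *p)
{
	return swab32_model(readl_model(p));
}

/*
 * The form this patch adds: raw load, be32_to_cpu() (a no-op on
 * big-endian, one swap on little-endian), then the explicit barrier.
 */
static inline uint32_t ioread32be_raw_model(const volatile uint32_t *p)
{
	uint32_t v = raw_readl_model(p);	/* already big-endian */
	/* __iormb() here in the kernel */
	return v;
}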
Diffstat (limited to 'arch/arc')
-rw-r--r--	arch/arc/include/asm/io.h	27
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 17f85c9c73cf..c22b181e8206 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -13,6 +13,15 @@
#include <asm/byteorder.h>
#include <asm/page.h>
+#ifdef CONFIG_ISA_ARCV2
+#include <asm/barrier.h>
+#define __iormb() rmb()
+#define __iowmb() wmb()
+#else
+#define __iormb() do { } while (0)
+#define __iowmb() do { } while (0)
+#endif
+
extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
unsigned long flags);
@@ -31,6 +40,15 @@ extern void iounmap(const void __iomem *addr);
#define ioremap_wc(phy, sz) ioremap(phy, sz)
#define ioremap_wt(phy, sz) ioremap(phy, sz)
+/*
+ * io{read,write}{16,32}be() macros
+ */
+#define ioread16be(p) ({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p) ({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+
+#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
+
/* Change struct page to physical address */
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
@@ -108,15 +126,6 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
}
-#ifdef CONFIG_ISA_ARCV2
-#include <asm/barrier.h>
-#define __iormb() rmb()
-#define __iowmb() wmb()
-#else
-#define __iormb() do { } while (0)
-#define __iowmb() do { } while (0)
-#endif
-
/*
* MMIO can also get buffered/optimized in micro-arch, so barriers needed
* Based on ARM model for the typical use case