From 48c72fccbfb1db01b5d0b98baff4442fea50d7a4 Mon Sep 17 00:00:00 2001
From: Magnus Damm
Date: Thu, 4 Jun 2009 20:20:24 +0900
Subject: sh: 16-bit get_unaligned() sh4a fix

This patch fixes the 16-bit case of the sh4a specific unaligned access
implementation.

Without this patch the 16-bit version of sh4a get_unaligned() results
in a 32-bit read which may read more data than intended and/or cross
page boundaries.

Unbreaks mtd NOR write handling on Migo-R.

Signed-off-by: Magnus Damm
Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/unaligned-sh4a.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'arch/sh/include/asm')

diff --git a/arch/sh/include/asm/unaligned-sh4a.h b/arch/sh/include/asm/unaligned-sh4a.h
index d8f89770275b..9f4dd252c981 100644
--- a/arch/sh/include/asm/unaligned-sh4a.h
+++ b/arch/sh/include/asm/unaligned-sh4a.h
@@ -3,9 +3,9 @@
 
 /*
  * SH-4A has support for unaligned 32-bit loads, and 32-bit loads only.
- * Support for 16 and 64-bit accesses are done through shifting and
- * masking relative to the endianness. Unaligned stores are not supported
- * by the instruction encoding, so these continue to use the packed
+ * Support for 64-bit accesses are done through shifting and masking
+ * relative to the endianness. Unaligned stores are not supported by the
+ * instruction encoding, so these continue to use the packed
  * struct.
  *
  * The same note as with the movli.l/movco.l pair applies here, as long
@@ -41,9 +41,9 @@ struct __una_u64 { u64 x __attribute__((packed)); };
 static inline u16 __get_unaligned_cpu16(const u8 *p)
 {
 #ifdef __LITTLE_ENDIAN
-	return __get_unaligned_cpu32(p) & 0xffff;
+	return p[0] | p[1] << 8;
 #else
-	return __get_unaligned_cpu32(p) >> 16;
+	return p[0] << 8 | p[1];
 #endif
 }
 
-- 
cgit v1.2.3
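
The sketch below is a standalone user-space illustration (not the kernel
header itself) of the byte-wise 16-bit load the patch introduces. The helper
names load_unaligned_le16/load_unaligned_be16 and the test buffer are made up
for this example; only the two p[0]/p[1] expressions come from the patch. The
point is that reading the two bytes individually touches exactly the addressed
16-bit value, whereas the old __get_unaligned_cpu32()-based version performed
a full 32-bit read that could run past the value or cross a page boundary.

/* Standalone sketch, assuming nothing beyond standard C. */
#include <stdint.h>
#include <stdio.h>

/* Little-endian assembly of two bytes, mirroring the __LITTLE_ENDIAN branch. */
static inline uint16_t load_unaligned_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | p[1] << 8);
}

/* Big-endian assembly of two bytes, mirroring the #else branch. */
static inline uint16_t load_unaligned_be16(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);
}

int main(void)
{
	/* A 16-bit value starting at an odd (unaligned) offset. */
	uint8_t buf[4] = { 0x00, 0x34, 0x12, 0xff };

	printf("LE: 0x%04x\n", (unsigned)load_unaligned_le16(buf + 1)); /* 0x1234 */
	printf("BE: 0x%04x\n", (unsigned)load_unaligned_be16(buf + 1)); /* 0x3412 */
	return 0;
}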