From 19f6b8b44e3f633d5d7d1ed68848b1eb89a1e800 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Fri, 12 Feb 2010 15:41:45 +0900
Subject: sh64: fix up memory offset calculation.

The linker script offsets were broken by the recent 29/32-bit
integration, so this fixes it up for sh64.

Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/vmlinux.lds.h |  8 ++++++++
 arch/sh/kernel/vmlinux.lds.S      | 19 +++++--------------
 2 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/arch/sh/include/asm/vmlinux.lds.h b/arch/sh/include/asm/vmlinux.lds.h
index 244ec4ad9a79..d58ad493b3a6 100644
--- a/arch/sh/include/asm/vmlinux.lds.h
+++ b/arch/sh/include/asm/vmlinux.lds.h
@@ -14,4 +14,12 @@
 #define DWARF_EH_FRAME
 #endif
 
+#ifdef CONFIG_SUPERH64
+#define EXTRA_TEXT \
+	*(.text64) \
+	*(.text..SHmedia32)
+#else
+#define EXTRA_TEXT
+#endif
+
 #endif /* __ASM_SH_VMLINUX_LDS_H */
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index f0bc6b886eed..dcf48915e2ea 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -3,10 +3,12 @@
  * Written by Niibe Yutaka and Paul Mundt
  */
 #ifdef CONFIG_SUPERH64
-#define LOAD_OFFSET	CONFIG_PAGE_OFFSET
+#define LOAD_OFFSET	PAGE_OFFSET
+#define MEMORY_OFFSET	__MEMORY_START
 OUTPUT_ARCH(sh:sh5)
 #else
 #define LOAD_OFFSET	0
+#define MEMORY_OFFSET	0
 OUTPUT_ARCH(sh)
 #endif
 
@@ -14,16 +16,10 @@ OUTPUT_ARCH(sh)
 #include <asm/cache.h>
 #include <asm/vmlinux.lds.h>
 
-#if defined(CONFIG_32BIT) && !defined(CONFIG_PMB_LEGACY)
-#define MEMORY_OFFSET	0
-#else
-#define MEMORY_OFFSET	(CONFIG_MEMORY_START & 0x1fffffff)
-#endif
-
 ENTRY(_start)
 SECTIONS
 {
-	. = CONFIG_PAGE_OFFSET + MEMORY_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
+	. = PAGE_OFFSET + MEMORY_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
 	_text = .;
 
 	/* Text and read-only data */
@@ -34,12 +30,7 @@ SECTIONS
 	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 		HEAD_TEXT
 		TEXT_TEXT
-
-#ifdef CONFIG_SUPERH64
-		*(.text64)
-		*(.text..SHmedia32)
-#endif
-
+		EXTRA_TEXT
 		SCHED_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
-- 
cgit v1.2.3
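
Below is an illustrative sketch, not part of the patch above: a minimal,
self-contained .lds.S-style fragment showing the scheme the patch moves to,
where LOAD_OFFSET, MEMORY_OFFSET and EXTRA_TEXT are defined once per
configuration and the SECTIONS body stays free of CONFIG_SUPERH64 ifdefs.
The numeric values and the bare *(.text) input rule are placeholders
standing in for the real Kconfig values and the kernel's HEAD_TEXT /
TEXT_TEXT macros.

/*
 * Illustrative sketch only; run through the C preprocessor like a .lds.S
 * file.  All numeric values below are placeholders, not real sh64 ones.
 */
#define PAGE_OFFSET		0x80000000	/* placeholder */
#define __MEMORY_START		0x08000000	/* placeholder */
#define CONFIG_ZERO_PAGE_OFFSET	0x00001000	/* placeholder */

#ifdef CONFIG_SUPERH64
#define LOAD_OFFSET	PAGE_OFFSET		/* link-time virtual offset */
#define MEMORY_OFFSET	__MEMORY_START		/* offset to the RAM base   */
#define EXTRA_TEXT	*(.text64) *(.text..SHmedia32)
#else
#define LOAD_OFFSET	0
#define MEMORY_OFFSET	0			/* no link-time RAM offset  */
#define EXTRA_TEXT
#endif

SECTIONS
{
	/* Start of the image: virtual base + RAM offset + zero page. */
	. = PAGE_OFFSET + MEMORY_OFFSET + CONFIG_ZERO_PAGE_OFFSET;

	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		*(.text)	/* stand-in for HEAD_TEXT / TEXT_TEXT      */
		EXTRA_TEXT	/* sh64-only input sections, else empty    */
	}
}

With CONFIG_SUPERH64 defined, the location counter above evaluates to
0x88001000 under these placeholder values; without it, only the zero-page
offset above PAGE_OFFSET remains (0x80001000).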