author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-30 08:34:07 -0700
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-30 08:34:07 -0700
commit | 874f6d1be7699b5d1873283b4737712cbabd7754 (patch) |
tree | 55f8a0d08d32f3a4beed6c76f9e48db896b8d3bb /arch |
parent | e486b4c4ba601410772136ba0f8337bf545dccad (diff) |
parent | d50ba3687b99213501463a1947e3dd5b98bc2d99 (diff) |
Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cleanups from Ingo Molnar:
"Misc smaller cleanups"
* 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/lib: Fix spelling, put space between a numeral and its units
x86/lib: Fix spelling in the comments
x86, quirks: Shut-up a long-standing gcc warning
x86, msr: Unify variable names
x86-64, docs, mm: Add vsyscall range to virtual address space layout
x86: Drop KERNEL_IMAGE_START
x86_64: Use __BOOT_DS instead of __KERNEL_DS for safety
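Side note on the "x86, msr: Unify variable names" item above: the msr.h change only renames the rdmsr()/rdmsr_safe()/write_tsc() macro parameters to the low/high convention, so existing callers build unchanged. A minimal usage sketch of that convention follows; the helper name is hypothetical and it assumes <asm/msr.h> plus a valid MSR number — it is not part of this merge.

#include <linux/types.h>
#include <asm/msr.h>

/* Hypothetical helper, for illustration only: read one MSR as a 64-bit value. */
static u64 example_read_msr(unsigned int msr)
{
	u32 low, high;

	rdmsr(msr, low, high);			/* fills the two 32-bit halves */
	return ((u64)high << 32) | low;		/* recombine into 64 bits */
}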
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/boot/compressed/head_64.S | 2
-rw-r--r-- | arch/x86/include/asm/msr.h | 14
-rw-r--r-- | arch/x86/include/asm/page_64_types.h | 1
-rw-r--r-- | arch/x86/kernel/head64.c | 6
-rw-r--r-- | arch/x86/kernel/quirks.c | 18
-rw-r--r-- | arch/x86/lib/checksum_32.S | 2
-rw-r--r-- | arch/x86/lib/memcpy_32.c | 6
-rw-r--r-- | arch/x86/lib/memcpy_64.S | 2
-rw-r--r-- | arch/x86/lib/memmove_64.S | 6
9 files changed, 30 insertions, 27 deletions
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index c1d383d1fb7e..16f24e6dad79 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -52,7 +52,7 @@ ENTRY(startup_32)
 	jnz 1f
 
 	cli
-	movl	$(__KERNEL_DS), %eax
+	movl	$(__BOOT_DS), %eax
 	movl	%eax, %ds
 	movl	%eax, %es
 	movl	%eax, %ss
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 9264802e2824..cb7502852acb 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -137,11 +137,11 @@ static inline unsigned long long native_read_pmc(int counter)
  * pointer indirection), this allows gcc to optimize better
  */
 
-#define rdmsr(msr, val1, val2) \
+#define rdmsr(msr, low, high) \
 do { \
 	u64 __val = native_read_msr((msr)); \
-	(void)((val1) = (u32)__val); \
-	(void)((val2) = (u32)(__val >> 32)); \
+	(void)((low) = (u32)__val); \
+	(void)((high) = (u32)(__val >> 32)); \
 } while (0)
 
 static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
@@ -162,12 +162,12 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
 }
 
 /* rdmsr with exception handling */
-#define rdmsr_safe(msr, p1, p2) \
+#define rdmsr_safe(msr, low, high) \
 ({ \
 	int __err; \
 	u64 __val = native_read_msr_safe((msr), &__err); \
-	(*p1) = (u32)__val; \
-	(*p2) = (u32)(__val >> 32); \
+	(*low) = (u32)__val; \
+	(*high) = (u32)(__val >> 32); \
 	__err; \
 })
 
@@ -208,7 +208,7 @@ do { \
 #define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \
 					 (u32)((val) >> 32))
 
-#define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))
+#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))
 
 #define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
 
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 8b491e66eaa8..6c896fbe21db 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -48,6 +48,5 @@
  * arch/x86/kernel/head_64.S), and it is mapped here:
  */
 #define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
-#define KERNEL_IMAGE_START	_AC(0xffffffff80000000, UL)
 
 #endif /* _ASM_X86_PAGE_64_DEFS_H */
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index c5e403f6d869..101ac1a9263e 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -144,10 +144,10 @@ void __init x86_64_start_kernel(char * real_mode_data)
 	 * Build-time sanity checks on the kernel image and module
 	 * area mappings. (these are purely build-time and produce no code)
 	 */
-	BUILD_BUG_ON(MODULES_VADDR < KERNEL_IMAGE_START);
-	BUILD_BUG_ON(MODULES_VADDR-KERNEL_IMAGE_START < KERNEL_IMAGE_SIZE);
+	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
+	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
 	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
-	BUILD_BUG_ON((KERNEL_IMAGE_START & ~PMD_MASK) != 0);
+	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
 	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
 	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
 	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 26ee48a33dc4..04ee1e2e4c02 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -354,18 +354,22 @@ static void ati_force_hpet_resume(void)
 
 static u32 ati_ixp4x0_rev(struct pci_dev *dev)
 {
-	u32 d;
-	u8 b;
+	int err = 0;
+	u32 d = 0;
+	u8 b = 0;
 
-	pci_read_config_byte(dev, 0xac, &b);
+	err = pci_read_config_byte(dev, 0xac, &b);
 	b &= ~(1<<5);
-	pci_write_config_byte(dev, 0xac, b);
-	pci_read_config_dword(dev, 0x70, &d);
+	err |= pci_write_config_byte(dev, 0xac, b);
+	err |= pci_read_config_dword(dev, 0x70, &d);
 	d |= 1<<8;
-	pci_write_config_dword(dev, 0x70, d);
-	pci_read_config_dword(dev, 0x8, &d);
+	err |= pci_write_config_dword(dev, 0x70, d);
+	err |= pci_read_config_dword(dev, 0x8, &d);
 	d &= 0xff;
 	dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);
+
+	WARN_ON_ONCE(err);
+
 	return d;
 }
 
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 2af5df3ade7c..e78b8eee6615 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -61,7 +61,7 @@ ENTRY(csum_partial)
 	testl $3, %esi		# Check alignment.
 	jz 2f			# Jump if alignment is ok.
 	testl $1, %esi		# Check alignment.
-	jz 10f			# Jump if alignment is boundary of 2bytes.
+	jz 10f			# Jump if alignment is boundary of 2 bytes.
 
 	# buf is odd
 	dec %ecx
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
index b908a59eccf5..e78761d6b7f8 100644
--- a/arch/x86/lib/memcpy_32.c
+++ b/arch/x86/lib/memcpy_32.c
@@ -26,7 +26,7 @@ void *memmove(void *dest, const void *src, size_t n)
 	char *ret = dest;
 
 	__asm__ __volatile__(
-		/* Handle more 16bytes in loop */
+		/* Handle more 16 bytes in loop */
 		"cmp $0x10, %0\n\t"
 		"jb 1f\n\t"
 
@@ -51,7 +51,7 @@ void *memmove(void *dest, const void *src, size_t n)
 		"sub $0x10, %0\n\t"
 
 		/*
-		 * We gobble 16byts forward in each loop.
+		 * We gobble 16 bytes forward in each loop.
 		 */
 		"3:\n\t"
 		"sub $0x10, %0\n\t"
@@ -117,7 +117,7 @@ void *memmove(void *dest, const void *src, size_t n)
 		"sub $0x10, %0\n\t"
 
 		/*
-		 * We gobble 16byts backward in each loop.
+		 * We gobble 16 bytes backward in each loop.
 		 */
 		"7:\n\t"
 		"sub $0x10, %0\n\t"
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 1c273be7c97e..56313a326188 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -98,7 +98,7 @@ ENTRY(memcpy)
 	subq $0x20, %rdx
 	/*
 	 * At most 3 ALU operations in one cycle,
-	 * so append NOPS in the same 16bytes trunk.
+	 * so append NOPS in the same 16 bytes trunk.
 	 */
 	.p2align 4
 .Lcopy_backward_loop:
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index ee164610ec46..65268a6104f4 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -27,7 +27,7 @@ ENTRY(memmove)
 	CFI_STARTPROC
 
-	/* Handle more 32bytes in loop */
+	/* Handle more 32 bytes in loop */
 	mov %rdi, %rax
 	cmp $0x20, %rdx
 	jb 1f
@@ -56,7 +56,7 @@ ENTRY(memmove)
 3:
 	sub $0x20, %rdx
 	/*
-	 * We gobble 32byts forward in each loop.
+	 * We gobble 32 bytes forward in each loop.
 	 */
 5:
 	sub $0x20, %rdx
@@ -122,7 +122,7 @@ ENTRY(memmove)
 	addq %rdx, %rdi
 	subq $0x20, %rdx
 	/*
-	 * We gobble 32byts backward in each loop.
+	 * We gobble 32 bytes backward in each loop.
 	 */
8:
	subq $0x20, %rdx
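A note on the head64.c hunk above: dropping KERNEL_IMAGE_START is safe because the same layout assumptions are still enforced through BUILD_BUG_ON(), now expressed via __START_KERNEL_map, and those checks are purely build-time and produce no code. A self-contained sketch of that pattern, using hypothetical constants rather than the kernel's real ones:

#include <linux/bug.h>

/* Hypothetical values, only to illustrate the compile-time check. */
#define EXAMPLE_MAP_START	0xffffffff80000000UL
#define EXAMPLE_IMAGE_SIZE	(256UL * 1024 * 1024)

static inline void example_layout_checks(void)
{
	/* Build fails (no runtime code is emitted) if the start is not 2 MiB aligned. */
	BUILD_BUG_ON(EXAMPLE_MAP_START & ((1UL << 21) - 1));
	/* Build fails if the image would not fit in a 512 MiB mapping. */
	BUILD_BUG_ON(EXAMPLE_IMAGE_SIZE > 512UL * 1024 * 1024);
}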