Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/mmu_context.h  |  5
-rw-r--r--  arch/x86/include/asm/smp.h          |  5
-rw-r--r--  arch/x86/kernel/acpi/sleep.c        |  2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c     | 10
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c | 12
-rw-r--r--  arch/x86/kernel/head_32.S           | 30
-rw-r--r--  arch/x86/kernel/smpboot.c           |  4
-rw-r--r--  arch/x86/mm/pageattr.c              |  8
8 files changed, 39 insertions(+), 37 deletions(-)
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 4a2d4e0c18d9..8b5393ec1080 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -36,8 +36,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	unsigned cpu = smp_processor_id();
 
 	if (likely(prev != next)) {
-		/* stop flush ipis for the previous mm */
-		cpumask_clear_cpu(cpu, mm_cpumask(prev));
 #ifdef CONFIG_SMP
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		percpu_write(cpu_tlbstate.active_mm, next);
@@ -47,6 +45,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		/* Re-load page tables */
 		load_cr3(next->pgd);
 
+		/* stop flush ipis for the previous mm */
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
+
 		/*
 		 * load the LDT, if the LDT is different:
 		 */
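For reference, here is how switch_mm() reads after the hunk above, condensed into a sketch (unrelated lines elided, explanatory comments added for this note; the code shape follows the patched kernel):

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		percpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		load_cr3(next->pgd);

		/*
		 * Clear prev's bit only now, after CR3 has stopped
		 * pointing at prev->pgd.  With the old order, a remote
		 * CPU changing prev's page tables between the clear and
		 * load_cr3() would not send this CPU a TLB flush IPI,
		 * so stale translations could still be used.  The new
		 * order risks at most one harmless extra flush.
		 */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* ... load the LDT, if the LDT is different ... */
	}
}

The moved cpumask_clear_cpu() is the whole fix; nothing else about the function changes.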
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 4c2f63c7fc1b..1f4695136776 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
 
 /* Static state in head.S used to set up a CPU */
-extern struct {
-	void *sp;
-	unsigned short ss;
-} stack_start;
+extern unsigned long stack_start; /* Initial stack pointer address */
 
 struct smp_ops {
 	void (*smp_prepare_boot_cpu)(void);
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 69fd72aa5594..4d9ebbab2230 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -100,7 +100,7 @@ int acpi_save_state_mem(void)
 #else /* CONFIG_64BIT */
 	header->trampoline_segment = setup_trampoline() >> 4;
 #ifdef CONFIG_SMP
-	stack_start.sp = temp_stack + sizeof(temp_stack);
+	stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
 	early_gdt_descr.address =
 			(unsigned long)get_cpu_gdt_table(smp_processor_id());
 	initial_gs = per_cpu_offset(smp_processor_id());
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 01c0f3ee6cc3..bebabec5b448 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void)
 }
 
 /*
- * MTRR initialization for all AP's
+ * Delayed MTRR initialization for all AP's
  */
 void mtrr_aps_init(void)
 {
 	if (!use_intel())
 		return;
 
+	/*
+	 * Check if someone has requested the delay of AP MTRR initialization,
+	 * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
+	 * then we are done.
+	 */
+	if (!mtrr_aps_delayed_init)
+		return;
+
 	set_mtrr(~0U, 0, 0, 0);
 	mtrr_aps_delayed_init = false;
 }
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index e56b9bfbabd1..f7a0993c1e7c 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -682,7 +682,7 @@ static int p4_validate_raw_event(struct perf_event *event)
 	 * if an event is shared accross the logical threads
 	 * the user needs special permissions to be able to use it
 	 */
-	if (p4_event_bind_map[v].shared) {
+	if (p4_ht_active() && p4_event_bind_map[v].shared) {
 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 			return -EACCES;
 	}
@@ -727,7 +727,8 @@ static int p4_hw_config(struct perf_event *event)
 		event->hw.config = p4_set_ht_bit(event->hw.config);
 
 	if (event->attr.type == PERF_TYPE_RAW) {
-
+		struct p4_event_bind *bind;
+		unsigned int esel;
 		/*
 		 * Clear bits we reserve to be managed by kernel itself
 		 * and never allowed from a user space
@@ -743,6 +744,13 @@ static int p4_hw_config(struct perf_event *event)
 		 * bits since we keep additional info here (for cache events and etc)
 		 */
 		event->hw.config |= event->attr.config;
+		bind = p4_config_get_bind(event->attr.config);
+		if (!bind) {
+			rc = -EINVAL;
+			goto out;
+		}
+		esel = P4_OPCODE_ESEL(bind->opcode);
+		event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
 	}
 
 	rc = x86_setup_perfctr(event);
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index fc293dc8dc35..767d6c43de37 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -85,6 +85,8 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  */
 __HEAD
 ENTRY(startup_32)
+	movl pa(stack_start),%ecx
+
 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
 		us to not reload segments */
 	testb $(1<<6), BP_loadflags(%esi)
@@ -99,7 +101,9 @@ ENTRY(startup_32)
 	movl %eax,%es
 	movl %eax,%fs
 	movl %eax,%gs
+	movl %eax,%ss
 2:
+	leal -__PAGE_OFFSET(%ecx),%esp
 
 /*
  * Clear BSS first so that there are no surprises...
@@ -145,8 +149,6 @@ ENTRY(startup_32)
  * _brk_end is set up to point to the first "safe" location.
  * Mappings are created both at virtual address 0 (identity mapping)
  * and PAGE_OFFSET for up to _end.
- *
- * Note that the stack is not yet set up!
  */
 
 #ifdef CONFIG_X86_PAE
@@ -282,6 +284,9 @@ ENTRY(startup_32_smp)
 	movl %eax,%es
 	movl %eax,%fs
 	movl %eax,%gs
+	movl pa(stack_start),%ecx
+	movl %eax,%ss
+	leal -__PAGE_OFFSET(%ecx),%esp
 #endif /* CONFIG_SMP */
 default_entry:
 
@@ -347,8 +352,8 @@ default_entry:
 	movl %eax,%cr0		/* ..and set paging (PG) bit */
 	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
 1:
-	/* Set up the stack pointer */
-	lss stack_start,%esp
+	/* Shift the stack pointer to a virtual address */
+	addl $__PAGE_OFFSET, %esp
 
 /*
  * Initialize eflags.  Some BIOS's leave bits like NT set.  This would
@@ -360,9 +365,7 @@ default_entry:
 
 #ifdef CONFIG_SMP
 	cmpb $0, ready
-	jz 1f			/* Initial CPU cleans BSS */
-	jmp checkCPUtype
-1:
+	jnz checkCPUtype
 #endif /* CONFIG_SMP */
 
 /*
@@ -470,14 +473,7 @@ is386:	movl $2,%ecx		# set MP
 
 	cld			# gcc2 wants the direction flag cleared at all times
 	pushl $0		# fake return address for unwinder
-#ifdef CONFIG_SMP
-	movb ready, %cl
 	movb $1, ready
-	cmpb $0,%cl		# the first CPU calls start_kernel
-	je   1f
-	movl (stack_start), %esp
-1:
-#endif /* CONFIG_SMP */
 	jmp *(initial_code)
 
 /*
@@ -670,15 +666,15 @@ ENTRY(initial_page_table)
 #endif
 
 .data
+.balign 4
 ENTRY(stack_start)
 	.long init_thread_union+THREAD_SIZE
-	.long __BOOT_DS
-
-ready:	.byte 0
 
 early_recursion_flag:
 	.long 0
 
+ready:	.byte 0
+
 int_msg:
 	.asciz "Unknown interrupt or fault at: %p %p %p\n"
 
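The head_32.S changes above all revolve around one piece of address arithmetic: until CR0.PG is set the CPU runs on physical addresses, so the virtual stack top stored in stack_start has to be shifted down by __PAGE_OFFSET before it can be loaded into %esp (leal -__PAGE_OFFSET(%ecx),%esp), and shifted back up once paging is on (addl $__PAGE_OFFSET,%esp). A standalone C sketch of that round trip, using an illustrative constant in place of the real __PAGE_OFFSET (the actual value comes from the kernel headers):

#include <assert.h>

/*
 * Illustrative value only: the default 32-bit kernel maps virtual
 * 0xC0000000 onto physical address 0.  Assumed here, not taken from
 * the patch.
 */
#define EXAMPLE_PAGE_OFFSET 0xC0000000UL

int main(void)
{
	/* stack_start holds a *virtual* stack top, e.g. the init task's. */
	unsigned long stack_start = 0xC1234000UL;

	/* Before CR0.PG is set: leal -__PAGE_OFFSET(%ecx),%esp */
	unsigned long esp_phys = stack_start - EXAMPLE_PAGE_OFFSET;

	/* After paging is enabled: addl $__PAGE_OFFSET,%esp */
	unsigned long esp_virt = esp_phys + EXAMPLE_PAGE_OFFSET;

	assert(esp_virt == stack_start);	/* the round trip is exact */
	return 0;
}

Having a valid %esp this early is what lets the later hunks drop the "stack is not yet set up" caveat and the CONFIG_SMP special case in which secondary CPUs loaded their stack only at the very end.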
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 0cbe8c0b35ed..03273b6c272c 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -638,7 +638,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 	 * target processor state.
 	 */
 	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
-			 (unsigned long)stack_start.sp);
+			 stack_start);
 
 	/*
 	 * Run STARTUP IPI loop.
@@ -785,7 +785,7 @@ do_rest:
 #endif
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	initial_code = (unsigned long)start_secondary;
-	stack_start.sp = (void *) c_idle.idle->thread.sp;
+	stack_start = c_idle.idle->thread.sp;
 
 	/* start_ip had better be page-aligned! */
 	start_ip = setup_trampoline();
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 8b830ca14ac4..d343b3c81f3c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -256,7 +256,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 				   unsigned long pfn)
 {
 	pgprot_t forbidden = __pgprot(0);
-	pgprot_t required = __pgprot(0);
 
 	/*
 	 * The BIOS area between 640k and 1Mb needs to be executable for
@@ -282,12 +281,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
 		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_RW;
-	/*
-	 * .data and .bss should always be writable.
-	 */
-	if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
-	    within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
-		pgprot_val(required) |= _PAGE_RW;
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
 	/*
@@ -327,7 +320,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 #endif
 
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
-	prot = __pgprot(pgprot_val(prot) | pgprot_val(required));
 
 	return prot;
 }
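With the "required" mask gone, static_protections() above is purely subtractive: it may strip protection bits a caller asked for (via "forbidden"), but it can no longer force bits back on. The dropped code silently re-added _PAGE_RW to .data/.bss, which could override an explicit set_memory_ro() on those ranges and hide bugs. A standalone sketch of the resulting mask discipline, with simplified types and a hypothetical rodata range check standing in for the kernel's within() tests (not kernel code):

#include <assert.h>

typedef unsigned long prot_t;

#define X_PAGE_RW (1UL << 1)	/* mirrors _PAGE_RW's bit position */

/* Hypothetical stand-in for the kernel's rodata-range test. */
static int x_in_rodata(unsigned long address)
{
	return address >= 0x1000 && address < 0x2000;
}

/* Subtractive only: bits may be forbidden, never force-added. */
static prot_t x_static_protections(prot_t prot, unsigned long address)
{
	prot_t forbidden = 0;

	if (x_in_rodata(address))
		forbidden |= X_PAGE_RW;

	return prot & ~forbidden;
}

int main(void)
{
	/* A caller clearing RW on a .data page now stays cleared ... */
	assert((x_static_protections(0, 0x8000) & X_PAGE_RW) == 0);
	/* ... while rodata still cannot be made writable. */
	assert((x_static_protections(X_PAGE_RW, 0x1800) & X_PAGE_RW) == 0);
	return 0;
}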