Diffstat (limited to 'arch/x86_64/kernel')
 arch/x86_64/kernel/e820.c    | 34 ++++++++++++++++++++++++++++++++++
 arch/x86_64/kernel/signal.c  |  5 +++--
 arch/x86_64/kernel/smpboot.c | 17 +++++++++--------
 3 files changed, 46 insertions(+), 10 deletions(-)
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 6ded3a50dfe6..b548dea4e5b9 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -185,6 +185,40 @@ unsigned long __init e820_end_of_ram(void)
}

/*
+ * Compute how much memory is missing in a range.
+ * Unlike the other functions in this file, the arguments are in page numbers.
+ */
+unsigned long __init
+e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
+{
+ unsigned long ram = 0;
+ unsigned long start = start_pfn << PAGE_SHIFT;
+ unsigned long end = end_pfn << PAGE_SHIFT;
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long last, addr;
+
+ if (ei->type != E820_RAM ||
+ ei->addr+ei->size <= start ||
+ ei->addr >= end)
+ continue;
+
+ addr = round_up(ei->addr, PAGE_SIZE);
+ if (addr < start)
+ addr = start;
+
+ last = round_down(ei->addr + ei->size, PAGE_SIZE);
+ if (last >= end)
+ last = end;
+
+ if (last > addr)
+ ram += last - addr;
+ }
+ return ((end - start) - ram) >> PAGE_SHIFT;
+}
+
+/*
* Mark e820 reserved areas as busy for the resource manager.
*/
void __init e820_reserve_resources(void)
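The arithmetic in the new e820_hole_size() counts only whole pages fully backed by E820_RAM entries: each entry is clipped to the queried window, its start rounded up and its end rounded down to a page boundary, and whatever the RAM coverage misses is reported as the hole. The standalone sketch below replays that logic in userspace against a made-up two-entry map; PAGE_SHIFT, the rounding macros, and the map are mock stand-ins for the kernel's, purely for illustration.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define ROUND_UP(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define ROUND_DOWN(x) ((x) & ~(PAGE_SIZE - 1))

struct range { unsigned long addr, size; };

/* Mock map: two RAM entries with a 16 MB hole at [16 MB, 32 MB). */
static const struct range ram_map[] = {
	{ 0x0000000UL, 0x1000000UL },	/*  0 .. 16 MB */
	{ 0x2000000UL, 0x2000000UL },	/* 32 .. 64 MB */
};

static unsigned long hole_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long start = start_pfn << PAGE_SHIFT;
	unsigned long end = end_pfn << PAGE_SHIFT;
	unsigned long present = 0;
	unsigned long i;

	for (i = 0; i < sizeof(ram_map) / sizeof(ram_map[0]); i++) {
		/* Clip the entry to the window, rounding inwards so
		 * only fully RAM-backed pages are counted. */
		unsigned long addr = ROUND_UP(ram_map[i].addr);
		unsigned long last = ROUND_DOWN(ram_map[i].addr + ram_map[i].size);

		if (last <= start || addr >= end)
			continue;
		if (addr < start)
			addr = start;
		if (last > end)
			last = end;
		if (last > addr)
			present += last - addr;
	}
	return ((end - start) - present) >> PAGE_SHIFT;
}

int main(void)
{
	/* 0..64 MB is 0x4000 pages; expect the 16 MB gap: 0x1000 pages. */
	printf("hole pages: %#lx\n", hole_pages(0, 0x4000));
	return 0;
}

With a 16 MB gap between the two mock ranges, the program prints 0x1000, i.e. the hole expressed in 4 KB pages, matching what e820_hole_size() would report for that window.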
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c
index 98590a989f3d..d642fbf3da29 100644
--- a/arch/x86_64/kernel/signal.c
+++ b/arch/x86_64/kernel/signal.c
@@ -394,10 +394,11 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
#endif
ret = setup_rt_frame(sig, ka, info, oldset, regs);

- if (ret && !(ka->sa.sa_flags & SA_NODEFER)) {
+ if (ret) {
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
- sigaddset(&current->blocked,sig);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
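The behavioural point of this hunk: sa_mask must be OR-ed into the blocked set on every delivery; SA_NODEFER only decides whether the delivered signal itself is added on top. Before the fix, SA_NODEFER caused the whole block to be skipped, so sa_mask was silently ignored too. A small userspace check of those semantics (illustrative only; it prints the signal mask as seen inside the handler):

#include <signal.h>
#include <stdio.h>
#include <string.h>

static void handler(int sig)
{
	sigset_t cur;

	(void)sig;
	/* printf is not async-signal-safe, but the signal is raised
	 * synchronously below, so it is harmless in this demo. */
	sigprocmask(SIG_BLOCK, NULL, &cur);
	printf("SIGUSR1 blocked=%d, SIGUSR2 blocked=%d\n",
	       sigismember(&cur, SIGUSR1), sigismember(&cur, SIGUSR2));
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	sa.sa_flags = SA_NODEFER;	/* don't block the signal itself... */
	sigemptyset(&sa.sa_mask);
	sigaddset(&sa.sa_mask, SIGUSR2); /* ...but sa_mask still applies */
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);
	return 0;
}

With the fixed kernel this prints blocked=0 for SIGUSR1 and blocked=1 for SIGUSR2; with the old behaviour, sa_mask was dropped whenever SA_NODEFER was set and both would print 0.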
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 6e4807d64d46..fa25e39fe54d 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -334,7 +334,7 @@ static void __cpuinit tsc_sync_wait(void)
{
if (notscsync || !cpu_has_tsc)
return;
- sync_tsc(boot_cpu_id);
+ sync_tsc(0);
}

static __init int notscsync_setup(char *s)
@@ -492,6 +492,14 @@ void __cpuinit start_secondary(void)
*/
set_cpu_sibling_map(smp_processor_id());

+ /*
+ * Wait for TSC sync here so that nothing can be scheduled on this
+ * CPU before the TSCs agree. We still process interrupts, which
+ * unfortunately can observe an inconsistent time in that window.
+ * Do this here because TSC sync relies on unprotected global state.
+ */
+ tsc_sync_wait();
+
/*
* We need to hold call_lock, so there is no inconsistency
* between the time smp_call_function() determines number of
@@ -509,13 +517,6 @@ void __cpuinit start_secondary(void)
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
unlock_ipi_call_lock();

- mb();
-
- /* Wait for TSC sync to not schedule things before.
- We still process interrupts, which could see an inconsistent
- time in that window unfortunately. */
- tsc_sync_wait();
-
cpu_idle();
}
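Why the call moved: sync_tsc() rendezvouses with the boot CPU through shared globals that nothing locks, so it has to run before this CPU is marked CPU_ONLINE and becomes visible to smp_call_function() and the scheduler. Below is a rough userspace sketch of that kind of one-slot rendezvous, using C11 atomics and pthreads in place of CPUs; the names and the offset arithmetic are invented for illustration and are not the kernel's actual protocol.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static _Atomic int go;		/* master releases the secondary */
static _Atomic long slot;	/* single unprotected global slot */

static long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000L + ts.tv_nsec;
}

static void *secondary(void *arg)
{
	(void)arg;
	while (!atomic_load(&go))	/* spin until told to sample */
		;
	atomic_store(&slot, now_ns());	/* publish our timestamp */
	return NULL;
}

int main(void)
{
	pthread_t t;
	long master, seen;

	pthread_create(&t, NULL, secondary, NULL);
	master = now_ns();
	atomic_store(&go, 1);
	while (!(seen = atomic_load(&slot)))	/* wait for the reply */
		;
	pthread_join(t, NULL);

	/* If two pairs ran this protocol at once they would trample
	 * 'slot'; that is why the kernel syncs before going online. */
	printf("round-trip estimate: %ld ns\n", seen - master);
	return 0;
}

Before the patch the wait ran after unlock_ipi_call_lock(), i.e. with the CPU already online; moving it ahead of the CPU_ONLINE marking keeps the unlocked rendezvous state single-user, since CPU bringup is serialized at that point.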