summaryrefslogtreecommitdiff
path: root/arch
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2008-12-23 12:04:16 +0100
committerIngo Molnar <mingo@elte.hu>2008-12-23 12:45:08 +0100
commit8fb9331391af95ca1f4e5c0a0da8120b13cbae01 (patch)
treea0c0ac8e2dffd562ed023a012db3fd56540e7db6 /arch
parent94c46572a6d9bb497eda0a14099d9f1360d57d5d (diff)
perfcounters: remove warnings
Impact: remove debug checks

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/kernel/cpu/perf_counter.c7
1 file changed, 0 insertions, 7 deletions
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index bdbdb56eaa34..89fad5d4fb37 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -64,7 +64,6 @@ x86_perf_counter_update(struct perf_counter *counter,
{
u64 prev_raw_count, new_raw_count, delta;
- WARN_ON_ONCE(counter->state != PERF_COUNTER_STATE_ACTIVE);
/*
* Careful: an NMI might modify the previous counter value.
*
@@ -89,7 +88,6 @@ again:
* of the count, so we do that by clipping the delta to 32 bits:
*/
delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);
- WARN_ON_ONCE((int)delta < 0);
atomic64_add(delta, &counter->count);
atomic64_sub(delta, &hwc->period_left);
@@ -193,7 +191,6 @@ __x86_perf_counter_disable(struct perf_counter *counter,
int err;
err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
- WARN_ON_ONCE(err);
}
static DEFINE_PER_CPU(u64, prev_left[MAX_HW_COUNTERS]);
@@ -209,8 +206,6 @@ __hw_perf_counter_set_period(struct perf_counter *counter,
s32 left = atomic64_read(&hwc->period_left);
s32 period = hwc->irq_period;
- WARN_ON_ONCE(period <= 0);
-
/*
* If we are way outside a reasoable range then just skip forward:
*/
@@ -224,8 +219,6 @@ __hw_perf_counter_set_period(struct perf_counter *counter,
atomic64_set(&hwc->period_left, left);
}
- WARN_ON_ONCE(left <= 0);
-
per_cpu(prev_left[idx], smp_processor_id()) = left;
/*