author		Jan Kiszka <jan.kiszka@siemens.com>	2009-06-09 15:37:01 +0200
committer	Avi Kivity <avi@redhat.com>		2009-09-10 08:32:57 +0300
commit		f7104db26ab2bc5f642892774ac8fb0f15400969 (patch)
tree		becb4695f6028d9636c0cbdc2bd4dff42d4066bf /arch/x86
parent		33e4c68656a2e461b296ce714ec322978de85412 (diff)
KVM: Fix racy event propagation in timer
Minor issue that likely had no practical relevance: the kvm timer function so far incremented the pending counter and then possibly reset it to 1 in case reinjection was disabled. This opened a small race window with the corresponding VCPU loop, which may have happened to run on another (real) CPU and already consumed the value.

Fix it by skipping the increment in case pending is already > 0. This opens a different race window, but it can only rarely cause lost events, and in the !reinject case we do not care about losing them anyway.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
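To make the ordering change concrete, here is a minimal user-space sketch of the old and new producer paths. It is not the kernel code: struct timer_state, timer_fn_old() and timer_fn_new() are hypothetical stand-ins, and C11 atomics replace the kernel's atomic_t helpers; only the ordering of operations mirrors the patch.

/*
 * Minimal sketch, assuming a user-space model of the timer producer.
 * timer_state, timer_fn_old() and timer_fn_new() are hypothetical names;
 * C11 atomics stand in for the kernel's atomic_t API.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct timer_state {
	atomic_int pending;	/* timer events waiting to be injected */
	bool reinject;		/* false: coalesce to at most one event */
};

/* Old ordering: increment, then clamp back to 1 when not reinjecting.
 * A VCPU thread that consumes pending between the two steps sees the
 * clamp re-raise an event it has already handled.
 */
static void timer_fn_old(struct timer_state *t)
{
	atomic_fetch_add(&t->pending, 1);
	if (!t->reinject)
		atomic_store(&t->pending, 1);	/* racy reset */
}

/* New ordering: only increment when reinjecting or when nothing is
 * pending, so the clamp and its race disappear.  A concurrent consumer
 * can still slip in between the read and the increment, which is the
 * benign window the changelog accepts.
 */
static void timer_fn_new(struct timer_state *t)
{
	if (t->reinject || atomic_load(&t->pending) == 0)
		atomic_fetch_add(&t->pending, 1);
}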
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/timer.c	16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/timer.c b/arch/x86/kvm/timer.c
index 85cc743a8203..1baed414b57a 100644
--- a/arch/x86/kvm/timer.c
+++ b/arch/x86/kvm/timer.c
@@ -9,12 +9,16 @@ static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
 	int restart_timer = 0;
 	wait_queue_head_t *q = &vcpu->wq;
 
-	/* FIXME: this code should not know anything about vcpus */
-	if (!atomic_inc_and_test(&ktimer->pending))
-		set_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
-
-	if (!ktimer->reinject)
-		atomic_set(&ktimer->pending, 1);
+	/*
+	 * There is a race window between reading and incrementing, but we do
+	 * not care about potentially losing timer events in the !reinject
+	 * case anyway.
+	 */
+	if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
+		/* FIXME: this code should not know anything about vcpus */
+		if (!atomic_inc_and_test(&ktimer->pending))
+			set_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
+	}
 
 	if (waitqueue_active(q))
 		wake_up_interruptible(q);
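For completeness, a matching consumer sketch (again hypothetical, not the kernel's VCPU loop) shows why the old reset was harmful and why the remaining window is tolerable. It reuses the timer_state model from the sketch above.

/* Drain one pending event, as a VCPU loop would before injecting the
 * timer interrupt.  With timer_fn_old(), the reset to 1 could land right
 * after this decrement and resurrect an already-consumed event; with
 * timer_fn_new(), the worst case is a skipped increment, i.e. a lost
 * event in the !reinject case, which the changelog explicitly accepts.
 */
static bool vcpu_consume_timer(struct timer_state *t)
{
	if (atomic_load(&t->pending) > 0) {
		atomic_fetch_sub(&t->pending, 1);
		return true;	/* inject one timer interrupt */
	}
	return false;
}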