author		Borislav Petkov <borislav.petkov@amd.com>	2009-06-20 23:28:22 -0700
committer	H. Peter Anvin <hpa@zytor.com>	2009-06-20 23:28:22 -0700
commit		a95436e44a76a32dcbe7c8df59701ddde53017c1 (patch)
tree		cf6b8f52a66cd151b2c4bbb759f4958f6481f471 /arch/x86
parent		e487683990972bf9aa4e688434c46ead76748bca (diff)
x86, mce: use atomic_inc_return() instead of add by 1
Use atomic_inc_return() instead of atomic_add_return() by 1.

Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
Cc: Andi Kleen <ak@linux.intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
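For context, the pattern touched by both hunks is a "first caller wins" gate: every CPU atomically increments a shared counter and only the one that sees the new value 1 proceeds, while the others back off (wait_for_panic(), or spin in mce_start()). Below is a rough userspace analogue of that gate, written against C11 atomics and threads rather than the kernel's atomic_t API; the names panic_gate and worker are illustrative only and do not appear in the patch.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <threads.h>

/* Analogue of mce_paniced: zero-initialized, the first incrementer "wins". */
static atomic_int panic_gate;

static int worker(void *arg)
{
	long id = (long)(intptr_t)arg;

	/* atomic_fetch_add() returns the old value, so old + 1 is the new
	 * value -- the same result atomic_inc_return() yields in the kernel. */
	if (atomic_fetch_add(&panic_gate, 1) + 1 > 1)
		printf("thread %ld: gate already taken, backing off\n", id);
	else
		printf("thread %ld: first in, running the panic path\n", id);
	return 0;
}

int main(void)
{
	thrd_t t[4];

	for (long i = 0; i < 4; i++)
		thrd_create(&t[i], worker, (void *)(intptr_t)i);
	for (int i = 0; i < 4; i++)
		thrd_join(t[i], NULL);
	return 0;
}

Whether spelled atomic_add_return(1, &v) or atomic_inc_return(&v), the operation is the same read-modify-write returning the new value, so the change below is an idiom cleanup with no functional difference.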
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 284d1de968bc..7da8fec9ca88 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -242,7 +242,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
/*
* Make sure only one CPU runs in machine check panic
*/
- if (atomic_add_return(1, &mce_paniced) > 1)
+ if (atomic_inc_return(&mce_paniced) > 1)
wait_for_panic();
barrier();
@@ -705,7 +705,7 @@ static int mce_start(int *no_way_out)
* global_nwo should be updated before mce_callin
*/
smp_wmb();
- order = atomic_add_return(1, &mce_callin);
+ order = atomic_inc_return(&mce_callin);
/*
* Wait for everyone.