mirror of https://github.com/Qortal/Brooklyn
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants,
 * you need to test for the CX8 feature in boot_cpu_data.
 */

/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  The lock is also what lets
 * the reader side observe a coherent 64-bit value.
 */

static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}

#ifdef CONFIG_X86_CMPXCHG64
#define arch_cmpxchg64(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define arch_cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

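/*
 * Illustrative sketch (not part of the original header): the comment at
 * the top of this file notes that the reader side needs the locked
 * cmpxchg8b to observe a coherent 64-bit value, because a plain 64-bit
 * load splits into two 32-bit loads on 32-bit x86 and can tear.  A
 * hypothetical helper, in the spirit of the 586 atomic64_read()
 * implementation, could read atomically by "exchanging" a value with
 * itself:
 */
static inline u64 example_read_64bit(volatile u64 *ptr)
{
	/*
	 * If *ptr happens to equal 0, cmpxchg8b rewrites the same 0 (a
	 * harmless store); otherwise the compare fails and the current
	 * contents come back in EDX:EAX.  Either way the result is an
	 * atomic 64-bit snapshot.
	 */
	return __cmpxchg64(ptr, 0, 0);
}
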
/*
 * Unlocked variant: without the lock prefix a single cmpxchg8b is still
 * atomic with respect to interrupts on the local CPU, but not with
 * respect to other CPUs, so it is only safe for data that is never
 * accessed cross-CPU.
 */
static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on the 80386 and 80486, which
 * lack the CMPXCHG8B instruction: cmpxchg8b may need to be emulated at
 * runtime via cmpxchg8b_emu, which (as the register constraints below
 * show) takes the target pointer in %esi.
 */

#define arch_cmpxchg64(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
			"call cmpxchg8b_emu",			\
			"lock; cmpxchg8b (%%esi)",		\
			X86_FEATURE_CX8,			\
			"=A" (__ret),				\
			"S" ((ptr)), "0" (__old),		\
			"b" ((unsigned int)__new),		\
			"c" ((unsigned int)(__new>>32))		\
			: "memory");				\
	__ret; })

#define arch_cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
			"cmpxchg8b (%%esi)",		\
			X86_FEATURE_CX8,			\
			"=A" (__ret),				\
			"S" ((ptr)), "0" (__old),		\
			"b" ((unsigned int)__new),		\
			"c" ((unsigned int)(__new>>32))		\
			: "memory");				\
	__ret; })

#endif

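/*
 * Illustrative sketch (not part of the original header): with
 * arch_cmpxchg64() now defined on both the CONFIG_X86_CMPXCHG64 and the
 * emulated path, a caller could build a lock-free 64-bit
 * read-modify-write out of it.  example_counter_add64() is not a real
 * kernel helper, just an assumed name for demonstration.
 */
static inline u64 example_counter_add64(u64 *counter, u64 delta)
{
	u64 old, new;

	do {
		/*
		 * A torn (non-atomic) snapshot is harmless here: if it
		 * does not match the real contents, cmpxchg8b simply
		 * fails and we go around the loop again.
		 */
		old = *(volatile u64 *)counter;
		new = old + delta;
	} while (arch_cmpxchg64(counter, old, new) != old);

	return new;
}
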
#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8)

#endif /* _ASM_X86_CMPXCHG_32_H */