
atomic_load/atomic_store: to make sure the latest value is returned, should we add a memory barrier here?

Without these memory barriers, multithreaded programs can sometimes hit subtle bugs.
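For reference, the failure mode described above is the classic message-passing pattern. Below is a minimal sketch (the file name, variable names, and the pthread harness are illustrative, not part of this patch): a writer stores a payload and then sets a flag. Without a barrier ordering the payload store before the flag store, and the flag load before the payload load, the reader can observe flag == 1 yet still read a stale payload on weakly ordered CPUs such as arm, aarch64 and riscv.

/* mp.c -- minimal message-passing sketch; build with: cc -O2 -pthread mp.c
 * Illustrative only: shows the ordering the added fences are meant to provide. */
#include <pthread.h>
#include <stdio.h>

static int data;
static int flag;

static void *writer(void *arg)
{
    (void)arg;
    data = 42;                                    /* payload */
    __atomic_store_n(&flag, 1, __ATOMIC_RELEASE); /* publish after the payload */
    return NULL;
}

static void *reader(void *arg)
{
    (void)arg;
    while (!__atomic_load_n(&flag, __ATOMIC_ACQUIRE)) /* wait for publication */
        ;
    printf("data = %d\n", data); /* with acquire/release ordering this prints 42 */
    return NULL;
}

int main(void)
{
    pthread_t w, r;
    pthread_create(&w, NULL, writer, NULL);
    pthread_create(&r, NULL, reader, NULL);
    pthread_join(w, NULL);
    pthread_join(r, NULL);
    return 0;
}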
kbkpbot 2024-02-05 08:37:41 +08:00
parent da0d43903b
commit 105d70f7b4
1 changed file with 52 additions and 50 deletions


@@ -15,6 +15,56 @@
#define __ATOMIC_SEQ_CST 5
typedef __SIZE_TYPE__ size_t;
+/* uses alias to allow building with gcc/clang */
+#ifdef __TINYC__
+#define ATOMIC(x) __atomic_##x
+#else
+#define ATOMIC(x) __tcc_atomic_##x
+#endif
+
+void ATOMIC(signal_fence) (int memorder)
+{
+}
+
+void ATOMIC(thread_fence) (int memorder)
+{
+#if defined __i386__
+    __asm__ volatile("lock orl $0, (%esp)");
+#elif defined __x86_64__
+    __asm__ volatile("lock orq $0, (%rsp)");
+#elif defined __arm__
+    __asm__ volatile(".int 0xee070fba"); // mcr p15, 0, r0, c7, c10, 5
+#elif defined __aarch64__
+    __asm__ volatile(".int 0xd5033bbf"); // dmb ish
+#elif defined __riscv
+    __asm__ volatile(".int 0x0ff0000f"); // fence iorw,iorw
+#endif
+}
+
+bool ATOMIC(is_lock_free) (unsigned long size, const volatile void *ptr)
+{
+    bool ret;
+
+    switch (size) {
+    case 1: ret = true; break;
+    case 2: ret = true; break;
+    case 4: ret = true; break;
+#if defined __x86_64__ || defined __aarch64__ || defined __riscv
+    case 8: ret = true; break;
+#else
+    case 8: ret = false; break;
+#endif
+    default: ret = false; break;
+    }
+    return ret;
+}
+
+#ifndef __TINYC__
+void __atomic_signal_fence(int memorder) __attribute__((alias("__tcc_atomic_signal_fence")));
+void __atomic_thread_fence(int memorder) __attribute__((alias("__tcc_atomic_thread_fence")));
+bool __atomic_is_lock_free(unsigned long size, const volatile void *ptr) __attribute__((alias("__tcc_atomic_is_lock_free")));
+#endif
+
#if defined __i386__ || defined __x86_64__
#define ATOMIC_COMPARE_EXCHANGE(TYPE, MODE, SUFFIX) \
bool __atomic_compare_exchange_##MODE \
@@ -42,6 +92,7 @@ typedef __SIZE_TYPE__ size_t;
#define ATOMIC_LOAD(TYPE, MODE) \
TYPE __atomic_load_##MODE(const volatile void *atom, int memorder) \
{ \
+    __atomic_thread_fence(__ATOMIC_ACQUIRE); \
return *(volatile TYPE *)atom; \
}
@@ -49,6 +100,7 @@ typedef __SIZE_TYPE__ size_t;
void __atomic_store_##MODE(volatile void *atom, TYPE value, int memorder) \
{ \
*(volatile TYPE *)atom = value; \
+    __atomic_thread_fence(__ATOMIC_RELEASE); \
}
#define ATOMIC_GEN_OP(TYPE, MODE, NAME, OP, RET) \
@@ -114,53 +166,3 @@ ATOMIC_GEN(uint32_t, 4, "l")
#if defined __x86_64__ || defined __aarch64__ || defined __riscv
ATOMIC_GEN(uint64_t, 8, "q")
#endif
-
-/* uses alias to allow building with gcc/clang */
-#ifdef __TINYC__
-#define ATOMIC(x) __atomic_##x
-#else
-#define ATOMIC(x) __tcc_atomic_##x
-#endif
-
-void ATOMIC(signal_fence) (int memorder)
-{
-}
-
-void ATOMIC(thread_fence) (int memorder)
-{
-#if defined __i386__
-    __asm__ volatile("lock orl $0, (%esp)");
-#elif defined __x86_64__
-    __asm__ volatile("lock orq $0, (%rsp)");
-#elif defined __arm__
-    __asm__ volatile(".int 0xee070fba"); // mcr p15, 0, r0, c7, c10, 5
-#elif defined __aarch64__
-    __asm__ volatile(".int 0xd5033bbf"); // dmb ish
-#elif defined __riscv
-    __asm__ volatile(".int 0x0ff0000f"); // fence iorw,iorw
-#endif
-}
-
-bool ATOMIC(is_lock_free) (unsigned long size, const volatile void *ptr)
-{
-    bool ret;
-
-    switch (size) {
-    case 1: ret = true; break;
-    case 2: ret = true; break;
-    case 4: ret = true; break;
-#if defined __x86_64__ || defined __aarch64__ || defined __riscv
-    case 8: ret = true; break;
-#else
-    case 8: ret = false; break;
-#endif
-    default: ret = false; break;
-    }
-    return ret;
-}
-
-#ifndef __TINYC__
-void __atomic_signal_fence(int memorder) __attribute__((alias("__tcc_atomic_signal_fence")));
-void __atomic_thread_fence(int memorder) __attribute__((alias("__tcc_atomic_thread_fence")));
-bool __atomic_is_lock_free(unsigned long size, const volatile void *ptr) __attribute__((alias("__tcc_atomic_is_lock_free")));
-#endif
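After this change, the load/store builtins emitted by tcc take roughly the shape below: a full thread fence before the read, and after the write. This is only a sketch of what the patched ATOMIC_LOAD/ATOMIC_STORE macros expand to for the 4-byte case, not the literal preprocessor output; the _sketch names and the fallback __ATOMIC_* defines exist solely to keep the fragment self-contained.

#include <stdint.h>

#ifndef __ATOMIC_ACQUIRE
#define __ATOMIC_ACQUIRE 2
#define __ATOMIC_RELEASE 3
#endif

uint32_t __atomic_load_4_sketch(const volatile void *atom, int memorder)
{
    (void)memorder; /* the patch fences unconditionally, ignoring memorder */
    __atomic_thread_fence(__ATOMIC_ACQUIRE); /* on x86 this is the lock-or full barrier */
    return *(volatile uint32_t *)atom;
}

void __atomic_store_4_sketch(volatile void *atom, uint32_t value, int memorder)
{
    (void)memorder;
    *(volatile uint32_t *)atom = value;
    __atomic_thread_fence(__ATOMIC_RELEASE); /* flush the store before returning */
}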