summaryrefslogtreecommitdiff
path: root/src/prebuilt/wasm2c_atomicops_source_declarations.cc
diff options
context:
space:
mode:
Diffstat (limited to 'src/prebuilt/wasm2c_atomicops_source_declarations.cc')
-rw-r--r-- src/prebuilt/wasm2c_atomicops_source_declarations.cc | 437
1 file changed, 223 insertions, 214 deletions
diff --git a/src/prebuilt/wasm2c_atomicops_source_declarations.cc b/src/prebuilt/wasm2c_atomicops_source_declarations.cc
index 5e82c15b..68b687f3 100644
--- a/src/prebuilt/wasm2c_atomicops_source_declarations.cc
+++ b/src/prebuilt/wasm2c_atomicops_source_declarations.cc
@@ -1,424 +1,430 @@
-const char* s_atomicops_source_declarations = R"w2c_template(#if defined(_MSC_VER)
+const char* s_atomicops_source_declarations = R"w2c_template(#include <stdatomic.h>
)w2c_template"
R"w2c_template(
-#include <intrin.h>
+#if WABT_BIG_ENDIAN
)w2c_template"
-R"w2c_template(
-// Use MSVC intrinsics
+R"w2c_template(#error "wasm2c atomics not supported on big endian"
)w2c_template"
-R"w2c_template(
-// For loads and stores, its not clear if we can rely on register width loads
+R"w2c_template(#endif
)w2c_template"
-R"w2c_template(// and stores to be atomic as reported here
+R"w2c_template(
+#ifndef WASM_RT_C11_AVAILABLE
)w2c_template"
-R"w2c_template(// https://learn.microsoft.com/en-us/windows/win32/sync/interlocked-variable-access?redirectedfrom=MSDN
+R"w2c_template(#error "C11 is required for Wasm threads and shared memory support"
)w2c_template"
-R"w2c_template(// or if we have to reuse other instrinsics
+R"w2c_template(#endif
)w2c_template"
-R"w2c_template(// https://stackoverflow.com/questions/42660091/atomic-load-in-c-with-msvc
+R"w2c_template(
+#define ATOMIC_ALIGNMENT_CHECK(addr, t1) \
)w2c_template"
-R"w2c_template(// We reuse other intrinsics to be cautious
+R"w2c_template( if (UNLIKELY(addr % sizeof(t1))) { \
)w2c_template"
-R"w2c_template(#define atomic_load_u8(a) _InterlockedOr8(a, 0)
+R"w2c_template( TRAP(UNALIGNED); \
)w2c_template"
-R"w2c_template(#define atomic_load_u16(a) _InterlockedOr16(a, 0)
+R"w2c_template( }
)w2c_template"
-R"w2c_template(#define atomic_load_u32(a) _InterlockedOr(a, 0)
+R"w2c_template(
+#define DEFINE_SHARED_LOAD(name, t1, t2, t3, force_read) \
)w2c_template"
-R"w2c_template(#define atomic_load_u64(a) _InterlockedOr64(a, 0)
+R"w2c_template( static inline t3 name(wasm_rt_shared_memory_t* mem, u64 addr) { \
)w2c_template"
-R"w2c_template(
-#define atomic_store_u8(a, v) _InterlockedExchange8(a, v)
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template(#define atomic_store_u16(a, v) _InterlockedExchange16(a, v)
+R"w2c_template( t1 result; \
)w2c_template"
-R"w2c_template(#define atomic_store_u32(a, v) _InterlockedExchange(a, v)
+R"w2c_template( result = atomic_load_explicit( \
)w2c_template"
-R"w2c_template(#define atomic_store_u64(a, v) _InterlockedExchange64(a, v)
+R"w2c_template( (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), \
)w2c_template"
-R"w2c_template(
-#define atomic_add_u8(a, v) _InterlockedExchangeAdd8(a, v)
+R"w2c_template( memory_order_relaxed); \
)w2c_template"
-R"w2c_template(#define atomic_add_u16(a, v) _InterlockedExchangeAdd16(a, v)
+R"w2c_template( force_read(result); \
)w2c_template"
-R"w2c_template(#define atomic_add_u32(a, v) _InterlockedExchangeAdd(a, v)
+R"w2c_template( return (t3)(t2)result; \
)w2c_template"
-R"w2c_template(#define atomic_add_u64(a, v) _InterlockedExchangeAdd64(a, v)
+R"w2c_template( }
)w2c_template"
R"w2c_template(
-#define atomic_sub_u8(a, v) _InterlockedExchangeAdd8(a, -(v))
+DEFINE_SHARED_LOAD(i32_load_shared, u32, u32, u32, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_sub_u16(a, v) _InterlockedExchangeAdd16(a, -(v))
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load_shared, u64, u64, u64, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_sub_u32(a, v) _InterlockedExchangeAdd(a, -(v))
+R"w2c_template(DEFINE_SHARED_LOAD(f32_load_shared, f32, f32, f32, FORCE_READ_FLOAT)
)w2c_template"
-R"w2c_template(#define atomic_sub_u64(a, v) _InterlockedExchangeAdd64(a, -(v))
+R"w2c_template(DEFINE_SHARED_LOAD(f64_load_shared, f64, f64, f64, FORCE_READ_FLOAT)
)w2c_template"
-R"w2c_template(
-#define atomic_and_u8(a, v) _InterlockedAnd8(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i32_load8_s_shared, s8, s32, u32, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_and_u16(a, v) _InterlockedAnd16(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load8_s_shared, s8, s64, u64, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_and_u32(a, v) _InterlockedAnd(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i32_load8_u_shared, u8, u32, u32, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_and_u64(a, v) _InterlockedAnd64(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load8_u_shared, u8, u64, u64, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(
-#define atomic_or_u8(a, v) _InterlockedOr8(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i32_load16_s_shared, s16, s32, u32, FORCE_READ_INT)
+)w2c_template"
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load16_s_shared, s16, s64, u64, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_or_u16(a, v) _InterlockedOr16(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i32_load16_u_shared, u16, u32, u32, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_or_u32(a, v) _InterlockedOr(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load16_u_shared, u16, u64, u64, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_or_u64(a, v) _InterlockedOr64(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load32_s_shared, s32, s64, u64, FORCE_READ_INT)
+)w2c_template"
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load32_u_shared, u32, u64, u64, FORCE_READ_INT)
)w2c_template"
R"w2c_template(
-#define atomic_xor_u8(a, v) _InterlockedXor8(a, v)
+#define DEFINE_SHARED_STORE(name, t1, t2) \
)w2c_template"
-R"w2c_template(#define atomic_xor_u16(a, v) _InterlockedXor16(a, v)
+R"w2c_template( static inline void name(wasm_rt_shared_memory_t* mem, u64 addr, t2 value) { \
)w2c_template"
-R"w2c_template(#define atomic_xor_u32(a, v) _InterlockedXor(a, v)
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template(#define atomic_xor_u64(a, v) _InterlockedXor64(a, v)
+R"w2c_template( t1 wrapped = (t1)value; \
)w2c_template"
-R"w2c_template(
-#define atomic_exchange_u8(a, v) _InterlockedExchange8(a, v)
+R"w2c_template( atomic_store_explicit( \
)w2c_template"
-R"w2c_template(#define atomic_exchange_u16(a, v) _InterlockedExchange16(a, v)
+R"w2c_template( (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wrapped, \
)w2c_template"
-R"w2c_template(#define atomic_exchange_u32(a, v) _InterlockedExchange(a, v)
+R"w2c_template( memory_order_relaxed); \
)w2c_template"
-R"w2c_template(#define atomic_exchange_u64(a, v) _InterlockedExchange64(a, v)
+R"w2c_template( }
)w2c_template"
R"w2c_template(
-// clang-format off
+DEFINE_SHARED_STORE(i32_store_shared, u32, u32)
)w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u8(a, expected_ptr, desired) _InterlockedCompareExchange8(a, desired, *(expected_ptr))
+R"w2c_template(DEFINE_SHARED_STORE(i64_store_shared, u64, u64)
)w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u16(a, expected_ptr, desired) _InterlockedCompareExchange16(a, desired, *(expected_ptr))
+R"w2c_template(DEFINE_SHARED_STORE(f32_store_shared, f32, f32)
)w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u32(a, expected_ptr, desired) _InterlockedCompareExchange(a, desired, *(expected_ptr))
+R"w2c_template(DEFINE_SHARED_STORE(f64_store_shared, f64, f64)
)w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u64(a, expected_ptr, desired) _InterlockedCompareExchange64(a, desired, *(expected_ptr))
+R"w2c_template(DEFINE_SHARED_STORE(i32_store8_shared, u8, u32)
)w2c_template"
-R"w2c_template(// clang-format on
+R"w2c_template(DEFINE_SHARED_STORE(i32_store16_shared, u16, u32)
)w2c_template"
-R"w2c_template(
-#define atomic_fence() _ReadWriteBarrier()
+R"w2c_template(DEFINE_SHARED_STORE(i64_store8_shared, u8, u64)
)w2c_template"
-R"w2c_template(
-#else
+R"w2c_template(DEFINE_SHARED_STORE(i64_store16_shared, u16, u64)
+)w2c_template"
+R"w2c_template(DEFINE_SHARED_STORE(i64_store32_shared, u32, u64)
)w2c_template"
R"w2c_template(
-// Use gcc/clang/icc intrinsics
+#define DEFINE_ATOMIC_LOAD(name, t1, t2, t3, force_read) \
)w2c_template"
-R"w2c_template(#define atomic_load_u8(a) __atomic_load_n((u8*)(a), __ATOMIC_SEQ_CST)
+R"w2c_template( static inline t3 name(wasm_rt_memory_t* mem, u64 addr) { \
)w2c_template"
-R"w2c_template(#define atomic_load_u16(a) __atomic_load_n((u16*)(a), __ATOMIC_SEQ_CST)
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template(#define atomic_load_u32(a) __atomic_load_n((u32*)(a), __ATOMIC_SEQ_CST)
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
-R"w2c_template(#define atomic_load_u64(a) __atomic_load_n((u64*)(a), __ATOMIC_SEQ_CST)
+R"w2c_template( t1 result; \
)w2c_template"
-R"w2c_template(
-#define atomic_store_u8(a, v) __atomic_store_n((u8*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( wasm_rt_memcpy(&result, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1)); \
)w2c_template"
-R"w2c_template(#define atomic_store_u16(a, v) __atomic_store_n((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( force_read(result); \
)w2c_template"
-R"w2c_template(#define atomic_store_u32(a, v) __atomic_store_n((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( return (t3)(t2)result; \
)w2c_template"
-R"w2c_template(#define atomic_store_u64(a, v) __atomic_store_n((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( } \
)w2c_template"
-R"w2c_template(
-#define atomic_add_u8(a, v) __atomic_fetch_add((u8*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( static inline t3 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr) { \
)w2c_template"
-R"w2c_template(#define atomic_add_u16(a, v) __atomic_fetch_add((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template(#define atomic_add_u32(a, v) __atomic_fetch_add((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
-R"w2c_template(#define atomic_add_u64(a, v) __atomic_fetch_add((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( t1 result; \
)w2c_template"
-R"w2c_template(
-#define atomic_sub_u8(a, v) __atomic_fetch_sub((u8*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( result = \
)w2c_template"
-R"w2c_template(#define atomic_sub_u16(a, v) __atomic_fetch_sub((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( atomic_load((_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1))); \
)w2c_template"
-R"w2c_template(#define atomic_sub_u32(a, v) __atomic_fetch_sub((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( force_read(result); \
)w2c_template"
-R"w2c_template(#define atomic_sub_u64(a, v) __atomic_fetch_sub((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( return (t3)(t2)result; \
)w2c_template"
-R"w2c_template(
-#define atomic_and_u8(a, v) __atomic_fetch_and((u8*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( }
)w2c_template"
-R"w2c_template(#define atomic_and_u16(a, v) __atomic_fetch_and((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(
+DEFINE_ATOMIC_LOAD(i32_atomic_load, u32, u32, u32, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_and_u32(a, v) __atomic_fetch_and((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load, u64, u64, u64, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_and_u64(a, v) __atomic_fetch_and((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(DEFINE_ATOMIC_LOAD(i32_atomic_load8_u, u8, u32, u32, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(
-#define atomic_or_u8(a, v) __atomic_fetch_or((u8*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load8_u, u8, u64, u64, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_or_u16(a, v) __atomic_fetch_or((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(DEFINE_ATOMIC_LOAD(i32_atomic_load16_u, u16, u32, u32, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_or_u32(a, v) __atomic_fetch_or((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load16_u, u16, u64, u64, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_or_u64(a, v) __atomic_fetch_or((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load32_u, u32, u64, u64, FORCE_READ_INT)
)w2c_template"
R"w2c_template(
-#define atomic_xor_u8(a, v) __atomic_fetch_xor((u8*)(a), v, __ATOMIC_SEQ_CST)
+#define DEFINE_ATOMIC_STORE(name, t1, t2) \
)w2c_template"
-R"w2c_template(#define atomic_xor_u16(a, v) __atomic_fetch_xor((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( static inline void name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
)w2c_template"
-R"w2c_template(#define atomic_xor_u32(a, v) __atomic_fetch_xor((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template(#define atomic_xor_u64(a, v) __atomic_fetch_xor((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
-R"w2c_template(
-// clang-format off
+R"w2c_template( t1 wrapped = (t1)value; \
)w2c_template"
-R"w2c_template(#define atomic_exchange_u8(a, v) __atomic_exchange_n((u8*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t1)), &wrapped, sizeof(t1)); \
)w2c_template"
-R"w2c_template(#define atomic_exchange_u16(a, v) __atomic_exchange_n((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( } \
)w2c_template"
-R"w2c_template(#define atomic_exchange_u32(a, v) __atomic_exchange_n((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( static inline void name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
)w2c_template"
-R"w2c_template(#define atomic_exchange_u64(a, v) __atomic_exchange_n((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( t2 value) { \
)w2c_template"
-R"w2c_template(// clang-format on
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template(
-#define __atomic_compare_exchange_helper(a, expected_ptr, desired) \
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
-R"w2c_template( (__atomic_compare_exchange_n(a, expected_ptr, desired, 0 /* is_weak */, \
+R"w2c_template( t1 wrapped = (t1)value; \
+)w2c_template"
+R"w2c_template( atomic_store((_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), \
)w2c_template"
-R"w2c_template( __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST), \
+R"w2c_template( wrapped); \
)w2c_template"
-R"w2c_template( *(expected_ptr))
+R"w2c_template( }
)w2c_template"
R"w2c_template(
-// clang-format off
+DEFINE_ATOMIC_STORE(i32_atomic_store, u32, u32)
+)w2c_template"
+R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store, u64, u64)
)w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u8(a, expected_ptr, desired) __atomic_compare_exchange_helper((u8*)(a), expected_ptr, desired)
+R"w2c_template(DEFINE_ATOMIC_STORE(i32_atomic_store8, u8, u32)
)w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u16(a, expected_ptr, desired) __atomic_compare_exchange_helper((u16*)(a), expected_ptr, desired)
+R"w2c_template(DEFINE_ATOMIC_STORE(i32_atomic_store16, u16, u32)
)w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u32(a, expected_ptr, desired) __atomic_compare_exchange_helper((u32*)(a), expected_ptr, desired)
+R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store8, u8, u64)
)w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u64(a, expected_ptr, desired) __atomic_compare_exchange_helper((u64*)(a), expected_ptr, desired)
+R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store16, u16, u64)
)w2c_template"
-R"w2c_template(// clang-format on
+R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store32, u32, u64)
)w2c_template"
R"w2c_template(
-#define atomic_fence() __atomic_thread_fence(__ATOMIC_SEQ_CST)
+#define DEFINE_ATOMIC_RMW(name, opname, op, t1, t2) \
)w2c_template"
-R"w2c_template(
-#endif
+R"w2c_template( static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
)w2c_template"
-R"w2c_template(
-#define ATOMIC_ALIGNMENT_CHECK(addr, t1) \
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template( if (UNLIKELY(addr % sizeof(t1))) { \
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
-R"w2c_template( TRAP(UNALIGNED); \
+R"w2c_template( t1 wrapped = (t1)value; \
)w2c_template"
-R"w2c_template( }
+R"w2c_template( t1 ret; \
)w2c_template"
-R"w2c_template(
-#define DEFINE_ATOMIC_LOAD(name, t1, t2, t3, force_read) \
+R"w2c_template( wasm_rt_memcpy(&ret, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1)); \
+)w2c_template"
+R"w2c_template( ret = ret op wrapped; \
+)w2c_template"
+R"w2c_template( wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t1)), &ret, sizeof(t1)); \
)w2c_template"
-R"w2c_template( static inline t3 name(wasm_rt_memory_t* mem, u64 addr) { \
+R"w2c_template( return (t2)ret; \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t1); \
+R"w2c_template( } \
)w2c_template"
-R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
+R"w2c_template( static inline t2 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
)w2c_template"
-R"w2c_template( t1 result; \
+R"w2c_template( t2 value) { \
)w2c_template"
-R"w2c_template( wasm_rt_memcpy(&result, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1)); \
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template( result = atomic_load_##t1(MEM_ADDR(mem, addr, sizeof(t1))); \
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
-R"w2c_template( force_read(result); \
+R"w2c_template( t1 wrapped = (t1)value; \
)w2c_template"
-R"w2c_template( return (t3)(t2)result; \
+R"w2c_template( t1 ret = atomic_##opname( \
+)w2c_template"
+R"w2c_template( (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wrapped); \
+)w2c_template"
+R"w2c_template( return (t2)ret; \
)w2c_template"
R"w2c_template( }
)w2c_template"
R"w2c_template(
-DEFINE_ATOMIC_LOAD(i32_atomic_load, u32, u32, u32, FORCE_READ_INT)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_add_u, fetch_add, +, u8, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load, u64, u64, u64, FORCE_READ_INT)
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_add_u, fetch_add, +, u16, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_LOAD(i32_atomic_load8_u, u8, u32, u32, FORCE_READ_INT)
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_add, fetch_add, +, u32, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load8_u, u8, u64, u64, FORCE_READ_INT)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_add_u, fetch_add, +, u8, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_LOAD(i32_atomic_load16_u, u16, u32, u32, FORCE_READ_INT)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_add_u, fetch_add, +, u16, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load16_u, u16, u64, u64, FORCE_READ_INT)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_add_u, fetch_add, +, u32, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load32_u, u32, u64, u64, FORCE_READ_INT)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_add, fetch_add, +, u64, u64)
)w2c_template"
R"w2c_template(
-#define DEFINE_ATOMIC_STORE(name, t1, t2) \
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_sub_u, fetch_sub, -, u8, u32)
)w2c_template"
-R"w2c_template( static inline void name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_sub_u, fetch_sub, -, u16, u32)
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t1); \
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_sub, fetch_sub, -, u32, u32)
)w2c_template"
-R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_sub_u, fetch_sub, -, u8, u64)
)w2c_template"
-R"w2c_template( t1 wrapped = (t1)value; \
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_sub_u, fetch_sub, -, u16, u64)
)w2c_template"
-R"w2c_template( atomic_store_##t1(MEM_ADDR(mem, addr, sizeof(t1)), wrapped); \
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_sub_u, fetch_sub, -, u32, u64)
)w2c_template"
-R"w2c_template( }
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_sub, fetch_sub, -, u64, u64)
)w2c_template"
R"w2c_template(
-DEFINE_ATOMIC_STORE(i32_atomic_store, u32, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_and_u, fetch_and, &, u8, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store, u64, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_and_u, fetch_and, &, u16, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_STORE(i32_atomic_store8, u8, u32)
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_and, fetch_and, &, u32, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_STORE(i32_atomic_store16, u16, u32)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_and_u, fetch_and, &, u8, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store8, u8, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_and_u, fetch_and, &, u16, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store16, u16, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_and_u, fetch_and, &, u32, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store32, u32, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_and, fetch_and, &, u64, u64)
)w2c_template"
R"w2c_template(
-#define DEFINE_ATOMIC_RMW(name, op, t1, t2) \
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_or_u, fetch_or, |, u8, u32)
)w2c_template"
-R"w2c_template( static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_or_u, fetch_or, |, u16, u32)
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t1); \
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_or, fetch_or, |, u32, u32)
)w2c_template"
-R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_or_u, fetch_or, |, u8, u64)
)w2c_template"
-R"w2c_template( t1 wrapped = (t1)value; \
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_or_u, fetch_or, |, u16, u64)
)w2c_template"
-R"w2c_template( t1 ret = atomic_##op##_##t1(MEM_ADDR(mem, addr, sizeof(t1)), wrapped); \
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_or_u, fetch_or, |, u32, u64)
)w2c_template"
-R"w2c_template( return (t2)ret; \
-)w2c_template"
-R"w2c_template( }
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_or, fetch_or, |, u64, u64)
)w2c_template"
R"w2c_template(
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_add_u, add, u8, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_xor_u, fetch_xor, ^, u8, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_add_u, add, u16, u32)
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_xor_u, fetch_xor, ^, u16, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_add, add, u32, u32)
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_xor, fetch_xor, ^, u32, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_add_u, add, u8, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_xor_u, fetch_xor, ^, u8, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_add_u, add, u16, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_xor_u, fetch_xor, ^, u16, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_add_u, add, u32, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_xor_u, fetch_xor, ^, u32, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_add, add, u64, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_xor, fetch_xor, ^, u64, u64)
)w2c_template"
R"w2c_template(
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_sub_u, sub, u8, u32)
+#define DEFINE_ATOMIC_XCHG(name, opname, t1, t2) \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_sub_u, sub, u16, u32)
+R"w2c_template( static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_sub, sub, u32, u32)
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_sub_u, sub, u8, u64)
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_sub_u, sub, u16, u64)
+R"w2c_template( t1 wrapped = (t1)value; \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_sub_u, sub, u32, u64)
+R"w2c_template( t1 ret; \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_sub, sub, u64, u64)
+R"w2c_template( wasm_rt_memcpy(&ret, &mem->data[addr], sizeof(t1)); \
)w2c_template"
-R"w2c_template(
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_and_u, and, u8, u32)
+R"w2c_template( wasm_rt_memcpy(&mem->data[addr], &wrapped, sizeof(t1)); \
+)w2c_template"
+R"w2c_template( return (t2)ret; \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_and_u, and, u16, u32)
+R"w2c_template( } \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_and, and, u32, u32)
+R"w2c_template( static inline t2 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_and_u, and, u8, u64)
+R"w2c_template( t2 value) { \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_and_u, and, u16, u64)
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_and_u, and, u32, u64)
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_and, and, u64, u64)
+R"w2c_template( t1 wrapped = (t1)value; \
+)w2c_template"
+R"w2c_template( t1 ret = atomic_##opname((_Atomic volatile t1*)&mem->data[addr], wrapped); \
+)w2c_template"
+R"w2c_template( return (t2)ret; \
+)w2c_template"
+R"w2c_template( }
)w2c_template"
R"w2c_template(
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_or_u, or, u8, u32)
+DEFINE_ATOMIC_XCHG(i32_atomic_rmw8_xchg_u, exchange, u8, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_or_u, or, u16, u32)
+R"w2c_template(DEFINE_ATOMIC_XCHG(i32_atomic_rmw16_xchg_u, exchange, u16, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_or, or, u32, u32)
+R"w2c_template(DEFINE_ATOMIC_XCHG(i32_atomic_rmw_xchg, exchange, u32, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_or_u, or, u8, u64)
+R"w2c_template(DEFINE_ATOMIC_XCHG(i64_atomic_rmw8_xchg_u, exchange, u8, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_or_u, or, u16, u64)
+R"w2c_template(DEFINE_ATOMIC_XCHG(i64_atomic_rmw16_xchg_u, exchange, u16, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_or_u, or, u32, u64)
+R"w2c_template(DEFINE_ATOMIC_XCHG(i64_atomic_rmw32_xchg_u, exchange, u32, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_or, or, u64, u64)
+R"w2c_template(DEFINE_ATOMIC_XCHG(i64_atomic_rmw_xchg, exchange, u64, u64)
)w2c_template"
R"w2c_template(
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_xor_u, xor, u8, u32)
+#define DEFINE_ATOMIC_CMP_XCHG(name, t1, t2) \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_xor_u, xor, u16, u32)
+R"w2c_template( static inline t1 name(wasm_rt_memory_t* mem, u64 addr, t1 expected, \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_xor, xor, u32, u32)
+R"w2c_template( t1 replacement) { \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_xor_u, xor, u8, u64)
+R"w2c_template( MEMCHECK(mem, addr, t2); \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_xor_u, xor, u16, u64)
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t2); \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_xor_u, xor, u32, u64)
+R"w2c_template( t2 expected_wrapped = (t2)expected; \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_xor, xor, u64, u64)
+R"w2c_template( t2 replacement_wrapped = (t2)replacement; \
)w2c_template"
-R"w2c_template(
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_xchg_u, exchange, u8, u32)
+R"w2c_template( t2 ret; \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_xchg_u, exchange, u16, u32)
+R"w2c_template( wasm_rt_memcpy(&ret, MEM_ADDR(mem, addr, sizeof(t2)), sizeof(t2)); \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_xchg, exchange, u32, u32)
+R"w2c_template( if (ret == expected_wrapped) { \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_xchg_u, exchange, u8, u64)
+R"w2c_template( wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t2)), &replacement_wrapped, \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_xchg_u, exchange, u16, u64)
+R"w2c_template( sizeof(t2)); \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_xchg_u, exchange, u32, u64)
+R"w2c_template( } \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_xchg, exchange, u64, u64)
+R"w2c_template( return (t1)expected_wrapped; \
)w2c_template"
-R"w2c_template(
-#define DEFINE_ATOMIC_CMP_XCHG(name, t1, t2) \
+R"w2c_template( } \
)w2c_template"
-R"w2c_template( static inline t1 name(wasm_rt_memory_t* mem, u64 addr, t1 expected, \
+R"w2c_template( static inline t1 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
)w2c_template"
-R"w2c_template( t1 replacement) { \
+R"w2c_template( t1 expected, t1 replacement) { \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t2); \
+R"w2c_template( MEMCHECK(mem, addr, t2); \
)w2c_template"
-R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t2); \
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t2); \
)w2c_template"
-R"w2c_template( t2 expected_wrapped = (t2)expected; \
+R"w2c_template( t2 expected_wrapped = (t2)expected; \
)w2c_template"
-R"w2c_template( t2 replacement_wrapped = (t2)replacement; \
+R"w2c_template( t2 replacement_wrapped = (t2)replacement; \
)w2c_template"
-R"w2c_template( t2 old = \
+R"w2c_template( atomic_compare_exchange_strong( \
)w2c_template"
-R"w2c_template( atomic_compare_exchange_##t2(MEM_ADDR(mem, addr, sizeof(t2)), \
+R"w2c_template( (_Atomic volatile t2*)MEM_ADDR(mem, addr, sizeof(t2)), \
)w2c_template"
-R"w2c_template( &expected_wrapped, replacement_wrapped); \
+R"w2c_template( &expected_wrapped, replacement_wrapped); \
)w2c_template"
-R"w2c_template( return (t1)old; \
+R"w2c_template( return (t1)expected_wrapped; \
)w2c_template"
R"w2c_template( }
)w2c_template"
@@ -437,4 +443,7 @@ R"w2c_template(DEFINE_ATOMIC_CMP_XCHG(i64_atomic_rmw32_cmpxchg_u, u64, u32);
)w2c_template"
R"w2c_template(DEFINE_ATOMIC_CMP_XCHG(i64_atomic_rmw_cmpxchg, u64, u64);
)w2c_template"
+R"w2c_template(
+#define atomic_fence() atomic_thread_fence(memory_order_seq_cst)
+)w2c_template"
;