From 80b4f087aa65a7f23d3489f80eaa298a1cdb3913 Mon Sep 17 00:00:00 2001
From: Shravan Narayan <shravanrn@gmail.com>
Date: Tue, 30 Jan 2024 19:37:52 -0600
Subject: wasm2c: atomic and shared memory operations using C11

---
 src/binary-reader-ir.cc                            |   6 +
 src/c-writer.cc                                    |  60 ++-
 .../wasm2c_atomicops_source_declarations.cc        | 437 +++++++++++----------
 src/template/wasm2c_atomicops.declarations.c       | 396 +++++++++----------
 4 files changed, 466 insertions(+), 433 deletions(-)

(limited to 'src')
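
For context: the templates changed below drop the per-compiler intrinsic macros (MSVC and GCC/Clang) and use C11 <stdatomic.h> directly. Plain loads and stores on shared memories use relaxed ordering, while the Wasm atomic operations use the sequentially consistent C11 defaults. A minimal standalone sketch of that pattern follows; the example_* helper names are illustrative only and not part of the patch:

#include <stdatomic.h>
#include <stdint.h>

/* Relaxed load, as in the DEFINE_SHARED_LOAD helpers below. */
static inline uint32_t example_shared_load_u32(uint8_t* base, uint64_t addr) {
  return atomic_load_explicit((_Atomic volatile uint32_t*)(base + addr),
                              memory_order_relaxed);
}

/* Sequentially consistent read-modify-write, as in the DEFINE_ATOMIC_RMW
   _shared variants below (atomic_fetch_add defaults to seq_cst ordering). */
static inline uint32_t example_atomic_rmw_add_u32(uint8_t* base, uint64_t addr,
                                                  uint32_t value) {
  return atomic_fetch_add((_Atomic volatile uint32_t*)(base + addr), value);
}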

diff --git a/src/binary-reader-ir.cc b/src/binary-reader-ir.cc
index 07546996..a918c39d 100644
--- a/src/binary-reader-ir.cc
+++ b/src/binary-reader-ir.cc
@@ -616,6 +616,9 @@ Result BinaryReaderIR::OnImportMemory(Index import_index,
   import->module_name = module_name;
   import->field_name = field_name;
   import->memory.page_limits = *page_limits;
+  if (import->memory.page_limits.is_shared) {
+    module_->features_used.threads = true;
+  }
   module_->AppendField(
       std::make_unique<ImportModuleField>(std::move(import), GetLocation()));
   return Result::Ok;
@@ -697,6 +700,9 @@ Result BinaryReaderIR::OnMemory(Index index, const Limits* page_limits) {
   auto field = std::make_unique<MemoryModuleField>(GetLocation());
   Memory& memory = field->memory;
   memory.page_limits = *page_limits;
+  if (memory.page_limits.is_shared) {
+    module_->features_used.threads = true;
+  }
   module_->AppendField(std::move(field));
   return Result::Ok;
 }
diff --git a/src/c-writer.cc b/src/c-writer.cc
index 0326c886..24bc0057 100644
--- a/src/c-writer.cc
+++ b/src/c-writer.cc
@@ -388,8 +388,8 @@ class CWriter {
   void WriteGlobal(const Global&, const std::string&);
   void WriteGlobalPtr(const Global&, const std::string&);
   void WriteMemories();
-  void WriteMemory(const std::string&);
-  void WriteMemoryPtr(const std::string&);
+  void WriteMemory(const std::string&, const Memory& memory);
+  void WriteMemoryPtr(const std::string&, const Memory& memory);
   void WriteTables();
   void WriteTable(const std::string&, const wabt::Type&);
   void WriteTablePtr(const std::string&, const Table&);
@@ -1328,6 +1328,15 @@ void CWriter::WriteGetFuncTypeDecl() {
         Newline());
 }
 
+static std::string GetMemoryTypeString(const Memory& memory) {
+  return memory.page_limits.is_shared ? "wasm_rt_shared_memory_t"
+                                      : "wasm_rt_memory_t";
+}
+
+static std::string GetMemoryAPIString(const Memory& memory, std::string api) {
+  return memory.page_limits.is_shared ? (api + "_shared") : api;
+}
+
 void CWriter::WriteInitExpr(const ExprList& expr_list) {
   if (expr_list.empty()) {
     WABT_UNREACHABLE;
@@ -1736,7 +1745,7 @@ void CWriter::BeginInstance() {
       }
 
       case ExternalKind::Memory: {
-        Write("wasm_rt_memory_t");
+        Write(GetMemoryTypeString(cast<MemoryImport>(import)->memory));
         break;
       }
 
@@ -1779,7 +1788,8 @@ void CWriter::BeginInstance() {
 
       case ExternalKind::Memory:
         WriteMemory(std::string("*") +
-                    ExportName(import->module_name, import->field_name));
+                        ExportName(import->module_name, import->field_name),
+                    cast<MemoryImport>(import)->memory);
         break;
 
       case ExternalKind::Table: {
@@ -2027,19 +2037,20 @@ void CWriter::WriteMemories() {
     bool is_import = memory_index < module_->num_memory_imports;
     if (!is_import) {
       WriteMemory(
-          DefineInstanceMemberName(ModuleFieldType::Memory, memory->name));
+          DefineInstanceMemberName(ModuleFieldType::Memory, memory->name),
+          *memory);
       Write(Newline());
     }
     ++memory_index;
   }
 }
 
-void CWriter::WriteMemory(const std::string& name) {
-  Write("wasm_rt_memory_t ", name, ";");
+void CWriter::WriteMemory(const std::string& name, const Memory& memory) {
+  Write(GetMemoryTypeString(memory), " ", name, ";");
 }
 
-void CWriter::WriteMemoryPtr(const std::string& name) {
-  Write("wasm_rt_memory_t* ", name, "(", ModuleInstanceTypeName(),
+void CWriter::WriteMemoryPtr(const std::string& name, const Memory& memory) {
+  Write(GetMemoryTypeString(memory), "* ", name, "(", ModuleInstanceTypeName(),
         "* instance)");
 }
 
@@ -2169,7 +2180,8 @@ void CWriter::WriteDataInitializers() {
         max = memory->page_limits.is_64 ? (static_cast<uint64_t>(1) << 48)
                                         : 65536;
       }
-      Write("wasm_rt_allocate_memory(",
+      std::string func = GetMemoryAPIString(*memory, "wasm_rt_allocate_memory");
+      Write(func, "(",
             ExternalInstancePtr(ModuleFieldType::Memory, memory->name), ", ",
             memory->page_limits.initial, ", ", max, ", ",
             memory->page_limits.is_64, ");", Newline());
@@ -2444,7 +2456,7 @@ void CWriter::WriteExports(CWriterPhase kind) {
       case ExternalKind::Memory: {
         const Memory* memory = module_->GetMemory(export_->var);
         internal_name = memory->name;
-        WriteMemoryPtr(mangled_name);
+        WriteMemoryPtr(mangled_name, *memory);
         break;
       }
 
@@ -2754,7 +2766,8 @@ void CWriter::WriteFree() {
     for (const Memory* memory : module_->memories) {
       bool is_import = memory_index < module_->num_memory_imports;
       if (!is_import) {
-        Write("wasm_rt_free_memory(",
+        std::string func = GetMemoryAPIString(*memory, "wasm_rt_free_memory");
+        Write(func, "(",
               ExternalInstancePtr(ModuleFieldType::Memory, memory->name), ");",
               Newline());
       }
@@ -3706,7 +3719,8 @@ void CWriter::Write(const ExprList& exprs) {
         Memory* memory = module_->memories[module_->GetMemoryIndex(
             cast<MemoryGrowExpr>(&expr)->memidx)];
 
-        Write(StackVar(0), " = wasm_rt_grow_memory(",
+        std::string func = GetMemoryAPIString(*memory, "wasm_rt_grow_memory");
+        Write(StackVar(0), " = ", func, "(",
               ExternalInstancePtr(ModuleFieldType::Memory, memory->name), ", ",
               StackVar(0), ");", Newline());
         break;
@@ -4923,7 +4937,7 @@ void CWriter::Write(const ConvertExpr& expr) {
 }
 
 void CWriter::Write(const LoadExpr& expr) {
-  const char* func = nullptr;
+  std::string func;
   // clang-format off
   switch (expr.opcode) {
     case Opcode::I32Load: func = "i32_load"; break;
@@ -4954,6 +4968,7 @@ void CWriter::Write(const LoadExpr& expr) {
   // clang-format on
 
   Memory* memory = module_->memories[module_->GetMemoryIndex(expr.memidx)];
+  func = GetMemoryAPIString(*memory, func);
 
   Type result_type = expr.opcode.GetResultType();
   Write(StackVar(0, result_type), " = ", func, "(",
@@ -4967,7 +4982,7 @@ void CWriter::Write(const LoadExpr& expr) {
 }
 
 void CWriter::Write(const StoreExpr& expr) {
-  const char* func = nullptr;
+  std::string func;
   // clang-format off
   switch (expr.opcode) {
     case Opcode::I32Store: func = "i32_store"; break;
@@ -4987,6 +5002,7 @@ void CWriter::Write(const StoreExpr& expr) {
   // clang-format on
 
   Memory* memory = module_->memories[module_->GetMemoryIndex(expr.memidx)];
+  func = GetMemoryAPIString(*memory, func);
 
   Write(func, "(", ExternalInstancePtr(ModuleFieldType::Memory, memory->name),
         ", (u64)(", StackVar(1), ")");
@@ -5560,7 +5576,7 @@ void CWriter::Write(const LoadZeroExpr& expr) {
 }
 
 void CWriter::Write(const AtomicLoadExpr& expr) {
-  const char* func = nullptr;
+  std::string func;
   // clang-format off
   switch (expr.opcode) {
     case Opcode::I32AtomicLoad: func = "i32_atomic_load"; break;
@@ -5577,6 +5593,7 @@ void CWriter::Write(const AtomicLoadExpr& expr) {
   // clang-format on
 
   Memory* memory = module_->memories[module_->GetMemoryIndex(expr.memidx)];
+  func = GetMemoryAPIString(*memory, func);
 
   Type result_type = expr.opcode.GetResultType();
   Write(StackVar(0, result_type), " = ", func, "(",
@@ -5590,7 +5607,7 @@ void CWriter::Write(const AtomicLoadExpr& expr) {
 }
 
 void CWriter::Write(const AtomicStoreExpr& expr) {
-  const char* func = nullptr;
+  std::string func;
   // clang-format off
   switch (expr.opcode) {
     case Opcode::I32AtomicStore: func = "i32_atomic_store"; break;
@@ -5607,6 +5624,7 @@ void CWriter::Write(const AtomicStoreExpr& expr) {
   // clang-format on
 
   Memory* memory = module_->memories[module_->GetMemoryIndex(expr.memidx)];
+  func = GetMemoryAPIString(*memory, func);
 
   Write(func, "(", ExternalInstancePtr(ModuleFieldType::Memory, memory->name),
         ", (u64)(", StackVar(1), ")");
@@ -5617,7 +5635,7 @@ void CWriter::Write(const AtomicStoreExpr& expr) {
 }
 
 void CWriter::Write(const AtomicRmwExpr& expr) {
-  const char* func = nullptr;
+  std::string func;
   // clang-format off
   switch (expr.opcode) {
     case Opcode::I32AtomicRmwAdd: func = "i32_atomic_rmw_add"; break;
@@ -5668,6 +5686,8 @@ void CWriter::Write(const AtomicRmwExpr& expr) {
   // clang-format on
 
   Memory* memory = module_->memories[module_->GetMemoryIndex(expr.memidx)];
+  func = GetMemoryAPIString(*memory, func);
+
   Type result_type = expr.opcode.GetResultType();
 
   Write(StackVar(1, result_type), " = ", func, "(",
@@ -5681,7 +5701,7 @@ void CWriter::Write(const AtomicRmwExpr& expr) {
 }
 
 void CWriter::Write(const AtomicRmwCmpxchgExpr& expr) {
-  const char* func = nullptr;
+  std::string func;
   // clang-format off
   switch(expr.opcode) {
     case Opcode::I32AtomicRmwCmpxchg: func = "i32_atomic_rmw_cmpxchg"; break;
@@ -5697,6 +5717,8 @@ void CWriter::Write(const AtomicRmwCmpxchgExpr& expr) {
   // clang-format on
 
   Memory* memory = module_->memories[module_->GetMemoryIndex(expr.memidx)];
+  func = GetMemoryAPIString(*memory, func);
+
   Type result_type = expr.opcode.GetResultType();
 
   Write(StackVar(2, result_type), " = ", func, "(",
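
As a rough illustration of what the C writer now emits (the instance member name w2c_mem and the local variable names are hypothetical; actual names are derived from the module), accesses to an unshared memory keep the existing helpers and wasm_rt_memory_t, while accesses to a shared memory call the _shared variants against wasm_rt_shared_memory_t:

/* Sketch of generated code, names illustrative only. */
u32 a = i32_load(&instance->w2c_mem, (u64)(addr));                       /* unshared memory */
u32 b = i32_load_shared(&instance->w2c_mem, (u64)(addr));                /* shared memory   */
u32 c = i32_atomic_rmw_add_shared(&instance->w2c_mem, (u64)(addr), 1u);  /* shared memory   */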
diff --git a/src/prebuilt/wasm2c_atomicops_source_declarations.cc b/src/prebuilt/wasm2c_atomicops_source_declarations.cc
index 5e82c15b..68b687f3 100644
--- a/src/prebuilt/wasm2c_atomicops_source_declarations.cc
+++ b/src/prebuilt/wasm2c_atomicops_source_declarations.cc
@@ -1,424 +1,430 @@
-const char* s_atomicops_source_declarations = R"w2c_template(#if defined(_MSC_VER)
+const char* s_atomicops_source_declarations = R"w2c_template(#include <stdatomic.h>
 )w2c_template"
 R"w2c_template(
-#include <intrin.h>
+#if WABT_BIG_ENDIAN
 )w2c_template"
-R"w2c_template(
-// Use MSVC intrinsics
+R"w2c_template(#error "wasm2c atomics not supported on big endian"
 )w2c_template"
-R"w2c_template(
-// For loads and stores, its not clear if we can rely on register width loads
+R"w2c_template(#endif
 )w2c_template"
-R"w2c_template(// and stores to be atomic as reported here
+R"w2c_template(
+#ifndef WASM_RT_C11_AVAILABLE
 )w2c_template"
-R"w2c_template(// https://learn.microsoft.com/en-us/windows/win32/sync/interlocked-variable-access?redirectedfrom=MSDN
+R"w2c_template(#error "C11 is required for Wasm threads and shared memory support"
 )w2c_template"
-R"w2c_template(// or if we have to reuse other instrinsics
+R"w2c_template(#endif
 )w2c_template"
-R"w2c_template(// https://stackoverflow.com/questions/42660091/atomic-load-in-c-with-msvc
+R"w2c_template(
+#define ATOMIC_ALIGNMENT_CHECK(addr, t1) \
 )w2c_template"
-R"w2c_template(// We reuse other intrinsics to be cautious
+R"w2c_template(  if (UNLIKELY(addr % sizeof(t1))) {     \
 )w2c_template"
-R"w2c_template(#define atomic_load_u8(a) _InterlockedOr8(a, 0)
+R"w2c_template(    TRAP(UNALIGNED);                     \
 )w2c_template"
-R"w2c_template(#define atomic_load_u16(a) _InterlockedOr16(a, 0)
+R"w2c_template(  }
 )w2c_template"
-R"w2c_template(#define atomic_load_u32(a) _InterlockedOr(a, 0)
+R"w2c_template(
+#define DEFINE_SHARED_LOAD(name, t1, t2, t3, force_read)          \
 )w2c_template"
-R"w2c_template(#define atomic_load_u64(a) _InterlockedOr64(a, 0)
+R"w2c_template(  static inline t3 name(wasm_rt_shared_memory_t* mem, u64 addr) { \
 )w2c_template"
-R"w2c_template(
-#define atomic_store_u8(a, v) _InterlockedExchange8(a, v)
+R"w2c_template(    MEMCHECK(mem, addr, t1);                                      \
 )w2c_template"
-R"w2c_template(#define atomic_store_u16(a, v) _InterlockedExchange16(a, v)
+R"w2c_template(    t1 result;                                                    \
 )w2c_template"
-R"w2c_template(#define atomic_store_u32(a, v) _InterlockedExchange(a, v)
+R"w2c_template(    result = atomic_load_explicit(                                \
 )w2c_template"
-R"w2c_template(#define atomic_store_u64(a, v) _InterlockedExchange64(a, v)
+R"w2c_template(        (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)),    \
 )w2c_template"
-R"w2c_template(
-#define atomic_add_u8(a, v) _InterlockedExchangeAdd8(a, v)
+R"w2c_template(        memory_order_relaxed);                                    \
 )w2c_template"
-R"w2c_template(#define atomic_add_u16(a, v) _InterlockedExchangeAdd16(a, v)
+R"w2c_template(    force_read(result);                                           \
 )w2c_template"
-R"w2c_template(#define atomic_add_u32(a, v) _InterlockedExchangeAdd(a, v)
+R"w2c_template(    return (t3)(t2)result;                                        \
 )w2c_template"
-R"w2c_template(#define atomic_add_u64(a, v) _InterlockedExchangeAdd64(a, v)
+R"w2c_template(  }
 )w2c_template"
 R"w2c_template(
-#define atomic_sub_u8(a, v) _InterlockedExchangeAdd8(a, -(v))
+DEFINE_SHARED_LOAD(i32_load_shared, u32, u32, u32, FORCE_READ_INT)
 )w2c_template"
-R"w2c_template(#define atomic_sub_u16(a, v) _InterlockedExchangeAdd16(a, -(v))
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load_shared, u64, u64, u64, FORCE_READ_INT)
 )w2c_template"
-R"w2c_template(#define atomic_sub_u32(a, v) _InterlockedExchangeAdd(a, -(v))
+R"w2c_template(DEFINE_SHARED_LOAD(f32_load_shared, f32, f32, f32, FORCE_READ_FLOAT)
 )w2c_template"
-R"w2c_template(#define atomic_sub_u64(a, v) _InterlockedExchangeAdd64(a, -(v))
+R"w2c_template(DEFINE_SHARED_LOAD(f64_load_shared, f64, f64, f64, FORCE_READ_FLOAT)
 )w2c_template"
-R"w2c_template(
-#define atomic_and_u8(a, v) _InterlockedAnd8(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i32_load8_s_shared, s8, s32, u32, FORCE_READ_INT)
 )w2c_template"
-R"w2c_template(#define atomic_and_u16(a, v) _InterlockedAnd16(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load8_s_shared, s8, s64, u64, FORCE_READ_INT)
 )w2c_template"
-R"w2c_template(#define atomic_and_u32(a, v) _InterlockedAnd(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i32_load8_u_shared, u8, u32, u32, FORCE_READ_INT)
 )w2c_template"
-R"w2c_template(#define atomic_and_u64(a, v) _InterlockedAnd64(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load8_u_shared, u8, u64, u64, FORCE_READ_INT)
 )w2c_template"
-R"w2c_template(
-#define atomic_or_u8(a, v) _InterlockedOr8(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i32_load16_s_shared, s16, s32, u32, FORCE_READ_INT)
+)w2c_template"
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load16_s_shared, s16, s64, u64, FORCE_READ_INT)
 )w2c_template"
-R"w2c_template(#define atomic_or_u16(a, v) _InterlockedOr16(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i32_load16_u_shared, u16, u32, u32, FORCE_READ_INT)
 )w2c_template"
-R"w2c_template(#define atomic_or_u32(a, v) _InterlockedOr(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load16_u_shared, u16, u64, u64, FORCE_READ_INT)
 )w2c_template"
-R"w2c_template(#define atomic_or_u64(a, v) _InterlockedOr64(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load32_s_shared, s32, s64, u64, FORCE_READ_INT)
+)w2c_template"
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load32_u_shared, u32, u64, u64, FORCE_READ_INT)
 )w2c_template"
 R"w2c_template(
-#define atomic_xor_u8(a, v) _InterlockedXor8(a, v)
+#define DEFINE_SHARED_STORE(name, t1, t2)                                     \
 )w2c_template"
-R"w2c_template(#define atomic_xor_u16(a, v) _InterlockedXor16(a, v)
+R"w2c_template(  static inline void name(wasm_rt_shared_memory_t* mem, u64 addr, t2 value) { \
 )w2c_template"
-R"w2c_template(#define atomic_xor_u32(a, v) _InterlockedXor(a, v)
+R"w2c_template(    MEMCHECK(mem, addr, t1);                                                  \
 )w2c_template"
-R"w2c_template(#define atomic_xor_u64(a, v) _InterlockedXor64(a, v)
+R"w2c_template(    t1 wrapped = (t1)value;                                                   \
 )w2c_template"
-R"w2c_template(
-#define atomic_exchange_u8(a, v) _InterlockedExchange8(a, v)
+R"w2c_template(    atomic_store_explicit(                                                    \
 )w2c_template"
-R"w2c_template(#define atomic_exchange_u16(a, v) _InterlockedExchange16(a, v)
+R"w2c_template(        (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wrapped,       \
 )w2c_template"
-R"w2c_template(#define atomic_exchange_u32(a, v) _InterlockedExchange(a, v)
+R"w2c_template(        memory_order_relaxed);                                                \
 )w2c_template"
-R"w2c_template(#define atomic_exchange_u64(a, v) _InterlockedExchange64(a, v)
+R"w2c_template(  }
 )w2c_template"
 R"w2c_template(
-// clang-format off
+DEFINE_SHARED_STORE(i32_store_shared, u32, u32)
 )w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u8(a, expected_ptr, desired) _InterlockedCompareExchange8(a, desired, *(expected_ptr))
+R"w2c_template(DEFINE_SHARED_STORE(i64_store_shared, u64, u64)
 )w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u16(a, expected_ptr, desired) _InterlockedCompareExchange16(a, desired, *(expected_ptr))
+R"w2c_template(DEFINE_SHARED_STORE(f32_store_shared, f32, f32)
 )w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u32(a, expected_ptr, desired) _InterlockedCompareExchange(a, desired, *(expected_ptr))
+R"w2c_template(DEFINE_SHARED_STORE(f64_store_shared, f64, f64)
 )w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u64(a, expected_ptr, desired) _InterlockedCompareExchange64(a, desired, *(expected_ptr))
+R"w2c_template(DEFINE_SHARED_STORE(i32_store8_shared, u8, u32)
 )w2c_template"
-R"w2c_template(// clang-format on
+R"w2c_template(DEFINE_SHARED_STORE(i32_store16_shared, u16, u32)
 )w2c_template"
-R"w2c_template(
-#define atomic_fence() _ReadWriteBarrier()
+R"w2c_template(DEFINE_SHARED_STORE(i64_store8_shared, u8, u64)
 )w2c_template"
-R"w2c_template(
-#else
+R"w2c_template(DEFINE_SHARED_STORE(i64_store16_shared, u16, u64)
+)w2c_template"
+R"w2c_template(DEFINE_SHARED_STORE(i64_store32_shared, u32, u64)
 )w2c_template"
 R"w2c_template(
-// Use gcc/clang/icc intrinsics
+#define DEFINE_ATOMIC_LOAD(name, t1, t2, t3, force_read)                    \
 )w2c_template"
-R"w2c_template(#define atomic_load_u8(a) __atomic_load_n((u8*)(a), __ATOMIC_SEQ_CST)
+R"w2c_template(  static inline t3 name(wasm_rt_memory_t* mem, u64 addr) {                  \
 )w2c_template"
-R"w2c_template(#define atomic_load_u16(a) __atomic_load_n((u16*)(a), __ATOMIC_SEQ_CST)
+R"w2c_template(    MEMCHECK(mem, addr, t1);                                                \
 )w2c_template"
-R"w2c_template(#define atomic_load_u32(a) __atomic_load_n((u32*)(a), __ATOMIC_SEQ_CST)
+R"w2c_template(    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                       \
 )w2c_template"
-R"w2c_template(#define atomic_load_u64(a) __atomic_load_n((u64*)(a), __ATOMIC_SEQ_CST)
+R"w2c_template(    t1 result;                                                              \
 )w2c_template"
-R"w2c_template(
-#define atomic_store_u8(a, v) __atomic_store_n((u8*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(    wasm_rt_memcpy(&result, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1));   \
 )w2c_template"
-R"w2c_template(#define atomic_store_u16(a, v) __atomic_store_n((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(    force_read(result);                                                     \
 )w2c_template"
-R"w2c_template(#define atomic_store_u32(a, v) __atomic_store_n((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(    return (t3)(t2)result;                                                  \
 )w2c_template"
-R"w2c_template(#define atomic_store_u64(a, v) __atomic_store_n((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(  }                                                                         \
 )w2c_template"
-R"w2c_template(
-#define atomic_add_u8(a, v) __atomic_fetch_add((u8*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(  static inline t3 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr) {  \
 )w2c_template"
-R"w2c_template(#define atomic_add_u16(a, v) __atomic_fetch_add((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(    MEMCHECK(mem, addr, t1);                                                \
 )w2c_template"
-R"w2c_template(#define atomic_add_u32(a, v) __atomic_fetch_add((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                       \
 )w2c_template"
-R"w2c_template(#define atomic_add_u64(a, v) __atomic_fetch_add((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(    t1 result;                                                              \
 )w2c_template"
-R"w2c_template(
-#define atomic_sub_u8(a, v) __atomic_fetch_sub((u8*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(    result =                                                                \
 )w2c_template"
-R"w2c_template(#define atomic_sub_u16(a, v) __atomic_fetch_sub((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(        atomic_load((_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1))); \
 )w2c_template"
-R"w2c_template(#define atomic_sub_u32(a, v) __atomic_fetch_sub((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(    force_read(result);                                                     \
 )w2c_template"
-R"w2c_template(#define atomic_sub_u64(a, v) __atomic_fetch_sub((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(    return (t3)(t2)result;                                                  \
 )w2c_template"
-R"w2c_template(
-#define atomic_and_u8(a, v) __atomic_fetch_and((u8*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(  }
 )w2c_template"
-R"w2c_template(#define atomic_and_u16(a, v) __atomic_fetch_and((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(
+DEFINE_ATOMIC_LOAD(i32_atomic_load, u32, u32, u32, FORCE_READ_INT)
 )w2c_template"
-R"w2c_template(#define atomic_and_u32(a, v) __atomic_fetch_and((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load, u64, u64, u64, FORCE_READ_INT)
 )w2c_template"
-R"w2c_template(#define atomic_and_u64(a, v) __atomic_fetch_and((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(DEFINE_ATOMIC_LOAD(i32_atomic_load8_u, u8, u32, u32, FORCE_READ_INT)
 )w2c_template"
-R"w2c_template(
-#define atomic_or_u8(a, v) __atomic_fetch_or((u8*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load8_u, u8, u64, u64, FORCE_READ_INT)
 )w2c_template"
-R"w2c_template(#define atomic_or_u16(a, v) __atomic_fetch_or((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(DEFINE_ATOMIC_LOAD(i32_atomic_load16_u, u16, u32, u32, FORCE_READ_INT)
 )w2c_template"
-R"w2c_template(#define atomic_or_u32(a, v) __atomic_fetch_or((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load16_u, u16, u64, u64, FORCE_READ_INT)
 )w2c_template"
-R"w2c_template(#define atomic_or_u64(a, v) __atomic_fetch_or((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load32_u, u32, u64, u64, FORCE_READ_INT)
 )w2c_template"
 R"w2c_template(
-#define atomic_xor_u8(a, v) __atomic_fetch_xor((u8*)(a), v, __ATOMIC_SEQ_CST)
+#define DEFINE_ATOMIC_STORE(name, t1, t2)                                  \
 )w2c_template"
-R"w2c_template(#define atomic_xor_u16(a, v) __atomic_fetch_xor((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(  static inline void name(wasm_rt_memory_t* mem, u64 addr, t2 value) {     \
 )w2c_template"
-R"w2c_template(#define atomic_xor_u32(a, v) __atomic_fetch_xor((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(    MEMCHECK(mem, addr, t1);                                               \
 )w2c_template"
-R"w2c_template(#define atomic_xor_u64(a, v) __atomic_fetch_xor((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                      \
 )w2c_template"
-R"w2c_template(
-// clang-format off
+R"w2c_template(    t1 wrapped = (t1)value;                                                \
 )w2c_template"
-R"w2c_template(#define atomic_exchange_u8(a, v) __atomic_exchange_n((u8*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(    wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t1)), &wrapped, sizeof(t1)); \
 )w2c_template"
-R"w2c_template(#define atomic_exchange_u16(a, v) __atomic_exchange_n((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(  }                                                                        \
 )w2c_template"
-R"w2c_template(#define atomic_exchange_u32(a, v) __atomic_exchange_n((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(  static inline void name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
 )w2c_template"
-R"w2c_template(#define atomic_exchange_u64(a, v) __atomic_exchange_n((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(                                   t2 value) {                             \
 )w2c_template"
-R"w2c_template(// clang-format on
+R"w2c_template(    MEMCHECK(mem, addr, t1);                                               \
 )w2c_template"
-R"w2c_template(
-#define __atomic_compare_exchange_helper(a, expected_ptr, desired)        \
+R"w2c_template(    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                      \
 )w2c_template"
-R"w2c_template(  (__atomic_compare_exchange_n(a, expected_ptr, desired, 0 /* is_weak */, \
+R"w2c_template(    t1 wrapped = (t1)value;                                                \
+)w2c_template"
+R"w2c_template(    atomic_store((_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)),    \
 )w2c_template"
-R"w2c_template(                               __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST),       \
+R"w2c_template(                 wrapped);                                                 \
 )w2c_template"
-R"w2c_template(   *(expected_ptr))
+R"w2c_template(  }
 )w2c_template"
 R"w2c_template(
-// clang-format off
+DEFINE_ATOMIC_STORE(i32_atomic_store, u32, u32)
+)w2c_template"
+R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store, u64, u64)
 )w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u8(a, expected_ptr, desired) __atomic_compare_exchange_helper((u8*)(a), expected_ptr, desired)
+R"w2c_template(DEFINE_ATOMIC_STORE(i32_atomic_store8, u8, u32)
 )w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u16(a, expected_ptr, desired) __atomic_compare_exchange_helper((u16*)(a), expected_ptr, desired)
+R"w2c_template(DEFINE_ATOMIC_STORE(i32_atomic_store16, u16, u32)
 )w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u32(a, expected_ptr, desired) __atomic_compare_exchange_helper((u32*)(a), expected_ptr, desired)
+R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store8, u8, u64)
 )w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u64(a, expected_ptr, desired) __atomic_compare_exchange_helper((u64*)(a), expected_ptr, desired)
+R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store16, u16, u64)
 )w2c_template"
-R"w2c_template(// clang-format on
+R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store32, u32, u64)
 )w2c_template"
 R"w2c_template(
-#define atomic_fence() __atomic_thread_fence(__ATOMIC_SEQ_CST)
+#define DEFINE_ATOMIC_RMW(name, opname, op, t1, t2)                      \
 )w2c_template"
-R"w2c_template(
-#endif
+R"w2c_template(  static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) {     \
 )w2c_template"
-R"w2c_template(
-#define ATOMIC_ALIGNMENT_CHECK(addr, t1) \
+R"w2c_template(    MEMCHECK(mem, addr, t1);                                             \
 )w2c_template"
-R"w2c_template(  if (UNLIKELY(addr % sizeof(t1))) {     \
+R"w2c_template(    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                    \
 )w2c_template"
-R"w2c_template(    TRAP(UNALIGNED);                     \
+R"w2c_template(    t1 wrapped = (t1)value;                                              \
 )w2c_template"
-R"w2c_template(  }
+R"w2c_template(    t1 ret;                                                              \
 )w2c_template"
-R"w2c_template(
-#define DEFINE_ATOMIC_LOAD(name, t1, t2, t3, force_read)                  \
+R"w2c_template(    wasm_rt_memcpy(&ret, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1));   \
+)w2c_template"
+R"w2c_template(    ret = ret op wrapped;                                                \
+)w2c_template"
+R"w2c_template(    wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t1)), &ret, sizeof(t1));   \
 )w2c_template"
-R"w2c_template(  static inline t3 name(wasm_rt_memory_t* mem, u64 addr) {                \
+R"w2c_template(    return (t2)ret;                                                      \
 )w2c_template"
-R"w2c_template(    MEMCHECK(mem, addr, t1);                                              \
+R"w2c_template(  }                                                                      \
 )w2c_template"
-R"w2c_template(    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                     \
+R"w2c_template(  static inline t2 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
 )w2c_template"
-R"w2c_template(    t1 result;                                                            \
+R"w2c_template(                                 t2 value) {                             \
 )w2c_template"
-R"w2c_template(    wasm_rt_memcpy(&result, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1)); \
+R"w2c_template(    MEMCHECK(mem, addr, t1);                                             \
 )w2c_template"
-R"w2c_template(    result = atomic_load_##t1(MEM_ADDR(mem, addr, sizeof(t1)));           \
+R"w2c_template(    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                    \
 )w2c_template"
-R"w2c_template(    force_read(result);                                                   \
+R"w2c_template(    t1 wrapped = (t1)value;                                              \
 )w2c_template"
-R"w2c_template(    return (t3)(t2)result;                                                \
+R"w2c_template(    t1 ret = atomic_##opname(                                            \
+)w2c_template"
+R"w2c_template(        (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wrapped); \
+)w2c_template"
+R"w2c_template(    return (t2)ret;                                                      \
 )w2c_template"
 R"w2c_template(  }
 )w2c_template"
 R"w2c_template(
-DEFINE_ATOMIC_LOAD(i32_atomic_load, u32, u32, u32, FORCE_READ_INT)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_add_u, fetch_add, +, u8, u32)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load, u64, u64, u64, FORCE_READ_INT)
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_add_u, fetch_add, +, u16, u32)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_LOAD(i32_atomic_load8_u, u8, u32, u32, FORCE_READ_INT)
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_add, fetch_add, +, u32, u32)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load8_u, u8, u64, u64, FORCE_READ_INT)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_add_u, fetch_add, +, u8, u64)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_LOAD(i32_atomic_load16_u, u16, u32, u32, FORCE_READ_INT)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_add_u, fetch_add, +, u16, u64)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load16_u, u16, u64, u64, FORCE_READ_INT)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_add_u, fetch_add, +, u32, u64)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load32_u, u32, u64, u64, FORCE_READ_INT)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_add, fetch_add, +, u64, u64)
 )w2c_template"
 R"w2c_template(
-#define DEFINE_ATOMIC_STORE(name, t1, t2)                              \
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_sub_u, fetch_sub, -, u8, u32)
 )w2c_template"
-R"w2c_template(  static inline void name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_sub_u, fetch_sub, -, u16, u32)
 )w2c_template"
-R"w2c_template(    MEMCHECK(mem, addr, t1);                                           \
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_sub, fetch_sub, -, u32, u32)
 )w2c_template"
-R"w2c_template(    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                  \
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_sub_u, fetch_sub, -, u8, u64)
 )w2c_template"
-R"w2c_template(    t1 wrapped = (t1)value;                                            \
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_sub_u, fetch_sub, -, u16, u64)
 )w2c_template"
-R"w2c_template(    atomic_store_##t1(MEM_ADDR(mem, addr, sizeof(t1)), wrapped);       \
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_sub_u, fetch_sub, -, u32, u64)
 )w2c_template"
-R"w2c_template(  }
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_sub, fetch_sub, -, u64, u64)
 )w2c_template"
 R"w2c_template(
-DEFINE_ATOMIC_STORE(i32_atomic_store, u32, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_and_u, fetch_and, &, u8, u32)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store, u64, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_and_u, fetch_and, &, u16, u32)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_STORE(i32_atomic_store8, u8, u32)
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_and, fetch_and, &, u32, u32)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_STORE(i32_atomic_store16, u16, u32)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_and_u, fetch_and, &, u8, u64)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store8, u8, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_and_u, fetch_and, &, u16, u64)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store16, u16, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_and_u, fetch_and, &, u32, u64)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store32, u32, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_and, fetch_and, &, u64, u64)
 )w2c_template"
 R"w2c_template(
-#define DEFINE_ATOMIC_RMW(name, op, t1, t2)                                \
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_or_u, fetch_or, |, u8, u32)
 )w2c_template"
-R"w2c_template(  static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) {       \
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_or_u, fetch_or, |, u16, u32)
 )w2c_template"
-R"w2c_template(    MEMCHECK(mem, addr, t1);                                               \
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_or, fetch_or, |, u32, u32)
 )w2c_template"
-R"w2c_template(    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                      \
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_or_u, fetch_or, |, u8, u64)
 )w2c_template"
-R"w2c_template(    t1 wrapped = (t1)value;                                                \
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_or_u, fetch_or, |, u16, u64)
 )w2c_template"
-R"w2c_template(    t1 ret = atomic_##op##_##t1(MEM_ADDR(mem, addr, sizeof(t1)), wrapped); \
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_or_u, fetch_or, |, u32, u64)
 )w2c_template"
-R"w2c_template(    return (t2)ret;                                                        \
-)w2c_template"
-R"w2c_template(  }
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_or, fetch_or, |, u64, u64)
 )w2c_template"
 R"w2c_template(
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_add_u, add, u8, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_xor_u, fetch_xor, ^, u8, u32)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_add_u, add, u16, u32)
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_xor_u, fetch_xor, ^, u16, u32)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_add, add, u32, u32)
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_xor, fetch_xor, ^, u32, u32)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_add_u, add, u8, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_xor_u, fetch_xor, ^, u8, u64)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_add_u, add, u16, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_xor_u, fetch_xor, ^, u16, u64)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_add_u, add, u32, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_xor_u, fetch_xor, ^, u32, u64)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_add, add, u64, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_xor, fetch_xor, ^, u64, u64)
 )w2c_template"
 R"w2c_template(
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_sub_u, sub, u8, u32)
+#define DEFINE_ATOMIC_XCHG(name, opname, t1, t2)                               \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_sub_u, sub, u16, u32)
+R"w2c_template(  static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) {           \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_sub, sub, u32, u32)
+R"w2c_template(    MEMCHECK(mem, addr, t1);                                                   \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_sub_u, sub, u8, u64)
+R"w2c_template(    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                          \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_sub_u, sub, u16, u64)
+R"w2c_template(    t1 wrapped = (t1)value;                                                    \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_sub_u, sub, u32, u64)
+R"w2c_template(    t1 ret;                                                                    \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_sub, sub, u64, u64)
+R"w2c_template(    wasm_rt_memcpy(&ret, &mem->data[addr], sizeof(t1));                        \
 )w2c_template"
-R"w2c_template(
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_and_u, and, u8, u32)
+R"w2c_template(    wasm_rt_memcpy(&mem->data[addr], &wrapped, sizeof(t1));                    \
+)w2c_template"
+R"w2c_template(    return (t2)ret;                                                            \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_and_u, and, u16, u32)
+R"w2c_template(  }                                                                            \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_and, and, u32, u32)
+R"w2c_template(  static inline t2 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr,       \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_and_u, and, u8, u64)
+R"w2c_template(                                 t2 value) {                                   \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_and_u, and, u16, u64)
+R"w2c_template(    MEMCHECK(mem, addr, t1);                                                   \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_and_u, and, u32, u64)
+R"w2c_template(    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                          \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_and, and, u64, u64)
+R"w2c_template(    t1 wrapped = (t1)value;                                                    \
+)w2c_template"
+R"w2c_template(    t1 ret = atomic_##opname((_Atomic volatile t1*)&mem->data[addr], wrapped); \
+)w2c_template"
+R"w2c_template(    return (t2)ret;                                                            \
+)w2c_template"
+R"w2c_template(  }
 )w2c_template"
 R"w2c_template(
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_or_u, or, u8, u32)
+DEFINE_ATOMIC_XCHG(i32_atomic_rmw8_xchg_u, exchange, u8, u32)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_or_u, or, u16, u32)
+R"w2c_template(DEFINE_ATOMIC_XCHG(i32_atomic_rmw16_xchg_u, exchange, u16, u32)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_or, or, u32, u32)
+R"w2c_template(DEFINE_ATOMIC_XCHG(i32_atomic_rmw_xchg, exchange, u32, u32)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_or_u, or, u8, u64)
+R"w2c_template(DEFINE_ATOMIC_XCHG(i64_atomic_rmw8_xchg_u, exchange, u8, u64)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_or_u, or, u16, u64)
+R"w2c_template(DEFINE_ATOMIC_XCHG(i64_atomic_rmw16_xchg_u, exchange, u16, u64)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_or_u, or, u32, u64)
+R"w2c_template(DEFINE_ATOMIC_XCHG(i64_atomic_rmw32_xchg_u, exchange, u32, u64)
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_or, or, u64, u64)
+R"w2c_template(DEFINE_ATOMIC_XCHG(i64_atomic_rmw_xchg, exchange, u64, u64)
 )w2c_template"
 R"w2c_template(
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_xor_u, xor, u8, u32)
+#define DEFINE_ATOMIC_CMP_XCHG(name, t1, t2)                                \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_xor_u, xor, u16, u32)
+R"w2c_template(  static inline t1 name(wasm_rt_memory_t* mem, u64 addr, t1 expected,       \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_xor, xor, u32, u32)
+R"w2c_template(                        t1 replacement) {                                   \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_xor_u, xor, u8, u64)
+R"w2c_template(    MEMCHECK(mem, addr, t2);                                                \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_xor_u, xor, u16, u64)
+R"w2c_template(    ATOMIC_ALIGNMENT_CHECK(addr, t2);                                       \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_xor_u, xor, u32, u64)
+R"w2c_template(    t2 expected_wrapped = (t2)expected;                                     \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_xor, xor, u64, u64)
+R"w2c_template(    t2 replacement_wrapped = (t2)replacement;                               \
 )w2c_template"
-R"w2c_template(
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_xchg_u, exchange, u8, u32)
+R"w2c_template(    t2 ret;                                                                 \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_xchg_u, exchange, u16, u32)
+R"w2c_template(    wasm_rt_memcpy(&ret, MEM_ADDR(mem, addr, sizeof(t2)), sizeof(t2));      \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_xchg, exchange, u32, u32)
+R"w2c_template(    if (ret == expected_wrapped) {                                          \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_xchg_u, exchange, u8, u64)
+R"w2c_template(      wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t2)), &replacement_wrapped, \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_xchg_u, exchange, u16, u64)
+R"w2c_template(                     sizeof(t2));                                           \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_xchg_u, exchange, u32, u64)
+R"w2c_template(    }                                                                       \
 )w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_xchg, exchange, u64, u64)
+R"w2c_template(    return (t1)expected_wrapped;                                            \
 )w2c_template"
-R"w2c_template(
-#define DEFINE_ATOMIC_CMP_XCHG(name, t1, t2)                                  \
+R"w2c_template(  }                                                                         \
 )w2c_template"
-R"w2c_template(  static inline t1 name(wasm_rt_memory_t* mem, u64 addr, t1 expected,         \
+R"w2c_template(  static inline t1 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr,    \
 )w2c_template"
-R"w2c_template(                        t1 replacement) {                                     \
+R"w2c_template(                                 t1 expected, t1 replacement) {             \
 )w2c_template"
-R"w2c_template(    MEMCHECK(mem, addr, t2);                                                  \
+R"w2c_template(    MEMCHECK(mem, addr, t2);                                                \
 )w2c_template"
-R"w2c_template(    ATOMIC_ALIGNMENT_CHECK(addr, t2);                                         \
+R"w2c_template(    ATOMIC_ALIGNMENT_CHECK(addr, t2);                                       \
 )w2c_template"
-R"w2c_template(    t2 expected_wrapped = (t2)expected;                                       \
+R"w2c_template(    t2 expected_wrapped = (t2)expected;                                     \
 )w2c_template"
-R"w2c_template(    t2 replacement_wrapped = (t2)replacement;                                 \
+R"w2c_template(    t2 replacement_wrapped = (t2)replacement;                               \
 )w2c_template"
-R"w2c_template(    t2 old =                                                                  \
+R"w2c_template(    atomic_compare_exchange_strong(                                         \
 )w2c_template"
-R"w2c_template(        atomic_compare_exchange_##t2(MEM_ADDR(mem, addr, sizeof(t2)),         \
+R"w2c_template(        (_Atomic volatile t2*)MEM_ADDR(mem, addr, sizeof(t2)),              \
 )w2c_template"
-R"w2c_template(                                     &expected_wrapped, replacement_wrapped); \
+R"w2c_template(        &expected_wrapped, replacement_wrapped);                            \
 )w2c_template"
-R"w2c_template(    return (t1)old;                                                           \
+R"w2c_template(    return (t1)expected_wrapped;                                            \
 )w2c_template"
 R"w2c_template(  }
 )w2c_template"
@@ -437,4 +443,7 @@ R"w2c_template(DEFINE_ATOMIC_CMP_XCHG(i64_atomic_rmw32_cmpxchg_u, u64, u32);
 )w2c_template"
 R"w2c_template(DEFINE_ATOMIC_CMP_XCHG(i64_atomic_rmw_cmpxchg, u64, u64);
 )w2c_template"
+R"w2c_template(
+#define atomic_fence() atomic_thread_fence(memory_order_seq_cst)
+)w2c_template"
 ;
diff --git a/src/template/wasm2c_atomicops.declarations.c b/src/template/wasm2c_atomicops.declarations.c
index 976f7f95..5d319991 100644
--- a/src/template/wasm2c_atomicops.declarations.c
+++ b/src/template/wasm2c_atomicops.declarations.c
@@ -1,123 +1,11 @@
-#if defined(_MSC_VER)
-
-#include <intrin.h>
-
-// Use MSVC intrinsics
-
-// For loads and stores, its not clear if we can rely on register width loads
-// and stores to be atomic as reported here
-// https://learn.microsoft.com/en-us/windows/win32/sync/interlocked-variable-access?redirectedfrom=MSDN
-// or if we have to reuse other instrinsics
-// https://stackoverflow.com/questions/42660091/atomic-load-in-c-with-msvc
-// We reuse other intrinsics to be cautious
-#define atomic_load_u8(a) _InterlockedOr8(a, 0)
-#define atomic_load_u16(a) _InterlockedOr16(a, 0)
-#define atomic_load_u32(a) _InterlockedOr(a, 0)
-#define atomic_load_u64(a) _InterlockedOr64(a, 0)
-
-#define atomic_store_u8(a, v) _InterlockedExchange8(a, v)
-#define atomic_store_u16(a, v) _InterlockedExchange16(a, v)
-#define atomic_store_u32(a, v) _InterlockedExchange(a, v)
-#define atomic_store_u64(a, v) _InterlockedExchange64(a, v)
-
-#define atomic_add_u8(a, v) _InterlockedExchangeAdd8(a, v)
-#define atomic_add_u16(a, v) _InterlockedExchangeAdd16(a, v)
-#define atomic_add_u32(a, v) _InterlockedExchangeAdd(a, v)
-#define atomic_add_u64(a, v) _InterlockedExchangeAdd64(a, v)
-
-#define atomic_sub_u8(a, v) _InterlockedExchangeAdd8(a, -(v))
-#define atomic_sub_u16(a, v) _InterlockedExchangeAdd16(a, -(v))
-#define atomic_sub_u32(a, v) _InterlockedExchangeAdd(a, -(v))
-#define atomic_sub_u64(a, v) _InterlockedExchangeAdd64(a, -(v))
-
-#define atomic_and_u8(a, v) _InterlockedAnd8(a, v)
-#define atomic_and_u16(a, v) _InterlockedAnd16(a, v)
-#define atomic_and_u32(a, v) _InterlockedAnd(a, v)
-#define atomic_and_u64(a, v) _InterlockedAnd64(a, v)
-
-#define atomic_or_u8(a, v) _InterlockedOr8(a, v)
-#define atomic_or_u16(a, v) _InterlockedOr16(a, v)
-#define atomic_or_u32(a, v) _InterlockedOr(a, v)
-#define atomic_or_u64(a, v) _InterlockedOr64(a, v)
-
-#define atomic_xor_u8(a, v) _InterlockedXor8(a, v)
-#define atomic_xor_u16(a, v) _InterlockedXor16(a, v)
-#define atomic_xor_u32(a, v) _InterlockedXor(a, v)
-#define atomic_xor_u64(a, v) _InterlockedXor64(a, v)
-
-#define atomic_exchange_u8(a, v) _InterlockedExchange8(a, v)
-#define atomic_exchange_u16(a, v) _InterlockedExchange16(a, v)
-#define atomic_exchange_u32(a, v) _InterlockedExchange(a, v)
-#define atomic_exchange_u64(a, v) _InterlockedExchange64(a, v)
-
-// clang-format off
-#define atomic_compare_exchange_u8(a, expected_ptr, desired) _InterlockedCompareExchange8(a, desired, *(expected_ptr))
-#define atomic_compare_exchange_u16(a, expected_ptr, desired) _InterlockedCompareExchange16(a, desired, *(expected_ptr))
-#define atomic_compare_exchange_u32(a, expected_ptr, desired) _InterlockedCompareExchange(a, desired, *(expected_ptr))
-#define atomic_compare_exchange_u64(a, expected_ptr, desired) _InterlockedCompareExchange64(a, desired, *(expected_ptr))
-// clang-format on
-
-#define atomic_fence() _ReadWriteBarrier()
-
-#else
-
-// Use gcc/clang/icc intrinsics
-#define atomic_load_u8(a) __atomic_load_n((u8*)(a), __ATOMIC_SEQ_CST)
-#define atomic_load_u16(a) __atomic_load_n((u16*)(a), __ATOMIC_SEQ_CST)
-#define atomic_load_u32(a) __atomic_load_n((u32*)(a), __ATOMIC_SEQ_CST)
-#define atomic_load_u64(a) __atomic_load_n((u64*)(a), __ATOMIC_SEQ_CST)
-
-#define atomic_store_u8(a, v) __atomic_store_n((u8*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_store_u16(a, v) __atomic_store_n((u16*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_store_u32(a, v) __atomic_store_n((u32*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_store_u64(a, v) __atomic_store_n((u64*)(a), v, __ATOMIC_SEQ_CST)
-
-#define atomic_add_u8(a, v) __atomic_fetch_add((u8*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_add_u16(a, v) __atomic_fetch_add((u16*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_add_u32(a, v) __atomic_fetch_add((u32*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_add_u64(a, v) __atomic_fetch_add((u64*)(a), v, __ATOMIC_SEQ_CST)
-
-#define atomic_sub_u8(a, v) __atomic_fetch_sub((u8*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_sub_u16(a, v) __atomic_fetch_sub((u16*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_sub_u32(a, v) __atomic_fetch_sub((u32*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_sub_u64(a, v) __atomic_fetch_sub((u64*)(a), v, __ATOMIC_SEQ_CST)
-
-#define atomic_and_u8(a, v) __atomic_fetch_and((u8*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_and_u16(a, v) __atomic_fetch_and((u16*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_and_u32(a, v) __atomic_fetch_and((u32*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_and_u64(a, v) __atomic_fetch_and((u64*)(a), v, __ATOMIC_SEQ_CST)
-
-#define atomic_or_u8(a, v) __atomic_fetch_or((u8*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_or_u16(a, v) __atomic_fetch_or((u16*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_or_u32(a, v) __atomic_fetch_or((u32*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_or_u64(a, v) __atomic_fetch_or((u64*)(a), v, __ATOMIC_SEQ_CST)
-
-#define atomic_xor_u8(a, v) __atomic_fetch_xor((u8*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_xor_u16(a, v) __atomic_fetch_xor((u16*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_xor_u32(a, v) __atomic_fetch_xor((u32*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_xor_u64(a, v) __atomic_fetch_xor((u64*)(a), v, __ATOMIC_SEQ_CST)
-
-// clang-format off
-#define atomic_exchange_u8(a, v) __atomic_exchange_n((u8*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_exchange_u16(a, v) __atomic_exchange_n((u16*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_exchange_u32(a, v) __atomic_exchange_n((u32*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_exchange_u64(a, v) __atomic_exchange_n((u64*)(a), v, __ATOMIC_SEQ_CST)
-// clang-format on
-
-#define __atomic_compare_exchange_helper(a, expected_ptr, desired)        \
-  (__atomic_compare_exchange_n(a, expected_ptr, desired, 0 /* is_weak */, \
-                               __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST),       \
-   *(expected_ptr))
-
-// clang-format off
-#define atomic_compare_exchange_u8(a, expected_ptr, desired) __atomic_compare_exchange_helper((u8*)(a), expected_ptr, desired)
-#define atomic_compare_exchange_u16(a, expected_ptr, desired) __atomic_compare_exchange_helper((u16*)(a), expected_ptr, desired)
-#define atomic_compare_exchange_u32(a, expected_ptr, desired) __atomic_compare_exchange_helper((u32*)(a), expected_ptr, desired)
-#define atomic_compare_exchange_u64(a, expected_ptr, desired) __atomic_compare_exchange_helper((u64*)(a), expected_ptr, desired)
-// clang-format on
-
-#define atomic_fence() __atomic_thread_fence(__ATOMIC_SEQ_CST)
+#include <stdatomic.h>
 
+#if WABT_BIG_ENDIAN
+#error "wasm2c atomics not supported on big endian"
+#endif
+
+#ifndef WASM_RT_C11_AVAILABLE
+#error "C11 is required for Wasm threads and shared memory support"
 #endif
 
 #define ATOMIC_ALIGNMENT_CHECK(addr, t1) \
@@ -125,15 +13,68 @@
     TRAP(UNALIGNED);                     \
   }
 
-#define DEFINE_ATOMIC_LOAD(name, t1, t2, t3, force_read)                  \
-  static inline t3 name(wasm_rt_memory_t* mem, u64 addr) {                \
-    MEMCHECK(mem, addr, t1);                                              \
-    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                     \
-    t1 result;                                                            \
-    wasm_rt_memcpy(&result, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1)); \
-    result = atomic_load_##t1(MEM_ADDR(mem, addr, sizeof(t1)));           \
-    force_read(result);                                                   \
-    return (t3)(t2)result;                                                \
+#define DEFINE_SHARED_LOAD(name, t1, t2, t3, force_read)          \
+  static inline t3 name(wasm_rt_shared_memory_t* mem, u64 addr) { \
+    MEMCHECK(mem, addr, t1);                                      \
+    t1 result;                                                    \
+    result = atomic_load_explicit(                                \
+        (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)),    \
+        memory_order_relaxed);                                    \
+    force_read(result);                                           \
+    return (t3)(t2)result;                                        \
+  }
+
+DEFINE_SHARED_LOAD(i32_load_shared, u32, u32, u32, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i64_load_shared, u64, u64, u64, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(f32_load_shared, f32, f32, f32, FORCE_READ_FLOAT)
+DEFINE_SHARED_LOAD(f64_load_shared, f64, f64, f64, FORCE_READ_FLOAT)
+DEFINE_SHARED_LOAD(i32_load8_s_shared, s8, s32, u32, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i64_load8_s_shared, s8, s64, u64, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i32_load8_u_shared, u8, u32, u32, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i64_load8_u_shared, u8, u64, u64, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i32_load16_s_shared, s16, s32, u32, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i64_load16_s_shared, s16, s64, u64, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i32_load16_u_shared, u16, u32, u32, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i64_load16_u_shared, u16, u64, u64, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i64_load32_s_shared, s32, s64, u64, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i64_load32_u_shared, u32, u64, u64, FORCE_READ_INT)
+
+#define DEFINE_SHARED_STORE(name, t1, t2)                                     \
+  static inline void name(wasm_rt_shared_memory_t* mem, u64 addr, t2 value) { \
+    MEMCHECK(mem, addr, t1);                                                  \
+    t1 wrapped = (t1)value;                                                   \
+    atomic_store_explicit(                                                    \
+        (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wrapped,       \
+        memory_order_relaxed);                                                \
+  }
+
+DEFINE_SHARED_STORE(i32_store_shared, u32, u32)
+DEFINE_SHARED_STORE(i64_store_shared, u64, u64)
+DEFINE_SHARED_STORE(f32_store_shared, f32, f32)
+DEFINE_SHARED_STORE(f64_store_shared, f64, f64)
+DEFINE_SHARED_STORE(i32_store8_shared, u8, u32)
+DEFINE_SHARED_STORE(i32_store16_shared, u16, u32)
+DEFINE_SHARED_STORE(i64_store8_shared, u8, u64)
+DEFINE_SHARED_STORE(i64_store16_shared, u16, u64)
+DEFINE_SHARED_STORE(i64_store32_shared, u32, u64)
+
+#define DEFINE_ATOMIC_LOAD(name, t1, t2, t3, force_read)                    \
+  static inline t3 name(wasm_rt_memory_t* mem, u64 addr) {                  \
+    MEMCHECK(mem, addr, t1);                                                \
+    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                       \
+    t1 result;                                                              \
+    wasm_rt_memcpy(&result, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1));   \
+    force_read(result);                                                     \
+    return (t3)(t2)result;                                                  \
+  }                                                                         \
+  static inline t3 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr) {  \
+    MEMCHECK(mem, addr, t1);                                                \
+    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                       \
+    t1 result;                                                              \
+    result =                                                                \
+        atomic_load((_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1))); \
+    force_read(result);                                                     \
+    return (t3)(t2)result;                                                  \
   }
 
 DEFINE_ATOMIC_LOAD(i32_atomic_load, u32, u32, u32, FORCE_READ_INT)
@@ -144,12 +85,20 @@ DEFINE_ATOMIC_LOAD(i32_atomic_load16_u, u16, u32, u32, FORCE_READ_INT)
 DEFINE_ATOMIC_LOAD(i64_atomic_load16_u, u16, u64, u64, FORCE_READ_INT)
 DEFINE_ATOMIC_LOAD(i64_atomic_load32_u, u32, u64, u64, FORCE_READ_INT)
 
-#define DEFINE_ATOMIC_STORE(name, t1, t2)                              \
-  static inline void name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
-    MEMCHECK(mem, addr, t1);                                           \
-    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                  \
-    t1 wrapped = (t1)value;                                            \
-    atomic_store_##t1(MEM_ADDR(mem, addr, sizeof(t1)), wrapped);       \
+#define DEFINE_ATOMIC_STORE(name, t1, t2)                                  \
+  static inline void name(wasm_rt_memory_t* mem, u64 addr, t2 value) {     \
+    MEMCHECK(mem, addr, t1);                                               \
+    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                      \
+    t1 wrapped = (t1)value;                                                \
+    wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t1)), &wrapped, sizeof(t1)); \
+  }                                                                        \
+  static inline void name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
+                                   t2 value) {                             \
+    MEMCHECK(mem, addr, t1);                                               \
+    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                      \
+    t1 wrapped = (t1)value;                                                \
+    atomic_store((_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)),    \
+                 wrapped);                                                 \
   }
 
 DEFINE_ATOMIC_STORE(i32_atomic_store, u32, u32)
@@ -160,74 +109,126 @@ DEFINE_ATOMIC_STORE(i64_atomic_store8, u8, u64)
 DEFINE_ATOMIC_STORE(i64_atomic_store16, u16, u64)
 DEFINE_ATOMIC_STORE(i64_atomic_store32, u32, u64)
 
-#define DEFINE_ATOMIC_RMW(name, op, t1, t2)                                \
-  static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) {       \
-    MEMCHECK(mem, addr, t1);                                               \
-    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                      \
-    t1 wrapped = (t1)value;                                                \
-    t1 ret = atomic_##op##_##t1(MEM_ADDR(mem, addr, sizeof(t1)), wrapped); \
-    return (t2)ret;                                                        \
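+/* Wasm atomic RMW instructions return the value read from memory before the
+   modification. The _shared variants use the C11 atomic_fetch_* builtins
+   (sequentially consistent by default); unshared memories need no atomicity. */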
+#define DEFINE_ATOMIC_RMW(name, opname, op, t1, t2)                      \
+  static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) {     \
+    MEMCHECK(mem, addr, t1);                                             \
+    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                    \
+    t1 wrapped = (t1)value;                                              \
+    t1 old;                                                              \
+    wasm_rt_memcpy(&old, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1));   \
+    t1 ret = old op wrapped;                                             \
+    wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t1)), &ret, sizeof(t1));   \
+    return (t2)old;                                                      \
+  }                                                                      \
+  static inline t2 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
+                                 t2 value) {                             \
+    MEMCHECK(mem, addr, t1);                                             \
+    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                    \
+    t1 wrapped = (t1)value;                                              \
+    t1 ret = atomic_##opname(                                            \
+        (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wrapped); \
+    return (t2)ret;                                                      \
+  }
+
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_add_u, fetch_add, +, u8, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw16_add_u, fetch_add, +, u16, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw_add, fetch_add, +, u32, u32)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw8_add_u, fetch_add, +, u8, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw16_add_u, fetch_add, +, u16, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw32_add_u, fetch_add, +, u32, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw_add, fetch_add, +, u64, u64)
+
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_sub_u, fetch_sub, -, u8, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw16_sub_u, fetch_sub, -, u16, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw_sub, fetch_sub, -, u32, u32)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw8_sub_u, fetch_sub, -, u8, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw16_sub_u, fetch_sub, -, u16, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw32_sub_u, fetch_sub, -, u32, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw_sub, fetch_sub, -, u64, u64)
+
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_and_u, fetch_and, &, u8, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw16_and_u, fetch_and, &, u16, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw_and, fetch_and, &, u32, u32)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw8_and_u, fetch_and, &, u8, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw16_and_u, fetch_and, &, u16, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw32_and_u, fetch_and, &, u32, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw_and, fetch_and, &, u64, u64)
+
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_or_u, fetch_or, |, u8, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw16_or_u, fetch_or, |, u16, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw_or, fetch_or, |, u32, u32)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw8_or_u, fetch_or, |, u8, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw16_or_u, fetch_or, |, u16, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw32_or_u, fetch_or, |, u32, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw_or, fetch_or, |, u64, u64)
+
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_xor_u, fetch_xor, ^, u8, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw16_xor_u, fetch_xor, ^, u16, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw_xor, fetch_xor, ^, u32, u32)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw8_xor_u, fetch_xor, ^, u8, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw16_xor_u, fetch_xor, ^, u16, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw32_xor_u, fetch_xor, ^, u32, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw_xor, fetch_xor, ^, u64, u64)
+
+#define DEFINE_ATOMIC_XCHG(name, opname, t1, t2)                               \
+  static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) {           \
+    MEMCHECK(mem, addr, t1);                                                   \
+    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                          \
+    t1 wrapped = (t1)value;                                                    \
+    t1 ret;                                                                    \
+    wasm_rt_memcpy(&ret, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1));        \
+    wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t1)), &wrapped, sizeof(t1));    \
+    return (t2)ret;                                                            \
+  }                                                                            \
+  static inline t2 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr,       \
+                                 t2 value) {                                   \
+    MEMCHECK(mem, addr, t1);                                                   \
+    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                          \
+    t1 wrapped = (t1)value;                                                    \
+    t1 ret = atomic_##opname(                                                 \
+        (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wrapped);      \
+    return (t2)ret;                                                            \
   }
 
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_add_u, add, u8, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw16_add_u, add, u16, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw_add, add, u32, u32)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw8_add_u, add, u8, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw16_add_u, add, u16, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw32_add_u, add, u32, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw_add, add, u64, u64)
-
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_sub_u, sub, u8, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw16_sub_u, sub, u16, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw_sub, sub, u32, u32)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw8_sub_u, sub, u8, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw16_sub_u, sub, u16, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw32_sub_u, sub, u32, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw_sub, sub, u64, u64)
-
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_and_u, and, u8, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw16_and_u, and, u16, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw_and, and, u32, u32)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw8_and_u, and, u8, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw16_and_u, and, u16, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw32_and_u, and, u32, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw_and, and, u64, u64)
-
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_or_u, or, u8, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw16_or_u, or, u16, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw_or, or, u32, u32)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw8_or_u, or, u8, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw16_or_u, or, u16, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw32_or_u, or, u32, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw_or, or, u64, u64)
-
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_xor_u, xor, u8, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw16_xor_u, xor, u16, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw_xor, xor, u32, u32)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw8_xor_u, xor, u8, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw16_xor_u, xor, u16, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw32_xor_u, xor, u32, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw_xor, xor, u64, u64)
-
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_xchg_u, exchange, u8, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw16_xchg_u, exchange, u16, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw_xchg, exchange, u32, u32)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw8_xchg_u, exchange, u8, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw16_xchg_u, exchange, u16, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw32_xchg_u, exchange, u32, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw_xchg, exchange, u64, u64)
-
-#define DEFINE_ATOMIC_CMP_XCHG(name, t1, t2)                                  \
-  static inline t1 name(wasm_rt_memory_t* mem, u64 addr, t1 expected,         \
-                        t1 replacement) {                                     \
-    MEMCHECK(mem, addr, t2);                                                  \
-    ATOMIC_ALIGNMENT_CHECK(addr, t2);                                         \
-    t2 expected_wrapped = (t2)expected;                                       \
-    t2 replacement_wrapped = (t2)replacement;                                 \
-    t2 old =                                                                  \
-        atomic_compare_exchange_##t2(MEM_ADDR(mem, addr, sizeof(t2)),         \
-                                     &expected_wrapped, replacement_wrapped); \
-    return (t1)old;                                                           \
+DEFINE_ATOMIC_XCHG(i32_atomic_rmw8_xchg_u, exchange, u8, u32)
+DEFINE_ATOMIC_XCHG(i32_atomic_rmw16_xchg_u, exchange, u16, u32)
+DEFINE_ATOMIC_XCHG(i32_atomic_rmw_xchg, exchange, u32, u32)
+DEFINE_ATOMIC_XCHG(i64_atomic_rmw8_xchg_u, exchange, u8, u64)
+DEFINE_ATOMIC_XCHG(i64_atomic_rmw16_xchg_u, exchange, u16, u64)
+DEFINE_ATOMIC_XCHG(i64_atomic_rmw32_xchg_u, exchange, u32, u64)
+DEFINE_ATOMIC_XCHG(i64_atomic_rmw_xchg, exchange, u64, u64)
+
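+/* cmpxchg returns the value originally read from memory. In the _shared
+   variant, atomic_compare_exchange_strong stores the observed value into
+   expected_wrapped on failure, so expected_wrapped always holds that value. */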
+#define DEFINE_ATOMIC_CMP_XCHG(name, t1, t2)                                \
+  static inline t1 name(wasm_rt_memory_t* mem, u64 addr, t1 expected,       \
+                        t1 replacement) {                                   \
+    MEMCHECK(mem, addr, t2);                                                \
+    ATOMIC_ALIGNMENT_CHECK(addr, t2);                                       \
+    t2 expected_wrapped = (t2)expected;                                     \
+    t2 replacement_wrapped = (t2)replacement;                               \
+    t2 ret;                                                                 \
+    wasm_rt_memcpy(&ret, MEM_ADDR(mem, addr, sizeof(t2)), sizeof(t2));      \
+    if (ret == expected_wrapped) {                                          \
+      wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t2)), &replacement_wrapped, \
+                     sizeof(t2));                                           \
+    }                                                                       \
+    return (t1)ret;                                                         \
+  }                                                                         \
+  static inline t1 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr,    \
+                                 t1 expected, t1 replacement) {             \
+    MEMCHECK(mem, addr, t2);                                                \
+    ATOMIC_ALIGNMENT_CHECK(addr, t2);                                       \
+    t2 expected_wrapped = (t2)expected;                                     \
+    t2 replacement_wrapped = (t2)replacement;                               \
+    atomic_compare_exchange_strong(                                         \
+        (_Atomic volatile t2*)MEM_ADDR(mem, addr, sizeof(t2)),              \
+        &expected_wrapped, replacement_wrapped);                            \
+    return (t1)expected_wrapped;                                            \
   }
 
 DEFINE_ATOMIC_CMP_XCHG(i32_atomic_rmw8_cmpxchg_u, u32, u8);
@@ -237,3 +238,6 @@ DEFINE_ATOMIC_CMP_XCHG(i64_atomic_rmw8_cmpxchg_u, u64, u8);
 DEFINE_ATOMIC_CMP_XCHG(i64_atomic_rmw16_cmpxchg_u, u64, u16);
 DEFINE_ATOMIC_CMP_XCHG(i64_atomic_rmw32_cmpxchg_u, u64, u32);
 DEFINE_ATOMIC_CMP_XCHG(i64_atomic_rmw_cmpxchg, u64, u64);
+
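+/* Sequentially consistent full memory barrier. */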
+#define atomic_fence() atomic_thread_fence(memory_order_seq_cst)
-- 
cgit v1.2.3