-rw-r--r--  CMakeLists.txt                                          3
-rw-r--r--  src/binary-reader-ir.cc                                 6
-rw-r--r--  src/c-writer.cc                                        60
-rw-r--r--  src/prebuilt/wasm2c_atomicops_source_declarations.cc  437
-rw-r--r--  src/template/wasm2c_atomicops.declarations.c          396
-rwxr-xr-x  test/run-spec-wasm2c.py                                33
-rw-r--r--  test/wasm2c/spec/threads/atomic.txt                    51
-rw-r--r--  wasm2c/README.md                                       37
-rw-r--r--  wasm2c/wasm-rt-impl.c                                 176
-rw-r--r--  wasm2c/wasm-rt-mem-impl-helper.inc                    164
-rw-r--r--  wasm2c/wasm-rt-mem-impl.c                             178
-rw-r--r--  wasm2c/wasm-rt.h                                       77
12 files changed, 973 insertions(+), 645 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5b90b50b..a7d3f4b0 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -423,9 +423,10 @@ if (WABT_INSTALL_RULES)
endif ()
if (HAVE_SETJMP_H)
- set(WASM_RT_FILES "wasm2c/wasm-rt-impl.h" "wasm2c/wasm-rt-impl.c" "wasm2c/wasm-rt-exceptions-impl.c")
+ set(WASM_RT_FILES "wasm2c/wasm-rt-impl.h" "wasm2c/wasm-rt-impl.c" "wasm2c/wasm-rt-exceptions-impl.c" "wasm2c/wasm-rt-mem-impl.c")
add_library(wasm-rt-impl STATIC ${WASM_RT_FILES})
+ target_link_libraries(wasm-rt-impl ${CMAKE_THREAD_LIBS_INIT})
add_library(wabt::wasm-rt-impl ALIAS wasm-rt-impl)
if (WABT_BIG_ENDIAN)
target_compile_definitions(wasm-rt-impl PUBLIC WABT_BIG_ENDIAN=1)
diff --git a/src/binary-reader-ir.cc b/src/binary-reader-ir.cc
index 07546996..a918c39d 100644
--- a/src/binary-reader-ir.cc
+++ b/src/binary-reader-ir.cc
@@ -616,6 +616,9 @@ Result BinaryReaderIR::OnImportMemory(Index import_index,
import->module_name = module_name;
import->field_name = field_name;
import->memory.page_limits = *page_limits;
+ if (import->memory.page_limits.is_shared) {
+ module_->features_used.threads = true;
+ }
module_->AppendField(
std::make_unique<ImportModuleField>(std::move(import), GetLocation()));
return Result::Ok;
@@ -697,6 +700,9 @@ Result BinaryReaderIR::OnMemory(Index index, const Limits* page_limits) {
auto field = std::make_unique<MemoryModuleField>(GetLocation());
Memory& memory = field->memory;
memory.page_limits = *page_limits;
+ if (memory.page_limits.is_shared) {
+ module_->features_used.threads = true;
+ }
module_->AppendField(std::move(field));
return Result::Ok;
}
diff --git a/src/c-writer.cc b/src/c-writer.cc
index 0326c886..24bc0057 100644
--- a/src/c-writer.cc
+++ b/src/c-writer.cc
@@ -388,8 +388,8 @@ class CWriter {
void WriteGlobal(const Global&, const std::string&);
void WriteGlobalPtr(const Global&, const std::string&);
void WriteMemories();
- void WriteMemory(const std::string&);
- void WriteMemoryPtr(const std::string&);
+ void WriteMemory(const std::string&, const Memory& memory);
+ void WriteMemoryPtr(const std::string&, const Memory& memory);
void WriteTables();
void WriteTable(const std::string&, const wabt::Type&);
void WriteTablePtr(const std::string&, const Table&);
@@ -1328,6 +1328,15 @@ void CWriter::WriteGetFuncTypeDecl() {
Newline());
}
+static std::string GetMemoryTypeString(const Memory& memory) {
+ return memory.page_limits.is_shared ? "wasm_rt_shared_memory_t"
+ : "wasm_rt_memory_t";
+}
+
+static std::string GetMemoryAPIString(const Memory& memory, std::string api) {
+ return memory.page_limits.is_shared ? (api + "_shared") : api;
+}
+
void CWriter::WriteInitExpr(const ExprList& expr_list) {
if (expr_list.empty()) {
WABT_UNREACHABLE;
@@ -1736,7 +1745,7 @@ void CWriter::BeginInstance() {
}
case ExternalKind::Memory: {
- Write("wasm_rt_memory_t");
+ Write(GetMemoryTypeString(cast<MemoryImport>(import)->memory));
break;
}
@@ -1779,7 +1788,8 @@ void CWriter::BeginInstance() {
case ExternalKind::Memory:
WriteMemory(std::string("*") +
- ExportName(import->module_name, import->field_name));
+ ExportName(import->module_name, import->field_name),
+ cast<MemoryImport>(import)->memory);
break;
case ExternalKind::Table: {
@@ -2027,19 +2037,20 @@ void CWriter::WriteMemories() {
bool is_import = memory_index < module_->num_memory_imports;
if (!is_import) {
WriteMemory(
- DefineInstanceMemberName(ModuleFieldType::Memory, memory->name));
+ DefineInstanceMemberName(ModuleFieldType::Memory, memory->name),
+ *memory);
Write(Newline());
}
++memory_index;
}
}
-void CWriter::WriteMemory(const std::string& name) {
- Write("wasm_rt_memory_t ", name, ";");
+void CWriter::WriteMemory(const std::string& name, const Memory& memory) {
+ Write(GetMemoryTypeString(memory), " ", name, ";");
}
-void CWriter::WriteMemoryPtr(const std::string& name) {
- Write("wasm_rt_memory_t* ", name, "(", ModuleInstanceTypeName(),
+void CWriter::WriteMemoryPtr(const std::string& name, const Memory& memory) {
+ Write(GetMemoryTypeString(memory), "* ", name, "(", ModuleInstanceTypeName(),
"* instance)");
}
@@ -2169,7 +2180,8 @@ void CWriter::WriteDataInitializers() {
max = memory->page_limits.is_64 ? (static_cast<uint64_t>(1) << 48)
: 65536;
}
- Write("wasm_rt_allocate_memory(",
+ std::string func = GetMemoryAPIString(*memory, "wasm_rt_allocate_memory");
+ Write(func, "(",
ExternalInstancePtr(ModuleFieldType::Memory, memory->name), ", ",
memory->page_limits.initial, ", ", max, ", ",
memory->page_limits.is_64, ");", Newline());
@@ -2444,7 +2456,7 @@ void CWriter::WriteExports(CWriterPhase kind) {
case ExternalKind::Memory: {
const Memory* memory = module_->GetMemory(export_->var);
internal_name = memory->name;
- WriteMemoryPtr(mangled_name);
+ WriteMemoryPtr(mangled_name, *memory);
break;
}
@@ -2754,7 +2766,8 @@ void CWriter::WriteFree() {
for (const Memory* memory : module_->memories) {
bool is_import = memory_index < module_->num_memory_imports;
if (!is_import) {
- Write("wasm_rt_free_memory(",
+ std::string func = GetMemoryAPIString(*memory, "wasm_rt_free_memory");
+ Write(func, "(",
ExternalInstancePtr(ModuleFieldType::Memory, memory->name), ");",
Newline());
}
@@ -3706,7 +3719,8 @@ void CWriter::Write(const ExprList& exprs) {
Memory* memory = module_->memories[module_->GetMemoryIndex(
cast<MemoryGrowExpr>(&expr)->memidx)];
- Write(StackVar(0), " = wasm_rt_grow_memory(",
+ std::string func = GetMemoryAPIString(*memory, "wasm_rt_grow_memory");
+ Write(StackVar(0), " = ", func, "(",
ExternalInstancePtr(ModuleFieldType::Memory, memory->name), ", ",
StackVar(0), ");", Newline());
break;
@@ -4923,7 +4937,7 @@ void CWriter::Write(const ConvertExpr& expr) {
}
void CWriter::Write(const LoadExpr& expr) {
- const char* func = nullptr;
+ std::string func;
// clang-format off
switch (expr.opcode) {
case Opcode::I32Load: func = "i32_load"; break;
@@ -4954,6 +4968,7 @@ void CWriter::Write(const LoadExpr& expr) {
// clang-format on
Memory* memory = module_->memories[module_->GetMemoryIndex(expr.memidx)];
+ func = GetMemoryAPIString(*memory, func);
Type result_type = expr.opcode.GetResultType();
Write(StackVar(0, result_type), " = ", func, "(",
@@ -4967,7 +4982,7 @@ void CWriter::Write(const LoadExpr& expr) {
}
void CWriter::Write(const StoreExpr& expr) {
- const char* func = nullptr;
+ std::string func;
// clang-format off
switch (expr.opcode) {
case Opcode::I32Store: func = "i32_store"; break;
@@ -4987,6 +5002,7 @@ void CWriter::Write(const StoreExpr& expr) {
// clang-format on
Memory* memory = module_->memories[module_->GetMemoryIndex(expr.memidx)];
+ func = GetMemoryAPIString(*memory, func);
Write(func, "(", ExternalInstancePtr(ModuleFieldType::Memory, memory->name),
", (u64)(", StackVar(1), ")");
@@ -5560,7 +5576,7 @@ void CWriter::Write(const LoadZeroExpr& expr) {
}
void CWriter::Write(const AtomicLoadExpr& expr) {
- const char* func = nullptr;
+ std::string func;
// clang-format off
switch (expr.opcode) {
case Opcode::I32AtomicLoad: func = "i32_atomic_load"; break;
@@ -5577,6 +5593,7 @@ void CWriter::Write(const AtomicLoadExpr& expr) {
// clang-format on
Memory* memory = module_->memories[module_->GetMemoryIndex(expr.memidx)];
+ func = GetMemoryAPIString(*memory, func);
Type result_type = expr.opcode.GetResultType();
Write(StackVar(0, result_type), " = ", func, "(",
@@ -5590,7 +5607,7 @@ void CWriter::Write(const AtomicLoadExpr& expr) {
}
void CWriter::Write(const AtomicStoreExpr& expr) {
- const char* func = nullptr;
+ std::string func;
// clang-format off
switch (expr.opcode) {
case Opcode::I32AtomicStore: func = "i32_atomic_store"; break;
@@ -5607,6 +5624,7 @@ void CWriter::Write(const AtomicStoreExpr& expr) {
// clang-format on
Memory* memory = module_->memories[module_->GetMemoryIndex(expr.memidx)];
+ func = GetMemoryAPIString(*memory, func);
Write(func, "(", ExternalInstancePtr(ModuleFieldType::Memory, memory->name),
", (u64)(", StackVar(1), ")");
@@ -5617,7 +5635,7 @@ void CWriter::Write(const AtomicStoreExpr& expr) {
}
void CWriter::Write(const AtomicRmwExpr& expr) {
- const char* func = nullptr;
+ std::string func;
// clang-format off
switch (expr.opcode) {
case Opcode::I32AtomicRmwAdd: func = "i32_atomic_rmw_add"; break;
@@ -5668,6 +5686,8 @@ void CWriter::Write(const AtomicRmwExpr& expr) {
// clang-format on
Memory* memory = module_->memories[module_->GetMemoryIndex(expr.memidx)];
+ func = GetMemoryAPIString(*memory, func);
+
Type result_type = expr.opcode.GetResultType();
Write(StackVar(1, result_type), " = ", func, "(",
@@ -5681,7 +5701,7 @@ void CWriter::Write(const AtomicRmwExpr& expr) {
}
void CWriter::Write(const AtomicRmwCmpxchgExpr& expr) {
- const char* func = nullptr;
+ std::string func;
// clang-format off
switch(expr.opcode) {
case Opcode::I32AtomicRmwCmpxchg: func = "i32_atomic_rmw_cmpxchg"; break;
@@ -5697,6 +5717,8 @@ void CWriter::Write(const AtomicRmwCmpxchgExpr& expr) {
// clang-format on
Memory* memory = module_->memories[module_->GetMemoryIndex(expr.memidx)];
+ func = GetMemoryAPIString(*memory, func);
+
Type result_type = expr.opcode.GetResultType();
Write(StackVar(2, result_type), " = ", func, "(",
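Taken together, the c-writer changes thread the Memory through every site that names a memory type or runtime call: GetMemoryTypeString picks wasm_rt_shared_memory_t when page_limits.is_shared is set, and GetMemoryAPIString appends a "_shared" suffix to the runtime entry points (allocate, free, grow) and to the load/store/atomic accessors. For a module with one unshared and one shared memory, the generated C would look roughly like the sketch below; the w2c_* names and page counts are illustrative, and wasm_rt_allocate_memory_shared is assumed to mirror its unshared counterpart's signature.

    /* Sketch of generated output; names are hypothetical. */
    typedef struct w2c_example {
      wasm_rt_memory_t w2c_mem;          /* unshared memory, as before */
      wasm_rt_shared_memory_t w2c_shmem; /* shared memory uses the new struct */
    } w2c_example;

    static void init_memories(w2c_example* instance) {
      /* Unshared call is unchanged... */
      wasm_rt_allocate_memory(&instance->w2c_mem, 1, 65536, 0);
      /* ...shared memories get the "_shared"-suffixed API. */
      wasm_rt_allocate_memory_shared(&instance->w2c_shmem, 1, 2, 0);
    }

Accessors follow the same pattern: i32_load becomes i32_load_shared, i32_atomic_rmw_add becomes i32_atomic_rmw_add_shared, and so on.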
diff --git a/src/prebuilt/wasm2c_atomicops_source_declarations.cc b/src/prebuilt/wasm2c_atomicops_source_declarations.cc
index 5e82c15b..68b687f3 100644
--- a/src/prebuilt/wasm2c_atomicops_source_declarations.cc
+++ b/src/prebuilt/wasm2c_atomicops_source_declarations.cc
@@ -1,424 +1,430 @@
-const char* s_atomicops_source_declarations = R"w2c_template(#if defined(_MSC_VER)
+const char* s_atomicops_source_declarations = R"w2c_template(#include <stdatomic.h>
)w2c_template"
R"w2c_template(
-#include <intrin.h>
+#if WABT_BIG_ENDIAN
)w2c_template"
-R"w2c_template(
-// Use MSVC intrinsics
+R"w2c_template(#error "wasm2c atomics not supported on big endian"
)w2c_template"
-R"w2c_template(
-// For loads and stores, its not clear if we can rely on register width loads
+R"w2c_template(#endif
)w2c_template"
-R"w2c_template(// and stores to be atomic as reported here
+R"w2c_template(
+#ifndef WASM_RT_C11_AVAILABLE
)w2c_template"
-R"w2c_template(// https://learn.microsoft.com/en-us/windows/win32/sync/interlocked-variable-access?redirectedfrom=MSDN
+R"w2c_template(#error "C11 is required for Wasm threads and shared memory support"
)w2c_template"
-R"w2c_template(// or if we have to reuse other instrinsics
+R"w2c_template(#endif
)w2c_template"
-R"w2c_template(// https://stackoverflow.com/questions/42660091/atomic-load-in-c-with-msvc
+R"w2c_template(
+#define ATOMIC_ALIGNMENT_CHECK(addr, t1) \
)w2c_template"
-R"w2c_template(// We reuse other intrinsics to be cautious
+R"w2c_template( if (UNLIKELY(addr % sizeof(t1))) { \
)w2c_template"
-R"w2c_template(#define atomic_load_u8(a) _InterlockedOr8(a, 0)
+R"w2c_template( TRAP(UNALIGNED); \
)w2c_template"
-R"w2c_template(#define atomic_load_u16(a) _InterlockedOr16(a, 0)
+R"w2c_template( }
)w2c_template"
-R"w2c_template(#define atomic_load_u32(a) _InterlockedOr(a, 0)
+R"w2c_template(
+#define DEFINE_SHARED_LOAD(name, t1, t2, t3, force_read) \
)w2c_template"
-R"w2c_template(#define atomic_load_u64(a) _InterlockedOr64(a, 0)
+R"w2c_template( static inline t3 name(wasm_rt_shared_memory_t* mem, u64 addr) { \
)w2c_template"
-R"w2c_template(
-#define atomic_store_u8(a, v) _InterlockedExchange8(a, v)
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template(#define atomic_store_u16(a, v) _InterlockedExchange16(a, v)
+R"w2c_template( t1 result; \
)w2c_template"
-R"w2c_template(#define atomic_store_u32(a, v) _InterlockedExchange(a, v)
+R"w2c_template( result = atomic_load_explicit( \
)w2c_template"
-R"w2c_template(#define atomic_store_u64(a, v) _InterlockedExchange64(a, v)
+R"w2c_template( (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), \
)w2c_template"
-R"w2c_template(
-#define atomic_add_u8(a, v) _InterlockedExchangeAdd8(a, v)
+R"w2c_template( memory_order_relaxed); \
)w2c_template"
-R"w2c_template(#define atomic_add_u16(a, v) _InterlockedExchangeAdd16(a, v)
+R"w2c_template( force_read(result); \
)w2c_template"
-R"w2c_template(#define atomic_add_u32(a, v) _InterlockedExchangeAdd(a, v)
+R"w2c_template( return (t3)(t2)result; \
)w2c_template"
-R"w2c_template(#define atomic_add_u64(a, v) _InterlockedExchangeAdd64(a, v)
+R"w2c_template( }
)w2c_template"
R"w2c_template(
-#define atomic_sub_u8(a, v) _InterlockedExchangeAdd8(a, -(v))
+DEFINE_SHARED_LOAD(i32_load_shared, u32, u32, u32, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_sub_u16(a, v) _InterlockedExchangeAdd16(a, -(v))
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load_shared, u64, u64, u64, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_sub_u32(a, v) _InterlockedExchangeAdd(a, -(v))
+R"w2c_template(DEFINE_SHARED_LOAD(f32_load_shared, f32, f32, f32, FORCE_READ_FLOAT)
)w2c_template"
-R"w2c_template(#define atomic_sub_u64(a, v) _InterlockedExchangeAdd64(a, -(v))
+R"w2c_template(DEFINE_SHARED_LOAD(f64_load_shared, f64, f64, f64, FORCE_READ_FLOAT)
)w2c_template"
-R"w2c_template(
-#define atomic_and_u8(a, v) _InterlockedAnd8(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i32_load8_s_shared, s8, s32, u32, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_and_u16(a, v) _InterlockedAnd16(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load8_s_shared, s8, s64, u64, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_and_u32(a, v) _InterlockedAnd(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i32_load8_u_shared, u8, u32, u32, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_and_u64(a, v) _InterlockedAnd64(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load8_u_shared, u8, u64, u64, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(
-#define atomic_or_u8(a, v) _InterlockedOr8(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i32_load16_s_shared, s16, s32, u32, FORCE_READ_INT)
+)w2c_template"
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load16_s_shared, s16, s64, u64, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_or_u16(a, v) _InterlockedOr16(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i32_load16_u_shared, u16, u32, u32, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_or_u32(a, v) _InterlockedOr(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load16_u_shared, u16, u64, u64, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_or_u64(a, v) _InterlockedOr64(a, v)
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load32_s_shared, s32, s64, u64, FORCE_READ_INT)
+)w2c_template"
+R"w2c_template(DEFINE_SHARED_LOAD(i64_load32_u_shared, u32, u64, u64, FORCE_READ_INT)
)w2c_template"
R"w2c_template(
-#define atomic_xor_u8(a, v) _InterlockedXor8(a, v)
+#define DEFINE_SHARED_STORE(name, t1, t2) \
)w2c_template"
-R"w2c_template(#define atomic_xor_u16(a, v) _InterlockedXor16(a, v)
+R"w2c_template( static inline void name(wasm_rt_shared_memory_t* mem, u64 addr, t2 value) { \
)w2c_template"
-R"w2c_template(#define atomic_xor_u32(a, v) _InterlockedXor(a, v)
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template(#define atomic_xor_u64(a, v) _InterlockedXor64(a, v)
+R"w2c_template( t1 wrapped = (t1)value; \
)w2c_template"
-R"w2c_template(
-#define atomic_exchange_u8(a, v) _InterlockedExchange8(a, v)
+R"w2c_template( atomic_store_explicit( \
)w2c_template"
-R"w2c_template(#define atomic_exchange_u16(a, v) _InterlockedExchange16(a, v)
+R"w2c_template( (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wrapped, \
)w2c_template"
-R"w2c_template(#define atomic_exchange_u32(a, v) _InterlockedExchange(a, v)
+R"w2c_template( memory_order_relaxed); \
)w2c_template"
-R"w2c_template(#define atomic_exchange_u64(a, v) _InterlockedExchange64(a, v)
+R"w2c_template( }
)w2c_template"
R"w2c_template(
-// clang-format off
+DEFINE_SHARED_STORE(i32_store_shared, u32, u32)
)w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u8(a, expected_ptr, desired) _InterlockedCompareExchange8(a, desired, *(expected_ptr))
+R"w2c_template(DEFINE_SHARED_STORE(i64_store_shared, u64, u64)
)w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u16(a, expected_ptr, desired) _InterlockedCompareExchange16(a, desired, *(expected_ptr))
+R"w2c_template(DEFINE_SHARED_STORE(f32_store_shared, f32, f32)
)w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u32(a, expected_ptr, desired) _InterlockedCompareExchange(a, desired, *(expected_ptr))
+R"w2c_template(DEFINE_SHARED_STORE(f64_store_shared, f64, f64)
)w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u64(a, expected_ptr, desired) _InterlockedCompareExchange64(a, desired, *(expected_ptr))
+R"w2c_template(DEFINE_SHARED_STORE(i32_store8_shared, u8, u32)
)w2c_template"
-R"w2c_template(// clang-format on
+R"w2c_template(DEFINE_SHARED_STORE(i32_store16_shared, u16, u32)
)w2c_template"
-R"w2c_template(
-#define atomic_fence() _ReadWriteBarrier()
+R"w2c_template(DEFINE_SHARED_STORE(i64_store8_shared, u8, u64)
)w2c_template"
-R"w2c_template(
-#else
+R"w2c_template(DEFINE_SHARED_STORE(i64_store16_shared, u16, u64)
+)w2c_template"
+R"w2c_template(DEFINE_SHARED_STORE(i64_store32_shared, u32, u64)
)w2c_template"
R"w2c_template(
-// Use gcc/clang/icc intrinsics
+#define DEFINE_ATOMIC_LOAD(name, t1, t2, t3, force_read) \
)w2c_template"
-R"w2c_template(#define atomic_load_u8(a) __atomic_load_n((u8*)(a), __ATOMIC_SEQ_CST)
+R"w2c_template( static inline t3 name(wasm_rt_memory_t* mem, u64 addr) { \
)w2c_template"
-R"w2c_template(#define atomic_load_u16(a) __atomic_load_n((u16*)(a), __ATOMIC_SEQ_CST)
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template(#define atomic_load_u32(a) __atomic_load_n((u32*)(a), __ATOMIC_SEQ_CST)
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
-R"w2c_template(#define atomic_load_u64(a) __atomic_load_n((u64*)(a), __ATOMIC_SEQ_CST)
+R"w2c_template( t1 result; \
)w2c_template"
-R"w2c_template(
-#define atomic_store_u8(a, v) __atomic_store_n((u8*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( wasm_rt_memcpy(&result, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1)); \
)w2c_template"
-R"w2c_template(#define atomic_store_u16(a, v) __atomic_store_n((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( force_read(result); \
)w2c_template"
-R"w2c_template(#define atomic_store_u32(a, v) __atomic_store_n((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( return (t3)(t2)result; \
)w2c_template"
-R"w2c_template(#define atomic_store_u64(a, v) __atomic_store_n((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( } \
)w2c_template"
-R"w2c_template(
-#define atomic_add_u8(a, v) __atomic_fetch_add((u8*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( static inline t3 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr) { \
)w2c_template"
-R"w2c_template(#define atomic_add_u16(a, v) __atomic_fetch_add((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template(#define atomic_add_u32(a, v) __atomic_fetch_add((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
-R"w2c_template(#define atomic_add_u64(a, v) __atomic_fetch_add((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( t1 result; \
)w2c_template"
-R"w2c_template(
-#define atomic_sub_u8(a, v) __atomic_fetch_sub((u8*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( result = \
)w2c_template"
-R"w2c_template(#define atomic_sub_u16(a, v) __atomic_fetch_sub((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( atomic_load((_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1))); \
)w2c_template"
-R"w2c_template(#define atomic_sub_u32(a, v) __atomic_fetch_sub((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( force_read(result); \
)w2c_template"
-R"w2c_template(#define atomic_sub_u64(a, v) __atomic_fetch_sub((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( return (t3)(t2)result; \
)w2c_template"
-R"w2c_template(
-#define atomic_and_u8(a, v) __atomic_fetch_and((u8*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( }
)w2c_template"
-R"w2c_template(#define atomic_and_u16(a, v) __atomic_fetch_and((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(
+DEFINE_ATOMIC_LOAD(i32_atomic_load, u32, u32, u32, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_and_u32(a, v) __atomic_fetch_and((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load, u64, u64, u64, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_and_u64(a, v) __atomic_fetch_and((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(DEFINE_ATOMIC_LOAD(i32_atomic_load8_u, u8, u32, u32, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(
-#define atomic_or_u8(a, v) __atomic_fetch_or((u8*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load8_u, u8, u64, u64, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_or_u16(a, v) __atomic_fetch_or((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(DEFINE_ATOMIC_LOAD(i32_atomic_load16_u, u16, u32, u32, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_or_u32(a, v) __atomic_fetch_or((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load16_u, u16, u64, u64, FORCE_READ_INT)
)w2c_template"
-R"w2c_template(#define atomic_or_u64(a, v) __atomic_fetch_or((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load32_u, u32, u64, u64, FORCE_READ_INT)
)w2c_template"
R"w2c_template(
-#define atomic_xor_u8(a, v) __atomic_fetch_xor((u8*)(a), v, __ATOMIC_SEQ_CST)
+#define DEFINE_ATOMIC_STORE(name, t1, t2) \
)w2c_template"
-R"w2c_template(#define atomic_xor_u16(a, v) __atomic_fetch_xor((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( static inline void name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
)w2c_template"
-R"w2c_template(#define atomic_xor_u32(a, v) __atomic_fetch_xor((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template(#define atomic_xor_u64(a, v) __atomic_fetch_xor((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
-R"w2c_template(
-// clang-format off
+R"w2c_template( t1 wrapped = (t1)value; \
)w2c_template"
-R"w2c_template(#define atomic_exchange_u8(a, v) __atomic_exchange_n((u8*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t1)), &wrapped, sizeof(t1)); \
)w2c_template"
-R"w2c_template(#define atomic_exchange_u16(a, v) __atomic_exchange_n((u16*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( } \
)w2c_template"
-R"w2c_template(#define atomic_exchange_u32(a, v) __atomic_exchange_n((u32*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( static inline void name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
)w2c_template"
-R"w2c_template(#define atomic_exchange_u64(a, v) __atomic_exchange_n((u64*)(a), v, __ATOMIC_SEQ_CST)
+R"w2c_template( t2 value) { \
)w2c_template"
-R"w2c_template(// clang-format on
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template(
-#define __atomic_compare_exchange_helper(a, expected_ptr, desired) \
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
-R"w2c_template( (__atomic_compare_exchange_n(a, expected_ptr, desired, 0 /* is_weak */, \
+R"w2c_template( t1 wrapped = (t1)value; \
+)w2c_template"
+R"w2c_template( atomic_store((_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), \
)w2c_template"
-R"w2c_template( __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST), \
+R"w2c_template( wrapped); \
)w2c_template"
-R"w2c_template( *(expected_ptr))
+R"w2c_template( }
)w2c_template"
R"w2c_template(
-// clang-format off
+DEFINE_ATOMIC_STORE(i32_atomic_store, u32, u32)
+)w2c_template"
+R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store, u64, u64)
)w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u8(a, expected_ptr, desired) __atomic_compare_exchange_helper((u8*)(a), expected_ptr, desired)
+R"w2c_template(DEFINE_ATOMIC_STORE(i32_atomic_store8, u8, u32)
)w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u16(a, expected_ptr, desired) __atomic_compare_exchange_helper((u16*)(a), expected_ptr, desired)
+R"w2c_template(DEFINE_ATOMIC_STORE(i32_atomic_store16, u16, u32)
)w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u32(a, expected_ptr, desired) __atomic_compare_exchange_helper((u32*)(a), expected_ptr, desired)
+R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store8, u8, u64)
)w2c_template"
-R"w2c_template(#define atomic_compare_exchange_u64(a, expected_ptr, desired) __atomic_compare_exchange_helper((u64*)(a), expected_ptr, desired)
+R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store16, u16, u64)
)w2c_template"
-R"w2c_template(// clang-format on
+R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store32, u32, u64)
)w2c_template"
R"w2c_template(
-#define atomic_fence() __atomic_thread_fence(__ATOMIC_SEQ_CST)
+#define DEFINE_ATOMIC_RMW(name, opname, op, t1, t2) \
)w2c_template"
-R"w2c_template(
-#endif
+R"w2c_template( static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
)w2c_template"
-R"w2c_template(
-#define ATOMIC_ALIGNMENT_CHECK(addr, t1) \
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template( if (UNLIKELY(addr % sizeof(t1))) { \
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
-R"w2c_template( TRAP(UNALIGNED); \
+R"w2c_template( t1 wrapped = (t1)value; \
)w2c_template"
-R"w2c_template( }
+R"w2c_template( t1 ret; \
)w2c_template"
-R"w2c_template(
-#define DEFINE_ATOMIC_LOAD(name, t1, t2, t3, force_read) \
+R"w2c_template( wasm_rt_memcpy(&ret, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1)); \
+)w2c_template"
+R"w2c_template( ret = ret op wrapped; \
+)w2c_template"
+R"w2c_template( wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t1)), &ret, sizeof(t1)); \
)w2c_template"
-R"w2c_template( static inline t3 name(wasm_rt_memory_t* mem, u64 addr) { \
+R"w2c_template( return (t2)ret; \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t1); \
+R"w2c_template( } \
)w2c_template"
-R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
+R"w2c_template( static inline t2 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
)w2c_template"
-R"w2c_template( t1 result; \
+R"w2c_template( t2 value) { \
)w2c_template"
-R"w2c_template( wasm_rt_memcpy(&result, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1)); \
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template( result = atomic_load_##t1(MEM_ADDR(mem, addr, sizeof(t1))); \
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
-R"w2c_template( force_read(result); \
+R"w2c_template( t1 wrapped = (t1)value; \
)w2c_template"
-R"w2c_template( return (t3)(t2)result; \
+R"w2c_template( t1 ret = atomic_##opname( \
+)w2c_template"
+R"w2c_template( (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wrapped); \
+)w2c_template"
+R"w2c_template( return (t2)ret; \
)w2c_template"
R"w2c_template( }
)w2c_template"
R"w2c_template(
-DEFINE_ATOMIC_LOAD(i32_atomic_load, u32, u32, u32, FORCE_READ_INT)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_add_u, fetch_add, +, u8, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load, u64, u64, u64, FORCE_READ_INT)
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_add_u, fetch_add, +, u16, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_LOAD(i32_atomic_load8_u, u8, u32, u32, FORCE_READ_INT)
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_add, fetch_add, +, u32, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load8_u, u8, u64, u64, FORCE_READ_INT)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_add_u, fetch_add, +, u8, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_LOAD(i32_atomic_load16_u, u16, u32, u32, FORCE_READ_INT)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_add_u, fetch_add, +, u16, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load16_u, u16, u64, u64, FORCE_READ_INT)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_add_u, fetch_add, +, u32, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load32_u, u32, u64, u64, FORCE_READ_INT)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_add, fetch_add, +, u64, u64)
)w2c_template"
R"w2c_template(
-#define DEFINE_ATOMIC_STORE(name, t1, t2) \
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_sub_u, fetch_sub, -, u8, u32)
)w2c_template"
-R"w2c_template( static inline void name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_sub_u, fetch_sub, -, u16, u32)
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t1); \
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_sub, fetch_sub, -, u32, u32)
)w2c_template"
-R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_sub_u, fetch_sub, -, u8, u64)
)w2c_template"
-R"w2c_template( t1 wrapped = (t1)value; \
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_sub_u, fetch_sub, -, u16, u64)
)w2c_template"
-R"w2c_template( atomic_store_##t1(MEM_ADDR(mem, addr, sizeof(t1)), wrapped); \
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_sub_u, fetch_sub, -, u32, u64)
)w2c_template"
-R"w2c_template( }
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_sub, fetch_sub, -, u64, u64)
)w2c_template"
R"w2c_template(
-DEFINE_ATOMIC_STORE(i32_atomic_store, u32, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_and_u, fetch_and, &, u8, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store, u64, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_and_u, fetch_and, &, u16, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_STORE(i32_atomic_store8, u8, u32)
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_and, fetch_and, &, u32, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_STORE(i32_atomic_store16, u16, u32)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_and_u, fetch_and, &, u8, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store8, u8, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_and_u, fetch_and, &, u16, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store16, u16, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_and_u, fetch_and, &, u32, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store32, u32, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_and, fetch_and, &, u64, u64)
)w2c_template"
R"w2c_template(
-#define DEFINE_ATOMIC_RMW(name, op, t1, t2) \
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_or_u, fetch_or, |, u8, u32)
)w2c_template"
-R"w2c_template( static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_or_u, fetch_or, |, u16, u32)
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t1); \
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_or, fetch_or, |, u32, u32)
)w2c_template"
-R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_or_u, fetch_or, |, u8, u64)
)w2c_template"
-R"w2c_template( t1 wrapped = (t1)value; \
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_or_u, fetch_or, |, u16, u64)
)w2c_template"
-R"w2c_template( t1 ret = atomic_##op##_##t1(MEM_ADDR(mem, addr, sizeof(t1)), wrapped); \
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_or_u, fetch_or, |, u32, u64)
)w2c_template"
-R"w2c_template( return (t2)ret; \
-)w2c_template"
-R"w2c_template( }
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_or, fetch_or, |, u64, u64)
)w2c_template"
R"w2c_template(
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_add_u, add, u8, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_xor_u, fetch_xor, ^, u8, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_add_u, add, u16, u32)
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_xor_u, fetch_xor, ^, u16, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_add, add, u32, u32)
+R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_xor, fetch_xor, ^, u32, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_add_u, add, u8, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_xor_u, fetch_xor, ^, u8, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_add_u, add, u16, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_xor_u, fetch_xor, ^, u16, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_add_u, add, u32, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_xor_u, fetch_xor, ^, u32, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_add, add, u64, u64)
+R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_xor, fetch_xor, ^, u64, u64)
)w2c_template"
R"w2c_template(
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_sub_u, sub, u8, u32)
+#define DEFINE_ATOMIC_XCHG(name, opname, t1, t2) \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_sub_u, sub, u16, u32)
+R"w2c_template( static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_sub, sub, u32, u32)
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_sub_u, sub, u8, u64)
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_sub_u, sub, u16, u64)
+R"w2c_template( t1 wrapped = (t1)value; \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_sub_u, sub, u32, u64)
+R"w2c_template( t1 ret; \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_sub, sub, u64, u64)
+R"w2c_template( wasm_rt_memcpy(&ret, &mem->data[addr], sizeof(t1)); \
)w2c_template"
-R"w2c_template(
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_and_u, and, u8, u32)
+R"w2c_template( wasm_rt_memcpy(&mem->data[addr], &wrapped, sizeof(t1)); \
+)w2c_template"
+R"w2c_template( return (t2)ret; \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_and_u, and, u16, u32)
+R"w2c_template( } \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_and, and, u32, u32)
+R"w2c_template( static inline t2 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_and_u, and, u8, u64)
+R"w2c_template( t2 value) { \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_and_u, and, u16, u64)
+R"w2c_template( MEMCHECK(mem, addr, t1); \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_and_u, and, u32, u64)
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_and, and, u64, u64)
+R"w2c_template( t1 wrapped = (t1)value; \
+)w2c_template"
+R"w2c_template( t1 ret = atomic_##opname((_Atomic volatile t1*)&mem->data[addr], wrapped); \
+)w2c_template"
+R"w2c_template( return (t2)ret; \
+)w2c_template"
+R"w2c_template( }
)w2c_template"
R"w2c_template(
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_or_u, or, u8, u32)
+DEFINE_ATOMIC_XCHG(i32_atomic_rmw8_xchg_u, exchange, u8, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_or_u, or, u16, u32)
+R"w2c_template(DEFINE_ATOMIC_XCHG(i32_atomic_rmw16_xchg_u, exchange, u16, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_or, or, u32, u32)
+R"w2c_template(DEFINE_ATOMIC_XCHG(i32_atomic_rmw_xchg, exchange, u32, u32)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_or_u, or, u8, u64)
+R"w2c_template(DEFINE_ATOMIC_XCHG(i64_atomic_rmw8_xchg_u, exchange, u8, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_or_u, or, u16, u64)
+R"w2c_template(DEFINE_ATOMIC_XCHG(i64_atomic_rmw16_xchg_u, exchange, u16, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_or_u, or, u32, u64)
+R"w2c_template(DEFINE_ATOMIC_XCHG(i64_atomic_rmw32_xchg_u, exchange, u32, u64)
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_or, or, u64, u64)
+R"w2c_template(DEFINE_ATOMIC_XCHG(i64_atomic_rmw_xchg, exchange, u64, u64)
)w2c_template"
R"w2c_template(
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_xor_u, xor, u8, u32)
+#define DEFINE_ATOMIC_CMP_XCHG(name, t1, t2) \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_xor_u, xor, u16, u32)
+R"w2c_template( static inline t1 name(wasm_rt_memory_t* mem, u64 addr, t1 expected, \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_xor, xor, u32, u32)
+R"w2c_template( t1 replacement) { \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_xor_u, xor, u8, u64)
+R"w2c_template( MEMCHECK(mem, addr, t2); \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_xor_u, xor, u16, u64)
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t2); \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_xor_u, xor, u32, u64)
+R"w2c_template( t2 expected_wrapped = (t2)expected; \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_xor, xor, u64, u64)
+R"w2c_template( t2 replacement_wrapped = (t2)replacement; \
)w2c_template"
-R"w2c_template(
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_xchg_u, exchange, u8, u32)
+R"w2c_template( t2 ret; \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw16_xchg_u, exchange, u16, u32)
+R"w2c_template( wasm_rt_memcpy(&ret, MEM_ADDR(mem, addr, sizeof(t2)), sizeof(t2)); \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i32_atomic_rmw_xchg, exchange, u32, u32)
+R"w2c_template( if (ret == expected_wrapped) { \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw8_xchg_u, exchange, u8, u64)
+R"w2c_template( wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t2)), &replacement_wrapped, \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw16_xchg_u, exchange, u16, u64)
+R"w2c_template( sizeof(t2)); \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw32_xchg_u, exchange, u32, u64)
+R"w2c_template( } \
)w2c_template"
-R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_xchg, exchange, u64, u64)
+R"w2c_template( return (t1)expected_wrapped; \
)w2c_template"
-R"w2c_template(
-#define DEFINE_ATOMIC_CMP_XCHG(name, t1, t2) \
+R"w2c_template( } \
)w2c_template"
-R"w2c_template( static inline t1 name(wasm_rt_memory_t* mem, u64 addr, t1 expected, \
+R"w2c_template( static inline t1 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
)w2c_template"
-R"w2c_template( t1 replacement) { \
+R"w2c_template( t1 expected, t1 replacement) { \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t2); \
+R"w2c_template( MEMCHECK(mem, addr, t2); \
)w2c_template"
-R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t2); \
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t2); \
)w2c_template"
-R"w2c_template( t2 expected_wrapped = (t2)expected; \
+R"w2c_template( t2 expected_wrapped = (t2)expected; \
)w2c_template"
-R"w2c_template( t2 replacement_wrapped = (t2)replacement; \
+R"w2c_template( t2 replacement_wrapped = (t2)replacement; \
)w2c_template"
-R"w2c_template( t2 old = \
+R"w2c_template( atomic_compare_exchange_strong( \
)w2c_template"
-R"w2c_template( atomic_compare_exchange_##t2(MEM_ADDR(mem, addr, sizeof(t2)), \
+R"w2c_template( (_Atomic volatile t2*)MEM_ADDR(mem, addr, sizeof(t2)), \
)w2c_template"
-R"w2c_template( &expected_wrapped, replacement_wrapped); \
+R"w2c_template( &expected_wrapped, replacement_wrapped); \
)w2c_template"
-R"w2c_template( return (t1)old; \
+R"w2c_template( return (t1)expected_wrapped; \
)w2c_template"
R"w2c_template( }
)w2c_template"
@@ -437,4 +443,7 @@ R"w2c_template(DEFINE_ATOMIC_CMP_XCHG(i64_atomic_rmw32_cmpxchg_u, u64, u32);
)w2c_template"
R"w2c_template(DEFINE_ATOMIC_CMP_XCHG(i64_atomic_rmw_cmpxchg, u64, u64);
)w2c_template"
+R"w2c_template(
+#define atomic_fence() atomic_thread_fence(memory_order_seq_cst)
+)w2c_template"
;
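The prebuilt file above is the same template re-emitted as C++ raw-string literals, so the substance is easier to read in the template diff that follows. As a concrete reference point, DEFINE_SHARED_LOAD(i32_load_shared, u32, u32, u32, FORCE_READ_INT) expands to roughly the following (MEMCHECK, MEM_ADDR, and FORCE_READ_INT are helpers defined elsewhere in the wasm2c output):

    static inline u32 i32_load_shared(wasm_rt_shared_memory_t* mem, u64 addr) {
      MEMCHECK(mem, addr, u32);          /* bounds check against mem->size */
      u32 result;
      result = atomic_load_explicit(
          (_Atomic volatile u32*)MEM_ADDR(mem, addr, sizeof(u32)),
          memory_order_relaxed);         /* tear-free, imposes no ordering */
      FORCE_READ_INT(result);
      return (u32)(u32)result;
    }

memory_order_relaxed is deliberate: these helpers implement Wasm's plain (non-atomic) loads and stores on shared memory, which need per-access atomicity but no ordering guarantees; the atomic opcodes use atomic_load/atomic_store and the fetch_* family, whose sequentially consistent defaults provide the ordering the threads proposal requires.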
diff --git a/src/template/wasm2c_atomicops.declarations.c b/src/template/wasm2c_atomicops.declarations.c
index 976f7f95..5d319991 100644
--- a/src/template/wasm2c_atomicops.declarations.c
+++ b/src/template/wasm2c_atomicops.declarations.c
@@ -1,123 +1,11 @@
-#if defined(_MSC_VER)
-
-#include <intrin.h>
-
-// Use MSVC intrinsics
-
-// For loads and stores, its not clear if we can rely on register width loads
-// and stores to be atomic as reported here
-// https://learn.microsoft.com/en-us/windows/win32/sync/interlocked-variable-access?redirectedfrom=MSDN
-// or if we have to reuse other instrinsics
-// https://stackoverflow.com/questions/42660091/atomic-load-in-c-with-msvc
-// We reuse other intrinsics to be cautious
-#define atomic_load_u8(a) _InterlockedOr8(a, 0)
-#define atomic_load_u16(a) _InterlockedOr16(a, 0)
-#define atomic_load_u32(a) _InterlockedOr(a, 0)
-#define atomic_load_u64(a) _InterlockedOr64(a, 0)
-
-#define atomic_store_u8(a, v) _InterlockedExchange8(a, v)
-#define atomic_store_u16(a, v) _InterlockedExchange16(a, v)
-#define atomic_store_u32(a, v) _InterlockedExchange(a, v)
-#define atomic_store_u64(a, v) _InterlockedExchange64(a, v)
-
-#define atomic_add_u8(a, v) _InterlockedExchangeAdd8(a, v)
-#define atomic_add_u16(a, v) _InterlockedExchangeAdd16(a, v)
-#define atomic_add_u32(a, v) _InterlockedExchangeAdd(a, v)
-#define atomic_add_u64(a, v) _InterlockedExchangeAdd64(a, v)
-
-#define atomic_sub_u8(a, v) _InterlockedExchangeAdd8(a, -(v))
-#define atomic_sub_u16(a, v) _InterlockedExchangeAdd16(a, -(v))
-#define atomic_sub_u32(a, v) _InterlockedExchangeAdd(a, -(v))
-#define atomic_sub_u64(a, v) _InterlockedExchangeAdd64(a, -(v))
-
-#define atomic_and_u8(a, v) _InterlockedAnd8(a, v)
-#define atomic_and_u16(a, v) _InterlockedAnd16(a, v)
-#define atomic_and_u32(a, v) _InterlockedAnd(a, v)
-#define atomic_and_u64(a, v) _InterlockedAnd64(a, v)
-
-#define atomic_or_u8(a, v) _InterlockedOr8(a, v)
-#define atomic_or_u16(a, v) _InterlockedOr16(a, v)
-#define atomic_or_u32(a, v) _InterlockedOr(a, v)
-#define atomic_or_u64(a, v) _InterlockedOr64(a, v)
-
-#define atomic_xor_u8(a, v) _InterlockedXor8(a, v)
-#define atomic_xor_u16(a, v) _InterlockedXor16(a, v)
-#define atomic_xor_u32(a, v) _InterlockedXor(a, v)
-#define atomic_xor_u64(a, v) _InterlockedXor64(a, v)
-
-#define atomic_exchange_u8(a, v) _InterlockedExchange8(a, v)
-#define atomic_exchange_u16(a, v) _InterlockedExchange16(a, v)
-#define atomic_exchange_u32(a, v) _InterlockedExchange(a, v)
-#define atomic_exchange_u64(a, v) _InterlockedExchange64(a, v)
-
-// clang-format off
-#define atomic_compare_exchange_u8(a, expected_ptr, desired) _InterlockedCompareExchange8(a, desired, *(expected_ptr))
-#define atomic_compare_exchange_u16(a, expected_ptr, desired) _InterlockedCompareExchange16(a, desired, *(expected_ptr))
-#define atomic_compare_exchange_u32(a, expected_ptr, desired) _InterlockedCompareExchange(a, desired, *(expected_ptr))
-#define atomic_compare_exchange_u64(a, expected_ptr, desired) _InterlockedCompareExchange64(a, desired, *(expected_ptr))
-// clang-format on
-
-#define atomic_fence() _ReadWriteBarrier()
-
-#else
-
-// Use gcc/clang/icc intrinsics
-#define atomic_load_u8(a) __atomic_load_n((u8*)(a), __ATOMIC_SEQ_CST)
-#define atomic_load_u16(a) __atomic_load_n((u16*)(a), __ATOMIC_SEQ_CST)
-#define atomic_load_u32(a) __atomic_load_n((u32*)(a), __ATOMIC_SEQ_CST)
-#define atomic_load_u64(a) __atomic_load_n((u64*)(a), __ATOMIC_SEQ_CST)
-
-#define atomic_store_u8(a, v) __atomic_store_n((u8*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_store_u16(a, v) __atomic_store_n((u16*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_store_u32(a, v) __atomic_store_n((u32*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_store_u64(a, v) __atomic_store_n((u64*)(a), v, __ATOMIC_SEQ_CST)
-
-#define atomic_add_u8(a, v) __atomic_fetch_add((u8*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_add_u16(a, v) __atomic_fetch_add((u16*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_add_u32(a, v) __atomic_fetch_add((u32*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_add_u64(a, v) __atomic_fetch_add((u64*)(a), v, __ATOMIC_SEQ_CST)
-
-#define atomic_sub_u8(a, v) __atomic_fetch_sub((u8*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_sub_u16(a, v) __atomic_fetch_sub((u16*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_sub_u32(a, v) __atomic_fetch_sub((u32*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_sub_u64(a, v) __atomic_fetch_sub((u64*)(a), v, __ATOMIC_SEQ_CST)
-
-#define atomic_and_u8(a, v) __atomic_fetch_and((u8*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_and_u16(a, v) __atomic_fetch_and((u16*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_and_u32(a, v) __atomic_fetch_and((u32*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_and_u64(a, v) __atomic_fetch_and((u64*)(a), v, __ATOMIC_SEQ_CST)
-
-#define atomic_or_u8(a, v) __atomic_fetch_or((u8*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_or_u16(a, v) __atomic_fetch_or((u16*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_or_u32(a, v) __atomic_fetch_or((u32*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_or_u64(a, v) __atomic_fetch_or((u64*)(a), v, __ATOMIC_SEQ_CST)
-
-#define atomic_xor_u8(a, v) __atomic_fetch_xor((u8*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_xor_u16(a, v) __atomic_fetch_xor((u16*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_xor_u32(a, v) __atomic_fetch_xor((u32*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_xor_u64(a, v) __atomic_fetch_xor((u64*)(a), v, __ATOMIC_SEQ_CST)
-
-// clang-format off
-#define atomic_exchange_u8(a, v) __atomic_exchange_n((u8*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_exchange_u16(a, v) __atomic_exchange_n((u16*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_exchange_u32(a, v) __atomic_exchange_n((u32*)(a), v, __ATOMIC_SEQ_CST)
-#define atomic_exchange_u64(a, v) __atomic_exchange_n((u64*)(a), v, __ATOMIC_SEQ_CST)
-// clang-format on
-
-#define __atomic_compare_exchange_helper(a, expected_ptr, desired) \
- (__atomic_compare_exchange_n(a, expected_ptr, desired, 0 /* is_weak */, \
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST), \
- *(expected_ptr))
-
-// clang-format off
-#define atomic_compare_exchange_u8(a, expected_ptr, desired) __atomic_compare_exchange_helper((u8*)(a), expected_ptr, desired)
-#define atomic_compare_exchange_u16(a, expected_ptr, desired) __atomic_compare_exchange_helper((u16*)(a), expected_ptr, desired)
-#define atomic_compare_exchange_u32(a, expected_ptr, desired) __atomic_compare_exchange_helper((u32*)(a), expected_ptr, desired)
-#define atomic_compare_exchange_u64(a, expected_ptr, desired) __atomic_compare_exchange_helper((u64*)(a), expected_ptr, desired)
-// clang-format on
-
-#define atomic_fence() __atomic_thread_fence(__ATOMIC_SEQ_CST)
+#include <stdatomic.h>
+#if WABT_BIG_ENDIAN
+#error "wasm2c atomics not supported on big endian"
+#endif
+
+#ifndef WASM_RT_C11_AVAILABLE
+#error "C11 is required for Wasm threads and shared memory support"
#endif
#define ATOMIC_ALIGNMENT_CHECK(addr, t1) \
@@ -125,15 +13,68 @@
TRAP(UNALIGNED); \
}
-#define DEFINE_ATOMIC_LOAD(name, t1, t2, t3, force_read) \
- static inline t3 name(wasm_rt_memory_t* mem, u64 addr) { \
- MEMCHECK(mem, addr, t1); \
- ATOMIC_ALIGNMENT_CHECK(addr, t1); \
- t1 result; \
- wasm_rt_memcpy(&result, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1)); \
- result = atomic_load_##t1(MEM_ADDR(mem, addr, sizeof(t1))); \
- force_read(result); \
- return (t3)(t2)result; \
+#define DEFINE_SHARED_LOAD(name, t1, t2, t3, force_read) \
+ static inline t3 name(wasm_rt_shared_memory_t* mem, u64 addr) { \
+ MEMCHECK(mem, addr, t1); \
+ t1 result; \
+ result = atomic_load_explicit( \
+ (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), \
+ memory_order_relaxed); \
+ force_read(result); \
+ return (t3)(t2)result; \
+ }
+
+DEFINE_SHARED_LOAD(i32_load_shared, u32, u32, u32, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i64_load_shared, u64, u64, u64, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(f32_load_shared, f32, f32, f32, FORCE_READ_FLOAT)
+DEFINE_SHARED_LOAD(f64_load_shared, f64, f64, f64, FORCE_READ_FLOAT)
+DEFINE_SHARED_LOAD(i32_load8_s_shared, s8, s32, u32, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i64_load8_s_shared, s8, s64, u64, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i32_load8_u_shared, u8, u32, u32, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i64_load8_u_shared, u8, u64, u64, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i32_load16_s_shared, s16, s32, u32, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i64_load16_s_shared, s16, s64, u64, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i32_load16_u_shared, u16, u32, u32, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i64_load16_u_shared, u16, u64, u64, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i64_load32_s_shared, s32, s64, u64, FORCE_READ_INT)
+DEFINE_SHARED_LOAD(i64_load32_u_shared, u32, u64, u64, FORCE_READ_INT)
+
+#define DEFINE_SHARED_STORE(name, t1, t2) \
+ static inline void name(wasm_rt_shared_memory_t* mem, u64 addr, t2 value) { \
+ MEMCHECK(mem, addr, t1); \
+ t1 wrapped = (t1)value; \
+ atomic_store_explicit( \
+ (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wrapped, \
+ memory_order_relaxed); \
+ }
+
+DEFINE_SHARED_STORE(i32_store_shared, u32, u32)
+DEFINE_SHARED_STORE(i64_store_shared, u64, u64)
+DEFINE_SHARED_STORE(f32_store_shared, f32, f32)
+DEFINE_SHARED_STORE(f64_store_shared, f64, f64)
+DEFINE_SHARED_STORE(i32_store8_shared, u8, u32)
+DEFINE_SHARED_STORE(i32_store16_shared, u16, u32)
+DEFINE_SHARED_STORE(i64_store8_shared, u8, u64)
+DEFINE_SHARED_STORE(i64_store16_shared, u16, u64)
+DEFINE_SHARED_STORE(i64_store32_shared, u32, u64)
+
+#define DEFINE_ATOMIC_LOAD(name, t1, t2, t3, force_read) \
+ static inline t3 name(wasm_rt_memory_t* mem, u64 addr) { \
+ MEMCHECK(mem, addr, t1); \
+ ATOMIC_ALIGNMENT_CHECK(addr, t1); \
+ t1 result; \
+ wasm_rt_memcpy(&result, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1)); \
+ force_read(result); \
+ return (t3)(t2)result; \
+ } \
+ static inline t3 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr) { \
+ MEMCHECK(mem, addr, t1); \
+ ATOMIC_ALIGNMENT_CHECK(addr, t1); \
+ t1 result; \
+ result = \
+ atomic_load((_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1))); \
+ force_read(result); \
+ return (t3)(t2)result; \
}
DEFINE_ATOMIC_LOAD(i32_atomic_load, u32, u32, u32, FORCE_READ_INT)
@@ -144,12 +85,20 @@ DEFINE_ATOMIC_LOAD(i32_atomic_load16_u, u16, u32, u32, FORCE_READ_INT)
DEFINE_ATOMIC_LOAD(i64_atomic_load16_u, u16, u64, u64, FORCE_READ_INT)
DEFINE_ATOMIC_LOAD(i64_atomic_load32_u, u32, u64, u64, FORCE_READ_INT)
-#define DEFINE_ATOMIC_STORE(name, t1, t2) \
- static inline void name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
- MEMCHECK(mem, addr, t1); \
- ATOMIC_ALIGNMENT_CHECK(addr, t1); \
- t1 wrapped = (t1)value; \
- atomic_store_##t1(MEM_ADDR(mem, addr, sizeof(t1)), wrapped); \
+#define DEFINE_ATOMIC_STORE(name, t1, t2) \
+ static inline void name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
+ MEMCHECK(mem, addr, t1); \
+ ATOMIC_ALIGNMENT_CHECK(addr, t1); \
+ t1 wrapped = (t1)value; \
+ wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t1)), &wrapped, sizeof(t1)); \
+ } \
+ static inline void name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
+ t2 value) { \
+ MEMCHECK(mem, addr, t1); \
+ ATOMIC_ALIGNMENT_CHECK(addr, t1); \
+ t1 wrapped = (t1)value; \
+ atomic_store((_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), \
+ wrapped); \
}
DEFINE_ATOMIC_STORE(i32_atomic_store, u32, u32)
@@ -160,74 +109,119 @@ DEFINE_ATOMIC_STORE(i64_atomic_store8, u8, u64)
DEFINE_ATOMIC_STORE(i64_atomic_store16, u16, u64)
DEFINE_ATOMIC_STORE(i64_atomic_store32, u32, u64)
-#define DEFINE_ATOMIC_RMW(name, op, t1, t2) \
- static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
- MEMCHECK(mem, addr, t1); \
- ATOMIC_ALIGNMENT_CHECK(addr, t1); \
- t1 wrapped = (t1)value; \
- t1 ret = atomic_##op##_##t1(MEM_ADDR(mem, addr, sizeof(t1)), wrapped); \
- return (t2)ret; \
+#define DEFINE_ATOMIC_RMW(name, opname, op, t1, t2) \
+ static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
+ MEMCHECK(mem, addr, t1); \
+ ATOMIC_ALIGNMENT_CHECK(addr, t1); \
+ t1 wrapped = (t1)value; \
+ t1 ret; \
+ wasm_rt_memcpy(&ret, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1)); \
+ ret = ret op wrapped; \
+ wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t1)), &ret, sizeof(t1)); \
+ return (t2)ret; \
+ } \
+ static inline t2 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
+ t2 value) { \
+ MEMCHECK(mem, addr, t1); \
+ ATOMIC_ALIGNMENT_CHECK(addr, t1); \
+ t1 wrapped = (t1)value; \
+ t1 ret = atomic_##opname( \
+ (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wrapped); \
+ return (t2)ret; \
+ }
+
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_add_u, fetch_add, +, u8, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw16_add_u, fetch_add, +, u16, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw_add, fetch_add, +, u32, u32)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw8_add_u, fetch_add, +, u8, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw16_add_u, fetch_add, +, u16, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw32_add_u, fetch_add, +, u32, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw_add, fetch_add, +, u64, u64)
+
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_sub_u, fetch_sub, -, u8, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw16_sub_u, fetch_sub, -, u16, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw_sub, fetch_sub, -, u32, u32)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw8_sub_u, fetch_sub, -, u8, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw16_sub_u, fetch_sub, -, u16, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw32_sub_u, fetch_sub, -, u32, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw_sub, fetch_sub, -, u64, u64)
+
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_and_u, fetch_and, &, u8, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw16_and_u, fetch_and, &, u16, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw_and, fetch_and, &, u32, u32)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw8_and_u, fetch_and, &, u8, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw16_and_u, fetch_and, &, u16, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw32_and_u, fetch_and, &, u32, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw_and, fetch_and, &, u64, u64)
+
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_or_u, fetch_or, |, u8, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw16_or_u, fetch_or, |, u16, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw_or, fetch_or, |, u32, u32)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw8_or_u, fetch_or, |, u8, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw16_or_u, fetch_or, |, u16, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw32_or_u, fetch_or, |, u32, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw_or, fetch_or, |, u64, u64)
+
+DEFINE_ATOMIC_RMW(i32_atomic_rmw8_xor_u, fetch_xor, ^, u8, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw16_xor_u, fetch_xor, ^, u16, u32)
+DEFINE_ATOMIC_RMW(i32_atomic_rmw_xor, fetch_xor, ^, u32, u32)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw8_xor_u, fetch_xor, ^, u8, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw16_xor_u, fetch_xor, ^, u16, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw32_xor_u, fetch_xor, ^, u32, u64)
+DEFINE_ATOMIC_RMW(i64_atomic_rmw_xor, fetch_xor, ^, u64, u64)
+
+#define DEFINE_ATOMIC_XCHG(name, opname, t1, t2) \
+ static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
+ MEMCHECK(mem, addr, t1); \
+ ATOMIC_ALIGNMENT_CHECK(addr, t1); \
+ t1 wrapped = (t1)value; \
+ t1 ret; \
+ wasm_rt_memcpy(&ret, &mem->data[addr], sizeof(t1)); \
+ wasm_rt_memcpy(&mem->data[addr], &wrapped, sizeof(t1)); \
+ return (t2)ret; \
+ } \
+ static inline t2 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
+ t2 value) { \
+ MEMCHECK(mem, addr, t1); \
+ ATOMIC_ALIGNMENT_CHECK(addr, t1); \
+ t1 wrapped = (t1)value; \
+ t1 ret = atomic_##opname((_Atomic volatile t1*)&mem->data[addr], wrapped); \
+ return (t2)ret; \
}
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_add_u, add, u8, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw16_add_u, add, u16, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw_add, add, u32, u32)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw8_add_u, add, u8, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw16_add_u, add, u16, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw32_add_u, add, u32, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw_add, add, u64, u64)
-
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_sub_u, sub, u8, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw16_sub_u, sub, u16, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw_sub, sub, u32, u32)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw8_sub_u, sub, u8, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw16_sub_u, sub, u16, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw32_sub_u, sub, u32, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw_sub, sub, u64, u64)
-
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_and_u, and, u8, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw16_and_u, and, u16, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw_and, and, u32, u32)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw8_and_u, and, u8, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw16_and_u, and, u16, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw32_and_u, and, u32, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw_and, and, u64, u64)
-
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_or_u, or, u8, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw16_or_u, or, u16, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw_or, or, u32, u32)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw8_or_u, or, u8, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw16_or_u, or, u16, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw32_or_u, or, u32, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw_or, or, u64, u64)
-
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_xor_u, xor, u8, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw16_xor_u, xor, u16, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw_xor, xor, u32, u32)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw8_xor_u, xor, u8, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw16_xor_u, xor, u16, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw32_xor_u, xor, u32, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw_xor, xor, u64, u64)
-
-DEFINE_ATOMIC_RMW(i32_atomic_rmw8_xchg_u, exchange, u8, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw16_xchg_u, exchange, u16, u32)
-DEFINE_ATOMIC_RMW(i32_atomic_rmw_xchg, exchange, u32, u32)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw8_xchg_u, exchange, u8, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw16_xchg_u, exchange, u16, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw32_xchg_u, exchange, u32, u64)
-DEFINE_ATOMIC_RMW(i64_atomic_rmw_xchg, exchange, u64, u64)
-
-#define DEFINE_ATOMIC_CMP_XCHG(name, t1, t2) \
- static inline t1 name(wasm_rt_memory_t* mem, u64 addr, t1 expected, \
- t1 replacement) { \
- MEMCHECK(mem, addr, t2); \
- ATOMIC_ALIGNMENT_CHECK(addr, t2); \
- t2 expected_wrapped = (t2)expected; \
- t2 replacement_wrapped = (t2)replacement; \
- t2 old = \
- atomic_compare_exchange_##t2(MEM_ADDR(mem, addr, sizeof(t2)), \
- &expected_wrapped, replacement_wrapped); \
- return (t1)old; \
+DEFINE_ATOMIC_XCHG(i32_atomic_rmw8_xchg_u, exchange, u8, u32)
+DEFINE_ATOMIC_XCHG(i32_atomic_rmw16_xchg_u, exchange, u16, u32)
+DEFINE_ATOMIC_XCHG(i32_atomic_rmw_xchg, exchange, u32, u32)
+DEFINE_ATOMIC_XCHG(i64_atomic_rmw8_xchg_u, exchange, u8, u64)
+DEFINE_ATOMIC_XCHG(i64_atomic_rmw16_xchg_u, exchange, u16, u64)
+DEFINE_ATOMIC_XCHG(i64_atomic_rmw32_xchg_u, exchange, u32, u64)
+DEFINE_ATOMIC_XCHG(i64_atomic_rmw_xchg, exchange, u64, u64)
+
+#define DEFINE_ATOMIC_CMP_XCHG(name, t1, t2) \
+ static inline t1 name(wasm_rt_memory_t* mem, u64 addr, t1 expected, \
+ t1 replacement) { \
+ MEMCHECK(mem, addr, t2); \
+ ATOMIC_ALIGNMENT_CHECK(addr, t2); \
+ t2 expected_wrapped = (t2)expected; \
+ t2 replacement_wrapped = (t2)replacement; \
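+    /* Unshared memory: plain compare-and-swap, returning the loaded value. */ \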
+ t2 ret; \
+ wasm_rt_memcpy(&ret, MEM_ADDR(mem, addr, sizeof(t2)), sizeof(t2)); \
+ if (ret == expected_wrapped) { \
+ wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t2)), &replacement_wrapped, \
+ sizeof(t2)); \
+ } \
+    return (t1)ret;                                                          \
+ } \
+ static inline t1 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
+ t1 expected, t1 replacement) { \
+ MEMCHECK(mem, addr, t2); \
+ ATOMIC_ALIGNMENT_CHECK(addr, t2); \
+ t2 expected_wrapped = (t2)expected; \
+ t2 replacement_wrapped = (t2)replacement; \
+ atomic_compare_exchange_strong( \
+ (_Atomic volatile t2*)MEM_ADDR(mem, addr, sizeof(t2)), \
+ &expected_wrapped, replacement_wrapped); \
+ return (t1)expected_wrapped; \
}
DEFINE_ATOMIC_CMP_XCHG(i32_atomic_rmw8_cmpxchg_u, u32, u8);
@@ -237,3 +231,5 @@ DEFINE_ATOMIC_CMP_XCHG(i64_atomic_rmw8_cmpxchg_u, u64, u8);
DEFINE_ATOMIC_CMP_XCHG(i64_atomic_rmw16_cmpxchg_u, u64, u16);
DEFINE_ATOMIC_CMP_XCHG(i64_atomic_rmw32_cmpxchg_u, u64, u32);
DEFINE_ATOMIC_CMP_XCHG(i64_atomic_rmw_cmpxchg, u64, u64);
+
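+/* Wasm's atomic.fence is implemented as a sequentially consistent fence. */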
+#define atomic_fence() atomic_thread_fence(memory_order_seq_cst)
diff --git a/test/run-spec-wasm2c.py b/test/run-spec-wasm2c.py
index 0ab42a04..6a59471d 100755
--- a/test/run-spec-wasm2c.py
+++ b/test/run-spec-wasm2c.py
@@ -445,20 +445,26 @@ class CWriter(object):
raise Error('Unexpected action type: %s' % type_)
-def Compile(cc, c_filename, out_dir, *cflags):
+def Compile(cc, c_filename, out_dir, use_c11, *cflags):
if IS_WINDOWS:
ext = '.obj'
else:
ext = '.o'
o_filename = utils.ChangeDir(utils.ChangeExt(c_filename, ext), out_dir)
args = list(cflags)
+
if IS_WINDOWS:
- args += ['/nologo', '/MDd', '/c', c_filename, '/Fo' + o_filename]
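+        # MSVC implements C11 atomics only behind /experimental:c11atomics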
+ cstd_flag = ['/std:c11', '/experimental:c11atomics'] if use_c11 else []
+ args += cstd_flag + ['/nologo', '/MDd', '/c', c_filename, '/Fo' + o_filename]
else:
# See "Compiling the wasm2c output" section of wasm2c/README.md
# When compiling with -O2, GCC and clang require '-fno-optimize-sibling-calls'
# and '-frounding-math' to maintain conformance with the spec tests
# (GCC also requires '-fsignaling-nans')
+ if use_c11:
+ args.append('-std=c11')
+ else:
+ args.append('-std=c99')
args += ['-c', c_filename, '-o', o_filename, '-O2',
'-Wall', '-Werror', '-Wno-unused',
'-Wno-array-bounds',
@@ -467,7 +473,7 @@ def Compile(cc, c_filename, out_dir, *cflags):
'-Wno-infinite-recursion',
'-fno-optimize-sibling-calls',
'-frounding-math', '-fsignaling-nans',
- '-std=c99', '-D_DEFAULT_SOURCE']
+ '-D_DEFAULT_SOURCE']
# Use RunWithArgsForStdout and discard stdout because cl.exe
# unconditionally prints the name of input files on stdout
# and we don't want that to be part of our stdout.
@@ -602,6 +608,8 @@ def main(args):
return SKIPPED
cflags.append('-DSUPPORT_MEMORY64=1')
+ use_c11 = options.enable_threads
+
for i, wasm_filename in enumerate(cwriter.GetModuleFilenames()):
wasm_filename = os.path.join(out_dir, wasm_filename)
c_filename_input = utils.ChangeExt(wasm_filename, '.c')
@@ -616,7 +624,7 @@ def main(args):
wasm2c.RunWithArgs(wasm_filename, '-o', c_filename_input, *args)
if options.compile:
for j, c_filename in enumerate(c_filenames):
- o_filenames.append(Compile(cc, c_filename, out_dir, *cflags))
+ o_filenames.append(Compile(cc, c_filename, out_dir, use_c11, *cflags))
cwriter.Write()
main_filename = utils.ChangeExt(json_file_path, '-main.c')
@@ -624,16 +632,17 @@ def main(args):
out_main_file.write(output.getvalue())
if options.compile:
- # Compile wasm-rt-impl.
- wasm_rt_impl_c = os.path.join(options.wasmrt_dir, 'wasm-rt-impl.c')
- o_filenames.append(Compile(cc, wasm_rt_impl_c, out_dir, *cflags))
+ # Compile runtime code
+ source_files = [
+ main_filename,
+ os.path.join(options.wasmrt_dir, 'wasm-rt-impl.c'),
+ os.path.join(options.wasmrt_dir, 'wasm-rt-exceptions-impl.c'),
+ os.path.join(options.wasmrt_dir, 'wasm-rt-mem-impl.c'),
+ ]
- # Compile wasm-rt-exceptions.
- wasm_rt_exceptions_c = os.path.join(options.wasmrt_dir, 'wasm-rt-exceptions-impl.c')
- o_filenames.append(Compile(cc, wasm_rt_exceptions_c, out_dir, *cflags))
+ for f in source_files:
+ o_filenames.append(Compile(cc, f, out_dir, use_c11, *cflags))
- # Compile and link -main test run entry point
- o_filenames.append(Compile(cc, main_filename, out_dir, *cflags))
if IS_WINDOWS:
exe_ext = '.exe'
libs = []
diff --git a/test/wasm2c/spec/threads/atomic.txt b/test/wasm2c/spec/threads/atomic.txt
index 256adc68..d9880c71 100644
--- a/test/wasm2c/spec/threads/atomic.txt
+++ b/test/wasm2c/spec/threads/atomic.txt
@@ -1,6 +1,8 @@
;;; TOOL: run-spec-wasm2c
;;; ARGS*: --enable-threads
-;; atomic operations --- This is a subset of the full test suite, that is currently supported by wasm2c
+;; atomic operations --- This is a subset of the full test suite, taken from
+;; the upstream-rebuild branch of the threads proposal repo (pending the
+;; future merge of that branch upstream).
(module
(memory 1 1 shared)
@@ -324,21 +326,41 @@
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111111))
(invoke "init" (i64.const 0x1111111111111111))
+(assert_return (invoke "i32.atomic.rmw8.cmpxchg_u" (i32.const 0) (i32.const 0x11111111) (i32.const 0xcdcdcdcd)) (i32.const 0x11))
+(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x11111111111111cd))
+
+(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw16.cmpxchg_u" (i32.const 0) (i32.const 0) (i32.const 0xcafecafe)) (i32.const 0x1111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111111))
(invoke "init" (i64.const 0x1111111111111111))
+(assert_return (invoke "i32.atomic.rmw16.cmpxchg_u" (i32.const 0) (i32.const 0x11111111) (i32.const 0xcafecafe)) (i32.const 0x1111))
+(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x111111111111cafe))
+
+(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw8.cmpxchg_u" (i32.const 0) (i64.const 0) (i64.const 0x4242424242424242)) (i64.const 0x11))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111111))
(invoke "init" (i64.const 0x1111111111111111))
+(assert_return (invoke "i64.atomic.rmw8.cmpxchg_u" (i32.const 0) (i64.const 0x1111111111111111) (i64.const 0x4242424242424242)) (i64.const 0x11))
+(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111142))
+
+(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw16.cmpxchg_u" (i32.const 0) (i64.const 0) (i64.const 0xbeefbeefbeefbeef)) (i64.const 0x1111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111111))
(invoke "init" (i64.const 0x1111111111111111))
+(assert_return (invoke "i64.atomic.rmw16.cmpxchg_u" (i32.const 0) (i64.const 0x1111111111111111) (i64.const 0xbeefbeefbeefbeef)) (i64.const 0x1111))
+(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x111111111111beef))
+
+(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw32.cmpxchg_u" (i32.const 0) (i64.const 0) (i64.const 0xcabba6e5cabba6e5)) (i64.const 0x11111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111111))
+(invoke "init" (i64.const 0x1111111111111111))
+(assert_return (invoke "i64.atomic.rmw32.cmpxchg_u" (i32.const 0) (i64.const 0x1111111111111111) (i64.const 0xcabba6e5cabba6e5)) (i64.const 0x11111111))
+(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x11111111cabba6e5))
+
;; *.atomic.rmw*.cmpxchg (compare true)
(invoke "init" (i64.const 0x1111111111111111))
@@ -422,26 +444,11 @@
(memory 1 1 shared)
(func (export "init") (param $value i64) (i64.store (i32.const 0) (local.get $value)))
-
-;; ;; (func (export "memory.atomic.notify") (param $addr i32) (param $count i32) (result i32)
-;; ;; (memory.atomic.notify (local.get 0) (local.get 1)))
-;; ;; (func (export "memory.atomic.wait32") (param $addr i32) (param $expected i32) (param $timeout i64) (result i32)
-;; ;; (memory.atomic.wait32 (local.get 0) (local.get 1) (local.get 2)))
-;; ;; (func (export "memory.atomic.wait64") (param $addr i32) (param $expected i64) (param $timeout i64) (result i32)
-;; ;; (memory.atomic.wait64 (local.get 0) (local.get 1) (local.get 2)))
)
-;; (invoke "init" (i64.const 0xffffffffffff))
-;; ;; (assert_return (invoke "memory.atomic.wait32" (i32.const 0) (i32.const 0) (i64.const 0)) (i32.const 1))
-;; ;; (assert_return (invoke "memory.atomic.wait64" (i32.const 0) (i64.const 0) (i64.const 0)) (i32.const 1))
-;; ;; (assert_return (invoke "memory.atomic.notify" (i32.const 0) (i32.const 0)) (i32.const 0))
-
;; unshared memory is OK
(module
(memory 1 1)
- ;; (func (drop (memory.atomic.notify (i32.const 0) (i32.const 0))))
- ;; (func (drop (memory.atomic.wait32 (i32.const 0) (i32.const 0) (i64.const 0))))
- ;; (func (drop (memory.atomic.wait64 (i32.const 0) (i64.const 0) (i64.const 0))))
(func (drop (i32.atomic.load (i32.const 0))))
(func (drop (i64.atomic.load (i32.const 0))))
(func (drop (i32.atomic.load16_u (i32.const 0))))
@@ -489,10 +496,14 @@
(func (drop (i64.atomic.rmw32.cmpxchg_u (i32.const 0) (i64.const 0) (i64.const 0))))
)
+;; atomic.fence: no memory is ok
+(module
+ (func (export "fence") (atomic.fence))
+)
+
+(assert_return (invoke "fence"))
+
;; Fails with no memory
-;; (assert_invalid (module (func (drop (memory.atomic.notify (i32.const 0) (i32.const 0))))) "unknown memory")
-;; (assert_invalid (module (func (drop (memory.atomic.wait32 (i32.const 0) (i32.const 0) (i64.const 0))))) "unknown memory")
-;; (assert_invalid (module (func (drop (memory.atomic.wait64 (i32.const 0) (i64.const 0) (i64.const 0))))) "unknown memory")
(assert_invalid (module (func (drop (i32.atomic.load (i32.const 0))))) "unknown memory")
(assert_invalid (module (func (drop (i64.atomic.load (i32.const 0))))) "unknown memory")
(assert_invalid (module (func (drop (i32.atomic.load16_u (i32.const 0))))) "unknown memory")
@@ -539,5 +550,5 @@
(assert_invalid (module (func (drop (i64.atomic.rmw16.cmpxchg_u (i32.const 0) (i64.const 0) (i64.const 0))))) "unknown memory")
(assert_invalid (module (func (drop (i64.atomic.rmw32.cmpxchg_u (i32.const 0) (i64.const 0) (i64.const 0))))) "unknown memory")
(;; STDOUT ;;;
-184/184 tests passed.
+195/195 tests passed.
;;; STDOUT ;;)
diff --git a/wasm2c/README.md b/wasm2c/README.md
index e0739bfb..9cdbdc27 100644
--- a/wasm2c/README.md
+++ b/wasm2c/README.md
@@ -11,6 +11,9 @@ $ wasm2c test.wasm -o test.c
$ wasm2c test.wasm --no-debug-names -o test.c
```
+The C code produced targets the C99 standard. If, however, the Wasm module uses
+Wasm threads/atomics, the produced code targets the C11 standard and must be
+compiled accordingly (e.g., with `-std=c11`).
+
## Tutorial: .wat -> .wasm -> .c
Let's look at a simple example of a factorial function.
@@ -255,11 +258,28 @@ specified by the module, or `0xffffffff` if there is no limit.
```c
typedef struct {
uint8_t* data;
- uint32_t pages, max_pages;
- uint32_t size;
+ uint64_t pages, max_pages;
+ uint64_t size;
+ bool is64;
} wasm_rt_memory_t;
```
+This is followed by the definition of a shared memory instance. This is similar
+to a regular memory instance, but represents memory that can be used by
+multiple Wasm instances, and thus enforces a minimum memory order on its
+operations. The shared memory definition has one additional member, `mem_lock`,
+a lock used to keep memory grow operations thread-safe.
+
+```c
+typedef struct {
+ _Atomic volatile uint8_t* data;
+ uint64_t pages, max_pages;
+ uint64_t size;
+ bool is64;
+ mtx_t mem_lock;
+} wasm_rt_shared_memory_t;
+```
+
Next is the definition of a table instance. The `data` field is a pointer to
`size` elements. Like a memory instance, `size` is the current size of a table,
and `max_size` is the maximum size of the table, or `0xffffffff` if there is no
@@ -290,6 +310,9 @@ const char* wasm_rt_strerror(wasm_rt_trap_t trap);
void wasm_rt_allocate_memory(wasm_rt_memory_t*, uint32_t initial_pages, uint32_t max_pages, bool is64);
uint32_t wasm_rt_grow_memory(wasm_rt_memory_t*, uint32_t pages);
void wasm_rt_free_memory(wasm_rt_memory_t*);
+void wasm_rt_allocate_memory_shared(wasm_rt_shared_memory_t*, uint64_t initial_pages, uint64_t max_pages, bool is64);
+uint64_t wasm_rt_grow_memory_shared(wasm_rt_shared_memory_t*, uint64_t pages);
+void wasm_rt_free_memory_shared(wasm_rt_shared_memory_t*);
void wasm_rt_allocate_funcref_table(wasm_rt_table_t*, uint32_t elements, uint32_t max_elements);
void wasm_rt_allocate_externref_table(wasm_rt_externref_table_t*, uint32_t elements, uint32_t max_elements);
void wasm_rt_free_funcref_table(wasm_rt_table_t*);
@@ -329,6 +352,16 @@ arguments and returning `void` . e.g.
`wasm_rt_free_memory` frees the memory instance.
+`wasm_rt_allocate_memory_shared` initializes a memory instance that can be
+shared by different Wasm threads. Its operation is otherwise similar to
+`wasm_rt_allocate_memory`.
+
+`wasm_rt_grow_memory_shared` must grow the given shared memory instance by the
+given number of pages. Its operation is otherwise similar to
+`wasm_rt_grow_memory`.
+
+`wasm_rt_free_memory_shared` frees the shared memory instance.
+
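+As a minimal sketch (assuming C11 compilation and linking against
+`wasm-rt-mem-impl.c`; the page counts here are arbitrary), a host might drive
+this API as follows:
+
+```c
+#include <stdbool.h>
+#include <stdint.h>
+#include "wasm-rt.h"
+
+int main(void) {
+  wasm_rt_init();
+  wasm_rt_shared_memory_t mem;
+  /* 1 initial page, at most 4 pages, u32-indexed (is64 = false). */
+  wasm_rt_allocate_memory_shared(&mem, 1, 4, false);
+  /* Grow by 2 pages; returns the old page count, or (uint64_t)-1 on failure. */
+  uint64_t old_pages = wasm_rt_grow_memory_shared(&mem, 2);
+  (void)old_pages;
+  wasm_rt_free_memory_shared(&mem);
+  wasm_rt_free();
+  return 0;
+}
+```
+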
`wasm_rt_allocate_funcref_table` and the similar `..._externref_table`
initialize a table instance of the given type, and allocate at least
enough space for the given number of initial elements. The elements
diff --git a/wasm2c/wasm-rt-impl.c b/wasm2c/wasm-rt-impl.c
index 57bd74cf..7c88499d 100644
--- a/wasm2c/wasm-rt-impl.c
+++ b/wasm2c/wasm-rt-impl.c
@@ -36,8 +36,6 @@
#include <sys/mman.h>
#endif
-#define PAGE_SIZE 65536
-
#ifndef NDEBUG
#define DEBUG_PRINTF(...) fprintf(stderr, __VA_ARGS__);
#else
@@ -64,10 +62,6 @@ WASM_RT_THREAD_LOCAL wasm_rt_jmp_buf g_wasm_rt_jmp_buf;
extern void WASM_RT_TRAP_HANDLER(wasm_rt_trap_t code);
#endif
-#ifdef WASM_RT_GROW_FAILED_HANDLER
-extern void WASM_RT_GROW_FAILED_HANDLER();
-#endif
-
void wasm_rt_trap(wasm_rt_trap_t code) {
assert(code != WASM_RT_TRAP_NONE);
#if WASM_RT_STACK_DEPTH_COUNT
@@ -83,47 +77,6 @@ void wasm_rt_trap(wasm_rt_trap_t code) {
}
#ifdef _WIN32
-static void* os_mmap(size_t size) {
- void* ret = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
- return ret;
-}
-
-static int os_munmap(void* addr, size_t size) {
- // Windows can only unmap the whole mapping
- (void)size; /* unused */
- BOOL succeeded = VirtualFree(addr, 0, MEM_RELEASE);
- return succeeded ? 0 : -1;
-}
-
-static int os_mprotect(void* addr, size_t size) {
- if (size == 0) {
- return 0;
- }
- void* ret = VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE);
- if (ret == addr) {
- return 0;
- }
- VirtualFree(addr, 0, MEM_RELEASE);
- return -1;
-}
-
-static void os_print_last_error(const char* msg) {
- DWORD errorMessageID = GetLastError();
- if (errorMessageID != 0) {
- LPSTR messageBuffer = 0;
- // The api creates the buffer that holds the message
- size_t size = FormatMessageA(
- FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
- FORMAT_MESSAGE_IGNORE_INSERTS,
- NULL, errorMessageID, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
- (LPSTR)&messageBuffer, 0, NULL);
- (void)size;
- printf("%s. %s\n", msg, messageBuffer);
- LocalFree(messageBuffer);
- } else {
- printf("%s. No error code.\n", msg);
- }
-}
#if WASM_RT_INSTALL_SIGNAL_HANDLER
@@ -148,28 +101,6 @@ static void os_cleanup_signal_handler(void) {
#endif
#else
-#if WASM_RT_USE_MMAP
-static void* os_mmap(size_t size) {
- int map_prot = PROT_NONE;
- int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;
- uint8_t* addr = mmap(NULL, size, map_prot, map_flags, -1, 0);
- if (addr == MAP_FAILED)
- return NULL;
- return addr;
-}
-
-static int os_munmap(void* addr, size_t size) {
- return munmap(addr, size);
-}
-
-static int os_mprotect(void* addr, size_t size) {
- return mprotect(addr, size, PROT_READ | PROT_WRITE);
-}
-
-static void os_print_last_error(const char* msg) {
- perror(msg);
-}
-#endif
#if WASM_RT_INSTALL_SIGNAL_HANDLER
static void os_signal_handler(int sig, siginfo_t* si, void* unused) {
@@ -326,113 +257,6 @@ void wasm_rt_free_thread(void) {
#endif
}
-#if WASM_RT_USE_MMAP
-
-static uint64_t get_allocation_size_for_mmap(wasm_rt_memory_t* memory) {
- assert(!memory->is64 &&
- "memory64 is not yet compatible with WASM_RT_USE_MMAP");
-#if WASM_RT_MEMCHECK_GUARD_PAGES
- /* Reserve 8GiB. */
- const uint64_t max_size = 0x200000000ul;
- return max_size;
-#else
- if (memory->max_pages != 0) {
- const uint64_t max_size = memory->max_pages * PAGE_SIZE;
- return max_size;
- }
-
- /* Reserve 4GiB. */
- const uint64_t max_size = 0x100000000ul;
- return max_size;
-#endif
-}
-
-#endif
-
-void wasm_rt_allocate_memory(wasm_rt_memory_t* memory,
- uint64_t initial_pages,
- uint64_t max_pages,
- bool is64) {
- uint64_t byte_length = initial_pages * PAGE_SIZE;
- memory->size = byte_length;
- memory->pages = initial_pages;
- memory->max_pages = max_pages;
- memory->is64 = is64;
-
-#if WASM_RT_USE_MMAP
- const uint64_t mmap_size = get_allocation_size_for_mmap(memory);
- void* addr = os_mmap(mmap_size);
- if (!addr) {
- os_print_last_error("os_mmap failed.");
- abort();
- }
- int ret = os_mprotect(addr, byte_length);
- if (ret != 0) {
- os_print_last_error("os_mprotect failed.");
- abort();
- }
- memory->data = addr;
-#else
- memory->data = calloc(byte_length, 1);
-#endif
-}
-
-static uint64_t grow_memory_impl(wasm_rt_memory_t* memory, uint64_t delta) {
- uint64_t old_pages = memory->pages;
- uint64_t new_pages = memory->pages + delta;
- if (new_pages == 0) {
- return 0;
- }
- if (new_pages < old_pages || new_pages > memory->max_pages) {
- return (uint64_t)-1;
- }
- uint64_t old_size = old_pages * PAGE_SIZE;
- uint64_t new_size = new_pages * PAGE_SIZE;
- uint64_t delta_size = delta * PAGE_SIZE;
-#if WASM_RT_USE_MMAP
- uint8_t* new_data = memory->data;
- int ret = os_mprotect(new_data + old_size, delta_size);
- if (ret != 0) {
- return (uint64_t)-1;
- }
-#else
- uint8_t* new_data = realloc(memory->data, new_size);
- if (new_data == NULL) {
- return (uint64_t)-1;
- }
-#if !WABT_BIG_ENDIAN
- memset(new_data + old_size, 0, delta_size);
-#endif
-#endif
-#if WABT_BIG_ENDIAN
- memmove(new_data + new_size - old_size, new_data, old_size);
- memset(new_data, 0, delta_size);
-#endif
- memory->pages = new_pages;
- memory->size = new_size;
- memory->data = new_data;
- return old_pages;
-}
-
-uint64_t wasm_rt_grow_memory(wasm_rt_memory_t* memory, uint64_t delta) {
- uint64_t ret = grow_memory_impl(memory, delta);
-#ifdef WASM_RT_GROW_FAILED_HANDLER
- if (ret == -1) {
- WASM_RT_GROW_FAILED_HANDLER();
- }
-#endif
- return ret;
-}
-
-void wasm_rt_free_memory(wasm_rt_memory_t* memory) {
-#if WASM_RT_USE_MMAP
- const uint64_t mmap_size = get_allocation_size_for_mmap(memory);
- os_munmap(memory->data, mmap_size); // ignore error
-#else
- free(memory->data);
-#endif
-}
-
#define DEFINE_TABLE_OPS(type) \
void wasm_rt_allocate_##type##_table(wasm_rt_##type##_table_t* table, \
uint32_t elements, \
diff --git a/wasm2c/wasm-rt-mem-impl-helper.inc b/wasm2c/wasm-rt-mem-impl-helper.inc
new file mode 100644
index 00000000..5cd503c7
--- /dev/null
+++ b/wasm2c/wasm-rt-mem-impl-helper.inc
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2018 WebAssembly Community Group participants
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file is used as a template to generate code for regular memories or for
+// shared memories. For this, the file must be included after defining either
+// WASM_RT_MEM_OPS or WASM_RT_MEM_OPS_SHARED.
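+// (wasm-rt-mem-impl.c includes this file twice, once with each macro set.)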
+
+#if defined(WASM_RT_MEM_OPS) && defined(WASM_RT_MEM_OPS_SHARED)
+#error \
+ "Expected only one of { WASM_RT_MEM_OPS, WASM_RT_MEM_OPS_SHARED } to be defined"
+#elif !defined(WASM_RT_MEM_OPS) && !defined(WASM_RT_MEM_OPS_SHARED)
+#error \
+ "Expected one of { WASM_RT_MEM_OPS, WASM_RT_MEM_OPS_SHARED } to be defined"
+#endif
+
+// Shared memory operations are defined only if we have C11
+#if defined(WASM_RT_MEM_OPS) || \
+ (defined(WASM_RT_MEM_OPS_SHARED) && defined(WASM_RT_C11_AVAILABLE))
+
+#ifdef WASM_RT_MEM_OPS
+
+// Memory operations on wasm_rt_memory_t
+#define MEMORY_TYPE wasm_rt_memory_t
+#define MEMORY_API_NAME(name) name
+#define MEMORY_CELL_TYPE uint8_t*
+#define MEMORY_LOCK_VAR_INIT(name)
+#define MEMORY_LOCK_AQUIRE(name)
+#define MEMORY_LOCK_RELEASE(name)
+
+#else
+
+// Memory operations on wasm_rt_shared_memory_t
+#define MEMORY_TYPE wasm_rt_shared_memory_t
+#define MEMORY_API_NAME(name) name##_shared
+#define MEMORY_CELL_TYPE _Atomic volatile uint8_t*
+
+#if WASM_RT_USE_C11THREADS
+#define MEMORY_LOCK_VAR_INIT(name) C11_MEMORY_LOCK_VAR_INIT(name)
+#define MEMORY_LOCK_AQUIRE(name) C11_MEMORY_LOCK_AQUIRE(name)
+#define MEMORY_LOCK_RELEASE(name) C11_MEMORY_LOCK_RELEASE(name)
+#elif WASM_RT_USE_PTHREADS
+#define MEMORY_LOCK_VAR_INIT(name) PTHREAD_MEMORY_LOCK_VAR_INIT(name)
+#define MEMORY_LOCK_AQUIRE(name) PTHREAD_MEMORY_LOCK_AQUIRE(name)
+#define MEMORY_LOCK_RELEASE(name) PTHREAD_MEMORY_LOCK_RELEASE(name)
+#elif WASM_RT_USE_CRITICALSECTION
+#define MEMORY_LOCK_VAR_INIT(name) WIN_MEMORY_LOCK_VAR_INIT(name)
+#define MEMORY_LOCK_AQUIRE(name) WIN_MEMORY_LOCK_AQUIRE(name)
+#define MEMORY_LOCK_RELEASE(name) WIN_MEMORY_LOCK_RELEASE(name)
+#endif
+
+#endif
+
+void MEMORY_API_NAME(wasm_rt_allocate_memory)(MEMORY_TYPE* memory,
+ uint64_t initial_pages,
+ uint64_t max_pages,
+ bool is64) {
+ uint64_t byte_length = initial_pages * PAGE_SIZE;
+ memory->size = byte_length;
+ memory->pages = initial_pages;
+ memory->max_pages = max_pages;
+ memory->is64 = is64;
+ MEMORY_LOCK_VAR_INIT(memory->mem_lock);
+
+#if WASM_RT_USE_MMAP
+ const uint64_t mmap_size =
+ get_alloc_size_for_mmap(memory->max_pages, memory->is64);
+ void* addr = os_mmap(mmap_size);
+ if (!addr) {
+ os_print_last_error("os_mmap failed.");
+ abort();
+ }
+ int ret = os_mprotect(addr, byte_length);
+ if (ret != 0) {
+ os_print_last_error("os_mprotect failed.");
+ abort();
+ }
+ memory->data = addr;
+#else
+ memory->data = calloc(byte_length, 1);
+#endif
+}
+
+static uint64_t MEMORY_API_NAME(grow_memory_impl)(MEMORY_TYPE* memory,
+ uint64_t delta) {
+ uint64_t old_pages = memory->pages;
+ uint64_t new_pages = memory->pages + delta;
+ if (new_pages == 0) {
+ return 0;
+ }
+ if (new_pages < old_pages || new_pages > memory->max_pages) {
+ return (uint64_t)-1;
+ }
+ uint64_t old_size = old_pages * PAGE_SIZE;
+ uint64_t new_size = new_pages * PAGE_SIZE;
+ uint64_t delta_size = delta * PAGE_SIZE;
+#if WASM_RT_USE_MMAP
+ MEMORY_CELL_TYPE new_data = memory->data;
+ int ret = os_mprotect((void*)(new_data + old_size), delta_size);
+ if (ret != 0) {
+ return (uint64_t)-1;
+ }
+#else
+ MEMORY_CELL_TYPE new_data = realloc((void*)memory->data, new_size);
+ if (new_data == NULL) {
+ return (uint64_t)-1;
+ }
+#if !WABT_BIG_ENDIAN
+ memset((void*)(new_data + old_size), 0, delta_size);
+#endif
+#endif
+#if WABT_BIG_ENDIAN
+  memmove((void*)(new_data + new_size - old_size), (void*)new_data, old_size);
+  memset((void*)new_data, 0, delta_size);
+#endif
+ memory->pages = new_pages;
+ memory->size = new_size;
+ memory->data = new_data;
+ return old_pages;
+}
+
+uint64_t MEMORY_API_NAME(wasm_rt_grow_memory)(MEMORY_TYPE* memory,
+ uint64_t delta) {
+ MEMORY_LOCK_AQUIRE(memory->mem_lock);
+ uint64_t ret = MEMORY_API_NAME(grow_memory_impl)(memory, delta);
+ MEMORY_LOCK_RELEASE(memory->mem_lock);
+#ifdef WASM_RT_GROW_FAILED_HANDLER
+ if (ret == -1) {
+ WASM_RT_GROW_FAILED_HANDLER();
+ }
+#endif
+ return ret;
+}
+
+void MEMORY_API_NAME(wasm_rt_free_memory)(MEMORY_TYPE* memory) {
+#if WASM_RT_USE_MMAP
+ const uint64_t mmap_size =
+ get_alloc_size_for_mmap(memory->max_pages, memory->is64);
+ os_munmap((void*)memory->data, mmap_size); // ignore error
+#else
+ free((void*)memory->data);
+#endif
+}
+
+#undef MEMORY_LOCK_RELEASE
+#undef MEMORY_LOCK_AQUIRE
+#undef MEMORY_LOCK_VAR_INIT
+#undef MEMORY_CELL_TYPE
+#undef MEMORY_API_NAME
+#undef MEMORY_TYPE
+
+#endif
diff --git a/wasm2c/wasm-rt-mem-impl.c b/wasm2c/wasm-rt-mem-impl.c
new file mode 100644
index 00000000..d29aadad
--- /dev/null
+++ b/wasm2c/wasm-rt-mem-impl.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2018 WebAssembly Community Group participants
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm-rt-impl.h"
+
+#include <assert.h>
+#include <stdio.h>
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sys/mman.h>
+#endif
+
+#define PAGE_SIZE 65536
+
+#ifdef WASM_RT_GROW_FAILED_HANDLER
+extern void WASM_RT_GROW_FAILED_HANDLER();
+#endif
+
+#define C11_MEMORY_LOCK_VAR_INIT(name) \
+ if (mtx_init(&(name), mtx_plain) != thrd_success) { \
+ fprintf(stderr, "Lock init failed\n"); \
+ abort(); \
+ }
+#define C11_MEMORY_LOCK_AQUIRE(name) \
+ if (mtx_lock(&(name)) != thrd_success) { \
+ fprintf(stderr, "Lock acquire failed\n"); \
+ abort(); \
+ }
+#define C11_MEMORY_LOCK_RELEASE(name) \
+ if (mtx_unlock(&(name)) != thrd_success) { \
+ fprintf(stderr, "Lock release failed\n"); \
+ abort(); \
+ }
+
+#define PTHREAD_MEMORY_LOCK_VAR_INIT(name) \
+ if (pthread_mutex_init(&(name), NULL) != 0) { \
+ fprintf(stderr, "Lock init failed\n"); \
+ abort(); \
+ }
+#define PTHREAD_MEMORY_LOCK_AQUIRE(name) \
+ if (pthread_mutex_lock(&(name)) != 0) { \
+ fprintf(stderr, "Lock acquire failed\n"); \
+ abort(); \
+ }
+#define PTHREAD_MEMORY_LOCK_RELEASE(name) \
+ if (pthread_mutex_unlock(&(name)) != 0) { \
+ fprintf(stderr, "Lock release failed\n"); \
+ abort(); \
+ }
+
+#define WIN_MEMORY_LOCK_VAR_INIT(name) InitializeCriticalSection(&(name))
+#define WIN_MEMORY_LOCK_AQUIRE(name) EnterCriticalSection(&(name))
+#define WIN_MEMORY_LOCK_RELEASE(name) LeaveCriticalSection(&(name))
+
+#if WASM_RT_USE_MMAP
+
+#ifdef _WIN32
+static void* os_mmap(size_t size) {
+ void* ret = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
+ return ret;
+}
+
+static int os_munmap(void* addr, size_t size) {
+ // Windows can only unmap the whole mapping
+ (void)size; /* unused */
+ BOOL succeeded = VirtualFree(addr, 0, MEM_RELEASE);
+ return succeeded ? 0 : -1;
+}
+
+static int os_mprotect(void* addr, size_t size) {
+ if (size == 0) {
+ return 0;
+ }
+ void* ret = VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE);
+ if (ret == addr) {
+ return 0;
+ }
+ VirtualFree(addr, 0, MEM_RELEASE);
+ return -1;
+}
+
+static void os_print_last_error(const char* msg) {
+ DWORD errorMessageID = GetLastError();
+ if (errorMessageID != 0) {
+ LPSTR messageBuffer = 0;
+ // The api creates the buffer that holds the message
+ size_t size = FormatMessageA(
+ FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL, errorMessageID, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPSTR)&messageBuffer, 0, NULL);
+ (void)size;
+ printf("%s. %s\n", msg, messageBuffer);
+ LocalFree(messageBuffer);
+ } else {
+ printf("%s. No error code.\n", msg);
+ }
+}
+
+#else
+static void* os_mmap(size_t size) {
+ int map_prot = PROT_NONE;
+ int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;
+ uint8_t* addr = mmap(NULL, size, map_prot, map_flags, -1, 0);
+ if (addr == MAP_FAILED)
+ return NULL;
+ return addr;
+}
+
+static int os_munmap(void* addr, size_t size) {
+ return munmap(addr, size);
+}
+
+static int os_mprotect(void* addr, size_t size) {
+ return mprotect(addr, size, PROT_READ | PROT_WRITE);
+}
+
+static void os_print_last_error(const char* msg) {
+ perror(msg);
+}
+
+#endif
+
+static uint64_t get_alloc_size_for_mmap(uint64_t max_pages, bool is64) {
+ assert(!is64 && "memory64 is not yet compatible with WASM_RT_USE_MMAP");
+#if WASM_RT_MEMCHECK_GUARD_PAGES
+ /* Reserve 8GiB. */
+ const uint64_t max_size = 0x200000000ul;
+ return max_size;
+#else
+ if (max_pages != 0) {
+ const uint64_t max_size = max_pages * PAGE_SIZE;
+ return max_size;
+ }
+
+ /* Reserve 4GiB. */
+ const uint64_t max_size = 0x100000000ul;
+ return max_size;
+#endif
+}
+
+#endif
+
+// Include operations for memory
+#define WASM_RT_MEM_OPS
+#include "wasm-rt-mem-impl-helper.inc"
+#undef WASM_RT_MEM_OPS
+
+// Include operations for shared memory
+#define WASM_RT_MEM_OPS_SHARED
+#include "wasm-rt-mem-impl-helper.inc"
+#undef WASM_RT_MEM_OPS_SHARED
+
+#undef C11_MEMORY_LOCK_VAR_INIT
+#undef C11_MEMORY_LOCK_AQUIRE
+#undef C11_MEMORY_LOCK_RELEASE
+#undef PTHREAD_MEMORY_LOCK_VAR_INIT
+#undef PTHREAD_MEMORY_LOCK_AQUIRE
+#undef PTHREAD_MEMORY_LOCK_RELEASE
+#undef WIN_MEMORY_LOCK_VAR_INIT
+#undef WIN_MEMORY_LOCK_AQUIRE
+#undef WIN_MEMORY_LOCK_RELEASE
+#undef PAGE_SIZE
diff --git a/wasm2c/wasm-rt.h b/wasm2c/wasm-rt.h
index bd0cd61d..cbd09a31 100644
--- a/wasm2c/wasm-rt.h
+++ b/wasm2c/wasm-rt.h
@@ -51,9 +51,37 @@ extern "C" {
#define wasm_rt_unreachable abort
#endif
+#ifdef __STDC_VERSION__
+#if __STDC_VERSION__ >= 201112L
+#define WASM_RT_C11_AVAILABLE
+#endif
+#endif
+
+/**
+ * Apple and Windows platforms don't implement C11's threads.h. We use pthreads
+ * on Apple platforms, and the CRITICAL_SECTION API on Windows.
+ */
+#ifdef WASM_RT_C11_AVAILABLE
+
+#ifdef __APPLE__
+#include <pthread.h>
+#define WASM_RT_MUTEX pthread_mutex_t
+#define WASM_RT_USE_PTHREADS 1
+#elif defined(_WIN32)
+#include <windows.h>
+#define WASM_RT_MUTEX CRITICAL_SECTION
+#define WASM_RT_USE_CRITICALSECTION 1
+#else
+#include <threads.h>
+#define WASM_RT_MUTEX mtx_t
+#define WASM_RT_USE_C11THREADS 1
+#endif
+
+#endif
+
#ifdef _MSC_VER
#define WASM_RT_THREAD_LOCAL __declspec(thread)
-#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
+#elif defined(WASM_RT_C11_AVAILABLE)
#define WASM_RT_THREAD_LOCAL _Thread_local
#else
#define WASM_RT_THREAD_LOCAL
@@ -346,6 +374,33 @@ typedef struct {
bool is64;
} wasm_rt_memory_t;
+#ifdef WASM_RT_C11_AVAILABLE
+/** A shared Memory object. */
+typedef struct {
+ /**
+ * The linear memory data, with a byte length of `size`. The memory is marked
+ * atomic as it is shared and may have to be accessed with different memory
+   * orders --- sequential when accessed atomically, relaxed otherwise.
+   * Unfortunately, the C standard does not specify what happens when two
+   * overlapping memory accesses each carry a memory order, e.g., an atomic32
+   * read from the same location as an atomic64 read. One way to prevent
+   * optimizations from assuming non-overlapping accesses, as is typical in C,
+   * is to mark the memory as volatile. Thus the memory is both atomic and
+   * volatile. */
+ _Atomic volatile uint8_t* data;
+ /**
+ * The current and maximum page count for this Memory object. If there is no
+ * maximum, `max_pages` is 0xffffffffu (i.e. UINT32_MAX). */
+ uint64_t pages, max_pages;
+ /** The current size of the linear memory, in bytes. */
+ uint64_t size;
+  /** Is this memory indexed by u64 (as opposed to the default u32)? */
+  bool is64;
+  /** Lock used to make operations such as memory grow thread-safe. */
+ WASM_RT_MUTEX mem_lock;
+} wasm_rt_shared_memory_t;
+#endif
+
/** A Table of type funcref. */
typedef struct {
/** The table element data, with an element count of `size`. */
@@ -475,6 +530,26 @@ uint64_t wasm_rt_grow_memory(wasm_rt_memory_t*, uint64_t pages);
*/
void wasm_rt_free_memory(wasm_rt_memory_t*);
+#ifdef WASM_RT_C11_AVAILABLE
+/**
+ * Shared memory version of wasm_rt_allocate_memory
+ */
+void wasm_rt_allocate_memory_shared(wasm_rt_shared_memory_t*,
+ uint64_t initial_pages,
+ uint64_t max_pages,
+ bool is64);
+
+/**
+ * Shared memory version of wasm_rt_grow_memory
+ */
+uint64_t wasm_rt_grow_memory_shared(wasm_rt_shared_memory_t*, uint64_t pages);
+
+/**
+ * Shared memory version of wasm_rt_free_memory
+ */
+void wasm_rt_free_memory_shared(wasm_rt_shared_memory_t*);
+#endif
+
/**
* Initialize a funcref Table object with an element count of `elements` and a
* maximum size of `max_elements`.