author    Keith Winstein <208955+keithw@users.noreply.github.com>  2024-12-17 19:06:57 -0800
committer GitHub <noreply@github.com>  2024-12-17 19:06:57 -0800
commit    ea193b40d6d4a1a697d68ae855b2b3b3e263b377 (patch)
tree      03e10d23cf9883f08657f69fa50c66a516ebe17f /src
parent    4e7d7efe6e9a786370848e669041bdc237730a8b (diff)
wasm2c: harmonize bulk mem ops re: i32/i64 (#2506) + parametrize memchecks per-memory (#2507)
This PR updates the bulk memory operations (memory.fill, memory.copy, table.fill, etc.) to support 64-bit addresses and counts; previously these functions took only u32s, even with memory64 enabled. (#2506)

This PR also allows "software-bounds-checked" memories and "guard-page-checked" memories to coexist in the same module. It creates two versions of every memory operation: an unrestricted version (that works with any memory) and a _default32 version (for memories with the default page size and i32 indexing). (#2507)

#2506 and #2507 have been squashed together to avoid a performance regression. This is a stepping stone to supporting custom-page-sizes, which will need to be software-bounds-checked (#2508).
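To make the new scheme concrete, here is a minimal hand-written sketch of the three functions the DEF_MEM_CHECKS macros (added below) generate for a plain i32 load. Names follow the macros in this diff; u32/u64, wasm_rt_memory_t, wasm_rt_memcpy, MEM_ADDR_MEMOP, and the MEMCHECK_* macros are the wasm2c runtime's, and the force_read step is omitted for brevity:

static inline u32 i32_load_unchecked(wasm_rt_memory_t* mem, u64 addr) {
  /* Raw access: no bounds check; callers go through a checked wrapper. */
  u32 result;
  wasm_rt_memcpy(&result, MEM_ADDR_MEMOP(mem, addr, sizeof(u32)), sizeof(u32));
  return result;
}
static inline u32 i32_load_default32(wasm_rt_memory_t* mem, u64 addr) {
  /* Fast path for default-page-size, i32-indexed memories; may be a
   * no-op under guard-page bounds checking. */
  MEMCHECK_DEFAULT32(mem, addr, u32);
  return i32_load_unchecked(mem, addr);
}
static inline u32 i32_load(wasm_rt_memory_t* mem, u64 addr) {
  /* General path for any memory: overflow-safe software range check. */
  MEMCHECK_GENERAL(mem, addr, u32);
  return i32_load_unchecked(mem, addr);
}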
Diffstat (limited to 'src')
-rw-r--r--  src/c-writer.cc                                        |  19
-rw-r--r--  src/prebuilt/wasm2c_atomicops_source_declarations.cc   | 136
-rw-r--r--  src/prebuilt/wasm2c_simd_source_declarations.cc        |  46
-rw-r--r--  src/prebuilt/wasm2c_source_declarations.cc             | 205
-rw-r--r--  src/template/wasm2c.declarations.c                     | 139
-rw-r--r--  src/template/wasm2c_atomicops.declarations.c           | 129
-rw-r--r--  src/template/wasm2c_simd.declarations.c                |  43
7 files changed, 447 insertions, 270 deletions
diff --git a/src/c-writer.cc b/src/c-writer.cc
index 7a0c548b..e7bcff4f 100644
--- a/src/c-writer.cc
+++ b/src/c-writer.cc
@@ -1369,7 +1369,24 @@ static std::string GetMemoryTypeString(const Memory& memory) {
}
static std::string GetMemoryAPIString(const Memory& memory, std::string api) {
- return memory.page_limits.is_shared ? (api + "_shared") : api;
+ std::string suffix;
+ if (memory.page_limits.is_shared) {
+ suffix += "_shared";
+ }
+
+ // Memory load and store routines can be optimized for default-page-size,
+ // 32-bit memories (by using hardware to bounds-check memory access).
+ // Append "_default32" to these function names to choose the (possibly) fast
+ // path.
+ //
+ // We don't need to do this for runtime routines; those can check the
+ // wasm_rt_memory_t structure.
+ if (api.substr(0, 8) != "wasm_rt_" &&
+ memory.page_size == WABT_DEFAULT_PAGE_SIZE &&
+ memory.page_limits.is_64 == false) {
+ suffix += "_default32";
+ }
+ return api + suffix;
}
void CWriter::WriteInitExpr(const ExprList& expr_list) {
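A few illustrative results of the suffix logic above, assuming api = "i32_load" (the names match the function variants generated by the declaration templates below):

/* non-shared, default page size, i32 indexing -> "i32_load_default32"
 * shared,     default page size, i32 indexing -> "i32_load_shared_default32"
 * memory64 or custom page size                -> "i32_load" (general path)
 * api starting with "wasm_rt_" (runtime call) -> "_shared" may be appended,
 *                                                but never "_default32"     */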
diff --git a/src/prebuilt/wasm2c_atomicops_source_declarations.cc b/src/prebuilt/wasm2c_atomicops_source_declarations.cc
index 8312d5d6..dc02591c 100644
--- a/src/prebuilt/wasm2c_atomicops_source_declarations.cc
+++ b/src/prebuilt/wasm2c_atomicops_source_declarations.cc
@@ -17,25 +17,25 @@ R"w2c_template( TRAP(UNALIGNED); \
R"w2c_template( }
)w2c_template"
R"w2c_template(
-#define DEFINE_SHARED_LOAD(name, t1, t2, t3, force_read) \
+#define DEFINE_SHARED_LOAD(name, t1, t2, t3, force_read) \
)w2c_template"
-R"w2c_template( static inline t3 name(wasm_rt_shared_memory_t* mem, u64 addr) { \
+R"w2c_template( static inline t3 name##_unchecked(wasm_rt_shared_memory_t* mem, u64 addr) { \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t1); \
+R"w2c_template( t1 result; \
)w2c_template"
-R"w2c_template( t1 result; \
+R"w2c_template( result = atomic_load_explicit( \
)w2c_template"
-R"w2c_template( result = atomic_load_explicit( \
+R"w2c_template( (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), \
)w2c_template"
-R"w2c_template( (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), \
+R"w2c_template( memory_order_relaxed); \
)w2c_template"
-R"w2c_template( memory_order_relaxed); \
+R"w2c_template( force_read(result); \
)w2c_template"
-R"w2c_template( force_read(result); \
+R"w2c_template( return (t3)(t2)result; \
)w2c_template"
-R"w2c_template( return (t3)(t2)result; \
+R"w2c_template( } \
)w2c_template"
-R"w2c_template( }
+R"w2c_template( DEF_MEM_CHECKS0(name, _shared_, t1, return, t3)
)w2c_template"
R"w2c_template(
DEFINE_SHARED_LOAD(i32_load_shared, u32, u32, u32, FORCE_READ_INT)
@@ -69,9 +69,9 @@ R"w2c_template(DEFINE_SHARED_LOAD(i64_load32_u_shared, u32, u64, u64, FORCE_READ
R"w2c_template(
#define DEFINE_SHARED_STORE(name, t1, t2) \
)w2c_template"
-R"w2c_template( static inline void name(wasm_rt_shared_memory_t* mem, u64 addr, t2 value) { \
+R"w2c_template( static inline void name##_unchecked(wasm_rt_shared_memory_t* mem, u64 addr, \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t1); \
+R"w2c_template( t2 value) { \
)w2c_template"
R"w2c_template( t1 wrapped = (t1)value; \
)w2c_template"
@@ -81,7 +81,9 @@ R"w2c_template( (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wr
)w2c_template"
R"w2c_template( memory_order_relaxed); \
)w2c_template"
-R"w2c_template( }
+R"w2c_template( } \
+)w2c_template"
+R"w2c_template( DEF_MEM_CHECKS1(name, _shared_, t1, , void, t2)
)w2c_template"
R"w2c_template(
DEFINE_SHARED_STORE(i32_store_shared, u32, u32)
@@ -105,9 +107,7 @@ R"w2c_template(DEFINE_SHARED_STORE(i64_store32_shared, u32, u64)
R"w2c_template(
#define DEFINE_ATOMIC_LOAD(name, t1, t2, t3, force_read) \
)w2c_template"
-R"w2c_template( static inline t3 name(wasm_rt_memory_t* mem, u64 addr) { \
-)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t1); \
+R"w2c_template( static inline t3 name##_unchecked(wasm_rt_memory_t* mem, u64 addr) { \
)w2c_template"
R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
@@ -121,9 +121,11 @@ R"w2c_template( return (t3)(t2)result;
)w2c_template"
R"w2c_template( } \
)w2c_template"
-R"w2c_template( static inline t3 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr) { \
+R"w2c_template( DEF_MEM_CHECKS0(name, _, t1, return, t3) \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t1); \
+R"w2c_template( static inline t3 name##_shared_unchecked(wasm_rt_shared_memory_t* mem, \
+)w2c_template"
+R"w2c_template( u64 addr) { \
)w2c_template"
R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
@@ -137,7 +139,9 @@ R"w2c_template( force_read(result);
)w2c_template"
R"w2c_template( return (t3)(t2)result; \
)w2c_template"
-R"w2c_template( }
+R"w2c_template( } \
+)w2c_template"
+R"w2c_template( DEF_MEM_CHECKS0(name##_shared, _shared_, t1, return, t3)
)w2c_template"
R"w2c_template(
DEFINE_ATOMIC_LOAD(i32_atomic_load, u32, u32, u32, FORCE_READ_INT)
@@ -157,9 +161,9 @@ R"w2c_template(DEFINE_ATOMIC_LOAD(i64_atomic_load32_u, u32, u64, u64, FORCE_READ
R"w2c_template(
#define DEFINE_ATOMIC_STORE(name, t1, t2) \
)w2c_template"
-R"w2c_template( static inline void name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
+R"w2c_template( static inline void name##_unchecked(wasm_rt_memory_t* mem, u64 addr, \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t1); \
+R"w2c_template( t2 value) { \
)w2c_template"
R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
@@ -169,11 +173,11 @@ R"w2c_template( wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t1)), &wrapped, siz
)w2c_template"
R"w2c_template( } \
)w2c_template"
-R"w2c_template( static inline void name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
+R"w2c_template( DEF_MEM_CHECKS1(name, _, t1, , void, t2) \
)w2c_template"
-R"w2c_template( t2 value) { \
+R"w2c_template( static inline void name##_shared_unchecked(wasm_rt_shared_memory_t* mem, \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t1); \
+R"w2c_template( u64 addr, t2 value) { \
)w2c_template"
R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
@@ -183,7 +187,9 @@ R"w2c_template( atomic_store((_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof
)w2c_template"
R"w2c_template( wrapped); \
)w2c_template"
-R"w2c_template( }
+R"w2c_template( } \
+)w2c_template"
+R"w2c_template( DEF_MEM_CHECKS1(name##_shared, _shared_, t1, , void, t2)
)w2c_template"
R"w2c_template(
DEFINE_ATOMIC_STORE(i32_atomic_store, u32, u32)
@@ -203,9 +209,9 @@ R"w2c_template(DEFINE_ATOMIC_STORE(i64_atomic_store32, u32, u64)
R"w2c_template(
#define DEFINE_ATOMIC_RMW(name, opname, op, t1, t2) \
)w2c_template"
-R"w2c_template( static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
+R"w2c_template( static inline t2 name##_unchecked(wasm_rt_memory_t* mem, u64 addr, \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t1); \
+R"w2c_template( t2 value) { \
)w2c_template"
R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
@@ -223,11 +229,11 @@ R"w2c_template( return (t2)ret;
)w2c_template"
R"w2c_template( } \
)w2c_template"
-R"w2c_template( static inline t2 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
+R"w2c_template( DEF_MEM_CHECKS1(name, _, t1, return, t2, t2) \
)w2c_template"
-R"w2c_template( t2 value) { \
+R"w2c_template( static inline t2 name##_shared_unchecked(wasm_rt_shared_memory_t* mem, \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t1); \
+R"w2c_template( u64 addr, t2 value) { \
)w2c_template"
R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
@@ -239,7 +245,9 @@ R"w2c_template( (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wr
)w2c_template"
R"w2c_template( return (t2)ret; \
)w2c_template"
-R"w2c_template( }
+R"w2c_template( } \
+)w2c_template"
+R"w2c_template( DEF_MEM_CHECKS1(name##_shared, _shared_, t1, return, t2, t2)
)w2c_template"
R"w2c_template(
DEFINE_ATOMIC_RMW(i32_atomic_rmw8_add_u, fetch_add, +, u8, u32)
@@ -319,9 +327,9 @@ R"w2c_template(DEFINE_ATOMIC_RMW(i64_atomic_rmw_xor, fetch_xor, ^, u64, u64)
R"w2c_template(
#define DEFINE_ATOMIC_XCHG(name, opname, t1, t2) \
)w2c_template"
-R"w2c_template( static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
+R"w2c_template( static inline t2 name##_unchecked(wasm_rt_memory_t* mem, u64 addr, \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t1); \
+R"w2c_template( t2 value) { \
)w2c_template"
R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
@@ -337,11 +345,11 @@ R"w2c_template( return (t2)ret;
)w2c_template"
R"w2c_template( } \
)w2c_template"
-R"w2c_template( static inline t2 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
+R"w2c_template( DEF_MEM_CHECKS1(name, _, t1, return, t2, t2) \
)w2c_template"
-R"w2c_template( t2 value) { \
+R"w2c_template( static inline t2 name##_shared_unchecked(wasm_rt_shared_memory_t* mem, \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t1); \
+R"w2c_template( u64 addr, t2 value) { \
)w2c_template"
R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t1); \
)w2c_template"
@@ -353,7 +361,9 @@ R"w2c_template( (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wr
)w2c_template"
R"w2c_template( return (t2)ret; \
)w2c_template"
-R"w2c_template( }
+R"w2c_template( } \
+)w2c_template"
+R"w2c_template( DEF_MEM_CHECKS1(name##_shared, _shared_, t1, return, t2, t2)
)w2c_template"
R"w2c_template(
DEFINE_ATOMIC_XCHG(i32_atomic_rmw8_xchg_u, exchange, u8, u32)
@@ -371,57 +381,57 @@ R"w2c_template(DEFINE_ATOMIC_XCHG(i64_atomic_rmw32_xchg_u, exchange, u32, u64)
R"w2c_template(DEFINE_ATOMIC_XCHG(i64_atomic_rmw_xchg, exchange, u64, u64)
)w2c_template"
R"w2c_template(
-#define DEFINE_ATOMIC_CMP_XCHG(name, t1, t2) \
+#define DEFINE_ATOMIC_CMP_XCHG(name, t1, t2) \
)w2c_template"
-R"w2c_template( static inline t1 name(wasm_rt_memory_t* mem, u64 addr, t1 expected, \
+R"w2c_template( static inline t1 name##_unchecked(wasm_rt_memory_t* mem, u64 addr, \
)w2c_template"
-R"w2c_template( t1 replacement) { \
+R"w2c_template( t1 expected, t1 replacement) { \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t2); \
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t2); \
)w2c_template"
-R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t2); \
+R"w2c_template( t2 expected_wrapped = (t2)expected; \
)w2c_template"
-R"w2c_template( t2 expected_wrapped = (t2)expected; \
+R"w2c_template( t2 replacement_wrapped = (t2)replacement; \
)w2c_template"
-R"w2c_template( t2 replacement_wrapped = (t2)replacement; \
+R"w2c_template( t2 ret; \
)w2c_template"
-R"w2c_template( t2 ret; \
+R"w2c_template( wasm_rt_memcpy(&ret, MEM_ADDR(mem, addr, sizeof(t2)), sizeof(t2)); \
)w2c_template"
-R"w2c_template( wasm_rt_memcpy(&ret, MEM_ADDR(mem, addr, sizeof(t2)), sizeof(t2)); \
+R"w2c_template( if (ret == expected_wrapped) { \
)w2c_template"
-R"w2c_template( if (ret == expected_wrapped) { \
+R"w2c_template( wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t2)), &replacement_wrapped, \
)w2c_template"
-R"w2c_template( wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t2)), &replacement_wrapped, \
+R"w2c_template( sizeof(t2)); \
)w2c_template"
-R"w2c_template( sizeof(t2)); \
+R"w2c_template( } \
)w2c_template"
-R"w2c_template( } \
+R"w2c_template( return (t1)expected_wrapped; \
)w2c_template"
-R"w2c_template( return (t1)expected_wrapped; \
+R"w2c_template( } \
)w2c_template"
-R"w2c_template( } \
+R"w2c_template( DEF_MEM_CHECKS2(name, _, t2, return, t1, t1, t1) \
)w2c_template"
-R"w2c_template( static inline t1 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
+R"w2c_template( static inline t1 name##_shared_unchecked( \
)w2c_template"
-R"w2c_template( t1 expected, t1 replacement) { \
+R"w2c_template( wasm_rt_shared_memory_t* mem, u64 addr, t1 expected, t1 replacement) { \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t2); \
+R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t2); \
)w2c_template"
-R"w2c_template( ATOMIC_ALIGNMENT_CHECK(addr, t2); \
+R"w2c_template( t2 expected_wrapped = (t2)expected; \
)w2c_template"
-R"w2c_template( t2 expected_wrapped = (t2)expected; \
+R"w2c_template( t2 replacement_wrapped = (t2)replacement; \
)w2c_template"
-R"w2c_template( t2 replacement_wrapped = (t2)replacement; \
+R"w2c_template( atomic_compare_exchange_strong( \
)w2c_template"
-R"w2c_template( atomic_compare_exchange_strong( \
+R"w2c_template( (_Atomic volatile t2*)MEM_ADDR(mem, addr, sizeof(t2)), \
)w2c_template"
-R"w2c_template( (_Atomic volatile t2*)MEM_ADDR(mem, addr, sizeof(t2)), \
+R"w2c_template( &expected_wrapped, replacement_wrapped); \
)w2c_template"
-R"w2c_template( &expected_wrapped, replacement_wrapped); \
+R"w2c_template( return (t1)expected_wrapped; \
)w2c_template"
-R"w2c_template( return (t1)expected_wrapped; \
+R"w2c_template( } \
)w2c_template"
-R"w2c_template( }
+R"w2c_template( DEF_MEM_CHECKS2(name##_shared, _shared_, t2, return, t1, t1, t1)
)w2c_template"
R"w2c_template(
DEFINE_ATOMIC_CMP_XCHG(i32_atomic_rmw8_cmpxchg_u, u32, u8);
diff --git a/src/prebuilt/wasm2c_simd_source_declarations.cc b/src/prebuilt/wasm2c_simd_source_declarations.cc
index 5b903b26..a43a0006 100644
--- a/src/prebuilt/wasm2c_simd_source_declarations.cc
+++ b/src/prebuilt/wasm2c_simd_source_declarations.cc
@@ -15,26 +15,26 @@ R"w2c_template(#endif
R"w2c_template(// TODO: equivalent constraint for ARM and other architectures
)w2c_template"
R"w2c_template(
-#define DEFINE_SIMD_LOAD_FUNC(name, func, t) \
+#define DEFINE_SIMD_LOAD_FUNC(name, func, t) \
)w2c_template"
-R"w2c_template( static inline v128 name(wasm_rt_memory_t* mem, u64 addr) { \
+R"w2c_template( static inline v128 name##_unchecked(wasm_rt_memory_t* mem, u64 addr) { \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t); \
+R"w2c_template( v128 result = func(MEM_ADDR(mem, addr, sizeof(t))); \
)w2c_template"
-R"w2c_template( v128 result = func(MEM_ADDR(mem, addr, sizeof(t))); \
+R"w2c_template( SIMD_FORCE_READ(result); \
)w2c_template"
-R"w2c_template( SIMD_FORCE_READ(result); \
+R"w2c_template( return result; \
)w2c_template"
-R"w2c_template( return result; \
+R"w2c_template( } \
)w2c_template"
-R"w2c_template( }
+R"w2c_template( DEF_MEM_CHECKS0(name, _, t, return, v128);
)w2c_template"
R"w2c_template(
#define DEFINE_SIMD_LOAD_LANE(name, func, t, lane) \
)w2c_template"
-R"w2c_template( static inline v128 name(wasm_rt_memory_t* mem, u64 addr, v128 vec) { \
+R"w2c_template( static inline v128 name##_unchecked(wasm_rt_memory_t* mem, u64 addr, \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t); \
+R"w2c_template( v128 vec) { \
)w2c_template"
R"w2c_template( v128 result = func(MEM_ADDR(mem, addr, sizeof(t)), vec, lane); \
)w2c_template"
@@ -42,29 +42,35 @@ R"w2c_template( SIMD_FORCE_READ(result);
)w2c_template"
R"w2c_template( return result; \
)w2c_template"
-R"w2c_template( }
+R"w2c_template( } \
+)w2c_template"
+R"w2c_template( DEF_MEM_CHECKS1(name, _, t, return, v128, v128);
)w2c_template"
R"w2c_template(
-#define DEFINE_SIMD_STORE(name, t) \
+#define DEFINE_SIMD_STORE(name, t) \
+)w2c_template"
+R"w2c_template( static inline void name##_unchecked(wasm_rt_memory_t* mem, u64 addr, \
)w2c_template"
-R"w2c_template( static inline void name(wasm_rt_memory_t* mem, u64 addr, v128 value) { \
+R"w2c_template( v128 value) { \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t); \
+R"w2c_template( simde_wasm_v128_store(MEM_ADDR(mem, addr, sizeof(t)), value); \
)w2c_template"
-R"w2c_template( simde_wasm_v128_store(MEM_ADDR(mem, addr, sizeof(t)), value); \
+R"w2c_template( } \
)w2c_template"
-R"w2c_template( }
+R"w2c_template( DEF_MEM_CHECKS1(name, _, t, , void, v128);
)w2c_template"
R"w2c_template(
-#define DEFINE_SIMD_STORE_LANE(name, func, t, lane) \
+#define DEFINE_SIMD_STORE_LANE(name, func, t, lane) \
+)w2c_template"
+R"w2c_template( static inline void name##_unchecked(wasm_rt_memory_t* mem, u64 addr, \
)w2c_template"
-R"w2c_template( static inline void name(wasm_rt_memory_t* mem, u64 addr, v128 value) { \
+R"w2c_template( v128 value) { \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t); \
+R"w2c_template( func(MEM_ADDR(mem, addr, sizeof(t)), value, lane); \
)w2c_template"
-R"w2c_template( func(MEM_ADDR(mem, addr, sizeof(t)), value, lane); \
+R"w2c_template( } \
)w2c_template"
-R"w2c_template( }
+R"w2c_template( DEF_MEM_CHECKS1(name, _, t, , void, v128);
)w2c_template"
R"w2c_template(
// clang-format off
diff --git a/src/prebuilt/wasm2c_source_declarations.cc b/src/prebuilt/wasm2c_source_declarations.cc
index 87529c40..557bf67b 100644
--- a/src/prebuilt/wasm2c_source_declarations.cc
+++ b/src/prebuilt/wasm2c_source_declarations.cc
@@ -165,15 +165,32 @@ R"w2c_template( (CHECK_CALL_INDIRECT(table, ft, x), \
R"w2c_template( DO_CALL_INDIRECT(table, t, x, __VA_ARGS__))
)w2c_template"
R"w2c_template(
-#ifdef SUPPORT_MEMORY64
+static inline bool add_overflow(uint64_t a, uint64_t b, uint64_t* resptr) {
)w2c_template"
-R"w2c_template(#define RANGE_CHECK(mem, offset, len) \
+R"w2c_template(#if __has_builtin(__builtin_add_overflow)
+)w2c_template"
+R"w2c_template( return __builtin_add_overflow(a, b, resptr);
+)w2c_template"
+R"w2c_template(#elif defined(_MSC_VER)
+)w2c_template"
+R"w2c_template( return _addcarry_u64(0, a, b, resptr);
+)w2c_template"
+R"w2c_template(#else
+)w2c_template"
+R"w2c_template(#error "Missing implementation of __builtin_add_overflow or _addcarry_u64"
+)w2c_template"
+R"w2c_template(#endif
+)w2c_template"
+R"w2c_template(}
+)w2c_template"
+R"w2c_template(
+#define RANGE_CHECK(mem, offset, len) \
)w2c_template"
R"w2c_template( do { \
)w2c_template"
R"w2c_template( uint64_t res; \
)w2c_template"
-R"w2c_template( if (__builtin_add_overflow(offset, len, &res)) \
+R"w2c_template( if (UNLIKELY(add_overflow(offset, len, &res))) \
)w2c_template"
R"w2c_template( TRAP(OOB); \
)w2c_template"
@@ -183,16 +200,6 @@ R"w2c_template( TRAP(OOB); \
)w2c_template"
R"w2c_template( } while (0);
)w2c_template"
-R"w2c_template(#else
-)w2c_template"
-R"w2c_template(#define RANGE_CHECK(mem, offset, len) \
-)w2c_template"
-R"w2c_template( if (UNLIKELY(offset + (uint64_t)len > mem->size)) \
-)w2c_template"
-R"w2c_template( TRAP(OOB);
-)w2c_template"
-R"w2c_template(#endif
-)w2c_template"
R"w2c_template(
#if WASM_RT_USE_SEGUE_FOR_THIS_MODULE && WASM_RT_SANITY_CHECKS
)w2c_template"
@@ -215,21 +222,40 @@ R"w2c_template(#define WASM_RT_CHECK_BASE(mem)
R"w2c_template(#endif
)w2c_template"
R"w2c_template(
-#if WASM_RT_MEMCHECK_GUARD_PAGES
+// MEMCHECK_DEFAULT32 is an "accelerated" MEMCHECK used only for
+)w2c_template"
+R"w2c_template(// default-page-size, 32-bit memories. It may do nothing at all
+)w2c_template"
+R"w2c_template(// (if hardware bounds-checking is enabled via guard pages)
)w2c_template"
-R"w2c_template(#define MEMCHECK(mem, a, t) WASM_RT_CHECK_BASE(mem);
+R"w2c_template(// or it may do a slightly faster RANGE_CHECK.
+)w2c_template"
+R"w2c_template(#if WASM_RT_MEMCHECK_GUARD_PAGES
+)w2c_template"
+R"w2c_template(#define MEMCHECK_DEFAULT32(mem, a, t) WASM_RT_CHECK_BASE(mem);
)w2c_template"
R"w2c_template(#else
)w2c_template"
-R"w2c_template(#define MEMCHECK(mem, a, t) \
+R"w2c_template(#define MEMCHECK_DEFAULT32(mem, a, t) \
+)w2c_template"
+R"w2c_template( WASM_RT_CHECK_BASE(mem); \
)w2c_template"
-R"w2c_template( WASM_RT_CHECK_BASE(mem); \
+R"w2c_template( if (UNLIKELY(a + (uint64_t)sizeof(t) > mem->size)) \
)w2c_template"
-R"w2c_template( RANGE_CHECK(mem, a, sizeof(t))
+R"w2c_template( TRAP(OOB);
)w2c_template"
R"w2c_template(#endif
)w2c_template"
R"w2c_template(
+// MEMCHECK_GENERAL can be used for any memory
+)w2c_template"
+R"w2c_template(#define MEMCHECK_GENERAL(mem, a, t) \
+)w2c_template"
+R"w2c_template( WASM_RT_CHECK_BASE(mem); \
+)w2c_template"
+R"w2c_template( RANGE_CHECK(mem, a, sizeof(t));
+)w2c_template"
+R"w2c_template(
#ifdef __GNUC__
)w2c_template"
R"w2c_template(#define FORCE_READ_INT(var) __asm__("" ::"r"(var));
@@ -299,30 +325,103 @@ R"w2c_template( load_data(MEM_ADDR(&m, o, s), i, s); \
R"w2c_template( } while (0)
)w2c_template"
R"w2c_template(
-#define DEFINE_LOAD(name, t1, t2, t3, force_read) \
+#define DEF_MEM_CHECKS0(name, shared, mem_type, ret_kw, return_type) \
+)w2c_template"
+R"w2c_template( static inline return_type name##_default32(wasm_rt##shared##memory_t* mem, \
+)w2c_template"
+R"w2c_template( u64 addr) { \
+)w2c_template"
+R"w2c_template( MEMCHECK_DEFAULT32(mem, addr, mem_type); \
+)w2c_template"
+R"w2c_template( ret_kw name##_unchecked(mem, addr); \
+)w2c_template"
+R"w2c_template( } \
+)w2c_template"
+R"w2c_template( static inline return_type name(wasm_rt##shared##memory_t* mem, u64 addr) { \
+)w2c_template"
+R"w2c_template( MEMCHECK_GENERAL(mem, addr, mem_type); \
+)w2c_template"
+R"w2c_template( ret_kw name##_unchecked(mem, addr); \
+)w2c_template"
+R"w2c_template( }
+)w2c_template"
+R"w2c_template(
+#define DEF_MEM_CHECKS1(name, shared, mem_type, ret_kw, return_type, \
+)w2c_template"
+R"w2c_template( val_type1) \
+)w2c_template"
+R"w2c_template( static inline return_type name##_default32(wasm_rt##shared##memory_t* mem, \
+)w2c_template"
+R"w2c_template( u64 addr, val_type1 val1) { \
+)w2c_template"
+R"w2c_template( MEMCHECK_DEFAULT32(mem, addr, mem_type); \
+)w2c_template"
+R"w2c_template( ret_kw name##_unchecked(mem, addr, val1); \
+)w2c_template"
+R"w2c_template( } \
+)w2c_template"
+R"w2c_template( static inline return_type name(wasm_rt##shared##memory_t* mem, u64 addr, \
+)w2c_template"
+R"w2c_template( val_type1 val1) { \
+)w2c_template"
+R"w2c_template( MEMCHECK_GENERAL(mem, addr, mem_type); \
+)w2c_template"
+R"w2c_template( ret_kw name##_unchecked(mem, addr, val1); \
+)w2c_template"
+R"w2c_template( }
+)w2c_template"
+R"w2c_template(
+#define DEF_MEM_CHECKS2(name, shared, mem_type, ret_kw, return_type, \
+)w2c_template"
+R"w2c_template( val_type1, val_type2) \
+)w2c_template"
+R"w2c_template( static inline return_type name##_default32(wasm_rt##shared##memory_t* mem, \
+)w2c_template"
+R"w2c_template( u64 addr, val_type1 val1, \
+)w2c_template"
+R"w2c_template( val_type2 val2) { \
)w2c_template"
-R"w2c_template( static inline t3 name(wasm_rt_memory_t* mem, u64 addr) { \
+R"w2c_template( MEMCHECK_DEFAULT32(mem, addr, mem_type); \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t1); \
+R"w2c_template( ret_kw name##_unchecked(mem, addr, val1, val2); \
)w2c_template"
-R"w2c_template( t1 result; \
+R"w2c_template( } \
)w2c_template"
-R"w2c_template( wasm_rt_memcpy(&result, MEM_ADDR_MEMOP(mem, addr, sizeof(t1)), \
+R"w2c_template( static inline return_type name(wasm_rt##shared##memory_t* mem, u64 addr, \
)w2c_template"
-R"w2c_template( sizeof(t1)); \
+R"w2c_template( val_type1 val1, val_type2 val2) { \
)w2c_template"
-R"w2c_template( force_read(result); \
+R"w2c_template( MEMCHECK_GENERAL(mem, addr, mem_type); \
)w2c_template"
-R"w2c_template( return (t3)(t2)result; \
+R"w2c_template( ret_kw name##_unchecked(mem, addr, val1, val2); \
)w2c_template"
R"w2c_template( }
)w2c_template"
R"w2c_template(
+#define DEFINE_LOAD(name, t1, t2, t3, force_read) \
+)w2c_template"
+R"w2c_template( static inline t3 name##_unchecked(wasm_rt_memory_t* mem, u64 addr) { \
+)w2c_template"
+R"w2c_template( t1 result; \
+)w2c_template"
+R"w2c_template( wasm_rt_memcpy(&result, MEM_ADDR_MEMOP(mem, addr, sizeof(t1)), \
+)w2c_template"
+R"w2c_template( sizeof(t1)); \
+)w2c_template"
+R"w2c_template( force_read(result); \
+)w2c_template"
+R"w2c_template( return (t3)(t2)result; \
+)w2c_template"
+R"w2c_template( } \
+)w2c_template"
+R"w2c_template( DEF_MEM_CHECKS0(name, _, t1, return, t3)
+)w2c_template"
+R"w2c_template(
#define DEFINE_STORE(name, t1, t2) \
)w2c_template"
-R"w2c_template( static inline void name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
+R"w2c_template( static inline void name##_unchecked(wasm_rt_memory_t* mem, u64 addr, \
)w2c_template"
-R"w2c_template( MEMCHECK(mem, addr, t1); \
+R"w2c_template( t2 value) { \
)w2c_template"
R"w2c_template( t1 wrapped = (t1)value; \
)w2c_template"
@@ -330,7 +429,9 @@ R"w2c_template( wasm_rt_memcpy(MEM_ADDR_MEMOP(mem, addr, sizeof(t1)), &wrappe
)w2c_template"
R"w2c_template( sizeof(t1)); \
)w2c_template"
-R"w2c_template( }
+R"w2c_template( } \
+)w2c_template"
+R"w2c_template( DEF_MEM_CHECKS1(name, _, t1, , void, t2)
)w2c_template"
R"w2c_template(
DEFINE_LOAD(i32_load, u32, u32, u32, FORCE_READ_INT)
@@ -976,7 +1077,7 @@ R"w2c_template( return sqrtf(x);
R"w2c_template(}
)w2c_template"
R"w2c_template(
-static inline void memory_fill(wasm_rt_memory_t* mem, u32 d, u32 val, u32 n) {
+static inline void memory_fill(wasm_rt_memory_t* mem, u64 d, u32 val, u64 n) {
)w2c_template"
R"w2c_template( RANGE_CHECK(mem, d, n);
)w2c_template"
@@ -989,11 +1090,11 @@ static inline void memory_copy(wasm_rt_memory_t* dest,
)w2c_template"
R"w2c_template( const wasm_rt_memory_t* src,
)w2c_template"
-R"w2c_template( u32 dest_addr,
+R"w2c_template( u64 dest_addr,
)w2c_template"
-R"w2c_template( u32 src_addr,
+R"w2c_template( u64 src_addr,
)w2c_template"
-R"w2c_template( u32 n) {
+R"w2c_template( u64 n) {
)w2c_template"
R"w2c_template( RANGE_CHECK(dest, dest_addr, n);
)w2c_template"
@@ -1010,7 +1111,7 @@ R"w2c_template( const u8* src,
)w2c_template"
R"w2c_template( u32 src_size,
)w2c_template"
-R"w2c_template( u32 dest_addr,
+R"w2c_template( u64 dest_addr,
)w2c_template"
R"w2c_template( u32 src_addr,
)w2c_template"
@@ -1046,7 +1147,7 @@ R"w2c_template( const wasm_elem_segment_exp
)w2c_template"
R"w2c_template( u32 src_size,
)w2c_template"
-R"w2c_template( u32 dest_addr,
+R"w2c_template( u64 dest_addr,
)w2c_template"
R"w2c_template( u32 src_addr,
)w2c_template"
@@ -1058,9 +1159,7 @@ R"w2c_template( if (UNLIKELY(src_addr + (uint64_t)n > src_size))
)w2c_template"
R"w2c_template( TRAP(OOB);
)w2c_template"
-R"w2c_template( if (UNLIKELY(dest_addr + (uint64_t)n > dest->size))
-)w2c_template"
-R"w2c_template( TRAP(OOB);
+R"w2c_template( RANGE_CHECK(dest, dest_addr, n);
)w2c_template"
R"w2c_template( for (u32 i = 0; i < n; i++) {
)w2c_template"
@@ -1107,7 +1206,7 @@ R"w2c_template(static inline void externref_table_init(wasm_rt_externref_table_t
)w2c_template"
R"w2c_template( u32 src_size,
)w2c_template"
-R"w2c_template( u32 dest_addr,
+R"w2c_template( u64 dest_addr,
)w2c_template"
R"w2c_template( u32 src_addr,
)w2c_template"
@@ -1117,9 +1216,7 @@ R"w2c_template( if (UNLIKELY(src_addr + (uint64_t)n > src_size))
)w2c_template"
R"w2c_template( TRAP(OOB);
)w2c_template"
-R"w2c_template( if (UNLIKELY(dest_addr + (uint64_t)n > dest->size))
-)w2c_template"
-R"w2c_template( TRAP(OOB);
+R"w2c_template( RANGE_CHECK(dest, dest_addr, n);
)w2c_template"
R"w2c_template( for (u32 i = 0; i < n; i++) {
)w2c_template"
@@ -1136,17 +1233,11 @@ R"w2c_template( static inline void type##_table_copy(wasm_rt_##type##_table_t*
)w2c_template"
R"w2c_template( const wasm_rt_##type##_table_t* src, \
)w2c_template"
-R"w2c_template( u32 dest_addr, u32 src_addr, u32 n) { \
+R"w2c_template( u64 dest_addr, u64 src_addr, u64 n) { \
)w2c_template"
-R"w2c_template( if (UNLIKELY(dest_addr + (uint64_t)n > dest->size)) \
+R"w2c_template( RANGE_CHECK(dest, dest_addr, n); \
)w2c_template"
-R"w2c_template( TRAP(OOB); \
-)w2c_template"
-R"w2c_template( if (UNLIKELY(src_addr + (uint64_t)n > src->size)) \
-)w2c_template"
-R"w2c_template( TRAP(OOB); \
-)w2c_template"
-R"w2c_template( \
+R"w2c_template( RANGE_CHECK(src, src_addr, n); \
)w2c_template"
R"w2c_template( memmove(dest->data + dest_addr, src->data + src_addr, \
)w2c_template"
@@ -1164,7 +1255,7 @@ R"w2c_template(
)w2c_template"
R"w2c_template( static inline wasm_rt_##type##_t type##_table_get( \
)w2c_template"
-R"w2c_template( const wasm_rt_##type##_table_t* table, u32 i) { \
+R"w2c_template( const wasm_rt_##type##_table_t* table, u64 i) { \
)w2c_template"
R"w2c_template( if (UNLIKELY(i >= table->size)) \
)w2c_template"
@@ -1184,7 +1275,7 @@ R"w2c_template(
)w2c_template"
R"w2c_template( static inline void type##_table_set(const wasm_rt_##type##_table_t* table, \
)w2c_template"
-R"w2c_template( u32 i, const wasm_rt_##type##_t val) { \
+R"w2c_template( u64 i, const wasm_rt_##type##_t val) { \
)w2c_template"
R"w2c_template( if (UNLIKELY(i >= table->size)) \
)w2c_template"
@@ -1204,13 +1295,11 @@ R"w2c_template(
)w2c_template"
R"w2c_template( static inline void type##_table_fill(const wasm_rt_##type##_table_t* table, \
)w2c_template"
-R"w2c_template( u32 d, const wasm_rt_##type##_t val, \
-)w2c_template"
-R"w2c_template( u32 n) { \
+R"w2c_template( u64 d, const wasm_rt_##type##_t val, \
)w2c_template"
-R"w2c_template( if (UNLIKELY((uint64_t)d + n > table->size)) \
+R"w2c_template( u64 n) { \
)w2c_template"
-R"w2c_template( TRAP(OOB); \
+R"w2c_template( RANGE_CHECK(table, d, n); \
)w2c_template"
R"w2c_template( for (uint32_t i = d; i < d + n; i++) { \
)w2c_template"
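Why the overflow check in RANGE_CHECK is now unconditional: with 64-bit offsets and lengths, offset + len can wrap around and slip past a naive size comparison. A standalone sketch of the failure mode (GCC/Clang builtin shown; the MSVC path above uses _addcarry_u64; mem_size is a hypothetical value):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the add_overflow helper introduced above (GCC/Clang path). */
static bool add_overflow(uint64_t a, uint64_t b, uint64_t* res) {
  return __builtin_add_overflow(a, b, res);
}

int main(void) {
  const uint64_t mem_size = 65536;             /* hypothetical memory size */
  const uint64_t offset = UINT64_MAX, len = 2; /* offset + len wraps to 1 */
  uint64_t end;
  if (!(offset + len > mem_size))      /* wraps: 1 > 65536 is false */
    printf("naive check missed the out-of-bounds access\n");
  if (add_overflow(offset, len, &end)) /* RANGE_CHECK's approach */
    printf("overflow detected -> TRAP(OOB)\n");
  return 0;
}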
diff --git a/src/template/wasm2c.declarations.c b/src/template/wasm2c.declarations.c
index 296391ed..ecc7b680 100644
--- a/src/template/wasm2c.declarations.c
+++ b/src/template/wasm2c.declarations.c
@@ -89,20 +89,24 @@ static inline bool func_types_eq(const wasm_rt_func_type_t a,
(CHECK_CALL_INDIRECT(table, ft, x), \
DO_CALL_INDIRECT(table, t, x, __VA_ARGS__))
-#ifdef SUPPORT_MEMORY64
+static inline bool add_overflow(uint64_t a, uint64_t b, uint64_t* resptr) {
+#if __has_builtin(__builtin_add_overflow)
+ return __builtin_add_overflow(a, b, resptr);
+#elif defined(_MSC_VER)
+ return _addcarry_u64(0, a, b, resptr);
+#else
+#error "Missing implementation of __builtin_add_overflow or _addcarry_u64"
+#endif
+}
+
#define RANGE_CHECK(mem, offset, len) \
do { \
uint64_t res; \
- if (__builtin_add_overflow(offset, len, &res)) \
+ if (UNLIKELY(add_overflow(offset, len, &res))) \
TRAP(OOB); \
if (UNLIKELY(res > mem->size)) \
TRAP(OOB); \
} while (0);
-#else
-#define RANGE_CHECK(mem, offset, len) \
- if (UNLIKELY(offset + (uint64_t)len > mem->size)) \
- TRAP(OOB);
-#endif
#if WASM_RT_USE_SEGUE_FOR_THIS_MODULE && WASM_RT_SANITY_CHECKS
#include <stdio.h>
@@ -115,14 +119,24 @@ static inline bool func_types_eq(const wasm_rt_func_type_t a,
#define WASM_RT_CHECK_BASE(mem)
#endif
+// MEMCHECK_DEFAULT32 is an "accelerated" MEMCHECK used only for
+// default-page-size, 32-bit memories. It may do nothing at all
+// (if hardware bounds-checking is enabled via guard pages)
+// or it may do a slightly faster RANGE_CHECK.
#if WASM_RT_MEMCHECK_GUARD_PAGES
-#define MEMCHECK(mem, a, t) WASM_RT_CHECK_BASE(mem);
+#define MEMCHECK_DEFAULT32(mem, a, t) WASM_RT_CHECK_BASE(mem);
#else
-#define MEMCHECK(mem, a, t) \
- WASM_RT_CHECK_BASE(mem); \
- RANGE_CHECK(mem, a, sizeof(t))
+#define MEMCHECK_DEFAULT32(mem, a, t) \
+ WASM_RT_CHECK_BASE(mem); \
+ if (UNLIKELY(a + (uint64_t)sizeof(t) > mem->size)) \
+ TRAP(OOB);
#endif
+// MEMCHECK_GENERAL can be used for any memory
+#define MEMCHECK_GENERAL(mem, a, t) \
+ WASM_RT_CHECK_BASE(mem); \
+ RANGE_CHECK(mem, a, sizeof(t));
+
#ifdef __GNUC__
#define FORCE_READ_INT(var) __asm__("" ::"r"(var));
// Clang on Mips requires "f" constraints on floats
@@ -159,23 +173,62 @@ static inline void load_data(void* dest, const void* src, size_t n) {
load_data(MEM_ADDR(&m, o, s), i, s); \
} while (0)
-#define DEFINE_LOAD(name, t1, t2, t3, force_read) \
- static inline t3 name(wasm_rt_memory_t* mem, u64 addr) { \
- MEMCHECK(mem, addr, t1); \
- t1 result; \
- wasm_rt_memcpy(&result, MEM_ADDR_MEMOP(mem, addr, sizeof(t1)), \
- sizeof(t1)); \
- force_read(result); \
- return (t3)(t2)result; \
- }
+#define DEF_MEM_CHECKS0(name, shared, mem_type, ret_kw, return_type) \
+ static inline return_type name##_default32(wasm_rt##shared##memory_t* mem, \
+ u64 addr) { \
+ MEMCHECK_DEFAULT32(mem, addr, mem_type); \
+ ret_kw name##_unchecked(mem, addr); \
+ } \
+ static inline return_type name(wasm_rt##shared##memory_t* mem, u64 addr) { \
+ MEMCHECK_GENERAL(mem, addr, mem_type); \
+ ret_kw name##_unchecked(mem, addr); \
+ }
+
+#define DEF_MEM_CHECKS1(name, shared, mem_type, ret_kw, return_type, \
+ val_type1) \
+ static inline return_type name##_default32(wasm_rt##shared##memory_t* mem, \
+ u64 addr, val_type1 val1) { \
+ MEMCHECK_DEFAULT32(mem, addr, mem_type); \
+ ret_kw name##_unchecked(mem, addr, val1); \
+ } \
+ static inline return_type name(wasm_rt##shared##memory_t* mem, u64 addr, \
+ val_type1 val1) { \
+ MEMCHECK_GENERAL(mem, addr, mem_type); \
+ ret_kw name##_unchecked(mem, addr, val1); \
+ }
+
+#define DEF_MEM_CHECKS2(name, shared, mem_type, ret_kw, return_type, \
+ val_type1, val_type2) \
+ static inline return_type name##_default32(wasm_rt##shared##memory_t* mem, \
+ u64 addr, val_type1 val1, \
+ val_type2 val2) { \
+ MEMCHECK_DEFAULT32(mem, addr, mem_type); \
+ ret_kw name##_unchecked(mem, addr, val1, val2); \
+ } \
+ static inline return_type name(wasm_rt##shared##memory_t* mem, u64 addr, \
+ val_type1 val1, val_type2 val2) { \
+ MEMCHECK_GENERAL(mem, addr, mem_type); \
+ ret_kw name##_unchecked(mem, addr, val1, val2); \
+ }
+
+#define DEFINE_LOAD(name, t1, t2, t3, force_read) \
+ static inline t3 name##_unchecked(wasm_rt_memory_t* mem, u64 addr) { \
+ t1 result; \
+ wasm_rt_memcpy(&result, MEM_ADDR_MEMOP(mem, addr, sizeof(t1)), \
+ sizeof(t1)); \
+ force_read(result); \
+ return (t3)(t2)result; \
+ } \
+ DEF_MEM_CHECKS0(name, _, t1, return, t3)
#define DEFINE_STORE(name, t1, t2) \
- static inline void name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
- MEMCHECK(mem, addr, t1); \
+ static inline void name##_unchecked(wasm_rt_memory_t* mem, u64 addr, \
+ t2 value) { \
t1 wrapped = (t1)value; \
wasm_rt_memcpy(MEM_ADDR_MEMOP(mem, addr, sizeof(t1)), &wrapped, \
sizeof(t1)); \
- }
+ } \
+ DEF_MEM_CHECKS1(name, _, t1, , void, t2)
DEFINE_LOAD(i32_load, u32, u32, u32, FORCE_READ_INT)
DEFINE_LOAD(i64_load, u64, u64, u64, FORCE_READ_INT)
@@ -523,16 +576,16 @@ static float wasm_sqrtf(float x) {
return sqrtf(x);
}
-static inline void memory_fill(wasm_rt_memory_t* mem, u32 d, u32 val, u32 n) {
+static inline void memory_fill(wasm_rt_memory_t* mem, u64 d, u32 val, u64 n) {
RANGE_CHECK(mem, d, n);
memset(MEM_ADDR(mem, d, n), val, n);
}
static inline void memory_copy(wasm_rt_memory_t* dest,
const wasm_rt_memory_t* src,
- u32 dest_addr,
- u32 src_addr,
- u32 n) {
+ u64 dest_addr,
+ u64 src_addr,
+ u64 n) {
RANGE_CHECK(dest, dest_addr, n);
RANGE_CHECK(src, src_addr, n);
memmove(MEM_ADDR(dest, dest_addr, n), MEM_ADDR(src, src_addr, n), n);
@@ -541,7 +594,7 @@ static inline void memory_copy(wasm_rt_memory_t* dest,
static inline void memory_init(wasm_rt_memory_t* dest,
const u8* src,
u32 src_size,
- u32 dest_addr,
+ u64 dest_addr,
u32 src_addr,
u32 n) {
if (UNLIKELY(src_addr + (uint64_t)n > src_size))
@@ -560,14 +613,13 @@ typedef struct {
static inline void funcref_table_init(wasm_rt_funcref_table_t* dest,
const wasm_elem_segment_expr_t* src,
u32 src_size,
- u32 dest_addr,
+ u64 dest_addr,
u32 src_addr,
u32 n,
void* module_instance) {
if (UNLIKELY(src_addr + (uint64_t)n > src_size))
TRAP(OOB);
- if (UNLIKELY(dest_addr + (uint64_t)n > dest->size))
- TRAP(OOB);
+ RANGE_CHECK(dest, dest_addr, n);
for (u32 i = 0; i < n; i++) {
const wasm_elem_segment_expr_t* const src_expr = &src[src_addr + i];
wasm_rt_funcref_t* const dest_val = &(dest->data[dest_addr + i]);
@@ -591,13 +643,12 @@ static inline void funcref_table_init(wasm_rt_funcref_table_t* dest,
// Currently wasm2c only supports initializing externref tables with ref.null.
static inline void externref_table_init(wasm_rt_externref_table_t* dest,
u32 src_size,
- u32 dest_addr,
+ u64 dest_addr,
u32 src_addr,
u32 n) {
if (UNLIKELY(src_addr + (uint64_t)n > src_size))
TRAP(OOB);
- if (UNLIKELY(dest_addr + (uint64_t)n > dest->size))
- TRAP(OOB);
+ RANGE_CHECK(dest, dest_addr, n);
for (u32 i = 0; i < n; i++) {
dest->data[dest_addr + i] = wasm_rt_externref_null_value;
}
@@ -606,12 +657,9 @@ static inline void externref_table_init(wasm_rt_externref_table_t* dest,
#define DEFINE_TABLE_COPY(type) \
static inline void type##_table_copy(wasm_rt_##type##_table_t* dest, \
const wasm_rt_##type##_table_t* src, \
- u32 dest_addr, u32 src_addr, u32 n) { \
- if (UNLIKELY(dest_addr + (uint64_t)n > dest->size)) \
- TRAP(OOB); \
- if (UNLIKELY(src_addr + (uint64_t)n > src->size)) \
- TRAP(OOB); \
- \
+ u64 dest_addr, u64 src_addr, u64 n) { \
+ RANGE_CHECK(dest, dest_addr, n); \
+ RANGE_CHECK(src, src_addr, n); \
memmove(dest->data + dest_addr, src->data + src_addr, \
n * sizeof(wasm_rt_##type##_t)); \
}
@@ -621,7 +669,7 @@ DEFINE_TABLE_COPY(externref)
#define DEFINE_TABLE_GET(type) \
static inline wasm_rt_##type##_t type##_table_get( \
- const wasm_rt_##type##_table_t* table, u32 i) { \
+ const wasm_rt_##type##_table_t* table, u64 i) { \
if (UNLIKELY(i >= table->size)) \
TRAP(OOB); \
return table->data[i]; \
@@ -632,7 +680,7 @@ DEFINE_TABLE_GET(externref)
#define DEFINE_TABLE_SET(type) \
static inline void type##_table_set(const wasm_rt_##type##_table_t* table, \
- u32 i, const wasm_rt_##type##_t val) { \
+ u64 i, const wasm_rt_##type##_t val) { \
if (UNLIKELY(i >= table->size)) \
TRAP(OOB); \
table->data[i] = val; \
@@ -643,10 +691,9 @@ DEFINE_TABLE_SET(externref)
#define DEFINE_TABLE_FILL(type) \
static inline void type##_table_fill(const wasm_rt_##type##_table_t* table, \
- u32 d, const wasm_rt_##type##_t val, \
- u32 n) { \
- if (UNLIKELY((uint64_t)d + n > table->size)) \
- TRAP(OOB); \
+ u64 d, const wasm_rt_##type##_t val, \
+ u64 n) { \
+ RANGE_CHECK(table, d, n); \
for (uint32_t i = d; i < d + n; i++) { \
table->data[i] = val; \
} \
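For reference on the empty ret_kw argument in DEF_MEM_CHECKS1(name, _, t1, , void, t2): an illustrative hand expansion of DEFINE_STORE(i32_store, u32, u32) under the new macros (runtime types and MEMCHECK_* as defined above):

static inline void i32_store_unchecked(wasm_rt_memory_t* mem, u64 addr,
                                       u32 value) {
  u32 wrapped = (u32)value;
  wasm_rt_memcpy(MEM_ADDR_MEMOP(mem, addr, sizeof(u32)), &wrapped,
                 sizeof(u32));
}
/* DEF_MEM_CHECKS1(i32_store, _, u32, , void, u32) then yields: */
static inline void i32_store_default32(wasm_rt_memory_t* mem, u64 addr,
                                       u32 value) {
  MEMCHECK_DEFAULT32(mem, addr, u32); /* empty ret_kw: plain call, no return */
  i32_store_unchecked(mem, addr, value);
}
static inline void i32_store(wasm_rt_memory_t* mem, u64 addr, u32 value) {
  MEMCHECK_GENERAL(mem, addr, u32);
  i32_store_unchecked(mem, addr, value);
}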
diff --git a/src/template/wasm2c_atomicops.declarations.c b/src/template/wasm2c_atomicops.declarations.c
index 5d9cdf90..cb25da51 100644
--- a/src/template/wasm2c_atomicops.declarations.c
+++ b/src/template/wasm2c_atomicops.declarations.c
@@ -9,16 +9,16 @@
TRAP(UNALIGNED); \
}
-#define DEFINE_SHARED_LOAD(name, t1, t2, t3, force_read) \
- static inline t3 name(wasm_rt_shared_memory_t* mem, u64 addr) { \
- MEMCHECK(mem, addr, t1); \
- t1 result; \
- result = atomic_load_explicit( \
- (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), \
- memory_order_relaxed); \
- force_read(result); \
- return (t3)(t2)result; \
- }
+#define DEFINE_SHARED_LOAD(name, t1, t2, t3, force_read) \
+ static inline t3 name##_unchecked(wasm_rt_shared_memory_t* mem, u64 addr) { \
+ t1 result; \
+ result = atomic_load_explicit( \
+ (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), \
+ memory_order_relaxed); \
+ force_read(result); \
+ return (t3)(t2)result; \
+ } \
+ DEF_MEM_CHECKS0(name, _shared_, t1, return, t3)
DEFINE_SHARED_LOAD(i32_load_shared, u32, u32, u32, FORCE_READ_INT)
DEFINE_SHARED_LOAD(i64_load_shared, u64, u64, u64, FORCE_READ_INT)
@@ -36,13 +36,14 @@ DEFINE_SHARED_LOAD(i64_load32_s_shared, s32, s64, u64, FORCE_READ_INT)
DEFINE_SHARED_LOAD(i64_load32_u_shared, u32, u64, u64, FORCE_READ_INT)
#define DEFINE_SHARED_STORE(name, t1, t2) \
- static inline void name(wasm_rt_shared_memory_t* mem, u64 addr, t2 value) { \
- MEMCHECK(mem, addr, t1); \
+ static inline void name##_unchecked(wasm_rt_shared_memory_t* mem, u64 addr, \
+ t2 value) { \
t1 wrapped = (t1)value; \
atomic_store_explicit( \
(_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wrapped, \
memory_order_relaxed); \
- }
+ } \
+ DEF_MEM_CHECKS1(name, _shared_, t1, , void, t2)
DEFINE_SHARED_STORE(i32_store_shared, u32, u32)
DEFINE_SHARED_STORE(i64_store_shared, u64, u64)
@@ -55,23 +56,24 @@ DEFINE_SHARED_STORE(i64_store16_shared, u16, u64)
DEFINE_SHARED_STORE(i64_store32_shared, u32, u64)
#define DEFINE_ATOMIC_LOAD(name, t1, t2, t3, force_read) \
- static inline t3 name(wasm_rt_memory_t* mem, u64 addr) { \
- MEMCHECK(mem, addr, t1); \
+ static inline t3 name##_unchecked(wasm_rt_memory_t* mem, u64 addr) { \
ATOMIC_ALIGNMENT_CHECK(addr, t1); \
t1 result; \
wasm_rt_memcpy(&result, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1)); \
force_read(result); \
return (t3)(t2)result; \
} \
- static inline t3 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr) { \
- MEMCHECK(mem, addr, t1); \
+ DEF_MEM_CHECKS0(name, _, t1, return, t3) \
+ static inline t3 name##_shared_unchecked(wasm_rt_shared_memory_t* mem, \
+ u64 addr) { \
ATOMIC_ALIGNMENT_CHECK(addr, t1); \
t1 result; \
result = \
atomic_load((_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1))); \
force_read(result); \
return (t3)(t2)result; \
- }
+ } \
+ DEF_MEM_CHECKS0(name##_shared, _shared_, t1, return, t3)
DEFINE_ATOMIC_LOAD(i32_atomic_load, u32, u32, u32, FORCE_READ_INT)
DEFINE_ATOMIC_LOAD(i64_atomic_load, u64, u64, u64, FORCE_READ_INT)
@@ -82,20 +84,21 @@ DEFINE_ATOMIC_LOAD(i64_atomic_load16_u, u16, u64, u64, FORCE_READ_INT)
DEFINE_ATOMIC_LOAD(i64_atomic_load32_u, u32, u64, u64, FORCE_READ_INT)
#define DEFINE_ATOMIC_STORE(name, t1, t2) \
- static inline void name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
- MEMCHECK(mem, addr, t1); \
+ static inline void name##_unchecked(wasm_rt_memory_t* mem, u64 addr, \
+ t2 value) { \
ATOMIC_ALIGNMENT_CHECK(addr, t1); \
t1 wrapped = (t1)value; \
wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t1)), &wrapped, sizeof(t1)); \
} \
- static inline void name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
- t2 value) { \
- MEMCHECK(mem, addr, t1); \
+ DEF_MEM_CHECKS1(name, _, t1, , void, t2) \
+ static inline void name##_shared_unchecked(wasm_rt_shared_memory_t* mem, \
+ u64 addr, t2 value) { \
ATOMIC_ALIGNMENT_CHECK(addr, t1); \
t1 wrapped = (t1)value; \
atomic_store((_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), \
wrapped); \
- }
+ } \
+ DEF_MEM_CHECKS1(name##_shared, _shared_, t1, , void, t2)
DEFINE_ATOMIC_STORE(i32_atomic_store, u32, u32)
DEFINE_ATOMIC_STORE(i64_atomic_store, u64, u64)
@@ -106,8 +109,8 @@ DEFINE_ATOMIC_STORE(i64_atomic_store16, u16, u64)
DEFINE_ATOMIC_STORE(i64_atomic_store32, u32, u64)
#define DEFINE_ATOMIC_RMW(name, opname, op, t1, t2) \
- static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
- MEMCHECK(mem, addr, t1); \
+ static inline t2 name##_unchecked(wasm_rt_memory_t* mem, u64 addr, \
+ t2 value) { \
ATOMIC_ALIGNMENT_CHECK(addr, t1); \
t1 wrapped = (t1)value; \
t1 ret; \
@@ -116,15 +119,16 @@ DEFINE_ATOMIC_STORE(i64_atomic_store32, u32, u64)
wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t1)), &ret, sizeof(t1)); \
return (t2)ret; \
} \
- static inline t2 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
- t2 value) { \
- MEMCHECK(mem, addr, t1); \
+ DEF_MEM_CHECKS1(name, _, t1, return, t2, t2) \
+ static inline t2 name##_shared_unchecked(wasm_rt_shared_memory_t* mem, \
+ u64 addr, t2 value) { \
ATOMIC_ALIGNMENT_CHECK(addr, t1); \
t1 wrapped = (t1)value; \
t1 ret = atomic_##opname( \
(_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wrapped); \
return (t2)ret; \
- }
+ } \
+ DEF_MEM_CHECKS1(name##_shared, _shared_, t1, return, t2, t2)
DEFINE_ATOMIC_RMW(i32_atomic_rmw8_add_u, fetch_add, +, u8, u32)
DEFINE_ATOMIC_RMW(i32_atomic_rmw16_add_u, fetch_add, +, u16, u32)
@@ -167,8 +171,8 @@ DEFINE_ATOMIC_RMW(i64_atomic_rmw32_xor_u, fetch_xor, ^, u32, u64)
DEFINE_ATOMIC_RMW(i64_atomic_rmw_xor, fetch_xor, ^, u64, u64)
#define DEFINE_ATOMIC_XCHG(name, opname, t1, t2) \
- static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) { \
- MEMCHECK(mem, addr, t1); \
+ static inline t2 name##_unchecked(wasm_rt_memory_t* mem, u64 addr, \
+ t2 value) { \
ATOMIC_ALIGNMENT_CHECK(addr, t1); \
t1 wrapped = (t1)value; \
t1 ret; \
@@ -176,15 +180,16 @@ DEFINE_ATOMIC_RMW(i64_atomic_rmw_xor, fetch_xor, ^, u64, u64)
wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t1)), &wrapped, sizeof(t1)); \
return (t2)ret; \
} \
- static inline t2 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
- t2 value) { \
- MEMCHECK(mem, addr, t1); \
+ DEF_MEM_CHECKS1(name, _, t1, return, t2, t2) \
+ static inline t2 name##_shared_unchecked(wasm_rt_shared_memory_t* mem, \
+ u64 addr, t2 value) { \
ATOMIC_ALIGNMENT_CHECK(addr, t1); \
t1 wrapped = (t1)value; \
t1 ret = atomic_##opname( \
(_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wrapped); \
return (t2)ret; \
- }
+ } \
+ DEF_MEM_CHECKS1(name##_shared, _shared_, t1, return, t2, t2)
DEFINE_ATOMIC_XCHG(i32_atomic_rmw8_xchg_u, exchange, u8, u32)
DEFINE_ATOMIC_XCHG(i32_atomic_rmw16_xchg_u, exchange, u16, u32)
@@ -194,32 +199,32 @@ DEFINE_ATOMIC_XCHG(i64_atomic_rmw16_xchg_u, exchange, u16, u64)
DEFINE_ATOMIC_XCHG(i64_atomic_rmw32_xchg_u, exchange, u32, u64)
DEFINE_ATOMIC_XCHG(i64_atomic_rmw_xchg, exchange, u64, u64)
-#define DEFINE_ATOMIC_CMP_XCHG(name, t1, t2) \
- static inline t1 name(wasm_rt_memory_t* mem, u64 addr, t1 expected, \
- t1 replacement) { \
- MEMCHECK(mem, addr, t2); \
- ATOMIC_ALIGNMENT_CHECK(addr, t2); \
- t2 expected_wrapped = (t2)expected; \
- t2 replacement_wrapped = (t2)replacement; \
- t2 ret; \
- wasm_rt_memcpy(&ret, MEM_ADDR(mem, addr, sizeof(t2)), sizeof(t2)); \
- if (ret == expected_wrapped) { \
- wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t2)), &replacement_wrapped, \
- sizeof(t2)); \
- } \
- return (t1)expected_wrapped; \
- } \
- static inline t1 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
- t1 expected, t1 replacement) { \
- MEMCHECK(mem, addr, t2); \
- ATOMIC_ALIGNMENT_CHECK(addr, t2); \
- t2 expected_wrapped = (t2)expected; \
- t2 replacement_wrapped = (t2)replacement; \
- atomic_compare_exchange_strong( \
- (_Atomic volatile t2*)MEM_ADDR(mem, addr, sizeof(t2)), \
- &expected_wrapped, replacement_wrapped); \
- return (t1)expected_wrapped; \
- }
+#define DEFINE_ATOMIC_CMP_XCHG(name, t1, t2) \
+ static inline t1 name##_unchecked(wasm_rt_memory_t* mem, u64 addr, \
+ t1 expected, t1 replacement) { \
+ ATOMIC_ALIGNMENT_CHECK(addr, t2); \
+ t2 expected_wrapped = (t2)expected; \
+ t2 replacement_wrapped = (t2)replacement; \
+ t2 ret; \
+ wasm_rt_memcpy(&ret, MEM_ADDR(mem, addr, sizeof(t2)), sizeof(t2)); \
+ if (ret == expected_wrapped) { \
+ wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t2)), &replacement_wrapped, \
+ sizeof(t2)); \
+ } \
+ return (t1)expected_wrapped; \
+ } \
+ DEF_MEM_CHECKS2(name, _, t2, return, t1, t1, t1) \
+ static inline t1 name##_shared_unchecked( \
+ wasm_rt_shared_memory_t* mem, u64 addr, t1 expected, t1 replacement) { \
+ ATOMIC_ALIGNMENT_CHECK(addr, t2); \
+ t2 expected_wrapped = (t2)expected; \
+ t2 replacement_wrapped = (t2)replacement; \
+ atomic_compare_exchange_strong( \
+ (_Atomic volatile t2*)MEM_ADDR(mem, addr, sizeof(t2)), \
+ &expected_wrapped, replacement_wrapped); \
+ return (t1)expected_wrapped; \
+ } \
+ DEF_MEM_CHECKS2(name##_shared, _shared_, t2, return, t1, t1, t1)
DEFINE_ATOMIC_CMP_XCHG(i32_atomic_rmw8_cmpxchg_u, u32, u8);
DEFINE_ATOMIC_CMP_XCHG(i32_atomic_rmw16_cmpxchg_u, u32, u16);
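The atomic macros fan out further, since each operation also has a shared-memory twin. An illustrative listing of what one visible RMW definition now produces (derived from the macros above):

/* DEFINE_ATOMIC_RMW(i32_atomic_rmw8_add_u, fetch_add, +, u8, u32) defines:
 *   i32_atomic_rmw8_add_u_unchecked          unshared, no bounds check
 *   i32_atomic_rmw8_add_u_default32          unshared, MEMCHECK_DEFAULT32
 *   i32_atomic_rmw8_add_u                    unshared, MEMCHECK_GENERAL
 *   i32_atomic_rmw8_add_u_shared_unchecked   shared,   no bounds check
 *   i32_atomic_rmw8_add_u_shared_default32   shared,   MEMCHECK_DEFAULT32
 *   i32_atomic_rmw8_add_u_shared             shared,   MEMCHECK_GENERAL
 * The C writer selects the _default32 variants only for default-page-size,
 * 32-bit-indexed memories (see GetMemoryAPIString in c-writer.cc above). */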
diff --git a/src/template/wasm2c_simd.declarations.c b/src/template/wasm2c_simd.declarations.c
index 0e2c9511..39eb4578 100644
--- a/src/template/wasm2c_simd.declarations.c
+++ b/src/template/wasm2c_simd.declarations.c
@@ -7,33 +7,36 @@
#endif
// TODO: equivalent constraint for ARM and other architectures
-#define DEFINE_SIMD_LOAD_FUNC(name, func, t) \
- static inline v128 name(wasm_rt_memory_t* mem, u64 addr) { \
- MEMCHECK(mem, addr, t); \
- v128 result = func(MEM_ADDR(mem, addr, sizeof(t))); \
- SIMD_FORCE_READ(result); \
- return result; \
- }
+#define DEFINE_SIMD_LOAD_FUNC(name, func, t) \
+ static inline v128 name##_unchecked(wasm_rt_memory_t* mem, u64 addr) { \
+ v128 result = func(MEM_ADDR(mem, addr, sizeof(t))); \
+ SIMD_FORCE_READ(result); \
+ return result; \
+ } \
+ DEF_MEM_CHECKS0(name, _, t, return, v128);
#define DEFINE_SIMD_LOAD_LANE(name, func, t, lane) \
- static inline v128 name(wasm_rt_memory_t* mem, u64 addr, v128 vec) { \
- MEMCHECK(mem, addr, t); \
+ static inline v128 name##_unchecked(wasm_rt_memory_t* mem, u64 addr, \
+ v128 vec) { \
v128 result = func(MEM_ADDR(mem, addr, sizeof(t)), vec, lane); \
SIMD_FORCE_READ(result); \
return result; \
- }
+ } \
+ DEF_MEM_CHECKS1(name, _, t, return, v128, v128);
-#define DEFINE_SIMD_STORE(name, t) \
- static inline void name(wasm_rt_memory_t* mem, u64 addr, v128 value) { \
- MEMCHECK(mem, addr, t); \
- simde_wasm_v128_store(MEM_ADDR(mem, addr, sizeof(t)), value); \
- }
+#define DEFINE_SIMD_STORE(name, t) \
+ static inline void name##_unchecked(wasm_rt_memory_t* mem, u64 addr, \
+ v128 value) { \
+ simde_wasm_v128_store(MEM_ADDR(mem, addr, sizeof(t)), value); \
+ } \
+ DEF_MEM_CHECKS1(name, _, t, , void, v128);
-#define DEFINE_SIMD_STORE_LANE(name, func, t, lane) \
- static inline void name(wasm_rt_memory_t* mem, u64 addr, v128 value) { \
- MEMCHECK(mem, addr, t); \
- func(MEM_ADDR(mem, addr, sizeof(t)), value, lane); \
- }
+#define DEFINE_SIMD_STORE_LANE(name, func, t, lane) \
+ static inline void name##_unchecked(wasm_rt_memory_t* mem, u64 addr, \
+ v128 value) { \
+ func(MEM_ADDR(mem, addr, sizeof(t)), value, lane); \
+ } \
+ DEF_MEM_CHECKS1(name, _, t, , void, v128);
// clang-format off
#if WABT_BIG_ENDIAN