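/*
 * Shared-memory and atomic operation helpers for wasm2c output, covering the
 * Wasm threads proposal: plain loads/stores on shared memories, atomic
 * loads/stores, read-modify-write operations, exchanges, compare-exchanges,
 * and the atomic fence. Helper macros such as MEMCHECK, MEM_ADDR, TRAP,
 * UNLIKELY, the FORCE_READ_* macros and wasm_rt_memcpy are defined elsewhere
 * in the wasm2c templates and runtime.
 */
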
#include <stdatomic.h>

#ifndef WASM_RT_C11_AVAILABLE
#error "C11 is required for Wasm threads and shared memory support"
#endif

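/* Wasm atomic accesses must be naturally aligned: trap with UNALIGNED when
 * the effective address is not a multiple of the access width. */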
#define ATOMIC_ALIGNMENT_CHECK(addr, t1) \
  if (UNLIKELY(addr % sizeof(t1))) {     \
    TRAP(UNALIGNED);                     \
  }

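/*
 * Plain (non-atomic) Wasm loads from a shared memory. Other threads may be
 * writing concurrently, so the value is read with a relaxed atomic load of
 * the full access width instead of an ordinary read. t1 is the type read
 * from memory, t2 applies any sign extension, and t3 is the Wasm value type
 * returned.
 */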
#define DEFINE_SHARED_LOAD(name, t1, t2, t3, force_read)          \
  static inline t3 name(wasm_rt_shared_memory_t* mem, u64 addr) { \
    MEMCHECK(mem, addr, t1);                                      \
    t1 result;                                                    \
    result = atomic_load_explicit(                                \
        (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)),    \
        memory_order_relaxed);                                    \
    force_read(result);                                           \
    return (t3)(t2)result;                                        \
  }

DEFINE_SHARED_LOAD(i32_load_shared, u32, u32, u32, FORCE_READ_INT)
DEFINE_SHARED_LOAD(i64_load_shared, u64, u64, u64, FORCE_READ_INT)
DEFINE_SHARED_LOAD(f32_load_shared, f32, f32, f32, FORCE_READ_FLOAT)
DEFINE_SHARED_LOAD(f64_load_shared, f64, f64, f64, FORCE_READ_FLOAT)
DEFINE_SHARED_LOAD(i32_load8_s_shared, s8, s32, u32, FORCE_READ_INT)
DEFINE_SHARED_LOAD(i64_load8_s_shared, s8, s64, u64, FORCE_READ_INT)
DEFINE_SHARED_LOAD(i32_load8_u_shared, u8, u32, u32, FORCE_READ_INT)
DEFINE_SHARED_LOAD(i64_load8_u_shared, u8, u64, u64, FORCE_READ_INT)
DEFINE_SHARED_LOAD(i32_load16_s_shared, s16, s32, u32, FORCE_READ_INT)
DEFINE_SHARED_LOAD(i64_load16_s_shared, s16, s64, u64, FORCE_READ_INT)
DEFINE_SHARED_LOAD(i32_load16_u_shared, u16, u32, u32, FORCE_READ_INT)
DEFINE_SHARED_LOAD(i64_load16_u_shared, u16, u64, u64, FORCE_READ_INT)
DEFINE_SHARED_LOAD(i64_load32_s_shared, s32, s64, u64, FORCE_READ_INT)
DEFINE_SHARED_LOAD(i64_load32_u_shared, u32, u64, u64, FORCE_READ_INT)

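/*
 * Plain (non-atomic) Wasm stores to a shared memory, performed as relaxed
 * atomic stores. t1 is the type written to memory and t2 is the Wasm value
 * type being stored; wider values are wrapped by the cast.
 */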
#define DEFINE_SHARED_STORE(name, t1, t2)                                     \
  static inline void name(wasm_rt_shared_memory_t* mem, u64 addr, t2 value) { \
    MEMCHECK(mem, addr, t1);                                                  \
    t1 wrapped = (t1)value;                                                   \
    atomic_store_explicit(                                                    \
        (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wrapped,       \
        memory_order_relaxed);                                                \
  }

DEFINE_SHARED_STORE(i32_store_shared, u32, u32)
DEFINE_SHARED_STORE(i64_store_shared, u64, u64)
DEFINE_SHARED_STORE(f32_store_shared, f32, f32)
DEFINE_SHARED_STORE(f64_store_shared, f64, f64)
DEFINE_SHARED_STORE(i32_store8_shared, u8, u32)
DEFINE_SHARED_STORE(i32_store16_shared, u16, u32)
DEFINE_SHARED_STORE(i64_store8_shared, u8, u64)
DEFINE_SHARED_STORE(i64_store16_shared, u16, u64)
DEFINE_SHARED_STORE(i64_store32_shared, u32, u64)

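/*
 * Wasm atomic loads. Each macro defines two variants: `name`, for unshared
 * memories, where a bounds- and alignment-checked wasm_rt_memcpy suffices,
 * and `name##_shared`, for shared memories, which uses a sequentially
 * consistent atomic_load.
 */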
#define DEFINE_ATOMIC_LOAD(name, t1, t2, t3, force_read)                    \
  static inline t3 name(wasm_rt_memory_t* mem, u64 addr) {                  \
    MEMCHECK(mem, addr, t1);                                                \
    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                       \
    t1 result;                                                              \
    wasm_rt_memcpy(&result, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1));   \
    force_read(result);                                                     \
    return (t3)(t2)result;                                                  \
  }                                                                         \
  static inline t3 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr) {  \
    MEMCHECK(mem, addr, t1);                                                \
    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                       \
    t1 result;                                                              \
    result =                                                                \
        atomic_load((_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1))); \
    force_read(result);                                                     \
    return (t3)(t2)result;                                                  \
  }

DEFINE_ATOMIC_LOAD(i32_atomic_load, u32, u32, u32, FORCE_READ_INT)
DEFINE_ATOMIC_LOAD(i64_atomic_load, u64, u64, u64, FORCE_READ_INT)
DEFINE_ATOMIC_LOAD(i32_atomic_load8_u, u8, u32, u32, FORCE_READ_INT)
DEFINE_ATOMIC_LOAD(i64_atomic_load8_u, u8, u64, u64, FORCE_READ_INT)
DEFINE_ATOMIC_LOAD(i32_atomic_load16_u, u16, u32, u32, FORCE_READ_INT)
DEFINE_ATOMIC_LOAD(i64_atomic_load16_u, u16, u64, u64, FORCE_READ_INT)
DEFINE_ATOMIC_LOAD(i64_atomic_load32_u, u32, u64, u64, FORCE_READ_INT)

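/*
 * Wasm atomic stores: a memcpy-based variant for unshared memories and a
 * sequentially consistent atomic_store variant for shared memories.
 */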
#define DEFINE_ATOMIC_STORE(name, t1, t2)                                  \
  static inline void name(wasm_rt_memory_t* mem, u64 addr, t2 value) {     \
    MEMCHECK(mem, addr, t1);                                               \
    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                      \
    t1 wrapped = (t1)value;                                                \
    wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t1)), &wrapped, sizeof(t1)); \
  }                                                                        \
  static inline void name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
                                   t2 value) {                             \
    MEMCHECK(mem, addr, t1);                                               \
    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                      \
    t1 wrapped = (t1)value;                                                \
    atomic_store((_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)),    \
                 wrapped);                                                 \
  }

DEFINE_ATOMIC_STORE(i32_atomic_store, u32, u32)
DEFINE_ATOMIC_STORE(i64_atomic_store, u64, u64)
DEFINE_ATOMIC_STORE(i32_atomic_store8, u8, u32)
DEFINE_ATOMIC_STORE(i32_atomic_store16, u16, u32)
DEFINE_ATOMIC_STORE(i64_atomic_store8, u8, u64)
DEFINE_ATOMIC_STORE(i64_atomic_store16, u16, u64)
DEFINE_ATOMIC_STORE(i64_atomic_store32, u32, u64)

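/*
 * Wasm atomic read-modify-write operations (add, sub, and, or, xor). Both
 * variants return the value held in memory before the update: the unshared
 * variant reads, applies `op`, and writes the result back with
 * wasm_rt_memcpy, while the shared variant maps onto the corresponding C11
 * atomic_fetch_* operation. For example,
 *
 *   DEFINE_ATOMIC_RMW(i32_atomic_rmw_add, fetch_add, +, u32, u32)
 *
 * defines i32_atomic_rmw_add() and i32_atomic_rmw_add_shared(), the latter
 * built on atomic_fetch_add().
 */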
#define DEFINE_ATOMIC_RMW(name, opname, op, t1, t2)                      \
  static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) {     \
    MEMCHECK(mem, addr, t1);                                             \
    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                    \
    t1 wrapped = (t1)value;                                              \
    t1 ret;                                                              \
    wasm_rt_memcpy(&ret, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1));   \
    t1 new_value = ret op wrapped;                                       \
    wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t1)), &new_value,          \
                   sizeof(t1));                                          \
    return (t2)ret;                                                      \
  }                                                                      \
  static inline t2 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr, \
                                 t2 value) {                             \
    MEMCHECK(mem, addr, t1);                                             \
    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                    \
    t1 wrapped = (t1)value;                                              \
    t1 ret = atomic_##opname(                                            \
        (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wrapped); \
    return (t2)ret;                                                      \
  }

DEFINE_ATOMIC_RMW(i32_atomic_rmw8_add_u, fetch_add, +, u8, u32)
DEFINE_ATOMIC_RMW(i32_atomic_rmw16_add_u, fetch_add, +, u16, u32)
DEFINE_ATOMIC_RMW(i32_atomic_rmw_add, fetch_add, +, u32, u32)
DEFINE_ATOMIC_RMW(i64_atomic_rmw8_add_u, fetch_add, +, u8, u64)
DEFINE_ATOMIC_RMW(i64_atomic_rmw16_add_u, fetch_add, +, u16, u64)
DEFINE_ATOMIC_RMW(i64_atomic_rmw32_add_u, fetch_add, +, u32, u64)
DEFINE_ATOMIC_RMW(i64_atomic_rmw_add, fetch_add, +, u64, u64)

DEFINE_ATOMIC_RMW(i32_atomic_rmw8_sub_u, fetch_sub, -, u8, u32)
DEFINE_ATOMIC_RMW(i32_atomic_rmw16_sub_u, fetch_sub, -, u16, u32)
DEFINE_ATOMIC_RMW(i32_atomic_rmw_sub, fetch_sub, -, u32, u32)
DEFINE_ATOMIC_RMW(i64_atomic_rmw8_sub_u, fetch_sub, -, u8, u64)
DEFINE_ATOMIC_RMW(i64_atomic_rmw16_sub_u, fetch_sub, -, u16, u64)
DEFINE_ATOMIC_RMW(i64_atomic_rmw32_sub_u, fetch_sub, -, u32, u64)
DEFINE_ATOMIC_RMW(i64_atomic_rmw_sub, fetch_sub, -, u64, u64)

DEFINE_ATOMIC_RMW(i32_atomic_rmw8_and_u, fetch_and, &, u8, u32)
DEFINE_ATOMIC_RMW(i32_atomic_rmw16_and_u, fetch_and, &, u16, u32)
DEFINE_ATOMIC_RMW(i32_atomic_rmw_and, fetch_and, &, u32, u32)
DEFINE_ATOMIC_RMW(i64_atomic_rmw8_and_u, fetch_and, &, u8, u64)
DEFINE_ATOMIC_RMW(i64_atomic_rmw16_and_u, fetch_and, &, u16, u64)
DEFINE_ATOMIC_RMW(i64_atomic_rmw32_and_u, fetch_and, &, u32, u64)
DEFINE_ATOMIC_RMW(i64_atomic_rmw_and, fetch_and, &, u64, u64)

DEFINE_ATOMIC_RMW(i32_atomic_rmw8_or_u, fetch_or, |, u8, u32)
DEFINE_ATOMIC_RMW(i32_atomic_rmw16_or_u, fetch_or, |, u16, u32)
DEFINE_ATOMIC_RMW(i32_atomic_rmw_or, fetch_or, |, u32, u32)
DEFINE_ATOMIC_RMW(i64_atomic_rmw8_or_u, fetch_or, |, u8, u64)
DEFINE_ATOMIC_RMW(i64_atomic_rmw16_or_u, fetch_or, |, u16, u64)
DEFINE_ATOMIC_RMW(i64_atomic_rmw32_or_u, fetch_or, |, u32, u64)
DEFINE_ATOMIC_RMW(i64_atomic_rmw_or, fetch_or, |, u64, u64)

DEFINE_ATOMIC_RMW(i32_atomic_rmw8_xor_u, fetch_xor, ^, u8, u32)
DEFINE_ATOMIC_RMW(i32_atomic_rmw16_xor_u, fetch_xor, ^, u16, u32)
DEFINE_ATOMIC_RMW(i32_atomic_rmw_xor, fetch_xor, ^, u32, u32)
DEFINE_ATOMIC_RMW(i64_atomic_rmw8_xor_u, fetch_xor, ^, u8, u64)
DEFINE_ATOMIC_RMW(i64_atomic_rmw16_xor_u, fetch_xor, ^, u16, u64)
DEFINE_ATOMIC_RMW(i64_atomic_rmw32_xor_u, fetch_xor, ^, u32, u64)
DEFINE_ATOMIC_RMW(i64_atomic_rmw_xor, fetch_xor, ^, u64, u64)

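/*
 * Wasm atomic exchange: store the new value and return the one previously
 * held in memory. The shared variant uses C11 atomic_exchange.
 */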
#define DEFINE_ATOMIC_XCHG(name, opname, t1, t2)                           \
  static inline t2 name(wasm_rt_memory_t* mem, u64 addr, t2 value) {       \
    MEMCHECK(mem, addr, t1);                                               \
    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                      \
    t1 wrapped = (t1)value;                                                \
    t1 ret;                                                                \
    wasm_rt_memcpy(&ret, MEM_ADDR(mem, addr, sizeof(t1)), sizeof(t1));     \
    wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t1)), &wrapped, sizeof(t1)); \
    return (t2)ret;                                                        \
  }                                                                        \
  static inline t2 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr,   \
                                 t2 value) {                               \
    MEMCHECK(mem, addr, t1);                                               \
    ATOMIC_ALIGNMENT_CHECK(addr, t1);                                      \
    t1 wrapped = (t1)value;                                                \
    t1 ret = atomic_##opname(                                              \
        (_Atomic volatile t1*)MEM_ADDR(mem, addr, sizeof(t1)), wrapped);   \
    return (t2)ret;                                                        \
  }

DEFINE_ATOMIC_XCHG(i32_atomic_rmw8_xchg_u, exchange, u8, u32)
DEFINE_ATOMIC_XCHG(i32_atomic_rmw16_xchg_u, exchange, u16, u32)
DEFINE_ATOMIC_XCHG(i32_atomic_rmw_xchg, exchange, u32, u32)
DEFINE_ATOMIC_XCHG(i64_atomic_rmw8_xchg_u, exchange, u8, u64)
DEFINE_ATOMIC_XCHG(i64_atomic_rmw16_xchg_u, exchange, u16, u64)
DEFINE_ATOMIC_XCHG(i64_atomic_rmw32_xchg_u, exchange, u32, u64)
DEFINE_ATOMIC_XCHG(i64_atomic_rmw_xchg, exchange, u64, u64)

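/*
 * Wasm atomic compare-exchange. t1 is the Wasm value type and t2 the
 * (possibly narrower) type stored in memory. The replacement is written only
 * when the value read equals `expected`; in either case the value previously
 * held in memory is returned. The shared variant relies on
 * atomic_compare_exchange_strong, which stores the observed value into
 * expected_wrapped on failure, so returning expected_wrapped yields the old
 * value on both paths.
 */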
#define DEFINE_ATOMIC_CMP_XCHG(name, t1, t2)                                \
  static inline t1 name(wasm_rt_memory_t* mem, u64 addr, t1 expected,       \
                        t1 replacement) {                                   \
    MEMCHECK(mem, addr, t2);                                                \
    ATOMIC_ALIGNMENT_CHECK(addr, t2);                                       \
    t2 expected_wrapped = (t2)expected;                                     \
    t2 replacement_wrapped = (t2)replacement;                               \
    t2 ret;                                                                 \
    wasm_rt_memcpy(&ret, MEM_ADDR(mem, addr, sizeof(t2)), sizeof(t2));      \
    if (ret == expected_wrapped) {                                          \
      wasm_rt_memcpy(MEM_ADDR(mem, addr, sizeof(t2)), &replacement_wrapped, \
                     sizeof(t2));                                           \
    }                                                                       \
    return (t1)ret;                                                         \
  }                                                                         \
  static inline t1 name##_shared(wasm_rt_shared_memory_t* mem, u64 addr,    \
                                 t1 expected, t1 replacement) {             \
    MEMCHECK(mem, addr, t2);                                                \
    ATOMIC_ALIGNMENT_CHECK(addr, t2);                                       \
    t2 expected_wrapped = (t2)expected;                                     \
    t2 replacement_wrapped = (t2)replacement;                               \
    atomic_compare_exchange_strong(                                         \
        (_Atomic volatile t2*)MEM_ADDR(mem, addr, sizeof(t2)),              \
        &expected_wrapped, replacement_wrapped);                            \
    return (t1)expected_wrapped;                                            \
  }

DEFINE_ATOMIC_CMP_XCHG(i32_atomic_rmw8_cmpxchg_u, u32, u8)
DEFINE_ATOMIC_CMP_XCHG(i32_atomic_rmw16_cmpxchg_u, u32, u16)
DEFINE_ATOMIC_CMP_XCHG(i32_atomic_rmw_cmpxchg, u32, u32)
DEFINE_ATOMIC_CMP_XCHG(i64_atomic_rmw8_cmpxchg_u, u64, u8)
DEFINE_ATOMIC_CMP_XCHG(i64_atomic_rmw16_cmpxchg_u, u64, u16)
DEFINE_ATOMIC_CMP_XCHG(i64_atomic_rmw32_cmpxchg_u, u64, u32)
DEFINE_ATOMIC_CMP_XCHG(i64_atomic_rmw_cmpxchg, u64, u64)

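/* Wasm atomic.fence is implemented as a sequentially consistent thread
 * fence. */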
#define atomic_fence() atomic_thread_fence(memory_order_seq_cst)