-rw-r--r--   src/passes/Print.cpp                       |  33
-rw-r--r--   src/wasm-binary.h                          |  50
-rw-r--r--   src/wasm-s-parser.h                        |   1
-rw-r--r--   src/wasm-traversal.h                       |  10
-rw-r--r--   src/wasm.h                                 |  31
-rw-r--r--   src/wasm/wasm-binary.cpp                   |  90
-rw-r--r--   src/wasm/wasm-s-parser.cpp                 | 129
-rw-r--r--   src/wasm/wasm.cpp                          |   6
-rw-r--r--   test/atomics.wast                          |  40
-rw-r--r--   test/atomics.wast.from-wast                |  36
-rw-r--r--   test/atomics.wast.fromBinary               |  38
-rw-r--r--   test/atomics.wast.fromBinary.noDebugInfo   |  36
12 files changed, 427 insertions(+), 73 deletions(-)
diff --git a/src/passes/Print.cpp b/src/passes/Print.cpp
index 7ec3e98a3..d0c9603f5 100644
--- a/src/passes/Print.cpp
+++ b/src/passes/Print.cpp
@@ -350,6 +350,39 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
     printFullLine(curr->value);
     decIndent();
   }
+  void visitAtomicRMW(AtomicRMW* curr) {
+    o << '(';
+    prepareColor(o) << printWasmType(curr->type) << ".atomic.rmw";
+    if (curr->bytes != getWasmTypeSize(curr->type)) {
+      if (curr->bytes == 1) {
+        o << '8';
+      } else if (curr->bytes == 2) {
+        o << "16";
+      } else if (curr->bytes == 4) {
+        o << "32";
+      } else {
+        WASM_UNREACHABLE();
+      }
+      o << "_u";
+    }
+    o << '.';
+    switch (curr->op) {
+      case Add: o << "add"; break;
+      case Sub: o << "sub"; break;
+      case And: o << "and"; break;
+      case Or: o << "or"; break;
+      case Xor: o << "xor"; break;
+      case Xchg: o << "xchg"; break;
+    }
+    restoreNormalColor(o);
+    if (curr->offset) {
+      o << " offset=" << curr->offset;
+    }
+    incIndent();
+    printFullLine(curr->ptr);
+    printFullLine(curr->value);
+    decIndent();
+  }
   void visitConst(Const *curr) {
     o << curr->value;
   }
diff --git a/src/wasm-binary.h b/src/wasm-binary.h
index f1396cbb2..332b4a7b6 100644
--- a/src/wasm-binary.h
+++ b/src/wasm-binary.h
@@ -525,9 +525,55 @@ enum AtomicOpcodes {
   I32AtomicStore16 = 0x1a,
   I64AtomicStore8 = 0x1b,
   I64AtomicStore16 = 0x1c,
-  I64AtomicStore32 = 0x1d
+  I64AtomicStore32 = 0x1d,
+
+  AtomicRMWOps_Begin = 0x1e,
+  I32AtomicRMWAdd = 0x1e,
+  I64AtomicRMWAdd = 0x1f,
+  I32AtomicRMWAdd8U = 0x20,
+  I32AtomicRMWAdd16U = 0x21,
+  I64AtomicRMWAdd8U = 0x22,
+  I64AtomicRMWAdd16U = 0x23,
+  I64AtomicRMWAdd32U = 0x24,
+  I32AtomicRMWSub = 0x25,
+  I64AtomicRMWSub = 0x26,
+  I32AtomicRMWSub8U = 0x27,
+  I32AtomicRMWSub16U = 0x28,
+  I64AtomicRMWSub8U = 0x29,
+  I64AtomicRMWSub16U = 0x2a,
+  I64AtomicRMWSub32U = 0x2b,
+  I32AtomicRMWAnd = 0x2c,
+  I64AtomicRMWAnd = 0x2d,
+  I32AtomicRMWAnd8U = 0x2e,
+  I32AtomicRMWAnd16U = 0x2f,
+  I64AtomicRMWAnd8U = 0x30,
+  I64AtomicRMWAnd16U = 0x31,
+  I64AtomicRMWAnd32U = 0x32,
+  I32AtomicRMWOr = 0x33,
+  I64AtomicRMWOr = 0x34,
+  I32AtomicRMWOr8U = 0x35,
+  I32AtomicRMWOr16U = 0x36,
+  I64AtomicRMWOr8U = 0x37,
+  I64AtomicRMWOr16U = 0x38,
+  I64AtomicRMWOr32U = 0x39,
+  I32AtomicRMWXor = 0x3a,
+  I64AtomicRMWXor = 0x3b,
+  I32AtomicRMWXor8U = 0x3c,
+  I32AtomicRMWXor16U = 0x3d,
+  I64AtomicRMWXor8U = 0x3e,
+  I64AtomicRMWXor16U = 0x3f,
+  I64AtomicRMWXor32U = 0x40,
+  I32AtomicRMWXchg = 0x41,
+  I64AtomicRMWXchg = 0x42,
+  I32AtomicRMWXchg8U = 0x43,
+  I32AtomicRMWXchg16U = 0x44,
+  I64AtomicRMWXchg8U = 0x45,
+  I64AtomicRMWXchg16U = 0x46,
+  I64AtomicRMWXchg32U = 0x47,
+  AtomicRMWOps_End = 0x47,
 };
+
 enum MemoryAccess {
   Offset = 0x10, // bit 4
   Alignment = 0x80, // bit 7
@@ -676,6 +722,7 @@ public:
   void emitMemoryAccess(size_t alignment, size_t bytes, uint32_t offset);
   void visitLoad(Load *curr);
   void visitStore(Store *curr);
+  void visitAtomicRMW(AtomicRMW *curr);
   void visitConst(Const *curr);
   void visitUnary(Unary *curr);
   void visitBinary(Binary *curr);
@@ -833,6 +880,7 @@ public:
   void readMemoryAccess(Address& alignment, size_t bytes, Address& offset);
   bool maybeVisitLoad(Expression*& out, uint8_t code, bool isAtomic);
   bool maybeVisitStore(Expression*& out, uint8_t code, bool isAtomic);
+  bool maybeVisitAtomicRMW(Expression*& out, uint8_t code);
   bool maybeVisitConst(Expression*& out, uint8_t code);
   bool maybeVisitUnary(Expression*& out, uint8_t code);
   bool maybeVisitBinary(Expression*& out, uint8_t code);
diff --git a/src/wasm-s-parser.h b/src/wasm-s-parser.h
index 90e797615..161256c2f 100644
--- a/src/wasm-s-parser.h
+++ b/src/wasm-s-parser.h
@@ -177,6 +177,7 @@ private:
   Expression* makeConst(Element& s, WasmType type);
   Expression* makeLoad(Element& s, WasmType type, bool isAtomic);
   Expression* makeStore(Element& s, WasmType type, bool isAtomic);
+  Expression* makeAtomicRMW(Element& s, WasmType type);
   Expression* makeIf(Element& s);
   Expression* makeMaybeBlock(Element& s, size_t i, WasmType type);
   Expression* makeLoop(Element& s);
diff --git a/src/wasm-traversal.h b/src/wasm-traversal.h
index 3b1de2e32..11332b5c3 100644
--- a/src/wasm-traversal.h
+++ b/src/wasm-traversal.h
@@ -49,6 +49,7 @@ struct Visitor {
   ReturnType visitSetGlobal(SetGlobal* curr) {}
   ReturnType visitLoad(Load* curr) {}
   ReturnType visitStore(Store* curr) {}
+  ReturnType visitAtomicRMW(AtomicRMW* curr) { return ReturnType(); } // Stub impl so not every pass has to implement this yet.
   ReturnType visitConst(Const* curr) {}
   ReturnType visitUnary(Unary* curr) {}
   ReturnType visitBinary(Binary* curr) {}
@@ -90,6 +91,7 @@ struct Visitor {
       case Expression::Id::SetGlobalId: DELEGATE(SetGlobal);
       case Expression::Id::LoadId: DELEGATE(Load);
       case Expression::Id::StoreId: DELEGATE(Store);
+      case Expression::Id::AtomicRMWId: DELEGATE(AtomicRMW);
       case Expression::Id::ConstId: DELEGATE(Const);
       case Expression::Id::UnaryId: DELEGATE(Unary);
       case Expression::Id::BinaryId: DELEGATE(Binary);
@@ -130,6 +132,7 @@ struct UnifiedExpressionVisitor : public Visitor<SubType> {
   ReturnType visitSetGlobal(SetGlobal* curr) { return static_cast<SubType*>(this)->visitExpression(curr); }
   ReturnType visitLoad(Load* curr) { return static_cast<SubType*>(this)->visitExpression(curr); }
   ReturnType visitStore(Store* curr) { return static_cast<SubType*>(this)->visitExpression(curr); }
+  ReturnType visitAtomicRMW(AtomicRMW* curr) { return static_cast<SubType*>(this)->visitExpression(curr); }
   ReturnType visitConst(Const* curr) { return static_cast<SubType*>(this)->visitExpression(curr); }
   ReturnType visitUnary(Unary* curr) { return static_cast<SubType*>(this)->visitExpression(curr); }
   ReturnType visitBinary(Binary* curr) { return static_cast<SubType*>(this)->visitExpression(curr); }
@@ -306,6 +309,7 @@ struct Walker : public VisitorType {
   static void doVisitSetGlobal(SubType* self, Expression** currp) { self->visitSetGlobal((*currp)->cast<SetGlobal>()); }
   static void doVisitLoad(SubType* self, Expression** currp) { self->visitLoad((*currp)->cast<Load>()); }
   static void doVisitStore(SubType* self, Expression** currp) { self->visitStore((*currp)->cast<Store>()); }
+  static void doVisitAtomicRMW(SubType* self, Expression** currp) { self->visitAtomicRMW((*currp)->cast<AtomicRMW>()); }
   static void doVisitConst(SubType* self, Expression** currp) { self->visitConst((*currp)->cast<Const>()); }
   static void doVisitUnary(SubType* self, Expression** currp) { self->visitUnary((*currp)->cast<Unary>()); }
   static void doVisitBinary(SubType* self, Expression** currp) { self->visitBinary((*currp)->cast<Binary>()); }
@@ -428,6 +432,12 @@ struct PostWalker : public Walker<SubType, VisitorType> {
         self->pushTask(SubType::scan, &curr->cast<Store>()->ptr);
         break;
       }
+      case Expression::Id::AtomicRMWId: {
+        self->pushTask(SubType::doVisitAtomicRMW, currp);
+        self->pushTask(SubType::scan, &curr->cast<AtomicRMW>()->value);
+        self->pushTask(SubType::scan, &curr->cast<AtomicRMW>()->ptr);
+        break;
+      }
       case Expression::Id::ConstId: {
         self->pushTask(SubType::doVisitConst, currp);
         break;
diff --git a/src/wasm.h b/src/wasm.h
index 286604848..e98c4db63 100644
--- a/src/wasm.h
+++ b/src/wasm.h
@@ -130,6 +130,10 @@
 enum HostOp {
   PageSize, CurrentMemory, GrowMemory, HasFeature
 };
+enum AtomicRMWOp {
+  Add, Sub, And, Or, Xor, Xchg,
+};
+
 //
 // Expressions
 //
@@ -177,6 +181,7 @@ public:
     HostId,
     NopId,
     UnreachableId,
+    AtomicCmpxchgId,
     AtomicRMWId,
     NumExpressionIds
   };
@@ -423,6 +428,25 @@ public:
   void finalize();
 };
 
+class AtomicRMW : public SpecificExpression<Expression::AtomicRMWId> {
+ public:
+  AtomicRMW() = default;
+  AtomicRMW(MixedArena& allocator) : AtomicRMW() {}
+
+  AtomicRMWOp op;
+  uint8_t bytes;
+  Address offset;
+  Expression* ptr;
+  Expression* value;
+
+  void finalize();
+};
+
+class AtomicCmpxchg : public SpecificExpression<Expression::AtomicCmpxchgId> {
+ public:
+  AtomicCmpxchg() = default;
+};
+
 class Const : public SpecificExpression<Expression::ConstId> {
 public:
   Const() {}
@@ -514,13 +538,6 @@ public:
   Unreachable(MixedArena& allocator) : Unreachable() {}
 };
 
-class AtomicRMW : public SpecificExpression<Expression::AtomicRMWId> {
- public:
-  AtomicRMW() {}
-  AtomicRMW(MixedArena& allocator) : AtomicRMW() {}
-  bool finalize();
-};
-
 // Globals
 
 class Function {
diff --git a/src/wasm/wasm-binary.cpp b/src/wasm/wasm-binary.cpp
index 4844dd5b4..69d3ecdbb 100644
--- a/src/wasm/wasm-binary.cpp
+++ b/src/wasm/wasm-binary.cpp
@@ -832,6 +832,51 @@ void WasmBinaryWriter::visitStore(Store *curr) {
   emitMemoryAccess(curr->align, curr->bytes, curr->offset);
 }
 
+void WasmBinaryWriter::visitAtomicRMW(AtomicRMW *curr) {
+  if (debug) std::cerr << "zz node: AtomicRMW" << std::endl;
+  recurse(curr->ptr);
+  recurse(curr->value);
+
+  o << int8_t(BinaryConsts::AtomicPrefix);
+
+#define CASE_FOR_OP(Op) \
+  case Op: \
+    switch (curr->type) { \
+      case i32: \
+        switch (curr->bytes) { \
+          case 1: o << int8_t(BinaryConsts::I32AtomicRMW##Op##8U); break; \
+          case 2: o << int8_t(BinaryConsts::I32AtomicRMW##Op##16U); break; \
+          case 4: o << int8_t(BinaryConsts::I32AtomicRMW##Op); break; \
+          default: WASM_UNREACHABLE(); \
+        } \
+        break; \
+      case i64: \
+        switch (curr->bytes) { \
+          case 1: o << int8_t(BinaryConsts::I64AtomicRMW##Op##8U); break; \
+          case 2: o << int8_t(BinaryConsts::I64AtomicRMW##Op##16U); break; \
+          case 4: o << int8_t(BinaryConsts::I64AtomicRMW##Op##32U); break; \
+          case 8: o << int8_t(BinaryConsts::I64AtomicRMW##Op); break; \
+          default: WASM_UNREACHABLE(); \
+        } \
+        break; \
+      default: WASM_UNREACHABLE(); \
+    } \
+    break
+
+  switch (curr->op) {
+    CASE_FOR_OP(Add);
+    CASE_FOR_OP(Sub);
+    CASE_FOR_OP(And);
+    CASE_FOR_OP(Or);
+    CASE_FOR_OP(Xor);
+    CASE_FOR_OP(Xchg);
+    default: WASM_UNREACHABLE();
+  }
+#undef CASE_FOR_OP
+
+  emitMemoryAccess(curr->bytes, curr->bytes, curr->offset);
+}
+
 void WasmBinaryWriter::visitConst(Const *curr) {
   if (debug) std::cerr << "zz node: Const" << curr << " : " << curr->type << std::endl;
   switch (curr->type) {
@@ -1934,6 +1979,7 @@ BinaryConsts::ASTNodes WasmBinaryBuilder::readExpression(Expression*& curr) {
       code = getInt8();
       if (maybeVisitLoad(curr, code, /*isAtomic=*/true)) break;
       if (maybeVisitStore(curr, code, /*isAtomic=*/true)) break;
+      if (maybeVisitAtomicRMW(curr, code)) break;
       throw ParseException("invalid code after atomic prefix: " + std::to_string(code));
     }
     default: {
@@ -2282,6 +2328,50 @@ bool WasmBinaryBuilder::maybeVisitStore(Expression*& out, uint8_t code, bool isAtomic) {
   return true;
 }
 
+
+bool WasmBinaryBuilder::maybeVisitAtomicRMW(Expression*& out, uint8_t code) {
+  if (code < BinaryConsts::AtomicRMWOps_Begin || code > BinaryConsts::AtomicRMWOps_End) return false;
+  auto* curr = allocator.alloc<AtomicRMW>();
+
+  // Set curr to the given opcode, type and size.
+#define SET(opcode, optype, size) \
+  curr->op = opcode; \
+  curr->type = optype; \
+  curr->bytes = size
+
+  // Handle the cases for all the valid types for a particular opcode
+#define SET_FOR_OP(Op) \
+  case BinaryConsts::I32AtomicRMW##Op: SET(Op, i32, 4); break; \
+  case BinaryConsts::I32AtomicRMW##Op##8U: SET(Op, i32, 1); break; \
+  case BinaryConsts::I32AtomicRMW##Op##16U: SET(Op, i32, 2); break; \
+  case BinaryConsts::I64AtomicRMW##Op: SET(Op, i64, 8); break; \
+  case BinaryConsts::I64AtomicRMW##Op##8U: SET(Op, i64, 1); break; \
+  case BinaryConsts::I64AtomicRMW##Op##16U: SET(Op, i64, 2); break; \
+  case BinaryConsts::I64AtomicRMW##Op##32U: SET(Op, i64, 4); break;
+
+  switch (code) {
+    SET_FOR_OP(Add);
+    SET_FOR_OP(Sub);
+    SET_FOR_OP(And);
+    SET_FOR_OP(Or);
+    SET_FOR_OP(Xor);
+    SET_FOR_OP(Xchg);
+    default: WASM_UNREACHABLE();
+  }
+#undef SET_FOR_OP
+#undef SET
+
+  if (debug) std::cerr << "zz node: AtomicRMW" << std::endl;
+  Address readAlign;
+  readMemoryAccess(readAlign, curr->bytes, curr->offset);
+  if (readAlign != curr->bytes) throw ParseException("Align of AtomicRMW must match size");
+  curr->value = popNonVoidExpression();
+  curr->ptr = popNonVoidExpression();
+  curr->finalize();
+  out = curr;
+  return true;
+}
+
 bool WasmBinaryBuilder::maybeVisitConst(Expression*& out, uint8_t code) {
   Const* curr;
   if (debug) std::cerr << "zz node: Const, code " << code << std::endl;
diff --git a/src/wasm/wasm-s-parser.cpp b/src/wasm/wasm-s-parser.cpp
index 38954af1f..c6331afc0 100644
--- a/src/wasm/wasm-s-parser.cpp
+++ b/src/wasm/wasm-s-parser.cpp
@@ -666,6 +666,7 @@ Expression* SExpressionWasmBuilder::makeExpression(Element& s) {
       if (op[1] == 't' && !strncmp(op, "atomic.", strlen("atomic."))) {
         if (op[7] == 'l') return makeLoad(s, type, /*isAtomic=*/true);
         if (op[7] == 's') return makeStore(s, type, /*isAtomic=*/true);
+        if (op[7] == 'r') return makeAtomicRMW(s, type);
       }
       abort_on(op);
     }
@@ -1125,46 +1126,57 @@ Expression* SExpressionWasmBuilder::makeConst(Element& s, WasmType type) {
   return ret;
 }
 
-
-Expression* SExpressionWasmBuilder::makeLoad(Element& s, WasmType type, bool isAtomic) {
-  const char *extra = strchr(s[0]->c_str(), '.') + 5; // after "type.load"
-  if (isAtomic) extra += 7; // after "type.atomic.load"
-  auto* ret = allocator.alloc<Load>();
-  ret->isAtomic = isAtomic;
-  ret->type = type;
-  ret->bytes = getWasmTypeSize(type);
-  if (extra[0] == '8') {
-    ret->bytes = 1;
-    extra++;
-  } else if (extra[0] == '1') {
-    if (extra[1] != '6') throw ParseException("expected load16");
-    ret->bytes = 2;
-    extra += 2;
-  } else if (extra[0] == '3') {
-    if (extra[1] != '2') throw ParseException("expected load32");
-    ret->bytes = 4;
-    extra += 2;
+static uint8_t parseMemBytes(const char** in, uint8_t fallback) {
+  uint8_t ret;
+  const char* s = *in;
+  if (s[0] == '8') {
+    ret = 1;
+    (*in)++;
+  } else if (s[0] == '1') {
+    if (s[1] != '6') throw ParseException("expected 16 for memop size");
+    ret = 2;
+    *in += 2;
+  } else if (s[0] == '3') {
+    if (s[1] != '2') throw ParseException("expected 32 for memop size");
+    ret = 4;
+    *in += 2;
+  } else {
+    ret = fallback;
   }
-  ret->signed_ = extra[0] && extra[1] == 's';
+  return ret;
+}
+
+static size_t parseMemAttributes(Element& s, Address* offset, Address* align, Address fallback) {
   size_t i = 1;
-  ret->offset = 0;
-  ret->align = ret->bytes;
+  *offset = 0;
+  *align = fallback;
   while (!s[i]->isList()) {
     const char *str = s[i]->c_str();
     const char *eq = strchr(str, '=');
-    if (!eq) throw ParseException("no = in load attribute");
+    if (!eq) throw ParseException("missing = in memory attribute");
     eq++;
+    uint64_t value = atoll(eq);
     if (str[0] == 'a') {
-      uint64_t align = atoll(eq);
-      if (align > std::numeric_limits<uint32_t>::max()) throw ParseException("bad align");
-      ret->align = align;
+      if (value > std::numeric_limits<uint32_t>::max()) throw ParseException("bad align");
+      *align = value;
     } else if (str[0] == 'o') {
-      uint64_t offset = atoll(eq);
-      if (offset > std::numeric_limits<uint32_t>::max()) throw ParseException("bad offset");
-      ret->offset = (uint32_t)offset;
-    } else throw ParseException("bad load attribute");
+      if (value > std::numeric_limits<uint32_t>::max()) throw ParseException("bad offset");
+      *offset = value;
+    } else throw ParseException("bad memory attribute");
     i++;
   }
+  return i;
+}
+
+Expression* SExpressionWasmBuilder::makeLoad(Element& s, WasmType type, bool isAtomic) {
+  const char *extra = strchr(s[0]->c_str(), '.') + 5; // after "type.load"
+  if (isAtomic) extra += 7; // after "type.atomic.load"
+  auto* ret = allocator.alloc<Load>();
+  ret->isAtomic = isAtomic;
+  ret->type = type;
+  ret->bytes = parseMemBytes(&extra, getWasmTypeSize(type));
+  ret->signed_ = extra[0] && extra[1] == 's';
+  size_t i = parseMemAttributes(s, &ret->offset, &ret->align, ret->bytes);
   ret->ptr = parseExpression(s[i]);
   ret->finalize();
   return ret;
@@ -1176,36 +1188,33 @@ Expression* SExpressionWasmBuilder::makeStore(Element& s, WasmType type, bool isAtomic) {
   auto ret = allocator.alloc<Store>();
   ret->isAtomic = isAtomic;
   ret->valueType = type;
-  ret->bytes = getWasmTypeSize(type);
-  if (extra[0] == '8') {
-    ret->bytes = 1;
-    extra++;
-  } else if (extra[0] == '1') {
-    if (extra[1] != '6') throw ParseException("expected store16");
-    ret->bytes = 2;
-    extra += 2;
-  } else if (extra[0] == '3') {
-    if (extra[1] != '2') throw ParseException("expected store32");
-    ret->bytes = 4;
-    extra += 2;
-  }
-  size_t i = 1;
-  ret->offset = 0;
-  ret->align = ret->bytes;
-  while (!s[i]->isList()) {
-    const char *str = s[i]->c_str();
-    const char *eq = strchr(str, '=');
-    if (!eq) throw ParseException("missing = in store attribute");
-    eq++;
-    if (str[0] == 'a') {
-      uint64_t align = atoll(eq);
-      if (align > std::numeric_limits<uint32_t>::max()) throw ParseException("bad align");
-      ret->align = align;
-    } else if (str[0] == 'o') {
-      ret->offset = atoi(eq);
-    } else throw ParseException("bad store attribute");
-    i++;
-  }
+  ret->bytes = parseMemBytes(&extra, getWasmTypeSize(type));
+  size_t i = parseMemAttributes(s, &ret->offset, &ret->align, ret->bytes);
+
+  ret->ptr = parseExpression(s[i]);
+  ret->value = parseExpression(s[i+1]);
+  ret->finalize();
+  return ret;
+}
+
+Expression* SExpressionWasmBuilder::makeAtomicRMW(Element& s, WasmType type) {
+  const char* extra = strchr(s[0]->c_str(), '.') + 11; // after "type.atomic.rmw"
+  auto ret = allocator.alloc<AtomicRMW>();
+  ret->type = type;
+  ret->bytes = parseMemBytes(&extra, getWasmTypeSize(type));
+  extra = strchr(extra, '.'); // after the optional '_u' and before the opcode
+  if (!extra) throw ParseException("malformed atomic rmw instruction");
+  extra++; // after the '.'
+  if (!strncmp(extra, "add", 3)) ret->op = Add;
+  else if (!strncmp(extra, "and", 3)) ret->op = And;
+  else if (!strncmp(extra, "or", 2)) ret->op = Or;
+  else if (!strncmp(extra, "sub", 3)) ret->op = Sub;
+  else if (!strncmp(extra, "xor", 3)) ret->op = Xor;
+  else if (!strncmp(extra, "xchg", 4)) ret->op = Xchg;
+  else throw ParseException("bad atomic rmw operator");
+  Address align;
+  size_t i = parseMemAttributes(s, &ret->offset, &align, ret->bytes);
+  if (align != ret->bytes) throw ParseException("Align of Atomic RMW must match size");
   ret->ptr = parseExpression(s[i]);
   ret->value = parseExpression(s[i+1]);
   ret->finalize();
ParseException("missing = in memory attribute"); eq++; + uint64_t value = atoll(eq); if (str[0] == 'a') { - uint64_t align = atoll(eq); - if (align > std::numeric_limits<uint32_t>::max()) throw ParseException("bad align"); - ret->align = align; + if (value > std::numeric_limits<uint32_t>::max()) throw ParseException("bad align"); + *align = value; } else if (str[0] == 'o') { - uint64_t offset = atoll(eq); - if (offset > std::numeric_limits<uint32_t>::max()) throw ParseException("bad offset"); - ret->offset = (uint32_t)offset; - } else throw ParseException("bad load attribute"); + if (value > std::numeric_limits<uint32_t>::max()) throw ParseException("bad offset"); + *offset = value; + } else throw ParseException("bad memory attribute"); i++; } + return i; +} + +Expression* SExpressionWasmBuilder::makeLoad(Element& s, WasmType type, bool isAtomic) { + const char *extra = strchr(s[0]->c_str(), '.') + 5; // after "type.load" + if (isAtomic) extra += 7; // after "type.atomic.load" + auto* ret = allocator.alloc<Load>(); + ret->isAtomic = isAtomic; + ret->type = type; + ret->bytes = parseMemBytes(&extra, getWasmTypeSize(type)); + ret->signed_ = extra[0] && extra[1] == 's'; + size_t i = parseMemAttributes(s, &ret->offset, &ret->align, ret->bytes); ret->ptr = parseExpression(s[i]); ret->finalize(); return ret; @@ -1176,36 +1188,33 @@ Expression* SExpressionWasmBuilder::makeStore(Element& s, WasmType type, bool is auto ret = allocator.alloc<Store>(); ret->isAtomic = isAtomic; ret->valueType = type; - ret->bytes = getWasmTypeSize(type); - if (extra[0] == '8') { - ret->bytes = 1; - extra++; - } else if (extra[0] == '1') { - if (extra[1] != '6') throw ParseException("expected store16"); - ret->bytes = 2; - extra += 2; - } else if (extra[0] == '3') { - if (extra[1] != '2') throw ParseException("expected store32");; - ret->bytes = 4; - extra += 2; - } - size_t i = 1; - ret->offset = 0; - ret->align = ret->bytes; - while (!s[i]->isList()) { - const char *str = s[i]->c_str(); - const char *eq = strchr(str, '='); - if (!eq) throw ParseException("missing = in store attribute");; - eq++; - if (str[0] == 'a') { - uint64_t align = atoll(eq); - if (align > std::numeric_limits<uint32_t>::max()) throw ParseException("bad align"); - ret->align = align; - } else if (str[0] == 'o') { - ret->offset = atoi(eq); - } else throw ParseException("bad store attribute"); - i++; - } + ret->bytes = parseMemBytes(&extra, getWasmTypeSize(type)); + size_t i = parseMemAttributes(s, &ret->offset, &ret->align, ret->bytes); + + ret->ptr = parseExpression(s[i]); + ret->value = parseExpression(s[i+1]); + ret->finalize(); + return ret; +} + +Expression* SExpressionWasmBuilder::makeAtomicRMW(Element& s, WasmType type) { + const char* extra = strchr(s[0]->c_str(), '.') + 11; // afer "type.atomic.rmw" + auto ret = allocator.alloc<AtomicRMW>(); + ret->type = type; + ret->bytes = parseMemBytes(&extra, getWasmTypeSize(type)); + extra = strchr(extra, '.'); // after the optional '_u' and before the opcode + if (!extra) throw ParseException("malformed atomic rmw instruction"); + extra++; // after the '.' 
+ if (!strncmp(extra, "add", 3)) ret->op = Add; + else if (!strncmp(extra, "and", 3)) ret->op = And; + else if (!strncmp(extra, "or", 2)) ret->op = Or; + else if (!strncmp(extra, "sub", 3)) ret->op = Sub; + else if (!strncmp(extra, "xor", 3)) ret->op = Xor; + else if (!strncmp(extra, "xchg", 4)) ret->op = Xchg; + else throw ParseException("bad atomic rmw operator"); + Address align; + size_t i = parseMemAttributes(s, &ret->offset, &align, ret->bytes); + if (align != ret->bytes) throw ParseException("Align of Atomic RMW must match size"); ret->ptr = parseExpression(s[i]); ret->value = parseExpression(s[i+1]); ret->finalize(); diff --git a/src/wasm/wasm.cpp b/src/wasm/wasm.cpp index 201c8d183..cbfcbccad 100644 --- a/src/wasm/wasm.cpp +++ b/src/wasm/wasm.cpp @@ -353,6 +353,12 @@ void Store::finalize() { } } +void AtomicRMW::finalize() { + if (ptr->type == unreachable || value->type == unreachable) { + type = unreachable; + } +} + Const* Const::set(Literal value_) { value = value_; type = value.type; diff --git a/test/atomics.wast b/test/atomics.wast index af78b98d5..26aebdb0d 100644 --- a/test/atomics.wast +++ b/test/atomics.wast @@ -1,7 +1,7 @@ (module (type $0 (func)) (memory $0 23 256 shared) - (func $atomics (type $0) + (func $atomic-loadstore (type $0) (local $0 i32) (local $1 i64) (drop @@ -39,11 +39,11 @@ (get_local $0) ) ) - (i32.atomic.store offset=4 + (i32.atomic.store offset=4 align=4 (get_local $0) (get_local $0) ) - (i32.atomic.store8 offset=4 + (i32.atomic.store8 offset=4 align=1 (get_local $0) (get_local $0) ) @@ -68,4 +68,38 @@ (get_local $1) ) ) + (func $atomic-rmw (type $0) + (local $0 i32) + (local $1 i64) + (drop + (i32.atomic.rmw.add offset=4 + (get_local $0) + (get_local $0) + ) + ) + (drop + (i32.atomic.rmw8_u.add offset=4 + (get_local $0) + (get_local $0) + ) + ) + (drop + (i32.atomic.rmw16_u.and align=2 + (get_local $0) + (get_local $0) + ) + ) + (drop + (i64.atomic.rmw32_u.or + (get_local $0) + (get_local $1) + ) + ) + (drop + (i32.atomic.rmw8_u.xchg align=1 + (get_local $0) + (get_local $0) + ) + ) + ) ) diff --git a/test/atomics.wast.from-wast b/test/atomics.wast.from-wast index af78b98d5..ef15de7e2 100644 --- a/test/atomics.wast.from-wast +++ b/test/atomics.wast.from-wast @@ -1,7 +1,7 @@ (module (type $0 (func)) (memory $0 23 256 shared) - (func $atomics (type $0) + (func $atomic-loadstore (type $0) (local $0 i32) (local $1 i64) (drop @@ -68,4 +68,38 @@ (get_local $1) ) ) + (func $atomic-rmw (type $0) + (local $0 i32) + (local $1 i64) + (drop + (i32.atomic.rmw.add offset=4 + (get_local $0) + (get_local $0) + ) + ) + (drop + (i32.atomic.rmw8_u.add offset=4 + (get_local $0) + (get_local $0) + ) + ) + (drop + (i32.atomic.rmw16_u.and + (get_local $0) + (get_local $0) + ) + ) + (drop + (i64.atomic.rmw32_u.or + (get_local $0) + (get_local $1) + ) + ) + (drop + (i32.atomic.rmw8_u.xchg + (get_local $0) + (get_local $0) + ) + ) + ) ) diff --git a/test/atomics.wast.fromBinary b/test/atomics.wast.fromBinary index 95c5473e2..b3bce034a 100644 --- a/test/atomics.wast.fromBinary +++ b/test/atomics.wast.fromBinary @@ -1,7 +1,7 @@ (module (type $0 (func)) (memory $0 23 256 shared) - (func $atomics (type $0) + (func $atomic-loadstore (type $0) (local $var$0 i32) (local $var$1 i64) (block $label$0 @@ -70,5 +70,41 @@ ) ) ) + (func $atomic-rmw (type $0) + (local $var$0 i32) + (local $var$1 i64) + (block $label$0 + (drop + (i32.atomic.rmw.add offset=4 + (get_local $var$0) + (get_local $var$0) + ) + ) + (drop + (i32.atomic.rmw8_u.add offset=4 + (get_local $var$0) + (get_local $var$0) + 
diff --git a/test/atomics.wast.fromBinary b/test/atomics.wast.fromBinary
index 95c5473e2..b3bce034a 100644
--- a/test/atomics.wast.fromBinary
+++ b/test/atomics.wast.fromBinary
@@ -1,7 +1,7 @@
 (module
  (type $0 (func))
  (memory $0 23 256 shared)
- (func $atomics (type $0)
+ (func $atomic-loadstore (type $0)
   (local $var$0 i32)
   (local $var$1 i64)
   (block $label$0
@@ -70,5 +70,41 @@
    )
   )
  )
+ (func $atomic-rmw (type $0)
+  (local $var$0 i32)
+  (local $var$1 i64)
+  (block $label$0
+   (drop
+    (i32.atomic.rmw.add offset=4
+     (get_local $var$0)
+     (get_local $var$0)
+    )
+   )
+   (drop
+    (i32.atomic.rmw8_u.add offset=4
+     (get_local $var$0)
+     (get_local $var$0)
+    )
+   )
+   (drop
+    (i32.atomic.rmw16_u.and
+     (get_local $var$0)
+     (get_local $var$0)
+    )
+   )
+   (drop
+    (i64.atomic.rmw32_u.or
+     (get_local $var$0)
+     (get_local $var$1)
+    )
+   )
+   (drop
+    (i32.atomic.rmw8_u.xchg
+     (get_local $var$0)
+     (get_local $var$0)
+    )
+   )
+  )
+ )
 )
diff --git a/test/atomics.wast.fromBinary.noDebugInfo b/test/atomics.wast.fromBinary.noDebugInfo
index 279ef79a6..3777417bf 100644
--- a/test/atomics.wast.fromBinary.noDebugInfo
+++ b/test/atomics.wast.fromBinary.noDebugInfo
@@ -70,5 +70,41 @@
    )
   )
  )
+ (func $1 (type $0)
+  (local $var$0 i32)
+  (local $var$1 i64)
+  (block $label$0
+   (drop
+    (i32.atomic.rmw.add offset=4
+     (get_local $var$0)
+     (get_local $var$0)
+    )
+   )
+   (drop
+    (i32.atomic.rmw8_u.add offset=4
+     (get_local $var$0)
+     (get_local $var$0)
+    )
+   )
+   (drop
+    (i32.atomic.rmw16_u.and
+     (get_local $var$0)
+     (get_local $var$0)
+    )
+   )
+   (drop
+    (i64.atomic.rmw32_u.or
+     (get_local $var$0)
+     (get_local $var$1)
+    )
+   )
+   (drop
+    (i32.atomic.rmw8_u.xchg
+     (get_local $var$0)
+     (get_local $var$0)
+    )
+   )
+  )
+ )
 )