-rwxr-xr-x  scripts/gen-s-parser.py        |  52
-rw-r--r--  src/gen-s-parser.inc           |  86
-rw-r--r--  src/wasm-builder.h             |  12
-rw-r--r--  src/wasm-s-parser.h            |   5
-rw-r--r--  src/wasm/wasm-s-parser.cpp     |  36
-rw-r--r--  src/wasm/wat-parser.cpp        | 285
-rw-r--r--  test/lit/wat-kitchen-sink.wast | 221
7 files changed, 575 insertions(+), 122 deletions(-)
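This patch threads each SIMD load and load/store-lane instruction's natural byte width from gen-s-parser.py into the makeSIMDLoad/makeSIMDLoadStoreLane helpers, changes the Builder's memory offsets from uint32_t to Address, and implements the memory instructions (loads, stores, atomics, SIMD loads and lane ops) in the new WAT parser, including their offset=/align= immediates. For orientation, here is a self-contained C++ sketch of the memarg rule that parser implements; the names (Memarg, parseKeywordArg, parseMemarg) and the token-vector interface are invented for illustration and are not the actual ParseInput API.

// Standalone sketch of the memarg rule the patch adds to the new WAT parser:
//   memarg_n ::= offset? align?
// where offset= defaults to 0 and align= defaults to the instruction's
// natural width n. Helper names and the token interface are illustrative only.
#include <charconv>
#include <cstdint>
#include <optional>
#include <string_view>
#include <vector>

struct Memarg {
  uint64_t offset;
  uint32_t align;
};

// Parse "<prefix><number>" from a keyword token, e.g. "offset=42" or "align=8".
static std::optional<uint64_t> parseKeywordArg(std::string_view token,
                                               std::string_view prefix) {
  if (token.substr(0, prefix.size()) != prefix) {
    return std::nullopt;
  }
  auto num = token.substr(prefix.size());
  uint64_t value = 0;
  auto [ptr, ec] = std::from_chars(num.data(), num.data() + num.size(), value);
  if (ec != std::errc{} || ptr != num.data() + num.size()) {
    return std::nullopt;
  }
  return value;
}

// Consume an optional offset= and an optional align= from the token stream,
// falling back to offset 0 and the natural alignment n.
static Memarg parseMemarg(const std::vector<std::string_view>& toks,
                          size_t& i,
                          uint32_t n) {
  Memarg arg{0, n};
  if (i < toks.size()) {
    if (auto o = parseKeywordArg(toks[i], "offset=")) {
      arg.offset = *o;
      ++i;
    }
  }
  if (i < toks.size()) {
    if (auto a = parseKeywordArg(toks[i], "align=")) {
      arg.align = static_cast<uint32_t>(*a);
      ++i;
    }
  }
  return arg;
}

With this defaulting rule, v128.load8x8_s offset=8 align=8 carries an explicit align equal to its natural width of 8 (so the printer omits it again), while i32.load offset=42 falls back to the default align of 4, matching the CHECK lines in the updated kitchen-sink test below.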
diff --git a/scripts/gen-s-parser.py b/scripts/gen-s-parser.py index e2759b2eb..47ef4c600 100755 --- a/scripts/gen-s-parser.py +++ b/scripts/gen-s-parser.py @@ -357,14 +357,14 @@ instructions = [ ("v128.andnot", "makeBinary(s, BinaryOp::AndNotVec128)"), ("v128.any_true", "makeUnary(s, UnaryOp::AnyTrueVec128)"), ("v128.bitselect", "makeSIMDTernary(s, SIMDTernaryOp::Bitselect)"), - ("v128.load8_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load8LaneVec128)"), - ("v128.load16_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load16LaneVec128)"), - ("v128.load32_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load32LaneVec128)"), - ("v128.load64_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load64LaneVec128)"), - ("v128.store8_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store8LaneVec128)"), - ("v128.store16_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store16LaneVec128)"), - ("v128.store32_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store32LaneVec128)"), - ("v128.store64_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store64LaneVec128)"), + ("v128.load8_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load8LaneVec128, 1)"), + ("v128.load16_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load16LaneVec128, 2)"), + ("v128.load32_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load32LaneVec128, 4)"), + ("v128.load64_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load64LaneVec128, 8)"), + ("v128.store8_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store8LaneVec128, 1)"), + ("v128.store16_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store16LaneVec128, 2)"), + ("v128.store32_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store32LaneVec128, 4)"), + ("v128.store64_lane", "makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store64LaneVec128, 8)"), ("i8x16.popcnt", "makeUnary(s, UnaryOp::PopcntVecI8x16)"), ("i8x16.abs", "makeUnary(s, UnaryOp::AbsVecI8x16)"), ("i8x16.neg", "makeUnary(s, UnaryOp::NegVecI8x16)"), @@ -475,18 +475,18 @@ instructions = [ ("i32x4.trunc_sat_f32x4_u", "makeUnary(s, UnaryOp::TruncSatUVecF32x4ToVecI32x4)"), ("f32x4.convert_i32x4_s", "makeUnary(s, UnaryOp::ConvertSVecI32x4ToVecF32x4)"), ("f32x4.convert_i32x4_u", "makeUnary(s, UnaryOp::ConvertUVecI32x4ToVecF32x4)"), - ("v128.load8_splat", "makeSIMDLoad(s, SIMDLoadOp::Load8SplatVec128)"), - ("v128.load16_splat", "makeSIMDLoad(s, SIMDLoadOp::Load16SplatVec128)"), - ("v128.load32_splat", "makeSIMDLoad(s, SIMDLoadOp::Load32SplatVec128)"), - ("v128.load64_splat", "makeSIMDLoad(s, SIMDLoadOp::Load64SplatVec128)"), - ("v128.load8x8_s", "makeSIMDLoad(s, SIMDLoadOp::Load8x8SVec128)"), - ("v128.load8x8_u", "makeSIMDLoad(s, SIMDLoadOp::Load8x8UVec128)"), - ("v128.load16x4_s", "makeSIMDLoad(s, SIMDLoadOp::Load16x4SVec128)"), - ("v128.load16x4_u", "makeSIMDLoad(s, SIMDLoadOp::Load16x4UVec128)"), - ("v128.load32x2_s", "makeSIMDLoad(s, SIMDLoadOp::Load32x2SVec128)"), - ("v128.load32x2_u", "makeSIMDLoad(s, SIMDLoadOp::Load32x2UVec128)"), - ("v128.load32_zero", "makeSIMDLoad(s, SIMDLoadOp::Load32ZeroVec128)"), - ("v128.load64_zero", "makeSIMDLoad(s, SIMDLoadOp::Load64ZeroVec128)"), + ("v128.load8_splat", "makeSIMDLoad(s, SIMDLoadOp::Load8SplatVec128, 1)"), + ("v128.load16_splat", "makeSIMDLoad(s, SIMDLoadOp::Load16SplatVec128, 2)"), + ("v128.load32_splat", "makeSIMDLoad(s, SIMDLoadOp::Load32SplatVec128, 4)"), + ("v128.load64_splat", "makeSIMDLoad(s, SIMDLoadOp::Load64SplatVec128, 8)"), + ("v128.load8x8_s", "makeSIMDLoad(s, 
SIMDLoadOp::Load8x8SVec128, 8)"), + ("v128.load8x8_u", "makeSIMDLoad(s, SIMDLoadOp::Load8x8UVec128, 8)"), + ("v128.load16x4_s", "makeSIMDLoad(s, SIMDLoadOp::Load16x4SVec128, 8)"), + ("v128.load16x4_u", "makeSIMDLoad(s, SIMDLoadOp::Load16x4UVec128, 8)"), + ("v128.load32x2_s", "makeSIMDLoad(s, SIMDLoadOp::Load32x2SVec128, 8)"), + ("v128.load32x2_u", "makeSIMDLoad(s, SIMDLoadOp::Load32x2UVec128, 8)"), + ("v128.load32_zero", "makeSIMDLoad(s, SIMDLoadOp::Load32ZeroVec128, 4)"), + ("v128.load64_zero", "makeSIMDLoad(s, SIMDLoadOp::Load64ZeroVec128, 8)"), ("i8x16.narrow_i16x8_s", "makeBinary(s, BinaryOp::NarrowSVecI16x8ToVecI8x16)"), ("i8x16.narrow_i16x8_u", "makeBinary(s, BinaryOp::NarrowUVecI16x8ToVecI8x16)"), ("i16x8.narrow_i32x4_s", "makeBinary(s, BinaryOp::NarrowSVecI32x4ToVecI16x8)"), @@ -711,12 +711,16 @@ def instruction_parser(new_parser=False): printer = CodePrinter() - if not new_parser: + printer.print_line("char buf[{}] = {{}};".format(inst_length + 1)) + + if new_parser: + printer.print_line("auto str = *keyword;") + else: printer.print_line("using namespace std::string_view_literals;") printer.print_line("auto str = s[0]->str().str;") - printer.print_line("char buf[{}] = {{}};".format(inst_length + 1)) - printer.print_line("memcpy(buf, str.data(), str.size());") - printer.print_line("std::string_view op = {buf, str.size()};") + + printer.print_line("memcpy(buf, str.data(), str.size());") + printer.print_line("std::string_view op = {buf, str.size()};") def print_leaf(expr, inst): if new_parser: diff --git a/src/gen-s-parser.inc b/src/gen-s-parser.inc index d9a6dab22..67f58b5e6 100644 --- a/src/gen-s-parser.inc +++ b/src/gen-s-parser.inc @@ -4,9 +4,9 @@ #ifdef INSTRUCTION_PARSER #undef INSTRUCTION_PARSER +char buf[33] = {}; using namespace std::string_view_literals; auto str = s[0]->str().str; -char buf[33] = {}; memcpy(buf, str.data(), str.size()); std::string_view op = {buf, str.size()}; switch (op[0]) { @@ -3396,10 +3396,10 @@ switch (op[0]) { case '_': { switch (op[12]) { case 'l': - if (op == "v128.load16_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load16LaneVec128); } + if (op == "v128.load16_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load16LaneVec128, 2); } goto parse_error; case 's': - if (op == "v128.load16_splat"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load16SplatVec128); } + if (op == "v128.load16_splat"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load16SplatVec128, 2); } goto parse_error; default: goto parse_error; } @@ -3407,10 +3407,10 @@ switch (op[0]) { case 'x': { switch (op[14]) { case 's': - if (op == "v128.load16x4_s"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load16x4SVec128); } + if (op == "v128.load16x4_s"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load16x4SVec128, 8); } goto parse_error; case 'u': - if (op == "v128.load16x4_u"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load16x4UVec128); } + if (op == "v128.load16x4_u"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load16x4UVec128, 8); } goto parse_error; default: goto parse_error; } @@ -3423,13 +3423,13 @@ switch (op[0]) { case '_': { switch (op[12]) { case 'l': - if (op == "v128.load32_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load32LaneVec128); } + if (op == "v128.load32_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load32LaneVec128, 4); } goto parse_error; case 's': - if (op == "v128.load32_splat"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load32SplatVec128); } + if (op == "v128.load32_splat"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load32SplatVec128, 
4); } goto parse_error; case 'z': - if (op == "v128.load32_zero"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load32ZeroVec128); } + if (op == "v128.load32_zero"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load32ZeroVec128, 4); } goto parse_error; default: goto parse_error; } @@ -3437,10 +3437,10 @@ switch (op[0]) { case 'x': { switch (op[14]) { case 's': - if (op == "v128.load32x2_s"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load32x2SVec128); } + if (op == "v128.load32x2_s"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load32x2SVec128, 8); } goto parse_error; case 'u': - if (op == "v128.load32x2_u"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load32x2UVec128); } + if (op == "v128.load32x2_u"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load32x2UVec128, 8); } goto parse_error; default: goto parse_error; } @@ -3451,13 +3451,13 @@ switch (op[0]) { case '6': { switch (op[12]) { case 'l': - if (op == "v128.load64_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load64LaneVec128); } + if (op == "v128.load64_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load64LaneVec128, 8); } goto parse_error; case 's': - if (op == "v128.load64_splat"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load64SplatVec128); } + if (op == "v128.load64_splat"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load64SplatVec128, 8); } goto parse_error; case 'z': - if (op == "v128.load64_zero"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load64ZeroVec128); } + if (op == "v128.load64_zero"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load64ZeroVec128, 8); } goto parse_error; default: goto parse_error; } @@ -3467,10 +3467,10 @@ switch (op[0]) { case '_': { switch (op[11]) { case 'l': - if (op == "v128.load8_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load8LaneVec128); } + if (op == "v128.load8_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load8LaneVec128, 1); } goto parse_error; case 's': - if (op == "v128.load8_splat"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load8SplatVec128); } + if (op == "v128.load8_splat"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load8SplatVec128, 1); } goto parse_error; default: goto parse_error; } @@ -3478,10 +3478,10 @@ switch (op[0]) { case 'x': { switch (op[13]) { case 's': - if (op == "v128.load8x8_s"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load8x8SVec128); } + if (op == "v128.load8x8_s"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load8x8SVec128, 8); } goto parse_error; case 'u': - if (op == "v128.load8x8_u"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load8x8UVec128); } + if (op == "v128.load8x8_u"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load8x8UVec128, 8); } goto parse_error; default: goto parse_error; } @@ -3504,16 +3504,16 @@ switch (op[0]) { if (op == "v128.store"sv) { return makeStore(s, Type::v128, 16, /*isAtomic=*/false); } goto parse_error; case '1': - if (op == "v128.store16_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store16LaneVec128); } + if (op == "v128.store16_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store16LaneVec128, 2); } goto parse_error; case '3': - if (op == "v128.store32_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store32LaneVec128); } + if (op == "v128.store32_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store32LaneVec128, 4); } goto parse_error; case '6': - if (op == "v128.store64_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store64LaneVec128); } + if (op == "v128.store64_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store64LaneVec128, 8); } goto 
parse_error; case '8': - if (op == "v128.store8_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store8LaneVec128); } + if (op == "v128.store8_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store8LaneVec128, 1); } goto parse_error; default: goto parse_error; } @@ -3532,6 +3532,10 @@ parse_error: #ifdef NEW_INSTRUCTION_PARSER #undef NEW_INSTRUCTION_PARSER +char buf[33] = {}; +auto str = *keyword; +memcpy(buf, str.data(), str.size()); +std::string_view op = {buf, str.size()}; switch (op[0]) { case 'a': { switch (op[1]) { @@ -9216,14 +9220,14 @@ switch (op[0]) { switch (op[12]) { case 'l': if (op == "v128.load16_lane"sv) { - auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Load16LaneVec128); + auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Load16LaneVec128, 2); CHECK_ERR(ret); return *ret; } goto parse_error; case 's': if (op == "v128.load16_splat"sv) { - auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load16SplatVec128); + auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load16SplatVec128, 2); CHECK_ERR(ret); return *ret; } @@ -9235,14 +9239,14 @@ switch (op[0]) { switch (op[14]) { case 's': if (op == "v128.load16x4_s"sv) { - auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load16x4SVec128); + auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load16x4SVec128, 8); CHECK_ERR(ret); return *ret; } goto parse_error; case 'u': if (op == "v128.load16x4_u"sv) { - auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load16x4UVec128); + auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load16x4UVec128, 8); CHECK_ERR(ret); return *ret; } @@ -9259,21 +9263,21 @@ switch (op[0]) { switch (op[12]) { case 'l': if (op == "v128.load32_lane"sv) { - auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Load32LaneVec128); + auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Load32LaneVec128, 4); CHECK_ERR(ret); return *ret; } goto parse_error; case 's': if (op == "v128.load32_splat"sv) { - auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load32SplatVec128); + auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load32SplatVec128, 4); CHECK_ERR(ret); return *ret; } goto parse_error; case 'z': if (op == "v128.load32_zero"sv) { - auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load32ZeroVec128); + auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load32ZeroVec128, 4); CHECK_ERR(ret); return *ret; } @@ -9285,14 +9289,14 @@ switch (op[0]) { switch (op[14]) { case 's': if (op == "v128.load32x2_s"sv) { - auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load32x2SVec128); + auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load32x2SVec128, 8); CHECK_ERR(ret); return *ret; } goto parse_error; case 'u': if (op == "v128.load32x2_u"sv) { - auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load32x2UVec128); + auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load32x2UVec128, 8); CHECK_ERR(ret); return *ret; } @@ -9307,21 +9311,21 @@ switch (op[0]) { switch (op[12]) { case 'l': if (op == "v128.load64_lane"sv) { - auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Load64LaneVec128); + auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Load64LaneVec128, 8); CHECK_ERR(ret); return *ret; } goto parse_error; case 's': if (op == "v128.load64_splat"sv) { - auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load64SplatVec128); + auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load64SplatVec128, 8); CHECK_ERR(ret); return *ret; } goto parse_error; case 'z': if (op == "v128.load64_zero"sv) { - auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load64ZeroVec128); + auto ret = 
makeSIMDLoad(ctx, pos, SIMDLoadOp::Load64ZeroVec128, 8); CHECK_ERR(ret); return *ret; } @@ -9335,14 +9339,14 @@ switch (op[0]) { switch (op[11]) { case 'l': if (op == "v128.load8_lane"sv) { - auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Load8LaneVec128); + auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Load8LaneVec128, 1); CHECK_ERR(ret); return *ret; } goto parse_error; case 's': if (op == "v128.load8_splat"sv) { - auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load8SplatVec128); + auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load8SplatVec128, 1); CHECK_ERR(ret); return *ret; } @@ -9354,14 +9358,14 @@ switch (op[0]) { switch (op[13]) { case 's': if (op == "v128.load8x8_s"sv) { - auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load8x8SVec128); + auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load8x8SVec128, 8); CHECK_ERR(ret); return *ret; } goto parse_error; case 'u': if (op == "v128.load8x8_u"sv) { - auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load8x8UVec128); + auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load8x8UVec128, 8); CHECK_ERR(ret); return *ret; } @@ -9400,28 +9404,28 @@ switch (op[0]) { goto parse_error; case '1': if (op == "v128.store16_lane"sv) { - auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Store16LaneVec128); + auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Store16LaneVec128, 2); CHECK_ERR(ret); return *ret; } goto parse_error; case '3': if (op == "v128.store32_lane"sv) { - auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Store32LaneVec128); + auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Store32LaneVec128, 4); CHECK_ERR(ret); return *ret; } goto parse_error; case '6': if (op == "v128.store64_lane"sv) { - auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Store64LaneVec128); + auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Store64LaneVec128, 8); CHECK_ERR(ret); return *ret; } goto parse_error; case '8': if (op == "v128.store8_lane"sv) { - auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Store8LaneVec128); + auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Store8LaneVec128, 1); CHECK_ERR(ret); return *ret; } diff --git a/src/wasm-builder.h b/src/wasm-builder.h index b07af4627..df036b3b4 100644 --- a/src/wasm-builder.h +++ b/src/wasm-builder.h @@ -367,7 +367,7 @@ public: } Load* makeLoad(unsigned bytes, bool signed_, - uint32_t offset, + Address offset, unsigned align, Expression* ptr, Type type, @@ -384,7 +384,7 @@ public: return ret; } Load* makeAtomicLoad( - unsigned bytes, uint32_t offset, Expression* ptr, Type type, Name memory) { + unsigned bytes, Address offset, Expression* ptr, Type type, Name memory) { Load* load = makeLoad(bytes, false, offset, bytes, ptr, type, memory); load->isAtomic = true; return load; @@ -419,7 +419,7 @@ public: } AtomicFence* makeAtomicFence() { return wasm.allocator.alloc<AtomicFence>(); } Store* makeStore(unsigned bytes, - uint32_t offset, + Address offset, unsigned align, Expression* ptr, Expression* value, @@ -439,7 +439,7 @@ public: return ret; } Store* makeAtomicStore(unsigned bytes, - uint32_t offset, + Address offset, Expression* ptr, Expression* value, Type type, @@ -450,7 +450,7 @@ public: } AtomicRMW* makeAtomicRMW(AtomicRMWOp op, unsigned bytes, - uint32_t offset, + Address offset, Expression* ptr, Expression* value, Type type, @@ -467,7 +467,7 @@ public: return ret; } AtomicCmpxchg* makeAtomicCmpxchg(unsigned bytes, - uint32_t offset, + Address offset, Expression* 
ptr, Expression* expected, Expression* replacement, diff --git a/src/wasm-s-parser.h b/src/wasm-s-parser.h index 1470701e4..888f7d009 100644 --- a/src/wasm-s-parser.h +++ b/src/wasm-s-parser.h @@ -238,8 +238,9 @@ private: Expression* makeSIMDShuffle(Element& s); Expression* makeSIMDTernary(Element& s, SIMDTernaryOp op); Expression* makeSIMDShift(Element& s, SIMDShiftOp op); - Expression* makeSIMDLoad(Element& s, SIMDLoadOp op); - Expression* makeSIMDLoadStoreLane(Element& s, SIMDLoadStoreLaneOp op); + Expression* makeSIMDLoad(Element& s, SIMDLoadOp op, int bytes); + Expression* + makeSIMDLoadStoreLane(Element& s, SIMDLoadStoreLaneOp op, int bytes); Expression* makeMemoryInit(Element& s); Expression* makeDataDrop(Element& s); Expression* makeMemoryCopy(Element& s); diff --git a/src/wasm/wasm-s-parser.cpp b/src/wasm/wasm-s-parser.cpp index 347bbb713..a54226194 100644 --- a/src/wasm/wasm-s-parser.cpp +++ b/src/wasm/wasm-s-parser.cpp @@ -2180,32 +2180,12 @@ Expression* SExpressionWasmBuilder::makeSIMDShift(Element& s, SIMDShiftOp op) { return ret; } -Expression* SExpressionWasmBuilder::makeSIMDLoad(Element& s, SIMDLoadOp op) { +Expression* +SExpressionWasmBuilder::makeSIMDLoad(Element& s, SIMDLoadOp op, int bytes) { auto ret = allocator.alloc<SIMDLoad>(); ret->op = op; ret->offset = 0; - switch (op) { - case Load8SplatVec128: - ret->align = 1; - break; - case Load16SplatVec128: - ret->align = 2; - break; - case Load32SplatVec128: - case Load32ZeroVec128: - ret->align = 4; - break; - case Load64SplatVec128: - case Load8x8SVec128: - case Load8x8UVec128: - case Load16x4SVec128: - case Load16x4UVec128: - case Load32x2SVec128: - case Load32x2UVec128: - case Load64ZeroVec128: - ret->align = 8; - break; - } + ret->align = bytes; Index i = 1; Name memory; // Check to make sure there are more than the default args & this str isn't @@ -2222,32 +2202,28 @@ Expression* SExpressionWasmBuilder::makeSIMDLoad(Element& s, SIMDLoadOp op) { return ret; } -Expression* -SExpressionWasmBuilder::makeSIMDLoadStoreLane(Element& s, - SIMDLoadStoreLaneOp op) { +Expression* SExpressionWasmBuilder::makeSIMDLoadStoreLane( + Element& s, SIMDLoadStoreLaneOp op, int bytes) { auto* ret = allocator.alloc<SIMDLoadStoreLane>(); ret->op = op; ret->offset = 0; + ret->align = bytes; size_t lanes; switch (op) { case Load8LaneVec128: case Store8LaneVec128: - ret->align = 1; lanes = 16; break; case Load16LaneVec128: case Store16LaneVec128: - ret->align = 2; lanes = 8; break; case Load32LaneVec128: case Store32LaneVec128: - ret->align = 4; lanes = 4; break; case Load64LaneVec128: case Store64LaneVec128: - ret->align = 8; lanes = 2; break; default: diff --git a/src/wasm/wat-parser.cpp b/src/wasm/wat-parser.cpp index 1cc2623ca..16f33d2d7 100644 --- a/src/wasm/wat-parser.cpp +++ b/src/wasm/wat-parser.cpp @@ -158,6 +158,50 @@ struct ParseInput { return false; } + std::optional<uint64_t> takeOffset() { + if (auto t = peek()) { + if (auto keyword = t->getKeyword()) { + if (keyword->substr(0, 7) != "offset="sv) { + return {}; + } + Lexer subLexer(keyword->substr(7)); + if (subLexer == subLexer.end()) { + return {}; + } + if (auto o = subLexer->getU64()) { + ++subLexer; + if (subLexer == subLexer.end()) { + ++lexer; + return o; + } + } + } + } + return {}; + } + + std::optional<uint32_t> takeAlign() { + if (auto t = peek()) { + if (auto keyword = t->getKeyword()) { + if (keyword->substr(0, 6) != "align="sv) { + return {}; + } + Lexer subLexer(keyword->substr(6)); + if (subLexer == subLexer.end()) { + return {}; + } + if (auto a = 
subLexer->getU32()) { + ++subLexer; + if (subLexer == subLexer.end()) { + ++lexer; + return a; + } + } + } + } + return {}; + } + std::optional<uint64_t> takeU64() { if (auto t = peek()) { if (auto n = t->getU64()) { @@ -335,6 +379,11 @@ struct MemType { bool shared; }; +struct Memarg { + uint64_t offset; + uint32_t align; +}; + // RAII utility for temporarily changing the parsing position of a parsing // context. template<typename Ctx> struct WithPosition { @@ -614,6 +663,8 @@ struct NullInstrParserCtx { using GlobalT = Ok; using MemoryT = Ok; + using MemargT = Ok; + InstrsT makeInstrs() { return Ok{}; } void appendInstr(InstrsT&, InstrT) {} InstrsT finishInstrs(InstrsT&) { return Ok{}; } @@ -627,6 +678,8 @@ struct NullInstrParserCtx { MemoryT getMemoryFromIdx(uint32_t) { return Ok{}; } MemoryT getMemoryFromName(Name) { return Ok{}; } + MemargT getMemarg(uint64_t, uint32_t) { return Ok{}; } + InstrT makeUnreachable(Index) { return Ok{}; } InstrT makeNop(Index) { return Ok{}; } InstrT makeBinary(Index, BinaryOp) { return Ok{}; } @@ -647,12 +700,27 @@ struct NullInstrParserCtx { InstrT makeI64Const(Index, uint64_t) { return Ok{}; } InstrT makeF32Const(Index, float) { return Ok{}; } InstrT makeF64Const(Index, double) { return Ok{}; } - + InstrT makeLoad(Index, Type, bool, int, bool, MemoryT*, MemargT) { + return Ok{}; + } + InstrT makeStore(Index, Type, int, bool, MemoryT*, MemargT) { return Ok{}; } + InstrT makeAtomicRMW(Index, AtomicRMWOp, Type, int, MemoryT*, MemargT) { + return Ok{}; + } + InstrT makeAtomicCmpxchg(Index, Type, int, MemoryT*, MemargT) { return Ok{}; } + InstrT makeAtomicWait(Index, Type, MemoryT*, MemargT) { return Ok{}; } + InstrT makeAtomicNotify(Index, MemoryT*, MemargT) { return Ok{}; } + InstrT makeAtomicFence(Index) { return Ok{}; } InstrT makeSIMDExtract(Index, SIMDExtractOp, uint8_t) { return Ok{}; } InstrT makeSIMDReplace(Index, SIMDReplaceOp, uint8_t) { return Ok{}; } InstrT makeSIMDShuffle(Index, const std::array<uint8_t, 16>&) { return Ok{}; } InstrT makeSIMDTernary(Index, SIMDTernaryOp) { return Ok{}; } InstrT makeSIMDShift(Index, SIMDShiftOp) { return Ok{}; } + InstrT makeSIMDLoad(Index, SIMDLoadOp, MemoryT*, MemargT) { return Ok{}; } + InstrT makeSIMDLoadStoreLane( + Index, SIMDLoadStoreLaneOp, MemoryT*, MemargT, uint8_t) { + return Ok{}; + } template<typename HeapTypeT> InstrT makeRefNull(Index, HeapTypeT) { return {}; @@ -670,6 +738,8 @@ template<typename Ctx> struct InstrParserCtx : TypeParserCtx<Ctx> { using GlobalT = Name; using MemoryT = Name; + using MemargT = Memarg; + Builder builder; // The stack of parsed expressions, used as the children of newly parsed @@ -782,6 +852,8 @@ template<typename Ctx> struct InstrParserCtx : TypeParserCtx<Ctx> { return std::move(exprStack); } + Memarg getMemarg(uint64_t offset, uint32_t align) { return {offset, align}; } + ExprT makeExpr(InstrsT& instrs) { switch (instrs.size()) { case 0: @@ -856,6 +928,95 @@ template<typename Ctx> struct InstrParserCtx : TypeParserCtx<Ctx> { Result<> makeF64Const(Index pos, double c) { return push(pos, builder.makeConst(Literal(c))); } + Result<> makeLoad(Index pos, + Type type, + bool signed_, + int bytes, + bool isAtomic, + Name* mem, + Memarg memarg) { + auto m = self().getMemory(pos, mem); + CHECK_ERR(m); + auto ptr = pop(pos); + CHECK_ERR(ptr); + if (isAtomic) { + return push(pos, + builder.makeAtomicLoad(bytes, memarg.offset, *ptr, type, *m)); + } + return push(pos, + builder.makeLoad( + bytes, signed_, memarg.offset, memarg.align, *ptr, type, *m)); + } + Result<> makeStore( + 
Index pos, Type type, int bytes, bool isAtomic, Name* mem, Memarg memarg) { + auto m = self().getMemory(pos, mem); + CHECK_ERR(m); + auto val = pop(pos); + CHECK_ERR(val); + auto ptr = pop(pos); + CHECK_ERR(ptr); + if (isAtomic) { + return push( + pos, + builder.makeAtomicStore(bytes, memarg.offset, *ptr, *val, type, *m)); + } + return push(pos, + builder.makeStore( + bytes, memarg.offset, memarg.align, *ptr, *val, type, *m)); + } + Result<> makeAtomicRMW( + Index pos, AtomicRMWOp op, Type type, int bytes, Name* mem, Memarg memarg) { + auto m = self().getMemory(pos, mem); + CHECK_ERR(m); + auto val = pop(pos); + CHECK_ERR(val); + auto ptr = pop(pos); + CHECK_ERR(ptr); + return push( + pos, + builder.makeAtomicRMW(op, bytes, memarg.offset, *ptr, *val, type, *m)); + } + Result<> + makeAtomicCmpxchg(Index pos, Type type, int bytes, Name* mem, Memarg memarg) { + auto m = self().getMemory(pos, mem); + CHECK_ERR(m); + auto replacement = pop(pos); + CHECK_ERR(replacement); + auto expected = pop(pos); + CHECK_ERR(expected); + auto ptr = pop(pos); + CHECK_ERR(ptr); + return push( + pos, + builder.makeAtomicCmpxchg( + bytes, memarg.offset, *ptr, *expected, *replacement, type, *m)); + } + Result<> makeAtomicWait(Index pos, Type type, Name* mem, Memarg memarg) { + auto m = self().getMemory(pos, mem); + CHECK_ERR(m); + auto timeout = pop(pos); + CHECK_ERR(timeout); + auto expected = pop(pos); + CHECK_ERR(expected); + auto ptr = pop(pos); + CHECK_ERR(ptr); + return push(pos, + builder.makeAtomicWait( + *ptr, *expected, *timeout, type, memarg.offset, *m)); + } + Result<> makeAtomicNotify(Index pos, Name* mem, Memarg memarg) { + auto m = self().getMemory(pos, mem); + CHECK_ERR(m); + auto count = pop(pos); + CHECK_ERR(count); + auto ptr = pop(pos); + CHECK_ERR(ptr); + return push(pos, builder.makeAtomicNotify(*ptr, *count, memarg.offset, *m)); + } + Result<> makeAtomicFence(Index pos) { + return push(pos, builder.makeAtomicFence()); + } + Result<> makeRefNull(Index pos, HeapType type) { return push(pos, builder.makeRefNull(type)); } @@ -1507,6 +1668,28 @@ struct ParseDefsCtx : InstrParserCtx<ParseDefsCtx> { CHECK_ERR(vec); return push(pos, builder.makeSIMDShift(op, *vec, *shift)); } + + Result<> makeSIMDLoad(Index pos, SIMDLoadOp op, Name* mem, Memarg memarg) { + auto m = self().getMemory(pos, mem); + CHECK_ERR(m); + auto ptr = pop(pos); + CHECK_ERR(ptr); + return push( + pos, builder.makeSIMDLoad(op, memarg.offset, memarg.align, *ptr, *m)); + } + + Result<> makeSIMDLoadStoreLane( + Index pos, SIMDLoadStoreLaneOp op, Name* mem, Memarg memarg, uint8_t lane) { + auto m = self().getMemory(pos, mem); + CHECK_ERR(m); + auto vec = pop(pos); + CHECK_ERR(vec); + auto ptr = pop(pos); + CHECK_ERR(ptr); + return push(pos, + builder.makeSIMDLoadStoreLane( + op, memarg.offset, memarg.align, lane, *ptr, *vec, *m)); + } }; // ================ @@ -1534,6 +1717,7 @@ template<typename Ctx> Result<typename Ctx::GlobalTypeT> globaltype(Ctx&); template<typename Ctx> MaybeResult<typename Ctx::InstrT> instr(Ctx&); template<typename Ctx> Result<typename Ctx::InstrsT> instrs(Ctx&); template<typename Ctx> Result<typename Ctx::ExprT> expr(Ctx&); +template<typename Ctx> Result<typename Ctx::MemargT> memarg(Ctx&, uint32_t); template<typename Ctx> Result<typename Ctx::InstrT> makeUnreachable(Ctx&, Index); template<typename Ctx> Result<typename Ctx::InstrT> makeNop(Ctx&, Index); @@ -1585,10 +1769,11 @@ Result<typename Ctx::InstrT> makeSIMDTernary(Ctx&, Index, SIMDTernaryOp op); template<typename Ctx> Result<typename Ctx::InstrT> 
makeSIMDShift(Ctx&, Index, SIMDShiftOp op); template<typename Ctx> -Result<typename Ctx::InstrT> makeSIMDLoad(Ctx&, Index, SIMDLoadOp op); +Result<typename Ctx::InstrT> +makeSIMDLoad(Ctx&, Index, SIMDLoadOp op, int bytes); template<typename Ctx> Result<typename Ctx::InstrT> -makeSIMDLoadStoreLane(Ctx&, Index, SIMDLoadStoreLaneOp op); +makeSIMDLoadStoreLane(Ctx&, Index, SIMDLoadStoreLaneOp op, int bytes); template<typename Ctx> Result<typename Ctx::InstrT> makeMemoryInit(Ctx&, Index); template<typename Ctx> Result<typename Ctx::InstrT> makeDataDrop(Ctx&, Index); template<typename Ctx> Result<typename Ctx::InstrT> makeMemoryCopy(Ctx&, Index); @@ -2046,8 +2231,6 @@ template<typename Ctx> MaybeResult<typename Ctx::InstrT> instr(Ctx& ctx) { return {}; } - auto op = *keyword; - #define NEW_INSTRUCTION_PARSER #define NEW_WAT_PARSER #include <gen-s-parser.inc> @@ -2123,6 +2306,22 @@ template<typename Ctx> Result<typename Ctx::ExprT> expr(Ctx& ctx) { return ctx.makeExpr(*insts); } +// memarg_n ::= o:offset a:align_n +// offset ::= 'offset='o:u64 => o | _ => 0 +// align_n ::= 'align='a:u32 => a | _ => n +template<typename Ctx> +Result<typename Ctx::MemargT> memarg(Ctx& ctx, uint32_t n) { + uint64_t offset = 0; + uint32_t align = n; + if (auto o = ctx.in.takeOffset()) { + offset = *o; + } + if (auto a = ctx.in.takeAlign()) { + align = *a; + } + return ctx.getMemarg(offset, align); +} + template<typename Ctx> Result<typename Ctx::InstrT> makeUnreachable(Ctx& ctx, Index pos) { return ctx.makeUnreachable(pos); @@ -2250,40 +2449,64 @@ Result<typename Ctx::InstrT> makeConst(Ctx& ctx, Index pos, Type type) { template<typename Ctx> Result<typename Ctx::InstrT> makeLoad( Ctx& ctx, Index pos, Type type, bool signed_, int bytes, bool isAtomic) { - return ctx.in.err("unimplemented instruction"); + auto mem = maybeMemidx(ctx); + CHECK_ERR(mem); + auto arg = memarg(ctx, bytes); + CHECK_ERR(arg); + return ctx.makeLoad(pos, type, signed_, bytes, isAtomic, mem.getPtr(), *arg); } template<typename Ctx> Result<typename Ctx::InstrT> makeStore(Ctx& ctx, Index pos, Type type, int bytes, bool isAtomic) { - return ctx.in.err("unimplemented instruction"); + auto mem = maybeMemidx(ctx); + CHECK_ERR(mem); + auto arg = memarg(ctx, bytes); + CHECK_ERR(arg); + return ctx.makeStore(pos, type, bytes, isAtomic, mem.getPtr(), *arg); } template<typename Ctx> Result<typename Ctx::InstrT> makeAtomicRMW(Ctx& ctx, Index pos, AtomicRMWOp op, Type type, uint8_t bytes) { - return ctx.in.err("unimplemented instruction"); + auto mem = maybeMemidx(ctx); + CHECK_ERR(mem); + auto arg = memarg(ctx, bytes); + CHECK_ERR(arg); + return ctx.makeAtomicRMW(pos, op, type, bytes, mem.getPtr(), *arg); } template<typename Ctx> Result<typename Ctx::InstrT> makeAtomicCmpxchg(Ctx& ctx, Index pos, Type type, uint8_t bytes) { - return ctx.in.err("unimplemented instruction"); + auto mem = maybeMemidx(ctx); + CHECK_ERR(mem); + auto arg = memarg(ctx, bytes); + CHECK_ERR(arg); + return ctx.makeAtomicCmpxchg(pos, type, bytes, mem.getPtr(), *arg); } template<typename Ctx> Result<typename Ctx::InstrT> makeAtomicWait(Ctx& ctx, Index pos, Type type) { - return ctx.in.err("unimplemented instruction"); + auto mem = maybeMemidx(ctx); + CHECK_ERR(mem); + auto arg = memarg(ctx, type == Type::i32 ? 
4 : 8); + CHECK_ERR(arg); + return ctx.makeAtomicWait(pos, type, mem.getPtr(), *arg); } template<typename Ctx> Result<typename Ctx::InstrT> makeAtomicNotify(Ctx& ctx, Index pos) { - return ctx.in.err("unimplemented instruction"); + auto mem = maybeMemidx(ctx); + CHECK_ERR(mem); + auto arg = memarg(ctx, 4); + CHECK_ERR(arg); + return ctx.makeAtomicNotify(pos, mem.getPtr(), *arg); } template<typename Ctx> Result<typename Ctx::InstrT> makeAtomicFence(Ctx& ctx, Index pos) { - return ctx.in.err("unimplemented instruction"); + return ctx.makeAtomicFence(pos); } template<typename Ctx> @@ -2332,14 +2555,44 @@ makeSIMDShift(Ctx& ctx, Index pos, SIMDShiftOp op) { } template<typename Ctx> -Result<typename Ctx::InstrT> makeSIMDLoad(Ctx& ctx, Index pos, SIMDLoadOp op) { - return ctx.in.err("unimplemented instruction"); +Result<typename Ctx::InstrT> +makeSIMDLoad(Ctx& ctx, Index pos, SIMDLoadOp op, int bytes) { + auto mem = maybeMemidx(ctx); + CHECK_ERR(mem); + auto arg = memarg(ctx, bytes); + CHECK_ERR(arg); + return ctx.makeSIMDLoad(pos, op, mem.getPtr(), *arg); } template<typename Ctx> Result<typename Ctx::InstrT> -makeSIMDLoadStoreLane(Ctx& ctx, Index pos, SIMDLoadStoreLaneOp op) { - return ctx.in.err("unimplemented instruction"); +makeSIMDLoadStoreLane(Ctx& ctx, Index pos, SIMDLoadStoreLaneOp op, int bytes) { + auto reset = ctx.in.getPos(); + + auto retry = [&]() -> Result<typename Ctx::InstrT> { + // We failed to parse. Maybe the lane index was accidentally parsed as the + // optional memory index. Try again without parsing a memory index. + WithPosition with(ctx, reset); + auto arg = memarg(ctx, bytes); + CHECK_ERR(arg); + auto lane = ctx.in.takeU8(); + if (!lane) { + return ctx.in.err("expected lane index"); + } + return ctx.makeSIMDLoadStoreLane(pos, op, nullptr, *arg, *lane); + }; + + auto mem = maybeMemidx(ctx); + if (mem.getErr()) { + return retry(); + } + auto arg = memarg(ctx, bytes); + CHECK_ERR(arg); + auto lane = ctx.in.takeU8(); + if (!lane) { + return retry(); + } + return ctx.makeSIMDLoadStoreLane(pos, op, mem.getPtr(), *arg, *lane); } template<typename Ctx> diff --git a/test/lit/wat-kitchen-sink.wast b/test/lit/wat-kitchen-sink.wast index 8f5af0d81..4c1ac92e7 100644 --- a/test/lit/wat-kitchen-sink.wast +++ b/test/lit/wat-kitchen-sink.wast @@ -5,13 +5,15 @@ (module $parse ;; types + ;; CHECK: (type $void (func_subtype func)) + ;; CHECK: (type $none_=>_i32 (func_subtype (result i32) func)) ;; CHECK: (type $ret2 (func_subtype (result i32 i32) func)) (type $ret2 (func (result i32 i32))) (rec - ;; CHECK: (type $void (func_subtype func)) + ;; CHECK: (type $i32_i64_=>_none (func_subtype (param i32 i64) func)) ;; CHECK: (type $i32_=>_none (func_subtype (param i32) func)) @@ -27,14 +29,14 @@ ;; CHECK: (type $i32_i32_i32_=>_none (func_subtype (param i32 i32 i32) func)) - ;; CHECK: (type $i32_i64_=>_none (func_subtype (param i32 i64) func)) - ;; CHECK: (type $v128_=>_i32 (func_subtype (param v128) (result i32) func)) ;; CHECK: (type $v128_v128_=>_v128 (func_subtype (param v128 v128) (result v128) func)) ;; CHECK: (type $v128_v128_v128_=>_v128 (func_subtype (param v128 v128 v128) (result v128) func)) + ;; CHECK: (type $i32_i64_v128_=>_none (func_subtype (param i32 i64 v128) func)) + ;; CHECK: (rec ;; CHECK-NEXT: (type $s0 (struct_subtype data)) (type $s0 (sub (struct))) @@ -782,6 +784,176 @@ global.set 4 ) + ;; CHECK: (func $load (type $i32_i64_=>_none) (param $0 i32) (param $1 i64) + ;; CHECK-NEXT: (drop + ;; CHECK-NEXT: (i32.load $mem offset=42 + ;; CHECK-NEXT: (local.get $0) + ;; CHECK-NEXT: 
) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: (drop + ;; CHECK-NEXT: (i64.load8_s $0 + ;; CHECK-NEXT: (local.get $0) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: (drop + ;; CHECK-NEXT: (i32.atomic.load16_u $mem-i64 offset=42 + ;; CHECK-NEXT: (local.get $1) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + (func $load (param i32 i64) + local.get 0 + i32.load offset=42 + drop + local.get 0 + i64.load8_s 1 align=1 + drop + local.get 1 + i32.atomic.load16_u $mem-i64 offset=42 align=2 + drop + ) + + ;; CHECK: (func $store (type $i32_i64_=>_none) (param $0 i32) (param $1 i64) + ;; CHECK-NEXT: (i32.store $mem offset=42 align=1 + ;; CHECK-NEXT: (local.get $0) + ;; CHECK-NEXT: (i32.const 0) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: (i64.atomic.store8 $0 + ;; CHECK-NEXT: (local.get $0) + ;; CHECK-NEXT: (i64.const 1) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: (f32.store $mem-i64 + ;; CHECK-NEXT: (local.get $1) + ;; CHECK-NEXT: (f32.const 2) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + (func $store (param i32 i64) + local.get 0 + i32.const 0 + i32.store offset=42 align=1 + local.get 0 + i64.const 1 + i64.atomic.store8 1 + local.get 1 + f32.const 2 + f32.store $mem-i64 + ) + + ;; CHECK: (func $atomic-rmw (type $i32_i64_=>_none) (param $0 i32) (param $1 i64) + ;; CHECK-NEXT: (drop + ;; CHECK-NEXT: (i32.atomic.rmw16.add_u $mem + ;; CHECK-NEXT: (local.get $0) + ;; CHECK-NEXT: (i32.const 1) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: (drop + ;; CHECK-NEXT: (i64.atomic.rmw.xor $mem-i64 offset=8 + ;; CHECK-NEXT: (local.get $1) + ;; CHECK-NEXT: (i64.const 2) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + (func $atomic-rmw (param i32 i64) + local.get 0 + i32.const 1 + i32.atomic.rmw16.add_u + drop + local.get 1 + i64.const 2 + i64.atomic.rmw.xor $mem-i64 offset=8 align=8 + drop + ) + + ;; CHECK: (func $atomic-cmpxchg (type $i32_i64_=>_none) (param $0 i32) (param $1 i64) + ;; CHECK-NEXT: (drop + ;; CHECK-NEXT: (i32.atomic.rmw8.cmpxchg_u $mem + ;; CHECK-NEXT: (local.get $0) + ;; CHECK-NEXT: (i32.const 1) + ;; CHECK-NEXT: (i32.const 2) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: (drop + ;; CHECK-NEXT: (i64.atomic.rmw32.cmpxchg_u $mem-i64 offset=16 + ;; CHECK-NEXT: (local.get $1) + ;; CHECK-NEXT: (i64.const 3) + ;; CHECK-NEXT: (i64.const 4) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + (func $atomic-cmpxchg (param i32 i64) + local.get 0 + i32.const 1 + i32.const 2 + i32.atomic.rmw8.cmpxchg_u 0 align=1 + drop + local.get 1 + i64.const 3 + i64.const 4 + i64.atomic.rmw32.cmpxchg_u 3 offset=16 + drop + ) + + ;; CHECK: (func $atomic-wait (type $i32_i64_=>_none) (param $0 i32) (param $1 i64) + ;; CHECK-NEXT: (drop + ;; CHECK-NEXT: (memory.atomic.wait32 $mem + ;; CHECK-NEXT: (local.get $0) + ;; CHECK-NEXT: (i32.const 1) + ;; CHECK-NEXT: (i64.const 2) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: (drop + ;; CHECK-NEXT: (memory.atomic.wait64 $mem-i64 offset=8 + ;; CHECK-NEXT: (local.get $1) + ;; CHECK-NEXT: (i64.const 3) + ;; CHECK-NEXT: (i64.const 4) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + (func $atomic-wait (param i32 i64) + local.get 0 + i32.const 1 + i64.const 2 + memory.atomic.wait32 + drop + local.get 1 + i64.const 3 + i64.const 4 + memory.atomic.wait64 $mem-i64 offset=8 align=8 + drop + ) + + ;; CHECK: (func $atomic-notify (type $i32_i64_=>_none) (param $0 i32) (param $1 i64) + ;; CHECK-NEXT: (drop + ;; CHECK-NEXT: (memory.atomic.notify $mem offset=8 + ;; CHECK-NEXT: (local.get $0) + ;; CHECK-NEXT: (i32.const 0) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) 
+ ;; CHECK-NEXT: (drop + ;; CHECK-NEXT: (memory.atomic.notify $mem-i64 + ;; CHECK-NEXT: (local.get $1) + ;; CHECK-NEXT: (i32.const 1) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + (func $atomic-notify (param i32 i64) + local.get 0 + i32.const 0 + memory.atomic.notify offset=8 align=4 + drop + local.get 1 + i32.const 1 + memory.atomic.notify $mem-i64 + drop + ) + + ;; CHECK: (func $atomic-fence (type $void) + ;; CHECK-NEXT: (atomic.fence) + ;; CHECK-NEXT: ) + (func $atomic-fence + atomic.fence + ) + ;; CHECK: (func $simd-extract (type $v128_=>_i32) (param $0 v128) (result i32) ;; CHECK-NEXT: (i32x4.extract_lane 3 ;; CHECK-NEXT: (local.get $0) @@ -842,6 +1014,49 @@ i8x16.shl ) + ;; CHECK: (func $simd-load (type $i32_i64_=>_none) (param $0 i32) (param $1 i64) + ;; CHECK-NEXT: (drop + ;; CHECK-NEXT: (v128.load8x8_s $mem offset=8 + ;; CHECK-NEXT: (local.get $0) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: (drop + ;; CHECK-NEXT: (v128.load16_splat $mem-i64 offset=2 align=1 + ;; CHECK-NEXT: (local.get $1) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + (func $simd-load (param i32 i64) + local.get 0 + v128.load8x8_s offset=8 align=8 + drop + local.get 1 + v128.load16_splat $mem-i64 offset=2 align=1 + drop + ) + + ;; CHECK: (func $simd-load-store-lane (type $i32_i64_v128_=>_none) (param $0 i32) (param $1 i64) (param $2 v128) + ;; CHECK-NEXT: (drop + ;; CHECK-NEXT: (v128.load16_lane $mem 7 + ;; CHECK-NEXT: (local.get $0) + ;; CHECK-NEXT: (local.get $2) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: (v128.store64_lane $mem-i64 align=4 0 + ;; CHECK-NEXT: (local.get $1) + ;; CHECK-NEXT: (local.get $2) + ;; CHECK-NEXT: ) + ;; CHECK-NEXT: ) + (func $simd-load-store-lane (param i32 i64 v128) + local.get 0 + local.get 2 + v128.load16_lane 7 + drop + local.get 1 + local.get 2 + v128.store64_lane 3 align=4 0 + ) + ;; CHECK: (func $use-types (type $ref|$s0|_ref|$s1|_ref|$s2|_ref|$s3|_ref|$s4|_ref|$s5|_ref|$s6|_ref|$s7|_ref|$s8|_ref|$a0|_ref|$a1|_ref|$a2|_ref|$a3|_ref|$subvoid|_ref|$submany|_=>_none) (param $0 (ref $s0)) (param $1 (ref $s1)) (param $2 (ref $s2)) (param $3 (ref $s3)) (param $4 (ref $s4)) (param $5 (ref $s5)) (param $6 (ref $s6)) (param $7 (ref $s7)) (param $8 (ref $s8)) (param $9 (ref $a0)) (param $10 (ref $a1)) (param $11 (ref $a2)) (param $12 (ref $a3)) (param $13 (ref $subvoid)) (param $14 (ref $submany)) ;; CHECK-NEXT: (nop) ;; CHECK-NEXT: ) |
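The kitchen-sink additions above also exercise the ambiguity in the lane instructions: in v128.store64_lane 3 align=4 0 the leading 3 is a memory index, while in v128.load16_lane 7 the 7 turns out to be the lane index because nothing follows it. The sketch below models the retry strategy makeSIMDLoadStoreLane uses for this, on a simplified token list; LaneInstr, asNumber, and parseLaneInstr are illustrative names, not the real parser types.

// Simplified model (not the real wat-parser.cpp code) of the backtracking used
// for v128.loadN_lane / v128.storeN_lane: first try "memidx memarg laneidx";
// if no lane index follows, rewind and re-parse as "memarg laneidx", so the
// leading number becomes the lane index instead of a memory index.
#include <cstdint>
#include <optional>
#include <string>
#include <vector>

struct LaneInstr {
  std::optional<uint32_t> memidx; // absent => default memory
  uint64_t offset;
  uint32_t align;
  uint8_t lane;
};

// Return the token as an unsigned number if it is all digits.
static std::optional<uint64_t> asNumber(const std::string& tok) {
  if (tok.empty() || tok.find_first_not_of("0123456789") != std::string::npos) {
    return std::nullopt;
  }
  return std::stoull(tok);
}

static std::optional<LaneInstr>
parseLaneInstr(const std::vector<std::string>& toks, uint32_t naturalAlign) {
  // Parse "offset=..? align=..? laneidx" starting at token i.
  auto parseFrom = [&](size_t i, std::optional<uint32_t> memidx)
      -> std::optional<LaneInstr> {
    LaneInstr out{memidx, 0, naturalAlign, 0};
    if (i < toks.size() && toks[i].rfind("offset=", 0) == 0) {
      out.offset = std::stoull(toks[i].substr(7));
      ++i;
    }
    if (i < toks.size() && toks[i].rfind("align=", 0) == 0) {
      out.align = static_cast<uint32_t>(std::stoul(toks[i].substr(6)));
      ++i;
    }
    if (i >= toks.size()) {
      return std::nullopt; // no lane index here; the caller may retry
    }
    auto lane = asNumber(toks[i]);
    if (!lane) {
      return std::nullopt;
    }
    out.lane = static_cast<uint8_t>(*lane);
    return out;
  };

  // First attempt: treat a leading number as a memory index.
  if (!toks.empty()) {
    if (auto idx = asNumber(toks[0])) {
      if (auto parsed = parseFrom(1, static_cast<uint32_t>(*idx))) {
        return parsed;
      }
    }
  }
  // Retry without a memory index, e.g. for "v128.load16_lane 7".
  return parseFrom(0, std::nullopt);
}

For v128.load16_lane 7 the first attempt consumes the 7 as a memory index, finds no lane index, and the retry re-reads it as lane 7 on the default memory, which is exactly what the CHECK output in the test expects.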