Diffstat (limited to 'src')
-rw-r--r--  src/gen-s-parser.inc         86
-rw-r--r--  src/wasm-builder.h           12
-rw-r--r--  src/wasm-s-parser.h           5
-rw-r--r--  src/wasm/wasm-s-parser.cpp   36
-rw-r--r--  src/wasm/wat-parser.cpp     285
5 files changed, 329 insertions(+), 95 deletions(-)
diff --git a/src/gen-s-parser.inc b/src/gen-s-parser.inc
index d9a6dab22..67f58b5e6 100644
--- a/src/gen-s-parser.inc
+++ b/src/gen-s-parser.inc
@@ -4,9 +4,9 @@
#ifdef INSTRUCTION_PARSER
#undef INSTRUCTION_PARSER
+char buf[33] = {};
using namespace std::string_view_literals;
auto str = s[0]->str().str;
-char buf[33] = {};
memcpy(buf, str.data(), str.size());
std::string_view op = {buf, str.size()};
switch (op[0]) {
@@ -3396,10 +3396,10 @@ switch (op[0]) {
case '_': {
switch (op[12]) {
case 'l':
- if (op == "v128.load16_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load16LaneVec128); }
+ if (op == "v128.load16_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load16LaneVec128, 2); }
goto parse_error;
case 's':
- if (op == "v128.load16_splat"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load16SplatVec128); }
+ if (op == "v128.load16_splat"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load16SplatVec128, 2); }
goto parse_error;
default: goto parse_error;
}
@@ -3407,10 +3407,10 @@ switch (op[0]) {
case 'x': {
switch (op[14]) {
case 's':
- if (op == "v128.load16x4_s"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load16x4SVec128); }
+ if (op == "v128.load16x4_s"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load16x4SVec128, 8); }
goto parse_error;
case 'u':
- if (op == "v128.load16x4_u"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load16x4UVec128); }
+ if (op == "v128.load16x4_u"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load16x4UVec128, 8); }
goto parse_error;
default: goto parse_error;
}
@@ -3423,13 +3423,13 @@ switch (op[0]) {
case '_': {
switch (op[12]) {
case 'l':
- if (op == "v128.load32_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load32LaneVec128); }
+ if (op == "v128.load32_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load32LaneVec128, 4); }
goto parse_error;
case 's':
- if (op == "v128.load32_splat"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load32SplatVec128); }
+ if (op == "v128.load32_splat"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load32SplatVec128, 4); }
goto parse_error;
case 'z':
- if (op == "v128.load32_zero"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load32ZeroVec128); }
+ if (op == "v128.load32_zero"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load32ZeroVec128, 4); }
goto parse_error;
default: goto parse_error;
}
@@ -3437,10 +3437,10 @@ switch (op[0]) {
case 'x': {
switch (op[14]) {
case 's':
- if (op == "v128.load32x2_s"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load32x2SVec128); }
+ if (op == "v128.load32x2_s"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load32x2SVec128, 8); }
goto parse_error;
case 'u':
- if (op == "v128.load32x2_u"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load32x2UVec128); }
+ if (op == "v128.load32x2_u"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load32x2UVec128, 8); }
goto parse_error;
default: goto parse_error;
}
@@ -3451,13 +3451,13 @@ switch (op[0]) {
case '6': {
switch (op[12]) {
case 'l':
- if (op == "v128.load64_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load64LaneVec128); }
+ if (op == "v128.load64_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load64LaneVec128, 8); }
goto parse_error;
case 's':
- if (op == "v128.load64_splat"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load64SplatVec128); }
+ if (op == "v128.load64_splat"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load64SplatVec128, 8); }
goto parse_error;
case 'z':
- if (op == "v128.load64_zero"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load64ZeroVec128); }
+ if (op == "v128.load64_zero"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load64ZeroVec128, 8); }
goto parse_error;
default: goto parse_error;
}
@@ -3467,10 +3467,10 @@ switch (op[0]) {
case '_': {
switch (op[11]) {
case 'l':
- if (op == "v128.load8_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load8LaneVec128); }
+ if (op == "v128.load8_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Load8LaneVec128, 1); }
goto parse_error;
case 's':
- if (op == "v128.load8_splat"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load8SplatVec128); }
+ if (op == "v128.load8_splat"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load8SplatVec128, 1); }
goto parse_error;
default: goto parse_error;
}
@@ -3478,10 +3478,10 @@ switch (op[0]) {
case 'x': {
switch (op[13]) {
case 's':
- if (op == "v128.load8x8_s"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load8x8SVec128); }
+ if (op == "v128.load8x8_s"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load8x8SVec128, 8); }
goto parse_error;
case 'u':
- if (op == "v128.load8x8_u"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load8x8UVec128); }
+ if (op == "v128.load8x8_u"sv) { return makeSIMDLoad(s, SIMDLoadOp::Load8x8UVec128, 8); }
goto parse_error;
default: goto parse_error;
}
@@ -3504,16 +3504,16 @@ switch (op[0]) {
if (op == "v128.store"sv) { return makeStore(s, Type::v128, 16, /*isAtomic=*/false); }
goto parse_error;
case '1':
- if (op == "v128.store16_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store16LaneVec128); }
+ if (op == "v128.store16_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store16LaneVec128, 2); }
goto parse_error;
case '3':
- if (op == "v128.store32_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store32LaneVec128); }
+ if (op == "v128.store32_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store32LaneVec128, 4); }
goto parse_error;
case '6':
- if (op == "v128.store64_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store64LaneVec128); }
+ if (op == "v128.store64_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store64LaneVec128, 8); }
goto parse_error;
case '8':
- if (op == "v128.store8_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store8LaneVec128); }
+ if (op == "v128.store8_lane"sv) { return makeSIMDLoadStoreLane(s, SIMDLoadStoreLaneOp::Store8LaneVec128, 1); }
goto parse_error;
default: goto parse_error;
}
@@ -3532,6 +3532,10 @@ parse_error:
#ifdef NEW_INSTRUCTION_PARSER
#undef NEW_INSTRUCTION_PARSER
+char buf[33] = {};
+auto str = *keyword;
+memcpy(buf, str.data(), str.size());
+std::string_view op = {buf, str.size()};
switch (op[0]) {
case 'a': {
switch (op[1]) {
@@ -9216,14 +9220,14 @@ switch (op[0]) {
switch (op[12]) {
case 'l':
if (op == "v128.load16_lane"sv) {
- auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Load16LaneVec128);
+ auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Load16LaneVec128, 2);
CHECK_ERR(ret);
return *ret;
}
goto parse_error;
case 's':
if (op == "v128.load16_splat"sv) {
- auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load16SplatVec128);
+ auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load16SplatVec128, 2);
CHECK_ERR(ret);
return *ret;
}
@@ -9235,14 +9239,14 @@ switch (op[0]) {
switch (op[14]) {
case 's':
if (op == "v128.load16x4_s"sv) {
- auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load16x4SVec128);
+ auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load16x4SVec128, 8);
CHECK_ERR(ret);
return *ret;
}
goto parse_error;
case 'u':
if (op == "v128.load16x4_u"sv) {
- auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load16x4UVec128);
+ auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load16x4UVec128, 8);
CHECK_ERR(ret);
return *ret;
}
@@ -9259,21 +9263,21 @@ switch (op[0]) {
switch (op[12]) {
case 'l':
if (op == "v128.load32_lane"sv) {
- auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Load32LaneVec128);
+ auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Load32LaneVec128, 4);
CHECK_ERR(ret);
return *ret;
}
goto parse_error;
case 's':
if (op == "v128.load32_splat"sv) {
- auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load32SplatVec128);
+ auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load32SplatVec128, 4);
CHECK_ERR(ret);
return *ret;
}
goto parse_error;
case 'z':
if (op == "v128.load32_zero"sv) {
- auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load32ZeroVec128);
+ auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load32ZeroVec128, 4);
CHECK_ERR(ret);
return *ret;
}
@@ -9285,14 +9289,14 @@ switch (op[0]) {
switch (op[14]) {
case 's':
if (op == "v128.load32x2_s"sv) {
- auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load32x2SVec128);
+ auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load32x2SVec128, 8);
CHECK_ERR(ret);
return *ret;
}
goto parse_error;
case 'u':
if (op == "v128.load32x2_u"sv) {
- auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load32x2UVec128);
+ auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load32x2UVec128, 8);
CHECK_ERR(ret);
return *ret;
}
@@ -9307,21 +9311,21 @@ switch (op[0]) {
switch (op[12]) {
case 'l':
if (op == "v128.load64_lane"sv) {
- auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Load64LaneVec128);
+ auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Load64LaneVec128, 8);
CHECK_ERR(ret);
return *ret;
}
goto parse_error;
case 's':
if (op == "v128.load64_splat"sv) {
- auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load64SplatVec128);
+ auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load64SplatVec128, 8);
CHECK_ERR(ret);
return *ret;
}
goto parse_error;
case 'z':
if (op == "v128.load64_zero"sv) {
- auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load64ZeroVec128);
+ auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load64ZeroVec128, 8);
CHECK_ERR(ret);
return *ret;
}
@@ -9335,14 +9339,14 @@ switch (op[0]) {
switch (op[11]) {
case 'l':
if (op == "v128.load8_lane"sv) {
- auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Load8LaneVec128);
+ auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Load8LaneVec128, 1);
CHECK_ERR(ret);
return *ret;
}
goto parse_error;
case 's':
if (op == "v128.load8_splat"sv) {
- auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load8SplatVec128);
+ auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load8SplatVec128, 1);
CHECK_ERR(ret);
return *ret;
}
@@ -9354,14 +9358,14 @@ switch (op[0]) {
switch (op[13]) {
case 's':
if (op == "v128.load8x8_s"sv) {
- auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load8x8SVec128);
+ auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load8x8SVec128, 8);
CHECK_ERR(ret);
return *ret;
}
goto parse_error;
case 'u':
if (op == "v128.load8x8_u"sv) {
- auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load8x8UVec128);
+ auto ret = makeSIMDLoad(ctx, pos, SIMDLoadOp::Load8x8UVec128, 8);
CHECK_ERR(ret);
return *ret;
}
@@ -9400,28 +9404,28 @@ switch (op[0]) {
goto parse_error;
case '1':
if (op == "v128.store16_lane"sv) {
- auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Store16LaneVec128);
+ auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Store16LaneVec128, 2);
CHECK_ERR(ret);
return *ret;
}
goto parse_error;
case '3':
if (op == "v128.store32_lane"sv) {
- auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Store32LaneVec128);
+ auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Store32LaneVec128, 4);
CHECK_ERR(ret);
return *ret;
}
goto parse_error;
case '6':
if (op == "v128.store64_lane"sv) {
- auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Store64LaneVec128);
+ auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Store64LaneVec128, 8);
CHECK_ERR(ret);
return *ret;
}
goto parse_error;
case '8':
if (op == "v128.store8_lane"sv) {
- auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Store8LaneVec128);
+ auto ret = makeSIMDLoadStoreLane(ctx, pos, SIMDLoadStoreLaneOp::Store8LaneVec128, 1);
CHECK_ERR(ret);
return *ret;
}
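
The new trailing integer in each generated call is the natural alignment of the access in bytes, replacing the per-opcode switch previously hard-coded in SExpressionWasmBuilder::makeSIMDLoad (removed in wasm-s-parser.cpp below). A minimal sketch of the mapping these generated tables now encode; the helper name is illustrative, not part of the patch:

    // Natural alignment per SIMD load op, mirroring the deleted switch.
    static int naturalAlignment(SIMDLoadOp op) {
      switch (op) {
        case SIMDLoadOp::Load8SplatVec128:
          return 1; // a single byte, splatted to all lanes
        case SIMDLoadOp::Load16SplatVec128:
          return 2;
        case SIMDLoadOp::Load32SplatVec128:
        case SIMDLoadOp::Load32ZeroVec128:
          return 4;
        default:
          return 8; // 64-bit splat/zero and the 8-byte extending loads
      }
    }
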
diff --git a/src/wasm-builder.h b/src/wasm-builder.h
index b07af4627..df036b3b4 100644
--- a/src/wasm-builder.h
+++ b/src/wasm-builder.h
@@ -367,7 +367,7 @@ public:
}
Load* makeLoad(unsigned bytes,
bool signed_,
- uint32_t offset,
+ Address offset,
unsigned align,
Expression* ptr,
Type type,
@@ -384,7 +384,7 @@ public:
return ret;
}
Load* makeAtomicLoad(
- unsigned bytes, uint32_t offset, Expression* ptr, Type type, Name memory) {
+ unsigned bytes, Address offset, Expression* ptr, Type type, Name memory) {
Load* load = makeLoad(bytes, false, offset, bytes, ptr, type, memory);
load->isAtomic = true;
return load;
@@ -419,7 +419,7 @@ public:
}
AtomicFence* makeAtomicFence() { return wasm.allocator.alloc<AtomicFence>(); }
Store* makeStore(unsigned bytes,
- uint32_t offset,
+ Address offset,
unsigned align,
Expression* ptr,
Expression* value,
@@ -439,7 +439,7 @@ public:
return ret;
}
Store* makeAtomicStore(unsigned bytes,
- uint32_t offset,
+ Address offset,
Expression* ptr,
Expression* value,
Type type,
@@ -450,7 +450,7 @@ public:
}
AtomicRMW* makeAtomicRMW(AtomicRMWOp op,
unsigned bytes,
- uint32_t offset,
+ Address offset,
Expression* ptr,
Expression* value,
Type type,
@@ -467,7 +467,7 @@ public:
return ret;
}
AtomicCmpxchg* makeAtomicCmpxchg(unsigned bytes,
- uint32_t offset,
+ Address offset,
Expression* ptr,
Expression* expected,
Expression* replacement,
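
Widening these offset parameters from uint32_t to Address lets 64-bit offsets (as memory64 allows) reach the builder without truncation at the API boundary. A rough sketch of the shape such a wrapper takes, assuming it is an implicitly convertible 64-bit integer wrapper; the real Address type is defined elsewhere in the tree:

    #include <cstdint>

    // Hypothetical simplification of an Address-like type: implicitly
    // convertible to and from integers, but wide enough for memory64.
    struct Address {
      uint64_t addr = 0;
      Address() = default;
      Address(uint64_t a) : addr(a) {}
      operator uint64_t() const { return addr; }
    };
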
diff --git a/src/wasm-s-parser.h b/src/wasm-s-parser.h
index 1470701e4..888f7d009 100644
--- a/src/wasm-s-parser.h
+++ b/src/wasm-s-parser.h
@@ -238,8 +238,9 @@ private:
Expression* makeSIMDShuffle(Element& s);
Expression* makeSIMDTernary(Element& s, SIMDTernaryOp op);
Expression* makeSIMDShift(Element& s, SIMDShiftOp op);
- Expression* makeSIMDLoad(Element& s, SIMDLoadOp op);
- Expression* makeSIMDLoadStoreLane(Element& s, SIMDLoadStoreLaneOp op);
+ Expression* makeSIMDLoad(Element& s, SIMDLoadOp op, int bytes);
+ Expression*
+ makeSIMDLoadStoreLane(Element& s, SIMDLoadStoreLaneOp op, int bytes);
Expression* makeMemoryInit(Element& s);
Expression* makeDataDrop(Element& s);
Expression* makeMemoryCopy(Element& s);
diff --git a/src/wasm/wasm-s-parser.cpp b/src/wasm/wasm-s-parser.cpp
index 347bbb713..a54226194 100644
--- a/src/wasm/wasm-s-parser.cpp
+++ b/src/wasm/wasm-s-parser.cpp
@@ -2180,32 +2180,12 @@ Expression* SExpressionWasmBuilder::makeSIMDShift(Element& s, SIMDShiftOp op) {
return ret;
}
-Expression* SExpressionWasmBuilder::makeSIMDLoad(Element& s, SIMDLoadOp op) {
+Expression*
+SExpressionWasmBuilder::makeSIMDLoad(Element& s, SIMDLoadOp op, int bytes) {
auto ret = allocator.alloc<SIMDLoad>();
ret->op = op;
ret->offset = 0;
- switch (op) {
- case Load8SplatVec128:
- ret->align = 1;
- break;
- case Load16SplatVec128:
- ret->align = 2;
- break;
- case Load32SplatVec128:
- case Load32ZeroVec128:
- ret->align = 4;
- break;
- case Load64SplatVec128:
- case Load8x8SVec128:
- case Load8x8UVec128:
- case Load16x4SVec128:
- case Load16x4UVec128:
- case Load32x2SVec128:
- case Load32x2UVec128:
- case Load64ZeroVec128:
- ret->align = 8;
- break;
- }
+ ret->align = bytes;
Index i = 1;
Name memory;
// Check to make sure there are more than the default args & this str isn't
@@ -2222,32 +2202,28 @@ Expression* SExpressionWasmBuilder::makeSIMDLoad(Element& s, SIMDLoadOp op) {
return ret;
}
-Expression*
-SExpressionWasmBuilder::makeSIMDLoadStoreLane(Element& s,
- SIMDLoadStoreLaneOp op) {
+Expression* SExpressionWasmBuilder::makeSIMDLoadStoreLane(
+ Element& s, SIMDLoadStoreLaneOp op, int bytes) {
auto* ret = allocator.alloc<SIMDLoadStoreLane>();
ret->op = op;
ret->offset = 0;
+ ret->align = bytes;
size_t lanes;
switch (op) {
case Load8LaneVec128:
case Store8LaneVec128:
- ret->align = 1;
lanes = 16;
break;
case Load16LaneVec128:
case Store16LaneVec128:
- ret->align = 2;
lanes = 8;
break;
case Load32LaneVec128:
case Store32LaneVec128:
- ret->align = 4;
lanes = 4;
break;
case Load64LaneVec128:
case Store64LaneVec128:
- ret->align = 8;
lanes = 2;
break;
default:
diff --git a/src/wasm/wat-parser.cpp b/src/wasm/wat-parser.cpp
index 1cc2623ca..16f33d2d7 100644
--- a/src/wasm/wat-parser.cpp
+++ b/src/wasm/wat-parser.cpp
@@ -158,6 +158,50 @@ struct ParseInput {
return false;
}
+ std::optional<uint64_t> takeOffset() {
+ if (auto t = peek()) {
+ if (auto keyword = t->getKeyword()) {
+ if (keyword->substr(0, 7) != "offset="sv) {
+ return {};
+ }
+ Lexer subLexer(keyword->substr(7));
+ if (subLexer == subLexer.end()) {
+ return {};
+ }
+ if (auto o = subLexer->getU64()) {
+ ++subLexer;
+ if (subLexer == subLexer.end()) {
+ ++lexer;
+ return o;
+ }
+ }
+ }
+ }
+ return {};
+ }
+
+ std::optional<uint32_t> takeAlign() {
+ if (auto t = peek()) {
+ if (auto keyword = t->getKeyword()) {
+ if (keyword->substr(0, 6) != "align="sv) {
+ return {};
+ }
+ Lexer subLexer(keyword->substr(6));
+ if (subLexer == subLexer.end()) {
+ return {};
+ }
+ if (auto a = subLexer->getU32()) {
+ ++subLexer;
+ if (subLexer == subLexer.end()) {
+ ++lexer;
+ return a;
+ }
+ }
+ }
+ }
+ return {};
+ }
+
std::optional<uint64_t> takeU64() {
if (auto t = peek()) {
if (auto n = t->getU64()) {
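
Both helpers follow the same pattern: peek the next token, test for an "offset="/"align=" keyword prefix, re-lex the suffix, and consume the token only when that suffix is exactly one integer. A self-contained sketch of the prefix-strip-and-validate step, with std::from_chars standing in for the sub-Lexer (an assumption made purely for illustration):

    #include <charconv>
    #include <cstdint>
    #include <optional>
    #include <string_view>

    // Sketch of the annotation parsing shared by takeOffset/takeAlign.
    std::optional<uint64_t> parseAnnotation(std::string_view kw,
                                            std::string_view prefix) {
      if (kw.substr(0, prefix.size()) != prefix) {
        return std::nullopt; // a different keyword; leave it unconsumed
      }
      std::string_view rest = kw.substr(prefix.size());
      uint64_t value = 0;
      auto [end, ec] =
        std::from_chars(rest.data(), rest.data() + rest.size(), value);
      // Accept only if the whole suffix is one integer, mirroring the
      // "++subLexer; subLexer == subLexer.end()" check above.
      if (ec != std::errc() || end != rest.data() + rest.size()) {
        return std::nullopt;
      }
      return value;
    }

    // parseAnnotation("offset=16", "offset=") -> 16
    // parseAnnotation("offset=",   "offset=") -> nullopt (empty suffix)
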
@@ -335,6 +379,11 @@ struct MemType {
bool shared;
};
+struct Memarg {
+ uint64_t offset;
+ uint32_t align;
+};
+
// RAII utility for temporarily changing the parsing position of a parsing
// context.
template<typename Ctx> struct WithPosition {
@@ -614,6 +663,8 @@ struct NullInstrParserCtx {
using GlobalT = Ok;
using MemoryT = Ok;
+ using MemargT = Ok;
+
InstrsT makeInstrs() { return Ok{}; }
void appendInstr(InstrsT&, InstrT) {}
InstrsT finishInstrs(InstrsT&) { return Ok{}; }
@@ -627,6 +678,8 @@ struct NullInstrParserCtx {
MemoryT getMemoryFromIdx(uint32_t) { return Ok{}; }
MemoryT getMemoryFromName(Name) { return Ok{}; }
+ MemargT getMemarg(uint64_t, uint32_t) { return Ok{}; }
+
InstrT makeUnreachable(Index) { return Ok{}; }
InstrT makeNop(Index) { return Ok{}; }
InstrT makeBinary(Index, BinaryOp) { return Ok{}; }
@@ -647,12 +700,27 @@ struct NullInstrParserCtx {
InstrT makeI64Const(Index, uint64_t) { return Ok{}; }
InstrT makeF32Const(Index, float) { return Ok{}; }
InstrT makeF64Const(Index, double) { return Ok{}; }
-
+ InstrT makeLoad(Index, Type, bool, int, bool, MemoryT*, MemargT) {
+ return Ok{};
+ }
+ InstrT makeStore(Index, Type, int, bool, MemoryT*, MemargT) { return Ok{}; }
+ InstrT makeAtomicRMW(Index, AtomicRMWOp, Type, int, MemoryT*, MemargT) {
+ return Ok{};
+ }
+ InstrT makeAtomicCmpxchg(Index, Type, int, MemoryT*, MemargT) { return Ok{}; }
+ InstrT makeAtomicWait(Index, Type, MemoryT*, MemargT) { return Ok{}; }
+ InstrT makeAtomicNotify(Index, MemoryT*, MemargT) { return Ok{}; }
+ InstrT makeAtomicFence(Index) { return Ok{}; }
InstrT makeSIMDExtract(Index, SIMDExtractOp, uint8_t) { return Ok{}; }
InstrT makeSIMDReplace(Index, SIMDReplaceOp, uint8_t) { return Ok{}; }
InstrT makeSIMDShuffle(Index, const std::array<uint8_t, 16>&) { return Ok{}; }
InstrT makeSIMDTernary(Index, SIMDTernaryOp) { return Ok{}; }
InstrT makeSIMDShift(Index, SIMDShiftOp) { return Ok{}; }
+ InstrT makeSIMDLoad(Index, SIMDLoadOp, MemoryT*, MemargT) { return Ok{}; }
+ InstrT makeSIMDLoadStoreLane(
+ Index, SIMDLoadStoreLaneOp, MemoryT*, MemargT, uint8_t) {
+ return Ok{};
+ }
template<typename HeapTypeT> InstrT makeRefNull(Index, HeapTypeT) {
return {};
@@ -670,6 +738,8 @@ template<typename Ctx> struct InstrParserCtx : TypeParserCtx<Ctx> {
using GlobalT = Name;
using MemoryT = Name;
+ using MemargT = Memarg;
+
Builder builder;
// The stack of parsed expressions, used as the children of newly parsed
@@ -782,6 +852,8 @@ template<typename Ctx> struct InstrParserCtx : TypeParserCtx<Ctx> {
return std::move(exprStack);
}
+ Memarg getMemarg(uint64_t offset, uint32_t align) { return {offset, align}; }
+
ExprT makeExpr(InstrsT& instrs) {
switch (instrs.size()) {
case 0:
@@ -856,6 +928,95 @@ template<typename Ctx> struct InstrParserCtx : TypeParserCtx<Ctx> {
Result<> makeF64Const(Index pos, double c) {
return push(pos, builder.makeConst(Literal(c)));
}
+ Result<> makeLoad(Index pos,
+ Type type,
+ bool signed_,
+ int bytes,
+ bool isAtomic,
+ Name* mem,
+ Memarg memarg) {
+ auto m = self().getMemory(pos, mem);
+ CHECK_ERR(m);
+ auto ptr = pop(pos);
+ CHECK_ERR(ptr);
+ if (isAtomic) {
+ return push(pos,
+ builder.makeAtomicLoad(bytes, memarg.offset, *ptr, type, *m));
+ }
+ return push(pos,
+ builder.makeLoad(
+ bytes, signed_, memarg.offset, memarg.align, *ptr, type, *m));
+ }
+ Result<> makeStore(
+ Index pos, Type type, int bytes, bool isAtomic, Name* mem, Memarg memarg) {
+ auto m = self().getMemory(pos, mem);
+ CHECK_ERR(m);
+ auto val = pop(pos);
+ CHECK_ERR(val);
+ auto ptr = pop(pos);
+ CHECK_ERR(ptr);
+ if (isAtomic) {
+ return push(
+ pos,
+ builder.makeAtomicStore(bytes, memarg.offset, *ptr, *val, type, *m));
+ }
+ return push(pos,
+ builder.makeStore(
+ bytes, memarg.offset, memarg.align, *ptr, *val, type, *m));
+ }
+ Result<> makeAtomicRMW(
+ Index pos, AtomicRMWOp op, Type type, int bytes, Name* mem, Memarg memarg) {
+ auto m = self().getMemory(pos, mem);
+ CHECK_ERR(m);
+ auto val = pop(pos);
+ CHECK_ERR(val);
+ auto ptr = pop(pos);
+ CHECK_ERR(ptr);
+ return push(
+ pos,
+ builder.makeAtomicRMW(op, bytes, memarg.offset, *ptr, *val, type, *m));
+ }
+ Result<>
+ makeAtomicCmpxchg(Index pos, Type type, int bytes, Name* mem, Memarg memarg) {
+ auto m = self().getMemory(pos, mem);
+ CHECK_ERR(m);
+ auto replacement = pop(pos);
+ CHECK_ERR(replacement);
+ auto expected = pop(pos);
+ CHECK_ERR(expected);
+ auto ptr = pop(pos);
+ CHECK_ERR(ptr);
+ return push(
+ pos,
+ builder.makeAtomicCmpxchg(
+ bytes, memarg.offset, *ptr, *expected, *replacement, type, *m));
+ }
+ Result<> makeAtomicWait(Index pos, Type type, Name* mem, Memarg memarg) {
+ auto m = self().getMemory(pos, mem);
+ CHECK_ERR(m);
+ auto timeout = pop(pos);
+ CHECK_ERR(timeout);
+ auto expected = pop(pos);
+ CHECK_ERR(expected);
+ auto ptr = pop(pos);
+ CHECK_ERR(ptr);
+ return push(pos,
+ builder.makeAtomicWait(
+ *ptr, *expected, *timeout, type, memarg.offset, *m));
+ }
+ Result<> makeAtomicNotify(Index pos, Name* mem, Memarg memarg) {
+ auto m = self().getMemory(pos, mem);
+ CHECK_ERR(m);
+ auto count = pop(pos);
+ CHECK_ERR(count);
+ auto ptr = pop(pos);
+ CHECK_ERR(ptr);
+ return push(pos, builder.makeAtomicNotify(*ptr, *count, memarg.offset, *m));
+ }
+ Result<> makeAtomicFence(Index pos) {
+ return push(pos, builder.makeAtomicFence());
+ }
+
Result<> makeRefNull(Index pos, HeapType type) {
return push(pos, builder.makeRefNull(type));
}
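
Note the pop order in these helpers: operands come off the expression stack in reverse of their textual order, because the last-parsed child sits on top. For makeAtomicCmpxchg above, illustratively:

    // (i32.atomic.rmw.cmpxchg (local.get $p)  ;; ptr, pushed first
    //                         (i32.const 1)   ;; expected
    //                         (i32.const 2))  ;; replacement, on top
    // pop() therefore yields replacement, then expected, then ptr,
    // matching the order of the CHECK_ERR'd pops in the code above.
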
@@ -1507,6 +1668,28 @@ struct ParseDefsCtx : InstrParserCtx<ParseDefsCtx> {
CHECK_ERR(vec);
return push(pos, builder.makeSIMDShift(op, *vec, *shift));
}
+
+ Result<> makeSIMDLoad(Index pos, SIMDLoadOp op, Name* mem, Memarg memarg) {
+ auto m = self().getMemory(pos, mem);
+ CHECK_ERR(m);
+ auto ptr = pop(pos);
+ CHECK_ERR(ptr);
+ return push(
+ pos, builder.makeSIMDLoad(op, memarg.offset, memarg.align, *ptr, *m));
+ }
+
+ Result<> makeSIMDLoadStoreLane(
+ Index pos, SIMDLoadStoreLaneOp op, Name* mem, Memarg memarg, uint8_t lane) {
+ auto m = self().getMemory(pos, mem);
+ CHECK_ERR(m);
+ auto vec = pop(pos);
+ CHECK_ERR(vec);
+ auto ptr = pop(pos);
+ CHECK_ERR(ptr);
+ return push(pos,
+ builder.makeSIMDLoadStoreLane(
+ op, memarg.offset, memarg.align, lane, *ptr, *vec, *m));
+ }
};
// ================
@@ -1534,6 +1717,7 @@ template<typename Ctx> Result<typename Ctx::GlobalTypeT> globaltype(Ctx&);
template<typename Ctx> MaybeResult<typename Ctx::InstrT> instr(Ctx&);
template<typename Ctx> Result<typename Ctx::InstrsT> instrs(Ctx&);
template<typename Ctx> Result<typename Ctx::ExprT> expr(Ctx&);
+template<typename Ctx> Result<typename Ctx::MemargT> memarg(Ctx&, uint32_t);
template<typename Ctx>
Result<typename Ctx::InstrT> makeUnreachable(Ctx&, Index);
template<typename Ctx> Result<typename Ctx::InstrT> makeNop(Ctx&, Index);
@@ -1585,10 +1769,11 @@ Result<typename Ctx::InstrT> makeSIMDTernary(Ctx&, Index, SIMDTernaryOp op);
template<typename Ctx>
Result<typename Ctx::InstrT> makeSIMDShift(Ctx&, Index, SIMDShiftOp op);
template<typename Ctx>
-Result<typename Ctx::InstrT> makeSIMDLoad(Ctx&, Index, SIMDLoadOp op);
+Result<typename Ctx::InstrT>
+makeSIMDLoad(Ctx&, Index, SIMDLoadOp op, int bytes);
template<typename Ctx>
Result<typename Ctx::InstrT>
-makeSIMDLoadStoreLane(Ctx&, Index, SIMDLoadStoreLaneOp op);
+makeSIMDLoadStoreLane(Ctx&, Index, SIMDLoadStoreLaneOp op, int bytes);
template<typename Ctx> Result<typename Ctx::InstrT> makeMemoryInit(Ctx&, Index);
template<typename Ctx> Result<typename Ctx::InstrT> makeDataDrop(Ctx&, Index);
template<typename Ctx> Result<typename Ctx::InstrT> makeMemoryCopy(Ctx&, Index);
@@ -2046,8 +2231,6 @@ template<typename Ctx> MaybeResult<typename Ctx::InstrT> instr(Ctx& ctx) {
return {};
}
- auto op = *keyword;
-
#define NEW_INSTRUCTION_PARSER
#define NEW_WAT_PARSER
#include <gen-s-parser.inc>
@@ -2123,6 +2306,22 @@ template<typename Ctx> Result<typename Ctx::ExprT> expr(Ctx& ctx) {
return ctx.makeExpr(*insts);
}
+// memarg_n ::= o:offset a:align_n
+// offset ::= 'offset='o:u64 => o | _ => 0
+// align_n ::= 'align='a:u32 => a | _ => n
+template<typename Ctx>
+Result<typename Ctx::MemargT> memarg(Ctx& ctx, uint32_t n) {
+ uint64_t offset = 0;
+ uint32_t align = n;
+ if (auto o = ctx.in.takeOffset()) {
+ offset = *o;
+ }
+ if (auto a = ctx.in.takeAlign()) {
+ align = *a;
+ }
+ return ctx.getMemarg(offset, align);
+}
+
template<typename Ctx>
Result<typename Ctx::InstrT> makeUnreachable(Ctx& ctx, Index pos) {
return ctx.makeUnreachable(pos);
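
The n passed in from each call site is the access width in bytes, so an omitted align= defaults to the natural alignment, exactly as the grammar comment states. Worked defaults for a few illustrative inputs:

    // memarg(ctx, n) results (n = natural access width in bytes):
    //   i32.load ...              -> {offset = 0, align = 4}  (n = 4)
    //   i32.load16_u offset=8 ... -> {offset = 8, align = 2}  (n = 2)
    //   i64.load align=1 ...      -> {offset = 0, align = 1}  (explicit)
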
@@ -2250,40 +2449,64 @@ Result<typename Ctx::InstrT> makeConst(Ctx& ctx, Index pos, Type type) {
template<typename Ctx>
Result<typename Ctx::InstrT> makeLoad(
Ctx& ctx, Index pos, Type type, bool signed_, int bytes, bool isAtomic) {
- return ctx.in.err("unimplemented instruction");
+ auto mem = maybeMemidx(ctx);
+ CHECK_ERR(mem);
+ auto arg = memarg(ctx, bytes);
+ CHECK_ERR(arg);
+ return ctx.makeLoad(pos, type, signed_, bytes, isAtomic, mem.getPtr(), *arg);
}
template<typename Ctx>
Result<typename Ctx::InstrT>
makeStore(Ctx& ctx, Index pos, Type type, int bytes, bool isAtomic) {
- return ctx.in.err("unimplemented instruction");
+ auto mem = maybeMemidx(ctx);
+ CHECK_ERR(mem);
+ auto arg = memarg(ctx, bytes);
+ CHECK_ERR(arg);
+ return ctx.makeStore(pos, type, bytes, isAtomic, mem.getPtr(), *arg);
}
template<typename Ctx>
Result<typename Ctx::InstrT>
makeAtomicRMW(Ctx& ctx, Index pos, AtomicRMWOp op, Type type, uint8_t bytes) {
- return ctx.in.err("unimplemented instruction");
+ auto mem = maybeMemidx(ctx);
+ CHECK_ERR(mem);
+ auto arg = memarg(ctx, bytes);
+ CHECK_ERR(arg);
+ return ctx.makeAtomicRMW(pos, op, type, bytes, mem.getPtr(), *arg);
}
template<typename Ctx>
Result<typename Ctx::InstrT>
makeAtomicCmpxchg(Ctx& ctx, Index pos, Type type, uint8_t bytes) {
- return ctx.in.err("unimplemented instruction");
+ auto mem = maybeMemidx(ctx);
+ CHECK_ERR(mem);
+ auto arg = memarg(ctx, bytes);
+ CHECK_ERR(arg);
+ return ctx.makeAtomicCmpxchg(pos, type, bytes, mem.getPtr(), *arg);
}
template<typename Ctx>
Result<typename Ctx::InstrT> makeAtomicWait(Ctx& ctx, Index pos, Type type) {
- return ctx.in.err("unimplemented instruction");
+ auto mem = maybeMemidx(ctx);
+ CHECK_ERR(mem);
+ auto arg = memarg(ctx, type == Type::i32 ? 4 : 8);
+ CHECK_ERR(arg);
+ return ctx.makeAtomicWait(pos, type, mem.getPtr(), *arg);
}
template<typename Ctx>
Result<typename Ctx::InstrT> makeAtomicNotify(Ctx& ctx, Index pos) {
- return ctx.in.err("unimplemented instruction");
+ auto mem = maybeMemidx(ctx);
+ CHECK_ERR(mem);
+ auto arg = memarg(ctx, 4);
+ CHECK_ERR(arg);
+ return ctx.makeAtomicNotify(pos, mem.getPtr(), *arg);
}
template<typename Ctx>
Result<typename Ctx::InstrT> makeAtomicFence(Ctx& ctx, Index pos) {
- return ctx.in.err("unimplemented instruction");
+ return ctx.makeAtomicFence(pos);
}
template<typename Ctx>
@@ -2332,14 +2555,44 @@ makeSIMDShift(Ctx& ctx, Index pos, SIMDShiftOp op) {
}
template<typename Ctx>
-Result<typename Ctx::InstrT> makeSIMDLoad(Ctx& ctx, Index pos, SIMDLoadOp op) {
- return ctx.in.err("unimplemented instruction");
+Result<typename Ctx::InstrT>
+makeSIMDLoad(Ctx& ctx, Index pos, SIMDLoadOp op, int bytes) {
+ auto mem = maybeMemidx(ctx);
+ CHECK_ERR(mem);
+ auto arg = memarg(ctx, bytes);
+ CHECK_ERR(arg);
+ return ctx.makeSIMDLoad(pos, op, mem.getPtr(), *arg);
}
template<typename Ctx>
Result<typename Ctx::InstrT>
-makeSIMDLoadStoreLane(Ctx& ctx, Index pos, SIMDLoadStoreLaneOp op) {
- return ctx.in.err("unimplemented instruction");
+makeSIMDLoadStoreLane(Ctx& ctx, Index pos, SIMDLoadStoreLaneOp op, int bytes) {
+ auto reset = ctx.in.getPos();
+
+ auto retry = [&]() -> Result<typename Ctx::InstrT> {
+ // We failed to parse. Maybe the lane index was accidentally parsed as the
+ // optional memory index. Try again without parsing a memory index.
+ WithPosition with(ctx, reset);
+ auto arg = memarg(ctx, bytes);
+ CHECK_ERR(arg);
+ auto lane = ctx.in.takeU8();
+ if (!lane) {
+ return ctx.in.err("expected lane index");
+ }
+ return ctx.makeSIMDLoadStoreLane(pos, op, nullptr, *arg, *lane);
+ };
+
+ auto mem = maybeMemidx(ctx);
+ if (mem.getErr()) {
+ return retry();
+ }
+ auto arg = memarg(ctx, bytes);
+ CHECK_ERR(arg);
+ auto lane = ctx.in.takeU8();
+ if (!lane) {
+ return retry();
+ }
+ return ctx.makeSIMDLoadStoreLane(pos, op, mem.getPtr(), *arg, *lane);
}
template<typename Ctx>
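
The retry here resolves a real ambiguity in the flat text format: after a lane instruction, a lone integer can be either the optional memory index or the mandatory lane index. The parser first tries it as a memory index; if no lane index follows, it rewinds to reset via WithPosition and reparses the same integer as the lane. Illustratively:

    // Both are legal flat-format spellings (comments only, not from the patch):
    //   v128.load16_lane 0 1   ;; memory index 0, lane 1
    //   v128.load16_lane 0     ;; default memory, lane 0
    // "0" parses as a memidx in both cases; only the missing lane index
    // afterwards reveals the second form, hence the retry().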