Diffstat (limited to 'src/wasm')
-rw-r--r--  src/wasm/wasm-binary.cpp     54
-rw-r--r--  src/wasm/wasm-s-parser.cpp   43
-rw-r--r--  src/wasm/wasm-stack.cpp      33
-rw-r--r--  src/wasm/wasm-validator.cpp  54
-rw-r--r--  src/wasm/wasm.cpp            44
5 files changed, 228 insertions, 0 deletions
diff --git a/src/wasm/wasm-binary.cpp b/src/wasm/wasm-binary.cpp
index b6e926f6a..80f82dd5e 100644
--- a/src/wasm/wasm-binary.cpp
+++ b/src/wasm/wasm-binary.cpp
@@ -2711,6 +2711,9 @@ BinaryConsts::ASTNodes WasmBinaryBuilder::readExpression(Expression*& curr) {
if (maybeVisitSIMDLoad(curr, opcode)) {
break;
}
+ if (maybeVisitSIMDLoadStoreLane(curr, opcode)) {
+ break;
+ }
throwError("invalid code after SIMD prefix: " + std::to_string(opcode));
break;
}
@@ -4980,6 +4983,57 @@ bool WasmBinaryBuilder::maybeVisitSIMDLoad(Expression*& out, uint32_t code) {
return true;
}
+bool WasmBinaryBuilder::maybeVisitSIMDLoadStoreLane(Expression*& out,
+ uint32_t code) {
+ SIMDLoadStoreLaneOp op;
+ size_t lanes;
+ switch (code) {
+ case BinaryConsts::V128Load8Lane:
+ op = LoadLaneVec8x16;
+ lanes = 16;
+ break;
+ case BinaryConsts::V128Load16Lane:
+ op = LoadLaneVec16x8;
+ lanes = 8;
+ break;
+ case BinaryConsts::V128Load32Lane:
+ op = LoadLaneVec32x4;
+ lanes = 4;
+ break;
+ case BinaryConsts::V128Load64Lane:
+ op = LoadLaneVec64x2;
+ lanes = 2;
+ break;
+ case BinaryConsts::V128Store8Lane:
+ op = StoreLaneVec8x16;
+ lanes = 16;
+ break;
+ case BinaryConsts::V128Store16Lane:
+ op = StoreLaneVec16x8;
+ lanes = 8;
+ break;
+ case BinaryConsts::V128Store32Lane:
+ op = StoreLaneVec32x4;
+ lanes = 4;
+ break;
+ case BinaryConsts::V128Store64Lane:
+ op = StoreLaneVec64x2;
+ lanes = 2;
+ break;
+ default:
+ return false;
+ }
+ auto* curr = allocator.alloc<SIMDLoadStoreLane>();
+ curr->op = op;
+ readMemoryAccess(curr->align, curr->offset);
+ curr->index = getLaneIndex(lanes);
+ curr->vec = popNonVoidExpression();
+ curr->ptr = popNonVoidExpression();
+ curr->finalize();
+ out = curr;
+ return true;
+}
+
void WasmBinaryBuilder::visitSelect(Select* curr, uint8_t code) {
BYN_TRACE("zz node: Select, code " << int32_t(code) << std::endl);
if (code == BinaryConsts::SelectWithType) {
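
Note: in the reader above, the lane load/store immediates follow the 0xfd SIMD prefix and the opcode as a memarg (alignment exponent and offset, both LEB128) plus a single lane-index byte; readMemoryAccess and getLaneIndex consume them in that order. A minimal standalone sketch of that layout (not Binaryen's own helpers; the concrete opcode numbers are whatever BinaryConsts defines):

    #include <cstdint>

    // Hypothetical helper: decode an unsigned LEB128 value and advance p.
    static uint64_t readULEB(const uint8_t*& p) {
      uint64_t result = 0;
      unsigned shift = 0;
      uint8_t byte;
      do {
        byte = *p++;
        result |= uint64_t(byte & 0x7f) << shift;
        shift += 7;
      } while (byte & 0x80);
      return result;
    }

    struct LaneImmediates {
      uint64_t alignLog2; // alignment is encoded as its base-2 logarithm
      uint64_t offset;
      uint8_t index;      // must be < lane count (16, 8, 4, or 2)
    };

    // Decode the immediates that follow a load/store-lane opcode.
    static LaneImmediates readLaneImmediates(const uint8_t*& p) {
      LaneImmediates imm;
      imm.alignLog2 = readULEB(p);
      imm.offset = readULEB(p);
      imm.index = *p++;
      return imm;
    }
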
diff --git a/src/wasm/wasm-s-parser.cpp b/src/wasm/wasm-s-parser.cpp
index c4de76987..4e06a75bb 100644
--- a/src/wasm/wasm-s-parser.cpp
+++ b/src/wasm/wasm-s-parser.cpp
@@ -1300,8 +1300,12 @@ static size_t parseMemAttributes(Element& s,
size_t i = 1;
offset = 0;
align = fallbackAlign;
+ // Parse "align=X" and "offset=X" arguments, bailing out on anything else.
while (!s[i]->isList()) {
const char* str = s[i]->c_str();
+ if (strncmp(str, "align", 5) != 0 && strncmp(str, "offset", 6) != 0) {
+ return i;
+ }
const char* eq = strchr(str, '=');
if (!eq) {
throw ParseException(
@@ -1592,6 +1596,45 @@ Expression* SExpressionWasmBuilder::makeSIMDLoad(Element& s, SIMDLoadOp op) {
return ret;
}
+Expression*
+SExpressionWasmBuilder::makeSIMDLoadStoreLane(Element& s,
+ SIMDLoadStoreLaneOp op) {
+ auto* ret = allocator.alloc<SIMDLoadStoreLane>();
+ ret->op = op;
+ Address defaultAlign;
+ size_t lanes;
+ switch (op) {
+ case LoadLaneVec8x16:
+ case StoreLaneVec8x16:
+ defaultAlign = 1;
+ lanes = 16;
+ break;
+ case LoadLaneVec16x8:
+ case StoreLaneVec16x8:
+ defaultAlign = 2;
+ lanes = 8;
+ break;
+ case LoadLaneVec32x4:
+ case StoreLaneVec32x4:
+ defaultAlign = 4;
+ lanes = 4;
+ break;
+ case LoadLaneVec64x2:
+ case StoreLaneVec64x2:
+ defaultAlign = 8;
+ lanes = 2;
+ break;
+ default:
+ WASM_UNREACHABLE("Unexpected SIMDLoadStoreLane op");
+ }
+ size_t i = parseMemAttributes(s, ret->offset, ret->align, defaultAlign);
+ ret->index = parseLaneIndex(s[i++], lanes);
+ ret->ptr = parseExpression(s[i++]);
+ ret->vec = parseExpression(s[i]);
+ ret->finalize();
+ return ret;
+}
+
Expression* SExpressionWasmBuilder::makeMemoryInit(Element& s) {
auto ret = allocator.alloc<MemoryInit>();
ret->segment = atoi(s[1]->str().c_str());
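
Note: the parseMemAttributes change is what lets a lane index follow the memarg in the text format, e.g. (v128.store32_lane offset=16 align=4 1 (local.get $ptr) (local.get $vec)); the loop now returns at the first token that is neither align= nor offset=. A standalone sketch of that behavior (not the Binaryen parser, which works on Elements rather than raw strings):

    #include <cstdint>
    #include <cstdlib>
    #include <string>
    #include <vector>

    // Consume leading "align=N"/"offset=N" tokens and stop at the first token
    // that is neither, returning its position so the caller can read the lane
    // index (and then the operands) from there.
    static size_t parseMemAttrsSketch(const std::vector<std::string>& tokens,
                                      uint64_t& offset,
                                      uint64_t& align,
                                      uint64_t fallbackAlign) {
      offset = 0;
      align = fallbackAlign;
      size_t i = 0;
      while (i < tokens.size()) {
        const std::string& t = tokens[i];
        if (t.rfind("align=", 0) == 0) {
          align = std::strtoull(t.c_str() + 6, nullptr, 10);
        } else if (t.rfind("offset=", 0) == 0) {
          offset = std::strtoull(t.c_str() + 7, nullptr, 10);
        } else {
          break; // e.g. the lane index "1" in the example above
        }
        i++;
      }
      return i;
    }
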
diff --git a/src/wasm/wasm-stack.cpp b/src/wasm/wasm-stack.cpp
index fcf9e4b8a..4bc479d15 100644
--- a/src/wasm/wasm-stack.cpp
+++ b/src/wasm/wasm-stack.cpp
@@ -647,6 +647,39 @@ void BinaryInstWriter::visitSIMDLoad(SIMDLoad* curr) {
emitMemoryAccess(curr->align, /*(unused) bytes=*/0, curr->offset);
}
+void BinaryInstWriter::visitSIMDLoadStoreLane(SIMDLoadStoreLane* curr) {
+ o << int8_t(BinaryConsts::SIMDPrefix);
+ switch (curr->op) {
+ case LoadLaneVec8x16:
+ o << U32LEB(BinaryConsts::V128Load8Lane);
+ break;
+ case LoadLaneVec16x8:
+ o << U32LEB(BinaryConsts::V128Load16Lane);
+ break;
+ case LoadLaneVec32x4:
+ o << U32LEB(BinaryConsts::V128Load32Lane);
+ break;
+ case LoadLaneVec64x2:
+ o << U32LEB(BinaryConsts::V128Load64Lane);
+ break;
+ case StoreLaneVec8x16:
+ o << U32LEB(BinaryConsts::V128Store8Lane);
+ break;
+ case StoreLaneVec16x8:
+ o << U32LEB(BinaryConsts::V128Store16Lane);
+ break;
+ case StoreLaneVec32x4:
+ o << U32LEB(BinaryConsts::V128Store32Lane);
+ break;
+ case StoreLaneVec64x2:
+ o << U32LEB(BinaryConsts::V128Store64Lane);
+ break;
+ }
+ assert(curr->align);
+ emitMemoryAccess(curr->align, /*(unused) bytes=*/0, curr->offset);
+ o << curr->index;
+}
+
void BinaryInstWriter::visitMemoryInit(MemoryInit* curr) {
o << int8_t(BinaryConsts::MiscPrefix);
o << U32LEB(BinaryConsts::MemoryInit);
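
Note: the write side produces the same layout: SIMD prefix, LEB opcode, memarg via emitMemoryAccess (which encodes the alignment as its log2), then the lane index byte. A hand-rolled sketch of the resulting bytes (the opcode value is left symbolic since it lives in BinaryConsts):

    #include <cstdint>
    #include <vector>

    // Hypothetical helper: append an unsigned LEB128 value.
    static void writeULEB(std::vector<uint8_t>& out, uint64_t value) {
      do {
        uint8_t byte = value & 0x7f;
        value >>= 7;
        if (value) {
          byte |= 0x80;
        }
        out.push_back(byte);
      } while (value);
    }

    // Serialize a lane load/store: prefix, opcode, memarg, lane index.
    static std::vector<uint8_t>
    encodeLaneOp(uint32_t opcode, uint32_t alignLog2, uint32_t offset, uint8_t lane) {
      std::vector<uint8_t> bytes;
      bytes.push_back(0xfd);       // SIMD prefix
      writeULEB(bytes, opcode);    // e.g. BinaryConsts::V128Store32Lane
      writeULEB(bytes, alignLog2); // memarg: alignment exponent
      writeULEB(bytes, offset);    // memarg: offset
      bytes.push_back(lane);       // lane index as a single byte
      return bytes;
    }
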
diff --git a/src/wasm/wasm-validator.cpp b/src/wasm/wasm-validator.cpp
index ef6a29373..31b68a80c 100644
--- a/src/wasm/wasm-validator.cpp
+++ b/src/wasm/wasm-validator.cpp
@@ -317,6 +317,7 @@ public:
void visitSIMDTernary(SIMDTernary* curr);
void visitSIMDShift(SIMDShift* curr);
void visitSIMDLoad(SIMDLoad* curr);
+ void visitSIMDLoadStoreLane(SIMDLoadStoreLane* curr);
void visitMemoryInit(MemoryInit* curr);
void visitDataDrop(DataDrop* curr);
void visitMemoryCopy(MemoryCopy* curr);
@@ -1264,6 +1265,59 @@ void FunctionValidator::visitSIMDLoad(SIMDLoad* curr) {
validateAlignment(curr->align, memAlignType, bytes, /*isAtomic=*/false, curr);
}
+void FunctionValidator::visitSIMDLoadStoreLane(SIMDLoadStoreLane* curr) {
+ shouldBeTrue(
+ getModule()->memory.exists, curr, "Memory operations require a memory");
+ shouldBeTrue(
+ getModule()->features.hasSIMD(), curr, "SIMD operation (SIMD is disabled)");
+ if (curr->isLoad()) {
+ shouldBeEqualOrFirstIsUnreachable(
+ curr->type, Type(Type::v128), curr, "loadX_lane must have type v128");
+ } else {
+ shouldBeEqualOrFirstIsUnreachable(
+ curr->type, Type(Type::none), curr, "storeX_lane must have type none");
+ }
+ shouldBeEqualOrFirstIsUnreachable(
+ curr->ptr->type,
+ indexType(),
+ curr,
+ "loadX_lane or storeX_lane address must match memory index type");
+ shouldBeEqualOrFirstIsUnreachable(
+ curr->vec->type,
+ Type(Type::v128),
+ curr,
+ "loadX_lane or storeX_lane vector argument must have type v128");
+ size_t lanes;
+ Type memAlignType = Type::none;
+ switch (curr->op) {
+ case LoadLaneVec8x16:
+ case StoreLaneVec8x16:
+ lanes = 16;
+ memAlignType = Type::i32;
+ break;
+ case LoadLaneVec16x8:
+ case StoreLaneVec16x8:
+ lanes = 8;
+ memAlignType = Type::i32;
+ break;
+ case LoadLaneVec32x4:
+ case StoreLaneVec32x4:
+ lanes = 4;
+ memAlignType = Type::i32;
+ break;
+ case LoadLaneVec64x2:
+ case StoreLaneVec64x2:
+ lanes = 2;
+ memAlignType = Type::i64;
+ break;
+ default:
+ WASM_UNREACHABLE("Unexpected SIMDLoadStoreLane op");
+ }
+ Index bytes = curr->getMemBytes();
+ validateAlignment(curr->align, memAlignType, bytes, /*isAtomic=*/false, curr);
+ shouldBeTrue(curr->index < lanes, curr, "invalid lane index");
+}
+
void FunctionValidator::visitMemoryInit(MemoryInit* curr) {
shouldBeTrue(getModule()->features.hasBulkMemory(),
curr,
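
Note: besides the operand-type checks, the validator's two value checks are that the lane index fits the op's lane count and that the declared alignment is a power of two no larger than the access width from getMemBytes() (1, 2, 4, or 8 bytes). A standalone sketch of those predicates, assuming validateAlignment enforces the usual wasm alignment rule:

    #include <cstddef>
    #include <cstdint>

    // Lane index must address one of the op's lanes (16, 8, 4, or 2).
    static bool isValidLaneIndex(uint8_t index, size_t lanes) {
      return index < lanes;
    }

    // Alignment must be a power of two and at most the accessed width.
    static bool isValidAlignment(uint64_t align, uint64_t memBytes) {
      bool powerOfTwo = align != 0 && (align & (align - 1)) == 0;
      return powerOfTwo && align <= memBytes;
    }
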
diff --git a/src/wasm/wasm.cpp b/src/wasm/wasm.cpp
index 2052afa89..472902ad5 100644
--- a/src/wasm/wasm.cpp
+++ b/src/wasm/wasm.cpp
@@ -175,6 +175,8 @@ const char* getExpressionName(Expression* curr) {
return "simd_shift";
case Expression::Id::SIMDLoadId:
return "simd_load";
+ case Expression::Id::SIMDLoadStoreLaneId:
+ return "simd_load_store_lane";
case Expression::Id::MemoryInitId:
return "memory_init";
case Expression::Id::DataDropId:
@@ -674,6 +676,48 @@ Index SIMDLoad::getMemBytes() {
WASM_UNREACHABLE("unexpected op");
}
+void SIMDLoadStoreLane::finalize() {
+ assert(ptr && vec);
+ type = isLoad() ? Type::v128 : Type::none;
+ if (ptr->type == Type::unreachable || vec->type == Type::unreachable) {
+ type = Type::unreachable;
+ }
+}
+
+Index SIMDLoadStoreLane::getMemBytes() {
+ switch (op) {
+ case LoadLaneVec8x16:
+ case StoreLaneVec8x16:
+ return 1;
+ case LoadLaneVec16x8:
+ case StoreLaneVec16x8:
+ return 2;
+ case LoadLaneVec32x4:
+ case StoreLaneVec32x4:
+ return 4;
+ case LoadLaneVec64x2:
+ case StoreLaneVec64x2:
+ return 8;
+ }
+ WASM_UNREACHABLE("unexpected op");
+}
+
+bool SIMDLoadStoreLane::isStore() {
+ switch (op) {
+ case StoreLaneVec8x16:
+ case StoreLaneVec16x8:
+ case StoreLaneVec32x4:
+ case StoreLaneVec64x2:
+ return true;
+ case LoadLaneVec16x8:
+ case LoadLaneVec32x4:
+ case LoadLaneVec64x2:
+ case LoadLaneVec8x16:
+ return false;
+ }
+ WASM_UNREACHABLE("unexpected op");
+}
+
Const* Const::set(Literal value_) {
value = value_;
type = value.type;
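
Note: finalize() gives lane loads type v128 and lane stores type none, falling back to unreachable when either operand is unreachable. A usage sketch building a v128.load16_lane node by hand (Module::allocator and the field names are assumed from wasm.h; Binaryen's Builder may spell this differently):

    #include "wasm.h"

    using namespace wasm;

    // Construct a v128.load16_lane expression and let finalize() type it.
    Expression* makeLoad16Lane(Module& module, Expression* ptr, Expression* vec) {
      auto* curr = module.allocator.alloc<SIMDLoadStoreLane>();
      curr->op = LoadLaneVec16x8;
      curr->align = 2;  // natural alignment for a 16-bit lane
      curr->offset = 0;
      curr->index = 3;  // lane 3 of 8; the validator rejects index >= 8
      curr->ptr = ptr;  // address operand (matches the memory's index type)
      curr->vec = vec;  // v128 whose other lanes pass through
      curr->finalize(); // v128 here, or unreachable if an operand is
      return curr;
    }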