Diffstat (limited to 'src')
-rw-r--r--  src/binaryen-c.cpp                      429
-rw-r--r--  src/binaryen-c.h                        249
-rw-r--r--  src/dataflow/graph.h                      2
-rw-r--r--  src/gen-s-parser.inc                   2630
-rw-r--r--  src/ir/ExpressionAnalyzer.cpp            71
-rw-r--r--  src/ir/ReFinalize.cpp                     6
-rw-r--r--  src/ir/cost.h                           109
-rw-r--r--  src/ir/literal-utils.h                   19
-rw-r--r--  src/ir/utils.h                           10
-rw-r--r--  src/js/binaryen.js-post.js              646
-rw-r--r--  src/literal.h                           220
-rw-r--r--  src/passes/DeadCodeElimination.cpp        5
-rw-r--r--  src/passes/OptimizeInstructions.cpp       8
-rw-r--r--  src/passes/Precompute.cpp                 2
-rw-r--r--  src/passes/Print.cpp                    259
-rw-r--r--  src/passes/RedundantSetElimination.cpp    3
-rw-r--r--  src/passes/SafeHeap.cpp                  28
-rw-r--r--  src/shell-interface.h                     6
-rw-r--r--  src/tools/feature-options.h              15
-rw-r--r--  src/tools/fuzzing.h                       6
-rw-r--r--  src/tools/wasm-ctor-eval.cpp              4
-rw-r--r--  src/tools/wasm-reduce.cpp                 2
-rw-r--r--  src/wasm-binary.h                       160
-rw-r--r--  src/wasm-builder.h                       48
-rw-r--r--  src/wasm-interpreter.h                  202
-rw-r--r--  src/wasm-s-parser.h                       5
-rw-r--r--  src/wasm-stack.h                        211
-rw-r--r--  src/wasm-traversal.h                     60
-rw-r--r--  src/wasm.h                              110
-rw-r--r--  src/wasm/literal.cpp                    684
-rw-r--r--  src/wasm/wasm-binary.cpp                303
-rw-r--r--  src/wasm/wasm-s-parser.cpp              127
-rw-r--r--  src/wasm/wasm-validator.cpp             232
-rw-r--r--  src/wasm/wasm.cpp                        88
34 files changed, 5819 insertions, 1140 deletions
diff --git a/src/binaryen-c.cpp b/src/binaryen-c.cpp
index 88a3d033e..6b5481af2 100644
--- a/src/binaryen-c.cpp
+++ b/src/binaryen-c.cpp
@@ -49,7 +49,10 @@ BinaryenLiteral toBinaryenLiteral(Literal x) {
case Type::i64: ret.i64 = x.geti64(); break;
case Type::f32: ret.i32 = x.reinterpreti32(); break;
case Type::f64: ret.i64 = x.reinterpreti64(); break;
- case Type::v128: assert(false && "v128 not implemented yet");
+ case Type::v128: {
+ memcpy(&ret.v128, x.getv128Ptr(), 16);
+ break;
+ }
case Type::none:
case Type::unreachable: WASM_UNREACHABLE();
}
@@ -62,6 +65,7 @@ Literal fromBinaryenLiteral(BinaryenLiteral x) {
case Type::i64: return Literal(x.i64);
case Type::f32: return Literal(x.i32).castToF32();
case Type::f64: return Literal(x.i64).castToF64();
+ case Type::v128: return Literal(x.v128);
case Type::none:
case Type::unreachable: WASM_UNREACHABLE();
}
@@ -82,9 +86,9 @@ static PassOptions globalPassOptions = PassOptions::getWithDefaultOptimizationOp
static int tracing = 0;
-void traceNameOrNULL(const char* name) {
- if (name) std::cout << "\"" << name << "\"";
- else std::cout << "NULL";
+void traceNameOrNULL(const char* name, std::ostream &out = std::cout) {
+ if (name) out << "\"" << name << "\"";
+ else out << "NULL";
}
std::map<BinaryenFunctionTypeRef, size_t> functionTypes;
@@ -101,14 +105,19 @@ size_t noteExpression(BinaryenExpressionRef expression) {
return id;
}
+std::string getTemp() {
+ static size_t n = 0;
+ return "t" + std::to_string(n++);
+}
+
template<typename T>
-void printArg(T arg) {
- std::cout << arg;
+void printArg(std::ostream &setup, std::ostream& out, T arg) {
+ out << arg;
}
template<>
-void printArg(void* arg) {
- std::cout << "expressions[" << expressions[arg] << "]";
+void printArg(std::ostream &setup, std::ostream& out, BinaryenExpressionRef arg) {
+ out << "expressions[" << expressions[arg] << "]";
}
struct StringLit {
@@ -117,60 +126,83 @@ struct StringLit {
};
template<>
-void printArg(StringLit arg) {
- traceNameOrNULL(arg.name);
+void printArg(std::ostream &setup, std::ostream& out, StringLit arg) {
+ traceNameOrNULL(arg.name, out);
}
template<>
-void printArg(BinaryenType arg) {
+void printArg(std::ostream &setup, std::ostream &out, BinaryenType arg) {
if (arg == BinaryenTypeAuto()) {
- std::cout << "BinaryenTypeAuto()";
+ out << "BinaryenTypeAuto()";
} else {
- std::cout << arg;
+ out << arg;
}
}
template<>
-void printArg(BinaryenLiteral arg) {
+void printArg(std::ostream &setup, std::ostream &out, BinaryenLiteral arg) {
switch (arg.type) {
- case Type::i32: std::cout << "BinaryenLiteralInt32(" << arg.i32 << ")"; break;
- case Type::i64: std::cout << "BinaryenLiteralInt64(" << arg.i64 << ")"; break;
+ case Type::i32: out << "BinaryenLiteralInt32(" << arg.i32 << ")"; break;
+ case Type::i64: out << "BinaryenLiteralInt64(" << arg.i64 << ")"; break;
case Type::f32:
if (std::isnan(arg.f32)) {
- std::cout << "BinaryenLiteralFloat32(NAN)"; break;
+ out << "BinaryenLiteralFloat32(NAN)"; break;
} else {
- std::cout << "BinaryenLiteralFloat32(" << arg.f32 << ")"; break;
+ out << "BinaryenLiteralFloat32(" << arg.f32 << ")"; break;
}
case Type::f64:
if (std::isnan(arg.f64)) {
- std::cout << "BinaryenLiteralFloat64(NAN)"; break;
+ out << "BinaryenLiteralFloat64(NAN)"; break;
} else {
- std::cout << "BinaryenLiteralFloat64(" << arg.f64 << ")"; break;
+ out << "BinaryenLiteralFloat64(" << arg.f64 << ")"; break;
+ }
+ case Type::v128: {
+ std::string array = getTemp();
+ setup << "uint8_t " << array << "[] = {";
+ for (size_t i = 0; i < 16; ++i) {
+ setup << int(arg.v128[i]);
+ if (i < 15) {
+ setup << ", ";
+ }
}
- case Type::v128:
+ setup << "};\n";
+ out << "BinaryenLiteralVec128(" << array << ")";
+ break;
+ }
case Type::none:
case Type::unreachable: WASM_UNREACHABLE();
}
}
template<typename T>
-void traceArgs(T arg) {
- printArg(arg);
+void traceArgs(std::ostream &setup, std::ostream &out, T arg) {
+ printArg(setup, out, arg);
}
template<typename T, typename S, typename ...Ts>
-void traceArgs(T arg, S next, Ts... rest) {
- printArg(arg);
- std::cout << ", ";
- traceArgs(next, rest...);
+void traceArgs(std::ostream &setup, std::ostream &out, T arg, S next, Ts... rest) {
+ printArg(setup, out, arg);
+ out << ", ";
+ traceArgs(setup, out, next, rest...);
}
template<typename ...Ts>
void traceExpression(BinaryenExpressionRef expr, const char* constructor, Ts... args) {
auto id = noteExpression(expr);
- std::cout << " expressions[" << id << "] = " << constructor << "(";
- traceArgs("the_module", args...);
- std::cout << ");\n";
+ std::stringstream setup, out;
+ out << "expressions[" << id << "] = " << constructor << "(";
+ traceArgs(setup, out, "the_module", args...);
+ out << ");\n";
+ if (!setup.str().empty()) {
+ std::cout << " {\n";
+ for (std::string line; getline(setup, line);) {
+ std::cout << " " << line << "\n";
+ }
+ std::cout << " " << out.str();
+ std::cout << " }\n";
+ } else {
+ std::cout << " " << out.str();
+ }
}
extern "C" {
@@ -226,6 +258,11 @@ BinaryenExpressionId BinaryenAtomicCmpxchgId(void) { return Expression::Id::Atom
BinaryenExpressionId BinaryenAtomicRMWId(void) { return Expression::Id::AtomicRMWId; }
BinaryenExpressionId BinaryenAtomicWaitId(void) { return Expression::Id::AtomicWaitId; }
BinaryenExpressionId BinaryenAtomicWakeId(void) { return Expression::Id::AtomicWakeId; }
+BinaryenExpressionId BinaryenSIMDExtractId(void) { return Expression::Id::SIMDExtractId; }
+BinaryenExpressionId BinaryenSIMDReplaceId(void) { return Expression::Id::SIMDReplaceId; }
+BinaryenExpressionId BinaryenSIMDShuffleId(void) { return Expression::Id::SIMDShuffleId; }
+BinaryenExpressionId BinaryenSIMDBitselectId(void) { return Expression::Id::SIMDBitselectId; }
+BinaryenExpressionId BinaryenSIMDShiftId(void) { return Expression::Id::SIMDShiftId; }
// External kinds
@@ -325,6 +362,7 @@ BinaryenLiteral BinaryenLiteralInt32(int32_t x) { return toBinaryenLiteral(Liter
BinaryenLiteral BinaryenLiteralInt64(int64_t x) { return toBinaryenLiteral(Literal(x)); }
BinaryenLiteral BinaryenLiteralFloat32(float x) { return toBinaryenLiteral(Literal(x)); }
BinaryenLiteral BinaryenLiteralFloat64(double x) { return toBinaryenLiteral(Literal(x)); }
+BinaryenLiteral BinaryenLiteralVec128(const uint8_t x[16]) { return toBinaryenLiteral(Literal(x)); }
BinaryenLiteral BinaryenLiteralFloat32Bits(int32_t x) { return toBinaryenLiteral(Literal(x).castToF32()); }
BinaryenLiteral BinaryenLiteralFloat64Bits(int64_t x) { return toBinaryenLiteral(Literal(x).castToF64()); }
@@ -474,6 +512,141 @@ BinaryenOp BinaryenTruncSatSFloat64ToInt32(void) { return TruncSatSFloat64ToInt3
BinaryenOp BinaryenTruncSatSFloat64ToInt64(void) { return TruncSatSFloat64ToInt64; }
BinaryenOp BinaryenTruncSatUFloat64ToInt32(void) { return TruncSatUFloat64ToInt32; }
BinaryenOp BinaryenTruncSatUFloat64ToInt64(void) { return TruncSatUFloat64ToInt64; }
+BinaryenOp BinaryenSplatVecI8x16(void) { return SplatVecI8x16; }
+BinaryenOp BinaryenExtractLaneSVecI8x16(void) { return ExtractLaneSVecI8x16; }
+BinaryenOp BinaryenExtractLaneUVecI8x16(void) { return ExtractLaneUVecI8x16; }
+BinaryenOp BinaryenReplaceLaneVecI8x16(void) { return ReplaceLaneVecI8x16; }
+BinaryenOp BinaryenSplatVecI16x8(void) { return SplatVecI16x8; }
+BinaryenOp BinaryenExtractLaneSVecI16x8(void) { return ExtractLaneSVecI16x8; }
+BinaryenOp BinaryenExtractLaneUVecI16x8(void) { return ExtractLaneUVecI16x8; }
+BinaryenOp BinaryenReplaceLaneVecI16x8(void) { return ReplaceLaneVecI16x8; }
+BinaryenOp BinaryenSplatVecI32x4(void) { return SplatVecI32x4; }
+BinaryenOp BinaryenExtractLaneVecI32x4(void) { return ExtractLaneVecI32x4; }
+BinaryenOp BinaryenReplaceLaneVecI32x4(void) { return ReplaceLaneVecI32x4; }
+BinaryenOp BinaryenSplatVecI64x2(void) { return SplatVecI64x2; }
+BinaryenOp BinaryenExtractLaneVecI64x2(void) { return ExtractLaneVecI64x2; }
+BinaryenOp BinaryenReplaceLaneVecI64x2(void) { return ReplaceLaneVecI64x2; }
+BinaryenOp BinaryenSplatVecF32x4(void) { return SplatVecF32x4; }
+BinaryenOp BinaryenExtractLaneVecF32x4(void) { return ExtractLaneVecF32x4; }
+BinaryenOp BinaryenReplaceLaneVecF32x4(void) { return ReplaceLaneVecF32x4; }
+BinaryenOp BinaryenSplatVecF64x2(void) { return SplatVecF64x2; }
+BinaryenOp BinaryenExtractLaneVecF64x2(void) { return ExtractLaneVecF64x2; }
+BinaryenOp BinaryenReplaceLaneVecF64x2(void) { return ReplaceLaneVecF64x2; }
+BinaryenOp BinaryenEqVecI8x16(void) { return EqVecI8x16; }
+BinaryenOp BinaryenNeVecI8x16(void) { return NeVecI8x16; }
+BinaryenOp BinaryenLtSVecI8x16(void) { return LtSVecI8x16; }
+BinaryenOp BinaryenLtUVecI8x16(void) { return LtUVecI8x16; }
+BinaryenOp BinaryenGtSVecI8x16(void) { return GtSVecI8x16; }
+BinaryenOp BinaryenGtUVecI8x16(void) { return GtUVecI8x16; }
+BinaryenOp BinaryenLeSVecI8x16(void) { return LeSVecI8x16; }
+BinaryenOp BinaryenLeUVecI8x16(void) { return LeUVecI8x16; }
+BinaryenOp BinaryenGeSVecI8x16(void) { return GeSVecI8x16; }
+BinaryenOp BinaryenGeUVecI8x16(void) { return GeUVecI8x16; }
+BinaryenOp BinaryenEqVecI16x8(void) { return EqVecI16x8; }
+BinaryenOp BinaryenNeVecI16x8(void) { return NeVecI16x8; }
+BinaryenOp BinaryenLtSVecI16x8(void) { return LtSVecI16x8; }
+BinaryenOp BinaryenLtUVecI16x8(void) { return LtUVecI16x8; }
+BinaryenOp BinaryenGtSVecI16x8(void) { return GtSVecI16x8; }
+BinaryenOp BinaryenGtUVecI16x8(void) { return GtUVecI16x8; }
+BinaryenOp BinaryenLeSVecI16x8(void) { return LeSVecI16x8; }
+BinaryenOp BinaryenLeUVecI16x8(void) { return LeUVecI16x8; }
+BinaryenOp BinaryenGeSVecI16x8(void) { return GeSVecI16x8; }
+BinaryenOp BinaryenGeUVecI16x8(void) { return GeUVecI16x8; }
+BinaryenOp BinaryenEqVecI32x4(void) { return EqVecI32x4; }
+BinaryenOp BinaryenNeVecI32x4(void) { return NeVecI32x4; }
+BinaryenOp BinaryenLtSVecI32x4(void) { return LtSVecI32x4; }
+BinaryenOp BinaryenLtUVecI32x4(void) { return LtUVecI32x4; }
+BinaryenOp BinaryenGtSVecI32x4(void) { return GtSVecI32x4; }
+BinaryenOp BinaryenGtUVecI32x4(void) { return GtUVecI32x4; }
+BinaryenOp BinaryenLeSVecI32x4(void) { return LeSVecI32x4; }
+BinaryenOp BinaryenLeUVecI32x4(void) { return LeUVecI32x4; }
+BinaryenOp BinaryenGeSVecI32x4(void) { return GeSVecI32x4; }
+BinaryenOp BinaryenGeUVecI32x4(void) { return GeUVecI32x4; }
+BinaryenOp BinaryenEqVecF32x4(void) { return EqVecF32x4; }
+BinaryenOp BinaryenNeVecF32x4(void) { return NeVecF32x4; }
+BinaryenOp BinaryenLtVecF32x4(void) { return LtVecF32x4; }
+BinaryenOp BinaryenGtVecF32x4(void) { return GtVecF32x4; }
+BinaryenOp BinaryenLeVecF32x4(void) { return LeVecF32x4; }
+BinaryenOp BinaryenGeVecF32x4(void) { return GeVecF32x4; }
+BinaryenOp BinaryenEqVecF64x2(void) { return EqVecF64x2; }
+BinaryenOp BinaryenNeVecF64x2(void) { return NeVecF64x2; }
+BinaryenOp BinaryenLtVecF64x2(void) { return LtVecF64x2; }
+BinaryenOp BinaryenGtVecF64x2(void) { return GtVecF64x2; }
+BinaryenOp BinaryenLeVecF64x2(void) { return LeVecF64x2; }
+BinaryenOp BinaryenGeVecF64x2(void) { return GeVecF64x2; }
+BinaryenOp BinaryenNotVec128(void) { return NotVec128; }
+BinaryenOp BinaryenAndVec128(void) { return AndVec128; }
+BinaryenOp BinaryenOrVec128(void) { return OrVec128; }
+BinaryenOp BinaryenXorVec128(void) { return XorVec128; }
+BinaryenOp BinaryenNegVecI8x16(void) { return NegVecI8x16; }
+BinaryenOp BinaryenAnyTrueVecI8x16(void) { return AnyTrueVecI8x16; }
+BinaryenOp BinaryenAllTrueVecI8x16(void) { return AllTrueVecI8x16; }
+BinaryenOp BinaryenShlVecI8x16(void) { return ShlVecI8x16; }
+BinaryenOp BinaryenShrSVecI8x16(void) { return ShrSVecI8x16; }
+BinaryenOp BinaryenShrUVecI8x16(void) { return ShrUVecI8x16; }
+BinaryenOp BinaryenAddVecI8x16(void) { return AddVecI8x16; }
+BinaryenOp BinaryenAddSatSVecI8x16(void) { return AddSatSVecI8x16; }
+BinaryenOp BinaryenAddSatUVecI8x16(void) { return AddSatUVecI8x16; }
+BinaryenOp BinaryenSubVecI8x16(void) { return SubVecI8x16; }
+BinaryenOp BinaryenSubSatSVecI8x16(void) { return SubSatSVecI8x16; }
+BinaryenOp BinaryenSubSatUVecI8x16(void) { return SubSatUVecI8x16; }
+BinaryenOp BinaryenMulVecI8x16(void) { return MulVecI8x16; }
+BinaryenOp BinaryenNegVecI16x8(void) { return NegVecI16x8; }
+BinaryenOp BinaryenAnyTrueVecI16x8(void) { return AnyTrueVecI16x8; }
+BinaryenOp BinaryenAllTrueVecI16x8(void) { return AllTrueVecI16x8; }
+BinaryenOp BinaryenShlVecI16x8(void) { return ShlVecI16x8; }
+BinaryenOp BinaryenShrSVecI16x8(void) { return ShrSVecI16x8; }
+BinaryenOp BinaryenShrUVecI16x8(void) { return ShrUVecI16x8; }
+BinaryenOp BinaryenAddVecI16x8(void) { return AddVecI16x8; }
+BinaryenOp BinaryenAddSatSVecI16x8(void) { return AddSatSVecI16x8; }
+BinaryenOp BinaryenAddSatUVecI16x8(void) { return AddSatUVecI16x8; }
+BinaryenOp BinaryenSubVecI16x8(void) { return SubVecI16x8; }
+BinaryenOp BinaryenSubSatSVecI16x8(void) { return SubSatSVecI16x8; }
+BinaryenOp BinaryenSubSatUVecI16x8(void) { return SubSatUVecI16x8; }
+BinaryenOp BinaryenMulVecI16x8(void) { return MulVecI16x8; }
+BinaryenOp BinaryenNegVecI32x4(void) { return NegVecI32x4; }
+BinaryenOp BinaryenAnyTrueVecI32x4(void) { return AnyTrueVecI32x4; }
+BinaryenOp BinaryenAllTrueVecI32x4(void) { return AllTrueVecI32x4; }
+BinaryenOp BinaryenShlVecI32x4(void) { return ShlVecI32x4; }
+BinaryenOp BinaryenShrSVecI32x4(void) { return ShrSVecI32x4; }
+BinaryenOp BinaryenShrUVecI32x4(void) { return ShrUVecI32x4; }
+BinaryenOp BinaryenAddVecI32x4(void) { return AddVecI32x4; }
+BinaryenOp BinaryenSubVecI32x4(void) { return SubVecI32x4; }
+BinaryenOp BinaryenMulVecI32x4(void) { return MulVecI32x4; }
+BinaryenOp BinaryenNegVecI64x2(void) { return NegVecI64x2; }
+BinaryenOp BinaryenAnyTrueVecI64x2(void) { return AnyTrueVecI64x2; }
+BinaryenOp BinaryenAllTrueVecI64x2(void) { return AllTrueVecI64x2; }
+BinaryenOp BinaryenShlVecI64x2(void) { return ShlVecI64x2; }
+BinaryenOp BinaryenShrSVecI64x2(void) { return ShrSVecI64x2; }
+BinaryenOp BinaryenShrUVecI64x2(void) { return ShrUVecI64x2; }
+BinaryenOp BinaryenAddVecI64x2(void) { return AddVecI64x2; }
+BinaryenOp BinaryenSubVecI64x2(void) { return SubVecI64x2; }
+BinaryenOp BinaryenAbsVecF32x4(void) { return AbsVecF32x4; }
+BinaryenOp BinaryenNegVecF32x4(void) { return NegVecF32x4; }
+BinaryenOp BinaryenSqrtVecF32x4(void) { return SqrtVecF32x4; }
+BinaryenOp BinaryenAddVecF32x4(void) { return AddVecF32x4; }
+BinaryenOp BinaryenSubVecF32x4(void) { return SubVecF32x4; }
+BinaryenOp BinaryenMulVecF32x4(void) { return MulVecF32x4; }
+BinaryenOp BinaryenDivVecF32x4(void) { return DivVecF32x4; }
+BinaryenOp BinaryenMinVecF32x4(void) { return MinVecF32x4; }
+BinaryenOp BinaryenMaxVecF32x4(void) { return MaxVecF32x4; }
+BinaryenOp BinaryenAbsVecF64x2(void) { return AbsVecF64x2; }
+BinaryenOp BinaryenNegVecF64x2(void) { return NegVecF64x2; }
+BinaryenOp BinaryenSqrtVecF64x2(void) { return SqrtVecF64x2; }
+BinaryenOp BinaryenAddVecF64x2(void) { return AddVecF64x2; }
+BinaryenOp BinaryenSubVecF64x2(void) { return SubVecF64x2; }
+BinaryenOp BinaryenMulVecF64x2(void) { return MulVecF64x2; }
+BinaryenOp BinaryenDivVecF64x2(void) { return DivVecF64x2; }
+BinaryenOp BinaryenMinVecF64x2(void) { return MinVecF64x2; }
+BinaryenOp BinaryenMaxVecF64x2(void) { return MaxVecF64x2; }
+BinaryenOp BinaryenTruncSatSVecF32x4ToVecI32x4(void) { return TruncSatSVecF32x4ToVecI32x4; }
+BinaryenOp BinaryenTruncSatUVecF32x4ToVecI32x4(void) { return TruncSatUVecF32x4ToVecI32x4; }
+BinaryenOp BinaryenTruncSatSVecF64x2ToVecI64x2(void) { return TruncSatSVecF64x2ToVecI64x2; }
+BinaryenOp BinaryenTruncSatUVecF64x2ToVecI64x2(void) { return TruncSatUVecF64x2ToVecI64x2; }
+BinaryenOp BinaryenConvertSVecI32x4ToVecF32x4(void) { return ConvertSVecI32x4ToVecF32x4; }
+BinaryenOp BinaryenConvertUVecI32x4ToVecF32x4(void) { return ConvertUVecI32x4ToVecF32x4; }
+BinaryenOp BinaryenConvertSVecI64x2ToVecF64x2(void) { return ConvertSVecI64x2ToVecF64x2; }
+BinaryenOp BinaryenConvertUVecI64x2ToVecF64x2(void) { return ConvertUVecI64x2ToVecF64x2; }
BinaryenExpressionRef BinaryenBlock(BinaryenModuleRef module, const char* name, BinaryenExpressionRef* children, BinaryenIndex numChildren, BinaryenType type) {
auto* ret = ((Module*)module)->allocator.alloc<Block>();
@@ -854,6 +1027,53 @@ BinaryenExpressionRef BinaryenAtomicWake(BinaryenModuleRef module, BinaryenExpre
return static_cast<Expression*>(ret);
}
+BinaryenExpressionRef BinaryenSIMDExtract(BinaryenModuleRef module, BinaryenOp op, BinaryenExpressionRef vec, uint8_t idx) {
+ auto* ret = Builder(*((Module*)module)).makeSIMDExtract(SIMDExtractOp(op), (Expression*) vec, idx);
+ if (tracing) {
+ traceExpression(ret, "BinaryenSIMDExtract", op, vec, int(idx));
+ }
+ return static_cast<Expression*>(ret);
+}
+BinaryenExpressionRef BinaryenSIMDReplace(BinaryenModuleRef module, BinaryenOp op, BinaryenExpressionRef vec, uint8_t idx, BinaryenExpressionRef value) {
+ auto* ret = Builder(*((Module*)module)).makeSIMDReplace(SIMDReplaceOp(op), (Expression*) vec, idx, (Expression*)value);
+ if (tracing) {
+ traceExpression(ret, "BinaryenSIMDReplace", op, vec, int(idx), value);
+ }
+ return static_cast<Expression*>(ret);
+}
+BinaryenExpressionRef BinaryenSIMDShuffle(BinaryenModuleRef module, BinaryenExpressionRef left, BinaryenExpressionRef right, const uint8_t mask_[16]) {
+ std::array<uint8_t, 16> mask;
+ memcpy(mask.data(), mask_, 16);
+ auto* ret = Builder(*((Module*)module)).makeSIMDShuffle((Expression*)left, (Expression*)right, mask);
+ if (tracing) {
+ std::cout << " {\n";
+ std::cout << " uint8_t mask[] = {";
+ for (size_t i = 0; i < mask.size(); ++i) {
+ std::cout << int(mask[i]);
+ if (i < mask.size() - 1) {
+ std::cout << ", ";
+ }
+ }
+ std::cout << "};\n ";
+ traceExpression(ret, "BinaryenSIMDShuffle", left, right, "mask");
+ std::cout << " }\n";
+ }
+ return static_cast<Expression*>(ret);
+}
+BinaryenExpressionRef BinaryenSIMDBitselect(BinaryenModuleRef module, BinaryenExpressionRef left, BinaryenExpressionRef right, BinaryenExpressionRef cond) {
+ auto* ret = Builder(*((Module*)module)).makeSIMDBitselect((Expression*)left, (Expression*)right, (Expression*)cond);
+ if (tracing) {
+ traceExpression(ret, "BinaryenSIMDBitselect", left, right, cond);
+ }
+ return static_cast<Expression*>(ret);
+}
+BinaryenExpressionRef BinaryenSIMDShift(BinaryenModuleRef module, BinaryenOp op, BinaryenExpressionRef vec, BinaryenExpressionRef shift) {
+ auto* ret = Builder(*((Module*)module)).makeSIMDShift(SIMDShiftOp(op), (Expression*)vec, (Expression*)shift);
+ if (tracing) {
+ traceExpression(ret, "BinaryenSIMDShift", op, vec, shift);
+ }
+ return static_cast<Expression*>(ret);
+}
// Expression utility
@@ -1604,6 +1824,155 @@ BinaryenExpressionRef BinaryenAtomicWakeGetWakeCount(BinaryenExpressionRef expr)
assert(expression->is<AtomicWake>());
return static_cast<AtomicWake*>(expression)->wakeCount;
}
+// SIMDExtract
+BinaryenOp BinaryenSIMDExtractGetOp(BinaryenExpressionRef expr) {
+ if (tracing) {
+ std::cout << " BinaryenSIMDExtractGetOp(expressions[" << expressions[expr] << "]);\n";
+ }
+
+ auto* expression = (Expression*)expr;
+ assert(expression->is<SIMDExtract>());
+ return static_cast<SIMDExtract*>(expression)->op;
+}
+BinaryenExpressionRef BinaryenSIMDExtractGetVec(BinaryenExpressionRef expr) {
+ if (tracing) {
+ std::cout << " BinaryenSIMDExtractGetVec(expressions[" << expressions[expr] << "]);\n";
+ }
+
+ auto* expression = (Expression*)expr;
+ assert(expression->is<SIMDExtract>());
+ return static_cast<SIMDExtract*>(expression)->vec;
+}
+uint8_t BinaryenSIMDExtractGetIdx(BinaryenExpressionRef expr) {
+ if (tracing) {
+ std::cout << " BinaryenSIMDExtractGetIdx(expressions[" << expressions[expr] << "]);\n";
+ }
+
+ auto* expression = (Expression*)expr;
+ assert(expression->is<SIMDExtract>());
+ return static_cast<SIMDExtract*>(expression)->idx;
+}
+// SIMDReplace
+BinaryenOp BinaryenSIMDReplaceGetOp(BinaryenExpressionRef expr) {
+ if (tracing) {
+ std::cout << " BinaryenSIMDReplaceGetOp(expressions[" << expressions[expr] << "]);\n";
+ }
+
+ auto* expression = (Expression*)expr;
+ assert(expression->is<SIMDReplace>());
+ return static_cast<SIMDReplace*>(expression)->op;
+}
+BinaryenExpressionRef BinaryenSIMDReplaceGetVec(BinaryenExpressionRef expr) {
+ if (tracing) {
+ std::cout << " BinaryenSIMDReplaceGetVec(expressions[" << expressions[expr] << "]);\n";
+ }
+
+ auto* expression = (Expression*)expr;
+ assert(expression->is<SIMDReplace>());
+ return static_cast<SIMDReplace*>(expression)->vec;
+}
+uint8_t BinaryenSIMDReplaceGetIdx(BinaryenExpressionRef expr) {
+ if (tracing) {
+ std::cout << " BinaryenSIMDReplaceGetIdx(expressions[" << expressions[expr] << "]);\n";
+ }
+
+ auto* expression = (Expression*)expr;
+ assert(expression->is<SIMDReplace>());
+ return static_cast<SIMDReplace*>(expression)->idx;
+}
+BinaryenExpressionRef BinaryenSIMDReplaceGetValue(BinaryenExpressionRef expr) {
+ if (tracing) {
+ std::cout << " BinaryenSIMDReplaceGetValue(expressions[" << expressions[expr] << "]);\n";
+ }
+
+ auto* expression = (Expression*)expr;
+ assert(expression->is<SIMDReplace>());
+ return static_cast<SIMDReplace*>(expression)->value;
+}
+// SIMDShuffle
+BinaryenExpressionRef BinaryenSIMDShuffleGetLeft(BinaryenExpressionRef expr) {
+ if (tracing) {
+ std::cout << " BinaryenSIMDShuffleGetLeft(expressions[" << expressions[expr] << "]);\n";
+ }
+
+ auto* expression = (Expression*)expr;
+ assert(expression->is<SIMDShuffle>());
+ return static_cast<SIMDShuffle*>(expression)->left;
+}
+BinaryenExpressionRef BinaryenSIMDShuffleGetRight(BinaryenExpressionRef expr) {
+ if (tracing) {
+ std::cout << " BinaryenSIMDShuffleGetRight(expressions[" << expressions[expr] << "]);\n";
+ }
+
+ auto* expression = (Expression*)expr;
+ assert(expression->is<SIMDShuffle>());
+ return static_cast<SIMDShuffle*>(expression)->right;
+}
+void BinaryenSIMDShuffleGetMask(BinaryenExpressionRef expr, uint8_t *mask) {
+ if (tracing) {
+ std::cout << " BinaryenSIMDShuffleGetMask(expressions[" << expressions[expr] << "]);\n";
+ }
+
+ auto* expression = (Expression*)expr;
+ assert(expression->is<SIMDShuffle>());
+ memcpy(mask, static_cast<SIMDShuffle*>(expression)->mask.data(), 16);
+}
+// SIMDBitselect
+BinaryenExpressionRef BinaryenSIMDBitselectGetLeft(BinaryenExpressionRef expr) {
+ if (tracing) {
+ std::cout << " BinaryenSIMDBitselectGetLeft(expressions[" << expressions[expr] << "]);\n";
+ }
+
+ auto* expression = (Expression*)expr;
+ assert(expression->is<SIMDBitselect>());
+ return static_cast<SIMDBitselect*>(expression)->left;
+}
+BinaryenExpressionRef BinaryenSIMDBitselectGetRight(BinaryenExpressionRef expr) {
+ if (tracing) {
+ std::cout << " BinaryenSIMDBitselectGetRight(expressions[" << expressions[expr] << "]);\n";
+ }
+
+ auto* expression = (Expression*)expr;
+ assert(expression->is<SIMDBitselect>());
+ return static_cast<SIMDBitselect*>(expression)->right;
+}
+BinaryenExpressionRef BinaryenSIMDBitselectGetCond(BinaryenExpressionRef expr) {
+ if (tracing) {
+ std::cout << " BinaryenSIMDBitselectGetCond(expressions[" << expressions[expr] << "]);\n";
+ }
+
+ auto* expression = (Expression*)expr;
+ assert(expression->is<SIMDBitselect>());
+ return static_cast<SIMDBitselect*>(expression)->cond;
+}
+// SIMDShift
+BinaryenOp BinaryenSIMDShiftGetOp(BinaryenExpressionRef expr) {
+ if (tracing) {
+ std::cout << " BinaryenSIMDShiftGetOp(expressions[" << expressions[expr] << "]);\n";
+ }
+
+ auto* expression = (Expression*)expr;
+ assert(expression->is<SIMDShift>());
+ return static_cast<SIMDShift*>(expression)->op;
+}
+BinaryenExpressionRef BinaryenSIMDShiftGetVec(BinaryenExpressionRef expr) {
+ if (tracing) {
+ std::cout << " BinaryenSIMDShiftGetVec(expressions[" << expressions[expr] << "]);\n";
+ }
+
+ auto* expression = (Expression*)expr;
+ assert(expression->is<SIMDShift>());
+ return static_cast<SIMDShift*>(expression)->vec;
+}
+BinaryenExpressionRef BinaryenSIMDShiftGetShift(BinaryenExpressionRef expr) {
+ if (tracing) {
+ std::cout << " BinaryenSIMDShiftGetShift(expressions[" << expressions[expr] << "]);\n";
+ }
+
+ auto* expression = (Expression*)expr;
+ assert(expression->is<SIMDShift>());
+ return static_cast<SIMDShift*>(expression)->shift;
+}
// Functions
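
For orientation, a minimal sketch of how the builders added above could be driven from client code. BinaryenModuleCreate, BinaryenConst, BinaryenExpressionPrint, and BinaryenModuleDispose are pre-existing C API entry points; the v128 literal and the SIMD extract builder are the ones introduced in this diff, and the all-42 vector is an arbitrary illustration, not part of the commit:

#include <stdint.h>
#include <string.h>
#include "binaryen-c.h"

int main(void) {
  BinaryenModuleRef module = BinaryenModuleCreate();

  // A v128 constant; BinaryenLiteralVec128 copies the 16 bytes into the literal.
  uint8_t bytes[16];
  memset(bytes, 42, sizeof(bytes));
  BinaryenExpressionRef vec = BinaryenConst(module, BinaryenLiteralVec128(bytes));

  // i8x16.extract_lane_s of lane 0, built with the new SIMDExtract builder.
  BinaryenExpressionRef lane =
      BinaryenSIMDExtract(module, BinaryenExtractLaneSVecI8x16(), vec, 0);

  BinaryenExpressionPrint(lane);  // print the expression tree for inspection
  BinaryenModuleDispose(module);
  return 0;
}
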
diff --git a/src/binaryen-c.h b/src/binaryen-c.h
index dc47b379f..9d1992851 100644
--- a/src/binaryen-c.h
+++ b/src/binaryen-c.h
@@ -117,6 +117,11 @@ BinaryenExpressionId BinaryenAtomicCmpxchgId(void);
BinaryenExpressionId BinaryenAtomicRMWId(void);
BinaryenExpressionId BinaryenAtomicWaitId(void);
BinaryenExpressionId BinaryenAtomicWakeId(void);
+BinaryenExpressionId BinaryenSIMDExtractId(void);
+BinaryenExpressionId BinaryenSIMDReplaceId(void);
+BinaryenExpressionId BinaryenSIMDShuffleId(void);
+BinaryenExpressionId BinaryenSIMDBitselectId(void);
+BinaryenExpressionId BinaryenSIMDShiftId(void);
// External kinds (call to get the value of each; you can cache them)
@@ -166,6 +171,7 @@ struct BinaryenLiteral {
int64_t i64;
float f32;
double f64;
+ uint8_t v128[16];
};
};
@@ -173,6 +179,7 @@ struct BinaryenLiteral BinaryenLiteralInt32(int32_t x);
struct BinaryenLiteral BinaryenLiteralInt64(int64_t x);
struct BinaryenLiteral BinaryenLiteralFloat32(float x);
struct BinaryenLiteral BinaryenLiteralFloat64(double x);
+struct BinaryenLiteral BinaryenLiteralVec128(const uint8_t x[16]);
struct BinaryenLiteral BinaryenLiteralFloat32Bits(int32_t x);
struct BinaryenLiteral BinaryenLiteralFloat64Bits(int64_t x);
@@ -332,6 +339,141 @@ BinaryenOp BinaryenTruncSatSFloat64ToInt32(void);
BinaryenOp BinaryenTruncSatSFloat64ToInt64(void);
BinaryenOp BinaryenTruncSatUFloat64ToInt32(void);
BinaryenOp BinaryenTruncSatUFloat64ToInt64(void);
+BinaryenOp BinaryenSplatVecI8x16(void);
+BinaryenOp BinaryenExtractLaneSVecI8x16(void);
+BinaryenOp BinaryenExtractLaneUVecI8x16(void);
+BinaryenOp BinaryenReplaceLaneVecI8x16(void);
+BinaryenOp BinaryenSplatVecI16x8(void);
+BinaryenOp BinaryenExtractLaneSVecI16x8(void);
+BinaryenOp BinaryenExtractLaneUVecI16x8(void);
+BinaryenOp BinaryenReplaceLaneVecI16x8(void);
+BinaryenOp BinaryenSplatVecI32x4(void);
+BinaryenOp BinaryenExtractLaneVecI32x4(void);
+BinaryenOp BinaryenReplaceLaneVecI32x4(void);
+BinaryenOp BinaryenSplatVecI64x2(void);
+BinaryenOp BinaryenExtractLaneVecI64x2(void);
+BinaryenOp BinaryenReplaceLaneVecI64x2(void);
+BinaryenOp BinaryenSplatVecF32x4(void);
+BinaryenOp BinaryenExtractLaneVecF32x4(void);
+BinaryenOp BinaryenReplaceLaneVecF32x4(void);
+BinaryenOp BinaryenSplatVecF64x2(void);
+BinaryenOp BinaryenExtractLaneVecF64x2(void);
+BinaryenOp BinaryenReplaceLaneVecF64x2(void);
+BinaryenOp BinaryenEqVecI8x16(void);
+BinaryenOp BinaryenNeVecI8x16(void);
+BinaryenOp BinaryenLtSVecI8x16(void);
+BinaryenOp BinaryenLtUVecI8x16(void);
+BinaryenOp BinaryenGtSVecI8x16(void);
+BinaryenOp BinaryenGtUVecI8x16(void);
+BinaryenOp BinaryenLeSVecI8x16(void);
+BinaryenOp BinaryenLeUVecI8x16(void);
+BinaryenOp BinaryenGeSVecI8x16(void);
+BinaryenOp BinaryenGeUVecI8x16(void);
+BinaryenOp BinaryenEqVecI16x8(void);
+BinaryenOp BinaryenNeVecI16x8(void);
+BinaryenOp BinaryenLtSVecI16x8(void);
+BinaryenOp BinaryenLtUVecI16x8(void);
+BinaryenOp BinaryenGtSVecI16x8(void);
+BinaryenOp BinaryenGtUVecI16x8(void);
+BinaryenOp BinaryenLeSVecI16x8(void);
+BinaryenOp BinaryenLeUVecI16x8(void);
+BinaryenOp BinaryenGeSVecI16x8(void);
+BinaryenOp BinaryenGeUVecI16x8(void);
+BinaryenOp BinaryenEqVecI32x4(void);
+BinaryenOp BinaryenNeVecI32x4(void);
+BinaryenOp BinaryenLtSVecI32x4(void);
+BinaryenOp BinaryenLtUVecI32x4(void);
+BinaryenOp BinaryenGtSVecI32x4(void);
+BinaryenOp BinaryenGtUVecI32x4(void);
+BinaryenOp BinaryenLeSVecI32x4(void);
+BinaryenOp BinaryenLeUVecI32x4(void);
+BinaryenOp BinaryenGeSVecI32x4(void);
+BinaryenOp BinaryenGeUVecI32x4(void);
+BinaryenOp BinaryenEqVecF32x4(void);
+BinaryenOp BinaryenNeVecF32x4(void);
+BinaryenOp BinaryenLtVecF32x4(void);
+BinaryenOp BinaryenGtVecF32x4(void);
+BinaryenOp BinaryenLeVecF32x4(void);
+BinaryenOp BinaryenGeVecF32x4(void);
+BinaryenOp BinaryenEqVecF64x2(void);
+BinaryenOp BinaryenNeVecF64x2(void);
+BinaryenOp BinaryenLtVecF64x2(void);
+BinaryenOp BinaryenGtVecF64x2(void);
+BinaryenOp BinaryenLeVecF64x2(void);
+BinaryenOp BinaryenGeVecF64x2(void);
+BinaryenOp BinaryenNotVec128(void);
+BinaryenOp BinaryenAndVec128(void);
+BinaryenOp BinaryenOrVec128(void);
+BinaryenOp BinaryenXorVec128(void);
+BinaryenOp BinaryenNegVecI8x16(void);
+BinaryenOp BinaryenAnyTrueVecI8x16(void);
+BinaryenOp BinaryenAllTrueVecI8x16(void);
+BinaryenOp BinaryenShlVecI8x16(void);
+BinaryenOp BinaryenShrSVecI8x16(void);
+BinaryenOp BinaryenShrUVecI8x16(void);
+BinaryenOp BinaryenAddVecI8x16(void);
+BinaryenOp BinaryenAddSatSVecI8x16(void);
+BinaryenOp BinaryenAddSatUVecI8x16(void);
+BinaryenOp BinaryenSubVecI8x16(void);
+BinaryenOp BinaryenSubSatSVecI8x16(void);
+BinaryenOp BinaryenSubSatUVecI8x16(void);
+BinaryenOp BinaryenMulVecI8x16(void);
+BinaryenOp BinaryenNegVecI16x8(void);
+BinaryenOp BinaryenAnyTrueVecI16x8(void);
+BinaryenOp BinaryenAllTrueVecI16x8(void);
+BinaryenOp BinaryenShlVecI16x8(void);
+BinaryenOp BinaryenShrSVecI16x8(void);
+BinaryenOp BinaryenShrUVecI16x8(void);
+BinaryenOp BinaryenAddVecI16x8(void);
+BinaryenOp BinaryenAddSatSVecI16x8(void);
+BinaryenOp BinaryenAddSatUVecI16x8(void);
+BinaryenOp BinaryenSubVecI16x8(void);
+BinaryenOp BinaryenSubSatSVecI16x8(void);
+BinaryenOp BinaryenSubSatUVecI16x8(void);
+BinaryenOp BinaryenMulVecI16x8(void);
+BinaryenOp BinaryenNegVecI32x4(void);
+BinaryenOp BinaryenAnyTrueVecI32x4(void);
+BinaryenOp BinaryenAllTrueVecI32x4(void);
+BinaryenOp BinaryenShlVecI32x4(void);
+BinaryenOp BinaryenShrSVecI32x4(void);
+BinaryenOp BinaryenShrUVecI32x4(void);
+BinaryenOp BinaryenAddVecI32x4(void);
+BinaryenOp BinaryenSubVecI32x4(void);
+BinaryenOp BinaryenMulVecI32x4(void);
+BinaryenOp BinaryenNegVecI64x2(void);
+BinaryenOp BinaryenAnyTrueVecI64x2(void);
+BinaryenOp BinaryenAllTrueVecI64x2(void);
+BinaryenOp BinaryenShlVecI64x2(void);
+BinaryenOp BinaryenShrSVecI64x2(void);
+BinaryenOp BinaryenShrUVecI64x2(void);
+BinaryenOp BinaryenAddVecI64x2(void);
+BinaryenOp BinaryenSubVecI64x2(void);
+BinaryenOp BinaryenAbsVecF32x4(void);
+BinaryenOp BinaryenNegVecF32x4(void);
+BinaryenOp BinaryenSqrtVecF32x4(void);
+BinaryenOp BinaryenAddVecF32x4(void);
+BinaryenOp BinaryenSubVecF32x4(void);
+BinaryenOp BinaryenMulVecF32x4(void);
+BinaryenOp BinaryenDivVecF32x4(void);
+BinaryenOp BinaryenMinVecF32x4(void);
+BinaryenOp BinaryenMaxVecF32x4(void);
+BinaryenOp BinaryenAbsVecF64x2(void);
+BinaryenOp BinaryenNegVecF64x2(void);
+BinaryenOp BinaryenSqrtVecF64x2(void);
+BinaryenOp BinaryenAddVecF64x2(void);
+BinaryenOp BinaryenSubVecF64x2(void);
+BinaryenOp BinaryenMulVecF64x2(void);
+BinaryenOp BinaryenDivVecF64x2(void);
+BinaryenOp BinaryenMinVecF64x2(void);
+BinaryenOp BinaryenMaxVecF64x2(void);
+BinaryenOp BinaryenTruncSatSVecF32x4ToVecI32x4(void);
+BinaryenOp BinaryenTruncSatUVecF32x4ToVecI32x4(void);
+BinaryenOp BinaryenTruncSatSVecF64x2ToVecI64x2(void);
+BinaryenOp BinaryenTruncSatUVecF64x2ToVecI64x2(void);
+BinaryenOp BinaryenConvertSVecI32x4ToVecF32x4(void);
+BinaryenOp BinaryenConvertUVecI32x4ToVecF32x4(void);
+BinaryenOp BinaryenConvertSVecI64x2ToVecF64x2(void);
+BinaryenOp BinaryenConvertUVecI64x2ToVecF64x2(void);
typedef void* BinaryenExpressionRef;
@@ -393,192 +535,139 @@ BinaryenExpressionRef BinaryenAtomicRMW(BinaryenModuleRef module, BinaryenOp op,
BinaryenExpressionRef BinaryenAtomicCmpxchg(BinaryenModuleRef module, BinaryenIndex bytes, BinaryenIndex offset, BinaryenExpressionRef ptr, BinaryenExpressionRef expected, BinaryenExpressionRef replacement, BinaryenType type);
BinaryenExpressionRef BinaryenAtomicWait(BinaryenModuleRef module, BinaryenExpressionRef ptr, BinaryenExpressionRef expected, BinaryenExpressionRef timeout, BinaryenType type);
BinaryenExpressionRef BinaryenAtomicWake(BinaryenModuleRef module, BinaryenExpressionRef ptr, BinaryenExpressionRef wakeCount);
+BinaryenExpressionRef BinaryenSIMDExtract(BinaryenModuleRef module, BinaryenOp op, BinaryenExpressionRef vec, uint8_t idx);
+BinaryenExpressionRef BinaryenSIMDReplace(BinaryenModuleRef module, BinaryenOp op, BinaryenExpressionRef vec, uint8_t idx, BinaryenExpressionRef value);
+BinaryenExpressionRef BinaryenSIMDShuffle(BinaryenModuleRef module, BinaryenExpressionRef left, BinaryenExpressionRef right, const uint8_t mask[16]);
+BinaryenExpressionRef BinaryenSIMDBitselect(BinaryenModuleRef module, BinaryenExpressionRef left, BinaryenExpressionRef right, BinaryenExpressionRef cond);
+BinaryenExpressionRef BinaryenSIMDShift(BinaryenModuleRef module, BinaryenOp op, BinaryenExpressionRef vec, BinaryenExpressionRef shift);
-// Gets the id (kind) of the specified expression.
BinaryenExpressionId BinaryenExpressionGetId(BinaryenExpressionRef expr);
-// Gets the type of the specified expression.
BinaryenType BinaryenExpressionGetType(BinaryenExpressionRef expr);
-// Prints an expression to stdout. Useful for debugging.
void BinaryenExpressionPrint(BinaryenExpressionRef expr);
-// Gets the name of the specified `Block` expression. May be `NULL`.
const char* BinaryenBlockGetName(BinaryenExpressionRef expr);
-// Gets the number of nested child expressions within the specified `Block` expression.
BinaryenIndex BinaryenBlockGetNumChildren(BinaryenExpressionRef expr);
-// Gets the nested child expression at the specified index within the specified `Block` expression.
BinaryenExpressionRef BinaryenBlockGetChild(BinaryenExpressionRef expr, BinaryenIndex index);
-// Gets the nested condition expression within the specified `If` expression.
BinaryenExpressionRef BinaryenIfGetCondition(BinaryenExpressionRef expr);
-// Gets the nested ifTrue expression within the specified `If` expression.
BinaryenExpressionRef BinaryenIfGetIfTrue(BinaryenExpressionRef expr);
-// Gets the nested ifFalse expression within the specified `If` expression.
BinaryenExpressionRef BinaryenIfGetIfFalse(BinaryenExpressionRef expr);
-// Gets the name of the specified `Loop` expression. May be `NULL`.
const char* BinaryenLoopGetName(BinaryenExpressionRef expr);
-// Gets the nested body expression within the specified `Loop` expression.
BinaryenExpressionRef BinaryenLoopGetBody(BinaryenExpressionRef expr);
-// Gets the name of the specified `Break` expression. May be `NULL`.
const char* BinaryenBreakGetName(BinaryenExpressionRef expr);
-// Gets the nested condition expression within the specified `Break` expression. Returns `NULL` if this is a `br` and not a `br_if`.
BinaryenExpressionRef BinaryenBreakGetCondition(BinaryenExpressionRef expr);
-// Gets the nested value expression within the specified `Break` expression. May be `NULL`.
BinaryenExpressionRef BinaryenBreakGetValue(BinaryenExpressionRef expr);
-// Gets the number of names within the specified `Switch` expression.
BinaryenIndex BinaryenSwitchGetNumNames(BinaryenExpressionRef expr);
-// Gets the name at the specified index within the specified `Switch` expression.
const char* BinaryenSwitchGetName(BinaryenExpressionRef expr, BinaryenIndex index);
-// Gets the default name of the specified `Switch` expression.
const char* BinaryenSwitchGetDefaultName(BinaryenExpressionRef expr);
-// Gets the nested condition expression within the specified `Switch` expression.
BinaryenExpressionRef BinaryenSwitchGetCondition(BinaryenExpressionRef expr);
-// Gets the nested value expression within the specifiedd `Switch` expression. May be `NULL`.
BinaryenExpressionRef BinaryenSwitchGetValue(BinaryenExpressionRef expr);
-// Gets the name of the target of the specified `Call` expression.
const char* BinaryenCallGetTarget(BinaryenExpressionRef expr);
-// Gets the number of nested operand expressions within the specified `Call` expression.
BinaryenIndex BinaryenCallGetNumOperands(BinaryenExpressionRef expr);
-// Gets the nested operand expression at the specified index within the specified `Call` expression.
BinaryenExpressionRef BinaryenCallGetOperand(BinaryenExpressionRef expr, BinaryenIndex index);
-// Gets the nested target expression of the specified `CallIndirect` expression.
BinaryenExpressionRef BinaryenCallIndirectGetTarget(BinaryenExpressionRef expr);
-// Gets the number of nested operand expressions within the specified `CallIndirect` expression.
BinaryenIndex BinaryenCallIndirectGetNumOperands(BinaryenExpressionRef expr);
-// Gets the nested operand expression at the specified index within the specified `CallIndirect` expression.
BinaryenExpressionRef BinaryenCallIndirectGetOperand(BinaryenExpressionRef expr, BinaryenIndex index);
-// Gets the index of the specified `GetLocal` expression.
BinaryenIndex BinaryenGetLocalGetIndex(BinaryenExpressionRef expr);
-// Tests if the specified `SetLocal` expression performs a `tee_local` instead of a `set_local`.
int BinaryenSetLocalIsTee(BinaryenExpressionRef expr);
-// Gets the index of the specified `SetLocal` expression.
BinaryenIndex BinaryenSetLocalGetIndex(BinaryenExpressionRef expr);
-// Gets the nested value expression within the specified `SetLocal` expression.
BinaryenExpressionRef BinaryenSetLocalGetValue(BinaryenExpressionRef expr);
-// Gets the name of the specified `GetGlobal` expression.
const char* BinaryenGetGlobalGetName(BinaryenExpressionRef expr);
-// Gets the name of the specified `SetGlobal` expression.
const char* BinaryenSetGlobalGetName(BinaryenExpressionRef expr);
-// Gets the nested value expression within the specified `SetLocal` expression.
BinaryenExpressionRef BinaryenSetGlobalGetValue(BinaryenExpressionRef expr);
-// Gets the operator of the specified `Host` expression.
BinaryenOp BinaryenHostGetOp(BinaryenExpressionRef expr);
-// Gets the name operand of the specified `Host` expression. May be `NULL`.
const char* BinaryenHostGetNameOperand(BinaryenExpressionRef expr);
-// Gets the number of nested operand expressions within the specified `Host` expression.
BinaryenIndex BinaryenHostGetNumOperands(BinaryenExpressionRef expr);
-// Gets the nested operand expression at the specified index within the specified `Host` expression.
BinaryenExpressionRef BinaryenHostGetOperand(BinaryenExpressionRef expr, BinaryenIndex index);
-// Tests if the specified `Load` expression is atomic.
int BinaryenLoadIsAtomic(BinaryenExpressionRef expr);
-// Tests if the specified `Load` expression is signed.
int BinaryenLoadIsSigned(BinaryenExpressionRef expr);
-// Gets the offset of the specified `Load` expression.
uint32_t BinaryenLoadGetOffset(BinaryenExpressionRef expr);
-// Gets the byte size of the specified `Load` expression.
uint32_t BinaryenLoadGetBytes(BinaryenExpressionRef expr);
-// Gets the alignment of the specified `Load` expression.
uint32_t BinaryenLoadGetAlign(BinaryenExpressionRef expr);
-// Gets the nested pointer expression within the specified `Load` expression.
BinaryenExpressionRef BinaryenLoadGetPtr(BinaryenExpressionRef expr);
-// Tests if the specified `Store` expression is atomic.
int BinaryenStoreIsAtomic(BinaryenExpressionRef expr);
-// Gets the byte size of the specified `Store` expression.
uint32_t BinaryenStoreGetBytes(BinaryenExpressionRef expr);
-// Gets the offset of the specified store expression.
uint32_t BinaryenStoreGetOffset(BinaryenExpressionRef expr);
-// Gets the alignment of the specified `Store` expression.
uint32_t BinaryenStoreGetAlign(BinaryenExpressionRef expr);
-// Gets the nested pointer expression within the specified `Store` expression.
BinaryenExpressionRef BinaryenStoreGetPtr(BinaryenExpressionRef expr);
-// Gets the nested value expression within the specified `Store` expression.
BinaryenExpressionRef BinaryenStoreGetValue(BinaryenExpressionRef expr);
-// Gets the 32-bit integer value of the specified `Const` expression.
int32_t BinaryenConstGetValueI32(BinaryenExpressionRef expr);
-// Gets the 64-bit integer value of the specified `Const` expression.
int64_t BinaryenConstGetValueI64(BinaryenExpressionRef expr);
-// Gets the low 32-bits of a 64-bit integer value of the specified `Const` expression. Useful where I64 returning exports are illegal, i.e. binaryen.js.
int32_t BinaryenConstGetValueI64Low(BinaryenExpressionRef expr);
-// Gets the high 32-bits of a 64-bit integer value of the specified `Const` expression. Useful where I64 returning exports are illegal, i.e. binaryen.js.
int32_t BinaryenConstGetValueI64High(BinaryenExpressionRef expr);
-// Gets the 32-bit float value of the specified `Const` expression.
float BinaryenConstGetValueF32(BinaryenExpressionRef expr);
-// Gets the 64-bit float value of the specified `Const` expression.
double BinaryenConstGetValueF64(BinaryenExpressionRef expr);
-// Gets the operator of the specified `Unary` expression.
BinaryenOp BinaryenUnaryGetOp(BinaryenExpressionRef expr);
-// Gets the nested value expression within the specified `Unary` expression.
BinaryenExpressionRef BinaryenUnaryGetValue(BinaryenExpressionRef expr);
-// Gets the operator of the specified `Binary` expression.
BinaryenOp BinaryenBinaryGetOp(BinaryenExpressionRef expr);
-// Gets the nested left expression within the specified `Binary` expression.
BinaryenExpressionRef BinaryenBinaryGetLeft(BinaryenExpressionRef expr);
-// Gets the nested right expression within the specified `Binary` expression.
BinaryenExpressionRef BinaryenBinaryGetRight(BinaryenExpressionRef expr);
-// Gets the nested ifTrue expression within the specified `Select` expression.
BinaryenExpressionRef BinaryenSelectGetIfTrue(BinaryenExpressionRef expr);
-// Gets the nested ifFalse expression within the specified `Select` expression.
BinaryenExpressionRef BinaryenSelectGetIfFalse(BinaryenExpressionRef expr);
-// Gets the nested condition expression within the specified `Select` expression.
BinaryenExpressionRef BinaryenSelectGetCondition(BinaryenExpressionRef expr);
-// Gets the nested value expression within the specified `Drop` expression.
BinaryenExpressionRef BinaryenDropGetValue(BinaryenExpressionRef expr);
-// Gets the nested value expression within the specified `Return` expression.
BinaryenExpressionRef BinaryenReturnGetValue(BinaryenExpressionRef expr);
-// Gets the operator of the specified `AtomicRMW` expression.
BinaryenOp BinaryenAtomicRMWGetOp(BinaryenExpressionRef expr);
-// Gets the byte size of the specified `AtomicRMW` expression.
uint32_t BinaryenAtomicRMWGetBytes(BinaryenExpressionRef expr);
-// Gets the offset of the specified `AtomicRMW` expression.
uint32_t BinaryenAtomicRMWGetOffset(BinaryenExpressionRef expr);
-// Gets the nested pointer expression within the specified `AtomicRMW` expression.
BinaryenExpressionRef BinaryenAtomicRMWGetPtr(BinaryenExpressionRef expr);
-// Gets the nested value expression within the specified `AtomicRMW` expression.
BinaryenExpressionRef BinaryenAtomicRMWGetValue(BinaryenExpressionRef expr);
-// Gets the byte size of the specified `AtomicCmpxchg` expression.
uint32_t BinaryenAtomicCmpxchgGetBytes(BinaryenExpressionRef expr);
-// Gets the offset of the specified `AtomicCmpxchg` expression.
uint32_t BinaryenAtomicCmpxchgGetOffset(BinaryenExpressionRef expr);
-// Gets the nested pointer expression within the specified `AtomicCmpxchg` expression.
BinaryenExpressionRef BinaryenAtomicCmpxchgGetPtr(BinaryenExpressionRef expr);
-// Gets the nested expected value expression within the specified `AtomicCmpxchg` expression.
BinaryenExpressionRef BinaryenAtomicCmpxchgGetExpected(BinaryenExpressionRef expr);
-// Gets the nested replacement value expression within the specified `AtomicCmpxchg` expression.
BinaryenExpressionRef BinaryenAtomicCmpxchgGetReplacement(BinaryenExpressionRef expr);
-// Gets the nested pointer expression within the specified `AtomicWait` expression.
BinaryenExpressionRef BinaryenAtomicWaitGetPtr(BinaryenExpressionRef expr);
-// Gets the nested expected value expression within the specified `AtomicWait` expression.
BinaryenExpressionRef BinaryenAtomicWaitGetExpected(BinaryenExpressionRef expr);
-// Gets the nested timeout expression within the specified `AtomicWait` expression.
BinaryenExpressionRef BinaryenAtomicWaitGetTimeout(BinaryenExpressionRef expr);
-// Gets the expected type of the specified `AtomicWait` expression.
BinaryenType BinaryenAtomicWaitGetExpectedType(BinaryenExpressionRef expr);
-// Gets the nested pointer expression within the specified `AtomicWake` expression.
BinaryenExpressionRef BinaryenAtomicWakeGetPtr(BinaryenExpressionRef expr);
-// Gets the nested wake count expression within the specified `AtomicWake` expression.
BinaryenExpressionRef BinaryenAtomicWakeGetWakeCount(BinaryenExpressionRef expr);
+BinaryenOp BinaryenSIMDExtractGetOp(BinaryenExpressionRef expr);
+BinaryenExpressionRef BinaryenSIMDExtractGetVec(BinaryenExpressionRef expr);
+uint8_t BinaryenSIMDExtractGetIdx(BinaryenExpressionRef expr);
+
+BinaryenOp BinaryenSIMDReplaceGetOp(BinaryenExpressionRef expr);
+BinaryenExpressionRef BinaryenSIMDReplaceGetVec(BinaryenExpressionRef expr);
+uint8_t BinaryenSIMDReplaceGetIdx(BinaryenExpressionRef expr);
+BinaryenExpressionRef BinaryenSIMDReplaceGetValue(BinaryenExpressionRef expr);
+
+BinaryenExpressionRef BinaryenSIMDShuffleGetLeft(BinaryenExpressionRef expr);
+BinaryenExpressionRef BinaryenSIMDShuffleGetRight(BinaryenExpressionRef expr);
+void BinaryenSIMDShuffleGetMask(BinaryenExpressionRef expr, uint8_t *mask);
+
+BinaryenExpressionRef BinaryenSIMDBitselectGetLeft(BinaryenExpressionRef expr);
+BinaryenExpressionRef BinaryenSIMDBitselectGetRight(BinaryenExpressionRef expr);
+BinaryenExpressionRef BinaryenSIMDBitselectGetCond(BinaryenExpressionRef expr);
+
+BinaryenOp BinaryenSIMDShiftGetOp(BinaryenExpressionRef expr);
+BinaryenExpressionRef BinaryenSIMDShiftGetVec(BinaryenExpressionRef expr);
+BinaryenExpressionRef BinaryenSIMDShiftGetShift(BinaryenExpressionRef expr);
+
+
// Functions
typedef void* BinaryenFunctionRef;
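
Likewise, a small sketch of how the new inspection getters declared above pair with the id predicates; the expression is assumed to have been built or obtained elsewhere, and only functions declared in this header are used:

#include <stdint.h>
#include "binaryen-c.h"

// Reads back the pieces of a SIMD shuffle expression using the new getters.
static void inspectShuffle(BinaryenExpressionRef expr, uint8_t maskOut[16]) {
  if (BinaryenExpressionGetId(expr) != BinaryenSIMDShuffleId()) {
    return;  // not a shuffle expression
  }
  BinaryenExpressionRef left = BinaryenSIMDShuffleGetLeft(expr);
  BinaryenExpressionRef right = BinaryenSIMDShuffleGetRight(expr);
  BinaryenSIMDShuffleGetMask(expr, maskOut);  // copies the 16 mask bytes out
  (void)left;
  (void)right;
}
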
diff --git a/src/dataflow/graph.h b/src/dataflow/graph.h
index 7f5654f8d..9a30b7576 100644
--- a/src/dataflow/graph.h
+++ b/src/dataflow/graph.h
@@ -153,7 +153,7 @@ struct Graph : public UnifiedExpressionVisitor<Graph, Node*> {
}
Node* makeZero(wasm::Type type) {
- return makeConst(LiteralUtils::makeLiteralZero(type));
+ return makeConst(Literal::makeZero(type));
}
// Add a new node to our list of owned nodes.
diff --git a/src/gen-s-parser.inc b/src/gen-s-parser.inc
index 16399bfba..77e505260 100644
--- a/src/gen-s-parser.inc
+++ b/src/gen-s-parser.inc
@@ -60,311 +60,531 @@ switch (op[0]) {
case 'f': {
switch (op[1]) {
case '3': {
- switch (op[4]) {
- case 'a': {
- switch (op[5]) {
- case 'b':
- if (strcmp(op, "f32.abs") == 0) return makeUnary(s, UnaryOp::AbsFloat32);
- goto parse_error;
- case 'd':
- if (strcmp(op, "f32.add") == 0) return makeBinary(s, BinaryOp::AddFloat32);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'c': {
- switch (op[5]) {
- case 'e':
- if (strcmp(op, "f32.ceil") == 0) return makeUnary(s, UnaryOp::CeilFloat32);
- goto parse_error;
- case 'o': {
- switch (op[6]) {
- case 'n': {
- switch (op[7]) {
- case 's':
- if (strcmp(op, "f32.const") == 0) return makeConst(s, f32);
- goto parse_error;
- case 'v': {
- switch (op[12]) {
- case 's': {
- switch (op[15]) {
- case '3':
- if (strcmp(op, "f32.convert_s/i32") == 0) return makeUnary(s, UnaryOp::ConvertSInt32ToFloat32);
- goto parse_error;
- case '6':
- if (strcmp(op, "f32.convert_s/i64") == 0) return makeUnary(s, UnaryOp::ConvertSInt64ToFloat32);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'u': {
- switch (op[15]) {
- case '3':
- if (strcmp(op, "f32.convert_u/i32") == 0) return makeUnary(s, UnaryOp::ConvertUInt32ToFloat32);
- goto parse_error;
- case '6':
- if (strcmp(op, "f32.convert_u/i64") == 0) return makeUnary(s, UnaryOp::ConvertUInt64ToFloat32);
- goto parse_error;
+ switch (op[3]) {
+ case '.': {
+ switch (op[4]) {
+ case 'a': {
+ switch (op[5]) {
+ case 'b':
+ if (strcmp(op, "f32.abs") == 0) return makeUnary(s, UnaryOp::AbsFloat32);
+ goto parse_error;
+ case 'd':
+ if (strcmp(op, "f32.add") == 0) return makeBinary(s, BinaryOp::AddFloat32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'c': {
+ switch (op[5]) {
+ case 'e':
+ if (strcmp(op, "f32.ceil") == 0) return makeUnary(s, UnaryOp::CeilFloat32);
+ goto parse_error;
+ case 'o': {
+ switch (op[6]) {
+ case 'n': {
+ switch (op[7]) {
+ case 's':
+ if (strcmp(op, "f32.const") == 0) return makeConst(s, f32);
+ goto parse_error;
+ case 'v': {
+ switch (op[12]) {
+ case 's': {
+ switch (op[15]) {
+ case '3':
+ if (strcmp(op, "f32.convert_s/i32") == 0) return makeUnary(s, UnaryOp::ConvertSInt32ToFloat32);
+ goto parse_error;
+ case '6':
+ if (strcmp(op, "f32.convert_s/i64") == 0) return makeUnary(s, UnaryOp::ConvertSInt64ToFloat32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'u': {
+ switch (op[15]) {
+ case '3':
+ if (strcmp(op, "f32.convert_u/i32") == 0) return makeUnary(s, UnaryOp::ConvertUInt32ToFloat32);
+ goto parse_error;
+ case '6':
+ if (strcmp(op, "f32.convert_u/i64") == 0) return makeUnary(s, UnaryOp::ConvertUInt64ToFloat32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
default: goto parse_error;
}
}
default: goto parse_error;
}
}
+ case 'p':
+ if (strcmp(op, "f32.copysign") == 0) return makeBinary(s, BinaryOp::CopySignFloat32);
+ goto parse_error;
default: goto parse_error;
}
}
- case 'p':
- if (strcmp(op, "f32.copysign") == 0) return makeBinary(s, BinaryOp::CopySignFloat32);
+ default: goto parse_error;
+ }
+ }
+ case 'd': {
+ switch (op[5]) {
+ case 'e':
+ if (strcmp(op, "f32.demote/f64") == 0) return makeUnary(s, UnaryOp::DemoteFloat64);
+ goto parse_error;
+ case 'i':
+ if (strcmp(op, "f32.div") == 0) return makeBinary(s, BinaryOp::DivFloat32);
goto parse_error;
default: goto parse_error;
}
}
- default: goto parse_error;
- }
- }
- case 'd': {
- switch (op[5]) {
case 'e':
- if (strcmp(op, "f32.demote/f64") == 0) return makeUnary(s, UnaryOp::DemoteFloat64);
+ if (strcmp(op, "f32.eq") == 0) return makeBinary(s, BinaryOp::EqFloat32);
goto parse_error;
- case 'i':
- if (strcmp(op, "f32.div") == 0) return makeBinary(s, BinaryOp::DivFloat32);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'e':
- if (strcmp(op, "f32.eq") == 0) return makeBinary(s, BinaryOp::EqFloat32);
- goto parse_error;
- case 'f':
- if (strcmp(op, "f32.floor") == 0) return makeUnary(s, UnaryOp::FloorFloat32);
- goto parse_error;
- case 'g': {
- switch (op[5]) {
- case 'e':
- if (strcmp(op, "f32.ge") == 0) return makeBinary(s, BinaryOp::GeFloat32);
- goto parse_error;
- case 't':
- if (strcmp(op, "f32.gt") == 0) return makeBinary(s, BinaryOp::GtFloat32);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'l': {
- switch (op[5]) {
- case 'e':
- if (strcmp(op, "f32.le") == 0) return makeBinary(s, BinaryOp::LeFloat32);
+ case 'f':
+ if (strcmp(op, "f32.floor") == 0) return makeUnary(s, UnaryOp::FloorFloat32);
goto parse_error;
- case 'o':
- if (strcmp(op, "f32.load") == 0) return makeLoad(s, f32, /*isAtomic=*/false);
+ case 'g': {
+ switch (op[5]) {
+ case 'e':
+ if (strcmp(op, "f32.ge") == 0) return makeBinary(s, BinaryOp::GeFloat32);
+ goto parse_error;
+ case 't':
+ if (strcmp(op, "f32.gt") == 0) return makeBinary(s, BinaryOp::GtFloat32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'l': {
+ switch (op[5]) {
+ case 'e':
+ if (strcmp(op, "f32.le") == 0) return makeBinary(s, BinaryOp::LeFloat32);
+ goto parse_error;
+ case 'o':
+ if (strcmp(op, "f32.load") == 0) return makeLoad(s, f32, /*isAtomic=*/false);
+ goto parse_error;
+ case 't':
+ if (strcmp(op, "f32.lt") == 0) return makeBinary(s, BinaryOp::LtFloat32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'm': {
+ switch (op[5]) {
+ case 'a':
+ if (strcmp(op, "f32.max") == 0) return makeBinary(s, BinaryOp::MaxFloat32);
+ goto parse_error;
+ case 'i':
+ if (strcmp(op, "f32.min") == 0) return makeBinary(s, BinaryOp::MinFloat32);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "f32.mul") == 0) return makeBinary(s, BinaryOp::MulFloat32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'n': {
+ switch (op[6]) {
+ case '\0':
+ if (strcmp(op, "f32.ne") == 0) return makeBinary(s, BinaryOp::NeFloat32);
+ goto parse_error;
+ case 'a':
+ if (strcmp(op, "f32.nearest") == 0) return makeUnary(s, UnaryOp::NearestFloat32);
+ goto parse_error;
+ case 'g':
+ if (strcmp(op, "f32.neg") == 0) return makeUnary(s, UnaryOp::NegFloat32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'r':
+ if (strcmp(op, "f32.reinterpret/i32") == 0) return makeUnary(s, UnaryOp::ReinterpretInt32);
goto parse_error;
+ case 's': {
+ switch (op[5]) {
+ case 'q':
+ if (strcmp(op, "f32.sqrt") == 0) return makeUnary(s, UnaryOp::SqrtFloat32);
+ goto parse_error;
+ case 't':
+ if (strcmp(op, "f32.store") == 0) return makeStore(s, f32, /*isAtomic=*/false);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "f32.sub") == 0) return makeBinary(s, BinaryOp::SubFloat32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
case 't':
- if (strcmp(op, "f32.lt") == 0) return makeBinary(s, BinaryOp::LtFloat32);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'm': {
- switch (op[5]) {
- case 'a':
- if (strcmp(op, "f32.max") == 0) return makeBinary(s, BinaryOp::MaxFloat32);
- goto parse_error;
- case 'i':
- if (strcmp(op, "f32.min") == 0) return makeBinary(s, BinaryOp::MinFloat32);
- goto parse_error;
- case 'u':
- if (strcmp(op, "f32.mul") == 0) return makeBinary(s, BinaryOp::MulFloat32);
+ if (strcmp(op, "f32.trunc") == 0) return makeUnary(s, UnaryOp::TruncFloat32);
goto parse_error;
default: goto parse_error;
}
}
- case 'n': {
+ case 'x': {
switch (op[6]) {
- case '\0':
- if (strcmp(op, "f32.ne") == 0) return makeBinary(s, BinaryOp::NeFloat32);
- goto parse_error;
- case 'a':
- if (strcmp(op, "f32.nearest") == 0) return makeUnary(s, UnaryOp::NearestFloat32);
- goto parse_error;
- case 'g':
- if (strcmp(op, "f32.neg") == 0) return makeUnary(s, UnaryOp::NegFloat32);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'r':
- if (strcmp(op, "f32.reinterpret/i32") == 0) return makeUnary(s, UnaryOp::ReinterpretInt32);
- goto parse_error;
- case 's': {
- switch (op[5]) {
- case 'q':
- if (strcmp(op, "f32.sqrt") == 0) return makeUnary(s, UnaryOp::SqrtFloat32);
- goto parse_error;
- case 't':
- if (strcmp(op, "f32.store") == 0) return makeStore(s, f32, /*isAtomic=*/false);
+ case 'a': {
+ switch (op[7]) {
+ case 'b':
+ if (strcmp(op, "f32x4.abs") == 0) return makeUnary(s, UnaryOp::AbsVecF32x4);
+ goto parse_error;
+ case 'd':
+ if (strcmp(op, "f32x4.add") == 0) return makeBinary(s, BinaryOp::AddVecF32x4);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'c': {
+ switch (op[14]) {
+ case 's':
+ if (strcmp(op, "f32x4.convert_s/i32x4") == 0) return makeUnary(s, UnaryOp::ConvertSVecI32x4ToVecF32x4);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "f32x4.convert_u/i32x4") == 0) return makeUnary(s, UnaryOp::ConvertUVecI32x4ToVecF32x4);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'd':
+ if (strcmp(op, "f32x4.div") == 0) return makeBinary(s, BinaryOp::DivVecF32x4);
goto parse_error;
- case 'u':
- if (strcmp(op, "f32.sub") == 0) return makeBinary(s, BinaryOp::SubFloat32);
+ case 'e': {
+ switch (op[7]) {
+ case 'q':
+ if (strcmp(op, "f32x4.eq") == 0) return makeBinary(s, BinaryOp::EqVecF32x4);
+ goto parse_error;
+ case 'x':
+ if (strcmp(op, "f32x4.extract_lane") == 0) return makeSIMDExtract(s, SIMDExtractOp::ExtractLaneVecF32x4, 4);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'g': {
+ switch (op[7]) {
+ case 'e':
+ if (strcmp(op, "f32x4.ge") == 0) return makeBinary(s, BinaryOp::GeVecF32x4);
+ goto parse_error;
+ case 't':
+ if (strcmp(op, "f32x4.gt") == 0) return makeBinary(s, BinaryOp::GtVecF32x4);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'l': {
+ switch (op[7]) {
+ case 'e':
+ if (strcmp(op, "f32x4.le") == 0) return makeBinary(s, BinaryOp::LeVecF32x4);
+ goto parse_error;
+ case 't':
+ if (strcmp(op, "f32x4.lt") == 0) return makeBinary(s, BinaryOp::LtVecF32x4);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'm': {
+ switch (op[7]) {
+ case 'a':
+ if (strcmp(op, "f32x4.max") == 0) return makeBinary(s, BinaryOp::MaxVecF32x4);
+ goto parse_error;
+ case 'i':
+ if (strcmp(op, "f32x4.min") == 0) return makeBinary(s, BinaryOp::MinVecF32x4);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "f32x4.mul") == 0) return makeBinary(s, BinaryOp::MulVecF32x4);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'n': {
+ switch (op[8]) {
+ case '\0':
+ if (strcmp(op, "f32x4.ne") == 0) return makeBinary(s, BinaryOp::NeVecF32x4);
+ goto parse_error;
+ case 'g':
+ if (strcmp(op, "f32x4.neg") == 0) return makeUnary(s, UnaryOp::NegVecF32x4);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'r':
+ if (strcmp(op, "f32x4.replace_lane") == 0) return makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecF32x4, 4);
goto parse_error;
+ case 's': {
+ switch (op[7]) {
+ case 'p':
+ if (strcmp(op, "f32x4.splat") == 0) return makeUnary(s, UnaryOp::SplatVecF32x4);
+ goto parse_error;
+ case 'q':
+ if (strcmp(op, "f32x4.sqrt") == 0) return makeUnary(s, UnaryOp::SqrtVecF32x4);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "f32x4.sub") == 0) return makeBinary(s, BinaryOp::SubVecF32x4);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
default: goto parse_error;
}
}
- case 't':
- if (strcmp(op, "f32.trunc") == 0) return makeUnary(s, UnaryOp::TruncFloat32);
- goto parse_error;
default: goto parse_error;
}
}
case '6': {
- switch (op[4]) {
- case 'a': {
- switch (op[5]) {
- case 'b':
- if (strcmp(op, "f64.abs") == 0) return makeUnary(s, UnaryOp::AbsFloat64);
- goto parse_error;
- case 'd':
- if (strcmp(op, "f64.add") == 0) return makeBinary(s, BinaryOp::AddFloat64);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'c': {
- switch (op[5]) {
- case 'e':
- if (strcmp(op, "f64.ceil") == 0) return makeUnary(s, UnaryOp::CeilFloat64);
- goto parse_error;
- case 'o': {
- switch (op[6]) {
- case 'n': {
- switch (op[7]) {
- case 's':
- if (strcmp(op, "f64.const") == 0) return makeConst(s, f64);
- goto parse_error;
- case 'v': {
- switch (op[12]) {
- case 's': {
- switch (op[15]) {
- case '3':
- if (strcmp(op, "f64.convert_s/i32") == 0) return makeUnary(s, UnaryOp::ConvertSInt32ToFloat64);
- goto parse_error;
- case '6':
- if (strcmp(op, "f64.convert_s/i64") == 0) return makeUnary(s, UnaryOp::ConvertSInt64ToFloat64);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'u': {
- switch (op[15]) {
- case '3':
- if (strcmp(op, "f64.convert_u/i32") == 0) return makeUnary(s, UnaryOp::ConvertUInt32ToFloat64);
- goto parse_error;
- case '6':
- if (strcmp(op, "f64.convert_u/i64") == 0) return makeUnary(s, UnaryOp::ConvertUInt64ToFloat64);
- goto parse_error;
+ switch (op[3]) {
+ case '.': {
+ switch (op[4]) {
+ case 'a': {
+ switch (op[5]) {
+ case 'b':
+ if (strcmp(op, "f64.abs") == 0) return makeUnary(s, UnaryOp::AbsFloat64);
+ goto parse_error;
+ case 'd':
+ if (strcmp(op, "f64.add") == 0) return makeBinary(s, BinaryOp::AddFloat64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'c': {
+ switch (op[5]) {
+ case 'e':
+ if (strcmp(op, "f64.ceil") == 0) return makeUnary(s, UnaryOp::CeilFloat64);
+ goto parse_error;
+ case 'o': {
+ switch (op[6]) {
+ case 'n': {
+ switch (op[7]) {
+ case 's':
+ if (strcmp(op, "f64.const") == 0) return makeConst(s, f64);
+ goto parse_error;
+ case 'v': {
+ switch (op[12]) {
+ case 's': {
+ switch (op[15]) {
+ case '3':
+ if (strcmp(op, "f64.convert_s/i32") == 0) return makeUnary(s, UnaryOp::ConvertSInt32ToFloat64);
+ goto parse_error;
+ case '6':
+ if (strcmp(op, "f64.convert_s/i64") == 0) return makeUnary(s, UnaryOp::ConvertSInt64ToFloat64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'u': {
+ switch (op[15]) {
+ case '3':
+ if (strcmp(op, "f64.convert_u/i32") == 0) return makeUnary(s, UnaryOp::ConvertUInt32ToFloat64);
+ goto parse_error;
+ case '6':
+ if (strcmp(op, "f64.convert_u/i64") == 0) return makeUnary(s, UnaryOp::ConvertUInt64ToFloat64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
default: goto parse_error;
}
}
default: goto parse_error;
}
}
+ case 'p':
+ if (strcmp(op, "f64.copysign") == 0) return makeBinary(s, BinaryOp::CopySignFloat64);
+ goto parse_error;
default: goto parse_error;
}
}
- case 'p':
- if (strcmp(op, "f64.copysign") == 0) return makeBinary(s, BinaryOp::CopySignFloat64);
- goto parse_error;
default: goto parse_error;
}
}
- default: goto parse_error;
- }
- }
- case 'd':
- if (strcmp(op, "f64.div") == 0) return makeBinary(s, BinaryOp::DivFloat64);
- goto parse_error;
- case 'e':
- if (strcmp(op, "f64.eq") == 0) return makeBinary(s, BinaryOp::EqFloat64);
- goto parse_error;
- case 'f':
- if (strcmp(op, "f64.floor") == 0) return makeUnary(s, UnaryOp::FloorFloat64);
- goto parse_error;
- case 'g': {
- switch (op[5]) {
- case 'e':
- if (strcmp(op, "f64.ge") == 0) return makeBinary(s, BinaryOp::GeFloat64);
- goto parse_error;
- case 't':
- if (strcmp(op, "f64.gt") == 0) return makeBinary(s, BinaryOp::GtFloat64);
+ case 'd':
+ if (strcmp(op, "f64.div") == 0) return makeBinary(s, BinaryOp::DivFloat64);
goto parse_error;
- default: goto parse_error;
- }
- }
- case 'l': {
- switch (op[5]) {
case 'e':
- if (strcmp(op, "f64.le") == 0) return makeBinary(s, BinaryOp::LeFloat64);
+ if (strcmp(op, "f64.eq") == 0) return makeBinary(s, BinaryOp::EqFloat64);
goto parse_error;
- case 'o':
- if (strcmp(op, "f64.load") == 0) return makeLoad(s, f64, /*isAtomic=*/false);
- goto parse_error;
- case 't':
- if (strcmp(op, "f64.lt") == 0) return makeBinary(s, BinaryOp::LtFloat64);
+ case 'f':
+ if (strcmp(op, "f64.floor") == 0) return makeUnary(s, UnaryOp::FloorFloat64);
goto parse_error;
- default: goto parse_error;
- }
- }
- case 'm': {
- switch (op[5]) {
- case 'a':
- if (strcmp(op, "f64.max") == 0) return makeBinary(s, BinaryOp::MaxFloat64);
+ case 'g': {
+ switch (op[5]) {
+ case 'e':
+ if (strcmp(op, "f64.ge") == 0) return makeBinary(s, BinaryOp::GeFloat64);
+ goto parse_error;
+ case 't':
+ if (strcmp(op, "f64.gt") == 0) return makeBinary(s, BinaryOp::GtFloat64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'l': {
+ switch (op[5]) {
+ case 'e':
+ if (strcmp(op, "f64.le") == 0) return makeBinary(s, BinaryOp::LeFloat64);
+ goto parse_error;
+ case 'o':
+ if (strcmp(op, "f64.load") == 0) return makeLoad(s, f64, /*isAtomic=*/false);
+ goto parse_error;
+ case 't':
+ if (strcmp(op, "f64.lt") == 0) return makeBinary(s, BinaryOp::LtFloat64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'm': {
+ switch (op[5]) {
+ case 'a':
+ if (strcmp(op, "f64.max") == 0) return makeBinary(s, BinaryOp::MaxFloat64);
+ goto parse_error;
+ case 'i':
+ if (strcmp(op, "f64.min") == 0) return makeBinary(s, BinaryOp::MinFloat64);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "f64.mul") == 0) return makeBinary(s, BinaryOp::MulFloat64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'n': {
+ switch (op[6]) {
+ case '\0':
+ if (strcmp(op, "f64.ne") == 0) return makeBinary(s, BinaryOp::NeFloat64);
+ goto parse_error;
+ case 'a':
+ if (strcmp(op, "f64.nearest") == 0) return makeUnary(s, UnaryOp::NearestFloat64);
+ goto parse_error;
+ case 'g':
+ if (strcmp(op, "f64.neg") == 0) return makeUnary(s, UnaryOp::NegFloat64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'p':
+ if (strcmp(op, "f64.promote/f32") == 0) return makeUnary(s, UnaryOp::PromoteFloat32);
goto parse_error;
- case 'i':
- if (strcmp(op, "f64.min") == 0) return makeBinary(s, BinaryOp::MinFloat64);
+ case 'r':
+ if (strcmp(op, "f64.reinterpret/i64") == 0) return makeUnary(s, UnaryOp::ReinterpretInt64);
goto parse_error;
- case 'u':
- if (strcmp(op, "f64.mul") == 0) return makeBinary(s, BinaryOp::MulFloat64);
+ case 's': {
+ switch (op[5]) {
+ case 'q':
+ if (strcmp(op, "f64.sqrt") == 0) return makeUnary(s, UnaryOp::SqrtFloat64);
+ goto parse_error;
+ case 't':
+ if (strcmp(op, "f64.store") == 0) return makeStore(s, f64, /*isAtomic=*/false);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "f64.sub") == 0) return makeBinary(s, BinaryOp::SubFloat64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 't':
+ if (strcmp(op, "f64.trunc") == 0) return makeUnary(s, UnaryOp::TruncFloat64);
goto parse_error;
default: goto parse_error;
}
}
- case 'n': {
+ case 'x': {
switch (op[6]) {
- case '\0':
- if (strcmp(op, "f64.ne") == 0) return makeBinary(s, BinaryOp::NeFloat64);
- goto parse_error;
- case 'a':
- if (strcmp(op, "f64.nearest") == 0) return makeUnary(s, UnaryOp::NearestFloat64);
- goto parse_error;
- case 'g':
- if (strcmp(op, "f64.neg") == 0) return makeUnary(s, UnaryOp::NegFloat64);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'p':
- if (strcmp(op, "f64.promote/f32") == 0) return makeUnary(s, UnaryOp::PromoteFloat32);
- goto parse_error;
- case 'r':
- if (strcmp(op, "f64.reinterpret/i64") == 0) return makeUnary(s, UnaryOp::ReinterpretInt64);
- goto parse_error;
- case 's': {
- switch (op[5]) {
- case 'q':
- if (strcmp(op, "f64.sqrt") == 0) return makeUnary(s, UnaryOp::SqrtFloat64);
- goto parse_error;
- case 't':
- if (strcmp(op, "f64.store") == 0) return makeStore(s, f64, /*isAtomic=*/false);
+ case 'a': {
+ switch (op[7]) {
+ case 'b':
+ if (strcmp(op, "f64x2.abs") == 0) return makeUnary(s, UnaryOp::AbsVecF64x2);
+ goto parse_error;
+ case 'd':
+ if (strcmp(op, "f64x2.add") == 0) return makeBinary(s, BinaryOp::AddVecF64x2);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'c': {
+ switch (op[14]) {
+ case 's':
+ if (strcmp(op, "f64x2.convert_s/i64x2") == 0) return makeUnary(s, UnaryOp::ConvertSVecI64x2ToVecF64x2);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "f64x2.convert_u/i64x2") == 0) return makeUnary(s, UnaryOp::ConvertUVecI64x2ToVecF64x2);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'd':
+ if (strcmp(op, "f64x2.div") == 0) return makeBinary(s, BinaryOp::DivVecF64x2);
goto parse_error;
- case 'u':
- if (strcmp(op, "f64.sub") == 0) return makeBinary(s, BinaryOp::SubFloat64);
+ case 'e': {
+ switch (op[7]) {
+ case 'q':
+ if (strcmp(op, "f64x2.eq") == 0) return makeBinary(s, BinaryOp::EqVecF64x2);
+ goto parse_error;
+ case 'x':
+ if (strcmp(op, "f64x2.extract_lane") == 0) return makeSIMDExtract(s, SIMDExtractOp::ExtractLaneVecF64x2, 2);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'g': {
+ switch (op[7]) {
+ case 'e':
+ if (strcmp(op, "f64x2.ge") == 0) return makeBinary(s, BinaryOp::GeVecF64x2);
+ goto parse_error;
+ case 't':
+ if (strcmp(op, "f64x2.gt") == 0) return makeBinary(s, BinaryOp::GtVecF64x2);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'l': {
+ switch (op[7]) {
+ case 'e':
+ if (strcmp(op, "f64x2.le") == 0) return makeBinary(s, BinaryOp::LeVecF64x2);
+ goto parse_error;
+ case 't':
+ if (strcmp(op, "f64x2.lt") == 0) return makeBinary(s, BinaryOp::LtVecF64x2);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'm': {
+ switch (op[7]) {
+ case 'a':
+ if (strcmp(op, "f64x2.max") == 0) return makeBinary(s, BinaryOp::MaxVecF64x2);
+ goto parse_error;
+ case 'i':
+ if (strcmp(op, "f64x2.min") == 0) return makeBinary(s, BinaryOp::MinVecF64x2);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "f64x2.mul") == 0) return makeBinary(s, BinaryOp::MulVecF64x2);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'n': {
+ switch (op[8]) {
+ case '\0':
+ if (strcmp(op, "f64x2.ne") == 0) return makeBinary(s, BinaryOp::NeVecF64x2);
+ goto parse_error;
+ case 'g':
+ if (strcmp(op, "f64x2.neg") == 0) return makeUnary(s, UnaryOp::NegVecF64x2);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'r':
+ if (strcmp(op, "f64x2.replace_lane") == 0) return makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecF64x2, 2);
goto parse_error;
+ case 's': {
+ switch (op[7]) {
+ case 'p':
+ if (strcmp(op, "f64x2.splat") == 0) return makeUnary(s, UnaryOp::SplatVecF64x2);
+ goto parse_error;
+ case 'q':
+ if (strcmp(op, "f64x2.sqrt") == 0) return makeUnary(s, UnaryOp::SqrtVecF64x2);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "f64x2.sub") == 0) return makeBinary(s, BinaryOp::SubVecF64x2);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
default: goto parse_error;
}
}
- case 't':
- if (strcmp(op, "f64.trunc") == 0) return makeUnary(s, UnaryOp::TruncFloat64);
- goto parse_error;
default: goto parse_error;
}
}
@@ -392,280 +612,410 @@ switch (op[0]) {
}
case 'i': {
switch (op[1]) {
- case '3': {
- switch (op[4]) {
+ case '1': {
+ switch (op[6]) {
case 'a': {
- switch (op[5]) {
- case 'd':
- if (strcmp(op, "i32.add") == 0) return makeBinary(s, BinaryOp::AddInt32);
+ switch (op[7]) {
+ case 'd': {
+ switch (op[9]) {
+ case '\0':
+ if (strcmp(op, "i16x8.add") == 0) return makeBinary(s, BinaryOp::AddVecI16x8);
+ goto parse_error;
+ case '_': {
+ switch (op[19]) {
+ case 's':
+ if (strcmp(op, "i16x8.add_saturate_s") == 0) return makeBinary(s, BinaryOp::AddSatSVecI16x8);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i16x8.add_saturate_u") == 0) return makeBinary(s, BinaryOp::AddSatUVecI16x8);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 'l':
+ if (strcmp(op, "i16x8.all_true") == 0) return makeUnary(s, UnaryOp::AllTrueVecI16x8);
goto parse_error;
case 'n':
- if (strcmp(op, "i32.and") == 0) return makeBinary(s, BinaryOp::AndInt32);
+ if (strcmp(op, "i16x8.any_true") == 0) return makeUnary(s, UnaryOp::AnyTrueVecI16x8);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'e': {
+ switch (op[7]) {
+ case 'q':
+ if (strcmp(op, "i16x8.eq") == 0) return makeBinary(s, BinaryOp::EqVecI16x8);
goto parse_error;
+ case 'x': {
+ switch (op[19]) {
+ case 's':
+ if (strcmp(op, "i16x8.extract_lane_s") == 0) return makeSIMDExtract(s, SIMDExtractOp::ExtractLaneSVecI16x8, 8);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i16x8.extract_lane_u") == 0) return makeSIMDExtract(s, SIMDExtractOp::ExtractLaneUVecI16x8, 8);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 'g': {
+ switch (op[7]) {
+ case 'e': {
+ switch (op[9]) {
+ case 's':
+ if (strcmp(op, "i16x8.ge_s") == 0) return makeBinary(s, BinaryOp::GeSVecI16x8);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i16x8.ge_u") == 0) return makeBinary(s, BinaryOp::GeUVecI16x8);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
case 't': {
- switch (op[11]) {
- case 'l': {
- switch (op[15]) {
- case '\0':
- if (strcmp(op, "i32.atomic.load") == 0) return makeLoad(s, i32, /*isAtomic=*/true);
+ switch (op[9]) {
+ case 's':
+ if (strcmp(op, "i16x8.gt_s") == 0) return makeBinary(s, BinaryOp::GtSVecI16x8);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i16x8.gt_u") == 0) return makeBinary(s, BinaryOp::GtUVecI16x8);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 'l': {
+ switch (op[7]) {
+ case 'e': {
+ switch (op[9]) {
+ case 's':
+ if (strcmp(op, "i16x8.le_s") == 0) return makeBinary(s, BinaryOp::LeSVecI16x8);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i16x8.le_u") == 0) return makeBinary(s, BinaryOp::LeUVecI16x8);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 't': {
+ switch (op[9]) {
+ case 's':
+ if (strcmp(op, "i16x8.lt_s") == 0) return makeBinary(s, BinaryOp::LtSVecI16x8);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i16x8.lt_u") == 0) return makeBinary(s, BinaryOp::LtUVecI16x8);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 'm':
+ if (strcmp(op, "i16x8.mul") == 0) return makeBinary(s, BinaryOp::MulVecI16x8);
+ goto parse_error;
+ case 'n': {
+ switch (op[8]) {
+ case '\0':
+ if (strcmp(op, "i16x8.ne") == 0) return makeBinary(s, BinaryOp::NeVecI16x8);
+ goto parse_error;
+ case 'g':
+ if (strcmp(op, "i16x8.neg") == 0) return makeUnary(s, UnaryOp::NegVecI16x8);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'r':
+ if (strcmp(op, "i16x8.replace_lane") == 0) return makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecI16x8, 8);
+ goto parse_error;
+ case 's': {
+ switch (op[7]) {
+ case 'h': {
+ switch (op[8]) {
+ case 'l':
+ if (strcmp(op, "i16x8.shl") == 0) return makeSIMDShift(s, SIMDShiftOp::ShlVecI16x8);
+ goto parse_error;
+ case 'r': {
+ switch (op[10]) {
+ case 's':
+ if (strcmp(op, "i16x8.shr_s") == 0) return makeSIMDShift(s, SIMDShiftOp::ShrSVecI16x8);
goto parse_error;
- case '1':
- if (strcmp(op, "i32.atomic.load16_u") == 0) return makeLoad(s, i32, /*isAtomic=*/true);
+ case 'u':
+ if (strcmp(op, "i16x8.shr_u") == 0) return makeSIMDShift(s, SIMDShiftOp::ShrUVecI16x8);
goto parse_error;
- case '8':
- if (strcmp(op, "i32.atomic.load8_u") == 0) return makeLoad(s, i32, /*isAtomic=*/true);
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 'p':
+ if (strcmp(op, "i16x8.splat") == 0) return makeUnary(s, UnaryOp::SplatVecI16x8);
+ goto parse_error;
+ case 'u': {
+ switch (op[9]) {
+ case '\0':
+ if (strcmp(op, "i16x8.sub") == 0) return makeBinary(s, BinaryOp::SubVecI16x8);
+ goto parse_error;
+ case '_': {
+ switch (op[19]) {
+ case 's':
+ if (strcmp(op, "i16x8.sub_saturate_s") == 0) return makeBinary(s, BinaryOp::SubSatSVecI16x8);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i16x8.sub_saturate_u") == 0) return makeBinary(s, BinaryOp::SubSatUVecI16x8);
goto parse_error;
default: goto parse_error;
}
}
- case 'r': {
- switch (op[14]) {
- case '.': {
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case '3': {
+ switch (op[3]) {
+ case '.': {
+ switch (op[4]) {
+ case 'a': {
+ switch (op[5]) {
+ case 'd':
+ if (strcmp(op, "i32.add") == 0) return makeBinary(s, BinaryOp::AddInt32);
+ goto parse_error;
+ case 'n':
+ if (strcmp(op, "i32.and") == 0) return makeBinary(s, BinaryOp::AndInt32);
+ goto parse_error;
+ case 't': {
+ switch (op[11]) {
+ case 'l': {
switch (op[15]) {
- case 'a': {
- switch (op[16]) {
- case 'd':
- if (strcmp(op, "i32.atomic.rmw.add") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
- goto parse_error;
- case 'n':
- if (strcmp(op, "i32.atomic.rmw.and") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'c':
- if (strcmp(op, "i32.atomic.rmw.cmpxchg") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ case '\0':
+ if (strcmp(op, "i32.atomic.load") == 0) return makeLoad(s, i32, /*isAtomic=*/true);
goto parse_error;
- case 'o':
- if (strcmp(op, "i32.atomic.rmw.or") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ case '1':
+ if (strcmp(op, "i32.atomic.load16_u") == 0) return makeLoad(s, i32, /*isAtomic=*/true);
goto parse_error;
- case 's':
- if (strcmp(op, "i32.atomic.rmw.sub") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ case '8':
+ if (strcmp(op, "i32.atomic.load8_u") == 0) return makeLoad(s, i32, /*isAtomic=*/true);
goto parse_error;
- case 'x': {
- switch (op[16]) {
+ default: goto parse_error;
+ }
+ }
+ case 'r': {
+ switch (op[14]) {
+ case '.': {
+ switch (op[15]) {
+ case 'a': {
+ switch (op[16]) {
+ case 'd':
+ if (strcmp(op, "i32.atomic.rmw.add") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ goto parse_error;
+ case 'n':
+ if (strcmp(op, "i32.atomic.rmw.and") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
case 'c':
- if (strcmp(op, "i32.atomic.rmw.xchg") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ if (strcmp(op, "i32.atomic.rmw.cmpxchg") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
goto parse_error;
case 'o':
- if (strcmp(op, "i32.atomic.rmw.xor") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ if (strcmp(op, "i32.atomic.rmw.or") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ goto parse_error;
+ case 's':
+ if (strcmp(op, "i32.atomic.rmw.sub") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
goto parse_error;
+ case 'x': {
+ switch (op[16]) {
+ case 'c':
+ if (strcmp(op, "i32.atomic.rmw.xchg") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ goto parse_error;
+ case 'o':
+ if (strcmp(op, "i32.atomic.rmw.xor") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
default: goto parse_error;
}
}
- default: goto parse_error;
- }
- }
- case '1': {
- switch (op[19]) {
- case 'a': {
- switch (op[20]) {
- case 'd':
- if (strcmp(op, "i32.atomic.rmw16_u.add") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ case '1': {
+ switch (op[19]) {
+ case 'a': {
+ switch (op[20]) {
+ case 'd':
+ if (strcmp(op, "i32.atomic.rmw16_u.add") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ goto parse_error;
+ case 'n':
+ if (strcmp(op, "i32.atomic.rmw16_u.and") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'c':
+ if (strcmp(op, "i32.atomic.rmw16_u.cmpxchg") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ goto parse_error;
+ case 'o':
+ if (strcmp(op, "i32.atomic.rmw16_u.or") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
goto parse_error;
- case 'n':
- if (strcmp(op, "i32.atomic.rmw16_u.and") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ case 's':
+ if (strcmp(op, "i32.atomic.rmw16_u.sub") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
goto parse_error;
+ case 'x': {
+ switch (op[20]) {
+ case 'c':
+ if (strcmp(op, "i32.atomic.rmw16_u.xchg") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ goto parse_error;
+ case 'o':
+ if (strcmp(op, "i32.atomic.rmw16_u.xor") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
default: goto parse_error;
}
}
- case 'c':
- if (strcmp(op, "i32.atomic.rmw16_u.cmpxchg") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
- goto parse_error;
- case 'o':
- if (strcmp(op, "i32.atomic.rmw16_u.or") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
- goto parse_error;
- case 's':
- if (strcmp(op, "i32.atomic.rmw16_u.sub") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
- goto parse_error;
- case 'x': {
- switch (op[20]) {
+ case '8': {
+ switch (op[18]) {
+ case 'a': {
+ switch (op[19]) {
+ case 'd':
+ if (strcmp(op, "i32.atomic.rmw8_u.add") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ goto parse_error;
+ case 'n':
+ if (strcmp(op, "i32.atomic.rmw8_u.and") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
case 'c':
- if (strcmp(op, "i32.atomic.rmw16_u.xchg") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ if (strcmp(op, "i32.atomic.rmw8_u.cmpxchg") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
goto parse_error;
case 'o':
- if (strcmp(op, "i32.atomic.rmw16_u.xor") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ if (strcmp(op, "i32.atomic.rmw8_u.or") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
goto parse_error;
+ case 's':
+ if (strcmp(op, "i32.atomic.rmw8_u.sub") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ goto parse_error;
+ case 'x': {
+ switch (op[19]) {
+ case 'c':
+ if (strcmp(op, "i32.atomic.rmw8_u.xchg") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ goto parse_error;
+ case 'o':
+ if (strcmp(op, "i32.atomic.rmw8_u.xor") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
default: goto parse_error;
}
}
default: goto parse_error;
}
}
- case '8': {
- switch (op[18]) {
- case 'a': {
- switch (op[19]) {
- case 'd':
- if (strcmp(op, "i32.atomic.rmw8_u.add") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
- goto parse_error;
- case 'n':
- if (strcmp(op, "i32.atomic.rmw8_u.and") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'c':
- if (strcmp(op, "i32.atomic.rmw8_u.cmpxchg") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ case 's': {
+ switch (op[16]) {
+ case '\0':
+ if (strcmp(op, "i32.atomic.store") == 0) return makeStore(s, i32, /*isAtomic=*/true);
goto parse_error;
- case 'o':
- if (strcmp(op, "i32.atomic.rmw8_u.or") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ case '1':
+ if (strcmp(op, "i32.atomic.store16") == 0) return makeStore(s, i32, /*isAtomic=*/true);
goto parse_error;
- case 's':
- if (strcmp(op, "i32.atomic.rmw8_u.sub") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
+ case '8':
+ if (strcmp(op, "i32.atomic.store8") == 0) return makeStore(s, i32, /*isAtomic=*/true);
goto parse_error;
- case 'x': {
- switch (op[19]) {
- case 'c':
- if (strcmp(op, "i32.atomic.rmw8_u.xchg") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
- goto parse_error;
- case 'o':
- if (strcmp(op, "i32.atomic.rmw8_u.xor") == 0) return makeAtomicRMWOrCmpxchg(s, i32);
- goto parse_error;
- default: goto parse_error;
- }
- }
default: goto parse_error;
}
}
default: goto parse_error;
}
}
- case 's': {
- switch (op[16]) {
- case '\0':
- if (strcmp(op, "i32.atomic.store") == 0) return makeStore(s, i32, /*isAtomic=*/true);
- goto parse_error;
- case '1':
- if (strcmp(op, "i32.atomic.store16") == 0) return makeStore(s, i32, /*isAtomic=*/true);
- goto parse_error;
- case '8':
- if (strcmp(op, "i32.atomic.store8") == 0) return makeStore(s, i32, /*isAtomic=*/true);
- goto parse_error;
- default: goto parse_error;
- }
- }
- default: goto parse_error;
- }
- }
- default: goto parse_error;
- }
- }
- case 'c': {
- switch (op[5]) {
- case 'l':
- if (strcmp(op, "i32.clz") == 0) return makeUnary(s, UnaryOp::ClzInt32);
- goto parse_error;
- case 'o':
- if (strcmp(op, "i32.const") == 0) return makeConst(s, i32);
- goto parse_error;
- case 't':
- if (strcmp(op, "i32.ctz") == 0) return makeUnary(s, UnaryOp::CtzInt32);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'd': {
- switch (op[8]) {
- case 's':
- if (strcmp(op, "i32.div_s") == 0) return makeBinary(s, BinaryOp::DivSInt32);
- goto parse_error;
- case 'u':
- if (strcmp(op, "i32.div_u") == 0) return makeBinary(s, BinaryOp::DivUInt32);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'e': {
- switch (op[5]) {
- case 'q': {
- switch (op[6]) {
- case '\0':
- if (strcmp(op, "i32.eq") == 0) return makeBinary(s, BinaryOp::EqInt32);
- goto parse_error;
- case 'z':
- if (strcmp(op, "i32.eqz") == 0) return makeUnary(s, UnaryOp::EqZInt32);
- goto parse_error;
default: goto parse_error;
}
}
- case 'x': {
- switch (op[10]) {
- case '1':
- if (strcmp(op, "i32.extend16_s") == 0) return makeUnary(s, UnaryOp::ExtendS16Int32);
- goto parse_error;
- case '8':
- if (strcmp(op, "i32.extend8_s") == 0) return makeUnary(s, UnaryOp::ExtendS8Int32);
+ case 'c': {
+ switch (op[5]) {
+ case 'l':
+ if (strcmp(op, "i32.clz") == 0) return makeUnary(s, UnaryOp::ClzInt32);
goto parse_error;
- default: goto parse_error;
- }
- }
- default: goto parse_error;
- }
- }
- case 'g': {
- switch (op[5]) {
- case 'e': {
- switch (op[7]) {
- case 's':
- if (strcmp(op, "i32.ge_s") == 0) return makeBinary(s, BinaryOp::GeSInt32);
+ case 'o':
+ if (strcmp(op, "i32.const") == 0) return makeConst(s, i32);
goto parse_error;
- case 'u':
- if (strcmp(op, "i32.ge_u") == 0) return makeBinary(s, BinaryOp::GeUInt32);
+ case 't':
+ if (strcmp(op, "i32.ctz") == 0) return makeUnary(s, UnaryOp::CtzInt32);
goto parse_error;
default: goto parse_error;
}
}
- case 't': {
- switch (op[7]) {
+ case 'd': {
+ switch (op[8]) {
case 's':
- if (strcmp(op, "i32.gt_s") == 0) return makeBinary(s, BinaryOp::GtSInt32);
+ if (strcmp(op, "i32.div_s") == 0) return makeBinary(s, BinaryOp::DivSInt32);
goto parse_error;
case 'u':
- if (strcmp(op, "i32.gt_u") == 0) return makeBinary(s, BinaryOp::GtUInt32);
+ if (strcmp(op, "i32.div_u") == 0) return makeBinary(s, BinaryOp::DivUInt32);
goto parse_error;
default: goto parse_error;
}
}
- default: goto parse_error;
- }
- }
- case 'l': {
- switch (op[5]) {
case 'e': {
- switch (op[7]) {
- case 's':
- if (strcmp(op, "i32.le_s") == 0) return makeBinary(s, BinaryOp::LeSInt32);
- goto parse_error;
- case 'u':
- if (strcmp(op, "i32.le_u") == 0) return makeBinary(s, BinaryOp::LeUInt32);
- goto parse_error;
+ switch (op[5]) {
+ case 'q': {
+ switch (op[6]) {
+ case '\0':
+ if (strcmp(op, "i32.eq") == 0) return makeBinary(s, BinaryOp::EqInt32);
+ goto parse_error;
+ case 'z':
+ if (strcmp(op, "i32.eqz") == 0) return makeUnary(s, UnaryOp::EqZInt32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'x': {
+ switch (op[10]) {
+ case '1':
+ if (strcmp(op, "i32.extend16_s") == 0) return makeUnary(s, UnaryOp::ExtendS16Int32);
+ goto parse_error;
+ case '8':
+ if (strcmp(op, "i32.extend8_s") == 0) return makeUnary(s, UnaryOp::ExtendS8Int32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
default: goto parse_error;
}
}
- case 'o': {
- switch (op[8]) {
- case '\0':
- if (strcmp(op, "i32.load") == 0) return makeLoad(s, i32, /*isAtomic=*/false);
- goto parse_error;
- case '1': {
- switch (op[11]) {
+ case 'g': {
+ switch (op[5]) {
+ case 'e': {
+ switch (op[7]) {
case 's':
- if (strcmp(op, "i32.load16_s") == 0) return makeLoad(s, i32, /*isAtomic=*/false);
+ if (strcmp(op, "i32.ge_s") == 0) return makeBinary(s, BinaryOp::GeSInt32);
goto parse_error;
case 'u':
- if (strcmp(op, "i32.load16_u") == 0) return makeLoad(s, i32, /*isAtomic=*/false);
+ if (strcmp(op, "i32.ge_u") == 0) return makeBinary(s, BinaryOp::GeUInt32);
goto parse_error;
default: goto parse_error;
}
}
- case '8': {
- switch (op[10]) {
+ case 't': {
+ switch (op[7]) {
case 's':
- if (strcmp(op, "i32.load8_s") == 0) return makeLoad(s, i32, /*isAtomic=*/false);
+ if (strcmp(op, "i32.gt_s") == 0) return makeBinary(s, BinaryOp::GtSInt32);
goto parse_error;
case 'u':
- if (strcmp(op, "i32.load8_u") == 0) return makeLoad(s, i32, /*isAtomic=*/false);
+ if (strcmp(op, "i32.gt_u") == 0) return makeBinary(s, BinaryOp::GtUInt32);
goto parse_error;
default: goto parse_error;
}
@@ -673,46 +1023,56 @@ switch (op[0]) {
default: goto parse_error;
}
}
- case 't': {
- switch (op[7]) {
- case 's':
- if (strcmp(op, "i32.lt_s") == 0) return makeBinary(s, BinaryOp::LtSInt32);
- goto parse_error;
- case 'u':
- if (strcmp(op, "i32.lt_u") == 0) return makeBinary(s, BinaryOp::LtUInt32);
- goto parse_error;
- default: goto parse_error;
- }
- }
- default: goto parse_error;
- }
- }
- case 'm':
- if (strcmp(op, "i32.mul") == 0) return makeBinary(s, BinaryOp::MulInt32);
- goto parse_error;
- case 'n':
- if (strcmp(op, "i32.ne") == 0) return makeBinary(s, BinaryOp::NeInt32);
- goto parse_error;
- case 'o':
- if (strcmp(op, "i32.or") == 0) return makeBinary(s, BinaryOp::OrInt32);
- goto parse_error;
- case 'p':
- if (strcmp(op, "i32.popcnt") == 0) return makeUnary(s, UnaryOp::PopcntInt32);
- goto parse_error;
- case 'r': {
- switch (op[5]) {
- case 'e': {
- switch (op[6]) {
- case 'i':
- if (strcmp(op, "i32.reinterpret/f32") == 0) return makeUnary(s, UnaryOp::ReinterpretFloat32);
- goto parse_error;
- case 'm': {
+ case 'l': {
+ switch (op[5]) {
+ case 'e': {
+ switch (op[7]) {
+ case 's':
+ if (strcmp(op, "i32.le_s") == 0) return makeBinary(s, BinaryOp::LeSInt32);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i32.le_u") == 0) return makeBinary(s, BinaryOp::LeUInt32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'o': {
switch (op[8]) {
+ case '\0':
+ if (strcmp(op, "i32.load") == 0) return makeLoad(s, i32, /*isAtomic=*/false);
+ goto parse_error;
+ case '1': {
+ switch (op[11]) {
+ case 's':
+ if (strcmp(op, "i32.load16_s") == 0) return makeLoad(s, i32, /*isAtomic=*/false);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i32.load16_u") == 0) return makeLoad(s, i32, /*isAtomic=*/false);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case '8': {
+ switch (op[10]) {
+ case 's':
+ if (strcmp(op, "i32.load8_s") == 0) return makeLoad(s, i32, /*isAtomic=*/false);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i32.load8_u") == 0) return makeLoad(s, i32, /*isAtomic=*/false);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 't': {
+ switch (op[7]) {
case 's':
- if (strcmp(op, "i32.rem_s") == 0) return makeBinary(s, BinaryOp::RemSInt32);
+ if (strcmp(op, "i32.lt_s") == 0) return makeBinary(s, BinaryOp::LtSInt32);
goto parse_error;
case 'u':
- if (strcmp(op, "i32.rem_u") == 0) return makeBinary(s, BinaryOp::RemUInt32);
+ if (strcmp(op, "i32.lt_u") == 0) return makeBinary(s, BinaryOp::LtUInt32);
goto parse_error;
default: goto parse_error;
}
@@ -720,83 +1080,217 @@ switch (op[0]) {
default: goto parse_error;
}
}
- case 'o': {
- switch (op[7]) {
- case 'l':
- if (strcmp(op, "i32.rotl") == 0) return makeBinary(s, BinaryOp::RotLInt32);
- goto parse_error;
- case 'r':
- if (strcmp(op, "i32.rotr") == 0) return makeBinary(s, BinaryOp::RotRInt32);
- goto parse_error;
+ case 'm':
+ if (strcmp(op, "i32.mul") == 0) return makeBinary(s, BinaryOp::MulInt32);
+ goto parse_error;
+ case 'n':
+ if (strcmp(op, "i32.ne") == 0) return makeBinary(s, BinaryOp::NeInt32);
+ goto parse_error;
+ case 'o':
+ if (strcmp(op, "i32.or") == 0) return makeBinary(s, BinaryOp::OrInt32);
+ goto parse_error;
+ case 'p':
+ if (strcmp(op, "i32.popcnt") == 0) return makeUnary(s, UnaryOp::PopcntInt32);
+ goto parse_error;
+ case 'r': {
+ switch (op[5]) {
+ case 'e': {
+ switch (op[6]) {
+ case 'i':
+ if (strcmp(op, "i32.reinterpret/f32") == 0) return makeUnary(s, UnaryOp::ReinterpretFloat32);
+ goto parse_error;
+ case 'm': {
+ switch (op[8]) {
+ case 's':
+ if (strcmp(op, "i32.rem_s") == 0) return makeBinary(s, BinaryOp::RemSInt32);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i32.rem_u") == 0) return makeBinary(s, BinaryOp::RemUInt32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 'o': {
+ switch (op[7]) {
+ case 'l':
+ if (strcmp(op, "i32.rotl") == 0) return makeBinary(s, BinaryOp::RotLInt32);
+ goto parse_error;
+ case 'r':
+ if (strcmp(op, "i32.rotr") == 0) return makeBinary(s, BinaryOp::RotRInt32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
default: goto parse_error;
}
}
- default: goto parse_error;
- }
- }
- case 's': {
- switch (op[5]) {
- case 'h': {
- switch (op[6]) {
- case 'l':
- if (strcmp(op, "i32.shl") == 0) return makeBinary(s, BinaryOp::ShlInt32);
- goto parse_error;
- case 'r': {
- switch (op[8]) {
- case 's':
- if (strcmp(op, "i32.shr_s") == 0) return makeBinary(s, BinaryOp::ShrSInt32);
+ case 's': {
+ switch (op[5]) {
+ case 'h': {
+ switch (op[6]) {
+ case 'l':
+ if (strcmp(op, "i32.shl") == 0) return makeBinary(s, BinaryOp::ShlInt32);
goto parse_error;
- case 'u':
- if (strcmp(op, "i32.shr_u") == 0) return makeBinary(s, BinaryOp::ShrUInt32);
+ case 'r': {
+ switch (op[8]) {
+ case 's':
+ if (strcmp(op, "i32.shr_s") == 0) return makeBinary(s, BinaryOp::ShrSInt32);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i32.shr_u") == 0) return makeBinary(s, BinaryOp::ShrUInt32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 't': {
+ switch (op[9]) {
+ case '\0':
+ if (strcmp(op, "i32.store") == 0) return makeStore(s, i32, /*isAtomic=*/false);
+ goto parse_error;
+ case '1':
+ if (strcmp(op, "i32.store16") == 0) return makeStore(s, i32, /*isAtomic=*/false);
+ goto parse_error;
+ case '8':
+ if (strcmp(op, "i32.store8") == 0) return makeStore(s, i32, /*isAtomic=*/false);
goto parse_error;
default: goto parse_error;
}
}
+ case 'u':
+ if (strcmp(op, "i32.sub") == 0) return makeBinary(s, BinaryOp::SubInt32);
+ goto parse_error;
default: goto parse_error;
}
}
case 't': {
- switch (op[9]) {
- case '\0':
- if (strcmp(op, "i32.store") == 0) return makeStore(s, i32, /*isAtomic=*/false);
- goto parse_error;
- case '1':
- if (strcmp(op, "i32.store16") == 0) return makeStore(s, i32, /*isAtomic=*/false);
+ switch (op[10]) {
+ case 's': {
+ switch (op[11]) {
+ case '/': {
+ switch (op[13]) {
+ case '3':
+ if (strcmp(op, "i32.trunc_s/f32") == 0) return makeUnary(s, UnaryOp::TruncSFloat32ToInt32);
+ goto parse_error;
+ case '6':
+ if (strcmp(op, "i32.trunc_s/f64") == 0) return makeUnary(s, UnaryOp::TruncSFloat64ToInt32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case ':': {
+ switch (op[17]) {
+ case '3':
+ if (strcmp(op, "i32.trunc_s:sat/f32") == 0) return makeUnary(s, UnaryOp::TruncSatSFloat32ToInt32);
+ goto parse_error;
+ case '6':
+ if (strcmp(op, "i32.trunc_s:sat/f64") == 0) return makeUnary(s, UnaryOp::TruncSatSFloat64ToInt32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 'u': {
+ switch (op[11]) {
+ case '/': {
+ switch (op[13]) {
+ case '3':
+ if (strcmp(op, "i32.trunc_u/f32") == 0) return makeUnary(s, UnaryOp::TruncUFloat32ToInt32);
+ goto parse_error;
+ case '6':
+ if (strcmp(op, "i32.trunc_u/f64") == 0) return makeUnary(s, UnaryOp::TruncUFloat64ToInt32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case ':': {
+ switch (op[17]) {
+ case '3':
+ if (strcmp(op, "i32.trunc_u:sat/f32") == 0) return makeUnary(s, UnaryOp::TruncSatUFloat32ToInt32);
+ goto parse_error;
+ case '6':
+ if (strcmp(op, "i32.trunc_u:sat/f64") == 0) return makeUnary(s, UnaryOp::TruncSatUFloat64ToInt32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 'w': {
+ switch (op[5]) {
+ case 'a':
+ if (strcmp(op, "i32.wait") == 0) return makeAtomicWait(s, i32);
goto parse_error;
- case '8':
- if (strcmp(op, "i32.store8") == 0) return makeStore(s, i32, /*isAtomic=*/false);
+ case 'r':
+ if (strcmp(op, "i32.wrap/i64") == 0) return makeUnary(s, UnaryOp::WrapInt64);
goto parse_error;
default: goto parse_error;
}
}
- case 'u':
- if (strcmp(op, "i32.sub") == 0) return makeBinary(s, BinaryOp::SubInt32);
+ case 'x':
+ if (strcmp(op, "i32.xor") == 0) return makeBinary(s, BinaryOp::XorInt32);
goto parse_error;
default: goto parse_error;
}
}
- case 't': {
- switch (op[10]) {
- case 's': {
- switch (op[11]) {
- case '/': {
- switch (op[13]) {
- case '3':
- if (strcmp(op, "i32.trunc_s/f32") == 0) return makeUnary(s, UnaryOp::TruncSFloat32ToInt32);
+ case 'x': {
+ switch (op[6]) {
+ case 'a': {
+ switch (op[7]) {
+ case 'd':
+ if (strcmp(op, "i32x4.add") == 0) return makeBinary(s, BinaryOp::AddVecI32x4);
+ goto parse_error;
+ case 'l':
+ if (strcmp(op, "i32x4.all_true") == 0) return makeUnary(s, UnaryOp::AllTrueVecI32x4);
+ goto parse_error;
+ case 'n':
+ if (strcmp(op, "i32x4.any_true") == 0) return makeUnary(s, UnaryOp::AnyTrueVecI32x4);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'e': {
+ switch (op[7]) {
+ case 'q':
+ if (strcmp(op, "i32x4.eq") == 0) return makeBinary(s, BinaryOp::EqVecI32x4);
+ goto parse_error;
+ case 'x':
+ if (strcmp(op, "i32x4.extract_lane") == 0) return makeSIMDExtract(s, SIMDExtractOp::ExtractLaneVecI32x4, 4);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'g': {
+ switch (op[7]) {
+ case 'e': {
+ switch (op[9]) {
+ case 's':
+ if (strcmp(op, "i32x4.ge_s") == 0) return makeBinary(s, BinaryOp::GeSVecI32x4);
goto parse_error;
- case '6':
- if (strcmp(op, "i32.trunc_s/f64") == 0) return makeUnary(s, UnaryOp::TruncSFloat64ToInt32);
+ case 'u':
+ if (strcmp(op, "i32x4.ge_u") == 0) return makeBinary(s, BinaryOp::GeUVecI32x4);
goto parse_error;
default: goto parse_error;
}
}
- case ':': {
- switch (op[17]) {
- case '3':
- if (strcmp(op, "i32.trunc_s:sat/f32") == 0) return makeUnary(s, UnaryOp::TruncSatSFloat32ToInt32);
+ case 't': {
+ switch (op[9]) {
+ case 's':
+ if (strcmp(op, "i32x4.gt_s") == 0) return makeBinary(s, BinaryOp::GtSVecI32x4);
goto parse_error;
- case '6':
- if (strcmp(op, "i32.trunc_s:sat/f64") == 0) return makeUnary(s, UnaryOp::TruncSatSFloat64ToInt32);
+ case 'u':
+ if (strcmp(op, "i32x4.gt_u") == 0) return makeBinary(s, BinaryOp::GtUVecI32x4);
goto parse_error;
default: goto parse_error;
}
@@ -804,26 +1298,26 @@ switch (op[0]) {
default: goto parse_error;
}
}
- case 'u': {
- switch (op[11]) {
- case '/': {
- switch (op[13]) {
- case '3':
- if (strcmp(op, "i32.trunc_u/f32") == 0) return makeUnary(s, UnaryOp::TruncUFloat32ToInt32);
+ case 'l': {
+ switch (op[7]) {
+ case 'e': {
+ switch (op[9]) {
+ case 's':
+ if (strcmp(op, "i32x4.le_s") == 0) return makeBinary(s, BinaryOp::LeSVecI32x4);
goto parse_error;
- case '6':
- if (strcmp(op, "i32.trunc_u/f64") == 0) return makeUnary(s, UnaryOp::TruncUFloat64ToInt32);
+ case 'u':
+ if (strcmp(op, "i32x4.le_u") == 0) return makeBinary(s, BinaryOp::LeUVecI32x4);
goto parse_error;
default: goto parse_error;
}
}
- case ':': {
- switch (op[17]) {
- case '3':
- if (strcmp(op, "i32.trunc_u:sat/f32") == 0) return makeUnary(s, UnaryOp::TruncSatUFloat32ToInt32);
+ case 't': {
+ switch (op[9]) {
+ case 's':
+ if (strcmp(op, "i32x4.lt_s") == 0) return makeBinary(s, BinaryOp::LtSVecI32x4);
goto parse_error;
- case '6':
- if (strcmp(op, "i32.trunc_u:sat/f64") == 0) return makeUnary(s, UnaryOp::TruncSatUFloat64ToInt32);
+ case 'u':
+ if (strcmp(op, "i32x4.lt_u") == 0) return makeBinary(s, BinaryOp::LtUVecI32x4);
goto parse_error;
default: goto parse_error;
}
@@ -831,217 +1325,359 @@ switch (op[0]) {
default: goto parse_error;
}
}
- default: goto parse_error;
- }
- }
- case 'w': {
- switch (op[5]) {
- case 'a':
- if (strcmp(op, "i32.wait") == 0) return makeAtomicWait(s, i32);
+ case 'm':
+ if (strcmp(op, "i32x4.mul") == 0) return makeBinary(s, BinaryOp::MulVecI32x4);
goto parse_error;
+ case 'n': {
+ switch (op[8]) {
+ case '\0':
+ if (strcmp(op, "i32x4.ne") == 0) return makeBinary(s, BinaryOp::NeVecI32x4);
+ goto parse_error;
+ case 'g':
+ if (strcmp(op, "i32x4.neg") == 0) return makeUnary(s, UnaryOp::NegVecI32x4);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
case 'r':
- if (strcmp(op, "i32.wrap/i64") == 0) return makeUnary(s, UnaryOp::WrapInt64);
+ if (strcmp(op, "i32x4.replace_lane") == 0) return makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecI32x4, 4);
goto parse_error;
+ case 's': {
+ switch (op[7]) {
+ case 'h': {
+ switch (op[8]) {
+ case 'l':
+ if (strcmp(op, "i32x4.shl") == 0) return makeSIMDShift(s, SIMDShiftOp::ShlVecI32x4);
+ goto parse_error;
+ case 'r': {
+ switch (op[10]) {
+ case 's':
+ if (strcmp(op, "i32x4.shr_s") == 0) return makeSIMDShift(s, SIMDShiftOp::ShrSVecI32x4);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i32x4.shr_u") == 0) return makeSIMDShift(s, SIMDShiftOp::ShrUVecI32x4);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 'p':
+ if (strcmp(op, "i32x4.splat") == 0) return makeUnary(s, UnaryOp::SplatVecI32x4);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i32x4.sub") == 0) return makeBinary(s, BinaryOp::SubVecI32x4);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 't': {
+ switch (op[12]) {
+ case 's':
+ if (strcmp(op, "i32x4.trunc_s/f32x4:sat") == 0) return makeUnary(s, UnaryOp::TruncSatSVecF32x4ToVecI32x4);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i32x4.trunc_u/f32x4:sat") == 0) return makeUnary(s, UnaryOp::TruncSatUVecF32x4ToVecI32x4);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
default: goto parse_error;
}
}
- case 'x':
- if (strcmp(op, "i32.xor") == 0) return makeBinary(s, BinaryOp::XorInt32);
- goto parse_error;
default: goto parse_error;
}
}
case '6': {
- switch (op[4]) {
- case 'a': {
- switch (op[5]) {
- case 'd':
- if (strcmp(op, "i64.add") == 0) return makeBinary(s, BinaryOp::AddInt64);
- goto parse_error;
- case 'n':
- if (strcmp(op, "i64.and") == 0) return makeBinary(s, BinaryOp::AndInt64);
- goto parse_error;
- case 't': {
- switch (op[11]) {
- case 'l': {
- switch (op[15]) {
- case '\0':
- if (strcmp(op, "i64.atomic.load") == 0) return makeLoad(s, i64, /*isAtomic=*/true);
- goto parse_error;
- case '1':
- if (strcmp(op, "i64.atomic.load16_u") == 0) return makeLoad(s, i64, /*isAtomic=*/true);
- goto parse_error;
- case '3':
- if (strcmp(op, "i64.atomic.load32_u") == 0) return makeLoad(s, i64, /*isAtomic=*/true);
- goto parse_error;
- case '8':
- if (strcmp(op, "i64.atomic.load8_u") == 0) return makeLoad(s, i64, /*isAtomic=*/true);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'r': {
- switch (op[14]) {
- case '.': {
+ switch (op[3]) {
+ case '.': {
+ switch (op[4]) {
+ case 'a': {
+ switch (op[5]) {
+ case 'd':
+ if (strcmp(op, "i64.add") == 0) return makeBinary(s, BinaryOp::AddInt64);
+ goto parse_error;
+ case 'n':
+ if (strcmp(op, "i64.and") == 0) return makeBinary(s, BinaryOp::AndInt64);
+ goto parse_error;
+ case 't': {
+ switch (op[11]) {
+ case 'l': {
switch (op[15]) {
- case 'a': {
- switch (op[16]) {
- case 'd':
- if (strcmp(op, "i64.atomic.rmw.add") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
- goto parse_error;
- case 'n':
- if (strcmp(op, "i64.atomic.rmw.and") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'c':
- if (strcmp(op, "i64.atomic.rmw.cmpxchg") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ case '\0':
+ if (strcmp(op, "i64.atomic.load") == 0) return makeLoad(s, i64, /*isAtomic=*/true);
goto parse_error;
- case 'o':
- if (strcmp(op, "i64.atomic.rmw.or") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ case '1':
+ if (strcmp(op, "i64.atomic.load16_u") == 0) return makeLoad(s, i64, /*isAtomic=*/true);
goto parse_error;
- case 's':
- if (strcmp(op, "i64.atomic.rmw.sub") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ case '3':
+ if (strcmp(op, "i64.atomic.load32_u") == 0) return makeLoad(s, i64, /*isAtomic=*/true);
+ goto parse_error;
+ case '8':
+ if (strcmp(op, "i64.atomic.load8_u") == 0) return makeLoad(s, i64, /*isAtomic=*/true);
goto parse_error;
- case 'x': {
- switch (op[16]) {
- case 'c':
- if (strcmp(op, "i64.atomic.rmw.xchg") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
- goto parse_error;
- case 'o':
- if (strcmp(op, "i64.atomic.rmw.xor") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
- goto parse_error;
- default: goto parse_error;
- }
- }
default: goto parse_error;
}
}
- case '1': {
- switch (op[19]) {
- case 'a': {
- switch (op[20]) {
- case 'd':
- if (strcmp(op, "i64.atomic.rmw16_u.add") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ case 'r': {
+ switch (op[14]) {
+ case '.': {
+ switch (op[15]) {
+ case 'a': {
+ switch (op[16]) {
+ case 'd':
+ if (strcmp(op, "i64.atomic.rmw.add") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ case 'n':
+ if (strcmp(op, "i64.atomic.rmw.and") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'c':
+ if (strcmp(op, "i64.atomic.rmw.cmpxchg") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ case 'o':
+ if (strcmp(op, "i64.atomic.rmw.or") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
goto parse_error;
- case 'n':
- if (strcmp(op, "i64.atomic.rmw16_u.and") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ case 's':
+ if (strcmp(op, "i64.atomic.rmw.sub") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
goto parse_error;
+ case 'x': {
+ switch (op[16]) {
+ case 'c':
+ if (strcmp(op, "i64.atomic.rmw.xchg") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ case 'o':
+ if (strcmp(op, "i64.atomic.rmw.xor") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
default: goto parse_error;
}
}
- case 'c':
- if (strcmp(op, "i64.atomic.rmw16_u.cmpxchg") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
- goto parse_error;
- case 'o':
- if (strcmp(op, "i64.atomic.rmw16_u.or") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
- goto parse_error;
- case 's':
- if (strcmp(op, "i64.atomic.rmw16_u.sub") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
- goto parse_error;
- case 'x': {
- switch (op[20]) {
+ case '1': {
+ switch (op[19]) {
+ case 'a': {
+ switch (op[20]) {
+ case 'd':
+ if (strcmp(op, "i64.atomic.rmw16_u.add") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ case 'n':
+ if (strcmp(op, "i64.atomic.rmw16_u.and") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
case 'c':
- if (strcmp(op, "i64.atomic.rmw16_u.xchg") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ if (strcmp(op, "i64.atomic.rmw16_u.cmpxchg") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
goto parse_error;
case 'o':
- if (strcmp(op, "i64.atomic.rmw16_u.xor") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ if (strcmp(op, "i64.atomic.rmw16_u.or") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ case 's':
+ if (strcmp(op, "i64.atomic.rmw16_u.sub") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
goto parse_error;
+ case 'x': {
+ switch (op[20]) {
+ case 'c':
+ if (strcmp(op, "i64.atomic.rmw16_u.xchg") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ case 'o':
+ if (strcmp(op, "i64.atomic.rmw16_u.xor") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
default: goto parse_error;
}
}
- default: goto parse_error;
- }
- }
- case '3': {
- switch (op[19]) {
- case 'a': {
- switch (op[20]) {
- case 'd':
- if (strcmp(op, "i64.atomic.rmw32_u.add") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ case '3': {
+ switch (op[19]) {
+ case 'a': {
+ switch (op[20]) {
+ case 'd':
+ if (strcmp(op, "i64.atomic.rmw32_u.add") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ case 'n':
+ if (strcmp(op, "i64.atomic.rmw32_u.and") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'c':
+ if (strcmp(op, "i64.atomic.rmw32_u.cmpxchg") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ case 'o':
+ if (strcmp(op, "i64.atomic.rmw32_u.or") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
goto parse_error;
- case 'n':
- if (strcmp(op, "i64.atomic.rmw32_u.and") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ case 's':
+ if (strcmp(op, "i64.atomic.rmw32_u.sub") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
goto parse_error;
+ case 'x': {
+ switch (op[20]) {
+ case 'c':
+ if (strcmp(op, "i64.atomic.rmw32_u.xchg") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ case 'o':
+ if (strcmp(op, "i64.atomic.rmw32_u.xor") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
default: goto parse_error;
}
}
- case 'c':
- if (strcmp(op, "i64.atomic.rmw32_u.cmpxchg") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
- goto parse_error;
- case 'o':
- if (strcmp(op, "i64.atomic.rmw32_u.or") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
- goto parse_error;
- case 's':
- if (strcmp(op, "i64.atomic.rmw32_u.sub") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
- goto parse_error;
- case 'x': {
- switch (op[20]) {
+ case '8': {
+ switch (op[18]) {
+ case 'a': {
+ switch (op[19]) {
+ case 'd':
+ if (strcmp(op, "i64.atomic.rmw8_u.add") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ case 'n':
+ if (strcmp(op, "i64.atomic.rmw8_u.and") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
case 'c':
- if (strcmp(op, "i64.atomic.rmw32_u.xchg") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ if (strcmp(op, "i64.atomic.rmw8_u.cmpxchg") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
goto parse_error;
case 'o':
- if (strcmp(op, "i64.atomic.rmw32_u.xor") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ if (strcmp(op, "i64.atomic.rmw8_u.or") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ case 's':
+ if (strcmp(op, "i64.atomic.rmw8_u.sub") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
goto parse_error;
+ case 'x': {
+ switch (op[19]) {
+ case 'c':
+ if (strcmp(op, "i64.atomic.rmw8_u.xchg") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ case 'o':
+ if (strcmp(op, "i64.atomic.rmw8_u.xor") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
default: goto parse_error;
}
}
default: goto parse_error;
}
}
- case '8': {
- switch (op[18]) {
- case 'a': {
- switch (op[19]) {
- case 'd':
- if (strcmp(op, "i64.atomic.rmw8_u.add") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
- goto parse_error;
- case 'n':
- if (strcmp(op, "i64.atomic.rmw8_u.and") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'c':
- if (strcmp(op, "i64.atomic.rmw8_u.cmpxchg") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ case 's': {
+ switch (op[16]) {
+ case '\0':
+ if (strcmp(op, "i64.atomic.store") == 0) return makeStore(s, i64, /*isAtomic=*/true);
goto parse_error;
- case 'o':
- if (strcmp(op, "i64.atomic.rmw8_u.or") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ case '1':
+ if (strcmp(op, "i64.atomic.store16") == 0) return makeStore(s, i64, /*isAtomic=*/true);
goto parse_error;
- case 's':
- if (strcmp(op, "i64.atomic.rmw8_u.sub") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
+ case '3':
+ if (strcmp(op, "i64.atomic.store32") == 0) return makeStore(s, i64, /*isAtomic=*/true);
+ goto parse_error;
+ case '8':
+ if (strcmp(op, "i64.atomic.store8") == 0) return makeStore(s, i64, /*isAtomic=*/true);
goto parse_error;
- case 'x': {
- switch (op[19]) {
- case 'c':
- if (strcmp(op, "i64.atomic.rmw8_u.xchg") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
- goto parse_error;
- case 'o':
- if (strcmp(op, "i64.atomic.rmw8_u.xor") == 0) return makeAtomicRMWOrCmpxchg(s, i64);
- goto parse_error;
- default: goto parse_error;
- }
- }
default: goto parse_error;
}
}
default: goto parse_error;
}
}
- case 's': {
- switch (op[16]) {
+ default: goto parse_error;
+ }
+ }
+ case 'c': {
+ switch (op[5]) {
+ case 'l':
+ if (strcmp(op, "i64.clz") == 0) return makeUnary(s, UnaryOp::ClzInt64);
+ goto parse_error;
+ case 'o':
+ if (strcmp(op, "i64.const") == 0) return makeConst(s, i64);
+ goto parse_error;
+ case 't':
+ if (strcmp(op, "i64.ctz") == 0) return makeUnary(s, UnaryOp::CtzInt64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'd': {
+ switch (op[8]) {
+ case 's':
+ if (strcmp(op, "i64.div_s") == 0) return makeBinary(s, BinaryOp::DivSInt64);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i64.div_u") == 0) return makeBinary(s, BinaryOp::DivUInt64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'e': {
+ switch (op[5]) {
+ case 'q': {
+ switch (op[6]) {
case '\0':
- if (strcmp(op, "i64.atomic.store") == 0) return makeStore(s, i64, /*isAtomic=*/true);
+ if (strcmp(op, "i64.eq") == 0) return makeBinary(s, BinaryOp::EqInt64);
+ goto parse_error;
+ case 'z':
+ if (strcmp(op, "i64.eqz") == 0) return makeUnary(s, UnaryOp::EqZInt64);
goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'x': {
+ switch (op[10]) {
case '1':
- if (strcmp(op, "i64.atomic.store16") == 0) return makeStore(s, i64, /*isAtomic=*/true);
+ if (strcmp(op, "i64.extend16_s") == 0) return makeUnary(s, UnaryOp::ExtendS16Int64);
goto parse_error;
case '3':
- if (strcmp(op, "i64.atomic.store32") == 0) return makeStore(s, i64, /*isAtomic=*/true);
+ if (strcmp(op, "i64.extend32_s") == 0) return makeUnary(s, UnaryOp::ExtendS32Int64);
goto parse_error;
case '8':
- if (strcmp(op, "i64.atomic.store8") == 0) return makeStore(s, i64, /*isAtomic=*/true);
+ if (strcmp(op, "i64.extend8_s") == 0) return makeUnary(s, UnaryOp::ExtendS8Int64);
+ goto parse_error;
+ case '_': {
+ switch (op[11]) {
+ case 's':
+ if (strcmp(op, "i64.extend_s/i32") == 0) return makeUnary(s, UnaryOp::ExtendSInt32);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i64.extend_u/i32") == 0) return makeUnary(s, UnaryOp::ExtendUInt32);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 'g': {
+ switch (op[5]) {
+ case 'e': {
+ switch (op[7]) {
+ case 's':
+ if (strcmp(op, "i64.ge_s") == 0) return makeBinary(s, BinaryOp::GeSInt64);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i64.ge_u") == 0) return makeBinary(s, BinaryOp::GeUInt64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 't': {
+ switch (op[7]) {
+ case 's':
+ if (strcmp(op, "i64.gt_s") == 0) return makeBinary(s, BinaryOp::GtSInt64);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i64.gt_u") == 0) return makeBinary(s, BinaryOp::GtUInt64);
goto parse_error;
default: goto parse_error;
}
@@ -1049,149 +1685,321 @@ switch (op[0]) {
default: goto parse_error;
}
}
- default: goto parse_error;
- }
- }
- case 'c': {
- switch (op[5]) {
- case 'l':
- if (strcmp(op, "i64.clz") == 0) return makeUnary(s, UnaryOp::ClzInt64);
- goto parse_error;
- case 'o':
- if (strcmp(op, "i64.const") == 0) return makeConst(s, i64);
+ case 'l': {
+ switch (op[5]) {
+ case 'e': {
+ switch (op[7]) {
+ case 's':
+ if (strcmp(op, "i64.le_s") == 0) return makeBinary(s, BinaryOp::LeSInt64);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i64.le_u") == 0) return makeBinary(s, BinaryOp::LeUInt64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'o': {
+ switch (op[8]) {
+ case '\0':
+ if (strcmp(op, "i64.load") == 0) return makeLoad(s, i64, /*isAtomic=*/false);
+ goto parse_error;
+ case '1': {
+ switch (op[11]) {
+ case 's':
+ if (strcmp(op, "i64.load16_s") == 0) return makeLoad(s, i64, /*isAtomic=*/false);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i64.load16_u") == 0) return makeLoad(s, i64, /*isAtomic=*/false);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case '3': {
+ switch (op[11]) {
+ case 's':
+ if (strcmp(op, "i64.load32_s") == 0) return makeLoad(s, i64, /*isAtomic=*/false);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i64.load32_u") == 0) return makeLoad(s, i64, /*isAtomic=*/false);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case '8': {
+ switch (op[10]) {
+ case 's':
+ if (strcmp(op, "i64.load8_s") == 0) return makeLoad(s, i64, /*isAtomic=*/false);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i64.load8_u") == 0) return makeLoad(s, i64, /*isAtomic=*/false);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 't': {
+ switch (op[7]) {
+ case 's':
+ if (strcmp(op, "i64.lt_s") == 0) return makeBinary(s, BinaryOp::LtSInt64);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i64.lt_u") == 0) return makeBinary(s, BinaryOp::LtUInt64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 'm':
+ if (strcmp(op, "i64.mul") == 0) return makeBinary(s, BinaryOp::MulInt64);
goto parse_error;
- case 't':
- if (strcmp(op, "i64.ctz") == 0) return makeUnary(s, UnaryOp::CtzInt64);
+ case 'n':
+ if (strcmp(op, "i64.ne") == 0) return makeBinary(s, BinaryOp::NeInt64);
goto parse_error;
- default: goto parse_error;
- }
- }
- case 'd': {
- switch (op[8]) {
- case 's':
- if (strcmp(op, "i64.div_s") == 0) return makeBinary(s, BinaryOp::DivSInt64);
+ case 'o':
+ if (strcmp(op, "i64.or") == 0) return makeBinary(s, BinaryOp::OrInt64);
goto parse_error;
- case 'u':
- if (strcmp(op, "i64.div_u") == 0) return makeBinary(s, BinaryOp::DivUInt64);
+ case 'p':
+ if (strcmp(op, "i64.popcnt") == 0) return makeUnary(s, UnaryOp::PopcntInt64);
goto parse_error;
- default: goto parse_error;
- }
- }
- case 'e': {
- switch (op[5]) {
- case 'q': {
- switch (op[6]) {
- case '\0':
- if (strcmp(op, "i64.eq") == 0) return makeBinary(s, BinaryOp::EqInt64);
- goto parse_error;
- case 'z':
- if (strcmp(op, "i64.eqz") == 0) return makeUnary(s, UnaryOp::EqZInt64);
+ case 'r': {
+ switch (op[5]) {
+ case 'e': {
+ switch (op[6]) {
+ case 'i':
+ if (strcmp(op, "i64.reinterpret/f64") == 0) return makeUnary(s, UnaryOp::ReinterpretFloat64);
+ goto parse_error;
+ case 'm': {
+ switch (op[8]) {
+ case 's':
+ if (strcmp(op, "i64.rem_s") == 0) return makeBinary(s, BinaryOp::RemSInt64);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i64.rem_u") == 0) return makeBinary(s, BinaryOp::RemUInt64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 'o': {
+ switch (op[7]) {
+ case 'l':
+ if (strcmp(op, "i64.rotl") == 0) return makeBinary(s, BinaryOp::RotLInt64);
+ goto parse_error;
+ case 'r':
+ if (strcmp(op, "i64.rotr") == 0) return makeBinary(s, BinaryOp::RotRInt64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 's': {
+ switch (op[5]) {
+ case 'h': {
+ switch (op[6]) {
+ case 'l':
+ if (strcmp(op, "i64.shl") == 0) return makeBinary(s, BinaryOp::ShlInt64);
+ goto parse_error;
+ case 'r': {
+ switch (op[8]) {
+ case 's':
+ if (strcmp(op, "i64.shr_s") == 0) return makeBinary(s, BinaryOp::ShrSInt64);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i64.shr_u") == 0) return makeBinary(s, BinaryOp::ShrUInt64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 't': {
+ switch (op[9]) {
+ case '\0':
+ if (strcmp(op, "i64.store") == 0) return makeStore(s, i64, /*isAtomic=*/false);
+ goto parse_error;
+ case '1':
+ if (strcmp(op, "i64.store16") == 0) return makeStore(s, i64, /*isAtomic=*/false);
+ goto parse_error;
+ case '3':
+ if (strcmp(op, "i64.store32") == 0) return makeStore(s, i64, /*isAtomic=*/false);
+ goto parse_error;
+ case '8':
+ if (strcmp(op, "i64.store8") == 0) return makeStore(s, i64, /*isAtomic=*/false);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'u':
+ if (strcmp(op, "i64.sub") == 0) return makeBinary(s, BinaryOp::SubInt64);
goto parse_error;
default: goto parse_error;
}
}
- case 'x': {
+ case 't': {
switch (op[10]) {
- case '1':
- if (strcmp(op, "i64.extend16_s") == 0) return makeUnary(s, UnaryOp::ExtendS16Int64);
- goto parse_error;
- case '3':
- if (strcmp(op, "i64.extend32_s") == 0) return makeUnary(s, UnaryOp::ExtendS32Int64);
- goto parse_error;
- case '8':
- if (strcmp(op, "i64.extend8_s") == 0) return makeUnary(s, UnaryOp::ExtendS8Int64);
- goto parse_error;
- case '_': {
+ case 's': {
switch (op[11]) {
- case 's':
- if (strcmp(op, "i64.extend_s/i32") == 0) return makeUnary(s, UnaryOp::ExtendSInt32);
- goto parse_error;
- case 'u':
- if (strcmp(op, "i64.extend_u/i32") == 0) return makeUnary(s, UnaryOp::ExtendUInt32);
- goto parse_error;
+ case '/': {
+ switch (op[13]) {
+ case '3':
+ if (strcmp(op, "i64.trunc_s/f32") == 0) return makeUnary(s, UnaryOp::TruncSFloat32ToInt64);
+ goto parse_error;
+ case '6':
+ if (strcmp(op, "i64.trunc_s/f64") == 0) return makeUnary(s, UnaryOp::TruncSFloat64ToInt64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case ':': {
+ switch (op[17]) {
+ case '3':
+ if (strcmp(op, "i64.trunc_s:sat/f32") == 0) return makeUnary(s, UnaryOp::TruncSatSFloat32ToInt64);
+ goto parse_error;
+ case '6':
+ if (strcmp(op, "i64.trunc_s:sat/f64") == 0) return makeUnary(s, UnaryOp::TruncSatSFloat64ToInt64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 'u': {
+ switch (op[11]) {
+ case '/': {
+ switch (op[13]) {
+ case '3':
+ if (strcmp(op, "i64.trunc_u/f32") == 0) return makeUnary(s, UnaryOp::TruncUFloat32ToInt64);
+ goto parse_error;
+ case '6':
+ if (strcmp(op, "i64.trunc_u/f64") == 0) return makeUnary(s, UnaryOp::TruncUFloat64ToInt64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case ':': {
+ switch (op[17]) {
+ case '3':
+ if (strcmp(op, "i64.trunc_u:sat/f32") == 0) return makeUnary(s, UnaryOp::TruncSatUFloat32ToInt64);
+ goto parse_error;
+ case '6':
+ if (strcmp(op, "i64.trunc_u:sat/f64") == 0) return makeUnary(s, UnaryOp::TruncSatUFloat64ToInt64);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
default: goto parse_error;
}
}
default: goto parse_error;
}
}
+ case 'w':
+ if (strcmp(op, "i64.wait") == 0) return makeAtomicWait(s, i64);
+ goto parse_error;
+ case 'x':
+ if (strcmp(op, "i64.xor") == 0) return makeBinary(s, BinaryOp::XorInt64);
+ goto parse_error;
default: goto parse_error;
}
}
- case 'g': {
- switch (op[5]) {
- case 'e': {
+ case 'x': {
+ switch (op[6]) {
+ case 'a': {
switch (op[7]) {
- case 's':
- if (strcmp(op, "i64.ge_s") == 0) return makeBinary(s, BinaryOp::GeSInt64);
+ case 'd':
+ if (strcmp(op, "i64x2.add") == 0) return makeBinary(s, BinaryOp::AddVecI64x2);
goto parse_error;
- case 'u':
- if (strcmp(op, "i64.ge_u") == 0) return makeBinary(s, BinaryOp::GeUInt64);
+ case 'l':
+ if (strcmp(op, "i64x2.all_true") == 0) return makeUnary(s, UnaryOp::AllTrueVecI64x2);
+ goto parse_error;
+ case 'n':
+ if (strcmp(op, "i64x2.any_true") == 0) return makeUnary(s, UnaryOp::AnyTrueVecI64x2);
goto parse_error;
default: goto parse_error;
}
}
- case 't': {
+ case 'e':
+ if (strcmp(op, "i64x2.extract_lane") == 0) return makeSIMDExtract(s, SIMDExtractOp::ExtractLaneVecI64x2, 2);
+ goto parse_error;
+ case 'n':
+ if (strcmp(op, "i64x2.neg") == 0) return makeUnary(s, UnaryOp::NegVecI64x2);
+ goto parse_error;
+ case 'r':
+ if (strcmp(op, "i64x2.replace_lane") == 0) return makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecI64x2, 2);
+ goto parse_error;
+ case 's': {
switch (op[7]) {
- case 's':
- if (strcmp(op, "i64.gt_s") == 0) return makeBinary(s, BinaryOp::GtSInt64);
+ case 'h': {
+ switch (op[8]) {
+ case 'l':
+ if (strcmp(op, "i64x2.shl") == 0) return makeSIMDShift(s, SIMDShiftOp::ShlVecI64x2);
+ goto parse_error;
+ case 'r': {
+ switch (op[10]) {
+ case 's':
+ if (strcmp(op, "i64x2.shr_s") == 0) return makeSIMDShift(s, SIMDShiftOp::ShrSVecI64x2);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i64x2.shr_u") == 0) return makeSIMDShift(s, SIMDShiftOp::ShrUVecI64x2);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case 'p':
+ if (strcmp(op, "i64x2.splat") == 0) return makeUnary(s, UnaryOp::SplatVecI64x2);
goto parse_error;
case 'u':
- if (strcmp(op, "i64.gt_u") == 0) return makeBinary(s, BinaryOp::GtUInt64);
+ if (strcmp(op, "i64x2.sub") == 0) return makeBinary(s, BinaryOp::SubVecI64x2);
goto parse_error;
default: goto parse_error;
}
}
- default: goto parse_error;
- }
- }
- case 'l': {
- switch (op[5]) {
- case 'e': {
- switch (op[7]) {
+ case 't': {
+ switch (op[12]) {
case 's':
- if (strcmp(op, "i64.le_s") == 0) return makeBinary(s, BinaryOp::LeSInt64);
+ if (strcmp(op, "i64x2.trunc_s/f64x2:sat") == 0) return makeUnary(s, UnaryOp::TruncSatSVecF64x2ToVecI64x2);
goto parse_error;
case 'u':
- if (strcmp(op, "i64.le_u") == 0) return makeBinary(s, BinaryOp::LeUInt64);
+ if (strcmp(op, "i64x2.trunc_u/f64x2:sat") == 0) return makeUnary(s, UnaryOp::TruncSatUVecF64x2ToVecI64x2);
goto parse_error;
default: goto parse_error;
}
}
- case 'o': {
- switch (op[8]) {
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
+ case '8': {
+ switch (op[6]) {
+ case 'a': {
+ switch (op[7]) {
+ case 'd': {
+ switch (op[9]) {
case '\0':
- if (strcmp(op, "i64.load") == 0) return makeLoad(s, i64, /*isAtomic=*/false);
+ if (strcmp(op, "i8x16.add") == 0) return makeBinary(s, BinaryOp::AddVecI8x16);
goto parse_error;
- case '1': {
- switch (op[11]) {
- case 's':
- if (strcmp(op, "i64.load16_s") == 0) return makeLoad(s, i64, /*isAtomic=*/false);
- goto parse_error;
- case 'u':
- if (strcmp(op, "i64.load16_u") == 0) return makeLoad(s, i64, /*isAtomic=*/false);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case '3': {
- switch (op[11]) {
- case 's':
- if (strcmp(op, "i64.load32_s") == 0) return makeLoad(s, i64, /*isAtomic=*/false);
- goto parse_error;
- case 'u':
- if (strcmp(op, "i64.load32_u") == 0) return makeLoad(s, i64, /*isAtomic=*/false);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case '8': {
- switch (op[10]) {
+ case '_': {
+ switch (op[19]) {
case 's':
- if (strcmp(op, "i64.load8_s") == 0) return makeLoad(s, i64, /*isAtomic=*/false);
+ if (strcmp(op, "i8x16.add_saturate_s") == 0) return makeBinary(s, BinaryOp::AddSatSVecI8x16);
goto parse_error;
case 'u':
- if (strcmp(op, "i64.load8_u") == 0) return makeLoad(s, i64, /*isAtomic=*/false);
+ if (strcmp(op, "i8x16.add_saturate_u") == 0) return makeBinary(s, BinaryOp::AddSatUVecI8x16);
goto parse_error;
default: goto parse_error;
}
@@ -1199,13 +2007,27 @@ switch (op[0]) {
default: goto parse_error;
}
}
- case 't': {
- switch (op[7]) {
+ case 'l':
+ if (strcmp(op, "i8x16.all_true") == 0) return makeUnary(s, UnaryOp::AllTrueVecI8x16);
+ goto parse_error;
+ case 'n':
+ if (strcmp(op, "i8x16.any_true") == 0) return makeUnary(s, UnaryOp::AnyTrueVecI8x16);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'e': {
+ switch (op[7]) {
+ case 'q':
+ if (strcmp(op, "i8x16.eq") == 0) return makeBinary(s, BinaryOp::EqVecI8x16);
+ goto parse_error;
+ case 'x': {
+ switch (op[19]) {
case 's':
- if (strcmp(op, "i64.lt_s") == 0) return makeBinary(s, BinaryOp::LtSInt64);
+ if (strcmp(op, "i8x16.extract_lane_s") == 0) return makeSIMDExtract(s, SIMDExtractOp::ExtractLaneSVecI8x16, 16);
goto parse_error;
case 'u':
- if (strcmp(op, "i64.lt_u") == 0) return makeBinary(s, BinaryOp::LtUInt64);
+ if (strcmp(op, "i8x16.extract_lane_u") == 0) return makeSIMDExtract(s, SIMDExtractOp::ExtractLaneUVecI8x16, 16);
goto parse_error;
default: goto parse_error;
}
@@ -1213,46 +2035,26 @@ switch (op[0]) {
default: goto parse_error;
}
}
- case 'm':
- if (strcmp(op, "i64.mul") == 0) return makeBinary(s, BinaryOp::MulInt64);
- goto parse_error;
- case 'n':
- if (strcmp(op, "i64.ne") == 0) return makeBinary(s, BinaryOp::NeInt64);
- goto parse_error;
- case 'o':
- if (strcmp(op, "i64.or") == 0) return makeBinary(s, BinaryOp::OrInt64);
- goto parse_error;
- case 'p':
- if (strcmp(op, "i64.popcnt") == 0) return makeUnary(s, UnaryOp::PopcntInt64);
- goto parse_error;
- case 'r': {
- switch (op[5]) {
+ case 'g': {
+ switch (op[7]) {
case 'e': {
- switch (op[6]) {
- case 'i':
- if (strcmp(op, "i64.reinterpret/f64") == 0) return makeUnary(s, UnaryOp::ReinterpretFloat64);
+ switch (op[9]) {
+ case 's':
+ if (strcmp(op, "i8x16.ge_s") == 0) return makeBinary(s, BinaryOp::GeSVecI8x16);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i8x16.ge_u") == 0) return makeBinary(s, BinaryOp::GeUVecI8x16);
goto parse_error;
- case 'm': {
- switch (op[8]) {
- case 's':
- if (strcmp(op, "i64.rem_s") == 0) return makeBinary(s, BinaryOp::RemSInt64);
- goto parse_error;
- case 'u':
- if (strcmp(op, "i64.rem_u") == 0) return makeBinary(s, BinaryOp::RemUInt64);
- goto parse_error;
- default: goto parse_error;
- }
- }
default: goto parse_error;
}
}
- case 'o': {
- switch (op[7]) {
- case 'l':
- if (strcmp(op, "i64.rotl") == 0) return makeBinary(s, BinaryOp::RotLInt64);
+ case 't': {
+ switch (op[9]) {
+ case 's':
+ if (strcmp(op, "i8x16.gt_s") == 0) return makeBinary(s, BinaryOp::GtSVecI8x16);
goto parse_error;
- case 'r':
- if (strcmp(op, "i64.rotr") == 0) return makeBinary(s, BinaryOp::RotRInt64);
+ case 'u':
+ if (strcmp(op, "i8x16.gt_u") == 0) return makeBinary(s, BinaryOp::GtUVecI8x16);
goto parse_error;
default: goto parse_error;
}
@@ -1260,72 +2062,64 @@ switch (op[0]) {
default: goto parse_error;
}
}
- case 's': {
- switch (op[5]) {
- case 'h': {
- switch (op[6]) {
- case 'l':
- if (strcmp(op, "i64.shl") == 0) return makeBinary(s, BinaryOp::ShlInt64);
+ case 'l': {
+ switch (op[7]) {
+ case 'e': {
+ switch (op[9]) {
+ case 's':
+ if (strcmp(op, "i8x16.le_s") == 0) return makeBinary(s, BinaryOp::LeSVecI8x16);
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i8x16.le_u") == 0) return makeBinary(s, BinaryOp::LeUVecI8x16);
goto parse_error;
- case 'r': {
- switch (op[8]) {
- case 's':
- if (strcmp(op, "i64.shr_s") == 0) return makeBinary(s, BinaryOp::ShrSInt64);
- goto parse_error;
- case 'u':
- if (strcmp(op, "i64.shr_u") == 0) return makeBinary(s, BinaryOp::ShrUInt64);
- goto parse_error;
- default: goto parse_error;
- }
- }
default: goto parse_error;
}
}
case 't': {
switch (op[9]) {
- case '\0':
- if (strcmp(op, "i64.store") == 0) return makeStore(s, i64, /*isAtomic=*/false);
- goto parse_error;
- case '1':
- if (strcmp(op, "i64.store16") == 0) return makeStore(s, i64, /*isAtomic=*/false);
- goto parse_error;
- case '3':
- if (strcmp(op, "i64.store32") == 0) return makeStore(s, i64, /*isAtomic=*/false);
+ case 's':
+ if (strcmp(op, "i8x16.lt_s") == 0) return makeBinary(s, BinaryOp::LtSVecI8x16);
goto parse_error;
- case '8':
- if (strcmp(op, "i64.store8") == 0) return makeStore(s, i64, /*isAtomic=*/false);
+ case 'u':
+ if (strcmp(op, "i8x16.lt_u") == 0) return makeBinary(s, BinaryOp::LtUVecI8x16);
goto parse_error;
default: goto parse_error;
}
}
- case 'u':
- if (strcmp(op, "i64.sub") == 0) return makeBinary(s, BinaryOp::SubInt64);
+ default: goto parse_error;
+ }
+ }
+ case 'm':
+ if (strcmp(op, "i8x16.mul") == 0) return makeBinary(s, BinaryOp::MulVecI8x16);
+ goto parse_error;
+ case 'n': {
+ switch (op[8]) {
+ case '\0':
+ if (strcmp(op, "i8x16.ne") == 0) return makeBinary(s, BinaryOp::NeVecI8x16);
+ goto parse_error;
+ case 'g':
+ if (strcmp(op, "i8x16.neg") == 0) return makeUnary(s, UnaryOp::NegVecI8x16);
goto parse_error;
default: goto parse_error;
}
}
- case 't': {
- switch (op[10]) {
- case 's': {
- switch (op[11]) {
- case '/': {
- switch (op[13]) {
- case '3':
- if (strcmp(op, "i64.trunc_s/f32") == 0) return makeUnary(s, UnaryOp::TruncSFloat32ToInt64);
- goto parse_error;
- case '6':
- if (strcmp(op, "i64.trunc_s/f64") == 0) return makeUnary(s, UnaryOp::TruncSFloat64ToInt64);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case ':': {
- switch (op[17]) {
- case '3':
- if (strcmp(op, "i64.trunc_s:sat/f32") == 0) return makeUnary(s, UnaryOp::TruncSatSFloat32ToInt64);
+ case 'r':
+ if (strcmp(op, "i8x16.replace_lane") == 0) return makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecI8x16, 16);
+ goto parse_error;
+ case 's': {
+ switch (op[7]) {
+ case 'h': {
+ switch (op[8]) {
+ case 'l':
+ if (strcmp(op, "i8x16.shl") == 0) return makeSIMDShift(s, SIMDShiftOp::ShlVecI8x16);
+ goto parse_error;
+ case 'r': {
+ switch (op[10]) {
+ case 's':
+ if (strcmp(op, "i8x16.shr_s") == 0) return makeSIMDShift(s, SIMDShiftOp::ShrSVecI8x16);
goto parse_error;
- case '6':
- if (strcmp(op, "i64.trunc_s:sat/f64") == 0) return makeUnary(s, UnaryOp::TruncSatSFloat64ToInt64);
+ case 'u':
+ if (strcmp(op, "i8x16.shr_u") == 0) return makeSIMDShift(s, SIMDShiftOp::ShrUVecI8x16);
goto parse_error;
default: goto parse_error;
}
@@ -1333,26 +2127,21 @@ switch (op[0]) {
default: goto parse_error;
}
}
+ case 'p':
+ if (strcmp(op, "i8x16.splat") == 0) return makeUnary(s, UnaryOp::SplatVecI8x16);
+ goto parse_error;
case 'u': {
- switch (op[11]) {
- case '/': {
- switch (op[13]) {
- case '3':
- if (strcmp(op, "i64.trunc_u/f32") == 0) return makeUnary(s, UnaryOp::TruncUFloat32ToInt64);
- goto parse_error;
- case '6':
- if (strcmp(op, "i64.trunc_u/f64") == 0) return makeUnary(s, UnaryOp::TruncUFloat64ToInt64);
- goto parse_error;
- default: goto parse_error;
- }
- }
- case ':': {
- switch (op[17]) {
- case '3':
- if (strcmp(op, "i64.trunc_u:sat/f32") == 0) return makeUnary(s, UnaryOp::TruncSatUFloat32ToInt64);
+ switch (op[9]) {
+ case '\0':
+ if (strcmp(op, "i8x16.sub") == 0) return makeBinary(s, BinaryOp::SubVecI8x16);
+ goto parse_error;
+ case '_': {
+ switch (op[19]) {
+ case 's':
+ if (strcmp(op, "i8x16.sub_saturate_s") == 0) return makeBinary(s, BinaryOp::SubSatSVecI8x16);
goto parse_error;
- case '6':
- if (strcmp(op, "i64.trunc_u:sat/f64") == 0) return makeUnary(s, UnaryOp::TruncSatUFloat64ToInt64);
+ case 'u':
+ if (strcmp(op, "i8x16.sub_saturate_u") == 0) return makeBinary(s, BinaryOp::SubSatUVecI8x16);
goto parse_error;
default: goto parse_error;
}
@@ -1363,12 +2152,6 @@ switch (op[0]) {
default: goto parse_error;
}
}
- case 'w':
- if (strcmp(op, "i64.wait") == 0) return makeAtomicWait(s, i64);
- goto parse_error;
- case 'x':
- if (strcmp(op, "i64.xor") == 0) return makeBinary(s, BinaryOp::XorInt64);
- goto parse_error;
default: goto parse_error;
}
}
@@ -1420,6 +2203,43 @@ switch (op[0]) {
case 'u':
if (strcmp(op, "unreachable") == 0) return makeUnreachable();
goto parse_error;
+ case 'v': {
+ switch (op[1]) {
+ case '1': {
+ switch (op[5]) {
+ case 'a':
+ if (strcmp(op, "v128.and") == 0) return makeBinary(s, BinaryOp::AndVec128);
+ goto parse_error;
+ case 'b':
+ if (strcmp(op, "v128.bitselect") == 0) return makeSIMDBitselect(s);
+ goto parse_error;
+ case 'c':
+ if (strcmp(op, "v128.const") == 0) return makeConst(s, v128);
+ goto parse_error;
+ case 'l':
+ if (strcmp(op, "v128.load") == 0) return makeLoad(s, v128, /*isAtomic=*/false);
+ goto parse_error;
+ case 'n':
+ if (strcmp(op, "v128.not") == 0) return makeUnary(s, UnaryOp::NotVec128);
+ goto parse_error;
+ case 'o':
+ if (strcmp(op, "v128.or") == 0) return makeBinary(s, BinaryOp::OrVec128);
+ goto parse_error;
+ case 's':
+ if (strcmp(op, "v128.store") == 0) return makeStore(s, v128, /*isAtomic=*/false);
+ goto parse_error;
+ case 'x':
+ if (strcmp(op, "v128.xor") == 0) return makeBinary(s, BinaryOp::XorVec128);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case '8':
+ if (strcmp(op, "v8x16.shuffle") == 0) return makeSIMDShuffle(s);
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
case 'w':
if (strcmp(op, "wake") == 0) return makeAtomicWake(s);
goto parse_error;
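
The regenerated matcher above keeps gen-s-parser.inc's usual shape: each level switches on a character at a fixed offset into the op name, and only the innermost branch pays for a full strcmp. A minimal standalone sketch of that dispatch pattern for the new i64x2 shifts (the enum and function names are illustrative, not the real makeSIMDShift plumbing):

// Sketch only: branch on fixed character offsets first, confirm with one
// strcmp at the leaf. Like the generated code, this assumes `op` is a
// plausible op name long enough for the probed offsets.
#include <cstring>

enum class I64x2Shift { Shl, ShrS, ShrU, Unknown };

inline I64x2Shift classifyI64x2Shift(const char* op) {
  switch (op[8]) {                // 'l' in "i64x2.shl", 'r' in "i64x2.shr_*"
    case 'l':
      return strcmp(op, "i64x2.shl") == 0 ? I64x2Shift::Shl : I64x2Shift::Unknown;
    case 'r':
      switch (op[10]) {           // the sign suffix after "i64x2.shr_"
        case 's':
          return strcmp(op, "i64x2.shr_s") == 0 ? I64x2Shift::ShrS : I64x2Shift::Unknown;
        case 'u':
          return strcmp(op, "i64x2.shr_u") == 0 ? I64x2Shift::ShrU : I64x2Shift::Unknown;
        default: return I64x2Shift::Unknown;
      }
    default: return I64x2Shift::Unknown;
  }
}
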
diff --git a/src/ir/ExpressionAnalyzer.cpp b/src/ir/ExpressionAnalyzer.cpp
index 7788f7cde..7248691a9 100644
--- a/src/ir/ExpressionAnalyzer.cpp
+++ b/src/ir/ExpressionAnalyzer.cpp
@@ -248,6 +248,37 @@ bool ExpressionAnalyzer::flexibleEqual(Expression* left, Expression* right, Expr
PUSH(AtomicWake, wakeCount);
break;
}
+ case Expression::Id::SIMDExtractId: {
+ CHECK(SIMDExtract, op);
+ CHECK(SIMDExtract, idx);
+ PUSH(SIMDExtract, vec);
+ break;
+ }
+ case Expression::Id::SIMDReplaceId: {
+ CHECK(SIMDReplace, op);
+ CHECK(SIMDReplace, idx);
+ PUSH(SIMDReplace, vec);
+ PUSH(SIMDReplace, value);
+ break;
+ }
+ case Expression::Id::SIMDShuffleId: {
+ CHECK(SIMDShuffle, mask);
+ PUSH(SIMDShuffle, left);
+ PUSH(SIMDShuffle, right);
+ break;
+ }
+ case Expression::Id::SIMDBitselectId: {
+ PUSH(SIMDBitselect, left);
+ PUSH(SIMDBitselect, right);
+ PUSH(SIMDBitselect, cond);
+ break;
+ }
+ case Expression::Id::SIMDShiftId: {
+ CHECK(SIMDShift, op);
+ PUSH(SIMDShift, vec);
+ PUSH(SIMDShift, shift);
+ break;
+ }
case Expression::Id::ConstId: {
if (left->cast<Const>()->value != right->cast<Const>()->value) {
return false;
@@ -496,15 +527,43 @@ HashType ExpressionAnalyzer::hash(Expression* curr) {
PUSH(AtomicWake, wakeCount);
break;
}
+ case Expression::Id::SIMDExtractId: {
+ HASH(SIMDExtract, op);
+ HASH(SIMDExtract, idx);
+ PUSH(SIMDExtract, vec);
+ break;
+ }
+ case Expression::Id::SIMDReplaceId: {
+ HASH(SIMDReplace, op);
+ HASH(SIMDReplace, idx);
+ PUSH(SIMDReplace, vec);
+ PUSH(SIMDReplace, value);
+ break;
+ }
+ case Expression::Id::SIMDShuffleId: {
+ for (size_t i = 0; i < 16; ++i) {
+ HASH(SIMDShuffle, mask[i]);
+ }
+ PUSH(SIMDShuffle, left);
+ PUSH(SIMDShuffle, right);
+ break;
+ }
+ case Expression::Id::SIMDBitselectId: {
+ PUSH(SIMDBitselect, left);
+ PUSH(SIMDBitselect, right);
+ PUSH(SIMDBitselect, cond);
+ break;
+ }
+ case Expression::Id::SIMDShiftId: {
+ HASH(SIMDShift, op);
+ PUSH(SIMDShift, vec);
+ PUSH(SIMDShift, shift);
+ break;
+ }
case Expression::Id::ConstId: {
auto* c = curr->cast<Const>();
hash(c->type);
- auto bits = c->value.getBits();
- if (getTypeSize(c->type) == 4) {
- hash(HashType(bits));
- } else {
- hash64(bits);
- }
+ hash(std::hash<Literal>()(c->value));
break;
}
case Expression::Id::UnaryId: {
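
The Const case above stops branching on getTypeSize, since a v128 payload no longer fits the 64-bit getBits path, and instead defers to std::hash<Literal>. The actual specialization ships alongside literal.h and is not shown in this hunk; a rough sketch of what such a hasher has to cover (the struct name and the byte-mixing constant are illustrative):

// Sketch, not the shipped std::hash<Literal> specialization: fold the type
// tag plus either the 64-bit scalar bits or all 16 bytes of a v128 payload.
#include <cstddef>
#include <cstdint>
#include <functional>
#include "literal.h"

struct LiteralHashSketch {
  size_t operator()(const wasm::Literal& lit) const {
    size_t h = std::hash<int>()(int(lit.type));
    if (lit.type == wasm::Type::v128) {
      for (uint8_t byte : lit.getv128()) {
        h = h * 31 + byte;                          // simple byte-wise mixing
      }
    } else {
      h = h * 31 + std::hash<int64_t>()(lit.getBits());
    }
    return h;
  }
};
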
diff --git a/src/ir/ReFinalize.cpp b/src/ir/ReFinalize.cpp
index 31140837f..68526678a 100644
--- a/src/ir/ReFinalize.cpp
+++ b/src/ir/ReFinalize.cpp
@@ -137,6 +137,11 @@ void ReFinalize::visitAtomicRMW(AtomicRMW* curr) { curr->finalize(); }
void ReFinalize::visitAtomicCmpxchg(AtomicCmpxchg* curr) { curr->finalize(); }
void ReFinalize::visitAtomicWait(AtomicWait* curr) { curr->finalize(); }
void ReFinalize::visitAtomicWake(AtomicWake* curr) { curr->finalize(); }
+void ReFinalize::visitSIMDExtract(SIMDExtract* curr) { curr->finalize(); }
+void ReFinalize::visitSIMDReplace(SIMDReplace* curr) { curr->finalize(); }
+void ReFinalize::visitSIMDShuffle(SIMDShuffle* curr) { curr->finalize(); }
+void ReFinalize::visitSIMDBitselect(SIMDBitselect* curr) { curr->finalize(); }
+void ReFinalize::visitSIMDShift(SIMDShift* curr) { curr->finalize(); }
void ReFinalize::visitConst(Const* curr) { curr->finalize(); }
void ReFinalize::visitUnary(Unary* curr) { curr->finalize(); }
void ReFinalize::visitBinary(Binary* curr) { curr->finalize(); }
@@ -195,4 +200,3 @@ void ReFinalize::replaceUntaken(Expression* value, Expression* condition) {
}
} // namespace wasm
-
diff --git a/src/ir/cost.h b/src/ir/cost.h
index e28f535e7..354f663e1 100644
--- a/src/ir/cost.h
+++ b/src/ir/cost.h
@@ -152,6 +152,39 @@ struct CostAnalyzer : public Visitor<CostAnalyzer, Index> {
case TruncSatUFloat64ToInt64: ret = 1; break;
case SqrtFloat32:
case SqrtFloat64: ret = 2; break;
+ case SplatVecI8x16:
+ case SplatVecI16x8:
+ case SplatVecI32x4:
+ case SplatVecI64x2:
+ case SplatVecF32x4:
+ case SplatVecF64x2:
+ case NotVec128:
+ case NegVecI8x16:
+ case AnyTrueVecI8x16:
+ case AllTrueVecI8x16:
+ case NegVecI16x8:
+ case AnyTrueVecI16x8:
+ case AllTrueVecI16x8:
+ case NegVecI32x4:
+ case AnyTrueVecI32x4:
+ case AllTrueVecI32x4:
+ case NegVecI64x2:
+ case AnyTrueVecI64x2:
+ case AllTrueVecI64x2:
+ case AbsVecF32x4:
+ case NegVecF32x4:
+ case SqrtVecF32x4:
+ case AbsVecF64x2:
+ case NegVecF64x2:
+ case SqrtVecF64x2:
+ case TruncSatSVecF32x4ToVecI32x4:
+ case TruncSatUVecF32x4ToVecI32x4:
+ case TruncSatSVecF64x2ToVecI64x2:
+ case TruncSatUVecF64x2ToVecI64x2:
+ case ConvertSVecI32x4ToVecF32x4:
+ case ConvertUVecI32x4ToVecF32x4:
+ case ConvertSVecI64x2ToVecF64x2:
+ case ConvertUVecI64x2ToVecF64x2: assert(false && "v128 not implemented yet");
case InvalidUnary: WASM_UNREACHABLE();
}
return ret + visit(curr->value);
@@ -235,6 +268,82 @@ struct CostAnalyzer : public Visitor<CostAnalyzer, Index> {
case NeFloat32: ret = 1; break;
case EqFloat64: ret = 1; break;
case NeFloat64: ret = 1; break;
+ case EqVecI8x16:
+ case NeVecI8x16:
+ case LtSVecI8x16:
+ case LtUVecI8x16:
+ case LeSVecI8x16:
+ case LeUVecI8x16:
+ case GtSVecI8x16:
+ case GtUVecI8x16:
+ case GeSVecI8x16:
+ case GeUVecI8x16:
+ case EqVecI16x8:
+ case NeVecI16x8:
+ case LtSVecI16x8:
+ case LtUVecI16x8:
+ case LeSVecI16x8:
+ case LeUVecI16x8:
+ case GtSVecI16x8:
+ case GtUVecI16x8:
+ case GeSVecI16x8:
+ case GeUVecI16x8:
+ case EqVecI32x4:
+ case NeVecI32x4:
+ case LtSVecI32x4:
+ case LtUVecI32x4:
+ case LeSVecI32x4:
+ case LeUVecI32x4:
+ case GtSVecI32x4:
+ case GtUVecI32x4:
+ case GeSVecI32x4:
+ case GeUVecI32x4:
+ case EqVecF32x4:
+ case NeVecF32x4:
+ case LtVecF32x4:
+ case LeVecF32x4:
+ case GtVecF32x4:
+ case GeVecF32x4:
+ case EqVecF64x2:
+ case NeVecF64x2:
+ case LtVecF64x2:
+ case LeVecF64x2:
+ case GtVecF64x2:
+ case GeVecF64x2:
+ case AndVec128:
+ case OrVec128:
+ case XorVec128:
+ case AddVecI8x16:
+ case AddSatSVecI8x16:
+ case AddSatUVecI8x16:
+ case SubVecI8x16:
+ case SubSatSVecI8x16:
+ case SubSatUVecI8x16:
+ case MulVecI8x16:
+ case AddVecI16x8:
+ case AddSatSVecI16x8:
+ case AddSatUVecI16x8:
+ case SubVecI16x8:
+ case SubSatSVecI16x8:
+ case SubSatUVecI16x8:
+ case MulVecI16x8:
+ case AddVecI32x4:
+ case SubVecI32x4:
+ case MulVecI32x4:
+ case AddVecI64x2:
+ case SubVecI64x2:
+ case AddVecF32x4:
+ case SubVecF32x4:
+ case MulVecF32x4:
+ case DivVecF32x4:
+ case MinVecF32x4:
+ case MaxVecF32x4:
+ case AddVecF64x2:
+ case SubVecF64x2:
+ case MulVecF64x2:
+ case DivVecF64x2:
+ case MinVecF64x2:
+ case MaxVecF64x2: assert(false && "v128 not implemented yet");
case InvalidBinary: WASM_UNREACHABLE();
}
return ret + visit(curr->left) + visit(curr->right);
diff --git a/src/ir/literal-utils.h b/src/ir/literal-utils.h
index e00f05c52..543c34e9f 100644
--- a/src/ir/literal-utils.h
+++ b/src/ir/literal-utils.h
@@ -23,26 +23,9 @@ namespace wasm {
namespace LiteralUtils {
-inline Literal makeLiteralFromInt32(int32_t x, Type type) {
- switch (type) {
- case i32: return Literal(int32_t(x)); break;
- case i64: return Literal(int64_t(x)); break;
- case f32: return Literal(float(x)); break;
- case f64: return Literal(double(x)); break;
- case v128: assert(false && "v128 not implemented yet");
- case none:
- case unreachable: WASM_UNREACHABLE();
- }
- WASM_UNREACHABLE();
-}
-
-inline Literal makeLiteralZero(Type type) {
- return makeLiteralFromInt32(0, type);
-}
-
inline Expression* makeFromInt32(int32_t x, Type type, Module& wasm) {
auto* ret = wasm.allocator.alloc<Const>();
- ret->value = makeLiteralFromInt32(x, type);
+ ret->value = Literal::makeFromInt32(x, type);
ret->type = type;
return ret;
}
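
Callers of the removed LiteralUtils helpers now go through the static members on Literal, which also cover v128 instead of asserting. A short usage sketch (the function name is illustrative):

// Usage sketch for the relocated helpers: makeFromInt32 places the value in
// the scalar (or in lane 0 of an i32x4 vector for v128); makeZero zeroes it.
#include "literal.h"

inline void literalHelpersExample() {
  wasm::Literal one64    = wasm::Literal::makeFromInt32(1, wasm::Type::i64);
  wasm::Literal zeroV128 = wasm::Literal::makeZero(wasm::Type::v128); // 16 zero bytes
  (void)one64;
  (void)zeroV128;
}
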
diff --git a/src/ir/utils.h b/src/ir/utils.h
index a4082b6bc..afb63b01c 100644
--- a/src/ir/utils.h
+++ b/src/ir/utils.h
@@ -129,6 +129,11 @@ struct ReFinalize : public WalkerPass<PostWalker<ReFinalize, OverriddenVisitor<R
void visitAtomicCmpxchg(AtomicCmpxchg* curr);
void visitAtomicWait(AtomicWait* curr);
void visitAtomicWake(AtomicWake* curr);
+ void visitSIMDExtract(SIMDExtract* curr);
+ void visitSIMDReplace(SIMDReplace* curr);
+ void visitSIMDShuffle(SIMDShuffle* curr);
+ void visitSIMDBitselect(SIMDBitselect* curr);
+ void visitSIMDShift(SIMDShift* curr);
void visitConst(Const* curr);
void visitUnary(Unary* curr);
void visitBinary(Binary* curr);
@@ -176,6 +181,11 @@ struct ReFinalizeNode : public OverriddenVisitor<ReFinalizeNode> {
void visitAtomicCmpxchg(AtomicCmpxchg* curr) { curr->finalize(); }
void visitAtomicWait(AtomicWait* curr) { curr->finalize(); }
void visitAtomicWake(AtomicWake* curr) { curr->finalize(); }
+ void visitSIMDExtract(SIMDExtract* curr) { curr->finalize(); }
+ void visitSIMDReplace(SIMDReplace* curr) { curr->finalize(); }
+ void visitSIMDShuffle(SIMDShuffle* curr) { curr->finalize(); }
+ void visitSIMDBitselect(SIMDBitselect* curr) { curr->finalize(); }
+ void visitSIMDShift(SIMDShift* curr) { curr->finalize(); }
void visitConst(Const* curr) { curr->finalize(); }
void visitUnary(Unary* curr) { curr->finalize(); }
void visitBinary(Binary* curr) { curr->finalize(); }
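
Because ReFinalizeNode (and the walker-based ReFinalize above it) builds on OverriddenVisitor, a missing SIMD case is a compile error rather than a silent fallthrough, which is why all five new node classes are spelled out here. A small usage sketch of re-finalizing one such node in place (the free function is illustrative):

// Usage sketch: after mutating a SIMD node's children, recompute its type;
// ReFinalizeNode::visit dispatches to the visitSIMDShift overload added above.
#include "ir/utils.h"

inline void refinalizeShift(wasm::SIMDShift* shift) {
  wasm::ReFinalizeNode().visit(shift);
}
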
diff --git a/src/js/binaryen.js-post.js b/src/js/binaryen.js-post.js
index b63427935..11f721d54 100644
--- a/src/js/binaryen.js-post.js
+++ b/src/js/binaryen.js-post.js
@@ -22,6 +22,14 @@ function i32sToStack(i32s) {
return ret;
}
+function i8sToStack(i8s) {
+ var ret = stackAlloc(i8s.length);
+ for (var i = 0; i < i8s.length; i++) {
+ HEAP8[ret + i] = i8s[i];
+ }
+ return ret;
+}
+
// Types
Module['none'] = Module['_BinaryenTypeNone']();
Module['i32'] = Module['_BinaryenTypeInt32']();
@@ -60,6 +68,11 @@ Module['AtomicCmpxchgId'] = Module['_BinaryenAtomicCmpxchgId']();
Module['AtomicRMWId'] = Module['_BinaryenAtomicRMWId']();
Module['AtomicWaitId'] = Module['_BinaryenAtomicWaitId']();
Module['AtomicWakeId'] = Module['_BinaryenAtomicWakeId']();
+Module['SIMDExtractId'] = Module['_BinaryenSIMDExtractId']();
+Module['SIMDReplaceId'] = Module['_BinaryenSIMDReplaceId']();
+Module['SIMDShuffleId'] = Module['_BinaryenSIMDShuffleId']();
+Module['SIMDBitselectId'] = Module['_BinaryenSIMDBitselectId']();
+Module['SIMDShiftId'] = Module['_BinaryenSIMDShiftId']();
// External kinds
Module['ExternalFunction'] = Module['_BinaryenExternalFunction']();
@@ -212,6 +225,141 @@ Module['AtomicRMWAnd'] = Module['_BinaryenAtomicRMWAnd']();
Module['AtomicRMWOr'] = Module['_BinaryenAtomicRMWOr']();
Module['AtomicRMWXor'] = Module['_BinaryenAtomicRMWXor']();
Module['AtomicRMWXchg'] = Module['_BinaryenAtomicRMWXchg']();
+Module['SplatVecI8x16'] = Module['_BinaryenSplatVecI8x16']();
+Module['ExtractLaneSVecI8x16'] = Module['_BinaryenExtractLaneSVecI8x16']();
+Module['ExtractLaneUVecI8x16'] = Module['_BinaryenExtractLaneUVecI8x16']();
+Module['ReplaceLaneVecI8x16'] = Module['_BinaryenReplaceLaneVecI8x16']();
+Module['SplatVecI16x8'] = Module['_BinaryenSplatVecI16x8']();
+Module['ExtractLaneSVecI16x8'] = Module['_BinaryenExtractLaneSVecI16x8']();
+Module['ExtractLaneUVecI16x8'] = Module['_BinaryenExtractLaneUVecI16x8']();
+Module['ReplaceLaneVecI16x8'] = Module['_BinaryenReplaceLaneVecI16x8']();
+Module['SplatVecI32x4'] = Module['_BinaryenSplatVecI32x4']();
+Module['ExtractLaneVecI32x4'] = Module['_BinaryenExtractLaneVecI32x4']();
+Module['ReplaceLaneVecI32x4'] = Module['_BinaryenReplaceLaneVecI32x4']();
+Module['SplatVecI64x2'] = Module['_BinaryenSplatVecI64x2']();
+Module['ExtractLaneVecI64x2'] = Module['_BinaryenExtractLaneVecI64x2']();
+Module['ReplaceLaneVecI64x2'] = Module['_BinaryenReplaceLaneVecI64x2']();
+Module['SplatVecF32x4'] = Module['_BinaryenSplatVecF32x4']();
+Module['ExtractLaneVecF32x4'] = Module['_BinaryenExtractLaneVecF32x4']();
+Module['ReplaceLaneVecF32x4'] = Module['_BinaryenReplaceLaneVecF32x4']();
+Module['SplatVecF64x2'] = Module['_BinaryenSplatVecF64x2']();
+Module['ExtractLaneVecF64x2'] = Module['_BinaryenExtractLaneVecF64x2']();
+Module['ReplaceLaneVecF64x2'] = Module['_BinaryenReplaceLaneVecF64x2']();
+Module['EqVecI8x16'] = Module['_BinaryenEqVecI8x16']();
+Module['NeVecI8x16'] = Module['_BinaryenNeVecI8x16']();
+Module['LtSVecI8x16'] = Module['_BinaryenLtSVecI8x16']();
+Module['LtUVecI8x16'] = Module['_BinaryenLtUVecI8x16']();
+Module['GtSVecI8x16'] = Module['_BinaryenGtSVecI8x16']();
+Module['GtUVecI8x16'] = Module['_BinaryenGtUVecI8x16']();
+Module['LeSVecI8x16'] = Module['_BinaryenLeSVecI8x16']();
+Module['LeUVecI8x16'] = Module['_BinaryenLeUVecI8x16']();
+Module['GeSVecI8x16'] = Module['_BinaryenGeSVecI8x16']();
+Module['GeUVecI8x16'] = Module['_BinaryenGeUVecI8x16']();
+Module['EqVecI16x8'] = Module['_BinaryenEqVecI16x8']();
+Module['NeVecI16x8'] = Module['_BinaryenNeVecI16x8']();
+Module['LtSVecI16x8'] = Module['_BinaryenLtSVecI16x8']();
+Module['LtUVecI16x8'] = Module['_BinaryenLtUVecI16x8']();
+Module['GtSVecI16x8'] = Module['_BinaryenGtSVecI16x8']();
+Module['GtUVecI16x8'] = Module['_BinaryenGtUVecI16x8']();
+Module['LeSVecI16x8'] = Module['_BinaryenLeSVecI16x8']();
+Module['LeUVecI16x8'] = Module['_BinaryenLeUVecI16x8']();
+Module['GeSVecI16x8'] = Module['_BinaryenGeSVecI16x8']();
+Module['GeUVecI16x8'] = Module['_BinaryenGeUVecI16x8']();
+Module['EqVecI32x4'] = Module['_BinaryenEqVecI32x4']();
+Module['NeVecI32x4'] = Module['_BinaryenNeVecI32x4']();
+Module['LtSVecI32x4'] = Module['_BinaryenLtSVecI32x4']();
+Module['LtUVecI32x4'] = Module['_BinaryenLtUVecI32x4']();
+Module['GtSVecI32x4'] = Module['_BinaryenGtSVecI32x4']();
+Module['GtUVecI32x4'] = Module['_BinaryenGtUVecI32x4']();
+Module['LeSVecI32x4'] = Module['_BinaryenLeSVecI32x4']();
+Module['LeUVecI32x4'] = Module['_BinaryenLeUVecI32x4']();
+Module['GeSVecI32x4'] = Module['_BinaryenGeSVecI32x4']();
+Module['GeUVecI32x4'] = Module['_BinaryenGeUVecI32x4']();
+Module['EqVecF32x4'] = Module['_BinaryenEqVecF32x4']();
+Module['NeVecF32x4'] = Module['_BinaryenNeVecF32x4']();
+Module['LtVecF32x4'] = Module['_BinaryenLtVecF32x4']();
+Module['GtVecF32x4'] = Module['_BinaryenGtVecF32x4']();
+Module['LeVecF32x4'] = Module['_BinaryenLeVecF32x4']();
+Module['GeVecF32x4'] = Module['_BinaryenGeVecF32x4']();
+Module['EqVecF64x2'] = Module['_BinaryenEqVecF64x2']();
+Module['NeVecF64x2'] = Module['_BinaryenNeVecF64x2']();
+Module['LtVecF64x2'] = Module['_BinaryenLtVecF64x2']();
+Module['GtVecF64x2'] = Module['_BinaryenGtVecF64x2']();
+Module['LeVecF64x2'] = Module['_BinaryenLeVecF64x2']();
+Module['GeVecF64x2'] = Module['_BinaryenGeVecF64x2']();
+Module['NotVec128'] = Module['_BinaryenNotVec128']();
+Module['AndVec128'] = Module['_BinaryenAndVec128']();
+Module['OrVec128'] = Module['_BinaryenOrVec128']();
+Module['XorVec128'] = Module['_BinaryenXorVec128']();
+Module['NegVecI8x16'] = Module['_BinaryenNegVecI8x16']();
+Module['AnyTrueVecI8x16'] = Module['_BinaryenAnyTrueVecI8x16']();
+Module['AllTrueVecI8x16'] = Module['_BinaryenAllTrueVecI8x16']();
+Module['ShlVecI8x16'] = Module['_BinaryenShlVecI8x16']();
+Module['ShrSVecI8x16'] = Module['_BinaryenShrSVecI8x16']();
+Module['ShrUVecI8x16'] = Module['_BinaryenShrUVecI8x16']();
+Module['AddVecI8x16'] = Module['_BinaryenAddVecI8x16']();
+Module['AddSatSVecI8x16'] = Module['_BinaryenAddSatSVecI8x16']();
+Module['AddSatUVecI8x16'] = Module['_BinaryenAddSatUVecI8x16']();
+Module['SubVecI8x16'] = Module['_BinaryenSubVecI8x16']();
+Module['SubSatSVecI8x16'] = Module['_BinaryenSubSatSVecI8x16']();
+Module['SubSatUVecI8x16'] = Module['_BinaryenSubSatUVecI8x16']();
+Module['MulVecI8x16'] = Module['_BinaryenMulVecI8x16']();
+Module['NegVecI16x8'] = Module['_BinaryenNegVecI16x8']();
+Module['AnyTrueVecI16x8'] = Module['_BinaryenAnyTrueVecI16x8']();
+Module['AllTrueVecI16x8'] = Module['_BinaryenAllTrueVecI16x8']();
+Module['ShlVecI16x8'] = Module['_BinaryenShlVecI16x8']();
+Module['ShrSVecI16x8'] = Module['_BinaryenShrSVecI16x8']();
+Module['ShrUVecI16x8'] = Module['_BinaryenShrUVecI16x8']();
+Module['AddVecI16x8'] = Module['_BinaryenAddVecI16x8']();
+Module['AddSatSVecI16x8'] = Module['_BinaryenAddSatSVecI16x8']();
+Module['AddSatUVecI16x8'] = Module['_BinaryenAddSatUVecI16x8']();
+Module['SubVecI16x8'] = Module['_BinaryenSubVecI16x8']();
+Module['SubSatSVecI16x8'] = Module['_BinaryenSubSatSVecI16x8']();
+Module['SubSatUVecI16x8'] = Module['_BinaryenSubSatUVecI16x8']();
+Module['MulVecI16x8'] = Module['_BinaryenMulVecI16x8']();
+Module['NegVecI32x4'] = Module['_BinaryenNegVecI32x4']();
+Module['AnyTrueVecI32x4'] = Module['_BinaryenAnyTrueVecI32x4']();
+Module['AllTrueVecI32x4'] = Module['_BinaryenAllTrueVecI32x4']();
+Module['ShlVecI32x4'] = Module['_BinaryenShlVecI32x4']();
+Module['ShrSVecI32x4'] = Module['_BinaryenShrSVecI32x4']();
+Module['ShrUVecI32x4'] = Module['_BinaryenShrUVecI32x4']();
+Module['AddVecI32x4'] = Module['_BinaryenAddVecI32x4']();
+Module['SubVecI32x4'] = Module['_BinaryenSubVecI32x4']();
+Module['MulVecI32x4'] = Module['_BinaryenMulVecI32x4']();
+Module['NegVecI64x2'] = Module['_BinaryenNegVecI64x2']();
+Module['AnyTrueVecI64x2'] = Module['_BinaryenAnyTrueVecI64x2']();
+Module['AllTrueVecI64x2'] = Module['_BinaryenAllTrueVecI64x2']();
+Module['ShlVecI64x2'] = Module['_BinaryenShlVecI64x2']();
+Module['ShrSVecI64x2'] = Module['_BinaryenShrSVecI64x2']();
+Module['ShrUVecI64x2'] = Module['_BinaryenShrUVecI64x2']();
+Module['AddVecI64x2'] = Module['_BinaryenAddVecI64x2']();
+Module['SubVecI64x2'] = Module['_BinaryenSubVecI64x2']();
+Module['AbsVecF32x4'] = Module['_BinaryenAbsVecF32x4']();
+Module['NegVecF32x4'] = Module['_BinaryenNegVecF32x4']();
+Module['SqrtVecF32x4'] = Module['_BinaryenSqrtVecF32x4']();
+Module['AddVecF32x4'] = Module['_BinaryenAddVecF32x4']();
+Module['SubVecF32x4'] = Module['_BinaryenSubVecF32x4']();
+Module['MulVecF32x4'] = Module['_BinaryenMulVecF32x4']();
+Module['DivVecF32x4'] = Module['_BinaryenDivVecF32x4']();
+Module['MinVecF32x4'] = Module['_BinaryenMinVecF32x4']();
+Module['MaxVecF32x4'] = Module['_BinaryenMaxVecF32x4']();
+Module['AbsVecF64x2'] = Module['_BinaryenAbsVecF64x2']();
+Module['NegVecF64x2'] = Module['_BinaryenNegVecF64x2']();
+Module['SqrtVecF64x2'] = Module['_BinaryenSqrtVecF64x2']();
+Module['AddVecF64x2'] = Module['_BinaryenAddVecF64x2']();
+Module['SubVecF64x2'] = Module['_BinaryenSubVecF64x2']();
+Module['MulVecF64x2'] = Module['_BinaryenMulVecF64x2']();
+Module['DivVecF64x2'] = Module['_BinaryenDivVecF64x2']();
+Module['MinVecF64x2'] = Module['_BinaryenMinVecF64x2']();
+Module['MaxVecF64x2'] = Module['_BinaryenMaxVecF64x2']();
+Module['TruncSatSVecF32x4ToVecI32x4'] = Module['_BinaryenTruncSatSVecF32x4ToVecI32x4']();
+Module['TruncSatUVecF32x4ToVecI32x4'] = Module['_BinaryenTruncSatUVecF32x4ToVecI32x4']();
+Module['TruncSatSVecF64x2ToVecI64x2'] = Module['_BinaryenTruncSatSVecF64x2ToVecI64x2']();
+Module['TruncSatUVecF64x2ToVecI64x2'] = Module['_BinaryenTruncSatUVecF64x2ToVecI64x2']();
+Module['ConvertSVecI32x4ToVecF32x4'] = Module['_BinaryenConvertSVecI32x4ToVecF32x4']();
+Module['ConvertUVecI32x4ToVecF32x4'] = Module['_BinaryenConvertUVecI32x4ToVecF32x4']();
+Module['ConvertSVecI64x2ToVecF64x2'] = Module['_BinaryenConvertSVecI64x2ToVecF64x2']();
+Module['ConvertUVecI64x2ToVecF64x2'] = Module['_BinaryenConvertUVecI64x2ToVecF64x2']();
// 'Module' interface
Module['Module'] = function(module) {
@@ -1055,6 +1203,455 @@ function wrapModule(module, self) {
},
};
+ self['v128'] = {
+ 'load': function(offset, align, ptr) {
+ return Module['_BinaryenLoad'](module, 16, false, offset, align, Module['v128'], ptr);
+ },
+ 'store': function(offset, align, ptr, value) {
+ return Module['_BinaryenStore'](module, 16, offset, align, ptr, value, Module['v128']);
+ },
+ 'const': function(i8s) {
+ return preserveStack(function() {
+ Module['_BinaryenLiteralVec128'](temp, i8sToStack(i8s));
+ return Module['_BinaryenConst'](module, temp);
+ });
+ },
+ 'not': function(value) {
+ return Module['_BinaryenUnary'](module, Module['NotVec128'], value);
+ },
+ 'and': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['AndVec128'], left, right);
+ },
+ 'or': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['OrVec128'], left, right);
+ },
+ 'xor': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['XorVec128'], left, right);
+ },
+ 'bitselect': function(left, right, cond) {
+ return Module['_BinaryenSIMDBitselect'](module, left, right, cond);
+ }
+ };
+
+ self['v8x16'] = {
+ 'shuffle': function(left, right, mask) {
+ return preserveStack(function() {
+ return Module['_BinaryenSIMDShuffle'](module, left, right, i8sToStack(mask));
+ });
+ },
+ };
+
+ self['i8x16'] = {
+ 'splat': function(value) {
+ return Module['_BinaryenUnary'](module, Module['SplatVecI8x16'], value);
+ },
+ 'extract_lane_s': function(vec, idx) {
+ return Module['_BinaryenSIMDExtract'](module, Module['ExtractLaneSVecI8x16'], vec, idx);
+ },
+ 'extract_lane_u': function(vec, idx) {
+ return Module['_BinaryenSIMDExtract'](module, Module['ExtractLaneUVecI8x16'], vec, idx);
+ },
+ 'replace_lane': function(vec, idx, value) {
+ return Module['_BinaryenSIMDReplace'](module, Module['ReplaceLaneVecI8x16'], vec, idx, value);
+ },
+ 'eq': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['EqVecI8x16'], left, right);
+ },
+ 'ne': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['NeVecI8x16'], left, right);
+ },
+ 'lt_s': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['LtSVecI8x16'], left, right);
+ },
+ 'lt_u': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['LtUVecI8x16'], left, right);
+ },
+ 'gt_s': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['GtSVecI8x16'], left, right);
+ },
+ 'gt_u': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['GtUVecI8x16'], left, right);
+ },
+ 'le_s': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['LeSVecI8x16'], left, right);
+ },
+ 'le_u': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['LeUVecI8x16'], left, right);
+ },
+ 'ge_s': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['GeSVecI8x16'], left, right);
+ },
+ 'ge_u': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['GeUVecI8x16'], left, right);
+ },
+ 'neg': function(value) {
+ return Module['_BinaryenUnary'](module, Module['NegVecI8x16'], value);
+ },
+ 'any_true': function(value) {
+ return Module['_BinaryenUnary'](module, Module['AnyTrueVecI8x16'], value);
+ },
+ 'all_true': function(value) {
+ return Module['_BinaryenUnary'](module, Module['AllTrueVecI8x16'], value);
+ },
+ 'shl': function(vec, shift) {
+ return Module['_BinaryenSIMDShift'](module, Module['ShlVecI8x16'], vec, shift);
+ },
+ 'shr_s': function(vec, shift) {
+ return Module['_BinaryenSIMDShift'](module, Module['ShrSVecI8x16'], vec, shift);
+ },
+ 'shr_u': function(vec, shift) {
+ return Module['_BinaryenSIMDShift'](module, Module['ShrUVecI8x16'], vec, shift);
+ },
+ 'add': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['AddVecI8x16'], left, right);
+ },
+ 'add_saturate_s': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['AddSatSVecI8x16'], left, right);
+ },
+ 'add_saturate_u': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['AddSatUVecI8x16'], left, right);
+ },
+ 'sub': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['SubVecI8x16'], left, right);
+ },
+ 'sub_saturate_s': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['SubSatSVecI8x16'], left, right);
+ },
+ 'sub_saturate_u': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['SubSatUVecI8x16'], left, right);
+ },
+ 'mul': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['MulVecI8x16'], left, right);
+ },
+ };
+
+ self['i16x8'] = {
+ 'splat': function(value) {
+ return Module['_BinaryenUnary'](module, Module['SplatVecI16x8'], value);
+ },
+ 'extract_lane_s': function(vec, idx) {
+ return Module['_BinaryenSIMDExtract'](module, Module['ExtractLaneSVecI16x8'], vec, idx);
+ },
+ 'extract_lane_u': function(vec, idx) {
+ return Module['_BinaryenSIMDExtract'](module, Module['ExtractLaneUVecI16x8'], vec, idx);
+ },
+ 'replace_lane': function(vec, idx, value) {
+ return Module['_BinaryenSIMDReplace'](module, Module['ReplaceLaneVecI16x8'], vec, idx, value);
+ },
+ 'eq': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['EqVecI16x8'], left, right);
+ },
+ 'ne': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['NeVecI16x8'], left, right);
+ },
+ 'lt_s': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['LtSVecI16x8'], left, right);
+ },
+ 'lt_u': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['LtUVecI16x8'], left, right);
+ },
+ 'gt_s': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['GtSVecI16x8'], left, right);
+ },
+ 'gt_u': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['GtUVecI16x8'], left, right);
+ },
+ 'le_s': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['LeSVecI16x8'], left, right);
+ },
+ 'le_u': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['LeUVecI16x8'], left, right);
+ },
+ 'ge_s': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['GeSVecI16x8'], left, right);
+ },
+ 'ge_u': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['GeUVecI16x8'], left, right);
+ },
+ 'neg': function(value) {
+ return Module['_BinaryenUnary'](module, Module['NegVecI16x8'], value);
+ },
+ 'any_true': function(value) {
+ return Module['_BinaryenUnary'](module, Module['AnyTrueVecI16x8'], value);
+ },
+ 'all_true': function(value) {
+ return Module['_BinaryenUnary'](module, Module['AllTrueVecI16x8'], value);
+ },
+ 'shl': function(vec, shift) {
+ return Module['_BinaryenSIMDShift'](module, Module['ShlVecI16x8'], vec, shift);
+ },
+ 'shr_s': function(vec, shift) {
+ return Module['_BinaryenSIMDShift'](module, Module['ShrSVecI16x8'], vec, shift);
+ },
+ 'shr_u': function(vec, shift) {
+ return Module['_BinaryenSIMDShift'](module, Module['ShrUVecI16x8'], vec, shift);
+ },
+ 'add': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['AddVecI16x8'], left, right);
+ },
+ 'add_saturate_s': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['AddSatSVecI16x8'], left, right);
+ },
+ 'add_saturate_u': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['AddSatUVecI16x8'], left, right);
+ },
+ 'sub': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['SubVecI16x8'], left, right);
+ },
+ 'sub_saturate_s': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['SubSatSVecI16x8'], left, right);
+ },
+ 'sub_saturate_u': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['SubSatUVecI16x8'], left, right);
+ },
+ 'mul': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['MulVecI16x8'], left, right);
+ },
+ };
+
+ self['i32x4'] = {
+ 'splat': function(value) {
+ return Module['_BinaryenUnary'](module, Module['SplatVecI32x4'], value);
+ },
+ 'extract_lane': function(vec, idx) {
+ return Module['_BinaryenSIMDExtract'](module, Module['ExtractLaneVecI32x4'], vec, idx);
+ },
+ 'replace_lane': function(vec, idx, value) {
+ return Module['_BinaryenSIMDReplace'](module, Module['ReplaceLaneVecI32x4'], vec, idx, value);
+ },
+ 'eq': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['EqVecI32x4'], left, right);
+ },
+ 'ne': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['NeVecI32x4'], left, right);
+ },
+ 'lt_s': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['LtSVecI32x4'], left, right);
+ },
+ 'lt_u': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['LtUVecI32x4'], left, right);
+ },
+ 'gt_s': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['GtSVecI32x4'], left, right);
+ },
+ 'gt_u': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['GtUVecI32x4'], left, right);
+ },
+ 'le_s': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['LeSVecI32x4'], left, right);
+ },
+ 'le_u': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['LeUVecI32x4'], left, right);
+ },
+ 'ge_s': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['GeSVecI32x4'], left, right);
+ },
+ 'ge_u': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['GeUVecI32x4'], left, right);
+ },
+ 'neg': function(value) {
+ return Module['_BinaryenUnary'](module, Module['NegVecI32x4'], value);
+ },
+ 'any_true': function(value) {
+ return Module['_BinaryenUnary'](module, Module['AnyTrueVecI32x4'], value);
+ },
+ 'all_true': function(value) {
+ return Module['_BinaryenUnary'](module, Module['AllTrueVecI32x4'], value);
+ },
+ 'shl': function(vec, shift) {
+ return Module['_BinaryenSIMDShift'](module, Module['ShlVecI32x4'], vec, shift);
+ },
+ 'shr_s': function(vec, shift) {
+ return Module['_BinaryenSIMDShift'](module, Module['ShrSVecI32x4'], vec, shift);
+ },
+ 'shr_u': function(vec, shift) {
+ return Module['_BinaryenSIMDShift'](module, Module['ShrUVecI32x4'], vec, shift);
+ },
+ 'add': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['AddVecI32x4'], left, right);
+ },
+ 'sub': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['SubVecI32x4'], left, right);
+ },
+ 'mul': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['MulVecI32x4'], left, right);
+ },
+ 'trunc_s/f32x4:sat': function(value) {
+ return Module['_BinaryenUnary'](module, Module['TruncSatSVecF32x4ToVecI32x4'], value);
+ },
+ 'trunc_u/f32x4:sat': function(value) {
+ return Module['_BinaryenUnary'](module, Module['TruncSatUVecF32x4ToVecI32x4'], value);
+ },
+ };
+
+ self['i64x2'] = {
+ 'splat': function(value) {
+ return Module['_BinaryenUnary'](module, Module['SplatVecI64x2'], value);
+ },
+ 'extract_lane': function(vec, idx) {
+ return Module['_BinaryenSIMDExtract'](module, Module['ExtractLaneVecI64x2'], vec, idx);
+ },
+ 'replace_lane': function(vec, idx, value) {
+ return Module['_BinaryenSIMDReplace'](module, Module['ReplaceLaneVecI64x2'], vec, idx, value);
+ },
+ 'neg': function(value) {
+ return Module['_BinaryenUnary'](module, Module['NegVecI64x2'], value);
+ },
+ 'any_true': function(value) {
+ return Module['_BinaryenUnary'](module, Module['AnyTrueVecI64x2'], value);
+ },
+ 'all_true': function(value) {
+ return Module['_BinaryenUnary'](module, Module['AllTrueVecI64x2'], value);
+ },
+ 'shl': function(vec, shift) {
+ return Module['_BinaryenSIMDShift'](module, Module['ShlVecI64x2'], vec, shift);
+ },
+ 'shr_s': function(vec, shift) {
+ return Module['_BinaryenSIMDShift'](module, Module['ShrSVecI64x2'], vec, shift);
+ },
+ 'shr_u': function(vec, shift) {
+ return Module['_BinaryenSIMDShift'](module, Module['ShrUVecI64x2'], vec, shift);
+ },
+ 'add': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['AddVecI64x2'], left, right);
+ },
+ 'sub': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['SubVecI64x2'], left, right);
+ },
+ 'trunc_s/f64x2:sat': function(value) {
+ return Module['_BinaryenUnary'](module, Module['TruncSatSVecF64x2ToVecI64x2'], value);
+ },
+ 'trunc_u/f64x2:sat': function(value) {
+ return Module['_BinaryenUnary'](module, Module['TruncSatUVecF64x2ToVecI64x2'], value);
+ },
+ };
+
+ self['f32x4'] = {
+ 'splat': function(value) {
+ return Module['_BinaryenUnary'](module, Module['SplatVecF32x4'], value);
+ },
+ 'extract_lane': function(vec, idx) {
+ return Module['_BinaryenSIMDExtract'](module, Module['ExtractLaneVecF32x4'], vec, idx);
+ },
+ 'replace_lane': function(vec, idx, value) {
+ return Module['_BinaryenSIMDReplace'](module, Module['ReplaceLaneVecF32x4'], vec, idx, value);
+ },
+ 'eq': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['EqVecF32x4'], left, right);
+ },
+ 'ne': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['NeVecF32x4'], left, right);
+ },
+ 'lt': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['LtVecF32x4'], left, right);
+ },
+ 'gt': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['GtVecF32x4'], left, right);
+ },
+ 'le': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['LeVecF32x4'], left, right);
+ },
+ 'ge': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['GeVecF32x4'], left, right);
+ },
+ 'abs': function(value) {
+ return Module['_BinaryenUnary'](module, Module['AbsVecF32x4'], value);
+ },
+ 'neg': function(value) {
+ return Module['_BinaryenUnary'](module, Module['NegVecF32x4'], value);
+ },
+ 'sqrt': function(value) {
+ return Module['_BinaryenUnary'](module, Module['SqrtVecF32x4'], value);
+ },
+ 'add': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['AddVecF32x4'], left, right);
+ },
+ 'sub': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['SubVecF32x4'], left, right);
+ },
+ 'mul': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['MulVecF32x4'], left, right);
+ },
+ 'div': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['DivVecF32x4'], left, right);
+ },
+ 'min': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['MinVecF32x4'], left, right);
+ },
+ 'max': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['MaxVecF32x4'], left, right);
+ },
+ 'convert_s/i32x4': function(value) {
+ return Module['_BinaryenUnary'](module, Module['ConvertSVecI32x4ToVecF32x4'], value);
+ },
+ 'convert_u/i32x4': function(value) {
+ return Module['_BinaryenUnary'](module, Module['ConvertUVecI32x4ToVecF32x4'], value);
+ },
+ };
+
+ self['f64x2'] = {
+ 'splat': function(value) {
+ return Module['_BinaryenUnary'](module, Module['SplatVecF64x2'], value);
+ },
+ 'extract_lane': function(vec, idx) {
+ return Module['_BinaryenSIMDExtract'](module, Module['ExtractLaneVecF64x2'], vec, idx);
+ },
+ 'replace_lane': function(vec, idx, value) {
+ return Module['_BinaryenSIMDReplace'](module, Module['ReplaceLaneVecF64x2'], vec, idx, value);
+ },
+ 'eq': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['EqVecF64x2'], left, right);
+ },
+ 'ne': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['NeVecF64x2'], left, right);
+ },
+ 'lt': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['LtVecF64x2'], left, right);
+ },
+ 'gt': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['GtVecF64x2'], left, right);
+ },
+ 'le': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['LeVecF64x2'], left, right);
+ },
+ 'ge': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['GeVecF64x2'], left, right);
+ },
+ 'abs': function(value) {
+ return Module['_BinaryenUnary'](module, Module['AbsVecF64x2'], value);
+ },
+ 'neg': function(value) {
+ return Module['_BinaryenUnary'](module, Module['NegVecF64x2'], value);
+ },
+ 'sqrt': function(value) {
+ return Module['_BinaryenUnary'](module, Module['SqrtVecF64x2'], value);
+ },
+ 'add': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['AddVecF64x2'], left, right);
+ },
+ 'sub': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['SubVecF64x2'], left, right);
+ },
+ 'mul': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['MulVecF64x2'], left, right);
+ },
+ 'div': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['DivVecF64x2'], left, right);
+ },
+ 'min': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['MinVecF64x2'], left, right);
+ },
+ 'max': function(left, right) {
+ return Module['_BinaryenBinary'](module, Module['MaxVecF64x2'], left, right);
+ },
+ 'convert_s/i64x2': function(value) {
+ return Module['_BinaryenUnary'](module, Module['ConvertSVecI64x2ToVecF64x2'], value);
+ },
+ 'convert_u/i64x2': function(value) {
+ return Module['_BinaryenUnary'](module, Module['ConvertUVecI64x2ToVecF64x2'], value);
+ },
+ };
+
self['select'] = function(condition, ifTrue, ifFalse) {
return Module['_BinaryenSelect'](module, condition, ifTrue, ifFalse);
};
@@ -1551,6 +2148,55 @@ Module['getExpressionInfo'] = function(expr) {
'ptr': Module['_BinaryenAtomicWakeGetPtr'](expr),
'wakeCount': Module['_BinaryenAtomicWakeGetWakeCount'](expr)
};
+ case Module['SIMDExtractId']:
+ return {
+ 'id': id,
+ 'type': type,
+ 'op': Module['_BinaryenSIMDExtractGetOp'](expr),
+ 'vec': Module['_BinaryenSIMDExtractGetVec'](expr),
+ 'idx': Module['_BinaryenSIMDExtractGetIdx'](expr)
+ };
+ case Module['SIMDReplaceId']:
+ return {
+ 'id': id,
+ 'type': type,
+ 'op': Module['_BinaryenSIMDReplaceGetOp'](expr),
+ 'vec': Module['_BinaryenSIMDReplaceGetVec'](expr),
+ 'idx': Module['_BinaryenSIMDReplaceGetIdx'](expr),
+ 'value': Module['_BinaryenSIMDReplaceGetValue'](expr)
+ };
+ case Module['SIMDShuffleId']:
+ return preserveStack(function() {
+ var ret = stackAlloc(16);
+ Module['_BinaryenSIMDShuffleGetMask'](expr, ret);
+ var mask = [];
+ for (var i = 0; i < 16; i++) {
+ mask[i] = HEAP8[ret + i];
+ }
+ return {
+ 'id': id,
+ 'type': type,
+ 'left': Module['_BinaryenSIMDShuffleGetLeft'](expr),
+ 'right': Module['_BinaryenSIMDShuffleGetRight'](expr),
+ 'mask': mask
+ };
+ });
+ case Module['SIMDBitselectId']:
+ return {
+ 'id': id,
+ 'type': type,
+ 'left': Module['_BinaryenSIMDBitselectGetLeft'](expr),
+ 'right': Module['_BinaryenSIMDBitselectGetRight'](expr),
+ 'cond': Module['_BinaryenSIMDBitselectGetCond'](expr)
+ };
+ case Module['SIMDShiftId']:
+ return {
+ 'id': id,
+ 'type': type,
+ 'op': Module['_BinaryenSIMDShiftGetOp'](expr),
+ 'vec': Module['_BinaryenSIMDShiftGetVec'](expr),
+ 'shift': Module['_BinaryenSIMDShiftGetShift'](expr)
+ };
default:
throw Error('unexpected id: ' + id);
}
diff --git a/src/literal.h b/src/literal.h
index 4ed23b80d..dd5688263 100644
--- a/src/literal.h
+++ b/src/literal.h
@@ -18,6 +18,7 @@
#define wasm_literal_h
#include <iostream>
+#include <array>
#include "support/hash.h"
#include "support/utilities.h"
@@ -36,10 +37,11 @@ private:
union {
int32_t i32;
int64_t i64;
+ uint8_t v128[16];
};
public:
- Literal() : type(Type::none), i64(0) {}
+ Literal() : type(Type::none), v128() {}
explicit Literal(Type type) : type(type), i64(0) {}
explicit Literal(int32_t init) : type(Type::i32), i32(init) {}
explicit Literal(uint32_t init) : type(Type::i32), i32(init) {}
@@ -47,10 +49,38 @@ public:
explicit Literal(uint64_t init) : type(Type::i64), i64(init) {}
explicit Literal(float init) : type(Type::f32), i32(bit_cast<int32_t>(init)) {}
explicit Literal(double init) : type(Type::f64), i64(bit_cast<int64_t>(init)) {}
+ // v128 literal from bytes
+ explicit Literal(const uint8_t init[16]);
+ // v128 literal from lane value literals
+ explicit Literal(const std::array<Literal, 16>&);
+ explicit Literal(const std::array<Literal, 8>&);
+ explicit Literal(const std::array<Literal, 4>&);
+ explicit Literal(const std::array<Literal, 2>&);
bool isConcrete() { return type != none; }
bool isNull() { return type == none; }
+ inline static Literal makeFromInt32(int32_t x, Type type) {
+ switch (type) {
+ case Type::i32: return Literal(int32_t(x)); break;
+ case Type::i64: return Literal(int64_t(x)); break;
+ case Type::f32: return Literal(float(x)); break;
+ case Type::f64: return Literal(double(x)); break;
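+ // for v128, x goes into the low i32 lane and the remaining lanes are zero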
+ case Type::v128: return Literal(
+ std::array<Literal, 4>{{
+ Literal(x), Literal(int32_t(0)), Literal(int32_t(0)), Literal(int32_t(0))
+ }}
+ );
+ case none:
+ case unreachable: WASM_UNREACHABLE();
+ }
+ WASM_UNREACHABLE();
+ }
+
+ inline static Literal makeZero(Type type) {
+ return makeFromInt32(0, type);
+ }
+
Literal castToF32();
Literal castToF64();
Literal castToI32();
@@ -60,8 +90,12 @@ public:
int64_t geti64() const { assert(type == Type::i64); return i64; }
float getf32() const { assert(type == Type::f32); return bit_cast<float>(i32); }
double getf64() const { assert(type == Type::f64); return bit_cast<double>(i64); }
+ std::array<uint8_t, 16> getv128() const;
- int32_t* geti32Ptr() { assert(type == Type::i32); return &i32; } // careful!
+ // careful!
+ int32_t* geti32Ptr() { assert(type == Type::i32); return &i32; }
+ uint8_t* getv128Ptr() { assert(type == Type::v128); return v128; }
+ const uint8_t* getv128Ptr() const { assert(type == Type::v128); return v128; }
int32_t reinterpreti32() const { assert(type == Type::f32); return i32; }
int64_t reinterpreti64() const { assert(type == Type::f64); return i64; }
@@ -70,7 +104,7 @@ public:
int64_t getInteger() const;
double getFloat() const;
- int64_t getBits() const;
+ void getBits(uint8_t (&buf)[16]) const;
// Equality checks for the type and the bits, so a nan float would
// be compared bitwise (which means that a Literal containing a nan
// would be equal to itself, if the bits are equal).
@@ -84,6 +118,7 @@ public:
static void printFloat(std::ostream &o, float f);
static void printDouble(std::ostream& o, double d);
+ static void printVec128(std::ostream& o, const std::array<uint8_t, 16>& v);
friend std::ostream& operator<<(std::ostream& o, Literal literal);
@@ -158,6 +193,163 @@ public:
Literal min(const Literal& other) const;
Literal max(const Literal& other) const;
Literal copysign(const Literal& other) const;
+
+ std::array<Literal, 16> getLanesSI8x16() const;
+ std::array<Literal, 16> getLanesUI8x16() const;
+ std::array<Literal, 8> getLanesSI16x8() const;
+ std::array<Literal, 8> getLanesUI16x8() const;
+ std::array<Literal, 4> getLanesI32x4() const;
+ std::array<Literal, 2> getLanesI64x2() const;
+ std::array<Literal, 4> getLanesF32x4() const;
+ std::array<Literal, 2> getLanesF64x2() const;
+
+ Literal shuffleV8x16(const Literal& other, const std::array<uint8_t, 16>& mask) const;
+ Literal splatI8x16() const;
+ Literal extractLaneSI8x16(uint8_t idx) const;
+ Literal extractLaneUI8x16(uint8_t idx) const;
+ Literal replaceLaneI8x16(const Literal& other, uint8_t idx) const;
+ Literal splatI16x8() const;
+ Literal extractLaneSI16x8(uint8_t idx) const;
+ Literal extractLaneUI16x8(uint8_t idx) const;
+ Literal replaceLaneI16x8(const Literal& other, uint8_t idx) const;
+ Literal splatI32x4() const;
+ Literal extractLaneI32x4(uint8_t idx) const;
+ Literal replaceLaneI32x4(const Literal& other, uint8_t idx) const;
+ Literal splatI64x2() const;
+ Literal extractLaneI64x2(uint8_t idx) const;
+ Literal replaceLaneI64x2(const Literal& other, uint8_t idx) const;
+ Literal splatF32x4() const;
+ Literal extractLaneF32x4(uint8_t idx) const;
+ Literal replaceLaneF32x4(const Literal& other, uint8_t idx) const;
+ Literal splatF64x2() const;
+ Literal extractLaneF64x2(uint8_t idx) const;
+ Literal replaceLaneF64x2(const Literal& other, uint8_t idx) const;
+ Literal eqI8x16(const Literal& other) const;
+ Literal neI8x16(const Literal& other) const;
+ Literal ltSI8x16(const Literal& other) const;
+ Literal ltUI8x16(const Literal& other) const;
+ Literal gtSI8x16(const Literal& other) const;
+ Literal gtUI8x16(const Literal& other) const;
+ Literal leSI8x16(const Literal& other) const;
+ Literal leUI8x16(const Literal& other) const;
+ Literal geSI8x16(const Literal& other) const;
+ Literal geUI8x16(const Literal& other) const;
+ Literal eqI16x8(const Literal& other) const;
+ Literal neI16x8(const Literal& other) const;
+ Literal ltSI16x8(const Literal& other) const;
+ Literal ltUI16x8(const Literal& other) const;
+ Literal gtSI16x8(const Literal& other) const;
+ Literal gtUI16x8(const Literal& other) const;
+ Literal leSI16x8(const Literal& other) const;
+ Literal leUI16x8(const Literal& other) const;
+ Literal geSI16x8(const Literal& other) const;
+ Literal geUI16x8(const Literal& other) const;
+ Literal eqI32x4(const Literal& other) const;
+ Literal neI32x4(const Literal& other) const;
+ Literal ltSI32x4(const Literal& other) const;
+ Literal ltUI32x4(const Literal& other) const;
+ Literal gtSI32x4(const Literal& other) const;
+ Literal gtUI32x4(const Literal& other) const;
+ Literal leSI32x4(const Literal& other) const;
+ Literal leUI32x4(const Literal& other) const;
+ Literal geSI32x4(const Literal& other) const;
+ Literal geUI32x4(const Literal& other) const;
+ Literal eqF32x4(const Literal& other) const;
+ Literal neF32x4(const Literal& other) const;
+ Literal ltF32x4(const Literal& other) const;
+ Literal gtF32x4(const Literal& other) const;
+ Literal leF32x4(const Literal& other) const;
+ Literal geF32x4(const Literal& other) const;
+ Literal eqF64x2(const Literal& other) const;
+ Literal neF64x2(const Literal& other) const;
+ Literal ltF64x2(const Literal& other) const;
+ Literal gtF64x2(const Literal& other) const;
+ Literal leF64x2(const Literal& other) const;
+ Literal geF64x2(const Literal& other) const;
+ Literal notV128() const;
+ Literal andV128(const Literal& other) const;
+ Literal orV128(const Literal& other) const;
+ Literal xorV128(const Literal& other) const;
+ Literal bitselectV128(const Literal& left, const Literal& right) const;
+ Literal negI8x16() const;
+ Literal anyTrueI8x16() const;
+ Literal allTrueI8x16() const;
+ Literal shlI8x16(const Literal& other) const;
+ Literal shrSI8x16(const Literal& other) const;
+ Literal shrUI8x16(const Literal& other) const;
+ Literal addI8x16(const Literal& other) const;
+ Literal addSaturateSI8x16(const Literal& other) const;
+ Literal addSaturateUI8x16(const Literal& other) const;
+ Literal subI8x16(const Literal& other) const;
+ Literal subSaturateSI8x16(const Literal& other) const;
+ Literal subSaturateUI8x16(const Literal& other) const;
+ Literal mulI8x16(const Literal& other) const;
+ Literal negI16x8() const;
+ Literal anyTrueI16x8() const;
+ Literal allTrueI16x8() const;
+ Literal shlI16x8(const Literal& other) const;
+ Literal shrSI16x8(const Literal& other) const;
+ Literal shrUI16x8(const Literal& other) const;
+ Literal addI16x8(const Literal& other) const;
+ Literal addSaturateSI16x8(const Literal& other) const;
+ Literal addSaturateUI16x8(const Literal& other) const;
+ Literal subI16x8(const Literal& other) const;
+ Literal subSaturateSI16x8(const Literal& other) const;
+ Literal subSaturateUI16x8(const Literal& other) const;
+ Literal mulI16x8(const Literal& other) const;
+ Literal negI32x4() const;
+ Literal anyTrueI32x4() const;
+ Literal allTrueI32x4() const;
+ Literal shlI32x4(const Literal& other) const;
+ Literal shrSI32x4(const Literal& other) const;
+ Literal shrUI32x4(const Literal& other) const;
+ Literal addI32x4(const Literal& other) const;
+ Literal subI32x4(const Literal& other) const;
+ Literal mulI32x4(const Literal& other) const;
+ Literal negI64x2() const;
+ Literal anyTrueI64x2() const;
+ Literal allTrueI64x2() const;
+ Literal shlI64x2(const Literal& other) const;
+ Literal shrSI64x2(const Literal& other) const;
+ Literal shrUI64x2(const Literal& other) const;
+ Literal addI64x2(const Literal& other) const;
+ Literal subI64x2(const Literal& other) const;
+ Literal absF32x4() const;
+ Literal negF32x4() const;
+ Literal sqrtF32x4() const;
+ Literal addF32x4(const Literal& other) const;
+ Literal subF32x4(const Literal& other) const;
+ Literal mulF32x4(const Literal& other) const;
+ Literal divF32x4(const Literal& other) const;
+ Literal minF32x4(const Literal& other) const;
+ Literal maxF32x4(const Literal& other) const;
+ Literal absF64x2() const;
+ Literal negF64x2() const;
+ Literal sqrtF64x2() const;
+ Literal addF64x2(const Literal& other) const;
+ Literal subF64x2(const Literal& other) const;
+ Literal mulF64x2(const Literal& other) const;
+ Literal divF64x2(const Literal& other) const;
+ Literal minF64x2(const Literal& other) const;
+ Literal maxF64x2(const Literal& other) const;
+ Literal truncSatToSI32x4() const;
+ Literal truncSatToUI32x4() const;
+ Literal truncSatToSI64x2() const;
+ Literal truncSatToUI64x2() const;
+ Literal convertSToF32x4() const;
+ Literal convertUToF32x4() const;
+ Literal convertSToF64x2() const;
+ Literal convertUToF64x2() const;
+
+ private:
+ Literal addSatSI8(const Literal& other) const;
+ Literal addSatUI8(const Literal& other) const;
+ Literal addSatSI16(const Literal& other) const;
+ Literal addSatUI16(const Literal& other) const;
+ Literal subSatSI8(const Literal& other) const;
+ Literal subSatUI8(const Literal& other) const;
+ Literal subSatSI16(const Literal& other) const;
+ Literal subSatUI16(const Literal& other) const;
};
} // namespace wasm
@@ -165,9 +357,16 @@ public:
namespace std {
template<> struct hash<wasm::Literal> {
size_t operator()(const wasm::Literal& a) const {
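+ // getBits is expected to fill all 16 bytes (zero-padding scalar types), hashed here as two 64-bit chunks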
+ uint8_t bytes[16];
+ a.getBits(bytes);
+ int64_t chunks[2];
+ memcpy(chunks, bytes, sizeof(chunks));
return wasm::rehash(
- uint64_t(hash<size_t>()(size_t(a.type))),
- uint64_t(hash<int64_t>()(a.getBits()))
+ wasm::rehash(
+ uint64_t(hash<size_t>()(size_t(a.type))),
+ uint64_t(hash<int64_t>()(chunks[0]))
+ ),
+ uint64_t(hash<int64_t>()(chunks[1]))
);
}
};
@@ -175,7 +374,16 @@ template<> struct less<wasm::Literal> {
bool operator()(const wasm::Literal& a, const wasm::Literal& b) const {
if (a.type < b.type) return true;
if (a.type > b.type) return false;
- return a.getBits() < b.getBits();
+ switch (a.type) {
+ case wasm::Type::i32: return a.geti32() < b.geti32();
+ case wasm::Type::f32: return a.reinterpreti32() < b.reinterpreti32();
+ case wasm::Type::i64: return a.geti64() < b.geti64();
+ case wasm::Type::f64: return a.reinterpreti64() < b.reinterpreti64();
+ case wasm::Type::v128: return memcmp(a.getv128Ptr(), b.getv128Ptr(), 16) < 0;
+ case wasm::Type::none:
+ case wasm::Type::unreachable: return false;
+ }
+ WASM_UNREACHABLE();
}
};
}
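A minimal usage sketch of the new v128 Literal API declared above (not part of the patch; it only uses the lane constructors and helpers listed in this header):

  #include "literal.h"
  using namespace wasm;

  Literal demo() {
    // build a v128 value from four i32 lanes
    Literal vec = Literal(std::array<Literal, 4>{{
      Literal(int32_t(1)), Literal(int32_t(2)), Literal(int32_t(3)), Literal(int32_t(4))}});
    // splat 7 across all i32 lanes, then add lane-wise
    Literal sum = vec.addI32x4(Literal(int32_t(7)).splatI32x4());
    // extract lane 2 as a plain i32 Literal; here it holds 3 + 7 = 10
    return sum.extractLaneI32x4(2);
  }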
diff --git a/src/passes/DeadCodeElimination.cpp b/src/passes/DeadCodeElimination.cpp
index 2e62197b5..6e70fc55d 100644
--- a/src/passes/DeadCodeElimination.cpp
+++ b/src/passes/DeadCodeElimination.cpp
@@ -257,6 +257,11 @@ struct DeadCodeElimination : public WalkerPass<PostWalker<DeadCodeElimination>>
case Expression::Id::AtomicRMWId: DELEGATE(AtomicRMW);
case Expression::Id::AtomicWaitId: DELEGATE(AtomicWait);
case Expression::Id::AtomicWakeId: DELEGATE(AtomicWake);
+ case Expression::Id::SIMDExtractId: DELEGATE(SIMDExtract);
+ case Expression::Id::SIMDReplaceId: DELEGATE(SIMDReplace);
+ case Expression::Id::SIMDShuffleId: DELEGATE(SIMDShuffle);
+ case Expression::Id::SIMDBitselectId: DELEGATE(SIMDBitselect);
+ case Expression::Id::SIMDShiftId: DELEGATE(SIMDShift);
case Expression::Id::InvalidId: WASM_UNREACHABLE();
case Expression::Id::NumExpressionIds: WASM_UNREACHABLE();
}
diff --git a/src/passes/OptimizeInstructions.cpp b/src/passes/OptimizeInstructions.cpp
index 1dd05dd0a..4c4adc623 100644
--- a/src/passes/OptimizeInstructions.cpp
+++ b/src/passes/OptimizeInstructions.cpp
@@ -461,7 +461,7 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
auto total = Bits::getEffectiveShifts(leftRight) + Bits::getEffectiveShifts(right);
if (total == Bits::getEffectiveShifts(total, right->type)) {
// no overflow, we can do this
- leftRight->value = LiteralUtils::makeLiteralFromInt32(total, right->type);
+ leftRight->value = Literal::makeFromInt32(total, right->type);
return left;
} // TODO: handle overflows
}
@@ -1096,7 +1096,7 @@ private:
auto* right = binary->right->cast<Const>();
if (isIntegerType(type)) {
// operations on zero
- if (right->value == LiteralUtils::makeLiteralFromInt32(0, type)) {
+ if (right->value == Literal::makeFromInt32(0, type)) {
if (binary->op == Abstract::getBinary(type, Abstract::Shl) ||
binary->op == Abstract::getBinary(type, Abstract::ShrU) ||
binary->op == Abstract::getBinary(type, Abstract::ShrS) ||
@@ -1152,7 +1152,7 @@ private:
// as a NaN would skip the computation and just return the NaN,
// and that is precisely what we do here. but, the same with -1
// (change to a negation) would be incorrect for that reason.
- if (right->value == LiteralUtils::makeLiteralFromInt32(1, type)) {
+ if (right->value == Literal::makeFromInt32(1, type)) {
if (binary->op == Abstract::getBinary(type, Abstract::Mul) ||
binary->op == Abstract::getBinary(type, Abstract::DivS) ||
binary->op == Abstract::getBinary(type, Abstract::DivU)) {
@@ -1171,7 +1171,7 @@ private:
auto* left = binary->left->cast<Const>();
if (isIntegerType(type)) {
// operations on zero
- if (left->value == LiteralUtils::makeLiteralFromInt32(0, type)) {
+ if (left->value == Literal::makeFromInt32(0, type)) {
if ((binary->op == Abstract::getBinary(type, Abstract::Shl) ||
binary->op == Abstract::getBinary(type, Abstract::ShrU) ||
binary->op == Abstract::getBinary(type, Abstract::ShrS)) &&
diff --git a/src/passes/Precompute.cpp b/src/passes/Precompute.cpp
index db34b9ffb..9fb7ab31c 100644
--- a/src/passes/Precompute.cpp
+++ b/src/passes/Precompute.cpp
@@ -297,7 +297,7 @@ private:
Literal curr;
if (set == nullptr) {
if (getFunction()->isVar(get->index)) {
- curr = LiteralUtils::makeLiteralZero(getFunction()->getLocalType(get->index));
+ curr = Literal::makeZero(getFunction()->getLocalType(get->index));
} else {
// it's a param, so it's hopeless
value = Literal();
diff --git a/src/passes/Print.cpp b/src/passes/Print.cpp
index 51c7e9f97..64dd0deae 100644
--- a/src/passes/Print.cpp
+++ b/src/passes/Print.cpp
@@ -234,6 +234,60 @@ struct PrintExpressionContents : public Visitor<PrintExpressionContents> {
o << " offset=" << curr->offset;
}
}
+ void visitSIMDExtract(SIMDExtract* curr) {
+ prepareColor(o);
+ switch (curr->op) {
+ case ExtractLaneSVecI8x16: o << "i8x16.extract_lane_s"; break;
+ case ExtractLaneUVecI8x16: o << "i8x16.extract_lane_u"; break;
+ case ExtractLaneSVecI16x8: o << "i16x8.extract_lane_s"; break;
+ case ExtractLaneUVecI16x8: o << "i16x8.extract_lane_u"; break;
+ case ExtractLaneVecI32x4: o << "i32x4.extract_lane"; break;
+ case ExtractLaneVecI64x2: o << "i64x2.extract_lane"; break;
+ case ExtractLaneVecF32x4: o << "f32x4.extract_lane"; break;
+ case ExtractLaneVecF64x2: o << "f64x2.extract_lane"; break;
+ }
+ o << " " << int(curr->idx);
+ }
+ void visitSIMDReplace(SIMDReplace* curr) {
+ prepareColor(o);
+ switch (curr->op) {
+ case ReplaceLaneVecI8x16: o << "i8x16.replace_lane"; break;
+ case ReplaceLaneVecI16x8: o << "i16x8.replace_lane"; break;
+ case ReplaceLaneVecI32x4: o << "i32x4.replace_lane"; break;
+ case ReplaceLaneVecI64x2: o << "i64x2.replace_lane"; break;
+ case ReplaceLaneVecF32x4: o << "f32x4.replace_lane"; break;
+ case ReplaceLaneVecF64x2: o << "f64x2.replace_lane"; break;
+ }
+ o << " " << int(curr->idx);
+ }
+ void visitSIMDShuffle(SIMDShuffle* curr) {
+ prepareColor(o);
+ o << "v8x16.shuffle";
+ for (uint8_t mask_index : curr->mask) {
+ o << " " << std::to_string(mask_index);
+ }
+ }
+ void visitSIMDBitselect(SIMDBitselect* curr) {
+ prepareColor(o);
+ o << "v128.bitselect";
+ }
+ void visitSIMDShift(SIMDShift* curr) {
+ prepareColor(o);
+ switch (curr->op) {
+ case ShlVecI8x16: o << "i8x16.shl"; break;
+ case ShrSVecI8x16: o << "i8x16.shr_s"; break;
+ case ShrUVecI8x16: o << "i8x16.shr_u"; break;
+ case ShlVecI16x8: o << "i16x8.shl"; break;
+ case ShrSVecI16x8: o << "i16x8.shr_s"; break;
+ case ShrUVecI16x8: o << "i16x8.shr_u"; break;
+ case ShlVecI32x4: o << "i32x4.shl"; break;
+ case ShrSVecI32x4: o << "i32x4.shr_s"; break;
+ case ShrUVecI32x4: o << "i32x4.shr_u"; break;
+ case ShlVecI64x2: o << "i64x2.shl"; break;
+ case ShrSVecI64x2: o << "i64x2.shr_s"; break;
+ case ShrUVecI64x2: o << "i64x2.shr_u"; break;
+ }
+ }
void visitConst(Const* curr) {
o << curr->value;
}
@@ -262,36 +316,36 @@ struct PrintExpressionContents : public Visitor<PrintExpressionContents> {
case TruncFloat64: o << "f64.trunc"; break;
case NearestFloat64: o << "f64.nearest"; break;
case SqrtFloat64: o << "f64.sqrt"; break;
- case ExtendSInt32: o << "i64.extend_s/i32"; break;
- case ExtendUInt32: o << "i64.extend_u/i32"; break;
- case WrapInt64: o << "i32.wrap/i64"; break;
- case TruncSFloat32ToInt32: o << "i32.trunc_s/f32"; break;
- case TruncSFloat32ToInt64: o << "i64.trunc_s/f32"; break;
- case TruncUFloat32ToInt32: o << "i32.trunc_u/f32"; break;
- case TruncUFloat32ToInt64: o << "i64.trunc_u/f32"; break;
- case TruncSFloat64ToInt32: o << "i32.trunc_s/f64"; break;
- case TruncSFloat64ToInt64: o << "i64.trunc_s/f64"; break;
- case TruncUFloat64ToInt32: o << "i32.trunc_u/f64"; break;
- case TruncUFloat64ToInt64: o << "i64.trunc_u/f64"; break;
+ case ExtendSInt32: o << "i64.extend_s/i32"; break;
+ case ExtendUInt32: o << "i64.extend_u/i32"; break;
+ case WrapInt64: o << "i32.wrap/i64"; break;
+ case TruncSFloat32ToInt32: o << "i32.trunc_s/f32"; break;
+ case TruncSFloat32ToInt64: o << "i64.trunc_s/f32"; break;
+ case TruncUFloat32ToInt32: o << "i32.trunc_u/f32"; break;
+ case TruncUFloat32ToInt64: o << "i64.trunc_u/f32"; break;
+ case TruncSFloat64ToInt32: o << "i32.trunc_s/f64"; break;
+ case TruncSFloat64ToInt64: o << "i64.trunc_s/f64"; break;
+ case TruncUFloat64ToInt32: o << "i32.trunc_u/f64"; break;
+ case TruncUFloat64ToInt64: o << "i64.trunc_u/f64"; break;
case ReinterpretFloat32: o << "i32.reinterpret/f32"; break;
case ReinterpretFloat64: o << "i64.reinterpret/f64"; break;
- case ConvertUInt32ToFloat32: o << "f32.convert_u/i32"; break;
- case ConvertUInt32ToFloat64: o << "f64.convert_u/i32"; break;
- case ConvertSInt32ToFloat32: o << "f32.convert_s/i32"; break;
- case ConvertSInt32ToFloat64: o << "f64.convert_s/i32"; break;
- case ConvertUInt64ToFloat32: o << "f32.convert_u/i64"; break;
- case ConvertUInt64ToFloat64: o << "f64.convert_u/i64"; break;
- case ConvertSInt64ToFloat32: o << "f32.convert_s/i64"; break;
- case ConvertSInt64ToFloat64: o << "f64.convert_s/i64"; break;
- case PromoteFloat32: o << "f64.promote/f32"; break;
- case DemoteFloat64: o << "f32.demote/f64"; break;
+ case ConvertUInt32ToFloat32: o << "f32.convert_u/i32"; break;
+ case ConvertUInt32ToFloat64: o << "f64.convert_u/i32"; break;
+ case ConvertSInt32ToFloat32: o << "f32.convert_s/i32"; break;
+ case ConvertSInt32ToFloat64: o << "f64.convert_s/i32"; break;
+ case ConvertUInt64ToFloat32: o << "f32.convert_u/i64"; break;
+ case ConvertUInt64ToFloat64: o << "f64.convert_u/i64"; break;
+ case ConvertSInt64ToFloat32: o << "f32.convert_s/i64"; break;
+ case ConvertSInt64ToFloat64: o << "f64.convert_s/i64"; break;
+ case PromoteFloat32: o << "f64.promote/f32"; break;
+ case DemoteFloat64: o << "f32.demote/f64"; break;
case ReinterpretInt32: o << "f32.reinterpret/i32"; break;
case ReinterpretInt64: o << "f64.reinterpret/i64"; break;
- case ExtendS8Int32: o << "i32.extend8_s"; break;
- case ExtendS16Int32: o << "i32.extend16_s"; break;
- case ExtendS8Int64: o << "i64.extend8_s"; break;
- case ExtendS16Int64: o << "i64.extend16_s"; break;
- case ExtendS32Int64: o << "i64.extend32_s"; break;
+ case ExtendS8Int32: o << "i32.extend8_s"; break;
+ case ExtendS16Int32: o << "i32.extend16_s"; break;
+ case ExtendS8Int64: o << "i64.extend8_s"; break;
+ case ExtendS16Int64: o << "i64.extend16_s"; break;
+ case ExtendS32Int64: o << "i64.extend32_s"; break;
case TruncSatSFloat32ToInt32: o << "i32.trunc_s:sat/f32"; break;
case TruncSatUFloat32ToInt32: o << "i32.trunc_u:sat/f32"; break;
case TruncSatSFloat64ToInt32: o << "i32.trunc_s:sat/f64"; break;
@@ -300,6 +354,39 @@ struct PrintExpressionContents : public Visitor<PrintExpressionContents> {
case TruncSatUFloat32ToInt64: o << "i64.trunc_u:sat/f32"; break;
case TruncSatSFloat64ToInt64: o << "i64.trunc_s:sat/f64"; break;
case TruncSatUFloat64ToInt64: o << "i64.trunc_u:sat/f64"; break;
+ case SplatVecI8x16: o << "i8x16.splat"; break;
+ case SplatVecI16x8: o << "i16x8.splat"; break;
+ case SplatVecI32x4: o << "i32x4.splat"; break;
+ case SplatVecI64x2: o << "i64x2.splat"; break;
+ case SplatVecF32x4: o << "f32x4.splat"; break;
+ case SplatVecF64x2: o << "f64x2.splat"; break;
+ case NotVec128: o << "v128.not"; break;
+ case NegVecI8x16: o << "i8x16.neg"; break;
+ case AnyTrueVecI8x16: o << "i8x16.any_true"; break;
+ case AllTrueVecI8x16: o << "i8x16.all_true"; break;
+ case NegVecI16x8: o << "i16x8.neg"; break;
+ case AnyTrueVecI16x8: o << "i16x8.any_true"; break;
+ case AllTrueVecI16x8: o << "i16x8.all_true"; break;
+ case NegVecI32x4: o << "i32x4.neg"; break;
+ case AnyTrueVecI32x4: o << "i32x4.any_true"; break;
+ case AllTrueVecI32x4: o << "i32x4.all_true"; break;
+ case NegVecI64x2: o << "i64x2.neg"; break;
+ case AnyTrueVecI64x2: o << "i64x2.any_true"; break;
+ case AllTrueVecI64x2: o << "i64x2.all_true"; break;
+ case AbsVecF32x4: o << "f32x4.abs"; break;
+ case NegVecF32x4: o << "f32x4.neg"; break;
+ case SqrtVecF32x4: o << "f32x4.sqrt"; break;
+ case AbsVecF64x2: o << "f64x2.abs"; break;
+ case NegVecF64x2: o << "f64x2.neg"; break;
+ case SqrtVecF64x2: o << "f64x2.sqrt"; break;
+ case TruncSatSVecF32x4ToVecI32x4: o << "i32x4.trunc_s/f32x4:sat"; break;
+ case TruncSatUVecF32x4ToVecI32x4: o << "i32x4.trunc_u/f32x4:sat"; break;
+ case TruncSatSVecF64x2ToVecI64x2: o << "i64x2.trunc_s/f64x2:sat"; break;
+ case TruncSatUVecF64x2ToVecI64x2: o << "i64x2.trunc_u/f64x2:sat"; break;
+ case ConvertSVecI32x4ToVecF32x4: o << "f32x4.convert_s/i32x4"; break;
+ case ConvertUVecI32x4ToVecF32x4: o << "f32x4.convert_u/i32x4"; break;
+ case ConvertSVecI64x2ToVecF64x2: o << "f64x2.convert_s/i64x2"; break;
+ case ConvertUVecI64x2ToVecF64x2: o << "f64x2.convert_u/i64x2"; break;
case InvalidUnary: WASM_UNREACHABLE();
}
}
@@ -386,6 +473,86 @@ struct PrintExpressionContents : public Visitor<PrintExpressionContents> {
case GtFloat64: o << "f64.gt"; break;
case GeFloat64: o << "f64.ge"; break;
+ case EqVecI8x16: o << "i8x16.eq"; break;
+ case NeVecI8x16: o << "i8x16.ne"; break;
+ case LtSVecI8x16: o << "i8x16.lt_s"; break;
+ case LtUVecI8x16: o << "i8x16.lt_u"; break;
+ case GtSVecI8x16: o << "i8x16.gt_s"; break;
+ case GtUVecI8x16: o << "i8x16.gt_u"; break;
+ case LeSVecI8x16: o << "i8x16.le_s"; break;
+ case LeUVecI8x16: o << "i8x16.le_u"; break;
+ case GeSVecI8x16: o << "i8x16.ge_s"; break;
+ case GeUVecI8x16: o << "i8x16.ge_u"; break;
+ case EqVecI16x8: o << "i16x8.eq"; break;
+ case NeVecI16x8: o << "i16x8.ne"; break;
+ case LtSVecI16x8: o << "i16x8.lt_s"; break;
+ case LtUVecI16x8: o << "i16x8.lt_u"; break;
+ case GtSVecI16x8: o << "i16x8.gt_s"; break;
+ case GtUVecI16x8: o << "i16x8.gt_u"; break;
+ case LeSVecI16x8: o << "i16x8.le_s"; break;
+ case LeUVecI16x8: o << "i16x8.le_u"; break;
+ case GeSVecI16x8: o << "i16x8.ge_s"; break;
+ case GeUVecI16x8: o << "i16x8.ge_u"; break;
+ case EqVecI32x4: o << "i32x4.eq"; break;
+ case NeVecI32x4: o << "i32x4.ne"; break;
+ case LtSVecI32x4: o << "i32x4.lt_s"; break;
+ case LtUVecI32x4: o << "i32x4.lt_u"; break;
+ case GtSVecI32x4: o << "i32x4.gt_s"; break;
+ case GtUVecI32x4: o << "i32x4.gt_u"; break;
+ case LeSVecI32x4: o << "i32x4.le_s"; break;
+ case LeUVecI32x4: o << "i32x4.le_u"; break;
+ case GeSVecI32x4: o << "i32x4.ge_s"; break;
+ case GeUVecI32x4: o << "i32x4.ge_u"; break;
+ case EqVecF32x4: o << "f32x4.eq"; break;
+ case NeVecF32x4: o << "f32x4.ne"; break;
+ case LtVecF32x4: o << "f32x4.lt"; break;
+ case GtVecF32x4: o << "f32x4.gt"; break;
+ case LeVecF32x4: o << "f32x4.le"; break;
+ case GeVecF32x4: o << "f32x4.ge"; break;
+ case EqVecF64x2: o << "f64x2.eq"; break;
+ case NeVecF64x2: o << "f64x2.ne"; break;
+ case LtVecF64x2: o << "f64x2.lt"; break;
+ case GtVecF64x2: o << "f64x2.gt"; break;
+ case LeVecF64x2: o << "f64x2.le"; break;
+ case GeVecF64x2: o << "f64x2.ge"; break;
+
+ case AndVec128: o << "v128.and"; break;
+ case OrVec128: o << "v128.or"; break;
+ case XorVec128: o << "v128.xor"; break;
+
+ case AddVecI8x16: o << "i8x16.add"; break;
+ case AddSatSVecI8x16: o << "i8x16.add_saturate_s"; break;
+ case AddSatUVecI8x16: o << "i8x16.add_saturate_u"; break;
+ case SubVecI8x16: o << "i8x16.sub"; break;
+ case SubSatSVecI8x16: o << "i8x16.sub_saturate_s"; break;
+ case SubSatUVecI8x16: o << "i8x16.sub_saturate_u"; break;
+ case MulVecI8x16: o << "i8x16.mul"; break;
+ case AddVecI16x8: o << "i16x8.add"; break;
+ case AddSatSVecI16x8: o << "i16x8.add_saturate_s"; break;
+ case AddSatUVecI16x8: o << "i16x8.add_saturate_u"; break;
+ case SubVecI16x8: o << "i16x8.sub"; break;
+ case SubSatSVecI16x8: o << "i16x8.sub_saturate_s"; break;
+ case SubSatUVecI16x8: o << "i16x8.sub_saturate_u"; break;
+ case MulVecI16x8: o << "i16x8.mul"; break;
+ case AddVecI32x4: o << "i32x4.add"; break;
+ case SubVecI32x4: o << "i32x4.sub"; break;
+ case MulVecI32x4: o << "i32x4.mul"; break;
+ case AddVecI64x2: o << "i64x2.add"; break;
+ case SubVecI64x2: o << "i64x2.sub"; break;
+
+ case AddVecF32x4: o << "f32x4.add"; break;
+ case SubVecF32x4: o << "f32x4.sub"; break;
+ case MulVecF32x4: o << "f32x4.mul"; break;
+ case DivVecF32x4: o << "f32x4.div"; break;
+ case MinVecF32x4: o << "f32x4.min"; break;
+ case MaxVecF32x4: o << "f32x4.max"; break;
+ case AddVecF64x2: o << "f64x2.add"; break;
+ case SubVecF64x2: o << "f64x2.sub"; break;
+ case MulVecF64x2: o << "f64x2.mul"; break;
+ case DivVecF64x2: o << "f64x2.div"; break;
+ case MinVecF64x2: o << "f64x2.min"; break;
+ case MaxVecF64x2: o << "f64x2.max"; break;
+
case InvalidBinary: WASM_UNREACHABLE();
}
restoreNormalColor(o);
@@ -724,6 +891,46 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
printFullLine(curr->wakeCount);
decIndent();
}
+ void visitSIMDExtract(SIMDExtract* curr) {
+ o << '(';
+ PrintExpressionContents(currFunction, o).visit(curr);
+ incIndent();
+ printFullLine(curr->vec);
+ decIndent();
+ }
+ void visitSIMDReplace(SIMDReplace* curr) {
+ o << '(';
+ PrintExpressionContents(currFunction, o).visit(curr);
+ incIndent();
+ printFullLine(curr->vec);
+ printFullLine(curr->value);
+ decIndent();
+ }
+ void visitSIMDShuffle(SIMDShuffle* curr) {
+ o << '(';
+ PrintExpressionContents(currFunction, o).visit(curr);
+ incIndent();
+ printFullLine(curr->left);
+ printFullLine(curr->right);
+ decIndent();
+ }
+ void visitSIMDBitselect(SIMDBitselect* curr) {
+ o << '(';
+ PrintExpressionContents(currFunction, o).visit(curr);
+ incIndent();
+ printFullLine(curr->left);
+ printFullLine(curr->right);
+ printFullLine(curr->cond);
+ decIndent();
+ }
+ void visitSIMDShift(SIMDShift* curr) {
+ o << '(';
+ PrintExpressionContents(currFunction, o).visit(curr);
+ incIndent();
+ printFullLine(curr->vec);
+ printFullLine(curr->shift);
+ decIndent();
+ }
void visitConst(Const* curr) {
o << '(';
PrintExpressionContents(currFunction, o).visit(curr);
diff --git a/src/passes/RedundantSetElimination.cpp b/src/passes/RedundantSetElimination.cpp
index 8c00a0880..8cd8fbf99 100644
--- a/src/passes/RedundantSetElimination.cpp
+++ b/src/passes/RedundantSetElimination.cpp
@@ -172,7 +172,7 @@ struct RedundantSetElimination : public WalkerPass<CFGWalker<RedundantSetElimina
#endif
start[i] = getUniqueValue();
} else {
- start[i] = getLiteralValue(LiteralUtils::makeLiteralZero(func->getLocalType(i)));
+ start[i] = getLiteralValue(Literal::makeZero(func->getLocalType(i)));
}
}
} else {
@@ -375,4 +375,3 @@ Pass *createRedundantSetEliminationPass() {
}
} // namespace wasm
-
diff --git a/src/passes/SafeHeap.cpp b/src/passes/SafeHeap.cpp
index ce1adff15..f170041e1 100644
--- a/src/passes/SafeHeap.cpp
+++ b/src/passes/SafeHeap.cpp
@@ -109,7 +109,7 @@ struct SafeHeap : public Pass {
instrumenter.add<AccessInstrumenter>();
instrumenter.run();
// add helper checking funcs and imports
- addGlobals(module);
+ addGlobals(module, runner->options.features);
}
Name dynamicTopPtr, segfault, alignfault;
@@ -156,18 +156,22 @@ struct SafeHeap : public Pass {
return align == bytes && shared && isIntegerType(type);
}
- void addGlobals(Module* module) {
+ void addGlobals(Module* module, FeatureSet features) {
// load funcs
Load load;
- for (auto type : { i32, i64, f32, f64 }) {
+ for (auto type : { i32, i64, f32, f64, v128 }) {
+ if (type == v128 && !features.hasSIMD()) continue;
load.type = type;
- for (Index bytes : { 1, 2, 4, 8 }) {
+ for (Index bytes : { 1, 2, 4, 8, 16 }) {
load.bytes = bytes;
- if (bytes > getTypeSize(type)) continue;
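+ // f32, f64 and v128 only have full-width accesses; skip the other sizes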
+ if (bytes > getTypeSize(type) ||
+ (type == f32 && bytes != 4) ||
+ (type == f64 && bytes != 8) ||
+ (type == v128 && bytes != 16)) continue;
for (auto signed_ : { true, false }) {
load.signed_ = signed_;
if (isFloatType(type) && signed_) continue;
- for (Index align : { 1, 2, 4, 8 }) {
+ for (Index align : { 1, 2, 4, 8, 16 }) {
load.align = align;
if (align > bytes) continue;
for (auto isAtomic : { true, false }) {
@@ -184,13 +188,17 @@ struct SafeHeap : public Pass {
}
// store funcs
Store store;
- for (auto valueType : { i32, i64, f32, f64 }) {
+ for (auto valueType : { i32, i64, f32, f64, v128 }) {
+ if (valueType == v128 && !features.hasSIMD()) continue;
store.valueType = valueType;
store.type = none;
- for (Index bytes : { 1, 2, 4, 8 }) {
+ for (Index bytes : { 1, 2, 4, 8, 16 }) {
store.bytes = bytes;
- if (bytes > getTypeSize(valueType)) continue;
- for (Index align : { 1, 2, 4, 8 }) {
+ if (bytes > getTypeSize(valueType) ||
+ (valueType == f32 && bytes != 4) ||
+ (valueType == f64 && bytes != 8) ||
+ (valueType == v128 && bytes != 16)) continue;
+ for (Index align : { 1, 2, 4, 8, 16 }) {
store.align = align;
if (align > bytes) continue;
for (auto isAtomic : { true, false }) {
diff --git a/src/shell-interface.h b/src/shell-interface.h
index 23f1c7de5..fc6a5897c 100644
--- a/src/shell-interface.h
+++ b/src/shell-interface.h
@@ -183,11 +183,17 @@ struct ShellExternalInterface : ModuleInstance::ExternalInterface {
uint32_t load32u(Address addr) override { return memory.get<uint32_t>(addr); }
int64_t load64s(Address addr) override { return memory.get<int64_t>(addr); }
uint64_t load64u(Address addr) override { return memory.get<uint64_t>(addr); }
+ std::array<uint8_t, 16> load128(Address addr) override {
+ return memory.get<std::array<uint8_t, 16>>(addr);
+ }
void store8(Address addr, int8_t value) override { memory.set<int8_t>(addr, value); }
void store16(Address addr, int16_t value) override { memory.set<int16_t>(addr, value); }
void store32(Address addr, int32_t value) override { memory.set<int32_t>(addr, value); }
void store64(Address addr, int64_t value) override { memory.set<int64_t>(addr, value); }
+ void store128(Address addr, const std::array<uint8_t, 16>& value) override {
+ memory.set<std::array<uint8_t, 16>>(addr, value);
+ }
void growMemory(Address /*oldSize*/, Address newSize) override {
memory.resize(newSize);
diff --git a/src/tools/feature-options.h b/src/tools/feature-options.h
index 3b35656fc..1bd78d9b9 100644
--- a/src/tools/feature-options.h
+++ b/src/tools/feature-options.h
@@ -70,7 +70,20 @@ struct FeatureOptions : public Options {
Options::Arguments::Zero,
[this](Options *o, const std::string& arguments) {
passOptions.features.setTruncSat(false);
- });
+ })
+ .add("--enable-simd", "",
+ "Enable nontrapping float-to-int operations",
+ Options::Arguments::Zero,
+ [this](Options *o, const std::string& arguments) {
+ passOptions.features.setSIMD();
+ })
+ .add("--disable-simd", "",
+ "Disable nontrapping float-to-int operations",
+ Options::Arguments::Zero,
+ [this](Options *o, const std::string& arguments) {
+ passOptions.features.setSIMD(false);
+ })
+ ;
}
FeatureSet getFeatures() const {
diff --git a/src/tools/fuzzing.h b/src/tools/fuzzing.h
index dcb47529f..999325baa 100644
--- a/src/tools/fuzzing.h
+++ b/src/tools/fuzzing.h
@@ -1268,10 +1268,10 @@ private:
}
// tweak around special values
if (oneIn(3)) { // +- 1
- value = value.add(LiteralUtils::makeLiteralFromInt32(upTo(3) - 1, type));
+ value = value.add(Literal::makeFromInt32(upTo(3) - 1, type));
}
if (oneIn(2)) { // flip sign
- value = value.mul(LiteralUtils::makeLiteralFromInt32(-1, type));
+ value = value.mul(Literal::makeFromInt32(-1, type));
}
break;
}
@@ -1288,7 +1288,7 @@ private:
}
// maybe negative
if (oneIn(2)) {
- value = value.mul(LiteralUtils::makeLiteralFromInt32(-1, type));
+ value = value.mul(Literal::makeFromInt32(-1, type));
}
}
}
diff --git a/src/tools/wasm-ctor-eval.cpp b/src/tools/wasm-ctor-eval.cpp
index b0e2e2ce7..4a7ca51b7 100644
--- a/src/tools/wasm-ctor-eval.cpp
+++ b/src/tools/wasm-ctor-eval.cpp
@@ -195,12 +195,12 @@ struct CtorEvalExternalInterface : EvallingModuleInstance::ExternalInterface {
// fill in fake values for everything else, which is dangerous to use
ModuleUtils::iterDefinedGlobals(wasm_, [&](Global* defined) {
if (globals.find(defined->name) == globals.end()) {
- globals[defined->name] = LiteralUtils::makeLiteralZero(defined->type);
+ globals[defined->name] = Literal::makeZero(defined->type);
}
});
ModuleUtils::iterImportedGlobals(wasm_, [&](Global* import) {
if (globals.find(import->name) == globals.end()) {
- globals[import->name] = LiteralUtils::makeLiteralZero(import->type);
+ globals[import->name] = Literal::makeZero(import->type);
}
});
}
diff --git a/src/tools/wasm-reduce.cpp b/src/tools/wasm-reduce.cpp
index 8c5df7a1b..02174bb8a 100644
--- a/src/tools/wasm-reduce.cpp
+++ b/src/tools/wasm-reduce.cpp
@@ -839,7 +839,7 @@ struct Reducer : public WalkerPass<PostWalker<Reducer, UnifiedExpressionVisitor<
// try to replace with a trivial value
Const* c = builder->makeConst(Literal(int32_t(0)));
if (tryToReplaceCurrent(c)) return true;
- c->value = LiteralUtils::makeLiteralFromInt32(1, curr->type);
+ c->value = Literal::makeFromInt32(1, curr->type);
c->type = curr->type;
return tryToReplaceCurrent(c);
}
diff --git a/src/wasm-binary.h b/src/wasm-binary.h
index 45052bcb2..36c9f2116 100644
--- a/src/wasm-binary.h
+++ b/src/wasm-binary.h
@@ -330,6 +330,7 @@ enum EncodedType {
i64 = -0x2, // 0x7e
f32 = -0x3, // 0x7d
f64 = -0x4, // 0x7c
+ v128 = -0x5, // 0x7b
// elem_type
AnyFunc = -0x10, // 0x70
// func_type form
@@ -549,6 +550,7 @@ enum ASTNodes {
I64ExtendS32 = 0xc4,
TruncSatPrefix = 0xfc,
+ SIMDPrefix = 0xfd,
AtomicPrefix = 0xfe
};
@@ -639,6 +641,149 @@ enum TruncSatOpcodes {
I64UTruncSatF64 = 0x07,
};
+enum SIMDOpcodes {
+ V128Load = 0x00,
+ V128Store = 0x01,
+ V128Const = 0x02,
+ V8x16Shuffle = 0x03,
+ I8x16Splat = 0x04,
+ I8x16ExtractLaneS = 0x05,
+ I8x16ExtractLaneU = 0x06,
+ I8x16ReplaceLane = 0x07,
+ I16x8Splat = 0x08,
+ I16x8ExtractLaneS = 0x09,
+ I16x8ExtractLaneU = 0x0a,
+ I16x8ReplaceLane = 0x0b,
+ I32x4Splat = 0x0c,
+ I32x4ExtractLane = 0x0d,
+ I32x4ReplaceLane = 0x0e,
+ I64x2Splat = 0x0f,
+ I64x2ExtractLane = 0x10,
+ I64x2ReplaceLane = 0x11,
+ F32x4Splat = 0x12,
+ F32x4ExtractLane = 0x13,
+ F32x4ReplaceLane = 0x14,
+ F64x2Splat = 0x15,
+ F64x2ExtractLane = 0x16,
+ F64x2ReplaceLane = 0x17,
+ I8x16Eq = 0x18,
+ I8x16Ne = 0x19,
+ I8x16LtS = 0x1a,
+ I8x16LtU = 0x1b,
+ I8x16GtS = 0x1c,
+ I8x16GtU = 0x1d,
+ I8x16LeS = 0x1e,
+ I8x16LeU = 0x1f,
+ I8x16GeS = 0x20,
+ I8x16GeU = 0x21,
+ I16x8Eq = 0x22,
+ I16x8Ne = 0x23,
+ I16x8LtS = 0x24,
+ I16x8LtU = 0x25,
+ I16x8GtS = 0x26,
+ I16x8GtU = 0x27,
+ I16x8LeS = 0x28,
+ I16x8LeU = 0x29,
+ I16x8GeS = 0x2a,
+ I16x8GeU = 0x2b,
+ I32x4Eq = 0x2c,
+ I32x4Ne = 0x2d,
+ I32x4LtS = 0x2e,
+ I32x4LtU = 0x2f,
+ I32x4GtS = 0x30,
+ I32x4GtU = 0x31,
+ I32x4LeS = 0x32,
+ I32x4LeU = 0x33,
+ I32x4GeS = 0x34,
+ I32x4GeU = 0x35,
+ F32x4Eq = 0x40,
+ F32x4Ne = 0x41,
+ F32x4Lt = 0x42,
+ F32x4Gt = 0x43,
+ F32x4Le = 0x44,
+ F32x4Ge = 0x45,
+ F64x2Eq = 0x46,
+ F64x2Ne = 0x47,
+ F64x2Lt = 0x48,
+ F64x2Gt = 0x49,
+ F64x2Le = 0x4a,
+ F64x2Ge = 0x4b,
+ V128Not = 0x4c,
+ V128And = 0x4d,
+ V128Or = 0x4e,
+ V128Xor = 0x4f,
+ V128Bitselect = 0x50,
+ I8x16Neg = 0x51,
+ I8x16AnyTrue = 0x52,
+ I8x16AllTrue = 0x53,
+ I8x16Shl = 0x54,
+ I8x16ShrS = 0x55,
+ I8x16ShrU = 0x56,
+ I8x16Add = 0x57,
+ I8x16AddSatS = 0x58,
+ I8x16AddSatU = 0x59,
+ I8x16Sub = 0x5a,
+ I8x16SubSatS = 0x5b,
+ I8x16SubSatU = 0x5c,
+ I8x16Mul = 0x5d,
+ I16x8Neg = 0x62,
+ I16x8AnyTrue = 0x63,
+ I16x8AllTrue = 0x64,
+ I16x8Shl = 0x65,
+ I16x8ShrS = 0x66,
+ I16x8ShrU = 0x67,
+ I16x8Add = 0x68,
+ I16x8AddSatS = 0x69,
+ I16x8AddSatU = 0x6a,
+ I16x8Sub = 0x6b,
+ I16x8SubSatS = 0x6c,
+ I16x8SubSatU = 0x6d,
+ I16x8Mul = 0x6e,
+ I32x4Neg = 0x73,
+ I32x4AnyTrue = 0x74,
+ I32x4AllTrue = 0x75,
+ I32x4Shl = 0x76,
+ I32x4ShrS = 0x77,
+ I32x4ShrU = 0x78,
+ I32x4Add = 0x79,
+ I32x4Sub = 0x7c,
+ I32x4Mul = 0x7f,
+ I64x2Neg = 0x84,
+ I64x2AnyTrue = 0x85,
+ I64x2AllTrue = 0x86,
+ I64x2Shl = 0x87,
+ I64x2ShrS = 0x88,
+ I64x2ShrU = 0x89,
+ I64x2Add = 0x8a,
+ I64x2Sub = 0x8d,
+ F32x4Abs = 0x95,
+ F32x4Neg = 0x96,
+ F32x4Sqrt = 0x97,
+ F32x4Add = 0x9a,
+ F32x4Sub = 0x9b,
+ F32x4Mul = 0x9c,
+ F32x4Div = 0x9d,
+ F32x4Min = 0x9e,
+ F32x4Max = 0x9f,
+ F64x2Abs = 0xa0,
+ F64x2Neg = 0xa1,
+ F64x2Sqrt = 0xa2,
+ F64x2Add = 0xa5,
+ F64x2Sub = 0xa6,
+ F64x2Mul = 0xa7,
+ F64x2Div = 0xa8,
+ F64x2Min = 0xa9,
+ F64x2Max = 0xaa,
+ I32x4TruncSatSF32x4 = 0xab,
+ I32x4TruncSatUF32x4 = 0xac,
+ I64x2TruncSatSF64x2 = 0xad,
+ I64x2TruncSatUF64x2 = 0xae,
+ F32x4ConvertSI32x4 = 0xaf,
+ F32x4ConvertUI32x4 = 0xb0,
+ F64x2ConvertSI64x2 = 0xb1,
+ F64x2ConvertUI64x2 = 0xb2
+};
+
enum MemoryAccess {
Offset = 0x10, // bit 4
Alignment = 0x80, // bit 7
@@ -662,7 +807,7 @@ inline S32LEB binaryType(Type type) {
case i64: ret = BinaryConsts::EncodedType::i64; break;
case f32: ret = BinaryConsts::EncodedType::f32; break;
case f64: ret = BinaryConsts::EncodedType::f64; break;
- case v128: assert(false && "v128 not implemented yet");
+ case v128: ret = BinaryConsts::EncodedType::v128; break;
case unreachable: WASM_UNREACHABLE();
}
return S32LEB(ret);
@@ -814,9 +959,11 @@ public:
uint16_t getInt16();
uint32_t getInt32();
uint64_t getInt64();
+ uint8_t getLaneIdx(size_t lanes);
// it is unsafe to return a float directly, due to ABI issues with the signalling bit
Literal getFloat32Literal();
Literal getFloat64Literal();
+ Literal getVec128Literal();
uint32_t getU32LEB();
uint64_t getU64LEB();
int32_t getS32LEB();
@@ -948,6 +1095,7 @@ public:
void readMemoryAccess(Address& alignment, Address& offset);
bool maybeVisitLoad(Expression*& out, uint8_t code, bool isAtomic);
bool maybeVisitStore(Expression*& out, uint8_t code, bool isAtomic);
+ bool maybeVisitNontrappingTrunc(Expression*& out, uint32_t code);
bool maybeVisitAtomicRMW(Expression*& out, uint8_t code);
bool maybeVisitAtomicCmpxchg(Expression*& out, uint8_t code);
bool maybeVisitAtomicWait(Expression*& out, uint8_t code);
@@ -956,6 +1104,16 @@ public:
bool maybeVisitUnary(Expression*& out, uint8_t code);
bool maybeVisitBinary(Expression*& out, uint8_t code);
bool maybeVisitTruncSat(Expression*& out, uint32_t code);
+ bool maybeVisitSIMDBinary(Expression*& out, uint32_t code);
+ bool maybeVisitSIMDUnary(Expression*& out, uint32_t code);
+ bool maybeVisitSIMDConst(Expression*& out, uint32_t code);
+ bool maybeVisitSIMDLoad(Expression*& out, uint32_t code);
+ bool maybeVisitSIMDStore(Expression*& out, uint32_t code);
+ bool maybeVisitSIMDExtract(Expression*& out, uint32_t code);
+ bool maybeVisitSIMDReplace(Expression*& out, uint32_t code);
+ bool maybeVisitSIMDShuffle(Expression*& out, uint32_t code);
+ bool maybeVisitSIMDBitselect(Expression*& out, uint32_t code);
+ bool maybeVisitSIMDShift(Expression*& out, uint32_t code);
void visitSelect(Select* curr);
void visitReturn(Return* curr);
bool maybeVisitHost(Expression*& out, uint8_t code);
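For reference, an illustrative sketch (not from the patch) of how these SIMD opcodes reach the binary: the writer emits the 0xfd prefix byte followed by the subopcode as an unsigned LEB128, as the wasm-stack.h changes below show. The LEB helper here is a stand-in for the existing utilities:

  #include <cstdint>
  #include <vector>

  static void writeU32LEB(std::vector<uint8_t>& out, uint32_t value) {
    do {
      uint8_t byte = value & 0x7f;
      value >>= 7;
      if (value != 0) byte |= 0x80; // set the continuation bit
      out.push_back(byte);
    } while (value != 0);
  }

  // i8x16.add: SIMD prefix byte, then the LEB-encoded subopcode
  static void emitI8x16Add(std::vector<uint8_t>& out) {
    out.push_back(0xfd);    // BinaryConsts::SIMDPrefix
    writeU32LEB(out, 0x57); // BinaryConsts::I8x16Add
  }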
diff --git a/src/wasm-builder.h b/src/wasm-builder.h
index f36ec7a88..dccefa144 100644
--- a/src/wasm-builder.h
+++ b/src/wasm-builder.h
@@ -293,6 +293,47 @@ public:
ret->finalize();
return ret;
}
+ SIMDExtract* makeSIMDExtract(SIMDExtractOp op, Expression* vec, uint8_t idx) {
+ auto* ret = allocator.alloc<SIMDExtract>();
+ ret->op = op;
+ ret->vec = vec;
+ ret->idx = idx;
+ ret->finalize();
+ return ret;
+ }
+ SIMDReplace* makeSIMDReplace(SIMDReplaceOp op, Expression* vec, uint8_t idx, Expression* value) {
+ auto* ret = allocator.alloc<SIMDReplace>();
+ ret->op = op;
+ ret->vec = vec;
+ ret->idx = idx;
+ ret->value = value;
+ ret->finalize();
+ return ret;
+ }
+ SIMDShuffle* makeSIMDShuffle(Expression* left, Expression* right, const std::array<uint8_t, 16>& mask) {
+ auto* ret = allocator.alloc<SIMDShuffle>();
+ ret->left = left;
+ ret->right = right;
+ ret->mask = mask;
+ ret->finalize();
+ return ret;
+ }
+ SIMDBitselect* makeSIMDBitselect(Expression* left, Expression* right, Expression* cond) {
+ auto* ret = allocator.alloc<SIMDBitselect>();
+ ret->left = left;
+ ret->right = right;
+ ret->cond = cond;
+ ret->finalize();
+ return ret;
+ }
+ SIMDShift* makeSIMDShift(SIMDShiftOp op, Expression* vec, Expression* shift) {
+ auto* ret = allocator.alloc<SIMDShift>();
+ ret->op = op;
+ ret->vec = vec;
+ ret->shift = shift;
+ ret->finalize();
+ return ret;
+ }
Const* makeConst(Literal value) {
assert(isConcreteType(value.type));
auto* ret = allocator.alloc<Const>();
@@ -474,7 +515,12 @@ public:
case i64: value = Literal(int64_t(0)); break;
case f32: value = Literal(float(0)); break;
case f64: value = Literal(double(0)); break;
- case v128: assert(false && "v128 not implemented yet");
+ case v128: {
+ std::array<uint8_t, 16> bytes;
+ bytes.fill(0);
+ value = Literal(bytes.data());
+ break;
+ }
case none: return ExpressionManipulator::nop(curr);
case unreachable: return ExpressionManipulator::convert<T, Unreachable>(curr);
}
diff --git a/src/wasm-interpreter.h b/src/wasm-interpreter.h
index 8554daded..e970a4f83 100644
--- a/src/wasm-interpreter.h
+++ b/src/wasm-interpreter.h
@@ -303,7 +303,39 @@ public:
case PromoteFloat32: return value.extendToF64();
case ReinterpretFloat64: return value.castToI64();
case DemoteFloat64: return value.demote();
-
+ case SplatVecI8x16: return value.splatI8x16();
+ case SplatVecI16x8: return value.splatI16x8();
+ case SplatVecI32x4: return value.splatI32x4();
+ case SplatVecI64x2: return value.splatI64x2();
+ case SplatVecF32x4: return value.splatF32x4();
+ case SplatVecF64x2: return value.splatF64x2();
+ case NotVec128: return value.notV128();
+ case NegVecI8x16: return value.negI8x16();
+ case AnyTrueVecI8x16: return value.anyTrueI8x16();
+ case AllTrueVecI8x16: return value.allTrueI8x16();
+ case NegVecI16x8: return value.negI16x8();
+ case AnyTrueVecI16x8: return value.anyTrueI16x8();
+ case AllTrueVecI16x8: return value.allTrueI16x8();
+ case NegVecI32x4: return value.negI32x4();
+ case AnyTrueVecI32x4: return value.anyTrueI32x4();
+ case AllTrueVecI32x4: return value.allTrueI32x4();
+ case NegVecI64x2: return value.negI64x2();
+ case AnyTrueVecI64x2: return value.anyTrueI64x2();
+ case AllTrueVecI64x2: return value.allTrueI64x2();
+ case AbsVecF32x4: return value.absF32x4();
+ case NegVecF32x4: return value.negF32x4();
+ case SqrtVecF32x4: return value.sqrtF32x4();
+ case AbsVecF64x2: return value.absF64x2();
+ case NegVecF64x2: return value.negF64x2();
+ case SqrtVecF64x2: return value.sqrtF64x2();
+ case TruncSatSVecF32x4ToVecI32x4: return value.truncSatToSI32x4();
+ case TruncSatUVecF32x4ToVecI32x4: return value.truncSatToUI32x4();
+ case TruncSatSVecF64x2ToVecI64x2: return value.truncSatToSI64x2();
+ case TruncSatUVecF64x2ToVecI64x2: return value.truncSatToUI64x2();
+ case ConvertSVecI32x4ToVecF32x4: return value.convertSToF32x4();
+ case ConvertUVecI32x4ToVecF32x4: return value.convertUToF32x4();
+ case ConvertSVecI64x2ToVecF64x2: return value.convertSToF64x2();
+ case ConvertUVecI64x2ToVecF64x2: return value.convertUToF64x2();
case InvalidUnary: WASM_UNREACHABLE();
}
WASM_UNREACHABLE();
@@ -427,10 +459,172 @@ public:
case MaxFloat32:
case MaxFloat64: return left.max(right);
+ case EqVecI8x16: return left.eqI8x16(right);
+ case NeVecI8x16: return left.neI8x16(right);
+ case LtSVecI8x16: return left.ltSI8x16(right);
+ case LtUVecI8x16: return left.ltUI8x16(right);
+ case GtSVecI8x16: return left.gtSI8x16(right);
+ case GtUVecI8x16: return left.gtUI8x16(right);
+ case LeSVecI8x16: return left.leSI8x16(right);
+ case LeUVecI8x16: return left.leUI8x16(right);
+ case GeSVecI8x16: return left.geSI8x16(right);
+ case GeUVecI8x16: return left.geUI8x16(right);
+ case EqVecI16x8: return left.eqI16x8(right);
+ case NeVecI16x8: return left.neI16x8(right);
+ case LtSVecI16x8: return left.ltSI16x8(right);
+ case LtUVecI16x8: return left.ltUI16x8(right);
+ case GtSVecI16x8: return left.gtSI16x8(right);
+ case GtUVecI16x8: return left.gtUI16x8(right);
+ case LeSVecI16x8: return left.leSI16x8(right);
+ case LeUVecI16x8: return left.leUI16x8(right);
+ case GeSVecI16x8: return left.geSI16x8(right);
+ case GeUVecI16x8: return left.geUI16x8(right);
+ case EqVecI32x4: return left.eqI32x4(right);
+ case NeVecI32x4: return left.neI32x4(right);
+ case LtSVecI32x4: return left.ltSI32x4(right);
+ case LtUVecI32x4: return left.ltUI32x4(right);
+ case GtSVecI32x4: return left.gtSI32x4(right);
+ case GtUVecI32x4: return left.gtUI32x4(right);
+ case LeSVecI32x4: return left.leSI32x4(right);
+ case LeUVecI32x4: return left.leUI32x4(right);
+ case GeSVecI32x4: return left.geSI32x4(right);
+ case GeUVecI32x4: return left.geUI32x4(right);
+ case EqVecF32x4: return left.eqF32x4(right);
+ case NeVecF32x4: return left.neF32x4(right);
+ case LtVecF32x4: return left.ltF32x4(right);
+ case GtVecF32x4: return left.gtF32x4(right);
+ case LeVecF32x4: return left.leF32x4(right);
+ case GeVecF32x4: return left.geF32x4(right);
+ case EqVecF64x2: return left.eqF64x2(right);
+ case NeVecF64x2: return left.neF64x2(right);
+ case LtVecF64x2: return left.ltF64x2(right);
+ case GtVecF64x2: return left.gtF64x2(right);
+ case LeVecF64x2: return left.leF64x2(right);
+ case GeVecF64x2: return left.geF64x2(right);
+
+ case AndVec128: return left.andV128(right);
+ case OrVec128: return left.orV128(right);
+ case XorVec128: return left.xorV128(right);
+
+ case AddVecI8x16: return left.addI8x16(right);
+ case AddSatSVecI8x16: return left.addSaturateSI8x16(right);
+ case AddSatUVecI8x16: return left.addSaturateUI8x16(right);
+ case SubVecI8x16: return left.subI8x16(right);
+ case SubSatSVecI8x16: return left.subSaturateSI8x16(right);
+ case SubSatUVecI8x16: return left.subSaturateUI8x16(right);
+ case MulVecI8x16: return left.mulI8x16(right);
+ case AddVecI16x8: return left.addI16x8(right);
+ case AddSatSVecI16x8: return left.addSaturateSI16x8(right);
+ case AddSatUVecI16x8: return left.addSaturateUI16x8(right);
+ case SubVecI16x8: return left.subI16x8(right);
+ case SubSatSVecI16x8: return left.subSaturateSI16x8(right);
+ case SubSatUVecI16x8: return left.subSaturateUI16x8(right);
+ case MulVecI16x8: return left.mulI16x8(right);
+ case AddVecI32x4: return left.addI32x4(right);
+ case SubVecI32x4: return left.subI32x4(right);
+ case MulVecI32x4: return left.mulI32x4(right);
+ case AddVecI64x2: return left.addI64x2(right);
+ case SubVecI64x2: return left.subI64x2(right);
+
+ case AddVecF32x4: return left.addF32x4(right);
+ case SubVecF32x4: return left.subF32x4(right);
+ case MulVecF32x4: return left.mulF32x4(right);
+ case DivVecF32x4: return left.divF32x4(right);
+ case MinVecF32x4: return left.minF32x4(right);
+ case MaxVecF32x4: return left.maxF32x4(right);
+ case AddVecF64x2: return left.addF64x2(right);
+ case SubVecF64x2: return left.subF64x2(right);
+ case MulVecF64x2: return left.mulF64x2(right);
+ case DivVecF64x2: return left.divF64x2(right);
+ case MinVecF64x2: return left.minF64x2(right);
+ case MaxVecF64x2: return left.maxF64x2(right);
+
case InvalidBinary: WASM_UNREACHABLE();
}
WASM_UNREACHABLE();
}
+ Flow visitSIMDExtract(SIMDExtract *curr) {
+ NOTE_ENTER("SIMDExtract");
+ Flow flow = this->visit(curr->vec);
+ if (flow.breaking()) return flow;
+ Literal vec = flow.value;
+ switch (curr->op) {
+ case ExtractLaneSVecI8x16: return vec.extractLaneSI8x16(curr->idx);
+ case ExtractLaneUVecI8x16: return vec.extractLaneUI8x16(curr->idx);
+ case ExtractLaneSVecI16x8: return vec.extractLaneSI16x8(curr->idx);
+ case ExtractLaneUVecI16x8: return vec.extractLaneUI16x8(curr->idx);
+ case ExtractLaneVecI32x4: return vec.extractLaneI32x4(curr->idx);
+ case ExtractLaneVecI64x2: return vec.extractLaneI64x2(curr->idx);
+ case ExtractLaneVecF32x4: return vec.extractLaneF32x4(curr->idx);
+ case ExtractLaneVecF64x2: return vec.extractLaneF64x2(curr->idx);
+ }
+ WASM_UNREACHABLE();
+ }
+ Flow visitSIMDReplace(SIMDReplace *curr) {
+ NOTE_ENTER("SIMDReplace");
+ Flow flow = this->visit(curr->vec);
+ if (flow.breaking()) return flow;
+ Literal vec = flow.value;
+ flow = this->visit(curr->value);
+ if (flow.breaking()) return flow;
+ Literal value = flow.value;
+ switch (curr->op) {
+ case ReplaceLaneVecI8x16: return vec.replaceLaneI8x16(value, curr->idx);
+ case ReplaceLaneVecI16x8: return vec.replaceLaneI16x8(value, curr->idx);
+ case ReplaceLaneVecI32x4: return vec.replaceLaneI32x4(value, curr->idx);
+ case ReplaceLaneVecI64x2: return vec.replaceLaneI64x2(value, curr->idx);
+ case ReplaceLaneVecF32x4: return vec.replaceLaneF32x4(value, curr->idx);
+ case ReplaceLaneVecF64x2: return vec.replaceLaneF64x2(value, curr->idx);
+ }
+ WASM_UNREACHABLE();
+ }
+ Flow visitSIMDShuffle(SIMDShuffle *curr) {
+ NOTE_ENTER("SIMDShuffle");
+ Flow flow = this->visit(curr->left);
+ if (flow.breaking()) return flow;
+ Literal left = flow.value;
+ flow = this->visit(curr->right);
+ if (flow.breaking()) return flow;
+ Literal right = flow.value;
+ return left.shuffleV8x16(right, curr->mask);
+ }
+ Flow visitSIMDBitselect(SIMDBitselect *curr) {
+ NOTE_ENTER("SIMDShuffle");
+ Flow flow = this->visit(curr->left);
+ if (flow.breaking()) return flow;
+ Literal left = flow.value;
+ flow = this->visit(curr->right);
+ if (flow.breaking()) return flow;
+ Literal right = flow.value;
+ flow = this->visit(curr->cond);
+ if (flow.breaking()) return flow;
+ Literal cond = flow.value;
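+ // each result bit comes from left where cond is 1 and from right where cond is 0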
+ return cond.bitselectV128(left, right);
+ }
+ Flow visitSIMDShift(SIMDShift *curr) {
+ NOTE_ENTER("SIMDShift");
+ Flow flow = this->visit(curr->vec);
+ if (flow.breaking()) return flow;
+ Literal vec = flow.value;
+ flow = this->visit(curr->shift);
+ if (flow.breaking()) return flow;
+ Literal shift = flow.value;
+ switch (curr->op) {
+ case ShlVecI8x16: return vec.shlI8x16(shift);
+ case ShrSVecI8x16: return vec.shrSI8x16(shift);
+ case ShrUVecI8x16: return vec.shrUI8x16(shift);
+ case ShlVecI16x8: return vec.shlI16x8(shift);
+ case ShrSVecI16x8: return vec.shrSI16x8(shift);
+ case ShrUVecI16x8: return vec.shrUI16x8(shift);
+ case ShlVecI32x4: return vec.shlI32x4(shift);
+ case ShrSVecI32x4: return vec.shrSI32x4(shift);
+ case ShrUVecI32x4: return vec.shrUI32x4(shift);
+ case ShlVecI64x2: return vec.shlI64x2(shift);
+ case ShrSVecI64x2: return vec.shrSI64x2(shift);
+ case ShrUVecI64x2: return vec.shrUI64x2(shift);
+ }
+ WASM_UNREACHABLE();
+ }
Flow visitSelect(Select *curr) {
NOTE_ENTER("Select");
Flow ifTrue = visit(curr->ifTrue);
@@ -586,7 +780,7 @@ public:
}
case f32: return Literal(load32u(addr)).castToF32();
case f64: return Literal(load64u(addr)).castToF64();
- case v128: assert(false && "v128 not implemented yet");
+ case v128: return Literal(load128(addr).data());
case none:
case unreachable: WASM_UNREACHABLE();
}
@@ -616,7 +810,7 @@ public:
// write floats carefully, ensuring all bits reach memory
case f32: store32(addr, value.reinterpreti32()); break;
case f64: store64(addr, value.reinterpreti64()); break;
- case v128: assert(false && "v128 not implemented yet");
+ case v128: store128(addr, value.getv128()); break;
case none:
case unreachable: WASM_UNREACHABLE();
}
@@ -630,11 +824,13 @@ public:
virtual uint32_t load32u(Address addr) { WASM_UNREACHABLE(); }
virtual int64_t load64s(Address addr) { WASM_UNREACHABLE(); }
virtual uint64_t load64u(Address addr) { WASM_UNREACHABLE(); }
+ virtual std::array<uint8_t, 16> load128(Address addr) { WASM_UNREACHABLE(); }
virtual void store8(Address addr, int8_t value) { WASM_UNREACHABLE(); }
virtual void store16(Address addr, int16_t value) { WASM_UNREACHABLE(); }
virtual void store32(Address addr, int32_t value) { WASM_UNREACHABLE(); }
virtual void store64(Address addr, int64_t value) { WASM_UNREACHABLE(); }
+ virtual void store128(Address addr, const std::array<uint8_t, 16>&) { WASM_UNREACHABLE(); }
};
SubType* self() {
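For reference, a standalone sketch of the v128.bitselect semantics that cond.bitselectV128(left, right) above is expected to implement (the actual implementation lives in literal.cpp, outside this hunk): each result bit is taken from left where the condition bit is 1 and from right where it is 0.

  #include <array>
  #include <cstdint>

  std::array<uint8_t, 16> bitselect(const std::array<uint8_t, 16>& left,
                                    const std::array<uint8_t, 16>& right,
                                    const std::array<uint8_t, 16>& cond) {
    std::array<uint8_t, 16> out;
    for (size_t i = 0; i < 16; i++) {
      out[i] = (left[i] & cond[i]) | (right[i] & ~cond[i]);
    }
    return out;
  }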
diff --git a/src/wasm-s-parser.h b/src/wasm-s-parser.h
index 517398c5c..0845fa70e 100644
--- a/src/wasm-s-parser.h
+++ b/src/wasm-s-parser.h
@@ -190,6 +190,11 @@ private:
Expression* makeAtomicCmpxchg(Element& s, Type type, uint8_t bytes, const char* extra);
Expression* makeAtomicWait(Element& s, Type type);
Expression* makeAtomicWake(Element& s);
+ Expression* makeSIMDExtract(Element& s, SIMDExtractOp op, size_t lanes);
+ Expression* makeSIMDReplace(Element& s, SIMDReplaceOp op, size_t lanes);
+ Expression* makeSIMDShuffle(Element& s);
+ Expression* makeSIMDBitselect(Element& s);
+ Expression* makeSIMDShift(Element& s, SIMDShiftOp);
Expression* makeIf(Element& s);
Expression* makeMaybeBlock(Element& s, size_t i, Type type);
Expression* makeLoop(Element& s);
diff --git a/src/wasm-stack.h b/src/wasm-stack.h
index 6e1150981..0c10a88fc 100644
--- a/src/wasm-stack.h
+++ b/src/wasm-stack.h
@@ -135,6 +135,11 @@ public:
void visitAtomicCmpxchg(AtomicCmpxchg* curr);
void visitAtomicWait(AtomicWait* curr);
void visitAtomicWake(AtomicWake* curr);
+ void visitSIMDExtract(SIMDExtract* curr);
+ void visitSIMDReplace(SIMDReplace* curr);
+ void visitSIMDShuffle(SIMDShuffle* curr);
+ void visitSIMDBitselect(SIMDBitselect* curr);
+ void visitSIMDShift(SIMDShift* curr);
void visitConst(Const* curr);
void visitUnary(Unary* curr);
void visitBinary(Binary* curr);
@@ -634,7 +639,7 @@ void StackWriter<Mode, Parent>::visitLoad(Load* curr) {
}
case f32: o << int8_t(BinaryConsts::F32LoadMem); break;
case f64: o << int8_t(BinaryConsts::F64LoadMem); break;
- case v128: assert(false && "v128 not implemented yet");
+ case v128: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::V128Load); break;
case unreachable: return; // the pointer is unreachable, so we are never reached; just don't emit a load
case none: WASM_UNREACHABLE();
}
@@ -701,7 +706,7 @@ void StackWriter<Mode, Parent>::visitStore(Store* curr) {
}
case f32: o << int8_t(BinaryConsts::F32StoreMem); break;
case f64: o << int8_t(BinaryConsts::F64StoreMem); break;
- case v128: assert(false && "v128 not implemented yet");
+ case v128: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::V128Store); break;
case none:
case unreachable: WASM_UNREACHABLE();
}
@@ -872,6 +877,84 @@ void StackWriter<Mode, Parent>::visitAtomicWake(AtomicWake* curr) {
}
template<StackWriterMode Mode, typename Parent>
+void StackWriter<Mode, Parent>::visitSIMDExtract(SIMDExtract* curr) {
+ visitChild(curr->vec);
+ if (justAddToStack(curr)) return;
+ o << int8_t(BinaryConsts::SIMDPrefix);
+ switch (curr->op) {
+ case ExtractLaneSVecI8x16: o << U32LEB(BinaryConsts::I8x16ExtractLaneS); break;
+ case ExtractLaneUVecI8x16: o << U32LEB(BinaryConsts::I8x16ExtractLaneU); break;
+ case ExtractLaneSVecI16x8: o << U32LEB(BinaryConsts::I16x8ExtractLaneS); break;
+ case ExtractLaneUVecI16x8: o << U32LEB(BinaryConsts::I16x8ExtractLaneU); break;
+ case ExtractLaneVecI32x4: o << U32LEB(BinaryConsts::I32x4ExtractLane); break;
+ case ExtractLaneVecI64x2: o << U32LEB(BinaryConsts::I64x2ExtractLane); break;
+ case ExtractLaneVecF32x4: o << U32LEB(BinaryConsts::F32x4ExtractLane); break;
+ case ExtractLaneVecF64x2: o << U32LEB(BinaryConsts::F64x2ExtractLane); break;
+ }
+ o << uint8_t(curr->idx);
+}
+
+template<StackWriterMode Mode, typename Parent>
+void StackWriter<Mode, Parent>::visitSIMDReplace(SIMDReplace* curr) {
+ visitChild(curr->vec);
+ visitChild(curr->value);
+ if (justAddToStack(curr)) return;
+ o << int8_t(BinaryConsts::SIMDPrefix);
+ switch (curr->op) {
+ case ReplaceLaneVecI8x16: o << U32LEB(BinaryConsts::I8x16ReplaceLane); break;
+ case ReplaceLaneVecI16x8: o << U32LEB(BinaryConsts::I16x8ReplaceLane); break;
+ case ReplaceLaneVecI32x4: o << U32LEB(BinaryConsts::I32x4ReplaceLane); break;
+ case ReplaceLaneVecI64x2: o << U32LEB(BinaryConsts::I64x2ReplaceLane); break;
+ case ReplaceLaneVecF32x4: o << U32LEB(BinaryConsts::F32x4ReplaceLane); break;
+ case ReplaceLaneVecF64x2: o << U32LEB(BinaryConsts::F64x2ReplaceLane); break;
+ }
+ assert(curr->idx < 16);
+ o << uint8_t(curr->idx);
+}
+
+template<StackWriterMode Mode, typename Parent>
+void StackWriter<Mode, Parent>::visitSIMDShuffle(SIMDShuffle* curr) {
+ visitChild(curr->left);
+ visitChild(curr->right);
+ if (justAddToStack(curr)) return;
+ o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::V8x16Shuffle);
+ for (uint8_t m : curr->mask) {
+ o << m;
+ }
+}
+
+template<StackWriterMode Mode, typename Parent>
+void StackWriter<Mode, Parent>::visitSIMDBitselect(SIMDBitselect* curr) {
+ visitChild(curr->left);
+ visitChild(curr->right);
+ visitChild(curr->cond);
+ if (justAddToStack(curr)) return;
+ o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::V128Bitselect);
+}
+
+template<StackWriterMode Mode, typename Parent>
+void StackWriter<Mode, Parent>::visitSIMDShift(SIMDShift* curr) {
+ visitChild(curr->vec);
+ visitChild(curr->shift);
+ if (justAddToStack(curr)) return;
+ o << int8_t(BinaryConsts::SIMDPrefix);
+ switch (curr->op) {
+ case ShlVecI8x16: o << U32LEB(BinaryConsts::I8x16Shl); break;
+ case ShrSVecI8x16: o << U32LEB(BinaryConsts::I8x16ShrS); break;
+ case ShrUVecI8x16: o << U32LEB(BinaryConsts::I8x16ShrU); break;
+ case ShlVecI16x8: o << U32LEB(BinaryConsts::I16x8Shl); break;
+ case ShrSVecI16x8: o << U32LEB(BinaryConsts::I16x8ShrS); break;
+ case ShrUVecI16x8: o << U32LEB(BinaryConsts::I16x8ShrU); break;
+ case ShlVecI32x4: o << U32LEB(BinaryConsts::I32x4Shl); break;
+ case ShrSVecI32x4: o << U32LEB(BinaryConsts::I32x4ShrS); break;
+ case ShrUVecI32x4: o << U32LEB(BinaryConsts::I32x4ShrU); break;
+ case ShlVecI64x2: o << U32LEB(BinaryConsts::I64x2Shl); break;
+ case ShrSVecI64x2: o << U32LEB(BinaryConsts::I64x2ShrS); break;
+ case ShrUVecI64x2: o << U32LEB(BinaryConsts::I64x2ShrU); break;
+ }
+}
+
+template<StackWriterMode Mode, typename Parent>
void StackWriter<Mode, Parent>::visitConst(Const* curr) {
if (debug) std::cerr << "zz node: Const" << curr << " : " << curr->type << std::endl;
if (justAddToStack(curr)) return;
@@ -892,9 +975,17 @@ void StackWriter<Mode, Parent>::visitConst(Const* curr) {
o << int8_t(BinaryConsts::F64Const) << curr->value.reinterpreti64();
break;
}
- case v128: assert(false && "v128 not implemented yet");
+ case v128: {
+ o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::V128Const);
+ std::array<uint8_t, 16> v = curr->value.getv128();
+ for (size_t i = 0; i < 16; ++i) {
+ o << uint8_t(v[i]);
+ }
+ break;
+ }
case none:
- case unreachable: WASM_UNREACHABLE();
+ case unreachable:
+ WASM_UNREACHABLE();
}
if (debug) std::cerr << "zz const node done.\n";
}
@@ -969,6 +1060,39 @@ void StackWriter<Mode, Parent>::visitUnary(Unary* curr) {
case TruncSatUFloat32ToInt64: o << int8_t(BinaryConsts::TruncSatPrefix) << U32LEB(BinaryConsts::I64UTruncSatF32); break;
case TruncSatSFloat64ToInt64: o << int8_t(BinaryConsts::TruncSatPrefix) << U32LEB(BinaryConsts::I64STruncSatF64); break;
case TruncSatUFloat64ToInt64: o << int8_t(BinaryConsts::TruncSatPrefix) << U32LEB(BinaryConsts::I64UTruncSatF64); break;
+ case SplatVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16Splat); break;
+ case SplatVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8Splat); break;
+ case SplatVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4Splat); break;
+ case SplatVecI64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I64x2Splat); break;
+ case SplatVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Splat); break;
+ case SplatVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Splat); break;
+ case NotVec128: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::V128Not); break;
+ case NegVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16Neg); break;
+ case AnyTrueVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16AnyTrue); break;
+ case AllTrueVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16AllTrue); break;
+ case NegVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8Neg); break;
+ case AnyTrueVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8AnyTrue); break;
+ case AllTrueVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8AllTrue); break;
+ case NegVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4Neg); break;
+ case AnyTrueVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4AnyTrue); break;
+ case AllTrueVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4AllTrue); break;
+ case NegVecI64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I64x2Neg); break;
+ case AnyTrueVecI64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I64x2AnyTrue); break;
+ case AllTrueVecI64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I64x2AllTrue); break;
+ case AbsVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Abs); break;
+ case NegVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Neg); break;
+ case SqrtVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Sqrt); break;
+ case AbsVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Abs); break;
+ case NegVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Neg); break;
+ case SqrtVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Sqrt); break;
+ case TruncSatSVecF32x4ToVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4TruncSatSF32x4); break;
+ case TruncSatUVecF32x4ToVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4TruncSatUF32x4); break;
+ case TruncSatSVecF64x2ToVecI64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I64x2TruncSatSF64x2); break;
+ case TruncSatUVecF64x2ToVecI64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I64x2TruncSatUF64x2); break;
+ case ConvertSVecI32x4ToVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4ConvertSI32x4); break;
+ case ConvertUVecI32x4ToVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4ConvertUI32x4); break;
+ case ConvertSVecI64x2ToVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2ConvertSI64x2); break;
+ case ConvertUVecI64x2ToVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2ConvertUI64x2); break;
case InvalidUnary: WASM_UNREACHABLE();
}
}
@@ -1063,6 +1187,85 @@ void StackWriter<Mode, Parent>::visitBinary(Binary* curr) {
case LeFloat64: o << int8_t(BinaryConsts::F64Le); break;
case GtFloat64: o << int8_t(BinaryConsts::F64Gt); break;
case GeFloat64: o << int8_t(BinaryConsts::F64Ge); break;
+
+ case EqVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16Eq); break;
+ case NeVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16Ne); break;
+ case LtSVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16LtS); break;
+ case LtUVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16LtU); break;
+ case GtSVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16GtS); break;
+ case GtUVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16GtU); break;
+ case LeSVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16LeS); break;
+ case LeUVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16LeU); break;
+ case GeSVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16GeS); break;
+ case GeUVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16GeU); break;
+ case EqVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8Eq); break;
+ case NeVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8Ne); break;
+ case LtSVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8LtS); break;
+ case LtUVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8LtU); break;
+ case GtSVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8GtS); break;
+ case GtUVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8GtU); break;
+ case LeSVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8LeS); break;
+ case LeUVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8LeU); break;
+ case GeSVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8GeS); break;
+ case GeUVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8GeU); break;
+ case EqVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4Eq); break;
+ case NeVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4Ne); break;
+ case LtSVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4LtS); break;
+ case LtUVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4LtU); break;
+ case GtSVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4GtS); break;
+ case GtUVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4GtU); break;
+ case LeSVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4LeS); break;
+ case LeUVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4LeU); break;
+ case GeSVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4GeS); break;
+ case GeUVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4GeU); break;
+ case EqVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Eq); break;
+ case NeVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Ne); break;
+ case LtVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Lt); break;
+ case GtVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Gt); break;
+ case LeVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Le); break;
+ case GeVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Ge); break;
+ case EqVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Eq); break;
+ case NeVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Ne); break;
+ case LtVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Lt); break;
+ case GtVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Gt); break;
+ case LeVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Le); break;
+ case GeVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Ge); break;
+ case AndVec128: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::V128And); break;
+ case OrVec128: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::V128Or); break;
+ case XorVec128: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::V128Xor); break;
+
+ case AddVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16Add); break;
+ case AddSatSVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16AddSatS); break;
+ case AddSatUVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16AddSatU); break;
+ case SubVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16Sub); break;
+ case SubSatSVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16SubSatS); break;
+ case SubSatUVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16SubSatU); break;
+ case MulVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16Mul); break;
+ case AddVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8Add); break;
+ case AddSatSVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8AddSatS); break;
+ case AddSatUVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8AddSatU); break;
+ case SubVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8Sub); break;
+ case SubSatSVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8SubSatS); break;
+ case SubSatUVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8SubSatU); break;
+ case MulVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8Mul); break;
+ case AddVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4Add); break;
+ case SubVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4Sub); break;
+ case MulVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4Mul); break;
+ case AddVecI64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I64x2Add); break;
+ case SubVecI64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I64x2Sub); break;
+
+ case AddVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Add); break;
+ case SubVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Sub); break;
+ case MulVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Mul); break;
+ case DivVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Div); break;
+ case MinVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Min); break;
+ case MaxVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Max); break;
+ case AddVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Add); break;
+ case SubVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Sub); break;
+ case MulVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Mul); break;
+ case DivVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Div); break;
+ case MinVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Min); break;
+ case MaxVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Max); break;
case InvalidBinary: WASM_UNREACHABLE();
}
}
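
Illustrative sketch (not part of this patch): every SIMD instruction above is written as the one-byte SIMDPrefix followed by its opcode as an unsigned LEB128, then any immediates (a lane index byte, a 16-byte shuffle mask, or 16 literal bytes). The concrete prefix and opcode values live in BinaryConsts and are not shown in this hunk, so they appear as parameters here.

#include <cstdint>
#include <vector>

// Append an unsigned LEB128 value to a byte stream.
void writeU32LEB(std::vector<uint8_t>& out, uint32_t value) {
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if (value != 0) byte |= 0x80;  // set the continuation bit
    out.push_back(byte);
  } while (value != 0);
}

// For example, an extract_lane instruction: prefix, LEB opcode, then the lane index.
void writeExtractLane(std::vector<uint8_t>& out, uint8_t simdPrefix,
                      uint32_t opcode, uint8_t laneIdx) {
  out.push_back(simdPrefix);
  writeU32LEB(out, opcode);
  out.push_back(laneIdx);
}
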
diff --git a/src/wasm-traversal.h b/src/wasm-traversal.h
index 9ea1124d0..200a67cb6 100644
--- a/src/wasm-traversal.h
+++ b/src/wasm-traversal.h
@@ -54,6 +54,11 @@ struct Visitor {
ReturnType visitAtomicCmpxchg(AtomicCmpxchg* curr) { return ReturnType(); }
ReturnType visitAtomicWait(AtomicWait* curr) { return ReturnType(); }
ReturnType visitAtomicWake(AtomicWake* curr) { return ReturnType(); }
+ ReturnType visitSIMDExtract(SIMDExtract* curr) { return ReturnType(); }
+ ReturnType visitSIMDReplace(SIMDReplace* curr) { return ReturnType(); }
+ ReturnType visitSIMDShuffle(SIMDShuffle* curr) { return ReturnType(); }
+ ReturnType visitSIMDBitselect(SIMDBitselect* curr) { return ReturnType(); }
+ ReturnType visitSIMDShift(SIMDShift* curr) { return ReturnType(); }
ReturnType visitConst(Const* curr) { return ReturnType(); }
ReturnType visitUnary(Unary* curr) { return ReturnType(); }
ReturnType visitBinary(Binary* curr) { return ReturnType(); }
@@ -97,6 +102,11 @@ struct Visitor {
case Expression::Id::AtomicCmpxchgId: DELEGATE(AtomicCmpxchg);
case Expression::Id::AtomicWaitId: DELEGATE(AtomicWait);
case Expression::Id::AtomicWakeId: DELEGATE(AtomicWake);
+ case Expression::Id::SIMDExtractId: DELEGATE(SIMDExtract);
+ case Expression::Id::SIMDReplaceId: DELEGATE(SIMDReplace);
+ case Expression::Id::SIMDShuffleId: DELEGATE(SIMDShuffle);
+ case Expression::Id::SIMDBitselectId: DELEGATE(SIMDBitselect);
+ case Expression::Id::SIMDShiftId: DELEGATE(SIMDShift);
case Expression::Id::ConstId: DELEGATE(Const);
case Expression::Id::UnaryId: DELEGATE(Unary);
case Expression::Id::BinaryId: DELEGATE(Binary);
@@ -142,6 +152,11 @@ struct OverriddenVisitor {
UNIMPLEMENTED(AtomicCmpxchg);
UNIMPLEMENTED(AtomicWait);
UNIMPLEMENTED(AtomicWake);
+ UNIMPLEMENTED(SIMDExtract);
+ UNIMPLEMENTED(SIMDReplace);
+ UNIMPLEMENTED(SIMDShuffle);
+ UNIMPLEMENTED(SIMDBitselect);
+ UNIMPLEMENTED(SIMDShift);
UNIMPLEMENTED(Const);
UNIMPLEMENTED(Unary);
UNIMPLEMENTED(Binary);
@@ -186,6 +201,11 @@ struct OverriddenVisitor {
case Expression::Id::AtomicCmpxchgId: DELEGATE(AtomicCmpxchg);
case Expression::Id::AtomicWaitId: DELEGATE(AtomicWait);
case Expression::Id::AtomicWakeId: DELEGATE(AtomicWake);
+ case Expression::Id::SIMDExtractId: DELEGATE(SIMDExtract);
+ case Expression::Id::SIMDReplaceId: DELEGATE(SIMDReplace);
+ case Expression::Id::SIMDShuffleId: DELEGATE(SIMDShuffle);
+ case Expression::Id::SIMDBitselectId: DELEGATE(SIMDBitselect);
+ case Expression::Id::SIMDShiftId: DELEGATE(SIMDShift);
case Expression::Id::ConstId: DELEGATE(Const);
case Expression::Id::UnaryId: DELEGATE(Unary);
case Expression::Id::BinaryId: DELEGATE(Binary);
@@ -229,6 +249,11 @@ struct UnifiedExpressionVisitor : public Visitor<SubType, ReturnType> {
ReturnType visitAtomicCmpxchg(AtomicCmpxchg* curr) { return static_cast<SubType*>(this)->visitExpression(curr); }
ReturnType visitAtomicWait(AtomicWait* curr) { return static_cast<SubType*>(this)->visitExpression(curr); }
ReturnType visitAtomicWake(AtomicWake* curr) { return static_cast<SubType*>(this)->visitExpression(curr); }
+ ReturnType visitSIMDExtract(SIMDExtract* curr) { return static_cast<SubType*>(this)->visitExpression(curr); }
+ ReturnType visitSIMDReplace(SIMDReplace* curr) { return static_cast<SubType*>(this)->visitExpression(curr); }
+ ReturnType visitSIMDShuffle(SIMDShuffle* curr) { return static_cast<SubType*>(this)->visitExpression(curr); }
+ ReturnType visitSIMDBitselect(SIMDBitselect* curr) { return static_cast<SubType*>(this)->visitExpression(curr); }
+ ReturnType visitSIMDShift(SIMDShift* curr) { return static_cast<SubType*>(this)->visitExpression(curr); }
ReturnType visitConst(Const* curr) { return static_cast<SubType*>(this)->visitExpression(curr); }
ReturnType visitUnary(Unary* curr) { return static_cast<SubType*>(this)->visitExpression(curr); }
ReturnType visitBinary(Binary* curr) { return static_cast<SubType*>(this)->visitExpression(curr); }
@@ -414,6 +439,11 @@ struct Walker : public VisitorType {
static void doVisitAtomicCmpxchg(SubType* self, Expression** currp){ self->visitAtomicCmpxchg((*currp)->cast<AtomicCmpxchg>()); }
static void doVisitAtomicWait(SubType* self, Expression** currp) { self->visitAtomicWait((*currp)->cast<AtomicWait>()); }
static void doVisitAtomicWake(SubType* self, Expression** currp) { self->visitAtomicWake((*currp)->cast<AtomicWake>()); }
+ static void doVisitSIMDExtract(SubType* self, Expression** currp) { self->visitSIMDExtract((*currp)->cast<SIMDExtract>()); }
+ static void doVisitSIMDReplace(SubType* self, Expression** currp) { self->visitSIMDReplace((*currp)->cast<SIMDReplace>()); }
+ static void doVisitSIMDShuffle(SubType* self, Expression** currp) { self->visitSIMDShuffle((*currp)->cast<SIMDShuffle>()); }
+ static void doVisitSIMDBitselect(SubType* self, Expression** currp) { self->visitSIMDBitselect((*currp)->cast<SIMDBitselect>()); }
+ static void doVisitSIMDShift(SubType* self, Expression** currp) { self->visitSIMDShift((*currp)->cast<SIMDShift>()); }
static void doVisitConst(SubType* self, Expression** currp) { self->visitConst((*currp)->cast<Const>()); }
static void doVisitUnary(SubType* self, Expression** currp) { self->visitUnary((*currp)->cast<Unary>()); }
static void doVisitBinary(SubType* self, Expression** currp) { self->visitBinary((*currp)->cast<Binary>()); }
@@ -554,6 +584,36 @@ struct PostWalker : public Walker<SubType, VisitorType> {
self->pushTask(SubType::scan, &curr->cast<AtomicWake>()->ptr);
break;
}
+ case Expression::Id::SIMDExtractId: {
+ self->pushTask(SubType::doVisitSIMDExtract, currp);
+ self->pushTask(SubType::scan, &curr->cast<SIMDExtract>()->vec);
+ break;
+ }
+ case Expression::Id::SIMDReplaceId: {
+ self->pushTask(SubType::doVisitSIMDReplace, currp);
+ self->pushTask(SubType::scan, &curr->cast<SIMDReplace>()->value);
+ self->pushTask(SubType::scan, &curr->cast<SIMDReplace>()->vec);
+ break;
+ }
+ case Expression::Id::SIMDShuffleId: {
+ self->pushTask(SubType::doVisitSIMDShuffle, currp);
+ self->pushTask(SubType::scan, &curr->cast<SIMDShuffle>()->right);
+ self->pushTask(SubType::scan, &curr->cast<SIMDShuffle>()->left);
+ break;
+ }
+ case Expression::Id::SIMDBitselectId: {
+ self->pushTask(SubType::doVisitSIMDBitselect, currp);
+ self->pushTask(SubType::scan, &curr->cast<SIMDBitselect>()->cond);
+ self->pushTask(SubType::scan, &curr->cast<SIMDBitselect>()->right);
+ self->pushTask(SubType::scan, &curr->cast<SIMDBitselect>()->left);
+ break;
+ }
+ case Expression::Id::SIMDShiftId: {
+ self->pushTask(SubType::doVisitSIMDShift, currp);
+ self->pushTask(SubType::scan, &curr->cast<SIMDShift>()->shift);
+ self->pushTask(SubType::scan, &curr->cast<SIMDShift>()->vec);
+ break;
+ }
case Expression::Id::ConstId: {
self->pushTask(SubType::doVisitConst, currp);
break;
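
Illustrative sketch (not part of this patch): with the visitor and walker hooks above in place, a pass-style helper reacts to the new nodes by overriding only the visit methods it cares about; children are scanned in execution order because they are pushed onto the task stack in reverse. SIMDCounter is a hypothetical name, assuming the usual wasm-traversal.h include.

#include "wasm-traversal.h"

// Hypothetical helper that counts SIMD expressions in an expression tree.
struct SIMDCounter : public wasm::PostWalker<SIMDCounter> {
  size_t count = 0;
  void visitSIMDExtract(wasm::SIMDExtract* curr) { count++; }
  void visitSIMDReplace(wasm::SIMDReplace* curr) { count++; }
  void visitSIMDShuffle(wasm::SIMDShuffle* curr) { count++; }
  void visitSIMDBitselect(wasm::SIMDBitselect* curr) { count++; }
  void visitSIMDShift(wasm::SIMDShift* curr) { count++; }
};

// Usage: SIMDCounter counter; counter.walk(body);  // body is an Expression*& to walk
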
diff --git a/src/wasm.h b/src/wasm.h
index b09b4d7d3..52a0ca299 100644
--- a/src/wasm.h
+++ b/src/wasm.h
@@ -25,6 +25,7 @@
#define wasm_wasm_h
#include <algorithm>
+#include <array>
#include <cassert>
#include <map>
#include <string>
@@ -43,7 +44,8 @@ struct FeatureSet {
Atomics = 1 << 0,
MutableGlobals = 1 << 1,
TruncSat = 1 << 2,
- All = Atomics | MutableGlobals | TruncSat
+ SIMD = 1 << 3,
+ All = Atomics | MutableGlobals | TruncSat | SIMD
};
FeatureSet() : features(MVP) {}
@@ -54,13 +56,15 @@ struct FeatureSet {
bool hasAtomics() const { return features & Atomics; }
bool hasMutableGlobals() const { return features & MutableGlobals; }
bool hasTruncSat() const { return features & TruncSat; }
- bool hasAll() const { return features & (Atomics | MutableGlobals | TruncSat); }
+ bool hasSIMD() const { return features & SIMD; }
+ bool hasAll() const { return features & All; }
void makeMVP() { features = MVP; }
void set(Feature f, bool v = true) { features = v ? (features | f) : (features & ~f); }
void setAtomics(bool v = true) { set(Atomics, v); }
void setMutableGlobals(bool v = true) { set(MutableGlobals, v); }
void setTruncSat(bool v = true) { set(TruncSat, v); }
+ void setSIMD(bool v = true) { set(SIMD, v); }
void setAll(bool v = true) { features = v ? All : MVP; }
private:
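
A quick sketch (not part of this patch) of how the new feature bit composes with the existing accessors; the assertions only restate what the setters and getters above do.

#include <cassert>
#include "wasm.h"

void featureDemo() {
  wasm::FeatureSet features;   // starts as MVP
  features.setSIMD();          // flip only the SIMD bit
  assert(features.hasSIMD());
  assert(!features.hasAtomics());
  features.setAll();
  assert(features.hasAll());   // hasAll() now checks against All, which includes SIMD
}
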
@@ -116,6 +120,15 @@ enum UnaryOp {
// Saturating float-to-int
TruncSatSFloat32ToInt32, TruncSatUFloat32ToInt32, TruncSatSFloat64ToInt32, TruncSatUFloat64ToInt32,
TruncSatSFloat32ToInt64, TruncSatUFloat32ToInt64, TruncSatSFloat64ToInt64, TruncSatUFloat64ToInt64,
+ // SIMD splats
+ SplatVecI8x16, SplatVecI16x8, SplatVecI32x4, SplatVecI64x2, SplatVecF32x4, SplatVecF64x2,
+ // SIMD arithmetic
+ NotVec128,
+ NegVecI8x16, AnyTrueVecI8x16, AllTrueVecI8x16, NegVecI16x8, AnyTrueVecI16x8, AllTrueVecI16x8,
+ NegVecI32x4, AnyTrueVecI32x4, AllTrueVecI32x4, NegVecI64x2, AnyTrueVecI64x2, AllTrueVecI64x2,
+ AbsVecF32x4, NegVecF32x4, SqrtVecF32x4, AbsVecF64x2, NegVecF64x2, SqrtVecF64x2,
+ TruncSatSVecF32x4ToVecI32x4, TruncSatUVecF32x4ToVecI32x4, TruncSatSVecF64x2ToVecI64x2, TruncSatUVecF64x2ToVecI64x2,
+ ConvertSVecI32x4ToVecF32x4, ConvertUVecI32x4ToVecF32x4, ConvertSVecI64x2ToVecF64x2, ConvertUVecI64x2ToVecF64x2,
InvalidUnary
};
@@ -144,6 +157,19 @@ enum BinaryOp {
// relational ops
EqFloat64, NeFloat64, // int or float
LtFloat64, LeFloat64, GtFloat64, GeFloat64, // float
+ // SIMD relational ops (return vectors)
+ EqVecI8x16, NeVecI8x16, LtSVecI8x16, LtUVecI8x16, GtSVecI8x16, GtUVecI8x16, LeSVecI8x16, LeUVecI8x16, GeSVecI8x16, GeUVecI8x16,
+ EqVecI16x8, NeVecI16x8, LtSVecI16x8, LtUVecI16x8, GtSVecI16x8, GtUVecI16x8, LeSVecI16x8, LeUVecI16x8, GeSVecI16x8, GeUVecI16x8,
+ EqVecI32x4, NeVecI32x4, LtSVecI32x4, LtUVecI32x4, GtSVecI32x4, GtUVecI32x4, LeSVecI32x4, LeUVecI32x4, GeSVecI32x4, GeUVecI32x4,
+ EqVecF32x4, NeVecF32x4, LtVecF32x4, GtVecF32x4, LeVecF32x4, GeVecF32x4,
+ EqVecF64x2, NeVecF64x2, LtVecF64x2, GtVecF64x2, LeVecF64x2, GeVecF64x2,
+ // SIMD arithmetic
+ AndVec128, OrVec128, XorVec128,
+ AddVecI8x16, AddSatSVecI8x16, AddSatUVecI8x16, SubVecI8x16, SubSatSVecI8x16, SubSatUVecI8x16, MulVecI8x16,
+ AddVecI16x8, AddSatSVecI16x8, AddSatUVecI16x8, SubVecI16x8, SubSatSVecI16x8, SubSatUVecI16x8, MulVecI16x8,
+ AddVecI32x4, SubVecI32x4, MulVecI32x4, AddVecI64x2, SubVecI64x2,
+ AddVecF32x4, SubVecF32x4, MulVecF32x4, DivVecF32x4, MinVecF32x4, MaxVecF32x4,
+ AddVecF64x2, SubVecF64x2, MulVecF64x2, DivVecF64x2, MinVecF64x2, MaxVecF64x2,
InvalidBinary
};
@@ -156,6 +182,20 @@ enum AtomicRMWOp {
Add, Sub, And, Or, Xor, Xchg
};
+enum SIMDExtractOp {
+ ExtractLaneSVecI8x16, ExtractLaneUVecI8x16, ExtractLaneSVecI16x8, ExtractLaneUVecI16x8,
+ ExtractLaneVecI32x4, ExtractLaneVecI64x2, ExtractLaneVecF32x4, ExtractLaneVecF64x2
+};
+
+enum SIMDReplaceOp {
+ ReplaceLaneVecI8x16, ReplaceLaneVecI16x8, ReplaceLaneVecI32x4, ReplaceLaneVecI64x2, ReplaceLaneVecF32x4, ReplaceLaneVecF64x2
+};
+
+enum SIMDShiftOp {
+ ShlVecI8x16, ShrSVecI8x16, ShrUVecI8x16, ShlVecI16x8, ShrSVecI16x8, ShrUVecI16x8,
+ ShlVecI32x4, ShrSVecI32x4, ShrUVecI32x4, ShlVecI64x2, ShrSVecI64x2, ShrUVecI64x2
+};
+
//
// Expressions
//
@@ -206,6 +246,11 @@ public:
AtomicCmpxchgId,
AtomicWaitId,
AtomicWakeId,
+ SIMDExtractId,
+ SIMDReplaceId,
+ SIMDShuffleId,
+ SIMDBitselectId,
+ SIMDShiftId,
NumExpressionIds
};
Id _id;
@@ -502,6 +547,67 @@ class AtomicWake : public SpecificExpression<Expression::AtomicWakeId> {
void finalize();
};
+class SIMDExtract : public SpecificExpression<Expression::SIMDExtractId> {
+ public:
+ SIMDExtract() = default;
+ SIMDExtract(MixedArena& allocator) : SIMDExtract() {}
+
+ SIMDExtractOp op;
+ Expression* vec;
+ uint8_t idx;
+
+ void finalize();
+};
+
+class SIMDReplace : public SpecificExpression<Expression::SIMDReplaceId> {
+ public:
+ SIMDReplace() = default;
+ SIMDReplace(MixedArena& allocator) : SIMDReplace() {}
+
+ SIMDReplaceOp op;
+ Expression* vec;
+ uint8_t idx;
+ Expression* value;
+
+ void finalize();
+};
+
+class SIMDShuffle : public SpecificExpression<Expression::SIMDShuffleId> {
+ public:
+ SIMDShuffle() = default;
+ SIMDShuffle(MixedArena& allocator) : SIMDShuffle() {}
+
+ Expression* left;
+ Expression* right;
+ std::array<uint8_t, 16> mask;
+
+ void finalize();
+};
+
+class SIMDBitselect : public SpecificExpression<Expression::SIMDBitselectId> {
+ public:
+ SIMDBitselect() = default;
+ SIMDBitselect(MixedArena& allocator) : SIMDBitselect() {}
+
+ Expression* left;
+ Expression* right;
+ Expression* cond;
+
+ void finalize();
+};
+
+class SIMDShift : public SpecificExpression<Expression::SIMDShiftId> {
+ public:
+ SIMDShift() = default;
+ SIMDShift(MixedArena& allocator) : SIMDShift() {}
+
+ SIMDShiftOp op;
+ Expression* vec;
+ Expression* shift;
+
+ void finalize();
+};
+
class Const : public SpecificExpression<Expression::ConstId> {
public:
Const() {}
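
Illustrative sketch (not part of this patch): constructing one of the new nodes by hand, for instance from a pass or a frontend. The Builder changes live in wasm-builder.h (not shown in this hunk), so this allocates directly from the arena; vecExpr and amountExpr stand in for existing expressions of type v128 and i32.

#include "wasm.h"

using namespace wasm;

Expression* makeShl(MixedArena& arena, Expression* vecExpr, Expression* amountExpr) {
  auto* shl = arena.alloc<SIMDShift>();
  shl->op = ShlVecI32x4;
  shl->vec = vecExpr;       // the v128 operand
  shl->shift = amountExpr;  // the i32 shift amount
  shl->finalize();          // computes the node's result type
  return shl;
}
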
diff --git a/src/wasm/literal.cpp b/src/wasm/literal.cpp
index 7b9e64e43..8d44b9b73 100644
--- a/src/wasm/literal.cpp
+++ b/src/wasm/literal.cpp
@@ -28,6 +28,52 @@
namespace wasm {
+template<int N>
+using LaneArray = std::array<Literal, N>;
+
+Literal::Literal(const uint8_t init[16]) : type(Type::v128) {
+ memcpy(&v128, init, 16);
+}
+
+template<typename LaneT, int Lanes>
+static void extractBytes(uint8_t (&dest)[16], const LaneArray<Lanes>& lanes) {
+ std::array<uint8_t, 16> bytes;
+ const size_t lane_width = 16 / Lanes;
+ for (size_t lane_idx = 0; lane_idx < Lanes; ++lane_idx) {
+ uint8_t bits[16];
+ lanes[lane_idx].getBits(bits);
+ LaneT lane;
+ memcpy(&lane, bits, sizeof(lane));
+ for (size_t offset = 0; offset < lane_width; ++offset) {
+ bytes.at(lane_idx * lane_width + offset) = uint8_t(lane >> (8 * offset));
+ }
+ }
+ memcpy(&dest, bytes.data(), sizeof(bytes));
+}
+
+Literal::Literal(const LaneArray<16>& lanes) : type(Type::v128) {
+ extractBytes<uint8_t, 16>(v128, lanes);
+}
+
+Literal::Literal(const LaneArray<8>& lanes) : type(Type::v128) {
+ extractBytes<uint16_t, 8>(v128, lanes);
+}
+
+Literal::Literal(const LaneArray<4>& lanes) : type(Type::v128) {
+ extractBytes<uint32_t, 4>(v128, lanes);
+}
+
+Literal::Literal(const LaneArray<2>& lanes) : type(Type::v128) {
+ extractBytes<uint64_t, 2>(v128, lanes);
+}
+
+std::array<uint8_t, 16> Literal::getv128() const {
+ assert(type == Type::v128);
+ std::array<uint8_t, 16> ret;
+ memcpy(ret.data(), v128, sizeof(ret));
+ return ret;
+}
+
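
A small sketch (not part of this patch): the new byte constructor and getv128() round-trip the 128-bit payload unchanged.

#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include "literal.h"

void roundTrip() {
  uint8_t bytes[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
  wasm::Literal v(bytes);                     // a v128 literal
  std::array<uint8_t, 16> out = v.getv128();  // copies the payload back out
  assert(std::equal(out.begin(), out.end(), bytes));
}
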
Literal Literal::castToF32() {
assert(type == Type::i32);
Literal ret(i32);
@@ -72,20 +118,26 @@ double Literal::getFloat() const {
}
}
-int64_t Literal::getBits() const {
+void Literal::getBits(uint8_t (&buf)[16]) const {
+ memset(buf, 0, 16);
switch (type) {
- case Type::i32: case Type::f32: return i32;
- case Type::i64: case Type::f64: return i64;
- case Type::v128: assert(false && "v128 not implemented");
- case Type::none: case Type::unreachable: WASM_UNREACHABLE();
+ case Type::i32:
+ case Type::f32: memcpy(buf, &i32, sizeof(i32)); break;
+ case Type::i64:
+ case Type::f64: memcpy(buf, &i64, sizeof(i64)); break;
+ case Type::v128: memcpy(buf, &v128, sizeof(v128)); break;
+ case Type::none:
+ case Type::unreachable: WASM_UNREACHABLE();
}
- WASM_UNREACHABLE();
}
bool Literal::operator==(const Literal& other) const {
if (type != other.type) return false;
if (type == none) return true;
- return getBits() == other.getBits();
+ uint8_t bits[16], other_bits[16];
+ getBits(bits);
+ other.getBits(other_bits);
+ return memcmp(bits, other_bits, 16) == 0;
}
bool Literal::operator!=(const Literal& other) const {
@@ -158,6 +210,15 @@ void Literal::printDouble(std::ostream& o, double d) {
o << text;
}
+void Literal::printVec128(std::ostream& o, const std::array<uint8_t, 16>& v) {
+ o << std::hex;
+ for (auto i = 0; i < 16; ++i) {
+ o << "0x" << uint32_t(v[i]);
+ if (i < 15) o << " ";
+ }
+ o << std::dec;
+}
+
std::ostream& operator<<(std::ostream& o, Literal literal) {
prepareMinorColor(o) << printType(literal.type) << ".const ";
switch (literal.type) {
@@ -166,7 +227,7 @@ std::ostream& operator<<(std::ostream& o, Literal literal) {
case Type::i64: o << literal.i64; break;
case Type::f32: literal.printFloat(o, literal.getf32()); break;
case Type::f64: literal.printDouble(o, literal.getf64()); break;
- case Type::v128: assert(false && "v128 not implemented yet");
+ case Type::v128: o << "i32 "; literal.printVec128(o, literal.getv128()); break;
case Type::unreachable: WASM_UNREACHABLE();
}
restoreNormalColor(o);
@@ -450,6 +511,79 @@ Literal Literal::sub(const Literal& other) const {
WASM_UNREACHABLE();
}
+template<typename T>
+static T add_sat_s(T a, T b) {
+ static_assert(std::is_signed<T>::value, "Trying to instantiate add_sat_s with unsigned type");
+ using UT = typename std::make_unsigned<T>::type;
+ UT ua = static_cast<UT>(a);
+ UT ub = static_cast<UT>(b);
+ UT ures = ua + ub;
+ // overflow if sign of result is different from sign of a and b
+ if (static_cast<T>((ures ^ ua) & (ures ^ ub)) < 0) {
+ return (a < 0)
+ ? std::numeric_limits<T>::min()
+ : std::numeric_limits<T>::max();
+ }
+ return static_cast<T>(ures);
+}
+
+template<typename T>
+static T sub_sat_s(T a, T b) {
+ static_assert(std::is_signed<T>::value, "Trying to instantiate sub_sat_s with unsigned type");
+ using UT = typename std::make_unsigned<T>::type;
+ UT ua = static_cast<UT>(a);
+ UT ub = static_cast<UT>(b);
+ UT ures = ua - ub;
+ // overflow if a and b have different signs and result and a differ in sign
+ if (static_cast<T>((ua ^ ub) & (ures ^ ua)) < 0) {
+ return (a < 0)
+ ? std::numeric_limits<T>::min()
+ : std::numeric_limits<T>::max();
+ }
+ return static_cast<T>(ures);
+}
+
+template<typename T>
+static T add_sat_u(T a, T b) {
+ static_assert(std::is_unsigned<T>::value, "Trying to instantiate add_sat_u with signed type");
+ T res = a + b;
+ // overflow if the result wrapped around and is less than either argument
+ return (res < a) ? std::numeric_limits<T>::max() : res;
+}
+
+template<typename T>
+static T sub_sat_u(T a, T b) {
+ static_assert(std::is_unsigned<T>::value, "Trying to instantiate sub_sat_u with signed type");
+ T res = a - b;
+ // underflow if the result wrapped around and is greater than a
+ return (res > a) ? 0 : res;
+}
+
+Literal Literal::addSatSI8(const Literal& other) const {
+ return Literal(add_sat_s<int8_t>(geti32(), other.geti32()));
+}
+Literal Literal::addSatUI8(const Literal& other) const {
+ return Literal(add_sat_u<uint8_t>(geti32(), other.geti32()));
+}
+Literal Literal::addSatSI16(const Literal& other) const {
+ return Literal(add_sat_s<int16_t>(geti32(), other.geti32()));
+}
+Literal Literal::addSatUI16(const Literal& other) const {
+ return Literal(add_sat_u<uint16_t>(geti32(), other.geti32()));
+}
+Literal Literal::subSatSI8(const Literal& other) const {
+ return Literal(sub_sat_s<int8_t>(geti32(), other.geti32()));
+}
+Literal Literal::subSatUI8(const Literal& other) const {
+ return Literal(sub_sat_u<uint8_t>(geti32(), other.geti32()));
+}
+Literal Literal::subSatSI16(const Literal& other) const {
+ return Literal(sub_sat_s<int16_t>(geti32(), other.geti32()));
+}
+Literal Literal::subSatUI16(const Literal& other) const {
+ return Literal(sub_sat_u<uint16_t>(geti32(), other.geti32()));
+}
+
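
A quick sketch (not part of this patch) of the saturating behavior these wrappers provide: lane values clamp to the limits of the lane type instead of wrapping.

#include <cassert>
#include <cstdint>
#include "literal.h"

void saturationDemo() {
  using wasm::Literal;
  // signed i8 lanes clamp to [-128, 127]
  assert(Literal(int32_t(100)).addSatSI8(Literal(int32_t(100))) == Literal(int32_t(127)));
  assert(Literal(int32_t(-100)).subSatSI8(Literal(int32_t(100))) == Literal(int32_t(-128)));
  // unsigned i8 lanes clamp to [0, 255]
  assert(Literal(int32_t(200)).addSatUI8(Literal(int32_t(100))) == Literal(int32_t(255)));
  assert(Literal(int32_t(1)).subSatUI8(Literal(int32_t(2))) == Literal(int32_t(0)));
}
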
Literal Literal::mul(const Literal& other) const {
switch (type) {
case Type::i32: return Literal(uint32_t(i32) * uint32_t(other.i32));
@@ -784,4 +918,538 @@ Literal Literal::copysign(const Literal& other) const {
}
}
+template<typename LaneT, int Lanes>
+static LaneArray<Lanes> getLanes(const Literal& val) {
+ assert(val.type == Type::v128);
+ const size_t lane_width = 16 / Lanes;
+ std::array<uint8_t, 16> bytes = val.getv128();
+ LaneArray<Lanes> lanes;
+ for (size_t lane_idx = 0; lane_idx < Lanes; ++lane_idx) {
+ LaneT lane(0);
+ for (size_t offset = 0; offset < lane_width; ++offset) {
+ lane |= LaneT(bytes.at(lane_idx * lane_width + offset)) << LaneT(8 * offset);
+ }
+ lanes.at(lane_idx) = Literal(lane);
+ }
+ return lanes;
+}
+
+LaneArray<16> Literal::getLanesSI8x16() const {
+ return getLanes<int8_t, 16>(*this);
+}
+LaneArray<16> Literal::getLanesUI8x16() const {
+ return getLanes<uint8_t, 16>(*this);
+}
+LaneArray<8> Literal::getLanesSI16x8() const {
+ return getLanes<int16_t, 8>(*this);
+}
+LaneArray<8> Literal::getLanesUI16x8() const {
+ return getLanes<uint16_t, 8>(*this);
+}
+LaneArray<4> Literal::getLanesI32x4() const {
+ return getLanes<int32_t, 4>(*this);
+}
+LaneArray<2> Literal::getLanesI64x2() const {
+ return getLanes<int64_t, 2>(*this);
+}
+LaneArray<4> Literal::getLanesF32x4() const {
+ auto lanes = getLanesI32x4();
+ for (size_t i = 0; i < lanes.size(); ++i) {
+ lanes[i] = lanes[i].castToF32();
+ }
+ return lanes;
+}
+LaneArray<2> Literal::getLanesF64x2() const {
+ auto lanes = getLanesI64x2();
+ for (size_t i = 0; i < lanes.size(); ++i) {
+ lanes[i] = lanes[i].castToF64();
+ }
+ return lanes;
+}
+
+Literal Literal::shuffleV8x16(const Literal& other, const std::array<uint8_t, 16>& mask) const {
+ assert(type == Type::v128);
+ uint8_t bytes[16];
+ for (size_t i = 0; i < mask.size(); ++i) {
+ bytes[i] = (mask[i] < 16) ? v128[mask[i]] : other.v128[mask[i] - 16];
+ }
+ return Literal(bytes);
+}
+
+template<Type Ty, int Lanes>
+static Literal splat(const Literal& val) {
+ assert(val.type == Ty);
+ LaneArray<Lanes> lanes;
+ lanes.fill(val);
+ return Literal(lanes);
+}
+
+Literal Literal::splatI8x16() const { return splat<Type::i32, 16>(*this); }
+Literal Literal::splatI16x8() const { return splat<Type::i32, 8>(*this); }
+Literal Literal::splatI32x4() const { return splat<Type::i32, 4>(*this); }
+Literal Literal::splatI64x2() const { return splat<Type::i64, 2>(*this); }
+Literal Literal::splatF32x4() const { return splat<Type::f32, 4>(*this); }
+Literal Literal::splatF64x2() const { return splat<Type::f64, 2>(*this); }
+
+Literal Literal::extractLaneSI8x16(uint8_t idx) const { return getLanesSI8x16().at(idx); }
+Literal Literal::extractLaneUI8x16(uint8_t idx) const { return getLanesUI8x16().at(idx); }
+Literal Literal::extractLaneSI16x8(uint8_t idx) const { return getLanesSI16x8().at(idx); }
+Literal Literal::extractLaneUI16x8(uint8_t idx) const { return getLanesUI16x8().at(idx); }
+Literal Literal::extractLaneI32x4(uint8_t idx) const { return getLanesI32x4().at(idx); }
+Literal Literal::extractLaneI64x2(uint8_t idx) const { return getLanesI64x2().at(idx); }
+Literal Literal::extractLaneF32x4(uint8_t idx) const { return getLanesF32x4().at(idx); }
+Literal Literal::extractLaneF64x2(uint8_t idx) const { return getLanesF64x2().at(idx); }
+
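
Illustrative sketch (not part of this patch): lanes are packed little-endian, with lane i of an i32x4 vector starting at byte 4 * i, so a vector built from four i32 lanes hands the same values back through extractLaneI32x4. It assumes the lane-array constructors are declared in literal.h taking std::array<Literal, N>, which is what LaneArray<N> aliases.

#include <array>
#include <cassert>
#include <cstdint>
#include "literal.h"

void laneDemo() {
  using wasm::Literal;
  std::array<Literal, 4> lanes = {Literal(int32_t(1)), Literal(int32_t(2)),
                                  Literal(int32_t(3)), Literal(int32_t(4))};
  Literal vec(lanes);                                      // packs the lanes into v128
  assert(vec.extractLaneI32x4(2) == Literal(int32_t(3)));  // the third lane
  assert(vec.getv128()[0] == 1 && vec.getv128()[4] == 2);  // lane i begins at byte 4 * i
}
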
+template<int Lanes, LaneArray<Lanes> (Literal::*IntoLanes)() const>
+static Literal replace(const Literal& val, const Literal& other, uint8_t idx) {
+ LaneArray<Lanes> lanes = (val.*IntoLanes)();
+ lanes.at(idx) = other;
+ auto ret = Literal(lanes);
+ return ret;
+}
+
+Literal Literal::replaceLaneI8x16(const Literal& other, uint8_t idx) const {
+ return replace<16, &Literal::getLanesUI8x16>(*this, other, idx);
+}
+Literal Literal::replaceLaneI16x8(const Literal& other, uint8_t idx) const {
+ return replace<8, &Literal::getLanesUI16x8>(*this, other, idx);
+}
+Literal Literal::replaceLaneI32x4(const Literal& other, uint8_t idx) const {
+ return replace<4, &Literal::getLanesI32x4>(*this, other, idx);
+}
+Literal Literal::replaceLaneI64x2(const Literal& other, uint8_t idx) const {
+ return replace<2, &Literal::getLanesI64x2>(*this, other, idx);
+}
+Literal Literal::replaceLaneF32x4(const Literal& other, uint8_t idx) const {
+ return replace<4, &Literal::getLanesF32x4>(*this, other, idx);
+}
+Literal Literal::replaceLaneF64x2(const Literal& other, uint8_t idx) const {
+ return replace<2, &Literal::getLanesF64x2>(*this, other, idx);
+}
+
+template<int Lanes, LaneArray<Lanes> (Literal::*IntoLanes)() const,
+ Literal (Literal::*UnaryOp)(void) const>
+static Literal unary(const Literal& val) {
+ LaneArray<Lanes> lanes = (val.*IntoLanes)();
+ for (size_t i = 0; i < Lanes; ++i) {
+ lanes[i] = (lanes[i].*UnaryOp)();
+ }
+ return Literal(lanes);
+}
+
+Literal Literal::notV128() const {
+ std::array<uint8_t, 16> ones;
+ ones.fill(0xff);
+ return xorV128(Literal(ones.data()));
+}
+Literal Literal::negI8x16() const {
+ return unary<16, &Literal::getLanesUI8x16, &Literal::neg>(*this);
+}
+Literal Literal::negI16x8() const {
+ return unary<8, &Literal::getLanesUI16x8, &Literal::neg>(*this);
+}
+Literal Literal::negI32x4() const {
+ return unary<4, &Literal::getLanesI32x4, &Literal::neg>(*this);
+}
+Literal Literal::negI64x2() const {
+ return unary<2, &Literal::getLanesI64x2, &Literal::neg>(*this);
+}
+Literal Literal::absF32x4() const {
+ return unary<4, &Literal::getLanesF32x4, &Literal::abs>(*this);
+}
+Literal Literal::negF32x4() const {
+ return unary<4, &Literal::getLanesF32x4, &Literal::neg>(*this);
+}
+Literal Literal::sqrtF32x4() const {
+ return unary<4, &Literal::getLanesF32x4, &Literal::sqrt>(*this);
+}
+Literal Literal::absF64x2() const {
+ return unary<2, &Literal::getLanesF64x2, &Literal::abs>(*this);
+}
+Literal Literal::negF64x2() const {
+ return unary<2, &Literal::getLanesF64x2, &Literal::neg>(*this);
+}
+Literal Literal::sqrtF64x2() const {
+ return unary<2, &Literal::getLanesF64x2, &Literal::sqrt>(*this);
+}
+Literal Literal::truncSatToSI32x4() const {
+ return unary<4, &Literal::getLanesF32x4, &Literal::truncSatToSI32>(*this);
+}
+Literal Literal::truncSatToUI32x4() const {
+ return unary<4, &Literal::getLanesF32x4, &Literal::truncSatToUI32>(*this);
+}
+Literal Literal::truncSatToSI64x2() const {
+ return unary<2, &Literal::getLanesF64x2, &Literal::truncSatToSI64>(*this);
+}
+Literal Literal::truncSatToUI64x2() const {
+ return unary<2, &Literal::getLanesF64x2, &Literal::truncSatToUI64>(*this);
+}
+Literal Literal::convertSToF32x4() const {
+ return unary<4, &Literal::getLanesI32x4, &Literal::convertSIToF32>(*this);
+}
+Literal Literal::convertUToF32x4() const {
+ return unary<4, &Literal::getLanesI32x4, &Literal::convertUIToF32>(*this);
+}
+Literal Literal::convertSToF64x2() const {
+ return unary<2, &Literal::getLanesI64x2, &Literal::convertSIToF64>(*this);
+}
+Literal Literal::convertUToF64x2() const {
+ return unary<2, &Literal::getLanesI64x2, &Literal::convertUIToF64>(*this);
+}
+
+template<int Lanes, LaneArray<Lanes> (Literal::*IntoLanes)() const>
+static Literal any_true(const Literal& val) {
+ LaneArray<Lanes> lanes = (val.*IntoLanes)();
+ for (size_t i = 0; i < Lanes; ++i) {
+ if (lanes[i] != Literal::makeZero(lanes[i].type)) {
+ return Literal(int32_t(1));
+ }
+ }
+ return Literal(int32_t(0));
+}
+
+template<int Lanes, LaneArray<Lanes> (Literal::*IntoLanes)() const>
+static Literal all_true(const Literal& val) {
+ LaneArray<Lanes> lanes = (val.*IntoLanes)();
+ for (size_t i = 0; i < Lanes; ++i) {
+ if (lanes[i] == Literal::makeZero(lanes[i].type)) {
+ return Literal(int32_t(0));
+ }
+ }
+ return Literal(int32_t(1));
+}
+
+Literal Literal::anyTrueI8x16() const {
+ return any_true<16, &Literal::getLanesUI8x16>(*this);
+}
+Literal Literal::allTrueI8x16() const {
+ return all_true<16, &Literal::getLanesUI8x16>(*this);
+}
+Literal Literal::anyTrueI16x8() const {
+ return any_true<8, &Literal::getLanesUI16x8>(*this);
+}
+Literal Literal::allTrueI16x8() const {
+ return all_true<8, &Literal::getLanesUI16x8>(*this);
+}
+Literal Literal::anyTrueI32x4() const {
+ return any_true<4, &Literal::getLanesI32x4>(*this);
+}
+Literal Literal::allTrueI32x4() const {
+ return all_true<4, &Literal::getLanesI32x4>(*this);
+}
+Literal Literal::anyTrueI64x2() const {
+ return any_true<2, &Literal::getLanesI64x2>(*this);
+}
+Literal Literal::allTrueI64x2() const {
+ return all_true<2, &Literal::getLanesI64x2>(*this);
+}
+
+template<int Lanes, LaneArray<Lanes> (Literal::*IntoLanes)() const,
+ Literal (Literal::*ShiftOp)(const Literal&) const>
+static Literal shift(const Literal& vec, const Literal& shift) {
+ assert(shift.type == Type::i32);
+ size_t lane_bits = 128 / Lanes;
+ LaneArray<Lanes> lanes = (vec.*IntoLanes)();
+ for (size_t i = 0; i < Lanes; ++i) {
+ lanes[i] = (lanes[i].*ShiftOp)(Literal(int32_t(shift.geti32() % lane_bits)));
+ }
+ return Literal(lanes);
+}
+
+Literal Literal::shlI8x16(const Literal& other) const {
+ return shift<16, &Literal::getLanesUI8x16, &Literal::shl>(*this, other);
+}
+Literal Literal::shrSI8x16(const Literal& other) const {
+ return shift<16, &Literal::getLanesSI8x16, &Literal::shrS>(*this, other);
+}
+Literal Literal::shrUI8x16(const Literal& other) const {
+ return shift<16, &Literal::getLanesUI8x16, &Literal::shrU>(*this, other);
+}
+Literal Literal::shlI16x8(const Literal& other) const {
+ return shift<8, &Literal::getLanesUI16x8, &Literal::shl>(*this, other);
+}
+Literal Literal::shrSI16x8(const Literal& other) const {
+ return shift<8, &Literal::getLanesSI16x8, &Literal::shrS>(*this, other);
+}
+Literal Literal::shrUI16x8(const Literal& other) const {
+ return shift<8, &Literal::getLanesUI16x8, &Literal::shrU>(*this, other);
+}
+Literal Literal::shlI32x4(const Literal& other) const {
+ return shift<4, &Literal::getLanesI32x4, &Literal::shl>(*this, other);
+}
+Literal Literal::shrSI32x4(const Literal& other) const {
+ return shift<4, &Literal::getLanesI32x4, &Literal::shrS>(*this, other);
+}
+Literal Literal::shrUI32x4(const Literal& other) const {
+ return shift<4, &Literal::getLanesI32x4, &Literal::shrU>(*this, other);
+}
+Literal Literal::shlI64x2(const Literal& other) const {
+ return shift<2, &Literal::getLanesI64x2, &Literal::shl>(*this, other);
+}
+Literal Literal::shrSI64x2(const Literal& other) const {
+ return shift<2, &Literal::getLanesI64x2, &Literal::shrS>(*this, other);
+}
+Literal Literal::shrUI64x2(const Literal& other) const {
+ return shift<2, &Literal::getLanesI64x2, &Literal::shrU>(*this, other);
+}
+
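
A small sketch (not part of this patch): as in the shift helper above, the shift count is reduced modulo the lane width before it is applied per lane.

#include <cassert>
#include <cstdint>
#include "literal.h"

void shiftDemo() {
  using wasm::Literal;
  Literal ones = Literal(int32_t(1)).splatI32x4();
  Literal shifted = ones.shlI32x4(Literal(int32_t(33)));  // 33 % 32 == 1
  assert(shifted == Literal(int32_t(2)).splatI32x4());
}
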
+template<int Lanes, LaneArray<Lanes> (Literal::*IntoLanes)() const,
+ Literal (Literal::*CompareOp)(const Literal&) const,
+ typename LaneT = int32_t>
+static Literal compare(const Literal& val, const Literal& other) {
+ LaneArray<Lanes> lanes = (val.*IntoLanes)();
+ LaneArray<Lanes> other_lanes = (other.*IntoLanes)();
+ for (size_t i = 0; i < Lanes; ++i) {
+ lanes[i] = (lanes[i].*CompareOp)(other_lanes[i]) == Literal(int32_t(1))
+ ? Literal(LaneT(-1))
+ : Literal(LaneT(0));
+ }
+ return Literal(lanes);
+}
+
+Literal Literal::eqI8x16(const Literal& other) const {
+ return compare<16, &Literal::getLanesUI8x16, &Literal::eq>(*this, other);
+}
+Literal Literal::neI8x16(const Literal& other) const {
+ return compare<16, &Literal::getLanesUI8x16, &Literal::ne>(*this, other);
+}
+Literal Literal::ltSI8x16(const Literal& other) const {
+ return compare<16, &Literal::getLanesSI8x16, &Literal::ltS>(*this, other);
+}
+Literal Literal::ltUI8x16(const Literal& other) const {
+ return compare<16, &Literal::getLanesUI8x16, &Literal::ltU>(*this, other);
+}
+Literal Literal::gtSI8x16(const Literal& other) const {
+ return compare<16, &Literal::getLanesSI8x16, &Literal::gtS>(*this, other);
+}
+Literal Literal::gtUI8x16(const Literal& other) const {
+ return compare<16, &Literal::getLanesUI8x16, &Literal::gtU>(*this, other);
+}
+Literal Literal::leSI8x16(const Literal& other) const {
+ return compare<16, &Literal::getLanesSI8x16, &Literal::leS>(*this, other);
+}
+Literal Literal::leUI8x16(const Literal& other) const {
+ return compare<16, &Literal::getLanesUI8x16, &Literal::leU>(*this, other);
+}
+Literal Literal::geSI8x16(const Literal& other) const {
+ return compare<16, &Literal::getLanesSI8x16, &Literal::geS>(*this, other);
+}
+Literal Literal::geUI8x16(const Literal& other) const {
+ return compare<16, &Literal::getLanesUI8x16, &Literal::geU>(*this, other);
+}
+Literal Literal::eqI16x8(const Literal& other) const {
+ return compare<8, &Literal::getLanesUI16x8, &Literal::eq>(*this, other);
+}
+Literal Literal::neI16x8(const Literal& other) const {
+ return compare<8, &Literal::getLanesUI16x8, &Literal::ne>(*this, other);
+}
+Literal Literal::ltSI16x8(const Literal& other) const {
+ return compare<8, &Literal::getLanesSI16x8, &Literal::ltS>(*this, other);
+}
+Literal Literal::ltUI16x8(const Literal& other) const {
+ return compare<8, &Literal::getLanesUI16x8, &Literal::ltU>(*this, other);
+}
+Literal Literal::gtSI16x8(const Literal& other) const {
+ return compare<8, &Literal::getLanesSI16x8, &Literal::gtS>(*this, other);
+}
+Literal Literal::gtUI16x8(const Literal& other) const {
+ return compare<8, &Literal::getLanesUI16x8, &Literal::gtU>(*this, other);
+}
+Literal Literal::leSI16x8(const Literal& other) const {
+ return compare<8, &Literal::getLanesSI16x8, &Literal::leS>(*this, other);
+}
+Literal Literal::leUI16x8(const Literal& other) const {
+ return compare<8, &Literal::getLanesUI16x8, &Literal::leU>(*this, other);
+}
+Literal Literal::geSI16x8(const Literal& other) const {
+ return compare<8, &Literal::getLanesSI16x8, &Literal::geS>(*this, other);
+}
+Literal Literal::geUI16x8(const Literal& other) const {
+ return compare<8, &Literal::getLanesUI16x8, &Literal::geU>(*this, other);
+}
+Literal Literal::eqI32x4(const Literal& other) const {
+ return compare<4, &Literal::getLanesI32x4, &Literal::eq>(*this, other);
+}
+Literal Literal::neI32x4(const Literal& other) const {
+ return compare<4, &Literal::getLanesI32x4, &Literal::ne>(*this, other);
+}
+Literal Literal::ltSI32x4(const Literal& other) const {
+ return compare<4, &Literal::getLanesI32x4, &Literal::ltS>(*this, other);
+}
+Literal Literal::ltUI32x4(const Literal& other) const {
+ return compare<4, &Literal::getLanesI32x4, &Literal::ltU>(*this, other);
+}
+Literal Literal::gtSI32x4(const Literal& other) const {
+ return compare<4, &Literal::getLanesI32x4, &Literal::gtS>(*this, other);
+}
+Literal Literal::gtUI32x4(const Literal& other) const {
+ return compare<4, &Literal::getLanesI32x4, &Literal::gtU>(*this, other);
+}
+Literal Literal::leSI32x4(const Literal& other) const {
+ return compare<4, &Literal::getLanesI32x4, &Literal::leS>(*this, other);
+}
+Literal Literal::leUI32x4(const Literal& other) const {
+ return compare<4, &Literal::getLanesI32x4, &Literal::leU>(*this, other);
+}
+Literal Literal::geSI32x4(const Literal& other) const {
+ return compare<4, &Literal::getLanesI32x4, &Literal::geS>(*this, other);
+}
+Literal Literal::geUI32x4(const Literal& other) const {
+ return compare<4, &Literal::getLanesI32x4, &Literal::geU>(*this, other);
+}
+Literal Literal::eqF32x4(const Literal& other) const {
+ return compare<4, &Literal::getLanesF32x4, &Literal::eq>(*this, other);
+}
+Literal Literal::neF32x4(const Literal& other) const {
+ return compare<4, &Literal::getLanesF32x4, &Literal::ne>(*this, other);
+}
+Literal Literal::ltF32x4(const Literal& other) const {
+ return compare<4, &Literal::getLanesF32x4, &Literal::lt>(*this, other);
+}
+Literal Literal::gtF32x4(const Literal& other) const {
+ return compare<4, &Literal::getLanesF32x4, &Literal::gt>(*this, other);
+}
+Literal Literal::leF32x4(const Literal& other) const {
+ return compare<4, &Literal::getLanesF32x4, &Literal::le>(*this, other);
+}
+Literal Literal::geF32x4(const Literal& other) const {
+ return compare<4, &Literal::getLanesF32x4, &Literal::ge>(*this, other);
+}
+Literal Literal::eqF64x2(const Literal& other) const {
+ return compare<2, &Literal::getLanesF64x2, &Literal::eq, int64_t>(*this, other);
+}
+Literal Literal::neF64x2(const Literal& other) const {
+ return compare<2, &Literal::getLanesF64x2, &Literal::ne, int64_t>(*this, other);
+}
+Literal Literal::ltF64x2(const Literal& other) const {
+ return compare<2, &Literal::getLanesF64x2, &Literal::lt, int64_t>(*this, other);
+}
+Literal Literal::gtF64x2(const Literal& other) const {
+ return compare<2, &Literal::getLanesF64x2, &Literal::gt, int64_t>(*this, other);
+}
+Literal Literal::leF64x2(const Literal& other) const {
+ return compare<2, &Literal::getLanesF64x2, &Literal::le, int64_t>(*this, other);
+}
+Literal Literal::geF64x2(const Literal& other) const {
+ return compare<2, &Literal::getLanesF64x2, &Literal::ge, int64_t>(*this, other);
+}
+
+template<int Lanes, LaneArray<Lanes> (Literal::*IntoLanes)() const,
+ Literal (Literal::*BinaryOp)(const Literal&) const>
+static Literal binary(const Literal& val, const Literal& other) {
+ LaneArray<Lanes> lanes = (val.*IntoLanes)();
+ LaneArray<Lanes> other_lanes = (other.*IntoLanes)();
+ for (size_t i = 0; i < Lanes; ++i) {
+ lanes[i] = (lanes[i].*BinaryOp)(other_lanes[i]);
+ }
+ return Literal(lanes);
+}
+
+Literal Literal::andV128(const Literal& other) const {
+ return binary<4, &Literal::getLanesI32x4, &Literal::and_>(*this, other);
+}
+Literal Literal::orV128(const Literal& other) const {
+ return binary<4, &Literal::getLanesI32x4, &Literal::or_>(*this, other);
+}
+Literal Literal::xorV128(const Literal& other) const {
+ return binary<4, &Literal::getLanesI32x4, &Literal::xor_>(*this, other);
+}
+Literal Literal::addI8x16(const Literal& other) const {
+ return binary<16, &Literal::getLanesUI8x16, &Literal::add>(*this, other);
+}
+Literal Literal::addSaturateSI8x16(const Literal& other) const {
+ return binary<16, &Literal::getLanesUI8x16, &Literal::addSatSI8>(*this, other);
+}
+Literal Literal::addSaturateUI8x16(const Literal& other) const {
+ return binary<16, &Literal::getLanesSI8x16, &Literal::addSatUI8>(*this, other);
+}
+Literal Literal::subI8x16(const Literal& other) const {
+ return binary<16, &Literal::getLanesUI8x16, &Literal::sub>(*this, other);
+}
+Literal Literal::subSaturateSI8x16(const Literal& other) const {
+ return binary<16, &Literal::getLanesUI8x16, &Literal::subSatSI8>(*this, other);
+}
+Literal Literal::subSaturateUI8x16(const Literal& other) const {
+ return binary<16, &Literal::getLanesSI8x16, &Literal::subSatUI8>(*this, other);
+}
+Literal Literal::mulI8x16(const Literal& other) const {
+ return binary<16, &Literal::getLanesUI8x16, &Literal::mul>(*this, other);
+}
+Literal Literal::addI16x8(const Literal& other) const {
+ return binary<8, &Literal::getLanesUI16x8, &Literal::add>(*this, other);
+}
+Literal Literal::addSaturateSI16x8(const Literal& other) const {
+ return binary<8, &Literal::getLanesUI16x8, &Literal::addSatSI16>(*this, other);
+}
+Literal Literal::addSaturateUI16x8(const Literal& other) const {
+ return binary<8, &Literal::getLanesSI16x8, &Literal::addSatUI16>(*this, other);
+}
+Literal Literal::subI16x8(const Literal& other) const {
+ return binary<8, &Literal::getLanesUI16x8, &Literal::sub>(*this, other);
+}
+Literal Literal::subSaturateSI16x8(const Literal& other) const {
+ return binary<8, &Literal::getLanesUI16x8, &Literal::subSatSI16>(*this, other);
+}
+Literal Literal::subSaturateUI16x8(const Literal& other) const {
+ return binary<8, &Literal::getLanesSI16x8, &Literal::subSatUI16>(*this, other);
+}
+Literal Literal::mulI16x8(const Literal& other) const {
+ return binary<8, &Literal::getLanesUI16x8, &Literal::mul>(*this, other);
+}
+Literal Literal::addI32x4(const Literal& other) const {
+ return binary<4, &Literal::getLanesI32x4, &Literal::add>(*this, other);
+}
+Literal Literal::subI32x4(const Literal& other) const {
+ return binary<4, &Literal::getLanesI32x4, &Literal::sub>(*this, other);
+}
+Literal Literal::mulI32x4(const Literal& other) const {
+ return binary<4, &Literal::getLanesI32x4, &Literal::mul>(*this, other);
+}
+Literal Literal::addI64x2(const Literal& other) const {
+ return binary<2, &Literal::getLanesI64x2, &Literal::add>(*this, other);
+}
+Literal Literal::subI64x2(const Literal& other) const {
+ return binary<2, &Literal::getLanesI64x2, &Literal::sub>(*this, other);
+}
+Literal Literal::addF32x4(const Literal& other) const {
+ return binary<4, &Literal::getLanesF32x4, &Literal::add>(*this, other);
+}
+Literal Literal::subF32x4(const Literal& other) const {
+ return binary<4, &Literal::getLanesF32x4, &Literal::sub>(*this, other);
+}
+Literal Literal::mulF32x4(const Literal& other) const {
+ return binary<4, &Literal::getLanesF32x4, &Literal::mul>(*this, other);
+}
+Literal Literal::divF32x4(const Literal& other) const {
+ return binary<4, &Literal::getLanesF32x4, &Literal::div>(*this, other);
+}
+Literal Literal::minF32x4(const Literal& other) const {
+ return binary<4, &Literal::getLanesF32x4, &Literal::min>(*this, other);
+}
+Literal Literal::maxF32x4(const Literal& other) const {
+ return binary<4, &Literal::getLanesF32x4, &Literal::max>(*this, other);
+}
+Literal Literal::addF64x2(const Literal& other) const {
+ return binary<2, &Literal::getLanesF64x2, &Literal::add>(*this, other);
+}
+Literal Literal::subF64x2(const Literal& other) const {
+ return binary<2, &Literal::getLanesF64x2, &Literal::sub>(*this, other);
+}
+Literal Literal::mulF64x2(const Literal& other) const {
+ return binary<2, &Literal::getLanesF64x2, &Literal::mul>(*this, other);
+}
+Literal Literal::divF64x2(const Literal& other) const {
+ return binary<2, &Literal::getLanesF64x2, &Literal::div>(*this, other);
+}
+Literal Literal::minF64x2(const Literal& other) const {
+ return binary<2, &Literal::getLanesF64x2, &Literal::min>(*this, other);
+}
+Literal Literal::maxF64x2(const Literal& other) const {
+ return binary<2, &Literal::getLanesF64x2, &Literal::max>(*this, other);
+}
+
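+// Bitwise select with this literal as the mask: each result bit comes from left where the
+// mask bit is 1 and from right where it is 0, i.e. (mask & left) | (~mask & right).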
+Literal Literal::bitselectV128(const Literal& left, const Literal& right) const {
+ return andV128(left).orV128(notV128().andV128(right));
+}
+
} // namespace wasm
diff --git a/src/wasm/wasm-binary.cpp b/src/wasm/wasm-binary.cpp
index 2a7bff51f..a373a5ec6 100644
--- a/src/wasm/wasm-binary.cpp
+++ b/src/wasm/wasm-binary.cpp
@@ -757,6 +757,14 @@ uint64_t WasmBinaryBuilder::getInt64() {
return ret;
}
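+// Reads a single-byte lane index immediate and rejects values >= the given number of lanes.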
+uint8_t WasmBinaryBuilder::getLaneIdx(size_t lanes) {
+ if (debug) std::cerr << "<==" << std::endl;
+ auto ret = getInt8();
+ if (ret >= lanes) throwError("Illegal lane index");
+ if (debug) std::cerr << "getLaneIdx(" << lanes << "): " << ret << " ==>" << std::endl;
+ return ret;
+}
+
Literal WasmBinaryBuilder::getFloat32Literal() {
if (debug) std::cerr << "<==" << std::endl;
auto ret = Literal(getInt32());
@@ -773,6 +781,17 @@ Literal WasmBinaryBuilder::getFloat64Literal() {
return ret;
}
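+// Reads the 16 immediate bytes of a v128 constant in the order they appear in the binary.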
+Literal WasmBinaryBuilder::getVec128Literal() {
+ if (debug) std::cerr << "<==" << std::endl;
+ std::array<uint8_t, 16> bytes;
+ for (auto i = 0; i < 16; ++i) {
+ bytes[i] = getInt8();
+ }
+ auto ret = Literal(bytes.data());
+ if (debug) std::cerr << "getVec128: " << ret << " ==>" << std::endl;
+ return ret;
+}
+
uint32_t WasmBinaryBuilder::getU32LEB() {
if (debug) std::cerr << "<==" << std::endl;
U32LEB ret;
@@ -822,6 +841,7 @@ Type WasmBinaryBuilder::getType() {
case BinaryConsts::EncodedType::i64: return i64;
case BinaryConsts::EncodedType::f32: return f32;
case BinaryConsts::EncodedType::f64: return f64;
+ case BinaryConsts::EncodedType::v128: return v128;
case BinaryConsts::EncodedType::AnyFunc:
case BinaryConsts::EncodedType::Func:
throwError("invalid wasm type: " + std::to_string(type));
@@ -1677,7 +1697,7 @@ BinaryConsts::ASTNodes WasmBinaryBuilder::readExpression(Expression*& curr) {
case BinaryConsts::End:
case BinaryConsts::Else: curr = nullptr; break;
case BinaryConsts::AtomicPrefix: {
- code = getInt8();
+ code = static_cast<uint8_t>(getU32LEB());
if (maybeVisitLoad(curr, code, /*isAtomic=*/true)) break;
if (maybeVisitStore(curr, code, /*isAtomic=*/true)) break;
if (maybeVisitAtomicRMW(curr, code)) break;
@@ -1688,11 +1708,26 @@ BinaryConsts::ASTNodes WasmBinaryBuilder::readExpression(Expression*& curr) {
break;
}
case BinaryConsts::TruncSatPrefix: {
-      uint32_t code = getU32LEB();
-      if (maybeVisitTruncSat(curr, code)) break;
-      throwError("invalid code after nontrapping float-to-int prefix: " + std::to_string(code));
+      auto opcode = getU32LEB();
+      if (maybeVisitTruncSat(curr, opcode)) break;
+      throwError("invalid code after nontrapping float-to-int prefix: " + std::to_string(opcode));
break;
}
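+    // SIMD instructions share a single prefix byte; the actual operation is the LEB that
+    // follows, so try each class of SIMD instruction until one recognizes the sub-opcode.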
+ case BinaryConsts::SIMDPrefix: {
+ auto opcode = getU32LEB();
+ if (maybeVisitSIMDBinary(curr, opcode)) break;
+ if (maybeVisitSIMDUnary(curr, opcode)) break;
+ if (maybeVisitSIMDConst(curr, opcode)) break;
+ if (maybeVisitSIMDLoad(curr, opcode)) break;
+ if (maybeVisitSIMDStore(curr, opcode)) break;
+ if (maybeVisitSIMDExtract(curr, opcode)) break;
+ if (maybeVisitSIMDReplace(curr, opcode)) break;
+ if (maybeVisitSIMDShuffle(curr, opcode)) break;
+ if (maybeVisitSIMDBitselect(curr, opcode)) break;
+ if (maybeVisitSIMDShift(curr, opcode)) break;
+ throwError("invalid code after SIMD prefix: " + std::to_string(opcode));
+ break;
+ }
default: {
// otherwise, the code is a subcode TODO: optimize
if (maybeVisitBinary(curr, code)) break;
@@ -2077,7 +2112,6 @@ bool WasmBinaryBuilder::maybeVisitStore(Expression*& out, uint8_t code, bool isA
return true;
}
-
bool WasmBinaryBuilder::maybeVisitAtomicRMW(Expression*& out, uint8_t code) {
if (code < BinaryConsts::AtomicRMWOps_Begin || code > BinaryConsts::AtomicRMWOps_End) return false;
auto* curr = allocator.alloc<AtomicRMW>();
@@ -2359,6 +2393,267 @@ bool WasmBinaryBuilder::maybeVisitBinary(Expression*& out, uint8_t code) {
#undef FLOAT_TYPED_CODE
}
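+// Decodes SIMD binary operations: map the sub-opcode to a Binary op and pop the two operands
+// (right first, since operands come off the value stack in reverse order).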
+bool WasmBinaryBuilder::maybeVisitSIMDBinary(Expression*& out, uint32_t code) {
+ Binary* curr;
+ switch (code) {
+ case BinaryConsts::I8x16Eq: curr = allocator.alloc<Binary>(); curr->op = EqVecI8x16; break;
+ case BinaryConsts::I8x16Ne: curr = allocator.alloc<Binary>(); curr->op = NeVecI8x16; break;
+ case BinaryConsts::I8x16LtS: curr = allocator.alloc<Binary>(); curr->op = LtSVecI8x16; break;
+ case BinaryConsts::I8x16LtU: curr = allocator.alloc<Binary>(); curr->op = LtUVecI8x16; break;
+ case BinaryConsts::I8x16GtS: curr = allocator.alloc<Binary>(); curr->op = GtSVecI8x16; break;
+ case BinaryConsts::I8x16GtU: curr = allocator.alloc<Binary>(); curr->op = GtUVecI8x16; break;
+ case BinaryConsts::I8x16LeS: curr = allocator.alloc<Binary>(); curr->op = LeSVecI8x16; break;
+ case BinaryConsts::I8x16LeU: curr = allocator.alloc<Binary>(); curr->op = LeUVecI8x16; break;
+ case BinaryConsts::I8x16GeS: curr = allocator.alloc<Binary>(); curr->op = GeSVecI8x16; break;
+ case BinaryConsts::I8x16GeU: curr = allocator.alloc<Binary>(); curr->op = GeUVecI8x16; break;
+ case BinaryConsts::I16x8Eq: curr = allocator.alloc<Binary>(); curr->op = EqVecI16x8; break;
+ case BinaryConsts::I16x8Ne: curr = allocator.alloc<Binary>(); curr->op = NeVecI16x8; break;
+ case BinaryConsts::I16x8LtS: curr = allocator.alloc<Binary>(); curr->op = LtSVecI16x8; break;
+ case BinaryConsts::I16x8LtU: curr = allocator.alloc<Binary>(); curr->op = LtUVecI16x8; break;
+ case BinaryConsts::I16x8GtS: curr = allocator.alloc<Binary>(); curr->op = GtSVecI16x8; break;
+ case BinaryConsts::I16x8GtU: curr = allocator.alloc<Binary>(); curr->op = GtUVecI16x8; break;
+ case BinaryConsts::I16x8LeS: curr = allocator.alloc<Binary>(); curr->op = LeSVecI16x8; break;
+ case BinaryConsts::I16x8LeU: curr = allocator.alloc<Binary>(); curr->op = LeUVecI16x8; break;
+ case BinaryConsts::I16x8GeS: curr = allocator.alloc<Binary>(); curr->op = GeSVecI16x8; break;
+ case BinaryConsts::I16x8GeU: curr = allocator.alloc<Binary>(); curr->op = GeUVecI16x8; break;
+ case BinaryConsts::I32x4Eq: curr = allocator.alloc<Binary>(); curr->op = EqVecI32x4; break;
+ case BinaryConsts::I32x4Ne: curr = allocator.alloc<Binary>(); curr->op = NeVecI32x4; break;
+ case BinaryConsts::I32x4LtS: curr = allocator.alloc<Binary>(); curr->op = LtSVecI32x4; break;
+ case BinaryConsts::I32x4LtU: curr = allocator.alloc<Binary>(); curr->op = LtUVecI32x4; break;
+ case BinaryConsts::I32x4GtS: curr = allocator.alloc<Binary>(); curr->op = GtSVecI32x4; break;
+ case BinaryConsts::I32x4GtU: curr = allocator.alloc<Binary>(); curr->op = GtUVecI32x4; break;
+ case BinaryConsts::I32x4LeS: curr = allocator.alloc<Binary>(); curr->op = LeSVecI32x4; break;
+ case BinaryConsts::I32x4LeU: curr = allocator.alloc<Binary>(); curr->op = LeUVecI32x4; break;
+ case BinaryConsts::I32x4GeS: curr = allocator.alloc<Binary>(); curr->op = GeSVecI32x4; break;
+ case BinaryConsts::I32x4GeU: curr = allocator.alloc<Binary>(); curr->op = GeUVecI32x4; break;
+ case BinaryConsts::F32x4Eq: curr = allocator.alloc<Binary>(); curr->op = EqVecF32x4; break;
+ case BinaryConsts::F32x4Ne: curr = allocator.alloc<Binary>(); curr->op = NeVecF32x4; break;
+ case BinaryConsts::F32x4Lt: curr = allocator.alloc<Binary>(); curr->op = LtVecF32x4; break;
+ case BinaryConsts::F32x4Gt: curr = allocator.alloc<Binary>(); curr->op = GtVecF32x4; break;
+ case BinaryConsts::F32x4Le: curr = allocator.alloc<Binary>(); curr->op = LeVecF32x4; break;
+ case BinaryConsts::F32x4Ge: curr = allocator.alloc<Binary>(); curr->op = GeVecF32x4; break;
+ case BinaryConsts::F64x2Eq: curr = allocator.alloc<Binary>(); curr->op = EqVecF64x2; break;
+ case BinaryConsts::F64x2Ne: curr = allocator.alloc<Binary>(); curr->op = NeVecF64x2; break;
+ case BinaryConsts::F64x2Lt: curr = allocator.alloc<Binary>(); curr->op = LtVecF64x2; break;
+ case BinaryConsts::F64x2Gt: curr = allocator.alloc<Binary>(); curr->op = GtVecF64x2; break;
+ case BinaryConsts::F64x2Le: curr = allocator.alloc<Binary>(); curr->op = LeVecF64x2; break;
+ case BinaryConsts::F64x2Ge: curr = allocator.alloc<Binary>(); curr->op = GeVecF64x2; break;
+ case BinaryConsts::V128And: curr = allocator.alloc<Binary>(); curr->op = AndVec128; break;
+ case BinaryConsts::V128Or: curr = allocator.alloc<Binary>(); curr->op = OrVec128; break;
+ case BinaryConsts::V128Xor: curr = allocator.alloc<Binary>(); curr->op = XorVec128; break;
+ case BinaryConsts::I8x16Add: curr = allocator.alloc<Binary>(); curr->op = AddVecI8x16; break;
+ case BinaryConsts::I8x16AddSatS: curr = allocator.alloc<Binary>(); curr->op = AddSatSVecI8x16; break;
+ case BinaryConsts::I8x16AddSatU: curr = allocator.alloc<Binary>(); curr->op = AddSatUVecI8x16; break;
+ case BinaryConsts::I8x16Sub: curr = allocator.alloc<Binary>(); curr->op = SubVecI8x16; break;
+ case BinaryConsts::I8x16SubSatS: curr = allocator.alloc<Binary>(); curr->op = SubSatSVecI8x16; break;
+ case BinaryConsts::I8x16SubSatU: curr = allocator.alloc<Binary>(); curr->op = SubSatUVecI8x16; break;
+ case BinaryConsts::I8x16Mul: curr = allocator.alloc<Binary>(); curr->op = MulVecI8x16; break;
+ case BinaryConsts::I16x8Add: curr = allocator.alloc<Binary>(); curr->op = AddVecI16x8; break;
+ case BinaryConsts::I16x8AddSatS: curr = allocator.alloc<Binary>(); curr->op = AddSatSVecI16x8; break;
+ case BinaryConsts::I16x8AddSatU: curr = allocator.alloc<Binary>(); curr->op = AddSatUVecI16x8; break;
+ case BinaryConsts::I16x8Sub: curr = allocator.alloc<Binary>(); curr->op = SubVecI16x8; break;
+ case BinaryConsts::I16x8SubSatS: curr = allocator.alloc<Binary>(); curr->op = SubSatSVecI16x8; break;
+ case BinaryConsts::I16x8SubSatU: curr = allocator.alloc<Binary>(); curr->op = SubSatUVecI16x8; break;
+ case BinaryConsts::I16x8Mul: curr = allocator.alloc<Binary>(); curr->op = MulVecI16x8; break;
+ case BinaryConsts::I32x4Add: curr = allocator.alloc<Binary>(); curr->op = AddVecI32x4; break;
+ case BinaryConsts::I32x4Sub: curr = allocator.alloc<Binary>(); curr->op = SubVecI32x4; break;
+ case BinaryConsts::I32x4Mul: curr = allocator.alloc<Binary>(); curr->op = MulVecI32x4; break;
+ case BinaryConsts::I64x2Add: curr = allocator.alloc<Binary>(); curr->op = AddVecI64x2; break;
+ case BinaryConsts::I64x2Sub: curr = allocator.alloc<Binary>(); curr->op = SubVecI64x2; break;
+ case BinaryConsts::F32x4Add: curr = allocator.alloc<Binary>(); curr->op = AddVecF32x4; break;
+ case BinaryConsts::F32x4Sub: curr = allocator.alloc<Binary>(); curr->op = SubVecF32x4; break;
+ case BinaryConsts::F32x4Mul: curr = allocator.alloc<Binary>(); curr->op = MulVecF32x4; break;
+ case BinaryConsts::F32x4Div: curr = allocator.alloc<Binary>(); curr->op = DivVecF32x4; break;
+ case BinaryConsts::F32x4Min: curr = allocator.alloc<Binary>(); curr->op = MinVecF32x4; break;
+ case BinaryConsts::F32x4Max: curr = allocator.alloc<Binary>(); curr->op = MaxVecF32x4; break;
+ case BinaryConsts::F64x2Add: curr = allocator.alloc<Binary>(); curr->op = AddVecF64x2; break;
+ case BinaryConsts::F64x2Sub: curr = allocator.alloc<Binary>(); curr->op = SubVecF64x2; break;
+ case BinaryConsts::F64x2Mul: curr = allocator.alloc<Binary>(); curr->op = MulVecF64x2; break;
+ case BinaryConsts::F64x2Div: curr = allocator.alloc<Binary>(); curr->op = DivVecF64x2; break;
+ case BinaryConsts::F64x2Min: curr = allocator.alloc<Binary>(); curr->op = MinVecF64x2; break;
+ case BinaryConsts::F64x2Max: curr = allocator.alloc<Binary>(); curr->op = MaxVecF64x2; break;
+ default: return false;
+ }
+ if (debug) std::cerr << "zz node: Binary" << std::endl;
+ curr->right = popNonVoidExpression();
+ curr->left = popNonVoidExpression();
+ curr->finalize();
+ out = curr;
+ return true;
+}
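+// Decodes SIMD unary operations: splats, bitwise not, negation, abs/sqrt, boolean reductions
+// (any_true/all_true), and lane-wise conversions between integer and float vectors.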
+bool WasmBinaryBuilder::maybeVisitSIMDUnary(Expression*& out, uint32_t code) {
+ Unary* curr;
+ switch (code) {
+ case BinaryConsts::I8x16Splat: curr = allocator.alloc<Unary>(); curr->op = SplatVecI8x16; break;
+ case BinaryConsts::I16x8Splat: curr = allocator.alloc<Unary>(); curr->op = SplatVecI16x8; break;
+ case BinaryConsts::I32x4Splat: curr = allocator.alloc<Unary>(); curr->op = SplatVecI32x4; break;
+ case BinaryConsts::I64x2Splat: curr = allocator.alloc<Unary>(); curr->op = SplatVecI64x2; break;
+ case BinaryConsts::F32x4Splat: curr = allocator.alloc<Unary>(); curr->op = SplatVecF32x4; break;
+ case BinaryConsts::F64x2Splat: curr = allocator.alloc<Unary>(); curr->op = SplatVecF64x2; break;
+ case BinaryConsts::V128Not: curr = allocator.alloc<Unary>(); curr->op = NotVec128; break;
+ case BinaryConsts::I8x16Neg: curr = allocator.alloc<Unary>(); curr->op = NegVecI8x16; break;
+ case BinaryConsts::I8x16AnyTrue: curr = allocator.alloc<Unary>(); curr->op = AnyTrueVecI8x16; break;
+ case BinaryConsts::I8x16AllTrue: curr = allocator.alloc<Unary>(); curr->op = AllTrueVecI8x16; break;
+ case BinaryConsts::I16x8Neg: curr = allocator.alloc<Unary>(); curr->op = NegVecI16x8; break;
+ case BinaryConsts::I16x8AnyTrue: curr = allocator.alloc<Unary>(); curr->op = AnyTrueVecI16x8; break;
+ case BinaryConsts::I16x8AllTrue: curr = allocator.alloc<Unary>(); curr->op = AllTrueVecI16x8; break;
+ case BinaryConsts::I32x4Neg: curr = allocator.alloc<Unary>(); curr->op = NegVecI32x4; break;
+ case BinaryConsts::I32x4AnyTrue: curr = allocator.alloc<Unary>(); curr->op = AnyTrueVecI32x4; break;
+ case BinaryConsts::I32x4AllTrue: curr = allocator.alloc<Unary>(); curr->op = AllTrueVecI32x4; break;
+ case BinaryConsts::I64x2Neg: curr = allocator.alloc<Unary>(); curr->op = NegVecI64x2; break;
+ case BinaryConsts::I64x2AnyTrue: curr = allocator.alloc<Unary>(); curr->op = AnyTrueVecI64x2; break;
+ case BinaryConsts::I64x2AllTrue: curr = allocator.alloc<Unary>(); curr->op = AllTrueVecI64x2; break;
+ case BinaryConsts::F32x4Abs: curr = allocator.alloc<Unary>(); curr->op = AbsVecF32x4; break;
+ case BinaryConsts::F32x4Neg: curr = allocator.alloc<Unary>(); curr->op = NegVecF32x4; break;
+ case BinaryConsts::F32x4Sqrt: curr = allocator.alloc<Unary>(); curr->op = SqrtVecF32x4; break;
+ case BinaryConsts::F64x2Abs: curr = allocator.alloc<Unary>(); curr->op = AbsVecF64x2; break;
+ case BinaryConsts::F64x2Neg: curr = allocator.alloc<Unary>(); curr->op = NegVecF64x2; break;
+ case BinaryConsts::F64x2Sqrt: curr = allocator.alloc<Unary>(); curr->op = SqrtVecF64x2; break;
+ case BinaryConsts::I32x4TruncSatSF32x4: curr = allocator.alloc<Unary>(); curr->op = TruncSatSVecF32x4ToVecI32x4; break;
+ case BinaryConsts::I32x4TruncSatUF32x4: curr = allocator.alloc<Unary>(); curr->op = TruncSatUVecF32x4ToVecI32x4; break;
+ case BinaryConsts::I64x2TruncSatSF64x2: curr = allocator.alloc<Unary>(); curr->op = TruncSatSVecF64x2ToVecI64x2; break;
+ case BinaryConsts::I64x2TruncSatUF64x2: curr = allocator.alloc<Unary>(); curr->op = TruncSatUVecF64x2ToVecI64x2; break;
+ case BinaryConsts::F32x4ConvertSI32x4: curr = allocator.alloc<Unary>(); curr->op = ConvertSVecI32x4ToVecF32x4; break;
+ case BinaryConsts::F32x4ConvertUI32x4: curr = allocator.alloc<Unary>(); curr->op = ConvertUVecI32x4ToVecF32x4; break;
+ case BinaryConsts::F64x2ConvertSI64x2: curr = allocator.alloc<Unary>(); curr->op = ConvertSVecI64x2ToVecF64x2; break;
+ case BinaryConsts::F64x2ConvertUI64x2: curr = allocator.alloc<Unary>(); curr->op = ConvertUVecI64x2ToVecF64x2; break;
+ default: return false;
+ }
+ curr->value = popNonVoidExpression();
+ curr->finalize();
+ out = curr;
+ return true;
+}
+
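+// v128.const: the 16-byte immediate is stored as a v128 literal on an ordinary Const node.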
+bool WasmBinaryBuilder::maybeVisitSIMDConst(Expression*& out, uint32_t code) {
+ if (code != BinaryConsts::V128Const) {
+ return false;
+ }
+ auto* curr = allocator.alloc<Const>();
+ curr->value = getVec128Literal();
+ curr->finalize();
+ out = curr;
+ return true;
+}
+
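+// v128.load is represented as a regular Load of 16 bytes with result type v128.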
+bool WasmBinaryBuilder::maybeVisitSIMDLoad(Expression*& out, uint32_t code) {
+ if (code != BinaryConsts::V128Load) {
+ return false;
+ }
+ auto* curr = allocator.alloc<Load>();
+ curr->type = v128;
+ curr->bytes = 16;
+ readMemoryAccess(curr->align, curr->offset);
+ curr->ptr = popNonVoidExpression();
+ curr->finalize();
+ out = curr;
+ return true;
+}
+
+bool WasmBinaryBuilder::maybeVisitSIMDStore(Expression*& out, uint32_t code) {
+ if (code != BinaryConsts::V128Store) {
+ return false;
+ }
+ auto* curr = allocator.alloc<Store>();
+ curr->bytes = 16;
+ curr->valueType = v128;
+ readMemoryAccess(curr->align, curr->offset);
+ curr->value = popNonVoidExpression();
+ curr->ptr = popNonVoidExpression();
+ curr->finalize();
+ out = curr;
+ return true;
+}
+
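+// extract_lane: the sub-opcode selects the lane interpretation and the immediate byte selects the lane.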
+bool WasmBinaryBuilder::maybeVisitSIMDExtract(Expression*& out, uint32_t code) {
+ SIMDExtract* curr;
+ switch (code) {
+ case BinaryConsts::I8x16ExtractLaneS: curr = allocator.alloc<SIMDExtract>(); curr->op = ExtractLaneSVecI8x16; curr->idx = getLaneIdx(16); break;
+ case BinaryConsts::I8x16ExtractLaneU: curr = allocator.alloc<SIMDExtract>(); curr->op = ExtractLaneUVecI8x16; curr->idx = getLaneIdx(16); break;
+ case BinaryConsts::I16x8ExtractLaneS: curr = allocator.alloc<SIMDExtract>(); curr->op = ExtractLaneSVecI16x8; curr->idx = getLaneIdx(8); break;
+ case BinaryConsts::I16x8ExtractLaneU: curr = allocator.alloc<SIMDExtract>(); curr->op = ExtractLaneUVecI16x8; curr->idx = getLaneIdx(8); break;
+ case BinaryConsts::I32x4ExtractLane: curr = allocator.alloc<SIMDExtract>(); curr->op = ExtractLaneVecI32x4; curr->idx = getLaneIdx(4); break;
+ case BinaryConsts::I64x2ExtractLane: curr = allocator.alloc<SIMDExtract>(); curr->op = ExtractLaneVecI64x2; curr->idx = getLaneIdx(2); break;
+ case BinaryConsts::F32x4ExtractLane: curr = allocator.alloc<SIMDExtract>(); curr->op = ExtractLaneVecF32x4; curr->idx = getLaneIdx(4); break;
+ case BinaryConsts::F64x2ExtractLane: curr = allocator.alloc<SIMDExtract>(); curr->op = ExtractLaneVecF64x2; curr->idx = getLaneIdx(2); break;
+ default: return false;
+ }
+ curr->vec = popNonVoidExpression();
+ curr->finalize();
+ out = curr;
+ return true;
+}
+
+bool WasmBinaryBuilder::maybeVisitSIMDReplace(Expression*& out, uint32_t code) {
+ SIMDReplace* curr;
+ switch (code) {
+ case BinaryConsts::I8x16ReplaceLane: curr = allocator.alloc<SIMDReplace>(); curr->op = ReplaceLaneVecI8x16; curr->idx = getLaneIdx(16); break;
+ case BinaryConsts::I16x8ReplaceLane: curr = allocator.alloc<SIMDReplace>(); curr->op = ReplaceLaneVecI16x8; curr->idx = getLaneIdx(8); break;
+ case BinaryConsts::I32x4ReplaceLane: curr = allocator.alloc<SIMDReplace>(); curr->op = ReplaceLaneVecI32x4; curr->idx = getLaneIdx(4); break;
+ case BinaryConsts::I64x2ReplaceLane: curr = allocator.alloc<SIMDReplace>(); curr->op = ReplaceLaneVecI64x2; curr->idx = getLaneIdx(2); break;
+ case BinaryConsts::F32x4ReplaceLane: curr = allocator.alloc<SIMDReplace>(); curr->op = ReplaceLaneVecF32x4; curr->idx = getLaneIdx(4); break;
+ case BinaryConsts::F64x2ReplaceLane: curr = allocator.alloc<SIMDReplace>(); curr->op = ReplaceLaneVecF64x2; curr->idx = getLaneIdx(2); break;
+ default: return false;
+ }
+ curr->value = popNonVoidExpression();
+ curr->vec = popNonVoidExpression();
+ curr->finalize();
+ out = curr;
+ return true;
+}
+
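+// v8x16.shuffle takes 16 lane-index immediates; each index selects one of the 32 lanes of the
+// two concatenated input vectors.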
+bool WasmBinaryBuilder::maybeVisitSIMDShuffle(Expression*& out, uint32_t code) {
+ if (code != BinaryConsts::V8x16Shuffle) {
+ return false;
+ }
+ auto* curr = allocator.alloc<SIMDShuffle>();
+ for (auto i = 0; i < 16; ++i) {
+ curr->mask[i] = getLaneIdx(32);
+ }
+ curr->right = popNonVoidExpression();
+ curr->left = popNonVoidExpression();
+ curr->finalize();
+ out = curr;
+ return true;
+}
+
+bool WasmBinaryBuilder::maybeVisitSIMDBitselect(Expression*& out, uint32_t code) {
+ if (code != BinaryConsts::V128Bitselect) {
+ return false;
+ }
+ auto* curr = allocator.alloc<SIMDBitselect>();
+ curr->cond = popNonVoidExpression();
+ curr->right = popNonVoidExpression();
+ curr->left = popNonVoidExpression();
+ curr->finalize();
+ out = curr;
+ return true;
+}
+
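+// Vector shifts shift every lane by the same i32 amount; the shift amount is popped first
+// because it is the second operand.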
+bool WasmBinaryBuilder::maybeVisitSIMDShift(Expression*& out, uint32_t code) {
+ SIMDShift* curr;
+ switch (code) {
+ case BinaryConsts::I8x16Shl: curr = allocator.alloc<SIMDShift>(); curr->op = ShlVecI8x16; break;
+ case BinaryConsts::I8x16ShrS: curr = allocator.alloc<SIMDShift>(); curr->op = ShrSVecI8x16; break;
+ case BinaryConsts::I8x16ShrU: curr = allocator.alloc<SIMDShift>(); curr->op = ShrUVecI8x16; break;
+ case BinaryConsts::I16x8Shl: curr = allocator.alloc<SIMDShift>(); curr->op = ShlVecI16x8; break;
+ case BinaryConsts::I16x8ShrS: curr = allocator.alloc<SIMDShift>(); curr->op = ShrSVecI16x8; break;
+ case BinaryConsts::I16x8ShrU: curr = allocator.alloc<SIMDShift>(); curr->op = ShrUVecI16x8; break;
+ case BinaryConsts::I32x4Shl: curr = allocator.alloc<SIMDShift>(); curr->op = ShlVecI32x4; break;
+ case BinaryConsts::I32x4ShrS: curr = allocator.alloc<SIMDShift>(); curr->op = ShrSVecI32x4; break;
+ case BinaryConsts::I32x4ShrU: curr = allocator.alloc<SIMDShift>(); curr->op = ShrUVecI32x4; break;
+ case BinaryConsts::I64x2Shl: curr = allocator.alloc<SIMDShift>(); curr->op = ShlVecI64x2; break;
+ case BinaryConsts::I64x2ShrS: curr = allocator.alloc<SIMDShift>(); curr->op = ShrSVecI64x2; break;
+ case BinaryConsts::I64x2ShrU: curr = allocator.alloc<SIMDShift>(); curr->op = ShrUVecI64x2; break;
+ default: return false;
+ }
+ curr->shift = popNonVoidExpression();
+ curr->vec = popNonVoidExpression();
+ curr->finalize();
+ out = curr;
+ return true;
+}
+
void WasmBinaryBuilder::visitSelect(Select* curr) {
if (debug) std::cerr << "zz node: Select" << std::endl;
curr->condition = popNonVoidExpression();
diff --git a/src/wasm/wasm-s-parser.cpp b/src/wasm/wasm-s-parser.cpp
index de1e4f2e9..ac60cb208 100644
--- a/src/wasm/wasm-s-parser.cpp
+++ b/src/wasm/wasm-s-parser.cpp
@@ -632,6 +632,11 @@ Type SExpressionWasmBuilder::stringToType(const char* str, bool allowError, bool
if (str[1] == '3' && str[2] == '2' && (prefix || str[3] == 0)) return f32;
if (str[1] == '6' && str[2] == '4' && (prefix || str[3] == 0)) return f64;
}
+ if (str[0] == 'v') {
+ if (str[1] == '1' && str[2] == '2' && str[3] == '8' && (prefix || str[4] == 0)) {
+ return v128;
+ }
+ }
if (allowError) return none;
throw ParseException("invalid wasm type");
}
@@ -859,8 +864,69 @@ Expression* SExpressionWasmBuilder::makeThenOrElse(Element& s) {
}
Expression* SExpressionWasmBuilder::makeConst(Element& s, Type type) {
- auto ret = parseConst(s[1]->str(), type, allocator);
- if (!ret) throw ParseException("bad const");
+ if (type != v128) {
+ auto ret = parseConst(s[1]->str(), type, allocator);
+ if (!ret) throw ParseException("bad const");
+ return ret;
+ }
+
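+  // A v128 constant in the text format gives a lane type followed by one scalar constant per
+  // lane; parse each lane with the scalar const parser and pack the lanes into a single literal.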
+ auto ret = allocator.alloc<Const>();
+ auto getLiteral = [](Expression* expr) {
+ if (expr == nullptr) {
+ throw ParseException("Could not parse v128 lane");
+ }
+ return expr->cast<Const>()->value;
+ };
+ Type lane_t = stringToType(s[1]->str());
+ size_t lanes = s.size() - 2;
+ switch (lanes) {
+ case 2: {
+ if (lane_t != i64 && lane_t != f64) {
+ throw ParseException("Unexpected v128 literal lane type");
+ }
+ std::array<Literal, 2> lanes;
+ for (size_t i = 0; i < 2; ++i) {
+ lanes[i] = getLiteral(parseConst(s[i+2]->str(), lane_t, allocator));
+ }
+ ret->value = Literal(lanes);
+ break;
+ }
+ case 4: {
+ if (lane_t != i32 && lane_t != f32) {
+ throw ParseException("Unexpected v128 literal lane type");
+ }
+ std::array<Literal, 4> lanes;
+ for (size_t i = 0; i < 4; ++i) {
+ lanes[i] = getLiteral(parseConst(s[i+2]->str(), lane_t, allocator));
+ }
+ ret->value = Literal(lanes);
+ break;
+ }
+ case 8: {
+ if (lane_t != i32) {
+ throw ParseException("Unexpected v128 literal lane type");
+ }
+ std::array<Literal, 8> lanes;
+ for (size_t i = 0; i < 8; ++i) {
+ lanes[i] = getLiteral(parseConst(s[i+2]->str(), lane_t, allocator));
+ }
+ ret->value = Literal(lanes);
+ break;
+ }
+ case 16: {
+ if (lane_t != i32) {
+ throw ParseException("Unexpected v128 literal lane type");
+ }
+ std::array<Literal, 16> lanes;
+ for (size_t i = 0; i < 16; ++i) {
+ lanes[i] = getLiteral(parseConst(s[i+2]->str(), lane_t, allocator));
+ }
+ ret->value = Literal(lanes);
+ break;
+ }
+ default: throw ParseException("Unexpected number of lanes in v128 literal");
+ }
+ ret->finalize();
return ret;
}
@@ -1011,6 +1077,63 @@ Expression* SExpressionWasmBuilder::makeAtomicWake(Element& s) {
return ret;
}
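+// Parses a decimal lane-index immediate and checks it against the number of lanes.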
+static uint8_t parseLaneIdx(const Element* s, size_t lanes) {
+ const char *str = s->c_str();
+ char *end;
+ auto n = static_cast<unsigned long long>(strtoll(str, &end, 10));
+ if (end == str || *end != '\0') throw ParseException("Expected lane index");
+  if (n >= lanes) throw ParseException("lane index must be less than " + std::to_string(lanes));
+ return uint8_t(n);
+}
+
+Expression* SExpressionWasmBuilder::makeSIMDExtract(Element& s, SIMDExtractOp op, size_t lanes) {
+ auto ret = allocator.alloc<SIMDExtract>();
+ ret->op = op;
+ ret->idx = parseLaneIdx(s[1], lanes);
+ ret->vec = parseExpression(s[2]);
+ ret->finalize();
+ return ret;
+}
+
+Expression* SExpressionWasmBuilder::makeSIMDReplace(Element& s, SIMDReplaceOp op, size_t lanes) {
+ auto ret = allocator.alloc<SIMDReplace>();
+ ret->op = op;
+ ret->idx = parseLaneIdx(s[1], lanes);
+ ret->vec = parseExpression(s[2]);
+ ret->value = parseExpression(s[3]);
+ ret->finalize();
+ return ret;
+}
+
+Expression* SExpressionWasmBuilder::makeSIMDShuffle(Element& s) {
+ auto ret = allocator.alloc<SIMDShuffle>();
+ for (size_t i = 0; i < 16; ++i) {
+ ret->mask[i] = parseLaneIdx(s[i+1], 32);
+ }
+ ret->left = parseExpression(s[17]);
+ ret->right = parseExpression(s[18]);
+ ret->finalize();
+ return ret;
+}
+
+Expression* SExpressionWasmBuilder::makeSIMDBitselect(Element& s) {
+ auto ret = allocator.alloc<SIMDBitselect>();
+ ret->left = parseExpression(s[1]);
+ ret->right = parseExpression(s[2]);
+ ret->cond = parseExpression(s[3]);
+ ret->finalize();
+ return ret;
+}
+
+Expression* SExpressionWasmBuilder::makeSIMDShift(Element& s, SIMDShiftOp op) {
+ auto ret = allocator.alloc<SIMDShift>();
+ ret->op = op;
+ ret->vec = parseExpression(s[1]);
+ ret->shift = parseExpression(s[2]);
+ ret->finalize();
+ return ret;
+}
+
Expression* SExpressionWasmBuilder::makeIf(Element& s) {
auto ret = allocator.alloc<If>();
Index i = 1;
diff --git a/src/wasm/wasm-validator.cpp b/src/wasm/wasm-validator.cpp
index 3f65c9f7a..e1838bb71 100644
--- a/src/wasm/wasm-validator.cpp
+++ b/src/wasm/wasm-validator.cpp
@@ -245,6 +245,11 @@ public:
void visitAtomicCmpxchg(AtomicCmpxchg* curr);
void visitAtomicWait(AtomicWait* curr);
void visitAtomicWake(AtomicWake* curr);
+ void visitSIMDExtract(SIMDExtract* curr);
+ void visitSIMDReplace(SIMDReplace* curr);
+ void visitSIMDShuffle(SIMDShuffle* curr);
+ void visitSIMDBitselect(SIMDBitselect* curr);
+ void visitSIMDShift(SIMDShift* curr);
void visitBinary(Binary* curr);
void visitUnary(Unary* curr);
void visitSelect(Select* curr);
@@ -498,6 +503,7 @@ void FunctionValidator::visitSetGlobal(SetGlobal* curr) {
void FunctionValidator::visitLoad(Load* curr) {
if (curr->isAtomic) shouldBeTrue(info.features.hasAtomics(), curr, "Atomic operation (atomics are disabled)");
+ if (curr->type == v128) shouldBeTrue(info.features.hasSIMD(), curr, "SIMD operation (SIMD is disabled)");
shouldBeFalse(curr->isAtomic && !getModule()->memory.shared, curr, "Atomic operation with non-shared memory");
validateMemBytes(curr->bytes, curr->type, curr);
validateAlignment(curr->align, curr->type, curr->bytes, curr->isAtomic, curr);
@@ -510,9 +516,10 @@ void FunctionValidator::visitLoad(Load* curr) {
void FunctionValidator::visitStore(Store* curr) {
if (curr->isAtomic) shouldBeTrue(info.features.hasAtomics(), curr, "Atomic operation (atomics are disabled)");
+ if (curr->valueType == v128) shouldBeTrue(info.features.hasSIMD(), curr, "SIMD operation (SIMD is disabled)");
shouldBeFalse(curr->isAtomic && !getModule()->memory.shared, curr, "Atomic operation with non-shared memory");
validateMemBytes(curr->bytes, curr->valueType, curr);
- validateAlignment(curr->align, curr->type, curr->bytes, curr->isAtomic, curr);
+ validateAlignment(curr->align, curr->valueType, curr->bytes, curr->isAtomic, curr);
shouldBeEqualOrFirstIsUnreachable(curr->ptr->type, i32, curr, "store pointer type must be i32");
shouldBeUnequal(curr->value->type, none, curr, "store value type must not be none");
shouldBeEqualOrFirstIsUnreachable(curr->value->type, curr->valueType, curr, "store value type must match");
@@ -561,20 +568,77 @@ void FunctionValidator::visitAtomicWake(AtomicWake* curr) {
shouldBeEqualOrFirstIsUnreachable(curr->wakeCount->type, i32, curr, "AtomicWake wakeCount type must be i32");
}
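+// extract_lane: the vector operand must be a v128, the result must have the scalar lane type,
+// and the lane index must be in range for the lane interpretation.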
+void FunctionValidator::visitSIMDExtract(SIMDExtract* curr) {
+ shouldBeTrue(info.features.hasSIMD(), curr, "SIMD operation (SIMD is disabled)");
+ shouldBeEqualOrFirstIsUnreachable(curr->vec->type, v128, curr, "extract_lane must operate on a v128");
+ Type lane_t = none;
+ size_t lanes = 0;
+ switch (curr->op) {
+ case ExtractLaneSVecI8x16:
+ case ExtractLaneUVecI8x16: lane_t = i32; lanes = 16; break;
+ case ExtractLaneSVecI16x8:
+ case ExtractLaneUVecI16x8: lane_t = i32; lanes = 8; break;
+ case ExtractLaneVecI32x4: lane_t = i32; lanes = 4; break;
+ case ExtractLaneVecI64x2: lane_t = i64; lanes = 2; break;
+ case ExtractLaneVecF32x4: lane_t = f32; lanes = 4; break;
+ case ExtractLaneVecF64x2: lane_t = f64; lanes = 2; break;
+ }
+ shouldBeEqualOrFirstIsUnreachable(curr->type, lane_t, curr, "extract_lane must have same type as vector lane");
+ shouldBeTrue(curr->idx < lanes, curr, "invalid lane index");
+}
+
+void FunctionValidator::visitSIMDReplace(SIMDReplace* curr) {
+ shouldBeTrue(info.features.hasSIMD(), curr, "SIMD operation (SIMD is disabled)");
+ shouldBeEqualOrFirstIsUnreachable(curr->type, v128, curr, "replace_lane must have type v128");
+ shouldBeEqualOrFirstIsUnreachable(curr->vec->type, v128, curr, "replace_lane must operate on a v128");
+ Type lane_t = none;
+ size_t lanes = 0;
+ switch (curr->op) {
+ case ReplaceLaneVecI8x16: lane_t = i32; lanes = 16; break;
+ case ReplaceLaneVecI16x8: lane_t = i32; lanes = 8; break;
+ case ReplaceLaneVecI32x4: lane_t = i32; lanes = 4; break;
+ case ReplaceLaneVecI64x2: lane_t = i64; lanes = 2; break;
+ case ReplaceLaneVecF32x4: lane_t = f32; lanes = 4; break;
+ case ReplaceLaneVecF64x2: lane_t = f64; lanes = 2; break;
+ }
+ shouldBeEqualOrFirstIsUnreachable(curr->value->type, lane_t, curr, "unexpected value type");
+ shouldBeTrue(curr->idx < lanes, curr, "invalid lane index");
+}
+
+void FunctionValidator::visitSIMDShuffle(SIMDShuffle* curr) {
+ shouldBeTrue(info.features.hasSIMD(), curr, "SIMD operation (SIMD is disabled)");
+ shouldBeEqualOrFirstIsUnreachable(curr->type, v128, curr, "v128.shuffle must have type v128");
+ shouldBeEqualOrFirstIsUnreachable(curr->left->type, v128, curr, "expected operand of type v128");
+ shouldBeEqualOrFirstIsUnreachable(curr->right->type, v128, curr, "expected operand of type v128");
+ for (uint8_t idx : curr->mask) {
+ shouldBeTrue(idx < 32, curr, "Invalid lane index in mask");
+ }
+}
+
+void FunctionValidator::visitSIMDBitselect(SIMDBitselect* curr) {
+ shouldBeTrue(info.features.hasSIMD(), curr, "SIMD operation (SIMD is disabled)");
+ shouldBeEqualOrFirstIsUnreachable(curr->type, v128, curr, "v128.bitselect must have type v128");
+ shouldBeEqualOrFirstIsUnreachable(curr->left->type, v128, curr, "expected operand of type v128");
+ shouldBeEqualOrFirstIsUnreachable(curr->right->type, v128, curr, "expected operand of type v128");
+ shouldBeEqualOrFirstIsUnreachable(curr->cond->type, v128, curr, "expected operand of type v128");
+}
+
+void FunctionValidator::visitSIMDShift(SIMDShift* curr) {
+ shouldBeTrue(info.features.hasSIMD(), curr, "SIMD operation (SIMD is disabled)");
+ shouldBeEqualOrFirstIsUnreachable(curr->type, v128, curr, "vector shift must have type v128");
+ shouldBeEqualOrFirstIsUnreachable(curr->vec->type, v128, curr, "expected operand of type v128");
+ shouldBeEqualOrFirstIsUnreachable(curr->shift->type, i32, curr, "expected shift amount to have type i32");
+}
+
void FunctionValidator::validateMemBytes(uint8_t bytes, Type type, Expression* curr) {
- switch (bytes) {
- case 1:
- case 2:
- case 4: break;
- case 8: {
- // if we have a concrete type for the load, then we know the size of the mem operation and
- // can validate it
- if (type != unreachable) {
- shouldBeEqual(getTypeSize(type), 8U, curr, "8-byte mem operations are only allowed with 8-byte wasm types");
- }
- break;
- }
- default: info.fail("Memory operations must be 1,2,4, or 8 bytes", curr, getFunction());
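+  // The legal access widths depend on the value type, so switch on the type rather than
+  // enumerating byte counts; v128 accesses always touch 16 bytes.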
+ switch (type) {
+ case i32: shouldBeTrue(bytes == 1 || bytes == 2 || bytes == 4, curr, "expected i32 operation to touch 1, 2, or 4 bytes"); break;
+ case i64: shouldBeTrue(bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8, curr, "expected i64 operation to touch 1, 2, 4, or 8 bytes"); break;
+ case f32: shouldBeEqual(bytes, uint8_t(4), curr, "expected f32 operation to touch 4 bytes"); break;
+ case f64: shouldBeEqual(bytes, uint8_t(8), curr, "expected f64 operation to touch 8 bytes"); break;
+ case v128: shouldBeEqual(bytes, uint8_t(16), curr, "expected v128 operation to touch 16 bytes"); break;
+ case none: WASM_UNREACHABLE();
+ case unreachable: break;
}
}
@@ -671,6 +735,86 @@ void FunctionValidator::visitBinary(Binary* curr) {
shouldBeEqualOrFirstIsUnreachable(curr->left->type, f64, curr, "f64 op");
break;
}
+ case EqVecI8x16:
+ case NeVecI8x16:
+ case LtSVecI8x16:
+ case LtUVecI8x16:
+ case LeSVecI8x16:
+ case LeUVecI8x16:
+ case GtSVecI8x16:
+ case GtUVecI8x16:
+ case GeSVecI8x16:
+ case GeUVecI8x16:
+ case EqVecI16x8:
+ case NeVecI16x8:
+ case LtSVecI16x8:
+ case LtUVecI16x8:
+ case LeSVecI16x8:
+ case LeUVecI16x8:
+ case GtSVecI16x8:
+ case GtUVecI16x8:
+ case GeSVecI16x8:
+ case GeUVecI16x8:
+ case EqVecI32x4:
+ case NeVecI32x4:
+ case LtSVecI32x4:
+ case LtUVecI32x4:
+ case LeSVecI32x4:
+ case LeUVecI32x4:
+ case GtSVecI32x4:
+ case GtUVecI32x4:
+ case GeSVecI32x4:
+ case GeUVecI32x4:
+ case EqVecF32x4:
+ case NeVecF32x4:
+ case LtVecF32x4:
+ case LeVecF32x4:
+ case GtVecF32x4:
+ case GeVecF32x4:
+ case EqVecF64x2:
+ case NeVecF64x2:
+ case LtVecF64x2:
+ case LeVecF64x2:
+ case GtVecF64x2:
+ case GeVecF64x2:
+ case AndVec128:
+ case OrVec128:
+ case XorVec128:
+ case AddVecI8x16:
+ case AddSatSVecI8x16:
+ case AddSatUVecI8x16:
+ case SubVecI8x16:
+ case SubSatSVecI8x16:
+ case SubSatUVecI8x16:
+ case MulVecI8x16:
+ case AddVecI16x8:
+ case AddSatSVecI16x8:
+ case AddSatUVecI16x8:
+ case SubVecI16x8:
+ case SubSatSVecI16x8:
+ case SubSatUVecI16x8:
+ case MulVecI16x8:
+ case AddVecI32x4:
+ case SubVecI32x4:
+ case MulVecI32x4:
+ case AddVecI64x2:
+ case SubVecI64x2:
+ case AddVecF32x4:
+ case SubVecF32x4:
+ case MulVecF32x4:
+ case DivVecF32x4:
+ case MinVecF32x4:
+ case MaxVecF32x4:
+ case AddVecF64x2:
+ case SubVecF64x2:
+ case MulVecF64x2:
+ case DivVecF64x2:
+ case MinVecF64x2:
+ case MaxVecF64x2: {
+ shouldBeEqualOrFirstIsUnreachable(curr->left->type, v128, curr, "v128 op");
+ shouldBeEqualOrFirstIsUnreachable(curr->right->type, v128, curr, "v128 op");
+ break;
+ }
case InvalidBinary: WASM_UNREACHABLE();
}
}
@@ -804,6 +948,57 @@ void FunctionValidator::visitUnary(Unary* curr) {
shouldBeEqual(curr->value->type, i64, curr, "reinterpret/i64 type must be correct");
break;
}
+ case SplatVecI8x16:
+ case SplatVecI16x8:
+ case SplatVecI32x4:
+ shouldBeEqual(curr->type, v128, curr, "expected splat to have v128 type");
+ shouldBeEqual(curr->value->type, i32, curr, "expected i32 splat value");
+ break;
+ case SplatVecI64x2:
+ shouldBeEqual(curr->type, v128, curr, "expected splat to have v128 type");
+ shouldBeEqual(curr->value->type, i64, curr, "expected i64 splat value");
+ break;
+ case SplatVecF32x4:
+ shouldBeEqual(curr->type, v128, curr, "expected splat to have v128 type");
+ shouldBeEqual(curr->value->type, f32, curr, "expected f32 splat value");
+ break;
+ case SplatVecF64x2:
+ shouldBeEqual(curr->type, v128, curr, "expected splat to have v128 type");
+      shouldBeEqual(curr->value->type, f64, curr, "expected f64 splat value");
+ break;
+ case NotVec128:
+ case NegVecI8x16:
+ case NegVecI16x8:
+ case NegVecI32x4:
+ case NegVecI64x2:
+ case AbsVecF32x4:
+ case NegVecF32x4:
+ case SqrtVecF32x4:
+ case AbsVecF64x2:
+ case NegVecF64x2:
+ case SqrtVecF64x2:
+ case TruncSatSVecF32x4ToVecI32x4:
+ case TruncSatUVecF32x4ToVecI32x4:
+ case TruncSatSVecF64x2ToVecI64x2:
+ case TruncSatUVecF64x2ToVecI64x2:
+ case ConvertSVecI32x4ToVecF32x4:
+ case ConvertUVecI32x4ToVecF32x4:
+ case ConvertSVecI64x2ToVecF64x2:
+ case ConvertUVecI64x2ToVecF64x2:
+ shouldBeEqual(curr->type, v128, curr, "expected v128 type");
+ shouldBeEqual(curr->value->type, v128, curr, "expected v128 operand");
+ break;
+ case AnyTrueVecI8x16:
+ case AllTrueVecI8x16:
+ case AnyTrueVecI16x8:
+ case AllTrueVecI16x8:
+ case AnyTrueVecI32x4:
+ case AllTrueVecI32x4:
+ case AnyTrueVecI64x2:
+ case AllTrueVecI64x2:
+ shouldBeEqual(curr->type, i32, curr, "expected boolean reduction to have i32 type");
+ shouldBeEqual(curr->value->type, v128, curr, "expected v128 operand");
+ break;
case InvalidUnary: WASM_UNREACHABLE();
}
}
@@ -895,7 +1090,8 @@ void FunctionValidator::validateAlignment(size_t align, Type type, Index bytes,
case 1:
case 2:
case 4:
- case 8: break;
+ case 8:
+ case 16: break;
default:{
info.fail("bad alignment: " + std::to_string(align), curr, getFunction());
break;
@@ -913,9 +1109,9 @@ void FunctionValidator::validateAlignment(size_t align, Type type, Index bytes,
shouldBeTrue(align <= 8, curr, "alignment must not exceed natural");
break;
}
- case v128: assert(false && "v128 not implemented yet");
- case none:
- case unreachable: {}
+ case v128:
+ case unreachable: break;
+ case none: WASM_UNREACHABLE();
}
}
diff --git a/src/wasm/wasm.cpp b/src/wasm/wasm.cpp
index fe7927870..87278d3cd 100644
--- a/src/wasm/wasm.cpp
+++ b/src/wasm/wasm.cpp
@@ -104,6 +104,11 @@ const char* getExpressionName(Expression* curr) {
case Expression::Id::AtomicRMWId: return "atomic_rmw";
case Expression::Id::AtomicWaitId: return "atomic_wait";
case Expression::Id::AtomicWakeId: return "atomic_wake";
+ case Expression::Id::SIMDExtractId: return "simd_extract";
+ case Expression::Id::SIMDReplaceId: return "simd_replace";
+ case Expression::Id::SIMDShuffleId: return "simd_shuffle";
+ case Expression::Id::SIMDBitselectId: return "simd_bitselect";
+ case Expression::Id::SIMDShiftId: return "simd_shift";
case Expression::Id::NumExpressionIds: WASM_UNREACHABLE();
}
WASM_UNREACHABLE();
@@ -416,6 +421,56 @@ void AtomicWake::finalize() {
}
}
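+// The result of extract_lane is the scalar lane type; as elsewhere, an unreachable operand
+// makes the whole expression unreachable.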
+void SIMDExtract::finalize() {
+ assert(vec);
+ switch (op) {
+ case ExtractLaneSVecI8x16:
+ case ExtractLaneUVecI8x16:
+ case ExtractLaneSVecI16x8:
+ case ExtractLaneUVecI16x8:
+ case ExtractLaneVecI32x4: type = i32; break;
+ case ExtractLaneVecI64x2: type = i64; break;
+ case ExtractLaneVecF32x4: type = f32; break;
+ case ExtractLaneVecF64x2: type = f64; break;
+ default: WASM_UNREACHABLE();
+ }
+ if (vec->type == unreachable) {
+ type = unreachable;
+ }
+}
+
+void SIMDReplace::finalize() {
+ assert(vec && value);
+ type = v128;
+ if (vec->type == unreachable || value->type == unreachable) {
+ type = unreachable;
+ }
+}
+
+void SIMDShuffle::finalize() {
+ assert(left && right);
+ type = v128;
+ if (left->type == unreachable || right->type == unreachable) {
+ type = unreachable;
+ }
+}
+
+void SIMDBitselect::finalize() {
+ assert(left && right && cond);
+ type = v128;
+ if (left->type == unreachable || right->type == unreachable || cond->type == unreachable) {
+ type = unreachable;
+ }
+}
+
+void SIMDShift::finalize() {
+ assert(vec && shift);
+ type = v128;
+ if (vec->type == unreachable || shift->type == unreachable) {
+ type = unreachable;
+ }
+}
+
Const* Const::set(Literal value_) {
value = value_;
type = value.type;
@@ -491,6 +546,39 @@ void Unary::finalize() {
case ConvertUInt32ToFloat64:
case ConvertSInt64ToFloat64:
case ConvertUInt64ToFloat64: type = f64; break;
+ case SplatVecI8x16:
+ case SplatVecI16x8:
+ case SplatVecI32x4:
+ case SplatVecI64x2:
+ case SplatVecF32x4:
+ case SplatVecF64x2:
+ case NotVec128:
+ case NegVecI8x16:
+ case NegVecI16x8:
+ case NegVecI32x4:
+ case NegVecI64x2:
+ case AbsVecF32x4:
+ case NegVecF32x4:
+ case SqrtVecF32x4:
+ case AbsVecF64x2:
+ case NegVecF64x2:
+ case SqrtVecF64x2:
+ case TruncSatSVecF32x4ToVecI32x4:
+ case TruncSatUVecF32x4ToVecI32x4:
+ case TruncSatSVecF64x2ToVecI64x2:
+ case TruncSatUVecF64x2ToVecI64x2:
+ case ConvertSVecI32x4ToVecF32x4:
+ case ConvertUVecI32x4ToVecF32x4:
+ case ConvertSVecI64x2ToVecF64x2:
+ case ConvertUVecI64x2ToVecF64x2: type = v128; break;
+ case AnyTrueVecI8x16:
+ case AllTrueVecI8x16:
+ case AnyTrueVecI16x8:
+ case AllTrueVecI16x8:
+ case AnyTrueVecI32x4:
+ case AllTrueVecI32x4:
+ case AnyTrueVecI64x2:
+ case AllTrueVecI64x2: type = i32; break;
case InvalidUnary: WASM_UNREACHABLE();
}
}