author    Thomas Lively <7121787+tlively@users.noreply.github.com>  2021-04-05 12:33:25 -0700
committer GitHub <noreply@github.com>  2021-04-05 12:33:25 -0700
commit    1bb172c789bb3a61aeaae78f5464d0544627ed3e (patch)
tree      6bcfd54944e52f9c3f8354a3d3f523f45a5ee670
parent    c59df4cda843ef11ad261f5c889dddc9a9d59d3b (diff)
Update SIMD names and opcodes (#3771)
Also removes experimental SIMD instructions that were not included in the final spec proposal.
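As a quick orientation for the C API renames in the diff below (a sketch, not part of the patch): the per-lane any_true op getters collapse into a single v128 getter, and the "widen" getters are respelled "extend". The two op getters and BinaryenModuleCreate/BinaryenModuleDispose are existing entry points from binaryen-c.h; the include path and variable names are illustrative only.

#include <binaryen-c.h>

int main(void) {
  BinaryenModuleRef module = BinaryenModuleCreate();

  // Was BinaryenAnyTrueVecI8x16 / VecI16x8 / VecI32x4; the final spec has a
  // single lane-agnostic v128.any_true, so there is now one getter.
  BinaryenOp anyTrue = BinaryenAnyTrueVec128();

  // Was BinaryenWidenLowSVecI8x16ToVecI16x8; "widen" is now "extend",
  // matching the renamed i16x8.extend_low_i8x16_s instruction.
  BinaryenOp extendLow = BinaryenExtendLowSVecI8x16ToVecI16x8();

  (void)anyTrue;
  (void)extendLow;
  BinaryenModuleDispose(module);
  return 0;
}

Wasm text written against the old names needs the same treatment, e.g. i8x16.add_saturate_s becomes i8x16.add_sat_s and v8x16.shuffle becomes i8x16.shuffle, as the parser tables below show.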
-rw-r--r--   CHANGELOG.md | 2
-rwxr-xr-x   scripts/gen-s-parser.py | 97
-rw-r--r--   src/binaryen-c.cpp | 53
-rw-r--r--   src/binaryen-c.h | 29
-rw-r--r--   src/gen-s-parser.inc | 607
-rw-r--r--   src/ir/ReFinalize.cpp | 2
-rw-r--r--   src/ir/cost.h | 56
-rw-r--r--   src/ir/effects.h | 6
-rw-r--r--   src/ir/features.h | 10
-rw-r--r--   src/js/binaryen.js-post.js | 184
-rw-r--r--   src/literal.h | 32
-rw-r--r--   src/passes/Print.cpp | 195
-rw-r--r--   src/tools/fuzzing.h | 28
-rw-r--r--   src/wasm-binary.h | 178
-rw-r--r--   src/wasm-builder.h | 10
-rw-r--r--   src/wasm-delegations-fields.h | 17
-rw-r--r--   src/wasm-delegations.h | 2
-rw-r--r--   src/wasm-interpreter.h | 81
-rw-r--r--   src/wasm-s-parser.h | 2
-rw-r--r--   src/wasm.h | 84
-rw-r--r--   src/wasm/literal.cpp | 93
-rw-r--r--   src/wasm/wasm-binary.cpp | 189
-rw-r--r--   src/wasm/wasm-s-parser.cpp | 15
-rw-r--r--   src/wasm/wasm-stack.cpp | 156
-rw-r--r--   src/wasm/wasm-validator.cpp | 42
-rw-r--r--   src/wasm/wasm.cpp | 43
-rw-r--r--   src/wasm2js.h | 6
-rw-r--r--   test/binaryen.js/exception-handling.js.txt | 8
-rw-r--r--   test/binaryen.js/expressions.js | 24
-rw-r--r--   test/binaryen.js/expressions.js.txt | 13
-rw-r--r--   test/binaryen.js/kitchen-sink.js | 57
-rw-r--r--   test/binaryen.js/kitchen-sink.js.txt | 332
-rw-r--r--   test/example/c-api-kitchen-sink.c | 29
-rw-r--r--   test/example/c-api-kitchen-sink.txt | 126
-rw-r--r--   test/passes/precompute-propagate_all-features.txt | 2
-rw-r--r--   test/passes/precompute-propagate_all-features.wast | 2
-rw-r--r--   test/simd.wast | 741
-rw-r--r--   test/simd.wast.from-wast | 744
-rw-r--r--   test/simd.wast.fromBinary | 744
-rw-r--r--   test/simd.wast.fromBinary.noDebugInfo | 988
-rw-r--r--   test/simd64.wast | 40
-rw-r--r--   test/simd64.wast.from-wast | 40
-rw-r--r--   test/simd64.wast.fromBinary | 40
-rw-r--r--   test/simd64.wast.fromBinary.noDebugInfo | 20
-rw-r--r--   test/spec/simd.wast | 182
45 files changed, 2627 insertions(+), 3724 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cea49dd55..b177f6efc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -49,6 +49,8 @@ Current Trunk
- `BinaryenGetFunctionTableSegmentData` is replaced with
`BinaryenElementSegmentGetData`.
- Boolean values in the C API now should use `bool` instead of `int`.
+- Experimental SIMD instructions have been removed and the names and opcodes of
+ the standard instructions have been updated to match the final spec.
v100
----
diff --git a/scripts/gen-s-parser.py b/scripts/gen-s-parser.py
index c65e5ddc4..569289427 100755
--- a/scripts/gen-s-parser.py
+++ b/scripts/gen-s-parser.py
@@ -281,7 +281,7 @@ instructions = [
("v128.load", "makeLoad(s, Type::v128, /*isAtomic=*/false)"),
("v128.store", "makeStore(s, Type::v128, /*isAtomic=*/false)"),
("v128.const", "makeConst(s, Type::v128)"),
- ("v8x16.shuffle", "makeSIMDShuffle(s)"),
+ ("i8x16.shuffle", "makeSIMDShuffle(s)"),
("i8x16.splat", "makeUnary(s, UnaryOp::SplatVecI8x16)"),
("i8x16.extract_lane_s", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneSVecI8x16, 16)"),
("i8x16.extract_lane_u", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneUVecI8x16, 16)"),
@@ -333,6 +333,11 @@ instructions = [
("i32x4.ge_s", "makeBinary(s, BinaryOp::GeSVecI32x4)"),
("i32x4.ge_u", "makeBinary(s, BinaryOp::GeUVecI32x4)"),
("i64x2.eq", "makeBinary(s, BinaryOp::EqVecI64x2)"),
+ ("i64x2.ne", "makeBinary(s, BinaryOp::NeVecI64x2)"),
+ ("i64x2.lt_s", "makeBinary(s, BinaryOp::LtSVecI64x2)"),
+ ("i64x2.gt_s", "makeBinary(s, BinaryOp::GtSVecI64x2)"),
+ ("i64x2.le_s", "makeBinary(s, BinaryOp::LeSVecI64x2)"),
+ ("i64x2.ge_s", "makeBinary(s, BinaryOp::GeSVecI64x2)"),
("f32x4.eq", "makeBinary(s, BinaryOp::EqVecF32x4)"),
("f32x4.ne", "makeBinary(s, BinaryOp::NeVecF32x4)"),
("f32x4.lt", "makeBinary(s, BinaryOp::LtVecF32x4)"),
@@ -350,11 +355,8 @@ instructions = [
("v128.or", "makeBinary(s, BinaryOp::OrVec128)"),
("v128.xor", "makeBinary(s, BinaryOp::XorVec128)"),
("v128.andnot", "makeBinary(s, BinaryOp::AndNotVec128)"),
+ ("v128.any_true", "makeUnary(s, UnaryOp::AnyTrueVec128)"),
("v128.bitselect", "makeSIMDTernary(s, SIMDTernaryOp::Bitselect)"),
- ("v8x16.signselect", "makeSIMDTernary(s, SIMDTernaryOp::SignSelectVec8x16)"),
- ("v16x8.signselect", "makeSIMDTernary(s, SIMDTernaryOp::SignSelectVec16x8)"),
- ("v32x4.signselect", "makeSIMDTernary(s, SIMDTernaryOp::SignSelectVec32x4)"),
- ("v64x2.signselect", "makeSIMDTernary(s, SIMDTernaryOp::SignSelectVec64x2)"),
("v128.load8_lane", "makeSIMDLoadStoreLane(s, LoadLaneVec8x16)"),
("v128.load16_lane", "makeSIMDLoadStoreLane(s, LoadLaneVec16x8)"),
("v128.load32_lane", "makeSIMDLoadStoreLane(s, LoadLaneVec32x4)"),
@@ -366,19 +368,17 @@ instructions = [
("i8x16.popcnt", "makeUnary(s, UnaryOp::PopcntVecI8x16)"),
("i8x16.abs", "makeUnary(s, UnaryOp::AbsVecI8x16)"),
("i8x16.neg", "makeUnary(s, UnaryOp::NegVecI8x16)"),
- ("i8x16.any_true", "makeUnary(s, UnaryOp::AnyTrueVecI8x16)"),
("i8x16.all_true", "makeUnary(s, UnaryOp::AllTrueVecI8x16)"),
("i8x16.bitmask", "makeUnary(s, UnaryOp::BitmaskVecI8x16)"),
("i8x16.shl", "makeSIMDShift(s, SIMDShiftOp::ShlVecI8x16)"),
("i8x16.shr_s", "makeSIMDShift(s, SIMDShiftOp::ShrSVecI8x16)"),
("i8x16.shr_u", "makeSIMDShift(s, SIMDShiftOp::ShrUVecI8x16)"),
("i8x16.add", "makeBinary(s, BinaryOp::AddVecI8x16)"),
- ("i8x16.add_saturate_s", "makeBinary(s, BinaryOp::AddSatSVecI8x16)"),
- ("i8x16.add_saturate_u", "makeBinary(s, BinaryOp::AddSatUVecI8x16)"),
+ ("i8x16.add_sat_s", "makeBinary(s, BinaryOp::AddSatSVecI8x16)"),
+ ("i8x16.add_sat_u", "makeBinary(s, BinaryOp::AddSatUVecI8x16)"),
("i8x16.sub", "makeBinary(s, BinaryOp::SubVecI8x16)"),
- ("i8x16.sub_saturate_s", "makeBinary(s, BinaryOp::SubSatSVecI8x16)"),
- ("i8x16.sub_saturate_u", "makeBinary(s, BinaryOp::SubSatUVecI8x16)"),
- ("i8x16.mul", "makeBinary(s, BinaryOp::MulVecI8x16)"),
+ ("i8x16.sub_sat_s", "makeBinary(s, BinaryOp::SubSatSVecI8x16)"),
+ ("i8x16.sub_sat_u", "makeBinary(s, BinaryOp::SubSatUVecI8x16)"),
("i8x16.min_s", "makeBinary(s, BinaryOp::MinSVecI8x16)"),
("i8x16.min_u", "makeBinary(s, BinaryOp::MinUVecI8x16)"),
("i8x16.max_s", "makeBinary(s, BinaryOp::MaxSVecI8x16)"),
@@ -386,18 +386,17 @@ instructions = [
("i8x16.avgr_u", "makeBinary(s, BinaryOp::AvgrUVecI8x16)"),
("i16x8.abs", "makeUnary(s, UnaryOp::AbsVecI16x8)"),
("i16x8.neg", "makeUnary(s, UnaryOp::NegVecI16x8)"),
- ("i16x8.any_true", "makeUnary(s, UnaryOp::AnyTrueVecI16x8)"),
("i16x8.all_true", "makeUnary(s, UnaryOp::AllTrueVecI16x8)"),
("i16x8.bitmask", "makeUnary(s, UnaryOp::BitmaskVecI16x8)"),
("i16x8.shl", "makeSIMDShift(s, SIMDShiftOp::ShlVecI16x8)"),
("i16x8.shr_s", "makeSIMDShift(s, SIMDShiftOp::ShrSVecI16x8)"),
("i16x8.shr_u", "makeSIMDShift(s, SIMDShiftOp::ShrUVecI16x8)"),
("i16x8.add", "makeBinary(s, BinaryOp::AddVecI16x8)"),
- ("i16x8.add_saturate_s", "makeBinary(s, BinaryOp::AddSatSVecI16x8)"),
- ("i16x8.add_saturate_u", "makeBinary(s, BinaryOp::AddSatUVecI16x8)"),
+ ("i16x8.add_sat_s", "makeBinary(s, BinaryOp::AddSatSVecI16x8)"),
+ ("i16x8.add_sat_u", "makeBinary(s, BinaryOp::AddSatUVecI16x8)"),
("i16x8.sub", "makeBinary(s, BinaryOp::SubVecI16x8)"),
- ("i16x8.sub_saturate_s", "makeBinary(s, BinaryOp::SubSatSVecI16x8)"),
- ("i16x8.sub_saturate_u", "makeBinary(s, BinaryOp::SubSatUVecI16x8)"),
+ ("i16x8.sub_sat_s", "makeBinary(s, BinaryOp::SubSatSVecI16x8)"),
+ ("i16x8.sub_sat_u", "makeBinary(s, BinaryOp::SubSatUVecI16x8)"),
("i16x8.mul", "makeBinary(s, BinaryOp::MulVecI16x8)"),
("i16x8.min_s", "makeBinary(s, BinaryOp::MinSVecI16x8)"),
("i16x8.min_u", "makeBinary(s, BinaryOp::MinUVecI16x8)"),
@@ -411,7 +410,6 @@ instructions = [
("i16x8.extmul_high_i8x16_u", "makeBinary(s, BinaryOp::ExtMulHighUVecI16x8)"),
("i32x4.abs", "makeUnary(s, UnaryOp::AbsVecI32x4)"),
("i32x4.neg", "makeUnary(s, UnaryOp::NegVecI32x4)"),
- ("i32x4.any_true", "makeUnary(s, UnaryOp::AnyTrueVecI32x4)"),
("i32x4.all_true", "makeUnary(s, UnaryOp::AllTrueVecI32x4)"),
("i32x4.bitmask", "makeUnary(s, UnaryOp::BitmaskVecI32x4)"),
("i32x4.shl", "makeSIMDShift(s, SIMDShiftOp::ShlVecI32x4)"),
@@ -429,7 +427,9 @@ instructions = [
("i32x4.extmul_high_i16x8_s", "makeBinary(s, BinaryOp::ExtMulHighSVecI32x4)"),
("i32x4.extmul_low_i16x8_u", "makeBinary(s, BinaryOp::ExtMulLowUVecI32x4)"),
("i32x4.extmul_high_i16x8_u", "makeBinary(s, BinaryOp::ExtMulHighUVecI32x4)"),
+ ("i64x2.abs", "makeUnary(s, UnaryOp::AbsVecI64x2)"),
("i64x2.neg", "makeUnary(s, UnaryOp::NegVecI64x2)"),
+ ("i64x2.all_true", "makeUnary(s, UnaryOp::AllTrueVecI64x2)"),
("i64x2.bitmask", "makeUnary(s, UnaryOp::BitmaskVecI64x2)"),
("i64x2.shl", "makeSIMDShift(s, SIMDShiftOp::ShlVecI64x2)"),
("i64x2.shr_s", "makeSIMDShift(s, SIMDShiftOp::ShrSVecI64x2)"),
@@ -444,8 +444,6 @@ instructions = [
("f32x4.abs", "makeUnary(s, UnaryOp::AbsVecF32x4)"),
("f32x4.neg", "makeUnary(s, UnaryOp::NegVecF32x4)"),
("f32x4.sqrt", "makeUnary(s, UnaryOp::SqrtVecF32x4)"),
- ("f32x4.qfma", "makeSIMDTernary(s, SIMDTernaryOp::QFMAF32x4)"),
- ("f32x4.qfms", "makeSIMDTernary(s, SIMDTernaryOp::QFMSF32x4)"),
("f32x4.add", "makeBinary(s, BinaryOp::AddVecF32x4)"),
("f32x4.sub", "makeBinary(s, BinaryOp::SubVecF32x4)"),
("f32x4.mul", "makeBinary(s, BinaryOp::MulVecF32x4)"),
@@ -461,8 +459,6 @@ instructions = [
("f64x2.abs", "makeUnary(s, UnaryOp::AbsVecF64x2)"),
("f64x2.neg", "makeUnary(s, UnaryOp::NegVecF64x2)"),
("f64x2.sqrt", "makeUnary(s, UnaryOp::SqrtVecF64x2)"),
- ("f64x2.qfma", "makeSIMDTernary(s, SIMDTernaryOp::QFMAF64x2)"),
- ("f64x2.qfms", "makeSIMDTernary(s, SIMDTernaryOp::QFMSF64x2)"),
("f64x2.add", "makeBinary(s, BinaryOp::AddVecF64x2)"),
("f64x2.sub", "makeBinary(s, BinaryOp::SubVecF64x2)"),
("f64x2.mul", "makeBinary(s, BinaryOp::MulVecF64x2)"),
@@ -477,57 +473,48 @@ instructions = [
("f64x2.nearest", "makeUnary(s, UnaryOp::NearestVecF64x2)"),
("i32x4.trunc_sat_f32x4_s", "makeUnary(s, UnaryOp::TruncSatSVecF32x4ToVecI32x4)"),
("i32x4.trunc_sat_f32x4_u", "makeUnary(s, UnaryOp::TruncSatUVecF32x4ToVecI32x4)"),
- ("i64x2.trunc_sat_f64x2_s", "makeUnary(s, UnaryOp::TruncSatSVecF64x2ToVecI64x2)"),
- ("i64x2.trunc_sat_f64x2_u", "makeUnary(s, UnaryOp::TruncSatUVecF64x2ToVecI64x2)"),
("f32x4.convert_i32x4_s", "makeUnary(s, UnaryOp::ConvertSVecI32x4ToVecF32x4)"),
("f32x4.convert_i32x4_u", "makeUnary(s, UnaryOp::ConvertUVecI32x4ToVecF32x4)"),
- ("f64x2.convert_i64x2_s", "makeUnary(s, UnaryOp::ConvertSVecI64x2ToVecF64x2)"),
- ("f64x2.convert_i64x2_u", "makeUnary(s, UnaryOp::ConvertUVecI64x2ToVecF64x2)"),
- ("v8x16.load_splat", "makeSIMDLoad(s, SIMDLoadOp::LoadSplatVec8x16)"),
- ("v16x8.load_splat", "makeSIMDLoad(s, SIMDLoadOp::LoadSplatVec16x8)"),
- ("v32x4.load_splat", "makeSIMDLoad(s, SIMDLoadOp::LoadSplatVec32x4)"),
- ("v64x2.load_splat", "makeSIMDLoad(s, SIMDLoadOp::LoadSplatVec64x2)"),
- ("i16x8.load8x8_s", "makeSIMDLoad(s, SIMDLoadOp::LoadExtSVec8x8ToVecI16x8)"),
- ("i16x8.load8x8_u", "makeSIMDLoad(s, SIMDLoadOp::LoadExtUVec8x8ToVecI16x8)"),
- ("i32x4.load16x4_s", "makeSIMDLoad(s, SIMDLoadOp::LoadExtSVec16x4ToVecI32x4)"),
- ("i32x4.load16x4_u", "makeSIMDLoad(s, SIMDLoadOp::LoadExtUVec16x4ToVecI32x4)"),
- ("i64x2.load32x2_s", "makeSIMDLoad(s, SIMDLoadOp::LoadExtSVec32x2ToVecI64x2)"),
- ("i64x2.load32x2_u", "makeSIMDLoad(s, SIMDLoadOp::LoadExtUVec32x2ToVecI64x2)"),
+ ("v128.load8_splat", "makeSIMDLoad(s, SIMDLoadOp::LoadSplatVec8x16)"),
+ ("v128.load16_splat", "makeSIMDLoad(s, SIMDLoadOp::LoadSplatVec16x8)"),
+ ("v128.load32_splat", "makeSIMDLoad(s, SIMDLoadOp::LoadSplatVec32x4)"),
+ ("v128.load64_splat", "makeSIMDLoad(s, SIMDLoadOp::LoadSplatVec64x2)"),
+ ("v128.load8x8_s", "makeSIMDLoad(s, SIMDLoadOp::LoadExtSVec8x8ToVecI16x8)"),
+ ("v128.load8x8_u", "makeSIMDLoad(s, SIMDLoadOp::LoadExtUVec8x8ToVecI16x8)"),
+ ("v128.load16x4_s", "makeSIMDLoad(s, SIMDLoadOp::LoadExtSVec16x4ToVecI32x4)"),
+ ("v128.load16x4_u", "makeSIMDLoad(s, SIMDLoadOp::LoadExtUVec16x4ToVecI32x4)"),
+ ("v128.load32x2_s", "makeSIMDLoad(s, SIMDLoadOp::LoadExtSVec32x2ToVecI64x2)"),
+ ("v128.load32x2_u", "makeSIMDLoad(s, SIMDLoadOp::LoadExtUVec32x2ToVecI64x2)"),
("v128.load32_zero", "makeSIMDLoad(s, SIMDLoadOp::Load32Zero)"),
("v128.load64_zero", "makeSIMDLoad(s, SIMDLoadOp::Load64Zero)"),
("i8x16.narrow_i16x8_s", "makeBinary(s, BinaryOp::NarrowSVecI16x8ToVecI8x16)"),
("i8x16.narrow_i16x8_u", "makeBinary(s, BinaryOp::NarrowUVecI16x8ToVecI8x16)"),
("i16x8.narrow_i32x4_s", "makeBinary(s, BinaryOp::NarrowSVecI32x4ToVecI16x8)"),
("i16x8.narrow_i32x4_u", "makeBinary(s, BinaryOp::NarrowUVecI32x4ToVecI16x8)"),
- ("i16x8.widen_low_i8x16_s", "makeUnary(s, UnaryOp::WidenLowSVecI8x16ToVecI16x8)"),
- ("i16x8.widen_high_i8x16_s", "makeUnary(s, UnaryOp::WidenHighSVecI8x16ToVecI16x8)"),
- ("i16x8.widen_low_i8x16_u", "makeUnary(s, UnaryOp::WidenLowUVecI8x16ToVecI16x8)"),
- ("i16x8.widen_high_i8x16_u", "makeUnary(s, UnaryOp::WidenHighUVecI8x16ToVecI16x8)"),
- ("i32x4.widen_low_i16x8_s", "makeUnary(s, UnaryOp::WidenLowSVecI16x8ToVecI32x4)"),
- ("i32x4.widen_high_i16x8_s", "makeUnary(s, UnaryOp::WidenHighSVecI16x8ToVecI32x4)"),
- ("i32x4.widen_low_i16x8_u", "makeUnary(s, UnaryOp::WidenLowUVecI16x8ToVecI32x4)"),
- ("i32x4.widen_high_i16x8_u", "makeUnary(s, UnaryOp::WidenHighUVecI16x8ToVecI32x4)"),
- ("i64x2.widen_low_i32x4_s", "makeUnary(s, UnaryOp::WidenLowSVecI32x4ToVecI64x2)"),
- ("i64x2.widen_high_i32x4_s", "makeUnary(s, UnaryOp::WidenHighSVecI32x4ToVecI64x2)"),
- ("i64x2.widen_low_i32x4_u", "makeUnary(s, UnaryOp::WidenLowUVecI32x4ToVecI64x2)"),
- ("i64x2.widen_high_i32x4_u", "makeUnary(s, UnaryOp::WidenHighUVecI32x4ToVecI64x2)"),
- ("v8x16.swizzle", "makeBinary(s, BinaryOp::SwizzleVec8x16)"),
+ ("i16x8.extend_low_i8x16_s", "makeUnary(s, UnaryOp::ExtendLowSVecI8x16ToVecI16x8)"),
+ ("i16x8.extend_high_i8x16_s", "makeUnary(s, UnaryOp::ExtendHighSVecI8x16ToVecI16x8)"),
+ ("i16x8.extend_low_i8x16_u", "makeUnary(s, UnaryOp::ExtendLowUVecI8x16ToVecI16x8)"),
+ ("i16x8.extend_high_i8x16_u", "makeUnary(s, UnaryOp::ExtendHighUVecI8x16ToVecI16x8)"),
+ ("i32x4.extend_low_i16x8_s", "makeUnary(s, UnaryOp::ExtendLowSVecI16x8ToVecI32x4)"),
+ ("i32x4.extend_high_i16x8_s", "makeUnary(s, UnaryOp::ExtendHighSVecI16x8ToVecI32x4)"),
+ ("i32x4.extend_low_i16x8_u", "makeUnary(s, UnaryOp::ExtendLowUVecI16x8ToVecI32x4)"),
+ ("i32x4.extend_high_i16x8_u", "makeUnary(s, UnaryOp::ExtendHighUVecI16x8ToVecI32x4)"),
+ ("i64x2.extend_low_i32x4_s", "makeUnary(s, UnaryOp::ExtendLowSVecI32x4ToVecI64x2)"),
+ ("i64x2.extend_high_i32x4_s", "makeUnary(s, UnaryOp::ExtendHighSVecI32x4ToVecI64x2)"),
+ ("i64x2.extend_low_i32x4_u", "makeUnary(s, UnaryOp::ExtendLowUVecI32x4ToVecI64x2)"),
+ ("i64x2.extend_high_i32x4_u", "makeUnary(s, UnaryOp::ExtendHighUVecI32x4ToVecI64x2)"),
+ ("i8x16.swizzle", "makeBinary(s, BinaryOp::SwizzleVec8x16)"),
("i16x8.extadd_pairwise_i8x16_s", "makeUnary(s, UnaryOp::ExtAddPairwiseSVecI8x16ToI16x8)"),
("i16x8.extadd_pairwise_i8x16_u", "makeUnary(s, UnaryOp::ExtAddPairwiseUVecI8x16ToI16x8)"),
("i32x4.extadd_pairwise_i16x8_s", "makeUnary(s, UnaryOp::ExtAddPairwiseSVecI16x8ToI32x4)"),
("i32x4.extadd_pairwise_i16x8_u", "makeUnary(s, UnaryOp::ExtAddPairwiseUVecI16x8ToI32x4)"),
("f64x2.convert_low_i32x4_s", "makeUnary(s, UnaryOp::ConvertLowSVecI32x4ToVecF64x2)"),
("f64x2.convert_low_i32x4_u", "makeUnary(s, UnaryOp::ConvertLowUVecI32x4ToVecF64x2)"),
- ("i32x4.trunc_sat_f64x2_zero_s", "makeUnary(s, UnaryOp::TruncSatZeroSVecF64x2ToVecI32x4)"),
- ("i32x4.trunc_sat_f64x2_zero_u", "makeUnary(s, UnaryOp::TruncSatZeroUVecF64x2ToVecI32x4)"),
+ ("i32x4.trunc_sat_f64x2_s_zero", "makeUnary(s, UnaryOp::TruncSatZeroSVecF64x2ToVecI32x4)"),
+ ("i32x4.trunc_sat_f64x2_u_zero", "makeUnary(s, UnaryOp::TruncSatZeroUVecF64x2ToVecI32x4)"),
("f32x4.demote_f64x2_zero", "makeUnary(s, UnaryOp::DemoteZeroVecF64x2ToVecF32x4)"),
("f64x2.promote_low_f32x4", "makeUnary(s, UnaryOp::PromoteLowVecF32x4ToVecF64x2)"),
- ("i32x4.widen_i8x16_s", "makeSIMDWiden(s, SIMDWidenOp::WidenSVecI8x16ToVecI32x4)"),
- ("i32x4.widen_i8x16_u", "makeSIMDWiden(s, SIMDWidenOp::WidenUVecI8x16ToVecI32x4)"),
- # prefetch instructions
- ("prefetch.t", "makePrefetch(s, PrefetchOp::PrefetchTemporal)"),
- ("prefetch.nt", "makePrefetch(s, PrefetchOp::PrefetchNontemporal)"),
# reference types instructions
# TODO Add table instructions
("ref.null", "makeRefNull(s)"),
diff --git a/src/binaryen-c.cpp b/src/binaryen-c.cpp
index aff4ed4fc..e257c976c 100644
--- a/src/binaryen-c.cpp
+++ b/src/binaryen-c.cpp
@@ -526,9 +526,9 @@ BinaryenOp BinaryenOrVec128(void) { return OrVec128; }
BinaryenOp BinaryenXorVec128(void) { return XorVec128; }
BinaryenOp BinaryenAndNotVec128(void) { return AndNotVec128; }
BinaryenOp BinaryenBitselectVec128(void) { return Bitselect; }
+BinaryenOp BinaryenAnyTrueVec128(void) { return AnyTrueVec128; }
BinaryenOp BinaryenAbsVecI8x16(void) { return AbsVecI8x16; }
BinaryenOp BinaryenNegVecI8x16(void) { return NegVecI8x16; }
-BinaryenOp BinaryenAnyTrueVecI8x16(void) { return AnyTrueVecI8x16; }
BinaryenOp BinaryenAllTrueVecI8x16(void) { return AllTrueVecI8x16; }
BinaryenOp BinaryenBitmaskVecI8x16(void) { return BitmaskVecI8x16; }
BinaryenOp BinaryenShlVecI8x16(void) { return ShlVecI8x16; }
@@ -540,7 +540,6 @@ BinaryenOp BinaryenAddSatUVecI8x16(void) { return AddSatUVecI8x16; }
BinaryenOp BinaryenSubVecI8x16(void) { return SubVecI8x16; }
BinaryenOp BinaryenSubSatSVecI8x16(void) { return SubSatSVecI8x16; }
BinaryenOp BinaryenSubSatUVecI8x16(void) { return SubSatUVecI8x16; }
-BinaryenOp BinaryenMulVecI8x16(void) { return MulVecI8x16; }
BinaryenOp BinaryenMinSVecI8x16(void) { return MinSVecI8x16; }
BinaryenOp BinaryenMinUVecI8x16(void) { return MinUVecI8x16; }
BinaryenOp BinaryenMaxSVecI8x16(void) { return MaxSVecI8x16; }
@@ -548,7 +547,6 @@ BinaryenOp BinaryenMaxUVecI8x16(void) { return MaxUVecI8x16; }
BinaryenOp BinaryenAvgrUVecI8x16(void) { return AvgrUVecI8x16; }
BinaryenOp BinaryenAbsVecI16x8(void) { return AbsVecI16x8; }
BinaryenOp BinaryenNegVecI16x8(void) { return NegVecI16x8; }
-BinaryenOp BinaryenAnyTrueVecI16x8(void) { return AnyTrueVecI16x8; }
BinaryenOp BinaryenAllTrueVecI16x8(void) { return AllTrueVecI16x8; }
BinaryenOp BinaryenBitmaskVecI16x8(void) { return BitmaskVecI16x8; }
BinaryenOp BinaryenShlVecI16x8(void) { return ShlVecI16x8; }
@@ -568,7 +566,6 @@ BinaryenOp BinaryenMaxUVecI16x8(void) { return MaxUVecI16x8; }
BinaryenOp BinaryenAvgrUVecI16x8(void) { return AvgrUVecI16x8; }
BinaryenOp BinaryenAbsVecI32x4(void) { return AbsVecI32x4; }
BinaryenOp BinaryenNegVecI32x4(void) { return NegVecI32x4; }
-BinaryenOp BinaryenAnyTrueVecI32x4(void) { return AnyTrueVecI32x4; }
BinaryenOp BinaryenAllTrueVecI32x4(void) { return AllTrueVecI32x4; }
BinaryenOp BinaryenBitmaskVecI32x4(void) { return BitmaskVecI32x4; }
BinaryenOp BinaryenShlVecI32x4(void) { return ShlVecI32x4; }
@@ -594,8 +591,6 @@ BinaryenOp BinaryenMulVecI64x2(void) { return MulVecI64x2; }
BinaryenOp BinaryenAbsVecF32x4(void) { return AbsVecF32x4; }
BinaryenOp BinaryenNegVecF32x4(void) { return NegVecF32x4; }
BinaryenOp BinaryenSqrtVecF32x4(void) { return SqrtVecF32x4; }
-BinaryenOp BinaryenQFMAVecF32x4(void) { return QFMAF32x4; }
-BinaryenOp BinaryenQFMSVecF32x4(void) { return QFMSF32x4; }
BinaryenOp BinaryenAddVecF32x4(void) { return AddVecF32x4; }
BinaryenOp BinaryenSubVecF32x4(void) { return SubVecF32x4; }
BinaryenOp BinaryenMulVecF32x4(void) { return MulVecF32x4; }
@@ -611,8 +606,6 @@ BinaryenOp BinaryenPMaxVecF32x4(void) { return PMaxVecF32x4; }
BinaryenOp BinaryenAbsVecF64x2(void) { return AbsVecF64x2; }
BinaryenOp BinaryenNegVecF64x2(void) { return NegVecF64x2; }
BinaryenOp BinaryenSqrtVecF64x2(void) { return SqrtVecF64x2; }
-BinaryenOp BinaryenQFMAVecF64x2(void) { return QFMAF64x2; }
-BinaryenOp BinaryenQFMSVecF64x2(void) { return QFMSF64x2; }
BinaryenOp BinaryenAddVecF64x2(void) { return AddVecF64x2; }
BinaryenOp BinaryenSubVecF64x2(void) { return SubVecF64x2; }
BinaryenOp BinaryenMulVecF64x2(void) { return MulVecF64x2; }
@@ -631,24 +624,12 @@ BinaryenOp BinaryenTruncSatSVecF32x4ToVecI32x4(void) {
BinaryenOp BinaryenTruncSatUVecF32x4ToVecI32x4(void) {
return TruncSatUVecF32x4ToVecI32x4;
}
-BinaryenOp BinaryenTruncSatSVecF64x2ToVecI64x2(void) {
- return TruncSatSVecF64x2ToVecI64x2;
-}
-BinaryenOp BinaryenTruncSatUVecF64x2ToVecI64x2(void) {
- return TruncSatUVecF64x2ToVecI64x2;
-}
BinaryenOp BinaryenConvertSVecI32x4ToVecF32x4(void) {
return ConvertSVecI32x4ToVecF32x4;
}
BinaryenOp BinaryenConvertUVecI32x4ToVecF32x4(void) {
return ConvertUVecI32x4ToVecF32x4;
}
-BinaryenOp BinaryenConvertSVecI64x2ToVecF64x2(void) {
- return ConvertSVecI64x2ToVecF64x2;
-}
-BinaryenOp BinaryenConvertUVecI64x2ToVecF64x2(void) {
- return ConvertUVecI64x2ToVecF64x2;
-}
BinaryenOp BinaryenLoadSplatVec8x16(void) { return LoadSplatVec8x16; }
BinaryenOp BinaryenLoadSplatVec16x8(void) { return LoadSplatVec16x8; }
BinaryenOp BinaryenLoadSplatVec32x4(void) { return LoadSplatVec32x4; }
@@ -683,29 +664,29 @@ BinaryenOp BinaryenNarrowSVecI32x4ToVecI16x8(void) {
BinaryenOp BinaryenNarrowUVecI32x4ToVecI16x8(void) {
return NarrowUVecI32x4ToVecI16x8;
}
-BinaryenOp BinaryenWidenLowSVecI8x16ToVecI16x8(void) {
- return WidenLowSVecI8x16ToVecI16x8;
+BinaryenOp BinaryenExtendLowSVecI8x16ToVecI16x8(void) {
+ return ExtendLowSVecI8x16ToVecI16x8;
}
-BinaryenOp BinaryenWidenHighSVecI8x16ToVecI16x8(void) {
- return WidenHighSVecI8x16ToVecI16x8;
+BinaryenOp BinaryenExtendHighSVecI8x16ToVecI16x8(void) {
+ return ExtendHighSVecI8x16ToVecI16x8;
}
-BinaryenOp BinaryenWidenLowUVecI8x16ToVecI16x8(void) {
- return WidenLowUVecI8x16ToVecI16x8;
+BinaryenOp BinaryenExtendLowUVecI8x16ToVecI16x8(void) {
+ return ExtendLowUVecI8x16ToVecI16x8;
}
-BinaryenOp BinaryenWidenHighUVecI8x16ToVecI16x8(void) {
- return WidenHighUVecI8x16ToVecI16x8;
+BinaryenOp BinaryenExtendHighUVecI8x16ToVecI16x8(void) {
+ return ExtendHighUVecI8x16ToVecI16x8;
}
-BinaryenOp BinaryenWidenLowSVecI16x8ToVecI32x4(void) {
- return WidenLowSVecI16x8ToVecI32x4;
+BinaryenOp BinaryenExtendLowSVecI16x8ToVecI32x4(void) {
+ return ExtendLowSVecI16x8ToVecI32x4;
}
-BinaryenOp BinaryenWidenHighSVecI16x8ToVecI32x4(void) {
- return WidenHighSVecI16x8ToVecI32x4;
+BinaryenOp BinaryenExtendHighSVecI16x8ToVecI32x4(void) {
+ return ExtendHighSVecI16x8ToVecI32x4;
}
-BinaryenOp BinaryenWidenLowUVecI16x8ToVecI32x4(void) {
- return WidenLowUVecI16x8ToVecI32x4;
+BinaryenOp BinaryenExtendLowUVecI16x8ToVecI32x4(void) {
+ return ExtendLowUVecI16x8ToVecI32x4;
}
-BinaryenOp BinaryenWidenHighUVecI16x8ToVecI32x4(void) {
- return WidenHighUVecI16x8ToVecI32x4;
+BinaryenOp BinaryenExtendHighUVecI16x8ToVecI32x4(void) {
+ return ExtendHighUVecI16x8ToVecI32x4;
}
BinaryenOp BinaryenSwizzleVec8x16(void) { return SwizzleVec8x16; }
BinaryenOp BinaryenRefIsNull(void) { return RefIsNull; }
diff --git a/src/binaryen-c.h b/src/binaryen-c.h
index 0dff91f93..686fb5262 100644
--- a/src/binaryen-c.h
+++ b/src/binaryen-c.h
@@ -427,10 +427,10 @@ BINARYEN_API BinaryenOp BinaryenOrVec128(void);
BINARYEN_API BinaryenOp BinaryenXorVec128(void);
BINARYEN_API BinaryenOp BinaryenAndNotVec128(void);
BINARYEN_API BinaryenOp BinaryenBitselectVec128(void);
+BINARYEN_API BinaryenOp BinaryenAnyTrueVec128(void);
// TODO: Add i8x16.popcnt to C and JS APIs once merged to the proposal
BINARYEN_API BinaryenOp BinaryenAbsVecI8x16(void);
BINARYEN_API BinaryenOp BinaryenNegVecI8x16(void);
-BINARYEN_API BinaryenOp BinaryenAnyTrueVecI8x16(void);
BINARYEN_API BinaryenOp BinaryenAllTrueVecI8x16(void);
BINARYEN_API BinaryenOp BinaryenBitmaskVecI8x16(void);
BINARYEN_API BinaryenOp BinaryenShlVecI8x16(void);
@@ -442,7 +442,6 @@ BINARYEN_API BinaryenOp BinaryenAddSatUVecI8x16(void);
BINARYEN_API BinaryenOp BinaryenSubVecI8x16(void);
BINARYEN_API BinaryenOp BinaryenSubSatSVecI8x16(void);
BINARYEN_API BinaryenOp BinaryenSubSatUVecI8x16(void);
-BINARYEN_API BinaryenOp BinaryenMulVecI8x16(void);
BINARYEN_API BinaryenOp BinaryenMinSVecI8x16(void);
BINARYEN_API BinaryenOp BinaryenMinUVecI8x16(void);
BINARYEN_API BinaryenOp BinaryenMaxSVecI8x16(void);
@@ -450,7 +449,6 @@ BINARYEN_API BinaryenOp BinaryenMaxUVecI8x16(void);
BINARYEN_API BinaryenOp BinaryenAvgrUVecI8x16(void);
BINARYEN_API BinaryenOp BinaryenAbsVecI16x8(void);
BINARYEN_API BinaryenOp BinaryenNegVecI16x8(void);
-BINARYEN_API BinaryenOp BinaryenAnyTrueVecI16x8(void);
BINARYEN_API BinaryenOp BinaryenAllTrueVecI16x8(void);
BINARYEN_API BinaryenOp BinaryenBitmaskVecI16x8(void);
BINARYEN_API BinaryenOp BinaryenShlVecI16x8(void);
@@ -472,7 +470,6 @@ BINARYEN_API BinaryenOp BinaryenAvgrUVecI16x8(void);
// TODO: Add extending multiplications to APIs once they are merged as well
BINARYEN_API BinaryenOp BinaryenAbsVecI32x4(void);
BINARYEN_API BinaryenOp BinaryenNegVecI32x4(void);
-BINARYEN_API BinaryenOp BinaryenAnyTrueVecI32x4(void);
BINARYEN_API BinaryenOp BinaryenAllTrueVecI32x4(void);
BINARYEN_API BinaryenOp BinaryenBitmaskVecI32x4(void);
BINARYEN_API BinaryenOp BinaryenShlVecI32x4(void);
@@ -496,8 +493,6 @@ BINARYEN_API BinaryenOp BinaryenMulVecI64x2(void);
BINARYEN_API BinaryenOp BinaryenAbsVecF32x4(void);
BINARYEN_API BinaryenOp BinaryenNegVecF32x4(void);
BINARYEN_API BinaryenOp BinaryenSqrtVecF32x4(void);
-BINARYEN_API BinaryenOp BinaryenQFMAVecF32x4(void);
-BINARYEN_API BinaryenOp BinaryenQFMSVecF32x4(void);
BINARYEN_API BinaryenOp BinaryenAddVecF32x4(void);
BINARYEN_API BinaryenOp BinaryenSubVecF32x4(void);
BINARYEN_API BinaryenOp BinaryenMulVecF32x4(void);
@@ -513,8 +508,6 @@ BINARYEN_API BinaryenOp BinaryenNearestVecF32x4(void);
BINARYEN_API BinaryenOp BinaryenAbsVecF64x2(void);
BINARYEN_API BinaryenOp BinaryenNegVecF64x2(void);
BINARYEN_API BinaryenOp BinaryenSqrtVecF64x2(void);
-BINARYEN_API BinaryenOp BinaryenQFMAVecF64x2(void);
-BINARYEN_API BinaryenOp BinaryenQFMSVecF64x2(void);
BINARYEN_API BinaryenOp BinaryenAddVecF64x2(void);
BINARYEN_API BinaryenOp BinaryenSubVecF64x2(void);
BINARYEN_API BinaryenOp BinaryenMulVecF64x2(void);
@@ -530,12 +523,8 @@ BINARYEN_API BinaryenOp BinaryenNearestVecF64x2(void);
// TODO: Add extending pairwise adds to C and JS APIs once merged
BINARYEN_API BinaryenOp BinaryenTruncSatSVecF32x4ToVecI32x4(void);
BINARYEN_API BinaryenOp BinaryenTruncSatUVecF32x4ToVecI32x4(void);
-BINARYEN_API BinaryenOp BinaryenTruncSatSVecF64x2ToVecI64x2(void);
-BINARYEN_API BinaryenOp BinaryenTruncSatUVecF64x2ToVecI64x2(void);
BINARYEN_API BinaryenOp BinaryenConvertSVecI32x4ToVecF32x4(void);
BINARYEN_API BinaryenOp BinaryenConvertUVecI32x4ToVecF32x4(void);
-BINARYEN_API BinaryenOp BinaryenConvertSVecI64x2ToVecF64x2(void);
-BINARYEN_API BinaryenOp BinaryenConvertUVecI64x2ToVecF64x2(void);
BINARYEN_API BinaryenOp BinaryenLoadSplatVec8x16(void);
BINARYEN_API BinaryenOp BinaryenLoadSplatVec16x8(void);
BINARYEN_API BinaryenOp BinaryenLoadSplatVec32x4(void);
@@ -551,14 +540,14 @@ BINARYEN_API BinaryenOp BinaryenNarrowSVecI16x8ToVecI8x16(void);
BINARYEN_API BinaryenOp BinaryenNarrowUVecI16x8ToVecI8x16(void);
BINARYEN_API BinaryenOp BinaryenNarrowSVecI32x4ToVecI16x8(void);
BINARYEN_API BinaryenOp BinaryenNarrowUVecI32x4ToVecI16x8(void);
-BINARYEN_API BinaryenOp BinaryenWidenLowSVecI8x16ToVecI16x8(void);
-BINARYEN_API BinaryenOp BinaryenWidenHighSVecI8x16ToVecI16x8(void);
-BINARYEN_API BinaryenOp BinaryenWidenLowUVecI8x16ToVecI16x8(void);
-BINARYEN_API BinaryenOp BinaryenWidenHighUVecI8x16ToVecI16x8(void);
-BINARYEN_API BinaryenOp BinaryenWidenLowSVecI16x8ToVecI32x4(void);
-BINARYEN_API BinaryenOp BinaryenWidenHighSVecI16x8ToVecI32x4(void);
-BINARYEN_API BinaryenOp BinaryenWidenLowUVecI16x8ToVecI32x4(void);
-BINARYEN_API BinaryenOp BinaryenWidenHighUVecI16x8ToVecI32x4(void);
+BINARYEN_API BinaryenOp BinaryenExtendLowSVecI8x16ToVecI16x8(void);
+BINARYEN_API BinaryenOp BinaryenExtendHighSVecI8x16ToVecI16x8(void);
+BINARYEN_API BinaryenOp BinaryenExtendLowUVecI8x16ToVecI16x8(void);
+BINARYEN_API BinaryenOp BinaryenExtendHighUVecI8x16ToVecI16x8(void);
+BINARYEN_API BinaryenOp BinaryenExtendLowSVecI16x8ToVecI32x4(void);
+BINARYEN_API BinaryenOp BinaryenExtendHighSVecI16x8ToVecI32x4(void);
+BINARYEN_API BinaryenOp BinaryenExtendLowUVecI16x8ToVecI32x4(void);
+BINARYEN_API BinaryenOp BinaryenExtendHighUVecI16x8ToVecI32x4(void);
BINARYEN_API BinaryenOp BinaryenSwizzleVec8x16(void);
BINARYEN_API BinaryenOp BinaryenRefIsNull(void);
BINARYEN_API BinaryenOp BinaryenRefIsFunc(void);
diff --git a/src/gen-s-parser.inc b/src/gen-s-parser.inc
index 4e5a61d92..6dfb92f5f 100644
--- a/src/gen-s-parser.inc
+++ b/src/gen-s-parser.inc
@@ -415,17 +415,6 @@ switch (op[0]) {
default: goto parse_error;
}
}
- case 'q': {
- switch (op[9]) {
- case 'a':
- if (strcmp(op, "f32x4.qfma") == 0) { return makeSIMDTernary(s, SIMDTernaryOp::QFMAF32x4); }
- goto parse_error;
- case 's':
- if (strcmp(op, "f32x4.qfms") == 0) { return makeSIMDTernary(s, SIMDTernaryOp::QFMSF32x4); }
- goto parse_error;
- default: goto parse_error;
- }
- }
case 'r':
if (strcmp(op, "f32x4.replace_lane") == 0) { return makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecF32x4, 4); }
goto parse_error;
@@ -625,29 +614,13 @@ switch (op[0]) {
if (strcmp(op, "f64x2.ceil") == 0) { return makeUnary(s, UnaryOp::CeilVecF64x2); }
goto parse_error;
case 'o': {
- switch (op[14]) {
- case 'i': {
- switch (op[20]) {
- case 's':
- if (strcmp(op, "f64x2.convert_i64x2_s") == 0) { return makeUnary(s, UnaryOp::ConvertSVecI64x2ToVecF64x2); }
- goto parse_error;
- case 'u':
- if (strcmp(op, "f64x2.convert_i64x2_u") == 0) { return makeUnary(s, UnaryOp::ConvertUVecI64x2ToVecF64x2); }
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'l': {
- switch (op[24]) {
- case 's':
- if (strcmp(op, "f64x2.convert_low_i32x4_s") == 0) { return makeUnary(s, UnaryOp::ConvertLowSVecI32x4ToVecF64x2); }
- goto parse_error;
- case 'u':
- if (strcmp(op, "f64x2.convert_low_i32x4_u") == 0) { return makeUnary(s, UnaryOp::ConvertLowUVecI32x4ToVecF64x2); }
- goto parse_error;
- default: goto parse_error;
- }
- }
+ switch (op[24]) {
+ case 's':
+ if (strcmp(op, "f64x2.convert_low_i32x4_s") == 0) { return makeUnary(s, UnaryOp::ConvertLowSVecI32x4ToVecF64x2); }
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "f64x2.convert_low_i32x4_u") == 0) { return makeUnary(s, UnaryOp::ConvertLowUVecI32x4ToVecF64x2); }
+ goto parse_error;
default: goto parse_error;
}
}
@@ -740,17 +713,6 @@ switch (op[0]) {
default: goto parse_error;
}
}
- case 'q': {
- switch (op[9]) {
- case 'a':
- if (strcmp(op, "f64x2.qfma") == 0) { return makeSIMDTernary(s, SIMDTernaryOp::QFMAF64x2); }
- goto parse_error;
- case 's':
- if (strcmp(op, "f64x2.qfms") == 0) { return makeSIMDTernary(s, SIMDTernaryOp::QFMSF64x2); }
- goto parse_error;
- default: goto parse_error;
- }
- }
case 'r':
if (strcmp(op, "f64x2.replace_lane") == 0) { return makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecF64x2, 2); }
goto parse_error;
@@ -806,12 +768,12 @@ switch (op[0]) {
if (strcmp(op, "i16x8.add") == 0) { return makeBinary(s, BinaryOp::AddVecI16x8); }
goto parse_error;
case '_': {
- switch (op[19]) {
+ switch (op[14]) {
case 's':
- if (strcmp(op, "i16x8.add_saturate_s") == 0) { return makeBinary(s, BinaryOp::AddSatSVecI16x8); }
+ if (strcmp(op, "i16x8.add_sat_s") == 0) { return makeBinary(s, BinaryOp::AddSatSVecI16x8); }
goto parse_error;
case 'u':
- if (strcmp(op, "i16x8.add_saturate_u") == 0) { return makeBinary(s, BinaryOp::AddSatUVecI16x8); }
+ if (strcmp(op, "i16x8.add_sat_u") == 0) { return makeBinary(s, BinaryOp::AddSatUVecI16x8); }
goto parse_error;
default: goto parse_error;
}
@@ -822,9 +784,6 @@ switch (op[0]) {
case 'l':
if (strcmp(op, "i16x8.all_true") == 0) { return makeUnary(s, UnaryOp::AllTrueVecI16x8); }
goto parse_error;
- case 'n':
- if (strcmp(op, "i16x8.any_true") == 0) { return makeUnary(s, UnaryOp::AnyTrueVecI16x8); }
- goto parse_error;
case 'v':
if (strcmp(op, "i16x8.avgr_u") == 0) { return makeBinary(s, BinaryOp::AvgrUVecI16x8); }
goto parse_error;
@@ -852,6 +811,33 @@ switch (op[0]) {
default: goto parse_error;
}
}
+ case 'e': {
+ switch (op[13]) {
+ case 'h': {
+ switch (op[24]) {
+ case 's':
+ if (strcmp(op, "i16x8.extend_high_i8x16_s") == 0) { return makeUnary(s, UnaryOp::ExtendHighSVecI8x16ToVecI16x8); }
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i16x8.extend_high_i8x16_u") == 0) { return makeUnary(s, UnaryOp::ExtendHighUVecI8x16ToVecI16x8); }
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'l': {
+ switch (op[23]) {
+ case 's':
+ if (strcmp(op, "i16x8.extend_low_i8x16_s") == 0) { return makeUnary(s, UnaryOp::ExtendLowSVecI8x16ToVecI16x8); }
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i16x8.extend_low_i8x16_u") == 0) { return makeUnary(s, UnaryOp::ExtendLowUVecI8x16ToVecI16x8); }
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
case 'm': {
switch (op[13]) {
case 'h': {
@@ -936,17 +922,6 @@ switch (op[0]) {
default: goto parse_error;
}
}
- case 'o': {
- switch (op[14]) {
- case 's':
- if (strcmp(op, "i16x8.load8x8_s") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadExtSVec8x8ToVecI16x8); }
- goto parse_error;
- case 'u':
- if (strcmp(op, "i16x8.load8x8_u") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadExtUVec8x8ToVecI16x8); }
- goto parse_error;
- default: goto parse_error;
- }
- }
case 't': {
switch (op[9]) {
case 's':
@@ -1054,12 +1029,12 @@ switch (op[0]) {
if (strcmp(op, "i16x8.sub") == 0) { return makeBinary(s, BinaryOp::SubVecI16x8); }
goto parse_error;
case '_': {
- switch (op[19]) {
+ switch (op[14]) {
case 's':
- if (strcmp(op, "i16x8.sub_saturate_s") == 0) { return makeBinary(s, BinaryOp::SubSatSVecI16x8); }
+ if (strcmp(op, "i16x8.sub_sat_s") == 0) { return makeBinary(s, BinaryOp::SubSatSVecI16x8); }
goto parse_error;
case 'u':
- if (strcmp(op, "i16x8.sub_saturate_u") == 0) { return makeBinary(s, BinaryOp::SubSatUVecI16x8); }
+ if (strcmp(op, "i16x8.sub_sat_u") == 0) { return makeBinary(s, BinaryOp::SubSatUVecI16x8); }
goto parse_error;
default: goto parse_error;
}
@@ -1070,33 +1045,6 @@ switch (op[0]) {
default: goto parse_error;
}
}
- case 'w': {
- switch (op[12]) {
- case 'h': {
- switch (op[23]) {
- case 's':
- if (strcmp(op, "i16x8.widen_high_i8x16_s") == 0) { return makeUnary(s, UnaryOp::WidenHighSVecI8x16ToVecI16x8); }
- goto parse_error;
- case 'u':
- if (strcmp(op, "i16x8.widen_high_i8x16_u") == 0) { return makeUnary(s, UnaryOp::WidenHighUVecI8x16ToVecI16x8); }
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'l': {
- switch (op[22]) {
- case 's':
- if (strcmp(op, "i16x8.widen_low_i8x16_s") == 0) { return makeUnary(s, UnaryOp::WidenLowSVecI8x16ToVecI16x8); }
- goto parse_error;
- case 'u':
- if (strcmp(op, "i16x8.widen_low_i8x16_u") == 0) { return makeUnary(s, UnaryOp::WidenLowUVecI8x16ToVecI16x8); }
- goto parse_error;
- default: goto parse_error;
- }
- }
- default: goto parse_error;
- }
- }
default: goto parse_error;
}
}
@@ -1587,9 +1535,6 @@ switch (op[0]) {
case 'l':
if (strcmp(op, "i32x4.all_true") == 0) { return makeUnary(s, UnaryOp::AllTrueVecI32x4); }
goto parse_error;
- case 'n':
- if (strcmp(op, "i32x4.any_true") == 0) { return makeUnary(s, UnaryOp::AnyTrueVecI32x4); }
- goto parse_error;
default: goto parse_error;
}
}
@@ -1617,6 +1562,33 @@ switch (op[0]) {
default: goto parse_error;
}
}
+ case 'e': {
+ switch (op[13]) {
+ case 'h': {
+ switch (op[24]) {
+ case 's':
+ if (strcmp(op, "i32x4.extend_high_i16x8_s") == 0) { return makeUnary(s, UnaryOp::ExtendHighSVecI16x8ToVecI32x4); }
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i32x4.extend_high_i16x8_u") == 0) { return makeUnary(s, UnaryOp::ExtendHighUVecI16x8ToVecI32x4); }
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'l': {
+ switch (op[23]) {
+ case 's':
+ if (strcmp(op, "i32x4.extend_low_i16x8_s") == 0) { return makeUnary(s, UnaryOp::ExtendLowSVecI16x8ToVecI32x4); }
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i32x4.extend_low_i16x8_u") == 0) { return makeUnary(s, UnaryOp::ExtendLowUVecI16x8ToVecI32x4); }
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
case 'm': {
switch (op[13]) {
case 'h': {
@@ -1693,17 +1665,6 @@ switch (op[0]) {
default: goto parse_error;
}
}
- case 'o': {
- switch (op[15]) {
- case 's':
- if (strcmp(op, "i32x4.load16x4_s") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadExtSVec16x4ToVecI32x4); }
- goto parse_error;
- case 'u':
- if (strcmp(op, "i32x4.load16x4_u") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadExtUVec16x4ToVecI32x4); }
- goto parse_error;
- default: goto parse_error;
- }
- }
case 't': {
switch (op[9]) {
case 's':
@@ -1806,50 +1767,12 @@ switch (op[0]) {
}
}
case '6': {
- switch (op[27]) {
- case 's':
- if (strcmp(op, "i32x4.trunc_sat_f64x2_zero_s") == 0) { return makeUnary(s, UnaryOp::TruncSatZeroSVecF64x2ToVecI32x4); }
- goto parse_error;
- case 'u':
- if (strcmp(op, "i32x4.trunc_sat_f64x2_zero_u") == 0) { return makeUnary(s, UnaryOp::TruncSatZeroUVecF64x2ToVecI32x4); }
- goto parse_error;
- default: goto parse_error;
- }
- }
- default: goto parse_error;
- }
- }
- case 'w': {
- switch (op[12]) {
- case 'h': {
- switch (op[23]) {
- case 's':
- if (strcmp(op, "i32x4.widen_high_i16x8_s") == 0) { return makeUnary(s, UnaryOp::WidenHighSVecI16x8ToVecI32x4); }
- goto parse_error;
- case 'u':
- if (strcmp(op, "i32x4.widen_high_i16x8_u") == 0) { return makeUnary(s, UnaryOp::WidenHighUVecI16x8ToVecI32x4); }
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'i': {
- switch (op[18]) {
- case 's':
- if (strcmp(op, "i32x4.widen_i8x16_s") == 0) { return makeSIMDWiden(s, SIMDWidenOp::WidenSVecI8x16ToVecI32x4); }
- goto parse_error;
- case 'u':
- if (strcmp(op, "i32x4.widen_i8x16_u") == 0) { return makeSIMDWiden(s, SIMDWidenOp::WidenUVecI8x16ToVecI32x4); }
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'l': {
switch (op[22]) {
case 's':
- if (strcmp(op, "i32x4.widen_low_i16x8_s") == 0) { return makeUnary(s, UnaryOp::WidenLowSVecI16x8ToVecI32x4); }
+ if (strcmp(op, "i32x4.trunc_sat_f64x2_s_zero") == 0) { return makeUnary(s, UnaryOp::TruncSatZeroSVecF64x2ToVecI32x4); }
goto parse_error;
case 'u':
- if (strcmp(op, "i32x4.widen_low_i16x8_u") == 0) { return makeUnary(s, UnaryOp::WidenLowUVecI16x8ToVecI32x4); }
+ if (strcmp(op, "i32x4.trunc_sat_f64x2_u_zero") == 0) { return makeUnary(s, UnaryOp::TruncSatZeroUVecF64x2ToVecI32x4); }
goto parse_error;
default: goto parse_error;
}
@@ -2388,9 +2311,20 @@ switch (op[0]) {
}
case 'x': {
switch (op[6]) {
- case 'a':
- if (strcmp(op, "i64x2.add") == 0) { return makeBinary(s, BinaryOp::AddVecI64x2); }
- goto parse_error;
+ case 'a': {
+ switch (op[7]) {
+ case 'b':
+ if (strcmp(op, "i64x2.abs") == 0) { return makeUnary(s, UnaryOp::AbsVecI64x2); }
+ goto parse_error;
+ case 'd':
+ if (strcmp(op, "i64x2.add") == 0) { return makeBinary(s, BinaryOp::AddVecI64x2); }
+ goto parse_error;
+ case 'l':
+ if (strcmp(op, "i64x2.all_true") == 0) { return makeUnary(s, UnaryOp::AllTrueVecI64x2); }
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
case 'b':
if (strcmp(op, "i64x2.bitmask") == 0) { return makeUnary(s, UnaryOp::BitmaskVecI64x2); }
goto parse_error;
@@ -2401,6 +2335,33 @@ switch (op[0]) {
goto parse_error;
case 'x': {
switch (op[9]) {
+ case 'e': {
+ switch (op[13]) {
+ case 'h': {
+ switch (op[24]) {
+ case 's':
+ if (strcmp(op, "i64x2.extend_high_i32x4_s") == 0) { return makeUnary(s, UnaryOp::ExtendHighSVecI32x4ToVecI64x2); }
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i64x2.extend_high_i32x4_u") == 0) { return makeUnary(s, UnaryOp::ExtendHighUVecI32x4ToVecI64x2); }
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'l': {
+ switch (op[23]) {
+ case 's':
+ if (strcmp(op, "i64x2.extend_low_i32x4_s") == 0) { return makeUnary(s, UnaryOp::ExtendLowSVecI32x4ToVecI64x2); }
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "i64x2.extend_low_i32x4_u") == 0) { return makeUnary(s, UnaryOp::ExtendLowUVecI32x4ToVecI64x2); }
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
case 'm': {
switch (op[13]) {
case 'h': {
@@ -2437,13 +2398,24 @@ switch (op[0]) {
default: goto parse_error;
}
}
+ case 'g': {
+ switch (op[7]) {
+ case 'e':
+ if (strcmp(op, "i64x2.ge_s") == 0) { return makeBinary(s, BinaryOp::GeSVecI64x2); }
+ goto parse_error;
+ case 't':
+ if (strcmp(op, "i64x2.gt_s") == 0) { return makeBinary(s, BinaryOp::GtSVecI64x2); }
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
case 'l': {
- switch (op[15]) {
- case 's':
- if (strcmp(op, "i64x2.load32x2_s") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadExtSVec32x2ToVecI64x2); }
+ switch (op[7]) {
+ case 'e':
+ if (strcmp(op, "i64x2.le_s") == 0) { return makeBinary(s, BinaryOp::LeSVecI64x2); }
goto parse_error;
- case 'u':
- if (strcmp(op, "i64x2.load32x2_u") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadExtUVec32x2ToVecI64x2); }
+ case 't':
+ if (strcmp(op, "i64x2.lt_s") == 0) { return makeBinary(s, BinaryOp::LtSVecI64x2); }
goto parse_error;
default: goto parse_error;
}
@@ -2451,9 +2423,17 @@ switch (op[0]) {
case 'm':
if (strcmp(op, "i64x2.mul") == 0) { return makeBinary(s, BinaryOp::MulVecI64x2); }
goto parse_error;
- case 'n':
- if (strcmp(op, "i64x2.neg") == 0) { return makeUnary(s, UnaryOp::NegVecI64x2); }
- goto parse_error;
+ case 'n': {
+ switch (op[8]) {
+ case '\0':
+ if (strcmp(op, "i64x2.ne") == 0) { return makeBinary(s, BinaryOp::NeVecI64x2); }
+ goto parse_error;
+ case 'g':
+ if (strcmp(op, "i64x2.neg") == 0) { return makeUnary(s, UnaryOp::NegVecI64x2); }
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
case 'r':
if (strcmp(op, "i64x2.replace_lane") == 0) { return makeSIMDReplace(s, SIMDReplaceOp::ReplaceLaneVecI64x2, 2); }
goto parse_error;
@@ -2487,44 +2467,6 @@ switch (op[0]) {
default: goto parse_error;
}
}
- case 't': {
- switch (op[22]) {
- case 's':
- if (strcmp(op, "i64x2.trunc_sat_f64x2_s") == 0) { return makeUnary(s, UnaryOp::TruncSatSVecF64x2ToVecI64x2); }
- goto parse_error;
- case 'u':
- if (strcmp(op, "i64x2.trunc_sat_f64x2_u") == 0) { return makeUnary(s, UnaryOp::TruncSatUVecF64x2ToVecI64x2); }
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'w': {
- switch (op[12]) {
- case 'h': {
- switch (op[23]) {
- case 's':
- if (strcmp(op, "i64x2.widen_high_i32x4_s") == 0) { return makeUnary(s, UnaryOp::WidenHighSVecI32x4ToVecI64x2); }
- goto parse_error;
- case 'u':
- if (strcmp(op, "i64x2.widen_high_i32x4_u") == 0) { return makeUnary(s, UnaryOp::WidenHighUVecI32x4ToVecI64x2); }
- goto parse_error;
- default: goto parse_error;
- }
- }
- case 'l': {
- switch (op[22]) {
- case 's':
- if (strcmp(op, "i64x2.widen_low_i32x4_s") == 0) { return makeUnary(s, UnaryOp::WidenLowSVecI32x4ToVecI64x2); }
- goto parse_error;
- case 'u':
- if (strcmp(op, "i64x2.widen_low_i32x4_u") == 0) { return makeUnary(s, UnaryOp::WidenLowUVecI32x4ToVecI64x2); }
- goto parse_error;
- default: goto parse_error;
- }
- }
- default: goto parse_error;
- }
- }
default: goto parse_error;
}
}
@@ -2544,12 +2486,12 @@ switch (op[0]) {
if (strcmp(op, "i8x16.add") == 0) { return makeBinary(s, BinaryOp::AddVecI8x16); }
goto parse_error;
case '_': {
- switch (op[19]) {
+ switch (op[14]) {
case 's':
- if (strcmp(op, "i8x16.add_saturate_s") == 0) { return makeBinary(s, BinaryOp::AddSatSVecI8x16); }
+ if (strcmp(op, "i8x16.add_sat_s") == 0) { return makeBinary(s, BinaryOp::AddSatSVecI8x16); }
goto parse_error;
case 'u':
- if (strcmp(op, "i8x16.add_saturate_u") == 0) { return makeBinary(s, BinaryOp::AddSatUVecI8x16); }
+ if (strcmp(op, "i8x16.add_sat_u") == 0) { return makeBinary(s, BinaryOp::AddSatUVecI8x16); }
goto parse_error;
default: goto parse_error;
}
@@ -2560,9 +2502,6 @@ switch (op[0]) {
case 'l':
if (strcmp(op, "i8x16.all_true") == 0) { return makeUnary(s, UnaryOp::AllTrueVecI8x16); }
goto parse_error;
- case 'n':
- if (strcmp(op, "i8x16.any_true") == 0) { return makeUnary(s, UnaryOp::AnyTrueVecI8x16); }
- goto parse_error;
case 'v':
if (strcmp(op, "i8x16.avgr_u") == 0) { return makeBinary(s, BinaryOp::AvgrUVecI8x16); }
goto parse_error;
@@ -2669,9 +2608,6 @@ switch (op[0]) {
default: goto parse_error;
}
}
- case 'u':
- if (strcmp(op, "i8x16.mul") == 0) { return makeBinary(s, BinaryOp::MulVecI8x16); }
- goto parse_error;
default: goto parse_error;
}
}
@@ -2726,6 +2662,9 @@ switch (op[0]) {
default: goto parse_error;
}
}
+ case 'u':
+ if (strcmp(op, "i8x16.shuffle") == 0) { return makeSIMDShuffle(s); }
+ goto parse_error;
default: goto parse_error;
}
}
@@ -2738,12 +2677,12 @@ switch (op[0]) {
if (strcmp(op, "i8x16.sub") == 0) { return makeBinary(s, BinaryOp::SubVecI8x16); }
goto parse_error;
case '_': {
- switch (op[19]) {
+ switch (op[14]) {
case 's':
- if (strcmp(op, "i8x16.sub_saturate_s") == 0) { return makeBinary(s, BinaryOp::SubSatSVecI8x16); }
+ if (strcmp(op, "i8x16.sub_sat_s") == 0) { return makeBinary(s, BinaryOp::SubSatSVecI8x16); }
goto parse_error;
case 'u':
- if (strcmp(op, "i8x16.sub_saturate_u") == 0) { return makeBinary(s, BinaryOp::SubSatUVecI8x16); }
+ if (strcmp(op, "i8x16.sub_sat_u") == 0) { return makeBinary(s, BinaryOp::SubSatUVecI8x16); }
goto parse_error;
default: goto parse_error;
}
@@ -2751,6 +2690,9 @@ switch (op[0]) {
default: goto parse_error;
}
}
+ case 'w':
+ if (strcmp(op, "i8x16.swizzle") == 0) { return makeBinary(s, BinaryOp::SwizzleVec8x16); }
+ goto parse_error;
default: goto parse_error;
}
}
@@ -2827,25 +2769,9 @@ switch (op[0]) {
case 'n':
if (strcmp(op, "nop") == 0) { return makeNop(); }
goto parse_error;
- case 'p': {
- switch (op[1]) {
- case 'o':
- if (strcmp(op, "pop") == 0) { return makePop(s); }
- goto parse_error;
- case 'r': {
- switch (op[9]) {
- case 'n':
- if (strcmp(op, "prefetch.nt") == 0) { return makePrefetch(s, PrefetchOp::PrefetchNontemporal); }
- goto parse_error;
- case 't':
- if (strcmp(op, "prefetch.t") == 0) { return makePrefetch(s, PrefetchOp::PrefetchTemporal); }
- goto parse_error;
- default: goto parse_error;
- }
- }
- default: goto parse_error;
- }
- }
+ case 'p':
+ if (strcmp(op, "pop") == 0) { return makePop(s); }
+ goto parse_error;
case 'r': {
switch (op[1]) {
case 'e': {
@@ -3036,154 +2962,167 @@ switch (op[0]) {
if (strcmp(op, "unreachable") == 0) { return makeUnreachable(); }
goto parse_error;
case 'v': {
- switch (op[1]) {
- case '1': {
- switch (op[2]) {
- case '2': {
- switch (op[5]) {
- case 'a': {
- switch (op[8]) {
- case '\0':
- if (strcmp(op, "v128.and") == 0) { return makeBinary(s, BinaryOp::AndVec128); }
+ switch (op[5]) {
+ case 'a': {
+ switch (op[7]) {
+ case 'd': {
+ switch (op[8]) {
+ case '\0':
+ if (strcmp(op, "v128.and") == 0) { return makeBinary(s, BinaryOp::AndVec128); }
+ goto parse_error;
+ case 'n':
+ if (strcmp(op, "v128.andnot") == 0) { return makeBinary(s, BinaryOp::AndNotVec128); }
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'y':
+ if (strcmp(op, "v128.any_true") == 0) { return makeUnary(s, UnaryOp::AnyTrueVec128); }
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'b':
+ if (strcmp(op, "v128.bitselect") == 0) { return makeSIMDTernary(s, SIMDTernaryOp::Bitselect); }
+ goto parse_error;
+ case 'c':
+ if (strcmp(op, "v128.const") == 0) { return makeConst(s, Type::v128); }
+ goto parse_error;
+ case 'l': {
+ switch (op[9]) {
+ case '\0':
+ if (strcmp(op, "v128.load") == 0) { return makeLoad(s, Type::v128, /*isAtomic=*/false); }
+ goto parse_error;
+ case '1': {
+ switch (op[11]) {
+ case '_': {
+ switch (op[12]) {
+ case 'l':
+ if (strcmp(op, "v128.load16_lane") == 0) { return makeSIMDLoadStoreLane(s, LoadLaneVec16x8); }
goto parse_error;
- case 'n':
- if (strcmp(op, "v128.andnot") == 0) { return makeBinary(s, BinaryOp::AndNotVec128); }
+ case 's':
+ if (strcmp(op, "v128.load16_splat") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadSplatVec16x8); }
goto parse_error;
default: goto parse_error;
}
}
- case 'b':
- if (strcmp(op, "v128.bitselect") == 0) { return makeSIMDTernary(s, SIMDTernaryOp::Bitselect); }
- goto parse_error;
- case 'c':
- if (strcmp(op, "v128.const") == 0) { return makeConst(s, Type::v128); }
- goto parse_error;
- case 'l': {
- switch (op[9]) {
- case '\0':
- if (strcmp(op, "v128.load") == 0) { return makeLoad(s, Type::v128, /*isAtomic=*/false); }
- goto parse_error;
- case '1':
- if (strcmp(op, "v128.load16_lane") == 0) { return makeSIMDLoadStoreLane(s, LoadLaneVec16x8); }
+ case 'x': {
+ switch (op[14]) {
+ case 's':
+ if (strcmp(op, "v128.load16x4_s") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadExtSVec16x4ToVecI32x4); }
goto parse_error;
- case '3': {
- switch (op[12]) {
- case 'l':
- if (strcmp(op, "v128.load32_lane") == 0) { return makeSIMDLoadStoreLane(s, LoadLaneVec32x4); }
- goto parse_error;
- case 'z':
- if (strcmp(op, "v128.load32_zero") == 0) { return makeSIMDLoad(s, SIMDLoadOp::Load32Zero); }
- goto parse_error;
- default: goto parse_error;
- }
- }
- case '6': {
- switch (op[12]) {
- case 'l':
- if (strcmp(op, "v128.load64_lane") == 0) { return makeSIMDLoadStoreLane(s, LoadLaneVec64x2); }
- goto parse_error;
- case 'z':
- if (strcmp(op, "v128.load64_zero") == 0) { return makeSIMDLoad(s, SIMDLoadOp::Load64Zero); }
- goto parse_error;
- default: goto parse_error;
- }
- }
- case '8':
- if (strcmp(op, "v128.load8_lane") == 0) { return makeSIMDLoadStoreLane(s, LoadLaneVec8x16); }
+ case 'u':
+ if (strcmp(op, "v128.load16x4_u") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadExtUVec16x4ToVecI32x4); }
goto parse_error;
default: goto parse_error;
}
}
- case 'n':
- if (strcmp(op, "v128.not") == 0) { return makeUnary(s, UnaryOp::NotVec128); }
- goto parse_error;
- case 'o':
- if (strcmp(op, "v128.or") == 0) { return makeBinary(s, BinaryOp::OrVec128); }
- goto parse_error;
- case 's': {
- switch (op[10]) {
- case '\0':
- if (strcmp(op, "v128.store") == 0) { return makeStore(s, Type::v128, /*isAtomic=*/false); }
+ default: goto parse_error;
+ }
+ }
+ case '3': {
+ switch (op[11]) {
+ case '_': {
+ switch (op[12]) {
+ case 'l':
+ if (strcmp(op, "v128.load32_lane") == 0) { return makeSIMDLoadStoreLane(s, LoadLaneVec32x4); }
goto parse_error;
- case '1':
- if (strcmp(op, "v128.store16_lane") == 0) { return makeSIMDLoadStoreLane(s, StoreLaneVec16x8); }
+ case 's':
+ if (strcmp(op, "v128.load32_splat") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadSplatVec32x4); }
goto parse_error;
- case '3':
- if (strcmp(op, "v128.store32_lane") == 0) { return makeSIMDLoadStoreLane(s, StoreLaneVec32x4); }
+ case 'z':
+ if (strcmp(op, "v128.load32_zero") == 0) { return makeSIMDLoad(s, SIMDLoadOp::Load32Zero); }
goto parse_error;
- case '6':
- if (strcmp(op, "v128.store64_lane") == 0) { return makeSIMDLoadStoreLane(s, StoreLaneVec64x2); }
+ default: goto parse_error;
+ }
+ }
+ case 'x': {
+ switch (op[14]) {
+ case 's':
+ if (strcmp(op, "v128.load32x2_s") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadExtSVec32x2ToVecI64x2); }
goto parse_error;
- case '8':
- if (strcmp(op, "v128.store8_lane") == 0) { return makeSIMDLoadStoreLane(s, StoreLaneVec8x16); }
+ case 'u':
+ if (strcmp(op, "v128.load32x2_u") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadExtUVec32x2ToVecI64x2); }
goto parse_error;
default: goto parse_error;
}
}
- case 'x':
- if (strcmp(op, "v128.xor") == 0) { return makeBinary(s, BinaryOp::XorVec128); }
- goto parse_error;
default: goto parse_error;
}
}
case '6': {
- switch (op[6]) {
+ switch (op[12]) {
case 'l':
- if (strcmp(op, "v16x8.load_splat") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadSplatVec16x8); }
+ if (strcmp(op, "v128.load64_lane") == 0) { return makeSIMDLoadStoreLane(s, LoadLaneVec64x2); }
goto parse_error;
case 's':
- if (strcmp(op, "v16x8.signselect") == 0) { return makeSIMDTernary(s, SIMDTernaryOp::SignSelectVec16x8); }
+ if (strcmp(op, "v128.load64_splat") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadSplatVec64x2); }
+ goto parse_error;
+ case 'z':
+ if (strcmp(op, "v128.load64_zero") == 0) { return makeSIMDLoad(s, SIMDLoadOp::Load64Zero); }
goto parse_error;
default: goto parse_error;
}
}
+ case '8': {
+ switch (op[10]) {
+ case '_': {
+ switch (op[11]) {
+ case 'l':
+ if (strcmp(op, "v128.load8_lane") == 0) { return makeSIMDLoadStoreLane(s, LoadLaneVec8x16); }
+ goto parse_error;
+ case 's':
+ if (strcmp(op, "v128.load8_splat") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadSplatVec8x16); }
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ case 'x': {
+ switch (op[13]) {
+ case 's':
+ if (strcmp(op, "v128.load8x8_s") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadExtSVec8x8ToVecI16x8); }
+ goto parse_error;
+ case 'u':
+ if (strcmp(op, "v128.load8x8_u") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadExtUVec8x8ToVecI16x8); }
+ goto parse_error;
+ default: goto parse_error;
+ }
+ }
+ default: goto parse_error;
+ }
+ }
default: goto parse_error;
}
}
- case '3': {
- switch (op[6]) {
- case 'l':
- if (strcmp(op, "v32x4.load_splat") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadSplatVec32x4); }
+ case 'n':
+ if (strcmp(op, "v128.not") == 0) { return makeUnary(s, UnaryOp::NotVec128); }
+ goto parse_error;
+ case 'o':
+ if (strcmp(op, "v128.or") == 0) { return makeBinary(s, BinaryOp::OrVec128); }
+ goto parse_error;
+ case 's': {
+ switch (op[10]) {
+ case '\0':
+ if (strcmp(op, "v128.store") == 0) { return makeStore(s, Type::v128, /*isAtomic=*/false); }
goto parse_error;
- case 's':
- if (strcmp(op, "v32x4.signselect") == 0) { return makeSIMDTernary(s, SIMDTernaryOp::SignSelectVec32x4); }
+ case '1':
+ if (strcmp(op, "v128.store16_lane") == 0) { return makeSIMDLoadStoreLane(s, StoreLaneVec16x8); }
goto parse_error;
- default: goto parse_error;
- }
- }
- case '6': {
- switch (op[6]) {
- case 'l':
- if (strcmp(op, "v64x2.load_splat") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadSplatVec64x2); }
+ case '3':
+ if (strcmp(op, "v128.store32_lane") == 0) { return makeSIMDLoadStoreLane(s, StoreLaneVec32x4); }
goto parse_error;
- case 's':
- if (strcmp(op, "v64x2.signselect") == 0) { return makeSIMDTernary(s, SIMDTernaryOp::SignSelectVec64x2); }
+ case '6':
+ if (strcmp(op, "v128.store64_lane") == 0) { return makeSIMDLoadStoreLane(s, StoreLaneVec64x2); }
goto parse_error;
- default: goto parse_error;
- }
- }
- case '8': {
- switch (op[6]) {
- case 'l':
- if (strcmp(op, "v8x16.load_splat") == 0) { return makeSIMDLoad(s, SIMDLoadOp::LoadSplatVec8x16); }
+ case '8':
+ if (strcmp(op, "v128.store8_lane") == 0) { return makeSIMDLoadStoreLane(s, StoreLaneVec8x16); }
goto parse_error;
- case 's': {
- switch (op[7]) {
- case 'h':
- if (strcmp(op, "v8x16.shuffle") == 0) { return makeSIMDShuffle(s); }
- goto parse_error;
- case 'i':
- if (strcmp(op, "v8x16.signselect") == 0) { return makeSIMDTernary(s, SIMDTernaryOp::SignSelectVec8x16); }
- goto parse_error;
- case 'w':
- if (strcmp(op, "v8x16.swizzle") == 0) { return makeBinary(s, BinaryOp::SwizzleVec8x16); }
- goto parse_error;
- default: goto parse_error;
- }
- }
default: goto parse_error;
}
}
+ case 'x':
+ if (strcmp(op, "v128.xor") == 0) { return makeBinary(s, BinaryOp::XorVec128); }
+ goto parse_error;
default: goto parse_error;
}
}
diff --git a/src/ir/ReFinalize.cpp b/src/ir/ReFinalize.cpp
index 0f7bab653..5582172c3 100644
--- a/src/ir/ReFinalize.cpp
+++ b/src/ir/ReFinalize.cpp
@@ -115,8 +115,6 @@ void ReFinalize::visitSIMDLoad(SIMDLoad* curr) { curr->finalize(); }
void ReFinalize::visitSIMDLoadStoreLane(SIMDLoadStoreLane* curr) {
curr->finalize();
}
-void ReFinalize::visitSIMDWiden(SIMDWiden* curr) { curr->finalize(); }
-void ReFinalize::visitPrefetch(Prefetch* curr) { curr->finalize(); }
void ReFinalize::visitMemoryInit(MemoryInit* curr) { curr->finalize(); }
void ReFinalize::visitDataDrop(DataDrop* curr) { curr->finalize(); }
void ReFinalize::visitMemoryCopy(MemoryCopy* curr) { curr->finalize(); }
diff --git a/src/ir/cost.h b/src/ir/cost.h
index 07219659c..5d01d2611 100644
--- a/src/ir/cost.h
+++ b/src/ir/cost.h
@@ -172,23 +172,23 @@ struct CostAnalyzer : public OverriddenVisitor<CostAnalyzer, Index> {
case SplatVecF32x4:
case SplatVecF64x2:
case NotVec128:
+ case AnyTrueVec128:
case AbsVecI8x16:
case NegVecI8x16:
- case AnyTrueVecI8x16:
case AllTrueVecI8x16:
case BitmaskVecI8x16:
case PopcntVecI8x16:
case AbsVecI16x8:
case NegVecI16x8:
- case AnyTrueVecI16x8:
case AllTrueVecI16x8:
case BitmaskVecI16x8:
case AbsVecI32x4:
case NegVecI32x4:
- case AnyTrueVecI32x4:
case AllTrueVecI32x4:
case BitmaskVecI32x4:
+ case AbsVecI64x2:
case NegVecI64x2:
+ case AllTrueVecI64x2:
case BitmaskVecI64x2:
case AbsVecF32x4:
case NegVecF32x4:
@@ -210,24 +210,20 @@ struct CostAnalyzer : public OverriddenVisitor<CostAnalyzer, Index> {
case ExtAddPairwiseUVecI16x8ToI32x4:
case TruncSatSVecF32x4ToVecI32x4:
case TruncSatUVecF32x4ToVecI32x4:
- case TruncSatSVecF64x2ToVecI64x2:
- case TruncSatUVecF64x2ToVecI64x2:
case ConvertSVecI32x4ToVecF32x4:
case ConvertUVecI32x4ToVecF32x4:
- case ConvertSVecI64x2ToVecF64x2:
- case ConvertUVecI64x2ToVecF64x2:
- case WidenLowSVecI8x16ToVecI16x8:
- case WidenHighSVecI8x16ToVecI16x8:
- case WidenLowUVecI8x16ToVecI16x8:
- case WidenHighUVecI8x16ToVecI16x8:
- case WidenLowSVecI16x8ToVecI32x4:
- case WidenHighSVecI16x8ToVecI32x4:
- case WidenLowUVecI16x8ToVecI32x4:
- case WidenHighUVecI16x8ToVecI32x4:
- case WidenLowSVecI32x4ToVecI64x2:
- case WidenHighSVecI32x4ToVecI64x2:
- case WidenLowUVecI32x4ToVecI64x2:
- case WidenHighUVecI32x4ToVecI64x2:
+ case ExtendLowSVecI8x16ToVecI16x8:
+ case ExtendHighSVecI8x16ToVecI16x8:
+ case ExtendLowUVecI8x16ToVecI16x8:
+ case ExtendHighUVecI8x16ToVecI16x8:
+ case ExtendLowSVecI16x8ToVecI32x4:
+ case ExtendHighSVecI16x8ToVecI32x4:
+ case ExtendLowUVecI16x8ToVecI32x4:
+ case ExtendHighUVecI16x8ToVecI32x4:
+ case ExtendLowSVecI32x4ToVecI64x2:
+ case ExtendHighSVecI32x4ToVecI64x2:
+ case ExtendLowUVecI32x4ToVecI64x2:
+ case ExtendHighUVecI32x4ToVecI64x2:
case ConvertLowSVecI32x4ToVecF64x2:
case ConvertLowUVecI32x4ToVecF64x2:
case TruncSatZeroSVecF64x2ToVecI32x4:
@@ -377,6 +373,11 @@ struct CostAnalyzer : public OverriddenVisitor<CostAnalyzer, Index> {
case GeSVecI32x4:
case GeUVecI32x4:
case EqVecI64x2:
+ case NeVecI64x2:
+ case LtSVecI64x2:
+ case LeSVecI64x2:
+ case GtSVecI64x2:
+ case GeSVecI64x2:
case EqVecF32x4:
case NeVecF32x4:
case LtVecF32x4:
@@ -399,11 +400,6 @@ struct CostAnalyzer : public OverriddenVisitor<CostAnalyzer, Index> {
case SubVecI8x16:
case SubSatSVecI8x16:
case SubSatUVecI8x16:
- ret = 1;
- break;
- case MulVecI8x16:
- ret = 2;
- break;
case MinSVecI8x16:
case MinUVecI8x16:
case MaxSVecI8x16:
@@ -522,29 +518,17 @@ struct CostAnalyzer : public OverriddenVisitor<CostAnalyzer, Index> {
Index ret = 0;
switch (curr->op) {
case Bitselect:
- case SignSelectVec8x16:
- case SignSelectVec16x8:
- case SignSelectVec32x4:
- case SignSelectVec64x2:
ret = 1;
break;
- case QFMAF32x4:
- case QFMSF32x4:
- case QFMAF64x2:
- case QFMSF64x2:
- ret = 2;
- break;
}
return ret + visit(curr->a) + visit(curr->b) + visit(curr->c);
}
Index visitSIMDShift(SIMDShift* curr) {
return 1 + visit(curr->vec) + visit(curr->shift);
}
- Index visitSIMDWiden(SIMDWiden* curr) { return 1 + visit(curr->vec); }
Index visitSIMDShuffle(SIMDShuffle* curr) {
return 1 + visit(curr->left) + visit(curr->right);
}
- Index visitPrefetch(Prefetch* curr) { return 0 + visit(curr->ptr); }
Index visitRefNull(RefNull* curr) { return 1; }
Index visitRefIs(RefIs* curr) { return 1 + visit(curr->value); }
Index visitRefFunc(RefFunc* curr) { return 1; }
diff --git a/src/ir/effects.h b/src/ir/effects.h
index 444099fe9..de228c3f4 100644
--- a/src/ir/effects.h
+++ b/src/ir/effects.h
@@ -440,12 +440,6 @@ private:
}
parent.implicitTrap = true;
}
- void visitSIMDWiden(SIMDWiden* curr) {}
- void visitPrefetch(Prefetch* curr) {
- // Do not reorder with respect to other memory ops
- parent.writesMemory = true;
- parent.readsMemory = true;
- }
void visitMemoryInit(MemoryInit* curr) {
parent.writesMemory = true;
parent.implicitTrap = true;
diff --git a/src/ir/features.h b/src/ir/features.h
index aa87bd5bd..791a4dded 100644
--- a/src/ir/features.h
+++ b/src/ir/features.h
@@ -48,13 +48,10 @@ inline FeatureSet get(UnaryOp op) {
case SplatVecF64x2:
case NotVec128:
case NegVecI8x16:
- case AnyTrueVecI8x16:
case AllTrueVecI8x16:
case NegVecI16x8:
- case AnyTrueVecI16x8:
case AllTrueVecI16x8:
case NegVecI32x4:
- case AnyTrueVecI32x4:
case AllTrueVecI32x4:
case NegVecI64x2:
case AbsVecF32x4:
@@ -65,12 +62,8 @@ inline FeatureSet get(UnaryOp op) {
case SqrtVecF64x2:
case TruncSatSVecF32x4ToVecI32x4:
case TruncSatUVecF32x4ToVecI32x4:
- case TruncSatSVecF64x2ToVecI64x2:
- case TruncSatUVecF64x2ToVecI64x2:
case ConvertSVecI32x4ToVecF32x4:
- case ConvertUVecI32x4ToVecF32x4:
- case ConvertSVecI64x2ToVecF64x2:
- case ConvertUVecI64x2ToVecF64x2: {
+ case ConvertUVecI32x4ToVecF32x4: {
ret.setSIMD();
break;
}
@@ -141,7 +134,6 @@ inline FeatureSet get(BinaryOp op) {
case SubVecI8x16:
case SubSatSVecI8x16:
case SubSatUVecI8x16:
- case MulVecI8x16:
case AddVecI16x8:
case AddSatSVecI16x8:
case AddSatUVecI16x8:
diff --git a/src/js/binaryen.js-post.js b/src/js/binaryen.js-post.js
index bafbaa480..4c8a8c91e 100644
--- a/src/js/binaryen.js-post.js
+++ b/src/js/binaryen.js-post.js
@@ -359,9 +359,9 @@ function initializeConstants() {
'XorVec128',
'AndNotVec128',
'BitselectVec128',
+ 'AnyTrueVec128',
'AbsVecI8x16',
'NegVecI8x16',
- 'AnyTrueVecI8x16',
'AllTrueVecI8x16',
'BitmaskVecI8x16',
'ShlVecI8x16',
@@ -373,7 +373,6 @@ function initializeConstants() {
'SubVecI8x16',
'SubSatSVecI8x16',
'SubSatUVecI8x16',
- 'MulVecI8x16',
'MinSVecI8x16',
'MinUVecI8x16',
'MaxSVecI8x16',
@@ -381,7 +380,6 @@ function initializeConstants() {
'AvgrUVecI8x16',
'AbsVecI16x8',
'NegVecI16x8',
- 'AnyTrueVecI16x8',
'AllTrueVecI16x8',
'BitmaskVecI16x8',
'ShlVecI16x8',
@@ -402,7 +400,6 @@ function initializeConstants() {
'DotSVecI16x8ToVecI32x4',
'AbsVecI32x4',
'NegVecI32x4',
- 'AnyTrueVecI32x4',
'AllTrueVecI32x4',
'BitmaskVecI32x4',
'ShlVecI32x4',
@@ -425,8 +422,6 @@ function initializeConstants() {
'AbsVecF32x4',
'NegVecF32x4',
'SqrtVecF32x4',
- 'QFMAVecF32x4',
- 'QFMSVecF32x4',
'AddVecF32x4',
'SubVecF32x4',
'MulVecF32x4',
@@ -442,8 +437,6 @@ function initializeConstants() {
'AbsVecF64x2',
'NegVecF64x2',
'SqrtVecF64x2',
- 'QFMAVecF64x2',
- 'QFMSVecF64x2',
'AddVecF64x2',
'SubVecF64x2',
'MulVecF64x2',
@@ -458,12 +451,8 @@ function initializeConstants() {
'NearestVecF64x2',
'TruncSatSVecF32x4ToVecI32x4',
'TruncSatUVecF32x4ToVecI32x4',
- 'TruncSatSVecF64x2ToVecI64x2',
- 'TruncSatUVecF64x2ToVecI64x2',
'ConvertSVecI32x4ToVecF32x4',
'ConvertUVecI32x4ToVecF32x4',
- 'ConvertSVecI64x2ToVecF64x2',
- 'ConvertUVecI64x2ToVecF64x2',
'LoadSplatVec8x16',
'LoadSplatVec16x8',
'LoadSplatVec32x4',
@@ -478,14 +467,14 @@ function initializeConstants() {
'NarrowUVecI16x8ToVecI8x16',
'NarrowSVecI32x4ToVecI16x8',
'NarrowUVecI32x4ToVecI16x8',
- 'WidenLowSVecI8x16ToVecI16x8',
- 'WidenHighSVecI8x16ToVecI16x8',
- 'WidenLowUVecI8x16ToVecI16x8',
- 'WidenHighUVecI8x16ToVecI16x8',
- 'WidenLowSVecI16x8ToVecI32x4',
- 'WidenHighSVecI16x8ToVecI32x4',
- 'WidenLowUVecI16x8ToVecI32x4',
- 'WidenHighUVecI16x8ToVecI32x4',
+ 'ExtendLowSVecI8x16ToVecI16x8',
+ 'ExtendHighSVecI8x16ToVecI16x8',
+ 'ExtendLowUVecI8x16ToVecI16x8',
+ 'ExtendHighUVecI8x16ToVecI16x8',
+ 'ExtendLowSVecI16x8ToVecI32x4',
+ 'ExtendHighSVecI16x8ToVecI32x4',
+ 'ExtendLowUVecI16x8ToVecI32x4',
+ 'ExtendHighUVecI16x8ToVecI32x4',
'SwizzleVec8x16',
'RefIsNull',
'RefIsFunc',
@@ -1434,6 +1423,36 @@ function wrapModule(module, self = {}) {
'load'(offset, align, ptr) {
return Module['_BinaryenLoad'](module, 16, false, offset, align, Module['v128'], ptr);
},
+ 'load8_splat'(offset, align, ptr) {
+ return Module['_BinaryenSIMDLoad'](module, Module['LoadSplatVec8x16'], offset, align, ptr);
+ },
+ 'load16_splat'(offset, align, ptr) {
+ return Module['_BinaryenSIMDLoad'](module, Module['LoadSplatVec16x8'], offset, align, ptr);
+ },
+ 'load32_splat'(offset, align, ptr) {
+ return Module['_BinaryenSIMDLoad'](module, Module['LoadSplatVec32x4'], offset, align, ptr);
+ },
+ 'load64_splat'(offset, align, ptr) {
+ return Module['_BinaryenSIMDLoad'](module, Module['LoadSplatVec64x2'], offset, align, ptr);
+ },
+ 'load8x8_s'(offset, align, ptr) {
+ return Module['_BinaryenSIMDLoad'](module, Module['LoadExtSVec8x8ToVecI16x8'], offset, align, ptr);
+ },
+ 'load8x8_u'(offset, align, ptr) {
+ return Module['_BinaryenSIMDLoad'](module, Module['LoadExtUVec8x8ToVecI16x8'], offset, align, ptr);
+ },
+ 'load16x4_s'(offset, align, ptr) {
+ return Module['_BinaryenSIMDLoad'](module, Module['LoadExtSVec16x4ToVecI32x4'], offset, align, ptr);
+ },
+ 'load16x4_u'(offset, align, ptr) {
+ return Module['_BinaryenSIMDLoad'](module, Module['LoadExtUVec16x4ToVecI32x4'], offset, align, ptr);
+ },
+ 'load32x2_s'(offset, align, ptr) {
+ return Module['_BinaryenSIMDLoad'](module, Module['LoadExtSVec32x2ToVecI64x2'], offset, align, ptr);
+ },
+ 'load32x2_u'(offset, align, ptr) {
+ return Module['_BinaryenSIMDLoad'](module, Module['LoadExtUVec32x2ToVecI64x2'], offset, align, ptr);
+ },
'store'(offset, align, ptr, value) {
return Module['_BinaryenStore'](module, 16, offset, align, ptr, value, Module['v128']);
},
@@ -1447,6 +1466,9 @@ function wrapModule(module, self = {}) {
'not'(value) {
return Module['_BinaryenUnary'](module, Module['NotVec128'], value);
},
+ 'any_true'(value) {
+ return Module['_BinaryenUnary'](module, Module['AnyTrueVec128'], value);
+ },
'and'(left, right) {
return Module['_BinaryenBinary'](module, Module['AndVec128'], left, right);
},
@@ -1468,6 +1490,12 @@ function wrapModule(module, self = {}) {
};
self['i8x16'] = {
+ 'shuffle'(left, right, mask) {
+ return preserveStack(() => Module['_BinaryenSIMDShuffle'](module, left, right, i8sToStack(mask)));
+ },
+ 'swizzle'(left, right) {
+ return Module['_BinaryenBinary'](module, Module['SwizzleVec8x16'], left, right);
+ },
'splat'(value) {
return Module['_BinaryenUnary'](module, Module['SplatVecI8x16'], value);
},
@@ -1516,9 +1544,6 @@ function wrapModule(module, self = {}) {
'neg'(value) {
return Module['_BinaryenUnary'](module, Module['NegVecI8x16'], value);
},
- 'any_true'(value) {
- return Module['_BinaryenUnary'](module, Module['AnyTrueVecI8x16'], value);
- },
'all_true'(value) {
return Module['_BinaryenUnary'](module, Module['AllTrueVecI8x16'], value);
},
@@ -1552,9 +1577,6 @@ function wrapModule(module, self = {}) {
'sub_saturate_u'(left, right) {
return Module['_BinaryenBinary'](module, Module['SubSatUVecI8x16'], left, right);
},
- 'mul'(left, right) {
- return Module['_BinaryenBinary'](module, Module['MulVecI8x16'], left, right);
- },
'min_s'(left, right) {
return Module['_BinaryenBinary'](module, Module['MinSVecI8x16'], left, right);
},
@@ -1627,9 +1649,6 @@ function wrapModule(module, self = {}) {
'neg'(value) {
return Module['_BinaryenUnary'](module, Module['NegVecI16x8'], value);
},
- 'any_true'(value) {
- return Module['_BinaryenUnary'](module, Module['AnyTrueVecI16x8'], value);
- },
'all_true'(value) {
return Module['_BinaryenUnary'](module, Module['AllTrueVecI16x8'], value);
},
@@ -1687,23 +1706,17 @@ function wrapModule(module, self = {}) {
'narrow_i32x4_u'(left, right) {
return Module['_BinaryenBinary'](module, Module['NarrowUVecI32x4ToVecI16x8'], left, right);
},
- 'widen_low_i8x16_s'(value) {
- return Module['_BinaryenUnary'](module, Module['WidenLowSVecI8x16ToVecI16x8'], value);
+ 'extend_low_i8x16_s'(value) {
+ return Module['_BinaryenUnary'](module, Module['ExtendLowSVecI8x16ToVecI16x8'], value);
},
- 'widen_high_i8x16_s'(value) {
- return Module['_BinaryenUnary'](module, Module['WidenHighSVecI8x16ToVecI16x8'], value);
+ 'extend_high_i8x16_s'(value) {
+ return Module['_BinaryenUnary'](module, Module['ExtendHighSVecI8x16ToVecI16x8'], value);
},
- 'widen_low_i8x16_u'(value) {
- return Module['_BinaryenUnary'](module, Module['WidenLowUVecI8x16ToVecI16x8'], value);
+ 'extend_low_i8x16_u'(value) {
+ return Module['_BinaryenUnary'](module, Module['ExtendLowUVecI8x16ToVecI16x8'], value);
},
- 'widen_high_i8x16_u'(value) {
- return Module['_BinaryenUnary'](module, Module['WidenHighUVecI8x16ToVecI16x8'], value);
- },
- 'load8x8_s'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['LoadExtSVec8x8ToVecI16x8'], offset, align, ptr);
- },
- 'load8x8_u'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['LoadExtUVec8x8ToVecI16x8'], offset, align, ptr);
+ 'extend_high_i8x16_u'(value) {
+ return Module['_BinaryenUnary'](module, Module['ExtendHighUVecI8x16ToVecI16x8'], value);
},
};
@@ -1753,9 +1766,6 @@ function wrapModule(module, self = {}) {
'neg'(value) {
return Module['_BinaryenUnary'](module, Module['NegVecI32x4'], value);
},
- 'any_true'(value) {
- return Module['_BinaryenUnary'](module, Module['AnyTrueVecI32x4'], value);
- },
'all_true'(value) {
return Module['_BinaryenUnary'](module, Module['AllTrueVecI32x4'], value);
},
@@ -1801,23 +1811,17 @@ function wrapModule(module, self = {}) {
'trunc_sat_f32x4_u'(value) {
return Module['_BinaryenUnary'](module, Module['TruncSatUVecF32x4ToVecI32x4'], value);
},
- 'widen_low_i16x8_s'(value) {
- return Module['_BinaryenUnary'](module, Module['WidenLowSVecI16x8ToVecI32x4'], value);
+ 'extend_low_i16x8_s'(value) {
+ return Module['_BinaryenUnary'](module, Module['ExtendLowSVecI16x8ToVecI32x4'], value);
},
- 'widen_high_i16x8_s'(value) {
- return Module['_BinaryenUnary'](module, Module['WidenHighSVecI16x8ToVecI32x4'], value);
+ 'extend_high_i16x8_s'(value) {
+ return Module['_BinaryenUnary'](module, Module['ExtendHighSVecI16x8ToVecI32x4'], value);
},
- 'widen_low_i16x8_u'(value) {
- return Module['_BinaryenUnary'](module, Module['WidenLowUVecI16x8ToVecI32x4'], value);
- },
- 'widen_high_i16x8_u'(value) {
- return Module['_BinaryenUnary'](module, Module['WidenHighUVecI16x8ToVecI32x4'], value);
- },
- 'load16x4_s'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['LoadExtSVec16x4ToVecI32x4'], offset, align, ptr);
+ 'extend_low_i16x8_u'(value) {
+ return Module['_BinaryenUnary'](module, Module['ExtendLowUVecI16x8ToVecI32x4'], value);
},
- 'load16x4_u'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['LoadExtUVec16x4ToVecI32x4'], offset, align, ptr);
+ 'extend_high_i16x8_u'(value) {
+ return Module['_BinaryenUnary'](module, Module['ExtendHighUVecI16x8ToVecI32x4'], value);
},
};
@@ -1852,18 +1856,6 @@ function wrapModule(module, self = {}) {
'mul'(left, right) {
return Module['_BinaryenBinary'](module, Module['MulVecI64x2'], left, right);
},
- 'trunc_sat_f64x2_s'(value) {
- return Module['_BinaryenUnary'](module, Module['TruncSatSVecF64x2ToVecI64x2'], value);
- },
- 'trunc_sat_f64x2_u'(value) {
- return Module['_BinaryenUnary'](module, Module['TruncSatUVecF64x2ToVecI64x2'], value);
- },
- 'load32x2_s'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['LoadExtSVec32x2ToVecI64x2'], offset, align, ptr);
- },
- 'load32x2_u'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['LoadExtUVec32x2ToVecI64x2'], offset, align, ptr);
- },
};
self['f32x4'] = {
@@ -1903,12 +1895,6 @@ function wrapModule(module, self = {}) {
'sqrt'(value) {
return Module['_BinaryenUnary'](module, Module['SqrtVecF32x4'], value);
},
- 'qfma'(a, b, c) {
- return Module['_BinaryenSIMDTernary'](module, Module['QFMAVecF32x4'], a, b, c);
- },
- 'qfms'(a, b, c) {
- return Module['_BinaryenSIMDTernary'](module, Module['QFMSVecF32x4'], a, b, c);
- },
'add'(left, right) {
return Module['_BinaryenBinary'](module, Module['AddVecF32x4'], left, right);
},
@@ -1990,12 +1976,6 @@ function wrapModule(module, self = {}) {
'sqrt'(value) {
return Module['_BinaryenUnary'](module, Module['SqrtVecF64x2'], value);
},
- 'qfma'(a, b, c) {
- return Module['_BinaryenSIMDTernary'](module, Module['QFMAVecF64x2'], a, b, c);
- },
- 'qfms'(a, b, c) {
- return Module['_BinaryenSIMDTernary'](module, Module['QFMSVecF64x2'], a, b, c);
- },
'add'(left, right) {
return Module['_BinaryenBinary'](module, Module['AddVecF64x2'], left, right);
},
@@ -2032,42 +2012,6 @@ function wrapModule(module, self = {}) {
'nearest'(value) {
return Module['_BinaryenUnary'](module, Module['NearestVecF64x2'], value);
},
- 'convert_i64x2_s'(value) {
- return Module['_BinaryenUnary'](module, Module['ConvertSVecI64x2ToVecF64x2'], value);
- },
- 'convert_i64x2_u'(value) {
- return Module['_BinaryenUnary'](module, Module['ConvertUVecI64x2ToVecF64x2'], value);
- },
- };
-
- self['v8x16'] = {
- 'shuffle'(left, right, mask) {
- return preserveStack(() => Module['_BinaryenSIMDShuffle'](module, left, right, i8sToStack(mask)));
- },
- 'swizzle'(left, right) {
- return Module['_BinaryenBinary'](module, Module['SwizzleVec8x16'], left, right);
- },
- 'load_splat'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['LoadSplatVec8x16'], offset, align, ptr);
- },
- };
-
- self['v16x8'] = {
- 'load_splat'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['LoadSplatVec16x8'], offset, align, ptr);
- },
- };
-
- self['v32x4'] = {
- 'load_splat'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['LoadSplatVec32x4'], offset, align, ptr);
- },
- };
-
- self['v64x2'] = {
- 'load_splat'(offset, align, ptr) {
- return Module['_BinaryenSIMDLoad'](module, Module['LoadSplatVec64x2'], offset, align, ptr);
- },
};
self['funcref'] = {
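
For orientation, here is a minimal sketch (not part of this patch) of how the renamed binaryen.js SIMD helpers shown above might be driven. The `require`, `Module` construction, `setFeatures`, and `i32.const` calls are assumed from the wider binaryen.js API; the `v128` and `i8x16` helpers and their `(offset, align, ptr)` / `(left, right, mask)` shapes are the ones added in this file.

    // Sketch, assuming the standard binaryen.js entry points exist as named here.
    var binaryen = require("binaryen");

    var module = new binaryen.Module();
    module.setFeatures(binaryen.Features.SIMD);

    var ptr = module.i32.const(0);
    // v128.load8_splat replaces the old v8x16.load_splat helper.
    var splatted = module.v128.load8_splat(0, 1, ptr);
    // v128.any_true replaces the per-shape iNxM.any_true helpers.
    var any = module.v128.any_true(splatted);
    // i8x16.shuffle replaces v8x16.shuffle; the mask is 16 lane indices.
    var shuffled = module.i8x16.shuffle(
      module.v128.load(0, 1, module.i32.const(0)),
      module.v128.load(0, 1, module.i32.const(16)),
      [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]);
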
diff --git a/src/literal.h b/src/literal.h
index 06719cd32..a07e2597c 100644
--- a/src/literal.h
+++ b/src/literal.h
@@ -482,6 +482,11 @@ public:
Literal geSI32x4(const Literal& other) const;
Literal geUI32x4(const Literal& other) const;
Literal eqI64x2(const Literal& other) const;
+ Literal neI64x2(const Literal& other) const;
+ Literal ltSI64x2(const Literal& other) const;
+ Literal gtSI64x2(const Literal& other) const;
+ Literal leSI64x2(const Literal& other) const;
+ Literal geSI64x2(const Literal& other) const;
Literal eqF32x4(const Literal& other) const;
Literal neF32x4(const Literal& other) const;
Literal ltF32x4(const Literal& other) const;
@@ -498,10 +503,10 @@ public:
Literal andV128(const Literal& other) const;
Literal orV128(const Literal& other) const;
Literal xorV128(const Literal& other) const;
+ Literal anyTrueV128() const;
Literal bitselectV128(const Literal& left, const Literal& right) const;
Literal absI8x16() const;
Literal negI8x16() const;
- Literal anyTrueI8x16() const;
Literal allTrueI8x16() const;
Literal bitmaskI8x16() const;
Literal shlI8x16(const Literal& other) const;
@@ -513,7 +518,6 @@ public:
Literal subI8x16(const Literal& other) const;
Literal subSaturateSI8x16(const Literal& other) const;
Literal subSaturateUI8x16(const Literal& other) const;
- Literal mulI8x16(const Literal& other) const;
Literal minSI8x16(const Literal& other) const;
Literal minUI8x16(const Literal& other) const;
Literal maxSI8x16(const Literal& other) const;
@@ -522,7 +526,6 @@ public:
Literal popcntI8x16() const;
Literal absI16x8() const;
Literal negI16x8() const;
- Literal anyTrueI16x8() const;
Literal allTrueI16x8() const;
Literal bitmaskI16x8() const;
Literal shlI16x8(const Literal& other) const;
@@ -547,7 +550,6 @@ public:
Literal extMulHighUI16x8(const Literal& other) const;
Literal absI32x4() const;
Literal negI32x4() const;
- Literal anyTrueI32x4() const;
Literal allTrueI32x4() const;
Literal bitmaskI32x4() const;
Literal shlI32x4(const Literal& other) const;
@@ -565,7 +567,9 @@ public:
Literal extMulHighSI32x4(const Literal& other) const;
Literal extMulLowUI32x4(const Literal& other) const;
Literal extMulHighUI32x4(const Literal& other) const;
+ Literal absI64x2() const;
Literal negI64x2() const;
+ Literal allTrueI64x2() const;
Literal shlI64x2(const Literal& other) const;
Literal shrSI64x2(const Literal& other) const;
Literal shrUI64x2(const Literal& other) const;
@@ -608,24 +612,20 @@ public:
Literal nearestF64x2() const;
Literal truncSatToSI32x4() const;
Literal truncSatToUI32x4() const;
- Literal truncSatToSI64x2() const;
- Literal truncSatToUI64x2() const;
Literal convertSToF32x4() const;
Literal convertUToF32x4() const;
- Literal convertSToF64x2() const;
- Literal convertUToF64x2() const;
Literal narrowSToVecI8x16(const Literal& other) const;
Literal narrowUToVecI8x16(const Literal& other) const;
Literal narrowSToVecI16x8(const Literal& other) const;
Literal narrowUToVecI16x8(const Literal& other) const;
- Literal widenLowSToVecI16x8() const;
- Literal widenHighSToVecI16x8() const;
- Literal widenLowUToVecI16x8() const;
- Literal widenHighUToVecI16x8() const;
- Literal widenLowSToVecI32x4() const;
- Literal widenHighSToVecI32x4() const;
- Literal widenLowUToVecI32x4() const;
- Literal widenHighUToVecI32x4() const;
+ Literal extendLowSToVecI16x8() const;
+ Literal extendHighSToVecI16x8() const;
+ Literal extendLowUToVecI16x8() const;
+ Literal extendHighUToVecI16x8() const;
+ Literal extendLowSToVecI32x4() const;
+ Literal extendHighSToVecI32x4() const;
+ Literal extendLowUToVecI32x4() const;
+ Literal extendHighUToVecI32x4() const;
Literal swizzleVec8x16(const Literal& other) const;
// Checks if an RTT value is a sub-rtt of another, that is, whether GC data
diff --git a/src/passes/Print.cpp b/src/passes/Print.cpp
index e3ea04b53..5f8c9f163 100644
--- a/src/passes/Print.cpp
+++ b/src/passes/Print.cpp
@@ -639,7 +639,7 @@ struct PrintExpressionContents
}
void visitSIMDShuffle(SIMDShuffle* curr) {
prepareColor(o);
- o << "v8x16.shuffle";
+ o << "i8x16.shuffle";
restoreNormalColor(o);
for (uint8_t mask_index : curr->mask) {
o << " " << std::to_string(mask_index);
@@ -651,30 +651,6 @@ struct PrintExpressionContents
case Bitselect:
o << "v128.bitselect";
break;
- case QFMAF32x4:
- o << "f32x4.qfma";
- break;
- case QFMSF32x4:
- o << "f32x4.qfms";
- break;
- case QFMAF64x2:
- o << "f64x2.qfma";
- break;
- case QFMSF64x2:
- o << "f64x2.qfms";
- break;
- case SignSelectVec8x16:
- o << "v8x16.signselect";
- break;
- case SignSelectVec16x8:
- o << "v16x8.signselect";
- break;
- case SignSelectVec32x4:
- o << "v32x4.signselect";
- break;
- case SignSelectVec64x2:
- o << "v64x2.signselect";
- break;
}
restoreNormalColor(o);
}
@@ -724,34 +700,34 @@ struct PrintExpressionContents
prepareColor(o);
switch (curr->op) {
case LoadSplatVec8x16:
- o << "v8x16.load_splat";
+ o << "v128.load8_splat";
break;
case LoadSplatVec16x8:
- o << "v16x8.load_splat";
+ o << "v128.load16_splat";
break;
case LoadSplatVec32x4:
- o << "v32x4.load_splat";
+ o << "v128.load32_splat";
break;
case LoadSplatVec64x2:
- o << "v64x2.load_splat";
+ o << "v128.load64_splat";
break;
case LoadExtSVec8x8ToVecI16x8:
- o << "i16x8.load8x8_s";
+ o << "v128.load8x8_s";
break;
case LoadExtUVec8x8ToVecI16x8:
- o << "i16x8.load8x8_u";
+ o << "v128.load8x8_u";
break;
case LoadExtSVec16x4ToVecI32x4:
- o << "i32x4.load16x4_s";
+ o << "v128.load16x4_s";
break;
case LoadExtUVec16x4ToVecI32x4:
- o << "i32x4.load16x4_u";
+ o << "v128.load16x4_u";
break;
case LoadExtSVec32x2ToVecI64x2:
- o << "i64x2.load32x2_s";
+ o << "v128.load32x2_s";
break;
case LoadExtUVec32x2ToVecI64x2:
- o << "i64x2.load32x2_u";
+ o << "v128.load32x2_u";
break;
case Load32Zero:
o << "v128.load32_zero";
@@ -805,37 +781,6 @@ struct PrintExpressionContents
}
o << " " << int(curr->index);
}
- void visitSIMDWiden(SIMDWiden* curr) {
- prepareColor(o);
- switch (curr->op) {
- case WidenSVecI8x16ToVecI32x4:
- o << "i32x4.widen_i8x16_s ";
- break;
- case WidenUVecI8x16ToVecI32x4:
- o << "i32x4.widen_i8x16_u ";
- break;
- }
- restoreNormalColor(o);
- o << int(curr->index);
- }
- void visitPrefetch(Prefetch* curr) {
- prepareColor(o);
- switch (curr->op) {
- case PrefetchTemporal:
- o << "prefetch.t";
- break;
- case PrefetchNontemporal:
- o << "prefetch.nt";
- break;
- }
- restoreNormalColor(o);
- if (curr->offset) {
- o << " offset=" << curr->offset;
- }
- if (curr->align != 1) {
- o << " align=" << curr->align;
- }
- }
void visitMemoryInit(MemoryInit* curr) {
prepareColor(o);
o << "memory.init";
@@ -1065,15 +1010,15 @@ struct PrintExpressionContents
case NotVec128:
o << "v128.not";
break;
+ case AnyTrueVec128:
+ o << "v128.any_true";
+ break;
case AbsVecI8x16:
o << "i8x16.abs";
break;
case NegVecI8x16:
o << "i8x16.neg";
break;
- case AnyTrueVecI8x16:
- o << "i8x16.any_true";
- break;
case AllTrueVecI8x16:
o << "i8x16.all_true";
break;
@@ -1089,9 +1034,6 @@ struct PrintExpressionContents
case NegVecI16x8:
o << "i16x8.neg";
break;
- case AnyTrueVecI16x8:
- o << "i16x8.any_true";
- break;
case AllTrueVecI16x8:
o << "i16x8.all_true";
break;
@@ -1104,18 +1046,21 @@ struct PrintExpressionContents
case NegVecI32x4:
o << "i32x4.neg";
break;
- case AnyTrueVecI32x4:
- o << "i32x4.any_true";
- break;
case AllTrueVecI32x4:
o << "i32x4.all_true";
break;
case BitmaskVecI32x4:
o << "i32x4.bitmask";
break;
+ case AbsVecI64x2:
+ o << "i64x2.abs";
+ break;
case NegVecI64x2:
o << "i64x2.neg";
break;
+ case AllTrueVecI64x2:
+ o << "i64x2.all_true";
+ break;
case BitmaskVecI64x2:
o << "i64x2.bitmask";
break;
@@ -1179,59 +1124,47 @@ struct PrintExpressionContents
case TruncSatUVecF32x4ToVecI32x4:
o << "i32x4.trunc_sat_f32x4_u";
break;
- case TruncSatSVecF64x2ToVecI64x2:
- o << "i64x2.trunc_sat_f64x2_s";
- break;
- case TruncSatUVecF64x2ToVecI64x2:
- o << "i64x2.trunc_sat_f64x2_u";
- break;
case ConvertSVecI32x4ToVecF32x4:
o << "f32x4.convert_i32x4_s";
break;
case ConvertUVecI32x4ToVecF32x4:
o << "f32x4.convert_i32x4_u";
break;
- case ConvertSVecI64x2ToVecF64x2:
- o << "f64x2.convert_i64x2_s";
- break;
- case ConvertUVecI64x2ToVecF64x2:
- o << "f64x2.convert_i64x2_u";
+ case ExtendLowSVecI8x16ToVecI16x8:
+ o << "i16x8.extend_low_i8x16_s";
break;
- case WidenLowSVecI8x16ToVecI16x8:
- o << "i16x8.widen_low_i8x16_s";
+ case ExtendHighSVecI8x16ToVecI16x8:
+ o << "i16x8.extend_high_i8x16_s";
break;
- case WidenHighSVecI8x16ToVecI16x8:
- o << "i16x8.widen_high_i8x16_s";
+ case ExtendLowUVecI8x16ToVecI16x8:
+ o << "i16x8.extend_low_i8x16_u";
break;
- case WidenLowUVecI8x16ToVecI16x8:
- o << "i16x8.widen_low_i8x16_u";
+ case ExtendHighUVecI8x16ToVecI16x8:
+ o << "i16x8.extend_high_i8x16_u";
break;
- case WidenHighUVecI8x16ToVecI16x8:
- o << "i16x8.widen_high_i8x16_u";
+ case ExtendLowSVecI16x8ToVecI32x4:
+ o << "i32x4.extend_low_i16x8_s";
break;
- case WidenLowSVecI16x8ToVecI32x4:
- o << "i32x4.widen_low_i16x8_s";
+ case ExtendHighSVecI16x8ToVecI32x4:
+ o << "i32x4.extend_high_i16x8_s";
break;
- case WidenHighSVecI16x8ToVecI32x4:
- o << "i32x4.widen_high_i16x8_s";
+ case ExtendLowUVecI16x8ToVecI32x4:
+ o << "i32x4.extend_low_i16x8_u";
break;
- case WidenLowUVecI16x8ToVecI32x4:
- o << "i32x4.widen_low_i16x8_u";
+ case ExtendHighUVecI16x8ToVecI32x4:
+ o << "i32x4.extend_high_i16x8_u";
break;
- case WidenHighUVecI16x8ToVecI32x4:
- o << "i32x4.widen_high_i16x8_u";
+ case ExtendLowSVecI32x4ToVecI64x2:
+ o << "i64x2.extend_low_i32x4_s";
break;
- case WidenLowSVecI32x4ToVecI64x2:
- o << "i64x2.widen_low_i32x4_s";
+ case ExtendHighSVecI32x4ToVecI64x2:
+ o << "i64x2.extend_high_i32x4_s";
break;
- case WidenHighSVecI32x4ToVecI64x2:
- o << "i64x2.widen_high_i32x4_s";
+ case ExtendLowUVecI32x4ToVecI64x2:
+ o << "i64x2.extend_low_i32x4_u";
break;
- case WidenLowUVecI32x4ToVecI64x2:
- o << "i64x2.widen_low_i32x4_u";
- break;
- case WidenHighUVecI32x4ToVecI64x2:
- o << "i64x2.widen_high_i32x4_u";
+ case ExtendHighUVecI32x4ToVecI64x2:
+ o << "i64x2.extend_high_i32x4_u";
break;
case ConvertLowSVecI32x4ToVecF64x2:
o << "f64x2.convert_low_i32x4_s";
@@ -1240,10 +1173,10 @@ struct PrintExpressionContents
o << "f64x2.convert_low_i32x4_u";
break;
case TruncSatZeroSVecF64x2ToVecI32x4:
- o << "i32x4.trunc_sat_f64x2_zero_s";
+ o << "i32x4.trunc_sat_f64x2_s_zero";
break;
case TruncSatZeroUVecF64x2ToVecI32x4:
- o << "i32x4.trunc_sat_f64x2_zero_u";
+ o << "i32x4.trunc_sat_f64x2_u_zero";
break;
case DemoteZeroVecF64x2ToVecF32x4:
o << "f32x4.demote_f64x2_zero";
@@ -1584,6 +1517,21 @@ struct PrintExpressionContents
case EqVecI64x2:
o << "i64x2.eq";
break;
+ case NeVecI64x2:
+ o << "i64x2.ne";
+ break;
+ case LtSVecI64x2:
+ o << "i64x2.lt_s";
+ break;
+ case GtSVecI64x2:
+ o << "i64x2.gt_s";
+ break;
+ case LeSVecI64x2:
+ o << "i64x2.le_s";
+ break;
+ case GeSVecI64x2:
+ o << "i64x2.ge_s";
+ break;
case EqVecF32x4:
o << "f32x4.eq";
break;
@@ -1638,22 +1586,19 @@ struct PrintExpressionContents
o << "i8x16.add";
break;
case AddSatSVecI8x16:
- o << "i8x16.add_saturate_s";
+ o << "i8x16.add_sat_s";
break;
case AddSatUVecI8x16:
- o << "i8x16.add_saturate_u";
+ o << "i8x16.add_sat_u";
break;
case SubVecI8x16:
o << "i8x16.sub";
break;
case SubSatSVecI8x16:
- o << "i8x16.sub_saturate_s";
+ o << "i8x16.sub_sat_s";
break;
case SubSatUVecI8x16:
- o << "i8x16.sub_saturate_u";
- break;
- case MulVecI8x16:
- o << "i8x16.mul";
+ o << "i8x16.sub_sat_u";
break;
case MinSVecI8x16:
o << "i8x16.min_s";
@@ -1674,19 +1619,19 @@ struct PrintExpressionContents
o << "i16x8.add";
break;
case AddSatSVecI16x8:
- o << "i16x8.add_saturate_s";
+ o << "i16x8.add_sat_s";
break;
case AddSatUVecI16x8:
- o << "i16x8.add_saturate_u";
+ o << "i16x8.add_sat_u";
break;
case SubVecI16x8:
o << "i16x8.sub";
break;
case SubSatSVecI16x8:
- o << "i16x8.sub_saturate_s";
+ o << "i16x8.sub_sat_s";
break;
case SubSatUVecI16x8:
- o << "i16x8.sub_saturate_u";
+ o << "i16x8.sub_sat_u";
break;
case MulVecI16x8:
o << "i16x8.mul";
@@ -1844,7 +1789,7 @@ struct PrintExpressionContents
break;
case SwizzleVec8x16:
- o << "v8x16.swizzle";
+ o << "i8x16.swizzle";
break;
case InvalidBinary:
diff --git a/src/tools/fuzzing.h b/src/tools/fuzzing.h
index e6a12543a..81eab1d28 100644
--- a/src/tools/fuzzing.h
+++ b/src/tools/fuzzing.h
@@ -2210,11 +2210,10 @@ private:
}
case Type::v128: {
assert(wasm.features.hasSIMD());
- return buildUnary({pick(AnyTrueVecI8x16,
+ // TODO: Add the other SIMD unary ops
+ return buildUnary({pick(AnyTrueVec128,
AllTrueVecI8x16,
- AnyTrueVecI16x8,
AllTrueVecI16x8,
- AnyTrueVecI32x4,
AllTrueVecI32x4),
make(Type::v128)});
}
@@ -2335,7 +2334,7 @@ private:
return buildUnary({SplatVecF64x2, make(Type::f64)});
case 4:
return buildUnary({pick(NotVec128,
- // TODO: i8x16.popcnt once merged
+ // TODO: add additional SIMD instructions
NegVecI8x16,
NegVecI16x8,
NegVecI32x4,
@@ -2348,20 +2347,16 @@ private:
SqrtVecF64x2,
TruncSatSVecF32x4ToVecI32x4,
TruncSatUVecF32x4ToVecI32x4,
- TruncSatSVecF64x2ToVecI64x2,
- TruncSatUVecF64x2ToVecI64x2,
ConvertSVecI32x4ToVecF32x4,
ConvertUVecI32x4ToVecF32x4,
- ConvertSVecI64x2ToVecF64x2,
- ConvertUVecI64x2ToVecF64x2,
- WidenLowSVecI8x16ToVecI16x8,
- WidenHighSVecI8x16ToVecI16x8,
- WidenLowUVecI8x16ToVecI16x8,
- WidenHighUVecI8x16ToVecI16x8,
- WidenLowSVecI16x8ToVecI32x4,
- WidenHighSVecI16x8ToVecI32x4,
- WidenLowUVecI16x8ToVecI32x4,
- WidenHighUVecI16x8ToVecI32x4),
+ ExtendLowSVecI8x16ToVecI16x8,
+ ExtendHighSVecI8x16ToVecI16x8,
+ ExtendLowUVecI8x16ToVecI16x8,
+ ExtendHighUVecI8x16ToVecI16x8,
+ ExtendLowSVecI16x8ToVecI32x4,
+ ExtendHighSVecI16x8ToVecI32x4,
+ ExtendLowUVecI16x8ToVecI32x4,
+ ExtendHighUVecI16x8ToVecI32x4),
make(Type::v128)});
}
WASM_UNREACHABLE("invalid value");
@@ -2558,7 +2553,6 @@ private:
SubVecI8x16,
SubSatSVecI8x16,
SubSatUVecI8x16,
- MulVecI8x16,
MinSVecI8x16,
MinUVecI8x16,
MaxSVecI8x16,
diff --git a/src/wasm-binary.h b/src/wasm-binary.h
index ec89bb1da..804df5811 100644
--- a/src/wasm-binary.h
+++ b/src/wasm-binary.h
@@ -803,7 +803,6 @@ enum ASTNodes {
I32x4LeU = 0x3e,
I32x4GeS = 0x3f,
I32x4GeU = 0x40,
- I64x2Eq = 0xc0,
F32x4Eq = 0x41,
F32x4Ne = 0x42,
F32x4Lt = 0x43,
@@ -823,28 +822,33 @@ enum ASTNodes {
V128Or = 0x50,
V128Xor = 0x51,
V128Bitselect = 0x52,
-
- V8x16SignSelect = 0x7d,
- V16x8SignSelect = 0x7e,
- V32x4SignSelect = 0x7f,
- V64x2SignSelect = 0x94,
-
- V128Load8Lane = 0x58,
- V128Load16Lane = 0x59,
- V128Load32Lane = 0x5a,
- V128Load64Lane = 0x5b,
- V128Store8Lane = 0x5c,
- V128Store16Lane = 0x5d,
- V128Store32Lane = 0x5e,
- V128Store64Lane = 0x5f,
+ V128AnyTrue = 0x53,
+
+ V128Load8Lane = 0x54,
+ V128Load16Lane = 0x55,
+ V128Load32Lane = 0x56,
+ V128Load64Lane = 0x57,
+ V128Store8Lane = 0x58,
+ V128Store16Lane = 0x59,
+ V128Store32Lane = 0x5a,
+ V128Store64Lane = 0x5b,
+ V128Load32Zero = 0x5c,
+ V128Load64Zero = 0x5d,
+
+ F32x4DemoteZeroF64x2 = 0x5e,
+ F64x2PromoteLowF32x4 = 0x5f,
I8x16Abs = 0x60,
I8x16Neg = 0x61,
- I8x16AnyTrue = 0x62,
+ I8x16Popcnt = 0x62,
I8x16AllTrue = 0x63,
I8x16Bitmask = 0x64,
I8x16NarrowSI16x8 = 0x65,
I8x16NarrowUI16x8 = 0x66,
+ F32x4Ceil = 0x67,
+ F32x4Floor = 0x68,
+ F32x4Trunc = 0x69,
+ F32x4Nearest = 0x6a,
I8x16Shl = 0x6b,
I8x16ShrS = 0x6c,
I8x16ShrU = 0x6d,
@@ -854,26 +858,30 @@ enum ASTNodes {
I8x16Sub = 0x71,
I8x16SubSatS = 0x72,
I8x16SubSatU = 0x73,
- I8x16Mul = 0x75,
+ F64x2Ceil = 0x74,
+ F64x2Floor = 0x75,
I8x16MinS = 0x76,
I8x16MinU = 0x77,
I8x16MaxS = 0x78,
I8x16MaxU = 0x79,
+ F64x2Trunc = 0x7a,
I8x16AvgrU = 0x7b,
-
- I8x16Popcnt = 0x7c,
+ I16x8ExtAddPairWiseSI8x16 = 0x7c,
+ I16x8ExtAddPairWiseUI8x16 = 0x7d,
+ I32x4ExtAddPairWiseSI16x8 = 0x7e,
+ I32x4ExtAddPairWiseUI16x8 = 0x7f,
I16x8Abs = 0x80,
I16x8Neg = 0x81,
- I16x8AnyTrue = 0x82,
+ I16x8Q15MulrSatS = 0x82,
I16x8AllTrue = 0x83,
I16x8Bitmask = 0x84,
I16x8NarrowSI32x4 = 0x85,
I16x8NarrowUI32x4 = 0x86,
- I16x8WidenLowSI8x16 = 0x87,
- I16x8WidenHighSI8x16 = 0x88,
- I16x8WidenLowUI8x16 = 0x89,
- I16x8WidenHighUI8x16 = 0x8a,
+ I16x8ExtendLowSI8x16 = 0x87,
+ I16x8ExtendHighSI8x16 = 0x88,
+ I16x8ExtendLowUI8x16 = 0x89,
+ I16x8ExtendHighUI8x16 = 0x8a,
I16x8Shl = 0x8b,
I16x8ShrS = 0x8c,
I16x8ShrU = 0x8d,
@@ -883,50 +891,88 @@ enum ASTNodes {
I16x8Sub = 0x91,
I16x8SubSatS = 0x92,
I16x8SubSatU = 0x93,
+ F64x2Nearest = 0x94,
I16x8Mul = 0x95,
I16x8MinS = 0x96,
I16x8MinU = 0x97,
I16x8MaxS = 0x98,
I16x8MaxU = 0x99,
+ // 0x9a unused
I16x8AvgrU = 0x9b,
- I16x8Q15MulrSatS = 0x9c,
+ I16x8ExtMulLowSI8x16 = 0x9c,
+ I16x8ExtMulHighSI8x16 = 0x9d,
+ I16x8ExtMulLowUI8x16 = 0x9e,
+ I16x8ExtMulHighUI8x16 = 0x9f,
I32x4Abs = 0xa0,
I32x4Neg = 0xa1,
- I32x4AnyTrue = 0xa2,
+ // 0xa2 unused
I32x4AllTrue = 0xa3,
I32x4Bitmask = 0xa4,
- I32x4WidenLowSI16x8 = 0xa7,
- I32x4WidenHighSI16x8 = 0xa8,
- I32x4WidenLowUI16x8 = 0xa9,
- I32x4WidenHighUI16x8 = 0xaa,
+ // 0xa5 unused
+ // 0xa6 unused
+ I32x4ExtendLowSI16x8 = 0xa7,
+ I32x4ExtendHighSI16x8 = 0xa8,
+ I32x4ExtendLowUI16x8 = 0xa9,
+ I32x4ExtendHighUI16x8 = 0xaa,
I32x4Shl = 0xab,
I32x4ShrS = 0xac,
I32x4ShrU = 0xad,
I32x4Add = 0xae,
+ // 0xaf unused
+ // 0xb0 unused
I32x4Sub = 0xb1,
+ // 0xb2 unused
+ // 0xb3 unused
+ // 0xb4 unused
I32x4Mul = 0xb5,
I32x4MinS = 0xb6,
I32x4MinU = 0xb7,
I32x4MaxS = 0xb8,
I32x4MaxU = 0xb9,
I32x4DotSVecI16x8 = 0xba,
+ // 0xbb unused
+ I32x4ExtMulLowSI16x8 = 0xbc,
+ I32x4ExtMulHighSI16x8 = 0xbd,
+ I32x4ExtMulLowUI16x8 = 0xbe,
+ I32x4ExtMulHighUI16x8 = 0xbf,
- I64x2Bitmask = 0xc4,
- I64x2WidenLowSI32x4 = 0xc7,
- I64x2WidenHighSI32x4 = 0xc8,
- I64x2WidenLowUI32x4 = 0xc9,
- I64x2WidenHighUI32x4 = 0xca,
+ I64x2Abs = 0xc0,
I64x2Neg = 0xc1,
+ // 0xc2 unused
+ I64x2AllTrue = 0xc3,
+ I64x2Bitmask = 0xc4,
+ // 0xc5 unused
+ // 0xc6 unused
+ I64x2ExtendLowSI32x4 = 0xc7,
+ I64x2ExtendHighSI32x4 = 0xc8,
+ I64x2ExtendLowUI32x4 = 0xc9,
+ I64x2ExtendHighUI32x4 = 0xca,
I64x2Shl = 0xcb,
I64x2ShrS = 0xcc,
I64x2ShrU = 0xcd,
I64x2Add = 0xce,
+ // 0xcf unused
+ // 0xd0 unused
I64x2Sub = 0xd1,
+ // 0xd2 unused
+ // 0xd3 unused
+ // 0xd4 unused
I64x2Mul = 0xd5,
+ I64x2Eq = 0xd6,
+ I64x2Ne = 0xd7,
+ I64x2LtS = 0xd8,
+ I64x2GtS = 0xd9,
+ I64x2LeS = 0xda,
+ I64x2GeS = 0xdb,
+ I64x2ExtMulLowSI32x4 = 0xdc,
+ I64x2ExtMulHighSI32x4 = 0xdd,
+ I64x2ExtMulLowUI32x4 = 0xde,
+ I64x2ExtMulHighUI32x4 = 0xdf,
F32x4Abs = 0xe0,
F32x4Neg = 0xe1,
+ // 0xe2 unused
F32x4Sqrt = 0xe3,
F32x4Add = 0xe4,
F32x4Sub = 0xe5,
@@ -937,17 +983,9 @@ enum ASTNodes {
F32x4PMin = 0xea,
F32x4PMax = 0xeb,
- F32x4Ceil = 0xd8,
- F32x4Floor = 0xd9,
- F32x4Trunc = 0xda,
- F32x4Nearest = 0xdb,
- F64x2Ceil = 0xdc,
- F64x2Floor = 0xdd,
- F64x2Trunc = 0xde,
- F64x2Nearest = 0xdf,
-
F64x2Abs = 0xec,
F64x2Neg = 0xed,
+ // 0xee unused
F64x2Sqrt = 0xef,
F64x2Add = 0xf0,
F64x2Sub = 0xf1,
@@ -958,56 +996,14 @@ enum ASTNodes {
F64x2PMin = 0xf6,
F64x2PMax = 0xf7,
- I16x8ExtAddPairWiseSI8x16 = 0xc2,
- I16x8ExtAddPairWiseUI8x16 = 0xc3,
- I32x4ExtAddPairWiseSI16x8 = 0xa5,
- I32x4ExtAddPairWiseUI16x8 = 0xa6,
-
I32x4TruncSatSF32x4 = 0xf8,
I32x4TruncSatUF32x4 = 0xf9,
F32x4ConvertSI32x4 = 0xfa,
F32x4ConvertUI32x4 = 0xfb,
-
- V128Load32Zero = 0xfc,
- V128Load64Zero = 0xfd,
-
- F32x4QFMA = 0xb4,
- F32x4QFMS = 0xd4,
- F64x2QFMA = 0xfe,
- F64x2QFMS = 0xff,
-
- I64x2TruncSatSF64x2 = 0x0100,
- I64x2TruncSatUF64x2 = 0x0101,
- F64x2ConvertSI64x2 = 0x0102,
- F64x2ConvertUI64x2 = 0x0103,
-
- I16x8ExtMulLowSI8x16 = 0x9a,
- I16x8ExtMulHighSI8x16 = 0x9d,
- I16x8ExtMulLowUI8x16 = 0x9e,
- I16x8ExtMulHighUI8x16 = 0x9f,
- I32x4ExtMulLowSI16x8 = 0xbb,
- I32x4ExtMulHighSI16x8 = 0xbd,
- I32x4ExtMulLowUI16x8 = 0xbe,
- I32x4ExtMulHighUI16x8 = 0xbf,
- I64x2ExtMulLowSI32x4 = 0xd2,
- I64x2ExtMulHighSI32x4 = 0xd3,
- I64x2ExtMulLowUI32x4 = 0xd6,
- I64x2ExtMulHighUI32x4 = 0xd7,
-
- F64x2ConvertLowSI32x4 = 0x53,
- F64x2ConvertLowUI32x4 = 0x54,
- I32x4TruncSatZeroSF64x2 = 0x55,
- I32x4TruncSatZeroUF64x2 = 0x56,
- F32x4DemoteZeroF64x2 = 0x57,
- F64x2PromoteLowF32x4 = 0x69,
-
- I32x4WidenSI8x16 = 0x67,
- I32x4WidenUI8x16 = 0x68,
-
- // prefetch opcodes
-
- PrefetchT = 0xc5,
- PrefetchNT = 0xc6,
+ I32x4TruncSatZeroSF64x2 = 0xfc,
+ I32x4TruncSatZeroUF64x2 = 0xfd,
+ F64x2ConvertLowSI32x4 = 0xfe,
+ F64x2ConvertLowUI32x4 = 0xff,
// bulk memory opcodes
@@ -1595,8 +1591,6 @@ public:
bool maybeVisitSIMDShift(Expression*& out, uint32_t code);
bool maybeVisitSIMDLoad(Expression*& out, uint32_t code);
bool maybeVisitSIMDLoadStoreLane(Expression*& out, uint32_t code);
- bool maybeVisitSIMDWiden(Expression*& out, uint32_t code);
- bool maybeVisitPrefetch(Expression*& out, uint32_t code);
bool maybeVisitMemoryInit(Expression*& out, uint32_t code);
bool maybeVisitDataDrop(Expression*& out, uint32_t code);
bool maybeVisitMemoryCopy(Expression*& out, uint32_t code);
diff --git a/src/wasm-builder.h b/src/wasm-builder.h
index 134d42a78..c0d6cbeeb 100644
--- a/src/wasm-builder.h
+++ b/src/wasm-builder.h
@@ -499,16 +499,6 @@ public:
ret->finalize();
return ret;
}
- Prefetch*
- makePrefetch(PrefetchOp op, Address offset, Address align, Expression* ptr) {
- auto* ret = wasm.allocator.alloc<Prefetch>();
- ret->op = op;
- ret->offset = offset;
- ret->align = align;
- ret->ptr = ptr;
- ret->finalize();
- return ret;
- }
MemoryInit* makeMemoryInit(uint32_t segment,
Expression* dest,
Expression* offset,
diff --git a/src/wasm-delegations-fields.h b/src/wasm-delegations-fields.h
index 63ff235d7..16ef11193 100644
--- a/src/wasm-delegations-fields.h
+++ b/src/wasm-delegations-fields.h
@@ -387,23 +387,6 @@ switch (DELEGATE_ID) {
DELEGATE_END(SIMDLoadStoreLane);
break;
}
- case Expression::Id::SIMDWidenId: {
- DELEGATE_START(SIMDWiden);
- DELEGATE_FIELD_CHILD(SIMDWiden, vec);
- DELEGATE_FIELD_INT(SIMDWiden, op);
- DELEGATE_FIELD_INT(SIMDWiden, index);
- DELEGATE_END(SIMDWiden);
- break;
- }
- case Expression::Id::PrefetchId: {
- DELEGATE_START(Prefetch);
- DELEGATE_FIELD_CHILD(Prefetch, ptr);
- DELEGATE_FIELD_INT(Prefetch, op);
- DELEGATE_FIELD_ADDRESS(Prefetch, offset);
- DELEGATE_FIELD_ADDRESS(Prefetch, align);
- DELEGATE_END(Prefetch);
- break;
- }
case Expression::Id::MemoryInitId: {
DELEGATE_START(MemoryInit);
DELEGATE_FIELD_CHILD(MemoryInit, size);
diff --git a/src/wasm-delegations.h b/src/wasm-delegations.h
index 5a05a731f..b063c81be 100644
--- a/src/wasm-delegations.h
+++ b/src/wasm-delegations.h
@@ -40,8 +40,6 @@ DELEGATE(SIMDTernary);
DELEGATE(SIMDShift);
DELEGATE(SIMDLoad);
DELEGATE(SIMDLoadStoreLane);
-DELEGATE(SIMDWiden);
-DELEGATE(Prefetch);
DELEGATE(MemoryInit);
DELEGATE(DataDrop);
DELEGATE(MemoryCopy);
diff --git a/src/wasm-interpreter.h b/src/wasm-interpreter.h
index 61d29ada1..249ec8fdb 100644
--- a/src/wasm-interpreter.h
+++ b/src/wasm-interpreter.h
@@ -465,12 +465,12 @@ public:
return value.splatF64x2();
case NotVec128:
return value.notV128();
+ case AnyTrueVec128:
+ return value.anyTrueV128();
case AbsVecI8x16:
return value.absI8x16();
case NegVecI8x16:
return value.negI8x16();
- case AnyTrueVecI8x16:
- return value.anyTrueI8x16();
case AllTrueVecI8x16:
return value.allTrueI8x16();
case BitmaskVecI8x16:
@@ -481,8 +481,6 @@ public:
return value.absI16x8();
case NegVecI16x8:
return value.negI16x8();
- case AnyTrueVecI16x8:
- return value.anyTrueI16x8();
case AllTrueVecI16x8:
return value.allTrueI16x8();
case BitmaskVecI16x8:
@@ -491,14 +489,16 @@ public:
return value.absI32x4();
case NegVecI32x4:
return value.negI32x4();
- case AnyTrueVecI32x4:
- return value.anyTrueI32x4();
case AllTrueVecI32x4:
return value.allTrueI32x4();
case BitmaskVecI32x4:
return value.bitmaskI32x4();
+ case AbsVecI64x2:
+ return value.absI64x2();
case NegVecI64x2:
return value.negI64x2();
+ case AllTrueVecI64x2:
+ return value.allTrueI64x2();
case BitmaskVecI64x2:
WASM_UNREACHABLE("unimp");
case AbsVecF32x4:
@@ -541,38 +541,30 @@ public:
return value.truncSatToSI32x4();
case TruncSatUVecF32x4ToVecI32x4:
return value.truncSatToUI32x4();
- case TruncSatSVecF64x2ToVecI64x2:
- return value.truncSatToSI64x2();
- case TruncSatUVecF64x2ToVecI64x2:
- return value.truncSatToUI64x2();
case ConvertSVecI32x4ToVecF32x4:
return value.convertSToF32x4();
case ConvertUVecI32x4ToVecF32x4:
return value.convertUToF32x4();
- case ConvertSVecI64x2ToVecF64x2:
- return value.convertSToF64x2();
- case ConvertUVecI64x2ToVecF64x2:
- return value.convertUToF64x2();
- case WidenLowSVecI8x16ToVecI16x8:
- return value.widenLowSToVecI16x8();
- case WidenHighSVecI8x16ToVecI16x8:
- return value.widenHighSToVecI16x8();
- case WidenLowUVecI8x16ToVecI16x8:
- return value.widenLowUToVecI16x8();
- case WidenHighUVecI8x16ToVecI16x8:
- return value.widenHighUToVecI16x8();
- case WidenLowSVecI16x8ToVecI32x4:
- return value.widenLowSToVecI32x4();
- case WidenHighSVecI16x8ToVecI32x4:
- return value.widenHighSToVecI32x4();
- case WidenLowUVecI16x8ToVecI32x4:
- return value.widenLowUToVecI32x4();
- case WidenHighUVecI16x8ToVecI32x4:
- return value.widenHighUToVecI32x4();
- case WidenLowSVecI32x4ToVecI64x2:
- case WidenHighSVecI32x4ToVecI64x2:
- case WidenLowUVecI32x4ToVecI64x2:
- case WidenHighUVecI32x4ToVecI64x2:
+ case ExtendLowSVecI8x16ToVecI16x8:
+ return value.extendLowSToVecI16x8();
+ case ExtendHighSVecI8x16ToVecI16x8:
+ return value.extendHighSToVecI16x8();
+ case ExtendLowUVecI8x16ToVecI16x8:
+ return value.extendLowUToVecI16x8();
+ case ExtendHighUVecI8x16ToVecI16x8:
+ return value.extendHighUToVecI16x8();
+ case ExtendLowSVecI16x8ToVecI32x4:
+ return value.extendLowSToVecI32x4();
+ case ExtendHighSVecI16x8ToVecI32x4:
+ return value.extendHighSToVecI32x4();
+ case ExtendLowUVecI16x8ToVecI32x4:
+ return value.extendLowUToVecI32x4();
+ case ExtendHighUVecI16x8ToVecI32x4:
+ return value.extendHighUToVecI32x4();
+ case ExtendLowSVecI32x4ToVecI64x2:
+ case ExtendHighSVecI32x4ToVecI64x2:
+ case ExtendLowUVecI32x4ToVecI64x2:
+ case ExtendHighUVecI32x4ToVecI64x2:
case ConvertLowSVecI32x4ToVecF64x2:
case ConvertLowUVecI32x4ToVecF64x2:
case TruncSatZeroSVecF64x2ToVecI32x4:
@@ -827,6 +819,16 @@ public:
return left.geUI32x4(right);
case EqVecI64x2:
return left.eqI64x2(right);
+ case NeVecI64x2:
+ return left.neI64x2(right);
+ case LtSVecI64x2:
+ return left.ltSI64x2(right);
+ case GtSVecI64x2:
+ return left.gtSI64x2(right);
+ case LeSVecI64x2:
+ return left.leSI64x2(right);
+ case GeSVecI64x2:
+ return left.geSI64x2(right);
case EqVecF32x4:
return left.eqF32x4(right);
case NeVecF32x4:
@@ -873,8 +875,6 @@ public:
return left.subSaturateSI8x16(right);
case SubSatUVecI8x16:
return left.subSaturateUI8x16(right);
- case MulVecI8x16:
- return left.mulI8x16(right);
case MinSVecI8x16:
return left.minSI8x16(right);
case MinUVecI8x16:
@@ -1142,7 +1142,6 @@ public:
}
WASM_UNREACHABLE("invalid op");
}
- Flow visitSIMDWiden(SIMDWiden* curr) { WASM_UNREACHABLE("unimp"); }
Flow visitSelect(Select* curr) {
NOTE_ENTER("Select");
Flow ifTrue = visit(curr->ifTrue);
@@ -1185,14 +1184,6 @@ public:
NOTE_ENTER("Nop");
return Flow();
}
- Flow visitPrefetch(Prefetch* curr) {
- NOTE_ENTER("Prefetch");
- Flow flow = visit(curr->ptr);
- if (flow.breaking()) {
- return flow;
- }
- return Flow();
- }
Flow visitUnreachable(Unreachable* curr) {
NOTE_ENTER("Unreachable");
trap("unreachable");
diff --git a/src/wasm-s-parser.h b/src/wasm-s-parser.h
index 03da961bd..c65043d4c 100644
--- a/src/wasm-s-parser.h
+++ b/src/wasm-s-parser.h
@@ -234,8 +234,6 @@ private:
Expression* makeSIMDShift(Element& s, SIMDShiftOp op);
Expression* makeSIMDLoad(Element& s, SIMDLoadOp op);
Expression* makeSIMDLoadStoreLane(Element& s, SIMDLoadStoreLaneOp op);
- Expression* makeSIMDWiden(Element& s, SIMDWidenOp op);
- Expression* makePrefetch(Element& s, PrefetchOp op);
Expression* makeMemoryInit(Element& s);
Expression* makeDataDrop(Element& s);
Expression* makeMemoryCopy(Element& s);
diff --git a/src/wasm.h b/src/wasm.h
index 101f45c81..8e9f93500 100644
--- a/src/wasm.h
+++ b/src/wasm.h
@@ -157,23 +157,23 @@ enum UnaryOp {
// SIMD arithmetic
NotVec128,
+ AnyTrueVec128,
AbsVecI8x16,
NegVecI8x16,
- AnyTrueVecI8x16,
AllTrueVecI8x16,
BitmaskVecI8x16,
PopcntVecI8x16,
AbsVecI16x8,
NegVecI16x8,
- AnyTrueVecI16x8,
AllTrueVecI16x8,
BitmaskVecI16x8,
AbsVecI32x4,
NegVecI32x4,
- AnyTrueVecI32x4,
AllTrueVecI32x4,
BitmaskVecI32x4,
+ AbsVecI64x2,
NegVecI64x2,
+ AllTrueVecI64x2,
BitmaskVecI64x2,
AbsVecF32x4,
NegVecF32x4,
@@ -197,24 +197,20 @@ enum UnaryOp {
// SIMD conversions
TruncSatSVecF32x4ToVecI32x4,
TruncSatUVecF32x4ToVecI32x4,
- TruncSatSVecF64x2ToVecI64x2,
- TruncSatUVecF64x2ToVecI64x2,
ConvertSVecI32x4ToVecF32x4,
ConvertUVecI32x4ToVecF32x4,
- ConvertSVecI64x2ToVecF64x2,
- ConvertUVecI64x2ToVecF64x2,
- WidenLowSVecI8x16ToVecI16x8,
- WidenHighSVecI8x16ToVecI16x8,
- WidenLowUVecI8x16ToVecI16x8,
- WidenHighUVecI8x16ToVecI16x8,
- WidenLowSVecI16x8ToVecI32x4,
- WidenHighSVecI16x8ToVecI32x4,
- WidenLowUVecI16x8ToVecI32x4,
- WidenHighUVecI16x8ToVecI32x4,
- WidenLowSVecI32x4ToVecI64x2,
- WidenHighSVecI32x4ToVecI64x2,
- WidenLowUVecI32x4ToVecI64x2,
- WidenHighUVecI32x4ToVecI64x2,
+ ExtendLowSVecI8x16ToVecI16x8,
+ ExtendHighSVecI8x16ToVecI16x8,
+ ExtendLowUVecI8x16ToVecI16x8,
+ ExtendHighUVecI8x16ToVecI16x8,
+ ExtendLowSVecI16x8ToVecI32x4,
+ ExtendHighSVecI16x8ToVecI32x4,
+ ExtendLowUVecI16x8ToVecI32x4,
+ ExtendHighUVecI16x8ToVecI32x4,
+ ExtendLowSVecI32x4ToVecI64x2,
+ ExtendHighSVecI32x4ToVecI64x2,
+ ExtendLowUVecI32x4ToVecI64x2,
+ ExtendHighUVecI32x4ToVecI64x2,
ConvertLowSVecI32x4ToVecF64x2,
ConvertLowUVecI32x4ToVecF64x2,
@@ -367,6 +363,11 @@ enum BinaryOp {
GeSVecI32x4,
GeUVecI32x4,
EqVecI64x2,
+ NeVecI64x2,
+ LtSVecI64x2,
+ GtSVecI64x2,
+ LeSVecI64x2,
+ GeSVecI64x2,
EqVecF32x4,
NeVecF32x4,
LtVecF32x4,
@@ -391,7 +392,6 @@ enum BinaryOp {
SubVecI8x16,
SubSatSVecI8x16,
SubSatUVecI8x16,
- MulVecI8x16,
MinSVecI8x16,
MinUVecI8x16,
MaxSVecI8x16,
@@ -527,24 +527,6 @@ enum SIMDLoadStoreLaneOp {
enum SIMDTernaryOp {
Bitselect,
- QFMAF32x4,
- QFMSF32x4,
- QFMAF64x2,
- QFMSF64x2,
- SignSelectVec8x16,
- SignSelectVec16x8,
- SignSelectVec32x4,
- SignSelectVec64x2
-};
-
-enum SIMDWidenOp {
- WidenSVecI8x16ToVecI32x4,
- WidenUVecI8x16ToVecI32x4,
-};
-
-enum PrefetchOp {
- PrefetchTemporal,
- PrefetchNontemporal,
};
enum RefIsOp {
@@ -616,7 +598,6 @@ public:
MemorySizeId,
MemoryGrowId,
NopId,
- PrefetchId,
UnreachableId,
AtomicRMWId,
AtomicCmpxchgId,
@@ -630,7 +611,6 @@ public:
SIMDShiftId,
SIMDLoadId,
SIMDLoadStoreLaneId,
- SIMDWidenId,
MemoryInitId,
DataDropId,
MemoryCopyId,
@@ -1079,30 +1059,6 @@ public:
void finalize();
};
-class SIMDWiden : public SpecificExpression<Expression::SIMDWidenId> {
-public:
- SIMDWiden() = default;
- SIMDWiden(MixedArena& allocator) {}
-
- SIMDWidenOp op;
- uint8_t index;
- Expression* vec;
-
- void finalize();
-};
-
-class Prefetch : public SpecificExpression<Expression::PrefetchId> {
-public:
- Prefetch() = default;
- Prefetch(MixedArena& allocator) : Prefetch() {}
-
- PrefetchOp op;
- Address offset;
- Address align;
- Expression* ptr;
- void finalize();
-};
-
class MemoryInit : public SpecificExpression<Expression::MemoryInitId> {
public:
MemoryInit() = default;
diff --git a/src/wasm/literal.cpp b/src/wasm/literal.cpp
index 54e817189..0c5a48b8e 100644
--- a/src/wasm/literal.cpp
+++ b/src/wasm/literal.cpp
@@ -1735,6 +1735,9 @@ Literal Literal::absI16x8() const {
Literal Literal::absI32x4() const {
return unary<4, &Literal::getLanesI32x4, &Literal::abs>(*this);
}
+Literal Literal::absI64x2() const {
+ return unary<2, &Literal::getLanesI64x2, &Literal::abs>(*this);
+}
Literal Literal::negI8x16() const {
return unary<16, &Literal::getLanesUI8x16, &Literal::neg>(*this);
}
@@ -1798,30 +1801,17 @@ Literal Literal::truncSatToSI32x4() const {
Literal Literal::truncSatToUI32x4() const {
return unary<4, &Literal::getLanesF32x4, &Literal::truncSatToUI32>(*this);
}
-Literal Literal::truncSatToSI64x2() const {
- return unary<2, &Literal::getLanesF64x2, &Literal::truncSatToSI64>(*this);
-}
-Literal Literal::truncSatToUI64x2() const {
- return unary<2, &Literal::getLanesF64x2, &Literal::truncSatToUI64>(*this);
-}
Literal Literal::convertSToF32x4() const {
return unary<4, &Literal::getLanesI32x4, &Literal::convertSIToF32>(*this);
}
Literal Literal::convertUToF32x4() const {
return unary<4, &Literal::getLanesI32x4, &Literal::convertUIToF32>(*this);
}
-Literal Literal::convertSToF64x2() const {
- return unary<2, &Literal::getLanesI64x2, &Literal::convertSIToF64>(*this);
-}
-Literal Literal::convertUToF64x2() const {
- return unary<2, &Literal::getLanesI64x2, &Literal::convertUIToF64>(*this);
-}
-template<int Lanes, LaneArray<Lanes> (Literal::*IntoLanes)() const>
-static Literal any_true(const Literal& val) {
- LaneArray<Lanes> lanes = (val.*IntoLanes)();
- for (size_t i = 0; i < Lanes; ++i) {
- if (lanes[i] != Literal::makeZero(lanes[i].type)) {
+Literal Literal::anyTrueV128() const {
+ auto lanes = getLanesI32x4();
+ for (size_t i = 0; i < 4; ++i) {
+ if (lanes[i].geti32() != 0) {
return Literal(int32_t(1));
}
}
@@ -1851,33 +1841,27 @@ static Literal bitmask(const Literal& val) {
return Literal(result);
}
-Literal Literal::anyTrueI8x16() const {
- return any_true<16, &Literal::getLanesUI8x16>(*this);
-}
Literal Literal::allTrueI8x16() const {
return all_true<16, &Literal::getLanesUI8x16>(*this);
}
Literal Literal::bitmaskI8x16() const {
return bitmask<16, &Literal::getLanesSI8x16>(*this);
}
-Literal Literal::anyTrueI16x8() const {
- return any_true<8, &Literal::getLanesUI16x8>(*this);
-}
Literal Literal::allTrueI16x8() const {
return all_true<8, &Literal::getLanesUI16x8>(*this);
}
Literal Literal::bitmaskI16x8() const {
return bitmask<8, &Literal::getLanesSI16x8>(*this);
}
-Literal Literal::anyTrueI32x4() const {
- return any_true<4, &Literal::getLanesI32x4>(*this);
-}
Literal Literal::allTrueI32x4() const {
return all_true<4, &Literal::getLanesI32x4>(*this);
}
Literal Literal::bitmaskI32x4() const {
return bitmask<4, &Literal::getLanesI32x4>(*this);
}
+Literal Literal::allTrueI64x2() const {
+ return all_true<2, &Literal::getLanesI64x2>(*this);
+}
template<int Lanes,
LaneArray<Lanes> (Literal::*IntoLanes)() const,
@@ -2039,6 +2023,26 @@ Literal Literal::eqI64x2(const Literal& other) const {
return compare<2, &Literal::getLanesI64x2, &Literal::eq, int64_t>(*this,
other);
}
+Literal Literal::neI64x2(const Literal& other) const {
+ return compare<2, &Literal::getLanesI64x2, &Literal::ne, int64_t>(*this,
+ other);
+}
+Literal Literal::ltSI64x2(const Literal& other) const {
+ return compare<2, &Literal::getLanesI64x2, &Literal::ltS, int64_t>(*this,
+ other);
+}
+Literal Literal::gtSI64x2(const Literal& other) const {
+ return compare<2, &Literal::getLanesI64x2, &Literal::gtS, int64_t>(*this,
+ other);
+}
+Literal Literal::leSI64x2(const Literal& other) const {
+ return compare<2, &Literal::getLanesI64x2, &Literal::leS, int64_t>(*this,
+ other);
+}
+Literal Literal::geSI64x2(const Literal& other) const {
+ return compare<2, &Literal::getLanesI64x2, &Literal::geS, int64_t>(*this,
+ other);
+}
Literal Literal::eqF32x4(const Literal& other) const {
return compare<4, &Literal::getLanesF32x4, &Literal::eq>(*this, other);
}
@@ -2125,9 +2129,6 @@ Literal Literal::subSaturateUI8x16(const Literal& other) const {
return binary<16, &Literal::getLanesSI8x16, &Literal::subSatUI8>(*this,
other);
}
-Literal Literal::mulI8x16(const Literal& other) const {
- return binary<16, &Literal::getLanesUI8x16, &Literal::mul>(*this, other);
-}
Literal Literal::minSI8x16(const Literal& other) const {
return binary<16, &Literal::getLanesSI8x16, &Literal::minInt>(*this, other);
}
@@ -2329,7 +2330,7 @@ enum class LaneOrder { Low, High };
template<size_t Lanes,
LaneArray<Lanes * 2> (Literal::*IntoLanes)() const,
LaneOrder Side>
-Literal widen(const Literal& vec) {
+Literal extend(const Literal& vec) {
LaneArray<Lanes* 2> lanes = (vec.*IntoLanes)();
LaneArray<Lanes> result;
for (size_t i = 0; i < Lanes; ++i) {
@@ -2338,29 +2339,29 @@ Literal widen(const Literal& vec) {
return Literal(result);
}
-Literal Literal::widenLowSToVecI16x8() const {
- return widen<8, &Literal::getLanesSI8x16, LaneOrder::Low>(*this);
+Literal Literal::extendLowSToVecI16x8() const {
+ return extend<8, &Literal::getLanesSI8x16, LaneOrder::Low>(*this);
}
-Literal Literal::widenHighSToVecI16x8() const {
- return widen<8, &Literal::getLanesSI8x16, LaneOrder::High>(*this);
+Literal Literal::extendHighSToVecI16x8() const {
+ return extend<8, &Literal::getLanesSI8x16, LaneOrder::High>(*this);
}
-Literal Literal::widenLowUToVecI16x8() const {
- return widen<8, &Literal::getLanesUI8x16, LaneOrder::Low>(*this);
+Literal Literal::extendLowUToVecI16x8() const {
+ return extend<8, &Literal::getLanesUI8x16, LaneOrder::Low>(*this);
}
-Literal Literal::widenHighUToVecI16x8() const {
- return widen<8, &Literal::getLanesUI8x16, LaneOrder::High>(*this);
+Literal Literal::extendHighUToVecI16x8() const {
+ return extend<8, &Literal::getLanesUI8x16, LaneOrder::High>(*this);
}
-Literal Literal::widenLowSToVecI32x4() const {
- return widen<4, &Literal::getLanesSI16x8, LaneOrder::Low>(*this);
+Literal Literal::extendLowSToVecI32x4() const {
+ return extend<4, &Literal::getLanesSI16x8, LaneOrder::Low>(*this);
}
-Literal Literal::widenHighSToVecI32x4() const {
- return widen<4, &Literal::getLanesSI16x8, LaneOrder::High>(*this);
+Literal Literal::extendHighSToVecI32x4() const {
+ return extend<4, &Literal::getLanesSI16x8, LaneOrder::High>(*this);
}
-Literal Literal::widenLowUToVecI32x4() const {
- return widen<4, &Literal::getLanesUI16x8, LaneOrder::Low>(*this);
+Literal Literal::extendLowUToVecI32x4() const {
+ return extend<4, &Literal::getLanesUI16x8, LaneOrder::Low>(*this);
}
-Literal Literal::widenHighUToVecI32x4() const {
- return widen<4, &Literal::getLanesUI16x8, LaneOrder::High>(*this);
+Literal Literal::extendHighUToVecI32x4() const {
+ return extend<4, &Literal::getLanesUI16x8, LaneOrder::High>(*this);
}
Literal Literal::extMulLowSI16x8(const Literal& other) const {
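
As a reference for the rename above, a tiny standalone illustration (not Binaryen code) of the lane semantics the `extend*` helpers implement: `extend_low` widens the low half of the input lanes, with signedness determined by the lane accessor, mirroring the `extend<Lanes, IntoLanes, LaneOrder::Low>` template.

    // Illustrative only: i16x8.extend_low_i8x16_s lane semantics.
    function extendLowI8x16S(lanes /* Int8Array of length 16 */) {
      const out = new Int16Array(8);
      for (let i = 0; i < 8; ++i) {
        out[i] = lanes[i]; // low half; sign extension is implicit for Int8Array reads
      }
      return out;
    }

    console.log(extendLowI8x16S(new Int8Array([-1, 2, -3, 4, 5, 6, 7, 8,
                                               9, 10, 11, 12, 13, 14, 15, 16])));
    // Int16Array(8) [ -1, 2, -3, 4, 5, 6, 7, 8 ]
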
diff --git a/src/wasm/wasm-binary.cpp b/src/wasm/wasm-binary.cpp
index 5425c1551..3a9a1eece 100644
--- a/src/wasm/wasm-binary.cpp
+++ b/src/wasm/wasm-binary.cpp
@@ -3494,12 +3494,6 @@ BinaryConsts::ASTNodes WasmBinaryBuilder::readExpression(Expression*& curr) {
if (maybeVisitSIMDLoadStoreLane(curr, opcode)) {
break;
}
- if (maybeVisitSIMDWiden(curr, opcode)) {
- break;
- }
- if (maybeVisitPrefetch(curr, opcode)) {
- break;
- }
throwError("invalid code after SIMD prefix: " + std::to_string(opcode));
break;
}
@@ -4970,6 +4964,26 @@ bool WasmBinaryBuilder::maybeVisitSIMDBinary(Expression*& out, uint32_t code) {
curr = allocator.alloc<Binary>();
curr->op = EqVecI64x2;
break;
+ case BinaryConsts::I64x2Ne:
+ curr = allocator.alloc<Binary>();
+ curr->op = NeVecI64x2;
+ break;
+ case BinaryConsts::I64x2LtS:
+ curr = allocator.alloc<Binary>();
+ curr->op = LtSVecI64x2;
+ break;
+ case BinaryConsts::I64x2GtS:
+ curr = allocator.alloc<Binary>();
+ curr->op = GtSVecI64x2;
+ break;
+ case BinaryConsts::I64x2LeS:
+ curr = allocator.alloc<Binary>();
+ curr->op = LeSVecI64x2;
+ break;
+ case BinaryConsts::I64x2GeS:
+ curr = allocator.alloc<Binary>();
+ curr->op = GeSVecI64x2;
+ break;
case BinaryConsts::F32x4Eq:
curr = allocator.alloc<Binary>();
curr->op = EqVecF32x4;
@@ -5058,10 +5072,6 @@ bool WasmBinaryBuilder::maybeVisitSIMDBinary(Expression*& out, uint32_t code) {
curr = allocator.alloc<Binary>();
curr->op = SubSatUVecI8x16;
break;
- case BinaryConsts::I8x16Mul:
- curr = allocator.alloc<Binary>();
- curr->op = MulVecI8x16;
- break;
case BinaryConsts::I8x16MinS:
curr = allocator.alloc<Binary>();
curr->op = MinSVecI8x16;
@@ -5351,6 +5361,10 @@ bool WasmBinaryBuilder::maybeVisitSIMDUnary(Expression*& out, uint32_t code) {
curr = allocator.alloc<Unary>();
curr->op = NotVec128;
break;
+ case BinaryConsts::V128AnyTrue:
+ curr = allocator.alloc<Unary>();
+ curr->op = AnyTrueVec128;
+ break;
case BinaryConsts::I8x16Popcnt:
curr = allocator.alloc<Unary>();
curr->op = PopcntVecI8x16;
@@ -5363,10 +5377,6 @@ bool WasmBinaryBuilder::maybeVisitSIMDUnary(Expression*& out, uint32_t code) {
curr = allocator.alloc<Unary>();
curr->op = NegVecI8x16;
break;
- case BinaryConsts::I8x16AnyTrue:
- curr = allocator.alloc<Unary>();
- curr->op = AnyTrueVecI8x16;
- break;
case BinaryConsts::I8x16AllTrue:
curr = allocator.alloc<Unary>();
curr->op = AllTrueVecI8x16;
@@ -5383,10 +5393,6 @@ bool WasmBinaryBuilder::maybeVisitSIMDUnary(Expression*& out, uint32_t code) {
curr = allocator.alloc<Unary>();
curr->op = NegVecI16x8;
break;
- case BinaryConsts::I16x8AnyTrue:
- curr = allocator.alloc<Unary>();
- curr->op = AnyTrueVecI16x8;
- break;
case BinaryConsts::I16x8AllTrue:
curr = allocator.alloc<Unary>();
curr->op = AllTrueVecI16x8;
@@ -5403,10 +5409,6 @@ bool WasmBinaryBuilder::maybeVisitSIMDUnary(Expression*& out, uint32_t code) {
curr = allocator.alloc<Unary>();
curr->op = NegVecI32x4;
break;
- case BinaryConsts::I32x4AnyTrue:
- curr = allocator.alloc<Unary>();
- curr->op = AnyTrueVecI32x4;
- break;
case BinaryConsts::I32x4AllTrue:
curr = allocator.alloc<Unary>();
curr->op = AllTrueVecI32x4;
@@ -5415,10 +5417,18 @@ bool WasmBinaryBuilder::maybeVisitSIMDUnary(Expression*& out, uint32_t code) {
curr = allocator.alloc<Unary>();
curr->op = BitmaskVecI32x4;
break;
+ case BinaryConsts::I64x2Abs:
+ curr = allocator.alloc<Unary>();
+ curr->op = AbsVecI64x2;
+ break;
case BinaryConsts::I64x2Neg:
curr = allocator.alloc<Unary>();
curr->op = NegVecI64x2;
break;
+ case BinaryConsts::I64x2AllTrue:
+ curr = allocator.alloc<Unary>();
+ curr->op = AllTrueVecI64x2;
+ break;
case BinaryConsts::I64x2Bitmask:
curr = allocator.alloc<Unary>();
curr->op = BitmaskVecI64x2;
@@ -5503,14 +5513,6 @@ bool WasmBinaryBuilder::maybeVisitSIMDUnary(Expression*& out, uint32_t code) {
curr = allocator.alloc<Unary>();
curr->op = TruncSatUVecF32x4ToVecI32x4;
break;
- case BinaryConsts::I64x2TruncSatSF64x2:
- curr = allocator.alloc<Unary>();
- curr->op = TruncSatSVecF64x2ToVecI64x2;
- break;
- case BinaryConsts::I64x2TruncSatUF64x2:
- curr = allocator.alloc<Unary>();
- curr->op = TruncSatUVecF64x2ToVecI64x2;
- break;
case BinaryConsts::F32x4ConvertSI32x4:
curr = allocator.alloc<Unary>();
curr->op = ConvertSVecI32x4ToVecF32x4;
@@ -5519,61 +5521,53 @@ bool WasmBinaryBuilder::maybeVisitSIMDUnary(Expression*& out, uint32_t code) {
curr = allocator.alloc<Unary>();
curr->op = ConvertUVecI32x4ToVecF32x4;
break;
- case BinaryConsts::F64x2ConvertSI64x2:
+ case BinaryConsts::I16x8ExtendLowSI8x16:
curr = allocator.alloc<Unary>();
- curr->op = ConvertSVecI64x2ToVecF64x2;
+ curr->op = ExtendLowSVecI8x16ToVecI16x8;
break;
- case BinaryConsts::F64x2ConvertUI64x2:
+ case BinaryConsts::I16x8ExtendHighSI8x16:
curr = allocator.alloc<Unary>();
- curr->op = ConvertUVecI64x2ToVecF64x2;
+ curr->op = ExtendHighSVecI8x16ToVecI16x8;
break;
- case BinaryConsts::I16x8WidenLowSI8x16:
+ case BinaryConsts::I16x8ExtendLowUI8x16:
curr = allocator.alloc<Unary>();
- curr->op = WidenLowSVecI8x16ToVecI16x8;
+ curr->op = ExtendLowUVecI8x16ToVecI16x8;
break;
- case BinaryConsts::I16x8WidenHighSI8x16:
+ case BinaryConsts::I16x8ExtendHighUI8x16:
curr = allocator.alloc<Unary>();
- curr->op = WidenHighSVecI8x16ToVecI16x8;
+ curr->op = ExtendHighUVecI8x16ToVecI16x8;
break;
- case BinaryConsts::I16x8WidenLowUI8x16:
+ case BinaryConsts::I32x4ExtendLowSI16x8:
curr = allocator.alloc<Unary>();
- curr->op = WidenLowUVecI8x16ToVecI16x8;
+ curr->op = ExtendLowSVecI16x8ToVecI32x4;
break;
- case BinaryConsts::I16x8WidenHighUI8x16:
+ case BinaryConsts::I32x4ExtendHighSI16x8:
curr = allocator.alloc<Unary>();
- curr->op = WidenHighUVecI8x16ToVecI16x8;
+ curr->op = ExtendHighSVecI16x8ToVecI32x4;
break;
- case BinaryConsts::I32x4WidenLowSI16x8:
+ case BinaryConsts::I32x4ExtendLowUI16x8:
curr = allocator.alloc<Unary>();
- curr->op = WidenLowSVecI16x8ToVecI32x4;
+ curr->op = ExtendLowUVecI16x8ToVecI32x4;
break;
- case BinaryConsts::I32x4WidenHighSI16x8:
+ case BinaryConsts::I32x4ExtendHighUI16x8:
curr = allocator.alloc<Unary>();
- curr->op = WidenHighSVecI16x8ToVecI32x4;
+ curr->op = ExtendHighUVecI16x8ToVecI32x4;
break;
- case BinaryConsts::I32x4WidenLowUI16x8:
+ case BinaryConsts::I64x2ExtendLowSI32x4:
curr = allocator.alloc<Unary>();
- curr->op = WidenLowUVecI16x8ToVecI32x4;
+ curr->op = ExtendLowSVecI32x4ToVecI64x2;
break;
- case BinaryConsts::I32x4WidenHighUI16x8:
+ case BinaryConsts::I64x2ExtendHighSI32x4:
curr = allocator.alloc<Unary>();
- curr->op = WidenHighUVecI16x8ToVecI32x4;
+ curr->op = ExtendHighSVecI32x4ToVecI64x2;
break;
- case BinaryConsts::I64x2WidenLowSI32x4:
+ case BinaryConsts::I64x2ExtendLowUI32x4:
curr = allocator.alloc<Unary>();
- curr->op = WidenLowSVecI32x4ToVecI64x2;
+ curr->op = ExtendLowUVecI32x4ToVecI64x2;
break;
- case BinaryConsts::I64x2WidenHighSI32x4:
+ case BinaryConsts::I64x2ExtendHighUI32x4:
curr = allocator.alloc<Unary>();
- curr->op = WidenHighSVecI32x4ToVecI64x2;
- break;
- case BinaryConsts::I64x2WidenLowUI32x4:
- curr = allocator.alloc<Unary>();
- curr->op = WidenLowUVecI32x4ToVecI64x2;
- break;
- case BinaryConsts::I64x2WidenHighUI32x4:
- curr = allocator.alloc<Unary>();
- curr->op = WidenHighUVecI32x4ToVecI64x2;
+ curr->op = ExtendHighUVecI32x4ToVecI64x2;
break;
case BinaryConsts::F64x2ConvertLowSI32x4:
curr = allocator.alloc<Unary>();
@@ -5752,38 +5746,6 @@ bool WasmBinaryBuilder::maybeVisitSIMDTernary(Expression*& out, uint32_t code) {
curr = allocator.alloc<SIMDTernary>();
curr->op = Bitselect;
break;
- case BinaryConsts::V8x16SignSelect:
- curr = allocator.alloc<SIMDTernary>();
- curr->op = SignSelectVec8x16;
- break;
- case BinaryConsts::V16x8SignSelect:
- curr = allocator.alloc<SIMDTernary>();
- curr->op = SignSelectVec16x8;
- break;
- case BinaryConsts::V32x4SignSelect:
- curr = allocator.alloc<SIMDTernary>();
- curr->op = SignSelectVec32x4;
- break;
- case BinaryConsts::V64x2SignSelect:
- curr = allocator.alloc<SIMDTernary>();
- curr->op = SignSelectVec64x2;
- break;
- case BinaryConsts::F32x4QFMA:
- curr = allocator.alloc<SIMDTernary>();
- curr->op = QFMAF32x4;
- break;
- case BinaryConsts::F32x4QFMS:
- curr = allocator.alloc<SIMDTernary>();
- curr->op = QFMSF32x4;
- break;
- case BinaryConsts::F64x2QFMA:
- curr = allocator.alloc<SIMDTernary>();
- curr->op = QFMAF64x2;
- break;
- case BinaryConsts::F64x2QFMS:
- curr = allocator.alloc<SIMDTernary>();
- curr->op = QFMSF64x2;
- break;
default:
return false;
}
@@ -5979,45 +5941,6 @@ bool WasmBinaryBuilder::maybeVisitSIMDLoadStoreLane(Expression*& out,
return true;
}
-bool WasmBinaryBuilder::maybeVisitSIMDWiden(Expression*& out, uint32_t code) {
- SIMDWidenOp op;
- switch (code) {
- case BinaryConsts::I32x4WidenSI8x16:
- op = WidenSVecI8x16ToVecI32x4;
- break;
- case BinaryConsts::I32x4WidenUI8x16:
- op = WidenUVecI8x16ToVecI32x4;
- break;
- default:
- return false;
- }
- auto* curr = allocator.alloc<SIMDWiden>();
- curr->op = op;
- curr->index = getLaneIndex(4);
- curr->vec = popNonVoidExpression();
- curr->finalize();
- out = curr;
- return true;
-}
-
-bool WasmBinaryBuilder::maybeVisitPrefetch(Expression*& out, uint32_t code) {
- PrefetchOp op;
- switch (code) {
- case BinaryConsts::PrefetchT:
- op = PrefetchTemporal;
- break;
- case BinaryConsts::PrefetchNT:
- op = PrefetchNontemporal;
- break;
- default:
- return false;
- }
- Address align, offset;
- readMemoryAccess(align, offset);
- out = Builder(wasm).makePrefetch(op, offset, align, popNonVoidExpression());
- return true;
-}
-
void WasmBinaryBuilder::visitSelect(Select* curr, uint8_t code) {
BYN_TRACE("zz node: Select, code " << int32_t(code) << std::endl);
if (code == BinaryConsts::SelectWithType) {
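The reader cases added above cover the new i64x2 comparisons (ne, lt_s, gt_s, le_s, ge_s). As with the other SIMD comparisons, each result lane is all ones when the comparison holds for that lane and all zeros otherwise; a scalar sketch of i64x2.lt_s, not taken from Binaryen:

#include <array>
#include <cstdint>

// Scalar model of i64x2.lt_s: a result lane is ~0 (all bits set) if the
// signed comparison holds for that lane, and 0 otherwise.
std::array<uint64_t, 2> ltSI64x2(const std::array<int64_t, 2>& a,
                                 const std::array<int64_t, 2>& b) {
  std::array<uint64_t, 2> out;
  for (size_t i = 0; i < 2; ++i) {
    out[i] = (a[i] < b[i]) ? ~uint64_t(0) : 0;
  }
  return out;
}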
diff --git a/src/wasm/wasm-s-parser.cpp b/src/wasm/wasm-s-parser.cpp
index f3ab50f0c..a42b341f6 100644
--- a/src/wasm/wasm-s-parser.cpp
+++ b/src/wasm/wasm-s-parser.cpp
@@ -2095,21 +2095,6 @@ SExpressionWasmBuilder::makeSIMDLoadStoreLane(Element& s,
return ret;
}
-Expression* SExpressionWasmBuilder::makeSIMDWiden(Element& s, SIMDWidenOp op) {
- auto* ret = allocator.alloc<SIMDWiden>();
- ret->op = op;
- ret->index = parseLaneIndex(s[1], 4);
- ret->vec = parseExpression(s[2]);
- ret->finalize();
- return ret;
-}
-
-Expression* SExpressionWasmBuilder::makePrefetch(Element& s, PrefetchOp op) {
- Address offset, align;
- size_t i = parseMemAttributes(s, offset, align, /*defaultAlign*/ 1);
- return Builder(wasm).makePrefetch(op, offset, align, parseExpression(s[i]));
-}
-
Expression* SExpressionWasmBuilder::makeMemoryInit(Element& s) {
auto ret = allocator.alloc<MemoryInit>();
ret->segment = atoi(s[1]->str().c_str());
diff --git a/src/wasm/wasm-stack.cpp b/src/wasm/wasm-stack.cpp
index b1e9a75cc..87dfe8247 100644
--- a/src/wasm/wasm-stack.cpp
+++ b/src/wasm/wasm-stack.cpp
@@ -546,30 +546,6 @@ void BinaryInstWriter::visitSIMDTernary(SIMDTernary* curr) {
case Bitselect:
o << U32LEB(BinaryConsts::V128Bitselect);
break;
- case QFMAF32x4:
- o << U32LEB(BinaryConsts::F32x4QFMA);
- break;
- case QFMSF32x4:
- o << U32LEB(BinaryConsts::F32x4QFMS);
- break;
- case QFMAF64x2:
- o << U32LEB(BinaryConsts::F64x2QFMA);
- break;
- case QFMSF64x2:
- o << U32LEB(BinaryConsts::F64x2QFMS);
- break;
- case SignSelectVec8x16:
- o << U32LEB(BinaryConsts::V8x16SignSelect);
- break;
- case SignSelectVec16x8:
- o << U32LEB(BinaryConsts::V16x8SignSelect);
- break;
- case SignSelectVec32x4:
- o << U32LEB(BinaryConsts::V32x4SignSelect);
- break;
- case SignSelectVec64x2:
- o << U32LEB(BinaryConsts::V64x2SignSelect);
- break;
}
}
@@ -692,33 +668,6 @@ void BinaryInstWriter::visitSIMDLoadStoreLane(SIMDLoadStoreLane* curr) {
o << curr->index;
}
-void BinaryInstWriter::visitSIMDWiden(SIMDWiden* curr) {
- o << int8_t(BinaryConsts::SIMDPrefix);
- switch (curr->op) {
- case WidenSVecI8x16ToVecI32x4:
- o << U32LEB(BinaryConsts::I32x4WidenSI8x16);
- break;
- case WidenUVecI8x16ToVecI32x4:
- o << U32LEB(BinaryConsts::I32x4WidenUI8x16);
- break;
- }
- o << uint8_t(curr->index);
-}
-
-void BinaryInstWriter::visitPrefetch(Prefetch* curr) {
- o << int8_t(BinaryConsts::SIMDPrefix);
- switch (curr->op) {
- case PrefetchTemporal:
- o << U32LEB(BinaryConsts::PrefetchT);
- break;
- case PrefetchNontemporal:
- o << U32LEB(BinaryConsts::PrefetchNT);
- break;
- }
- assert(curr->align);
- emitMemoryAccess(curr->align, /*(unused) bytes=*/0, curr->offset);
-}
-
void BinaryInstWriter::visitMemoryInit(MemoryInit* curr) {
o << int8_t(BinaryConsts::MiscPrefix);
o << U32LEB(BinaryConsts::MemoryInit);
@@ -992,16 +941,16 @@ void BinaryInstWriter::visitUnary(Unary* curr) {
case NotVec128:
o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::V128Not);
break;
+ case AnyTrueVec128:
+ o << int8_t(BinaryConsts::SIMDPrefix)
+ << U32LEB(BinaryConsts::V128AnyTrue);
+ break;
case AbsVecI8x16:
o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16Abs);
break;
case NegVecI8x16:
o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16Neg);
break;
- case AnyTrueVecI8x16:
- o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::I8x16AnyTrue);
- break;
case AllTrueVecI8x16:
o << int8_t(BinaryConsts::SIMDPrefix)
<< U32LEB(BinaryConsts::I8x16AllTrue);
@@ -1020,10 +969,6 @@ void BinaryInstWriter::visitUnary(Unary* curr) {
case NegVecI16x8:
o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I16x8Neg);
break;
- case AnyTrueVecI16x8:
- o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::I16x8AnyTrue);
- break;
case AllTrueVecI16x8:
o << int8_t(BinaryConsts::SIMDPrefix)
<< U32LEB(BinaryConsts::I16x8AllTrue);
@@ -1038,10 +983,6 @@ void BinaryInstWriter::visitUnary(Unary* curr) {
case NegVecI32x4:
o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4Neg);
break;
- case AnyTrueVecI32x4:
- o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::I32x4AnyTrue);
- break;
case AllTrueVecI32x4:
o << int8_t(BinaryConsts::SIMDPrefix)
<< U32LEB(BinaryConsts::I32x4AllTrue);
@@ -1050,9 +991,16 @@ void BinaryInstWriter::visitUnary(Unary* curr) {
o << int8_t(BinaryConsts::SIMDPrefix)
<< U32LEB(BinaryConsts::I32x4Bitmask);
break;
+ case AbsVecI64x2:
+ o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I64x2Abs);
+ break;
case NegVecI64x2:
o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I64x2Neg);
break;
+ case AllTrueVecI64x2:
+ o << int8_t(BinaryConsts::SIMDPrefix)
+ << U32LEB(BinaryConsts::I64x2AllTrue);
+ break;
case BitmaskVecI64x2:
o << int8_t(BinaryConsts::SIMDPrefix)
<< U32LEB(BinaryConsts::I64x2Bitmask);
@@ -1125,14 +1073,6 @@ void BinaryInstWriter::visitUnary(Unary* curr) {
o << int8_t(BinaryConsts::SIMDPrefix)
<< U32LEB(BinaryConsts::I32x4TruncSatUF32x4);
break;
- case TruncSatSVecF64x2ToVecI64x2:
- o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::I64x2TruncSatSF64x2);
- break;
- case TruncSatUVecF64x2ToVecI64x2:
- o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::I64x2TruncSatUF64x2);
- break;
case ConvertSVecI32x4ToVecF32x4:
o << int8_t(BinaryConsts::SIMDPrefix)
<< U32LEB(BinaryConsts::F32x4ConvertSI32x4);
@@ -1141,61 +1081,53 @@ void BinaryInstWriter::visitUnary(Unary* curr) {
o << int8_t(BinaryConsts::SIMDPrefix)
<< U32LEB(BinaryConsts::F32x4ConvertUI32x4);
break;
- case ConvertSVecI64x2ToVecF64x2:
- o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::F64x2ConvertSI64x2);
- break;
- case ConvertUVecI64x2ToVecF64x2:
- o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::F64x2ConvertUI64x2);
- break;
- case WidenLowSVecI8x16ToVecI16x8:
+ case ExtendLowSVecI8x16ToVecI16x8:
o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::I16x8WidenLowSI8x16);
+ << U32LEB(BinaryConsts::I16x8ExtendLowSI8x16);
break;
- case WidenHighSVecI8x16ToVecI16x8:
+ case ExtendHighSVecI8x16ToVecI16x8:
o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::I16x8WidenHighSI8x16);
+ << U32LEB(BinaryConsts::I16x8ExtendHighSI8x16);
break;
- case WidenLowUVecI8x16ToVecI16x8:
+ case ExtendLowUVecI8x16ToVecI16x8:
o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::I16x8WidenLowUI8x16);
+ << U32LEB(BinaryConsts::I16x8ExtendLowUI8x16);
break;
- case WidenHighUVecI8x16ToVecI16x8:
+ case ExtendHighUVecI8x16ToVecI16x8:
o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::I16x8WidenHighUI8x16);
+ << U32LEB(BinaryConsts::I16x8ExtendHighUI8x16);
break;
- case WidenLowSVecI16x8ToVecI32x4:
+ case ExtendLowSVecI16x8ToVecI32x4:
o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::I32x4WidenLowSI16x8);
+ << U32LEB(BinaryConsts::I32x4ExtendLowSI16x8);
break;
- case WidenHighSVecI16x8ToVecI32x4:
+ case ExtendHighSVecI16x8ToVecI32x4:
o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::I32x4WidenHighSI16x8);
+ << U32LEB(BinaryConsts::I32x4ExtendHighSI16x8);
break;
- case WidenLowUVecI16x8ToVecI32x4:
+ case ExtendLowUVecI16x8ToVecI32x4:
o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::I32x4WidenLowUI16x8);
+ << U32LEB(BinaryConsts::I32x4ExtendLowUI16x8);
break;
- case WidenHighUVecI16x8ToVecI32x4:
+ case ExtendHighUVecI16x8ToVecI32x4:
o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::I32x4WidenHighUI16x8);
+ << U32LEB(BinaryConsts::I32x4ExtendHighUI16x8);
break;
- case WidenLowSVecI32x4ToVecI64x2:
+ case ExtendLowSVecI32x4ToVecI64x2:
o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::I64x2WidenLowSI32x4);
+ << U32LEB(BinaryConsts::I64x2ExtendLowSI32x4);
break;
- case WidenHighSVecI32x4ToVecI64x2:
+ case ExtendHighSVecI32x4ToVecI64x2:
o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::I64x2WidenHighSI32x4);
+ << U32LEB(BinaryConsts::I64x2ExtendHighSI32x4);
break;
- case WidenLowUVecI32x4ToVecI64x2:
+ case ExtendLowUVecI32x4ToVecI64x2:
o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::I64x2WidenLowUI32x4);
+ << U32LEB(BinaryConsts::I64x2ExtendLowUI32x4);
break;
- case WidenHighUVecI32x4ToVecI64x2:
+ case ExtendHighUVecI32x4ToVecI64x2:
o << int8_t(BinaryConsts::SIMDPrefix)
- << U32LEB(BinaryConsts::I64x2WidenHighUI32x4);
+ << U32LEB(BinaryConsts::I64x2ExtendHighUI32x4);
break;
case ConvertLowSVecI32x4ToVecF64x2:
o << int8_t(BinaryConsts::SIMDPrefix)
@@ -1553,6 +1485,21 @@ void BinaryInstWriter::visitBinary(Binary* curr) {
case EqVecI64x2:
o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I64x2Eq);
break;
+ case NeVecI64x2:
+ o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I64x2Ne);
+ break;
+ case LtSVecI64x2:
+ o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I64x2LtS);
+ break;
+ case GtSVecI64x2:
+ o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I64x2GtS);
+ break;
+ case LeSVecI64x2:
+ o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I64x2LeS);
+ break;
+ case GeSVecI64x2:
+ o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I64x2GeS);
+ break;
case EqVecF32x4:
o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Eq);
break;
@@ -1623,9 +1570,6 @@ void BinaryInstWriter::visitBinary(Binary* curr) {
o << int8_t(BinaryConsts::SIMDPrefix)
<< U32LEB(BinaryConsts::I8x16SubSatU);
break;
- case MulVecI8x16:
- o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16Mul);
- break;
case MinSVecI8x16:
o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16MinS);
break;
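Every writer case above follows the same shape: one SIMD prefix byte, then the instruction's opcode as an unsigned LEB128. A minimal sketch of that encoding; the 0xFD prefix value is assumed here for illustration rather than read out of BinaryConsts:

#include <cstdint>
#include <vector>

// Append one SIMD opcode to an output buffer: prefix byte, then the opcode
// as unsigned LEB128 (7 bits per byte, high bit set on all but the last).
void emitSIMDOpcode(std::vector<uint8_t>& out, uint32_t opcode) {
  out.push_back(0xFD); // SIMD prefix (assumed value for illustration)
  do {
    uint8_t byte = opcode & 0x7F;
    opcode >>= 7;
    if (opcode != 0) {
      byte |= 0x80;
    }
    out.push_back(byte);
  } while (opcode != 0);
}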
diff --git a/src/wasm/wasm-validator.cpp b/src/wasm/wasm-validator.cpp
index 8bc12cadf..a8399bfdf 100644
--- a/src/wasm/wasm-validator.cpp
+++ b/src/wasm/wasm-validator.cpp
@@ -1137,7 +1137,7 @@ void FunctionValidator::visitSIMDShuffle(SIMDShuffle* curr) {
shouldBeTrue(
getModule()->features.hasSIMD(), curr, "SIMD operation (SIMD is disabled)");
shouldBeEqualOrFirstIsUnreachable(
- curr->type, Type(Type::v128), curr, "v128.shuffle must have type v128");
+ curr->type, Type(Type::v128), curr, "i8x16.shuffle must have type v128");
shouldBeEqualOrFirstIsUnreachable(
curr->left->type, Type(Type::v128), curr, "expected operand of type v128");
shouldBeEqualOrFirstIsUnreachable(
@@ -1524,6 +1524,11 @@ void FunctionValidator::visitBinary(Binary* curr) {
case GeSVecI32x4:
case GeUVecI32x4:
case EqVecI64x2:
+ case NeVecI64x2:
+ case LtSVecI64x2:
+ case LeSVecI64x2:
+ case GtSVecI64x2:
+ case GeSVecI64x2:
case EqVecF32x4:
case NeVecF32x4:
case LtVecF32x4:
@@ -1546,7 +1551,6 @@ void FunctionValidator::visitBinary(Binary* curr) {
case SubVecI8x16:
case SubSatSVecI8x16:
case SubSatUVecI8x16:
- case MulVecI8x16:
case MinSVecI8x16:
case MinUVecI8x16:
case MaxSVecI8x16:
@@ -1836,6 +1840,7 @@ void FunctionValidator::visitUnary(Unary* curr) {
case AbsVecI8x16:
case AbsVecI16x8:
case AbsVecI32x4:
+ case AbsVecI64x2:
case NegVecI8x16:
case NegVecI16x8:
case NegVecI32x4:
@@ -1860,24 +1865,20 @@ void FunctionValidator::visitUnary(Unary* curr) {
case ExtAddPairwiseUVecI16x8ToI32x4:
case TruncSatSVecF32x4ToVecI32x4:
case TruncSatUVecF32x4ToVecI32x4:
- case TruncSatSVecF64x2ToVecI64x2:
- case TruncSatUVecF64x2ToVecI64x2:
case ConvertSVecI32x4ToVecF32x4:
case ConvertUVecI32x4ToVecF32x4:
- case ConvertSVecI64x2ToVecF64x2:
- case ConvertUVecI64x2ToVecF64x2:
- case WidenLowSVecI8x16ToVecI16x8:
- case WidenHighSVecI8x16ToVecI16x8:
- case WidenLowUVecI8x16ToVecI16x8:
- case WidenHighUVecI8x16ToVecI16x8:
- case WidenLowSVecI16x8ToVecI32x4:
- case WidenHighSVecI16x8ToVecI32x4:
- case WidenLowUVecI16x8ToVecI32x4:
- case WidenHighUVecI16x8ToVecI32x4:
- case WidenLowSVecI32x4ToVecI64x2:
- case WidenHighSVecI32x4ToVecI64x2:
- case WidenLowUVecI32x4ToVecI64x2:
- case WidenHighUVecI32x4ToVecI64x2:
+ case ExtendLowSVecI8x16ToVecI16x8:
+ case ExtendHighSVecI8x16ToVecI16x8:
+ case ExtendLowUVecI8x16ToVecI16x8:
+ case ExtendHighUVecI8x16ToVecI16x8:
+ case ExtendLowSVecI16x8ToVecI32x4:
+ case ExtendHighSVecI16x8ToVecI32x4:
+ case ExtendLowUVecI16x8ToVecI32x4:
+ case ExtendHighUVecI16x8ToVecI32x4:
+ case ExtendLowSVecI32x4ToVecI64x2:
+ case ExtendHighSVecI32x4ToVecI64x2:
+ case ExtendLowUVecI32x4ToVecI64x2:
+ case ExtendHighUVecI32x4ToVecI64x2:
case ConvertLowSVecI32x4ToVecF64x2:
case ConvertLowUVecI32x4ToVecF64x2:
case TruncSatZeroSVecF64x2ToVecI32x4:
@@ -1888,12 +1889,11 @@ void FunctionValidator::visitUnary(Unary* curr) {
shouldBeEqual(
curr->value->type, Type(Type::v128), curr, "expected v128 operand");
break;
- case AnyTrueVecI8x16:
- case AnyTrueVecI16x8:
- case AnyTrueVecI32x4:
+ case AnyTrueVec128:
case AllTrueVecI8x16:
case AllTrueVecI16x8:
case AllTrueVecI32x4:
+ case AllTrueVecI64x2:
case BitmaskVecI8x16:
case BitmaskVecI16x8:
case BitmaskVecI32x4:
diff --git a/src/wasm/wasm.cpp b/src/wasm/wasm.cpp
index 2ccd9a70a..b227ff99f 100644
--- a/src/wasm/wasm.cpp
+++ b/src/wasm/wasm.cpp
@@ -499,11 +499,6 @@ void SIMDLoadStoreLane::finalize() {
}
}
-void SIMDWiden::finalize() {
- assert(vec);
- type = vec->type == Type::unreachable ? Type::unreachable : Type::v128;
-}
-
Index SIMDLoadStoreLane::getMemBytes() {
switch (op) {
case LoadLaneVec8x16:
@@ -538,10 +533,6 @@ bool SIMDLoadStoreLane::isStore() {
WASM_UNREACHABLE("unexpected op");
}
-void Prefetch::finalize() {
- type = ptr->type == Type::unreachable ? Type::unreachable : Type::none;
-}
-
Const* Const::set(Literal value_) {
value = value_;
type = value.type;
@@ -650,6 +641,7 @@ void Unary::finalize() {
case AbsVecI8x16:
case AbsVecI16x8:
case AbsVecI32x4:
+ case AbsVecI64x2:
case PopcntVecI8x16:
case NegVecI8x16:
case NegVecI16x8:
@@ -675,24 +667,20 @@ void Unary::finalize() {
case ExtAddPairwiseUVecI16x8ToI32x4:
case TruncSatSVecF32x4ToVecI32x4:
case TruncSatUVecF32x4ToVecI32x4:
- case TruncSatSVecF64x2ToVecI64x2:
- case TruncSatUVecF64x2ToVecI64x2:
case ConvertSVecI32x4ToVecF32x4:
case ConvertUVecI32x4ToVecF32x4:
- case ConvertSVecI64x2ToVecF64x2:
- case ConvertUVecI64x2ToVecF64x2:
- case WidenLowSVecI8x16ToVecI16x8:
- case WidenHighSVecI8x16ToVecI16x8:
- case WidenLowUVecI8x16ToVecI16x8:
- case WidenHighUVecI8x16ToVecI16x8:
- case WidenLowSVecI16x8ToVecI32x4:
- case WidenHighSVecI16x8ToVecI32x4:
- case WidenLowUVecI16x8ToVecI32x4:
- case WidenHighUVecI16x8ToVecI32x4:
- case WidenLowSVecI32x4ToVecI64x2:
- case WidenHighSVecI32x4ToVecI64x2:
- case WidenLowUVecI32x4ToVecI64x2:
- case WidenHighUVecI32x4ToVecI64x2:
+ case ExtendLowSVecI8x16ToVecI16x8:
+ case ExtendHighSVecI8x16ToVecI16x8:
+ case ExtendLowUVecI8x16ToVecI16x8:
+ case ExtendHighUVecI8x16ToVecI16x8:
+ case ExtendLowSVecI16x8ToVecI32x4:
+ case ExtendHighSVecI16x8ToVecI32x4:
+ case ExtendLowUVecI16x8ToVecI32x4:
+ case ExtendHighUVecI16x8ToVecI32x4:
+ case ExtendLowSVecI32x4ToVecI64x2:
+ case ExtendHighSVecI32x4ToVecI64x2:
+ case ExtendLowUVecI32x4ToVecI64x2:
+ case ExtendHighUVecI32x4ToVecI64x2:
case ConvertLowSVecI32x4ToVecF64x2:
case ConvertLowUVecI32x4ToVecF64x2:
case TruncSatZeroSVecF64x2ToVecI32x4:
@@ -701,12 +689,11 @@ void Unary::finalize() {
case PromoteLowVecF32x4ToVecF64x2:
type = Type::v128;
break;
- case AnyTrueVecI8x16:
- case AnyTrueVecI16x8:
- case AnyTrueVecI32x4:
+ case AnyTrueVec128:
case AllTrueVecI8x16:
case AllTrueVecI16x8:
case AllTrueVecI32x4:
+ case AllTrueVecI64x2:
case BitmaskVecI8x16:
case BitmaskVecI16x8:
case BitmaskVecI32x4:
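The finalize cases above group the new AnyTrueVec128 and AllTrueVecI64x2 ops with the other boolean reductions, which produce a scalar i32 rather than a v128. A scalar sketch of the two reductions (illustrative only, not the interpreter's code):

#include <array>
#include <cstdint>

// Scalar model of v128.any_true: 1 if any bit of the vector is set.
int32_t anyTrueV128(const std::array<uint8_t, 16>& bytes) {
  for (uint8_t b : bytes) {
    if (b != 0) {
      return 1;
    }
  }
  return 0;
}

// Scalar model of i64x2.all_true: 1 only if every lane is non-zero.
int32_t allTrueI64x2(const std::array<uint64_t, 2>& lanes) {
  return (lanes[0] != 0 && lanes[1] != 0) ? 1 : 0;
}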
diff --git a/src/wasm2js.h b/src/wasm2js.h
index 6d3f0682c..9a1a93b2b 100644
--- a/src/wasm2js.h
+++ b/src/wasm2js.h
@@ -1988,8 +1988,6 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m,
}
Ref visitNop(Nop* curr) { return ValueBuilder::makeToplevel(); }
- Ref visitPrefetch(Prefetch* curr) { return ValueBuilder::makeToplevel(); }
-
Ref visitUnreachable(Unreachable* curr) {
return ValueBuilder::makeCall(ABORT_FUNC);
}
@@ -2124,10 +2122,6 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m,
unimplemented(curr);
WASM_UNREACHABLE("unimp");
}
- Ref visitSIMDWiden(SIMDWiden* curr) {
- unimplemented(curr);
- WASM_UNREACHABLE("unimp");
- }
Ref visitMemoryInit(MemoryInit* curr) {
ABI::wasm2js::ensureHelpers(module, ABI::wasm2js::MEMORY_INIT);
return ValueBuilder::makeCall(ABI::wasm2js::MEMORY_INIT,
diff --git a/test/binaryen.js/exception-handling.js.txt b/test/binaryen.js/exception-handling.js.txt
index 434546126..386abfb4d 100644
--- a/test/binaryen.js/exception-handling.js.txt
+++ b/test/binaryen.js/exception-handling.js.txt
@@ -34,7 +34,7 @@
)
)
-getExpressionInfo(throw) = {"id":48,"type":1,"event":"e"}
-getExpressionInfo(rethrow) = {"id":49,"type":1,"target":"l0"}
-getExpressionInfo(try_catch) = {"id":47,"type":1,"name":"l0","hasCatchAll":0,"delegateTarget":"","isDelegate":0}
-getExpressionInfo(try_delegate) = {"id":47,"type":0,"name":"try_outer","hasCatchAll":1,"delegateTarget":"","isDelegate":0}
+getExpressionInfo(throw) = {"id":46,"type":1,"event":"e"}
+getExpressionInfo(rethrow) = {"id":47,"type":1,"target":"l0"}
+getExpressionInfo(try_catch) = {"id":45,"type":1,"name":"l0","hasCatchAll":0,"delegateTarget":"","isDelegate":0}
+getExpressionInfo(try_delegate) = {"id":45,"type":0,"name":"try_outer","hasCatchAll":1,"delegateTarget":"","isDelegate":0}
diff --git a/test/binaryen.js/expressions.js b/test/binaryen.js/expressions.js
index d6256cec1..68f5c8cee 100644
--- a/test/binaryen.js/expressions.js
+++ b/test/binaryen.js/expressions.js
@@ -1079,7 +1079,7 @@ console.log("# SIMDShuffle");
var left = module.v128.const([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]);
var right = module.v128.const([2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]);
var mask = [3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18];
- const theSIMDShuffle = binaryen.SIMDShuffle(module.v8x16.shuffle(left, right, mask));
+ const theSIMDShuffle = binaryen.SIMDShuffle(module.i8x16.shuffle(left, right, mask));
assert(theSIMDShuffle instanceof binaryen.SIMDShuffle);
assert(theSIMDShuffle instanceof binaryen.Expression);
assert(theSIMDShuffle.left === left);
@@ -1101,7 +1101,7 @@ console.log("# SIMDShuffle");
assert(
theSIMDShuffle.toText()
==
- "(v8x16.shuffle 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3\n (v128.const i32x4 0x01010101 0x01010101 0x01010101 0x01010101)\n (v128.const i32x4 0x02020202 0x02020202 0x02020202 0x02020202)\n)\n"
+ "(i8x16.shuffle 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3\n (v128.const i32x4 0x01010101 0x01010101 0x01010101 0x01010101)\n (v128.const i32x4 0x02020202 0x02020202 0x02020202 0x02020202)\n)\n"
);
module.dispose();
@@ -1124,23 +1124,11 @@ console.log("# SIMDTernary");
assert(theSIMDTernary.c === c);
assert(theSIMDTernary.type === binaryen.v128);
- theSIMDTernary.op = op = binaryen.Operations.QFMAVecF64x2;
- assert(theSIMDTernary.op === op);
- theSIMDTernary.a = a = module.v128.const([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]);
- assert(theSIMDTernary.a === a);
- theSIMDTernary.b = b = module.v128.const([2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]);
- assert(theSIMDTernary.b === b);
- theSIMDTernary.c = c = module.v128.const([3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]);
- assert(theSIMDTernary.c === c);
- theSIMDTernary.type = binaryen.f64;
- theSIMDTernary.finalize();
- assert(theSIMDTernary.type === binaryen.v128);
-
- console.log(theSIMDTernary.toText());
+ console.log(theSIMDTernary.toText() + "\n");
assert(
theSIMDTernary.toText()
==
- "(f64x2.qfma\n (v128.const i32x4 0x01010101 0x01010101 0x01010101 0x01010101)\n (v128.const i32x4 0x02020202 0x02020202 0x02020202 0x02020202)\n (v128.const i32x4 0x03030303 0x03030303 0x03030303 0x03030303)\n)\n"
+ "(v128.bitselect\n (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)\n (v128.const i32x4 0x05040302 0x09080706 0x0d0c0b0a 0x11100f0e)\n (v128.const i32x4 0x06050403 0x0a090807 0x0e0d0c0b 0x1211100f)\n)\n"
);
module.dispose();
@@ -1189,7 +1177,7 @@ console.log("# SIMDLoad");
var offset = 16;
var align = 2;
var ptr = module.i32.const(1);
- const theSIMDLoad = binaryen.SIMDLoad(module.i16x8.load8x8_s(offset, align, ptr));
+ const theSIMDLoad = binaryen.SIMDLoad(module.v128.load8x8_s(offset, align, ptr));
assert(theSIMDLoad instanceof binaryen.SIMDLoad);
assert(theSIMDLoad instanceof binaryen.Expression);
assert(theSIMDLoad.offset === offset);
@@ -1213,7 +1201,7 @@ console.log("# SIMDLoad");
assert(
theSIMDLoad.toText()
==
- "(v8x16.load_splat offset=32 align=4\n (i32.const 2)\n)\n"
+ "(v128.load8_splat offset=32 align=4\n (i32.const 2)\n)\n"
);
module.dispose();
diff --git a/test/binaryen.js/expressions.js.txt b/test/binaryen.js/expressions.js.txt
index 0467e9332..a3e877b0f 100644
--- a/test/binaryen.js/expressions.js.txt
+++ b/test/binaryen.js/expressions.js.txt
@@ -155,18 +155,19 @@
)
# SIMDShuffle
-(v8x16.shuffle 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
+(i8x16.shuffle 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
(v128.const i32x4 0x01010101 0x01010101 0x01010101 0x01010101)
(v128.const i32x4 0x02020202 0x02020202 0x02020202 0x02020202)
)
# SIMDTernary
-(f64x2.qfma
- (v128.const i32x4 0x01010101 0x01010101 0x01010101 0x01010101)
- (v128.const i32x4 0x02020202 0x02020202 0x02020202 0x02020202)
- (v128.const i32x4 0x03030303 0x03030303 0x03030303 0x03030303)
+(v128.bitselect
+ (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
+ (v128.const i32x4 0x05040302 0x09080706 0x0d0c0b0a 0x11100f0e)
+ (v128.const i32x4 0x06050403 0x0a090807 0x0e0d0c0b 0x1211100f)
)
+
# SIMDShift
(i8x16.shr_s
(v128.const i32x4 0x01010101 0x01010101 0x01010101 0x01010101)
@@ -174,7 +175,7 @@
)
# SIMDLoad
-(v8x16.load_splat offset=32 align=4
+(v128.load8_splat offset=32 align=4
(i32.const 2)
)
diff --git a/test/binaryen.js/kitchen-sink.js b/test/binaryen.js/kitchen-sink.js
index 084a7d47b..cb2cd32ff 100644
--- a/test/binaryen.js/kitchen-sink.js
+++ b/test/binaryen.js/kitchen-sink.js
@@ -265,19 +265,17 @@ function test_core() {
module.f32x4.splat(module.f32.const(42.0)),
module.f64x2.splat(module.f64.const(42.0)),
module.v128.not(module.v128.const(v128_bytes)),
+ module.v128.any_true(module.v128.const(v128_bytes)),
module.i8x16.abs(module.v128.const(v128_bytes)),
module.i8x16.neg(module.v128.const(v128_bytes)),
- module.i8x16.any_true(module.v128.const(v128_bytes)),
module.i8x16.all_true(module.v128.const(v128_bytes)),
module.i8x16.bitmask(module.v128.const(v128_bytes)),
module.i16x8.abs(module.v128.const(v128_bytes)),
module.i16x8.neg(module.v128.const(v128_bytes)),
- module.i16x8.any_true(module.v128.const(v128_bytes)),
module.i16x8.all_true(module.v128.const(v128_bytes)),
module.i16x8.bitmask(module.v128.const(v128_bytes)),
module.i32x4.abs(module.v128.const(v128_bytes)),
module.i32x4.neg(module.v128.const(v128_bytes)),
- module.i32x4.any_true(module.v128.const(v128_bytes)),
module.i32x4.all_true(module.v128.const(v128_bytes)),
module.i32x4.bitmask(module.v128.const(v128_bytes)),
module.i64x2.neg(module.v128.const(v128_bytes)),
@@ -289,20 +287,16 @@ function test_core() {
module.f64x2.sqrt(module.v128.const(v128_bytes)),
module.i32x4.trunc_sat_f32x4_s(module.v128.const(v128_bytes)),
module.i32x4.trunc_sat_f32x4_u(module.v128.const(v128_bytes)),
- module.i64x2.trunc_sat_f64x2_s(module.v128.const(v128_bytes)),
- module.i64x2.trunc_sat_f64x2_u(module.v128.const(v128_bytes)),
module.f32x4.convert_i32x4_s(module.v128.const(v128_bytes)),
module.f32x4.convert_i32x4_u(module.v128.const(v128_bytes)),
- module.f64x2.convert_i64x2_s(module.v128.const(v128_bytes)),
- module.f64x2.convert_i64x2_u(module.v128.const(v128_bytes)),
- module.i16x8.widen_low_i8x16_s(module.v128.const(v128_bytes)),
- module.i16x8.widen_high_i8x16_s(module.v128.const(v128_bytes)),
- module.i16x8.widen_low_i8x16_u(module.v128.const(v128_bytes)),
- module.i16x8.widen_high_i8x16_u(module.v128.const(v128_bytes)),
- module.i32x4.widen_low_i16x8_s(module.v128.const(v128_bytes)),
- module.i32x4.widen_high_i16x8_s(module.v128.const(v128_bytes)),
- module.i32x4.widen_low_i16x8_u(module.v128.const(v128_bytes)),
- module.i32x4.widen_high_i16x8_u(module.v128.const(v128_bytes)),
+ module.i16x8.extend_low_i8x16_s(module.v128.const(v128_bytes)),
+ module.i16x8.extend_high_i8x16_s(module.v128.const(v128_bytes)),
+ module.i16x8.extend_low_i8x16_u(module.v128.const(v128_bytes)),
+ module.i16x8.extend_high_i8x16_u(module.v128.const(v128_bytes)),
+ module.i32x4.extend_low_i16x8_s(module.v128.const(v128_bytes)),
+ module.i32x4.extend_high_i16x8_s(module.v128.const(v128_bytes)),
+ module.i32x4.extend_low_i16x8_u(module.v128.const(v128_bytes)),
+ module.i32x4.extend_high_i16x8_u(module.v128.const(v128_bytes)),
// Binary
module.i32.add(module.i32.const(-10), module.i32.const(-11)),
module.f64.sub(module.f64.const(-9005.841), module.f64.const(-9007.333)),
@@ -388,7 +382,6 @@ function test_core() {
module.i8x16.sub(module.v128.const(v128_bytes), module.v128.const(v128_bytes)),
module.i8x16.sub_saturate_s(module.v128.const(v128_bytes), module.v128.const(v128_bytes)),
module.i8x16.sub_saturate_u(module.v128.const(v128_bytes), module.v128.const(v128_bytes)),
- module.i8x16.mul(module.v128.const(v128_bytes), module.v128.const(v128_bytes)),
module.i8x16.min_s(module.v128.const(v128_bytes), module.v128.const(v128_bytes)),
module.i8x16.min_u(module.v128.const(v128_bytes), module.v128.const(v128_bytes)),
module.i8x16.max_s(module.v128.const(v128_bytes), module.v128.const(v128_bytes)),
@@ -445,7 +438,7 @@ function test_core() {
module.i8x16.narrow_i16x8_u(module.v128.const(v128_bytes), module.v128.const(v128_bytes)),
module.i16x8.narrow_i32x4_s(module.v128.const(v128_bytes), module.v128.const(v128_bytes)),
module.i16x8.narrow_i32x4_u(module.v128.const(v128_bytes), module.v128.const(v128_bytes)),
- module.v8x16.swizzle(module.v128.const(v128_bytes), module.v128.const(v128_bytes)),
+ module.i8x16.swizzle(module.v128.const(v128_bytes), module.v128.const(v128_bytes)),
// SIMD lane manipulation
module.i8x16.extract_lane_s(module.v128.const(v128_bytes), 1),
module.i8x16.extract_lane_u(module.v128.const(v128_bytes), 1),
@@ -475,23 +468,19 @@ function test_core() {
module.i64x2.shr_s(module.v128.const(v128_bytes), module.i32.const(1)),
module.i64x2.shr_u(module.v128.const(v128_bytes), module.i32.const(1)),
// SIMD load
- module.v8x16.load_splat(0, 1, module.i32.const(128)),
- module.v16x8.load_splat(16, 1, module.i32.const(128)),
- module.v32x4.load_splat(16, 4, module.i32.const(128)),
- module.v64x2.load_splat(0, 4, module.i32.const(128)),
- module.i16x8.load8x8_s(0, 8, module.i32.const(128)),
- module.i16x8.load8x8_u(0, 8, module.i32.const(128)),
- module.i32x4.load16x4_s(0, 8, module.i32.const(128)),
- module.i32x4.load16x4_u(0, 8, module.i32.const(128)),
- module.i64x2.load32x2_s(0, 8, module.i32.const(128)),
- module.i64x2.load32x2_u(0, 8, module.i32.const(128)),
+ module.v128.load8_splat(0, 1, module.i32.const(128)),
+ module.v128.load16_splat(16, 1, module.i32.const(128)),
+ module.v128.load32_splat(16, 4, module.i32.const(128)),
+ module.v128.load64_splat(0, 4, module.i32.const(128)),
+ module.v128.load8x8_s(0, 8, module.i32.const(128)),
+ module.v128.load8x8_u(0, 8, module.i32.const(128)),
+ module.v128.load16x4_s(0, 8, module.i32.const(128)),
+ module.v128.load16x4_u(0, 8, module.i32.const(128)),
+ module.v128.load32x2_s(0, 8, module.i32.const(128)),
+ module.v128.load32x2_u(0, 8, module.i32.const(128)),
// Other SIMD
- module.v8x16.shuffle(module.v128.const(v128_bytes), module.v128.const(v128_bytes), v128_bytes),
+ module.i8x16.shuffle(module.v128.const(v128_bytes), module.v128.const(v128_bytes), v128_bytes),
module.v128.bitselect(module.v128.const(v128_bytes), module.v128.const(v128_bytes), module.v128.const(v128_bytes)),
- module.f32x4.qfma(module.v128.const(v128_bytes), module.v128.const(v128_bytes), module.v128.const(v128_bytes)),
- module.f32x4.qfms(module.v128.const(v128_bytes), module.v128.const(v128_bytes), module.v128.const(v128_bytes)),
- module.f64x2.qfma(module.v128.const(v128_bytes), module.v128.const(v128_bytes), module.v128.const(v128_bytes)),
- module.f64x2.qfms(module.v128.const(v128_bytes), module.v128.const(v128_bytes), module.v128.const(v128_bytes)),
// Bulk memory
module.memory.init(0, makeInt32(1024), makeInt32(0), makeInt32(12)),
module.data.drop(0),
@@ -676,10 +665,10 @@ function test_core() {
assert(table.base === "");
assert(table.initial === 0);
assert(table.max === 2);
-
+
module.removeTable("t1");
assert(module.getNumTables() === 0);
-
+
module.addTable("t0", 1, 0xffffffff);
module.addActiveElementSegment("t0", "e0", [ binaryen.getFunctionInfo(sinker).name ]);
assert(module.getNumTables() === 1);
diff --git a/test/binaryen.js/kitchen-sink.js.txt b/test/binaryen.js/kitchen-sink.js.txt
index a8ea4eccf..9d97b6523 100644
--- a/test/binaryen.js/kitchen-sink.js.txt
+++ b/test/binaryen.js/kitchen-sink.js.txt
@@ -66,46 +66,46 @@ ReturnId: 19
MemorySizeId: 20
MemoryGrowId: 21
NopId: 22
-UnreachableId: 24
-AtomicCmpxchgId: 26
-AtomicRMWId: 25
-AtomicWaitId: 27
-AtomicNotifyId: 28
-SIMDExtractId: 30
-SIMDReplaceId: 31
-SIMDShuffleId: 32
-SIMDTernaryId: 33
-SIMDShiftId: 34
-SIMDLoadId: 35
-MemoryInitId: 38
-DataDropId: 39
-MemoryCopyId: 40
-MemoryFillId: 41
-PopId: 42
-RefNullId: 43
-RefIsId: 44
-RefFuncId: 45
-RefEqId: 46
-TryId: 47
-ThrowId: 48
-RethrowId: 49
-TupleMakeId: 50
-TupleExtractId: 51
-I31NewId: 52
-I31GetId: 53
-CallRefId: 54
-RefTestId: 55
-RefCastId: 56
-BrOnId: 57
-RttCanonId: 58
-RttSubId: 59
-StructNewId: 60
-StructGetId: 61
-StructSetId: 62
-ArrayNewId: 63
-ArrayGetId: 64
-ArraySetId: 65
-ArrayLenId: 66
+UnreachableId: 23
+AtomicCmpxchgId: 25
+AtomicRMWId: 24
+AtomicWaitId: 26
+AtomicNotifyId: 27
+SIMDExtractId: 29
+SIMDReplaceId: 30
+SIMDShuffleId: 31
+SIMDTernaryId: 32
+SIMDShiftId: 33
+SIMDLoadId: 34
+MemoryInitId: 36
+DataDropId: 37
+MemoryCopyId: 38
+MemoryFillId: 39
+PopId: 40
+RefNullId: 41
+RefIsId: 42
+RefFuncId: 43
+RefEqId: 44
+TryId: 45
+ThrowId: 46
+RethrowId: 47
+TupleMakeId: 48
+TupleExtractId: 49
+I31NewId: 50
+I31GetId: 51
+CallRefId: 52
+RefTestId: 53
+RefCastId: 54
+BrOnId: 55
+RttCanonId: 56
+RttSubId: 57
+StructNewId: 58
+StructGetId: 59
+StructSetId: 60
+ArrayNewId: 61
+ArrayGetId: 62
+ArraySetId: 63
+ArrayLenId: 64
getExpressionInfo={"id":15,"type":4,"op":6}
(f32.neg
(f32.const -33.61199951171875)
@@ -402,17 +402,17 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (i8x16.abs
+ (v128.any_true
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i8x16.neg
+ (i8x16.abs
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i8x16.any_true
+ (i8x16.neg
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
@@ -437,11 +437,6 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (i16x8.any_true
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
(i16x8.all_true
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -462,11 +457,6 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (i32x4.any_true
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
(i32x4.all_true
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -522,16 +512,6 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (i64x2.trunc_sat_f64x2_s
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (i64x2.trunc_sat_f64x2_u
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
(f32x4.convert_i32x4_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -542,52 +522,42 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (f64x2.convert_i64x2_s
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (f64x2.convert_i64x2_u
+ (i16x8.extend_low_i8x16_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i16x8.widen_low_i8x16_s
+ (i16x8.extend_high_i8x16_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i16x8.widen_high_i8x16_s
+ (i16x8.extend_low_i8x16_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i16x8.widen_low_i8x16_u
+ (i16x8.extend_high_i8x16_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i16x8.widen_high_i8x16_u
+ (i32x4.extend_low_i16x8_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i32x4.widen_low_i16x8_s
+ (i32x4.extend_high_i16x8_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i32x4.widen_high_i16x8_s
+ (i32x4.extend_low_i16x8_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i32x4.widen_low_i16x8_u
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (i32x4.widen_high_i16x8_u
+ (i32x4.extend_high_i16x8_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
@@ -1066,13 +1036,13 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (i8x16.add_saturate_s
+ (i8x16.add_sat_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i8x16.add_saturate_u
+ (i8x16.add_sat_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -1084,19 +1054,13 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (i8x16.sub_saturate_s
+ (i8x16.sub_sat_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i8x16.sub_saturate_u
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (i8x16.mul
+ (i8x16.sub_sat_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -1138,13 +1102,13 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (i16x8.add_saturate_s
+ (i16x8.add_sat_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i16x8.add_saturate_u
+ (i16x8.add_sat_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -1156,13 +1120,13 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (i16x8.sub_saturate_s
+ (i16x8.sub_sat_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i16x8.sub_saturate_u
+ (i16x8.sub_sat_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -1430,7 +1394,7 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (v8x16.swizzle
+ (i8x16.swizzle
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -1584,57 +1548,57 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (v8x16.load_splat
+ (v128.load8_splat
(i32.const 128)
)
)
(drop
- (v16x8.load_splat offset=16 align=1
+ (v128.load16_splat offset=16 align=1
(i32.const 128)
)
)
(drop
- (v32x4.load_splat offset=16
+ (v128.load32_splat offset=16
(i32.const 128)
)
)
(drop
- (v64x2.load_splat align=4
+ (v128.load64_splat align=4
(i32.const 128)
)
)
(drop
- (i16x8.load8x8_s
+ (v128.load8x8_s
(i32.const 128)
)
)
(drop
- (i16x8.load8x8_u
+ (v128.load8x8_u
(i32.const 128)
)
)
(drop
- (i32x4.load16x4_s
+ (v128.load16x4_s
(i32.const 128)
)
)
(drop
- (i32x4.load16x4_u
+ (v128.load16x4_u
(i32.const 128)
)
)
(drop
- (i64x2.load32x2_s
+ (v128.load32x2_s
(i32.const 128)
)
)
(drop
- (i64x2.load32x2_u
+ (v128.load32x2_u
(i32.const 128)
)
)
(drop
- (v8x16.shuffle 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+ (i8x16.shuffle 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -1646,34 +1610,6 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
- (drop
- (f32x4.qfma
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (f32x4.qfms
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (f64x2.qfma
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (f64x2.qfms
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
(memory.init 0
(i32.const 1024)
(i32.const 0)
@@ -2264,17 +2200,17 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (i8x16.abs
+ (v128.any_true
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i8x16.neg
+ (i8x16.abs
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i8x16.any_true
+ (i8x16.neg
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
@@ -2299,11 +2235,6 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (i16x8.any_true
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
(i16x8.all_true
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -2324,11 +2255,6 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (i32x4.any_true
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
(i32x4.all_true
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -2384,16 +2310,6 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (i64x2.trunc_sat_f64x2_s
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (i64x2.trunc_sat_f64x2_u
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
(f32x4.convert_i32x4_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -2404,52 +2320,42 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (f64x2.convert_i64x2_s
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (f64x2.convert_i64x2_u
+ (i16x8.extend_low_i8x16_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i16x8.widen_low_i8x16_s
+ (i16x8.extend_high_i8x16_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i16x8.widen_high_i8x16_s
+ (i16x8.extend_low_i8x16_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i16x8.widen_low_i8x16_u
+ (i16x8.extend_high_i8x16_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i16x8.widen_high_i8x16_u
+ (i32x4.extend_low_i16x8_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i32x4.widen_low_i16x8_s
+ (i32x4.extend_high_i16x8_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i32x4.widen_high_i16x8_s
+ (i32x4.extend_low_i16x8_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i32x4.widen_low_i16x8_u
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (i32x4.widen_high_i16x8_u
+ (i32x4.extend_high_i16x8_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
@@ -2928,13 +2834,13 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (i8x16.add_saturate_s
+ (i8x16.add_sat_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i8x16.add_saturate_u
+ (i8x16.add_sat_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -2946,19 +2852,13 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (i8x16.sub_saturate_s
+ (i8x16.sub_sat_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i8x16.sub_saturate_u
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (i8x16.mul
+ (i8x16.sub_sat_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -3000,13 +2900,13 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (i16x8.add_saturate_s
+ (i16x8.add_sat_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i16x8.add_saturate_u
+ (i16x8.add_sat_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -3018,13 +2918,13 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (i16x8.sub_saturate_s
+ (i16x8.sub_sat_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i16x8.sub_saturate_u
+ (i16x8.sub_sat_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -3292,7 +3192,7 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (v8x16.swizzle
+ (i8x16.swizzle
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -3446,57 +3346,57 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
)
)
(drop
- (v8x16.load_splat
+ (v128.load8_splat
(i32.const 128)
)
)
(drop
- (v16x8.load_splat offset=16 align=1
+ (v128.load16_splat offset=16 align=1
(i32.const 128)
)
)
(drop
- (v32x4.load_splat offset=16
+ (v128.load32_splat offset=16
(i32.const 128)
)
)
(drop
- (v64x2.load_splat align=4
+ (v128.load64_splat align=4
(i32.const 128)
)
)
(drop
- (i16x8.load8x8_s
+ (v128.load8x8_s
(i32.const 128)
)
)
(drop
- (i16x8.load8x8_u
+ (v128.load8x8_u
(i32.const 128)
)
)
(drop
- (i32x4.load16x4_s
+ (v128.load16x4_s
(i32.const 128)
)
)
(drop
- (i32x4.load16x4_u
+ (v128.load16x4_u
(i32.const 128)
)
)
(drop
- (i64x2.load32x2_s
+ (v128.load32x2_s
(i32.const 128)
)
)
(drop
- (i64x2.load32x2_u
+ (v128.load32x2_u
(i32.const 128)
)
)
(drop
- (v8x16.shuffle 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+ (i8x16.shuffle 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -3508,34 +3408,6 @@ getExpressionInfo(tuple[3])={"id":14,"type":5,"value":3.7}
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
- (drop
- (f32x4.qfma
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (f32x4.qfms
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (f64x2.qfma
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (f64x2.qfms
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
(memory.init 0
(i32.const 1024)
(i32.const 0)
diff --git a/test/example/c-api-kitchen-sink.c b/test/example/c-api-kitchen-sink.c
index aa82723b4..e4e8c37b0 100644
--- a/test/example/c-api-kitchen-sink.c
+++ b/test/example/c-api-kitchen-sink.c
@@ -403,19 +403,17 @@ void test_core() {
makeUnary(module, BinaryenSplatVecF32x4(), f32),
makeUnary(module, BinaryenSplatVecF64x2(), f64),
makeUnary(module, BinaryenNotVec128(), v128),
+ makeUnary(module, BinaryenAnyTrueVec128(), v128),
makeUnary(module, BinaryenAbsVecI8x16(), v128),
makeUnary(module, BinaryenNegVecI8x16(), v128),
- makeUnary(module, BinaryenAnyTrueVecI8x16(), v128),
makeUnary(module, BinaryenAllTrueVecI8x16(), v128),
makeUnary(module, BinaryenBitmaskVecI8x16(), v128),
makeUnary(module, BinaryenAbsVecI16x8(), v128),
makeUnary(module, BinaryenNegVecI16x8(), v128),
- makeUnary(module, BinaryenAnyTrueVecI16x8(), v128),
makeUnary(module, BinaryenAllTrueVecI16x8(), v128),
makeUnary(module, BinaryenBitmaskVecI16x8(), v128),
makeUnary(module, BinaryenAbsVecI32x4(), v128),
makeUnary(module, BinaryenNegVecI32x4(), v128),
- makeUnary(module, BinaryenAnyTrueVecI32x4(), v128),
makeUnary(module, BinaryenAllTrueVecI32x4(), v128),
makeUnary(module, BinaryenBitmaskVecI32x4(), v128),
makeUnary(module, BinaryenNegVecI64x2(), v128),
@@ -427,20 +425,16 @@ void test_core() {
makeUnary(module, BinaryenSqrtVecF64x2(), v128),
makeUnary(module, BinaryenTruncSatSVecF32x4ToVecI32x4(), v128),
makeUnary(module, BinaryenTruncSatUVecF32x4ToVecI32x4(), v128),
- makeUnary(module, BinaryenTruncSatSVecF64x2ToVecI64x2(), v128),
- makeUnary(module, BinaryenTruncSatUVecF64x2ToVecI64x2(), v128),
makeUnary(module, BinaryenConvertSVecI32x4ToVecF32x4(), v128),
makeUnary(module, BinaryenConvertUVecI32x4ToVecF32x4(), v128),
- makeUnary(module, BinaryenConvertSVecI64x2ToVecF64x2(), v128),
- makeUnary(module, BinaryenConvertUVecI64x2ToVecF64x2(), v128),
- makeUnary(module, BinaryenWidenLowSVecI8x16ToVecI16x8(), v128),
- makeUnary(module, BinaryenWidenHighSVecI8x16ToVecI16x8(), v128),
- makeUnary(module, BinaryenWidenLowUVecI8x16ToVecI16x8(), v128),
- makeUnary(module, BinaryenWidenHighUVecI8x16ToVecI16x8(), v128),
- makeUnary(module, BinaryenWidenLowSVecI16x8ToVecI32x4(), v128),
- makeUnary(module, BinaryenWidenHighSVecI16x8ToVecI32x4(), v128),
- makeUnary(module, BinaryenWidenLowUVecI16x8ToVecI32x4(), v128),
- makeUnary(module, BinaryenWidenHighUVecI16x8ToVecI32x4(), v128),
+ makeUnary(module, BinaryenExtendLowSVecI8x16ToVecI16x8(), v128),
+ makeUnary(module, BinaryenExtendHighSVecI8x16ToVecI16x8(), v128),
+ makeUnary(module, BinaryenExtendLowUVecI8x16ToVecI16x8(), v128),
+ makeUnary(module, BinaryenExtendHighUVecI8x16ToVecI16x8(), v128),
+ makeUnary(module, BinaryenExtendLowSVecI16x8ToVecI32x4(), v128),
+ makeUnary(module, BinaryenExtendHighSVecI16x8ToVecI32x4(), v128),
+ makeUnary(module, BinaryenExtendLowUVecI16x8ToVecI32x4(), v128),
+ makeUnary(module, BinaryenExtendHighUVecI16x8ToVecI32x4(), v128),
// Binary
makeBinary(module, BinaryenAddInt32(), i32),
makeBinary(module, BinaryenSubFloat64(), f64),
@@ -526,7 +520,6 @@ void test_core() {
makeBinary(module, BinaryenSubVecI8x16(), v128),
makeBinary(module, BinaryenSubSatSVecI8x16(), v128),
makeBinary(module, BinaryenSubSatUVecI8x16(), v128),
- makeBinary(module, BinaryenMulVecI8x16(), v128),
makeBinary(module, BinaryenMinSVecI8x16(), v128),
makeBinary(module, BinaryenMinUVecI8x16(), v128),
makeBinary(module, BinaryenMaxSVecI8x16(), v128),
@@ -648,10 +641,6 @@ void test_core() {
// Other SIMD
makeSIMDShuffle(module),
makeSIMDTernary(module, BinaryenBitselectVec128()),
- makeSIMDTernary(module, BinaryenQFMAVecF32x4()),
- makeSIMDTernary(module, BinaryenQFMSVecF32x4()),
- makeSIMDTernary(module, BinaryenQFMAVecF64x2()),
- makeSIMDTernary(module, BinaryenQFMSVecF64x2()),
// Bulk memory
makeMemoryInit(module),
makeDataDrop(module),
diff --git a/test/example/c-api-kitchen-sink.txt b/test/example/c-api-kitchen-sink.txt
index 4cad3e659..e67850376 100644
--- a/test/example/c-api-kitchen-sink.txt
+++ b/test/example/c-api-kitchen-sink.txt
@@ -312,17 +312,17 @@ BinaryenFeatureAll: 8191
)
)
(drop
- (i8x16.abs
+ (v128.any_true
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i8x16.neg
+ (i8x16.abs
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i8x16.any_true
+ (i8x16.neg
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
@@ -347,11 +347,6 @@ BinaryenFeatureAll: 8191
)
)
(drop
- (i16x8.any_true
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
(i16x8.all_true
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -372,11 +367,6 @@ BinaryenFeatureAll: 8191
)
)
(drop
- (i32x4.any_true
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
(i32x4.all_true
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -432,16 +422,6 @@ BinaryenFeatureAll: 8191
)
)
(drop
- (i64x2.trunc_sat_f64x2_s
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (i64x2.trunc_sat_f64x2_u
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
(f32x4.convert_i32x4_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -452,52 +432,42 @@ BinaryenFeatureAll: 8191
)
)
(drop
- (f64x2.convert_i64x2_s
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (f64x2.convert_i64x2_u
+ (i16x8.extend_low_i8x16_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i16x8.widen_low_i8x16_s
+ (i16x8.extend_high_i8x16_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i16x8.widen_high_i8x16_s
+ (i16x8.extend_low_i8x16_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i16x8.widen_low_i8x16_u
+ (i16x8.extend_high_i8x16_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i16x8.widen_high_i8x16_u
+ (i32x4.extend_low_i16x8_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i32x4.widen_low_i16x8_s
+ (i32x4.extend_high_i16x8_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i32x4.widen_high_i16x8_s
+ (i32x4.extend_low_i16x8_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i32x4.widen_low_i16x8_u
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (i32x4.widen_high_i16x8_u
+ (i32x4.extend_high_i16x8_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
@@ -976,13 +946,13 @@ BinaryenFeatureAll: 8191
)
)
(drop
- (i8x16.add_saturate_s
+ (i8x16.add_sat_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i8x16.add_saturate_u
+ (i8x16.add_sat_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -994,19 +964,13 @@ BinaryenFeatureAll: 8191
)
)
(drop
- (i8x16.sub_saturate_s
+ (i8x16.sub_sat_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i8x16.sub_saturate_u
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (i8x16.mul
+ (i8x16.sub_sat_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -1048,13 +1012,13 @@ BinaryenFeatureAll: 8191
)
)
(drop
- (i16x8.add_saturate_s
+ (i16x8.add_sat_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i16x8.add_saturate_u
+ (i16x8.add_sat_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -1066,13 +1030,13 @@ BinaryenFeatureAll: 8191
)
)
(drop
- (i16x8.sub_saturate_s
+ (i16x8.sub_sat_s
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
(drop
- (i16x8.sub_saturate_u
+ (i16x8.sub_sat_u
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -1340,7 +1304,7 @@ BinaryenFeatureAll: 8191
)
)
(drop
- (v8x16.swizzle
+ (i8x16.swizzle
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -1494,57 +1458,57 @@ BinaryenFeatureAll: 8191
)
)
(drop
- (v8x16.load_splat
+ (v128.load8_splat
(i32.const 128)
)
)
(drop
- (v16x8.load_splat offset=16 align=1
+ (v128.load16_splat offset=16 align=1
(i32.const 128)
)
)
(drop
- (v32x4.load_splat offset=16
+ (v128.load32_splat offset=16
(i32.const 128)
)
)
(drop
- (v64x2.load_splat align=4
+ (v128.load64_splat align=4
(i32.const 128)
)
)
(drop
- (i16x8.load8x8_s
+ (v128.load8x8_s
(i32.const 128)
)
)
(drop
- (i16x8.load8x8_u
+ (v128.load8x8_u
(i32.const 128)
)
)
(drop
- (i32x4.load16x4_s
+ (v128.load16x4_s
(i32.const 128)
)
)
(drop
- (i32x4.load16x4_u
+ (v128.load16x4_u
(i32.const 128)
)
)
(drop
- (i64x2.load32x2_s
+ (v128.load32x2_s
(i32.const 128)
)
)
(drop
- (i64x2.load32x2_u
+ (v128.load32x2_u
(i32.const 128)
)
)
(drop
- (v8x16.shuffle 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ (i8x16.shuffle 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
@@ -1556,34 +1520,6 @@ BinaryenFeatureAll: 8191
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
)
- (drop
- (f32x4.qfma
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (f32x4.qfms
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (f64x2.qfma
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
- (drop
- (f64x2.qfms
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
- )
- )
(memory.init 0
(i32.const 1024)
(i32.const 0)
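The expected output above captures the renaming pattern in one place: the _saturate_ infix becomes _sat_, the v8x16/v16x8/v32x4/v64x2 shape prefixes are folded into i8x16 or v128 (so v8x16.swizzle becomes i8x16.swizzle and v16x8.load_splat becomes v128.load16_splat), and widen becomes extend. Below is a minimal sketch in WebAssembly text format that uses only finalized names appearing in this diff; the module layout and the $renamed function name are illustrative and not part of the test suite:

(module
 (memory 1)
 (func $renamed (param $a v128) (param $b v128) (param $addr i32) (result v128)
  (drop
   ;; was i8x16.add_saturate_s
   (i8x16.add_sat_s
    (local.get $a)
    (local.get $b)
   )
  )
  (drop
   ;; was v8x16.swizzle
   (i8x16.swizzle
    (local.get $a)
    (local.get $b)
   )
  )
  ;; was v16x8.load_splat
  (v128.load16_splat
   (local.get $addr)
  )
 )
)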
diff --git a/test/passes/precompute-propagate_all-features.txt b/test/passes/precompute-propagate_all-features.txt
index 298b46de9..09f2bc42d 100644
--- a/test/passes/precompute-propagate_all-features.txt
+++ b/test/passes/precompute-propagate_all-features.txt
@@ -259,7 +259,7 @@
(func $simd-load (result v128)
(local $x v128)
(local.set $x
- (v8x16.load_splat
+ (v128.load8_splat
(i32.const 0)
)
)
diff --git a/test/passes/precompute-propagate_all-features.wast b/test/passes/precompute-propagate_all-features.wast
index f866c0ea8..bad12eac8 100644
--- a/test/passes/precompute-propagate_all-features.wast
+++ b/test/passes/precompute-propagate_all-features.wast
@@ -172,7 +172,7 @@
)
(func $simd-load (result v128)
(local $x v128)
- (local.set $x (v8x16.load_splat (i32.const 0)))
+ (local.set $x (v128.load8_splat (i32.const 0)))
(local.get $x)
)
(func $tuple-local (result i32 i64)
diff --git a/test/simd.wast b/test/simd.wast
index 8d12ea0ac..a89efb384 100644
--- a/test/simd.wast
+++ b/test/simd.wast
@@ -5,6 +5,56 @@
(local.get $0)
)
)
+ (func $v128.load8x8_s (param $0 i32) (result v128)
+ (v128.load8x8_s
+ (local.get $0)
+ )
+ )
+ (func $v128.load8x8_u (param $0 i32) (result v128)
+ (v128.load8x8_u
+ (local.get $0)
+ )
+ )
+ (func $v128.load16x4_s (param $0 i32) (result v128)
+ (v128.load16x4_s
+ (local.get $0)
+ )
+ )
+ (func $v128.load16x4_u (param $0 i32) (result v128)
+ (v128.load16x4_u
+ (local.get $0)
+ )
+ )
+ (func $v128.load32x2_s (param $0 i32) (result v128)
+ (v128.load32x2_s
+ (local.get $0)
+ )
+ )
+ (func $v128.load32x2_u (param $0 i32) (result v128)
+ (v128.load32x2_u
+ (local.get $0)
+ )
+ )
+ (func $v128.load8_splat (param $0 i32) (result v128)
+ (v128.load8_splat
+ (local.get $0)
+ )
+ )
+ (func $v128.load16_splat (param $0 i32) (result v128)
+ (v128.load16_splat
+ (local.get $0)
+ )
+ )
+ (func $v128.load32_splat (param $0 i32) (result v128)
+ (v128.load32_splat
+ (local.get $0)
+ )
+ )
+ (func $v128.load64_splat (param $0 i32) (result v128)
+ (v128.load64_splat
+ (local.get $0)
+ )
+ )
(func $v128.store (param $0 i32) (param $1 v128)
(v128.store offset=0 align=16
(local.get $0)
@@ -29,8 +79,14 @@
(func $v128.const.f64x2 (result v128)
(v128.const f64x2 1.0 2)
)
- (func $v128.shuffle (param $0 v128) (param $1 v128) (result v128)
- (v8x16.shuffle 0 17 2 19 4 21 6 23 8 25 10 27 12 29 14 31
+ (func $i8x16.shuffle (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.shuffle 0 17 2 19 4 21 6 23 8 25 10 27 12 29 14 31
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i8x16.swizzle (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.swizzle
(local.get $0)
(local.get $1)
)
@@ -40,6 +96,21 @@
(local.get $0)
)
)
+ (func $i16x8.splat (param $0 i32) (result v128)
+ (i16x8.splat
+ (local.get $0)
+ )
+ )
+ (func $f32x4.splat (param $0 f32) (result v128)
+ (f32x4.splat
+ (local.get $0)
+ )
+ )
+ (func $f64x2.splat (param $0 f64) (result v128)
+ (f64x2.splat
+ (local.get $0)
+ )
+ )
(func $i8x16.extract_lane_s (param $0 v128) (result i32)
(i8x16.extract_lane_s 0
(local.get $0)
@@ -56,11 +127,6 @@
(local.get $1)
)
)
- (func $i16x8.splat (param $0 i32) (result v128)
- (i16x8.splat
- (local.get $0)
- )
- )
(func $i16x8.extract_lane_s (param $0 v128) (result i32)
(i16x8.extract_lane_s 0
(local.get $0)
@@ -77,11 +143,6 @@
(local.get $1)
)
)
- (func $i32x4.splat (param $0 i32) (result v128)
- (i32x4.splat
- (local.get $0)
- )
- )
(func $i32x4.extract_lane (param $0 v128) (result i32)
(i32x4.extract_lane 0
(local.get $0)
@@ -104,11 +165,6 @@
(local.get $1)
)
)
- (func $f32x4.splat (param $0 f32) (result v128)
- (f32x4.splat
- (local.get $0)
- )
- )
(func $f32x4.extract_lane (param $0 v128) (result f32)
(f32x4.extract_lane 0
(local.get $0)
@@ -120,11 +176,6 @@
(local.get $1)
)
)
- (func $f64x2.splat (param $0 f64) (result v128)
- (f64x2.splat
- (local.get $0)
- )
- )
(func $f64x2.extract_lane (param $0 v128) (result f64)
(f64x2.extract_lane 0
(local.get $0)
@@ -316,12 +367,6 @@
(local.get $1)
)
)
- (func $i64x2.eq (param $0 v128) (param $1 v128) (result v128)
- (i64x2.eq
- (local.get $0)
- (local.get $1)
- )
- )
(func $f32x4.eq (param $0 v128) (param $1 v128) (result v128)
(f32x4.eq
(local.get $0)
@@ -405,57 +450,34 @@
(local.get $1)
)
)
- (func $v128.or (param $0 v128) (param $1 v128) (result v128)
- (v128.or
- (local.get $0)
- (local.get $1)
- )
- )
- (func $v128.xor (param $0 v128) (param $1 v128) (result v128)
- (v128.xor
- (local.get $0)
- (local.get $1)
- )
- )
(func $v128.andnot (param $0 v128) (param $1 v128) (result v128)
(v128.andnot
(local.get $0)
(local.get $1)
)
)
- (func $v128.bitselect (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v128.bitselect
+ (func $v128.or (param $0 v128) (param $1 v128) (result v128)
+ (v128.or
(local.get $0)
(local.get $1)
- (local.get $2)
)
)
- (func $v8x16.signselect (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v8x16.signselect
- (local.get $0)
- (local.get $1)
- (local.get $2)
- )
- )
- (func $v16x8.signselect (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v16x8.signselect
+ (func $v128.xor (param $0 v128) (param $1 v128) (result v128)
+ (v128.xor
(local.get $0)
(local.get $1)
- (local.get $2)
)
)
- (func $v32x4.signselect (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v32x4.signselect
+ (func $v128.bitselect (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
+ (v128.bitselect
(local.get $0)
(local.get $1)
(local.get $2)
)
)
- (func $v64x2.signselect (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v64x2.signselect
+ (func $v128.any_true (param $0 v128) (result i32)
+ (v128.any_true
(local.get $0)
- (local.get $1)
- (local.get $2)
)
)
(func $v128.load8_lane (param $0 i32) (param $1 v128) (result v128)
@@ -542,8 +564,23 @@
(local.get $1)
)
)
- (func $i8x16.popcnt (param $0 v128) (result v128)
- (i8x16.popcnt
+ (func $v128.load32_zero (param $0 i32) (result v128)
+ (v128.load32_zero
+ (local.get $0)
+ )
+ )
+ (func $v128.load64_zero (param $0 i32) (result v128)
+ (v128.load64_zero
+ (local.get $0)
+ )
+ )
+ (func $f32x4.demote_f64x2_zero (param $0 v128) (result v128)
+ (f32x4.demote_f64x2_zero
+ (local.get $0)
+ )
+ )
+ (func $f64x2.promote_low_f32x4 (param $0 v128) (result v128)
+ (f64x2.promote_low_f32x4
(local.get $0)
)
)
@@ -557,8 +594,8 @@
(local.get $0)
)
)
- (func $i8x16.any_true (param $0 v128) (result i32)
- (i8x16.any_true
+ (func $i8x16.popcnt (param $0 v128) (result v128)
+ (i8x16.popcnt
(local.get $0)
)
)
@@ -572,6 +609,38 @@
(local.get $0)
)
)
+ (func $i8x16.narrow_i16x8_s (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.narrow_i16x8_s
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i8x16.narrow_i16x8_u (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.narrow_i16x8_u
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $f32x4.ceil (param $0 v128) (result v128)
+ (f32x4.ceil
+ (local.get $0)
+ )
+ )
+ (func $f32x4.floor (param $0 v128) (result v128)
+ (f32x4.floor
+ (local.get $0)
+ )
+ )
+ (func $f32x4.trunc (param $0 v128) (result v128)
+ (f32x4.trunc
+ (local.get $0)
+ )
+ )
+ (func $f32x4.nearest (param $0 v128) (result v128)
+ (f32x4.nearest
+ (local.get $0)
+ )
+ )
(func $i8x16.shl (param $0 v128) (param $1 i32) (result v128)
(i8x16.shl
(local.get $0)
@@ -596,14 +665,14 @@
(local.get $1)
)
)
- (func $i8x16.add_saturate_s (param $0 v128) (param $1 v128) (result v128)
- (i8x16.add_saturate_s
+ (func $i8x16.add_sat_s (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.add_sat_s
(local.get $0)
(local.get $1)
)
)
- (func $i8x16.add_saturate_u (param $0 v128) (param $1 v128) (result v128)
- (i8x16.add_saturate_u
+ (func $i8x16.add_sat_u (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.add_sat_u
(local.get $0)
(local.get $1)
)
@@ -614,22 +683,26 @@
(local.get $1)
)
)
- (func $i8x16.sub_saturate_s (param $0 v128) (param $1 v128) (result v128)
- (i8x16.sub_saturate_s
+ (func $i8x16.sub_sat_s (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.sub_sat_s
(local.get $0)
(local.get $1)
)
)
- (func $i8x16.sub_saturate_u (param $0 v128) (param $1 v128) (result v128)
- (i8x16.sub_saturate_u
+ (func $i8x16.sub_sat_u (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.sub_sat_u
(local.get $0)
(local.get $1)
)
)
- (func $i8x16.mul (param $0 v128) (param $1 v128) (result v128)
- (i8x16.mul
+ (func $f64x2.ceil (param $0 v128) (result v128)
+ (f64x2.ceil
+ (local.get $0)
+ )
+ )
+ (func $f64x2.floor (param $0 v128) (result v128)
+ (f64x2.floor
(local.get $0)
- (local.get $1)
)
)
(func $i8x16.min_s (param $0 v128) (param $1 v128) (result v128)
@@ -656,12 +729,37 @@
(local.get $1)
)
)
+ (func $f64x2.trunc (param $0 v128) (result v128)
+ (f64x2.trunc
+ (local.get $0)
+ )
+ )
(func $i8x16.avgr_u (param $0 v128) (param $1 v128) (result v128)
(i8x16.avgr_u
(local.get $0)
(local.get $1)
)
)
+ (func $i16x8.extadd_pairwise_i8x16_s (param $0 v128) (result v128)
+ (i16x8.extadd_pairwise_i8x16_s
+ (local.get $0)
+ )
+ )
+ (func $i16x8.extadd_pairwise_i8x16_u (param $0 v128) (result v128)
+ (i16x8.extadd_pairwise_i8x16_u
+ (local.get $0)
+ )
+ )
+ (func $i32x4.extadd_pairwise_i16x8_s (param $0 v128) (result v128)
+ (i32x4.extadd_pairwise_i16x8_s
+ (local.get $0)
+ )
+ )
+ (func $i32x4.extadd_pairwise_i16x8_u (param $0 v128) (result v128)
+ (i32x4.extadd_pairwise_i16x8_u
+ (local.get $0)
+ )
+ )
(func $i16x8.abs (param $0 v128) (result v128)
(i16x8.abs
(local.get $0)
@@ -672,9 +770,10 @@
(local.get $0)
)
)
- (func $i16x8.any_true (param $0 v128) (result i32)
- (i16x8.any_true
+ (func $i16x8.q15mulr_sat_s (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.q15mulr_sat_s
(local.get $0)
+ (local.get $1)
)
)
(func $i16x8.all_true (param $0 v128) (result i32)
@@ -687,7 +786,39 @@
(local.get $0)
)
)
- (func $i16x8.shl (param $0 v128) (param $1 i32) (result v128)
+ (func $i16x8.narrow_i32x4_s (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.narrow_i32x4_s
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i16x8.narrow_i32x4_u (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.narrow_i32x4_u
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i16x8.extend_low_i8x16_s (param $0 v128) (result v128)
+ (i16x8.extend_low_i8x16_s
+ (local.get $0)
+ )
+ )
+ (func $i16x8.extend_high_i8x16_s (param $0 v128) (result v128)
+ (i16x8.extend_high_i8x16_s
+ (local.get $0)
+ )
+ )
+ (func $i16x8.extend_low_i8x16_u (param $0 v128) (result v128)
+ (i16x8.extend_low_i8x16_u
+ (local.get $0)
+ )
+ )
+ (func $i16x8.extend_high_i8x16_u (param $0 v128) (result v128)
+ (i16x8.extend_high_i8x16_u
+ (local.get $0)
+ )
+ )
+ (func $i16x8.shl (param $0 v128) (param $1 i32) (result v128)
(i16x8.shl
(local.get $0)
(local.get $1)
@@ -711,14 +842,14 @@
(local.get $1)
)
)
- (func $i16x8.add_saturate_s (param $0 v128) (param $1 v128) (result v128)
- (i16x8.add_saturate_s
+ (func $i16x8.add_sat_s (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.add_sat_s
(local.get $0)
(local.get $1)
)
)
- (func $i16x8.add_saturate_u (param $0 v128) (param $1 v128) (result v128)
- (i16x8.add_saturate_u
+ (func $i16x8.add_sat_u (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.add_sat_u
(local.get $0)
(local.get $1)
)
@@ -729,18 +860,23 @@
(local.get $1)
)
)
- (func $i16x8.sub_saturate_s (param $0 v128) (param $1 v128) (result v128)
- (i16x8.sub_saturate_s
+ (func $i16x8.sub_sat_s (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.sub_sat_s
(local.get $0)
(local.get $1)
)
)
- (func $i16x8.sub_saturate_u (param $0 v128) (param $1 v128) (result v128)
- (i16x8.sub_saturate_u
+ (func $i16x8.sub_sat_u (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.sub_sat_u
(local.get $0)
(local.get $1)
)
)
+ (func $f64x2.nearest (param $0 v128) (result v128)
+ (f64x2.nearest
+ (local.get $0)
+ )
+ )
(func $i16x8.mul (param $0 v128) (param $1 v128) (result v128)
(i16x8.mul
(local.get $0)
@@ -777,12 +913,6 @@
(local.get $1)
)
)
- (func $i16x8.q15mulr_sat_s (param $0 v128) (param $1 v128) (result v128)
- (i16x8.q15mulr_sat_s
- (local.get $0)
- (local.get $1)
- )
- )
(func $i16x8.extmul_low_i8x16_s (param $0 v128) (param $1 v128) (result v128)
(i16x8.extmul_low_i8x16_s
(local.get $0)
@@ -817,11 +947,6 @@
(local.get $0)
)
)
- (func $i32x4.any_true (param $0 v128) (result i32)
- (i32x4.any_true
- (local.get $0)
- )
- )
(func $i32x4.all_true (param $0 v128) (result i32)
(i32x4.all_true
(local.get $0)
@@ -832,6 +957,26 @@
(local.get $0)
)
)
+ (func $i32x4.extend_low_i16x8_s (param $0 v128) (result v128)
+ (i32x4.extend_low_i16x8_s
+ (local.get $0)
+ )
+ )
+ (func $i32x4.extend_high_i16x8_s (param $0 v128) (result v128)
+ (i32x4.extend_high_i16x8_s
+ (local.get $0)
+ )
+ )
+ (func $i32x4.extend_low_i16x8_u (param $0 v128) (result v128)
+ (i32x4.extend_low_i16x8_u
+ (local.get $0)
+ )
+ )
+ (func $i32x4.extend_high_i16x8_u (param $0 v128) (result v128)
+ (i32x4.extend_high_i16x8_u
+ (local.get $0)
+ )
+ )
(func $i32x4.shl (param $0 v128) (param $1 i32) (result v128)
(i32x4.shl
(local.get $0)
@@ -922,16 +1067,46 @@
(local.get $1)
)
)
+ (func $i64x2.abs (param $0 v128) (result v128)
+ (i64x2.abs
+ (local.get $0)
+ )
+ )
(func $i64x2.neg (param $0 v128) (result v128)
(i64x2.neg
(local.get $0)
)
)
+ (func $i64x2.all_true (param $0 v128) (result i32)
+ (i64x2.all_true
+ (local.get $0)
+ )
+ )
(func $i64x2.bitmask (param $0 v128) (result i32)
(i64x2.bitmask
(local.get $0)
)
)
+ (func $i64x2.extend_low_i32x4_s (param $0 v128) (result v128)
+ (i64x2.extend_low_i32x4_s
+ (local.get $0)
+ )
+ )
+ (func $i64x2.extend_high_i32x4_s (param $0 v128) (result v128)
+ (i64x2.extend_high_i32x4_s
+ (local.get $0)
+ )
+ )
+ (func $i64x2.extend_low_i32x4_u (param $0 v128) (result v128)
+ (i64x2.extend_low_i32x4_u
+ (local.get $0)
+ )
+ )
+ (func $i64x2.extend_high_i32x4_u (param $0 v128) (result v128)
+ (i64x2.extend_high_i32x4_u
+ (local.get $0)
+ )
+ )
(func $i64x2.shl (param $0 v128) (param $1 i32) (result v128)
(i64x2.shl
(local.get $0)
@@ -968,6 +1143,42 @@
(local.get $1)
)
)
+ (func $i64x2.eq (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.eq
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i64x2.ne (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.ne
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i64x2.lt_s (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.lt_s
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i64x2.gt_s (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.gt_s
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i64x2.le_s (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.le_s
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i64x2.ge_s (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.ge_s
+ (local.get $0)
+ (local.get $1)
+ )
+ )
(func $i64x2.extmul_low_i32x4_s (param $0 v128) (param $1 v128) (result v128)
(i64x2.extmul_low_i32x4_s
(local.get $0)
@@ -992,6 +1203,21 @@
(local.get $1)
)
)
+ (func $f32x4.abs (param $0 v128) (result v128)
+ (f32x4.abs
+ (local.get $0)
+ )
+ )
+ (func $f32x4.neg (param $0 v128) (result v128)
+ (f32x4.neg
+ (local.get $0)
+ )
+ )
+ (func $f32x4.sqrt (param $0 v128) (result v128)
+ (f32x4.sqrt
+ (local.get $0)
+ )
+ )
(func $f32x4.add (param $0 v128) (param $1 v128) (result v128)
(f32x4.add
(local.get $0)
@@ -1040,53 +1266,19 @@
(local.get $1)
)
)
- (func $f32x4.ceil (param $0 v128) (result v128)
- (f32x4.ceil
- (local.get $0)
- )
- )
- (func $f32x4.floor (param $0 v128) (result v128)
- (f32x4.floor
- (local.get $0)
- )
- )
- (func $f32x4.trunc (param $0 v128) (result v128)
- (f32x4.trunc
- (local.get $0)
- )
- )
- (func $f32x4.nearest (param $0 v128) (result v128)
- (f32x4.nearest
- (local.get $0)
- )
- )
- (func $f32x4.abs (param $0 v128) (result v128)
- (f32x4.abs
- (local.get $0)
- )
- )
- (func $f32x4.neg (param $0 v128) (result v128)
- (f32x4.neg
- (local.get $0)
- )
- )
- (func $f32x4.sqrt (param $0 v128) (result v128)
- (f32x4.sqrt
+ (func $f64x2.abs (param $0 v128) (result v128)
+ (f64x2.abs
(local.get $0)
)
)
- (func $f32x4.qfma (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (f32x4.qfma
+ (func $f64x2.neg (param $0 v128) (result v128)
+ (f64x2.neg
(local.get $0)
- (local.get $1)
- (local.get $2)
)
)
- (func $f32x4.qfms (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (f32x4.qfms
+ (func $f64x2.sqrt (param $0 v128) (result v128)
+ (f64x2.sqrt
(local.get $0)
- (local.get $1)
- (local.get $2)
)
)
(func $f64x2.add (param $0 v128) (param $1 v128) (result v128)
@@ -1137,75 +1329,6 @@
(local.get $1)
)
)
- (func $f64x2.ceil (param $0 v128) (result v128)
- (f64x2.ceil
- (local.get $0)
- )
- )
- (func $f64x2.floor (param $0 v128) (result v128)
- (f64x2.floor
- (local.get $0)
- )
- )
- (func $f64x2.trunc (param $0 v128) (result v128)
- (f64x2.trunc
- (local.get $0)
- )
- )
- (func $f64x2.nearest (param $0 v128) (result v128)
- (f64x2.nearest
- (local.get $0)
- )
- )
- (func $f64x2.abs (param $0 v128) (result v128)
- (f64x2.abs
- (local.get $0)
- )
- )
- (func $f64x2.neg (param $0 v128) (result v128)
- (f64x2.neg
- (local.get $0)
- )
- )
- (func $f64x2.sqrt (param $0 v128) (result v128)
- (f64x2.sqrt
- (local.get $0)
- )
- )
- (func $f64x2.qfma (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (f64x2.qfma
- (local.get $0)
- (local.get $1)
- (local.get $2)
- )
- )
- (func $f64x2.qfms (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (f64x2.qfms
- (local.get $0)
- (local.get $1)
- (local.get $2)
- )
- )
- (func $i16x8.extadd_pairwise_i8x16_s (param $0 v128) (result v128)
- (i16x8.extadd_pairwise_i8x16_s
- (local.get $0)
- )
- )
- (func $i16x8.extadd_pairwise_i8x16_u (param $0 v128) (result v128)
- (i16x8.extadd_pairwise_i8x16_u
- (local.get $0)
- )
- )
- (func $i32x4.extadd_pairwise_i16x8_s (param $0 v128) (result v128)
- (i32x4.extadd_pairwise_i16x8_s
- (local.get $0)
- )
- )
- (func $i32x4.extadd_pairwise_i16x8_u (param $0 v128) (result v128)
- (i32x4.extadd_pairwise_i16x8_u
- (local.get $0)
- )
- )
(func $i32x4.trunc_sat_f32x4_s (param $0 v128) (result v128)
(i32x4.trunc_sat_f32x4_s
(local.get $0)
@@ -1216,16 +1339,6 @@
(local.get $0)
)
)
- (func $i64x2.trunc_sat_f64x2_s (param $0 v128) (result v128)
- (i64x2.trunc_sat_f64x2_s
- (local.get $0)
- )
- )
- (func $i64x2.trunc_sat_f64x2_u (param $0 v128) (result v128)
- (i64x2.trunc_sat_f64x2_u
- (local.get $0)
- )
- )
(func $f32x4.convert_i32x4_s (param $0 v128) (result v128)
(f32x4.convert_i32x4_s
(local.get $0)
@@ -1236,173 +1349,13 @@
(local.get $0)
)
)
- (func $f64x2.convert_i64x2_s (param $0 v128) (result v128)
- (f64x2.convert_i64x2_s
- (local.get $0)
- )
- )
- (func $f64x2.convert_i64x2_u (param $0 v128) (result v128)
- (f64x2.convert_i64x2_u
- (local.get $0)
- )
- )
- (func $v8x16.load_splat (param $0 i32) (result v128)
- (v8x16.load_splat
- (local.get $0)
- )
- )
- (func $v16x8.load_splat (param $0 i32) (result v128)
- (v16x8.load_splat
- (local.get $0)
- )
- )
- (func $v32x4.load_splat (param $0 i32) (result v128)
- (v32x4.load_splat
- (local.get $0)
- )
- )
- (func $v64x2.load_splat (param $0 i32) (result v128)
- (v64x2.load_splat
- (local.get $0)
- )
- )
- (func $i8x16.narrow_i16x8_s (param $0 v128) (param $1 v128) (result v128)
- (i8x16.narrow_i16x8_s
- (local.get $0)
- (local.get $1)
- )
- )
- (func $i8x16.narrow_i16x8_u (param $0 v128) (param $1 v128) (result v128)
- (i8x16.narrow_i16x8_u
- (local.get $0)
- (local.get $1)
- )
- )
- (func $i16x8.narrow_i32x4_s (param $0 v128) (param $1 v128) (result v128)
- (i16x8.narrow_i32x4_s
- (local.get $0)
- (local.get $1)
- )
- )
- (func $i16x8.narrow_i32x4_u (param $0 v128) (param $1 v128) (result v128)
- (i16x8.narrow_i32x4_u
- (local.get $0)
- (local.get $1)
- )
- )
- (func $i16x8.widen_low_i8x16_s (param $0 v128) (result v128)
- (i16x8.widen_low_i8x16_s
- (local.get $0)
- )
- )
- (func $i16x8.widen_high_i8x16_s (param $0 v128) (result v128)
- (i16x8.widen_high_i8x16_s
- (local.get $0)
- )
- )
- (func $i16x8.widen_low_i8x16_u (param $0 v128) (result v128)
- (i16x8.widen_low_i8x16_u
+ (func $i32x4.trunc_sat_f64x2_s_zero (param $0 v128) (result v128)
+ (i32x4.trunc_sat_f64x2_s_zero
(local.get $0)
)
)
- (func $i16x8.widen_high_i8x16_u (param $0 v128) (result v128)
- (i16x8.widen_high_i8x16_u
- (local.get $0)
- )
- )
- (func $i32x4.widen_low_i16x8_s (param $0 v128) (result v128)
- (i32x4.widen_low_i16x8_s
- (local.get $0)
- )
- )
- (func $i32x4.widen_high_i16x8_s (param $0 v128) (result v128)
- (i32x4.widen_high_i16x8_s
- (local.get $0)
- )
- )
- (func $i32x4.widen_low_i16x8_u (param $0 v128) (result v128)
- (i32x4.widen_low_i16x8_u
- (local.get $0)
- )
- )
- (func $i32x4.widen_high_i16x8_u (param $0 v128) (result v128)
- (i32x4.widen_high_i16x8_u
- (local.get $0)
- )
- )
- (func $i64x2.widen_low_i32x4_s (param $0 v128) (result v128)
- (i64x2.widen_low_i32x4_s
- (local.get $0)
- )
- )
- (func $i64x2.widen_high_i32x4_s (param $0 v128) (result v128)
- (i64x2.widen_high_i32x4_s
- (local.get $0)
- )
- )
- (func $i64x2.widen_low_i32x4_u (param $0 v128) (result v128)
- (i64x2.widen_low_i32x4_u
- (local.get $0)
- )
- )
- (func $i64x2.widen_high_i32x4_u (param $0 v128) (result v128)
- (i64x2.widen_high_i32x4_u
- (local.get $0)
- )
- )
- (func $i16x8.load8x8_u (param $0 i32) (result v128)
- (i16x8.load8x8_u
- (local.get $0)
- )
- )
- (func $i16x8.load8x8_s (param $0 i32) (result v128)
- (i16x8.load8x8_s
- (local.get $0)
- )
- )
- (func $i32x4.load16x4_s (param $0 i32) (result v128)
- (i32x4.load16x4_s
- (local.get $0)
- )
- )
- (func $i32x4.load16x4_u (param $0 i32) (result v128)
- (i32x4.load16x4_u
- (local.get $0)
- )
- )
- (func $i64x2.load32x2_s (param $0 i32) (result v128)
- (i64x2.load32x2_s
- (local.get $0)
- )
- )
- (func $i64x2.load32x2_u (param $0 i32) (result v128)
- (i64x2.load32x2_u
- (local.get $0)
- )
- )
- (func $v128.load32_zero (param $0 i32) (result v128)
- (v128.load32_zero
- (local.get $0)
- )
- )
- (func $v128.load64_zero (param $0 i32) (result v128)
- (v128.load64_zero
- (local.get $0)
- )
- )
- (func $v8x16.swizzle (param $0 v128) (param $1 v128) (result v128)
- (v8x16.swizzle
- (local.get $0)
- (local.get $1)
- )
- )
- (func $prefetch.t (param $0 i32)
- (prefetch.t offset=3 align=2
- (local.get $0)
- )
- )
- (func $prefetch.nt (param $0 i32)
- (prefetch.nt offset=3 align=2
+ (func $i32x4.trunc_sat_f64x2_u_zero (param $0 v128) (result v128)
+ (i32x4.trunc_sat_f64x2_u_zero
(local.get $0)
)
)
@@ -1416,34 +1369,4 @@
(local.get $0)
)
)
- (func $i32x4.trunc_sat_f64x2_zero_s (param $0 v128) (result v128)
- (i32x4.trunc_sat_f64x2_zero_s
- (local.get $0)
- )
- )
- (func $i32x4.trunc_sat_f64x2_zero_u (param $0 v128) (result v128)
- (i32x4.trunc_sat_f64x2_zero_u
- (local.get $0)
- )
- )
- (func $f32x4.demote_f64x2_zero (param $0 v128) (result v128)
- (f32x4.demote_f64x2_zero
- (local.get $0)
- )
- )
- (func $f64x2.promote_low_f32x4 (param $0 v128) (result v128)
- (f64x2.promote_low_f32x4
- (local.get $0)
- )
- )
- (func $i32x4.widen_i8x16_s (param $0 v128) (result v128)
- (i32x4.widen_i8x16_s 0
- (local.get $0)
- )
- )
- (func $i32x4.widen_i8x16_u (param $0 v128) (result v128)
- (i32x4.widen_i8x16_u 0
- (local.get $0)
- )
- )
)
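test/simd.wast also loses the experimental instructions that did not reach the final proposal: the qfma/qfms fused multiply-adds, the signselect family, prefetch.t and prefetch.nt, i8x16.mul, the i64x2 saturating truncations, the f64x2.convert_i64x2 conversions, and i32x4.widen_i8x16. Code that used f32x4.qfma can approximate it with the standardized multiply and add, at the cost of an extra rounding step since the result is no longer fused; a hypothetical sketch (the $qfma_substitute name is not from the test suite):

(module
 (func $qfma_substitute (param $a v128) (param $b v128) (param $c v128) (result v128)
  ;; a * b + c built from two standardized instructions; unlike the removed
  ;; f32x4.qfma, the intermediate product is rounded before the add.
  (f32x4.add
   (f32x4.mul
    (local.get $a)
    (local.get $b)
   )
   (local.get $c)
  )
 )
)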
diff --git a/test/simd.wast.from-wast b/test/simd.wast.from-wast
index 6fbacea11..7940a00da 100644
--- a/test/simd.wast.from-wast
+++ b/test/simd.wast.from-wast
@@ -2,13 +2,11 @@
(type $v128_v128_=>_v128 (func (param v128 v128) (result v128)))
(type $v128_=>_v128 (func (param v128) (result v128)))
(type $i32_=>_v128 (func (param i32) (result v128)))
- (type $v128_=>_i32 (func (param v128) (result i32)))
(type $v128_i32_=>_v128 (func (param v128 i32) (result v128)))
- (type $v128_v128_v128_=>_v128 (func (param v128 v128 v128) (result v128)))
+ (type $v128_=>_i32 (func (param v128) (result i32)))
(type $i32_v128_=>_none (func (param i32 v128)))
(type $i32_v128_=>_v128 (func (param i32 v128) (result v128)))
(type $none_=>_v128 (func (result v128)))
- (type $i32_=>_none (func (param i32)))
(type $v128_=>_i64 (func (param v128) (result i64)))
(type $v128_=>_f32 (func (param v128) (result f32)))
(type $v128_=>_f64 (func (param v128) (result f64)))
@@ -17,12 +15,63 @@
(type $v128_i64_=>_v128 (func (param v128 i64) (result v128)))
(type $v128_f32_=>_v128 (func (param v128 f32) (result v128)))
(type $v128_f64_=>_v128 (func (param v128 f64) (result v128)))
+ (type $v128_v128_v128_=>_v128 (func (param v128 v128 v128) (result v128)))
(memory $0 1 1)
(func $v128.load (param $0 i32) (result v128)
(v128.load
(local.get $0)
)
)
+ (func $v128.load8x8_s (param $0 i32) (result v128)
+ (v128.load8x8_s
+ (local.get $0)
+ )
+ )
+ (func $v128.load8x8_u (param $0 i32) (result v128)
+ (v128.load8x8_u
+ (local.get $0)
+ )
+ )
+ (func $v128.load16x4_s (param $0 i32) (result v128)
+ (v128.load16x4_s
+ (local.get $0)
+ )
+ )
+ (func $v128.load16x4_u (param $0 i32) (result v128)
+ (v128.load16x4_u
+ (local.get $0)
+ )
+ )
+ (func $v128.load32x2_s (param $0 i32) (result v128)
+ (v128.load32x2_s
+ (local.get $0)
+ )
+ )
+ (func $v128.load32x2_u (param $0 i32) (result v128)
+ (v128.load32x2_u
+ (local.get $0)
+ )
+ )
+ (func $v128.load8_splat (param $0 i32) (result v128)
+ (v128.load8_splat
+ (local.get $0)
+ )
+ )
+ (func $v128.load16_splat (param $0 i32) (result v128)
+ (v128.load16_splat
+ (local.get $0)
+ )
+ )
+ (func $v128.load32_splat (param $0 i32) (result v128)
+ (v128.load32_splat
+ (local.get $0)
+ )
+ )
+ (func $v128.load64_splat (param $0 i32) (result v128)
+ (v128.load64_splat
+ (local.get $0)
+ )
+ )
(func $v128.store (param $0 i32) (param $1 v128)
(v128.store
(local.get $0)
@@ -47,8 +96,14 @@
(func $v128.const.f64x2 (result v128)
(v128.const i32x4 0x00000000 0x3ff00000 0x00000000 0x40000000)
)
- (func $v128.shuffle (param $0 v128) (param $1 v128) (result v128)
- (v8x16.shuffle 0 17 2 19 4 21 6 23 8 25 10 27 12 29 14 31
+ (func $i8x16.shuffle (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.shuffle 0 17 2 19 4 21 6 23 8 25 10 27 12 29 14 31
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i8x16.swizzle (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.swizzle
(local.get $0)
(local.get $1)
)
@@ -58,6 +113,21 @@
(local.get $0)
)
)
+ (func $i16x8.splat (param $0 i32) (result v128)
+ (i16x8.splat
+ (local.get $0)
+ )
+ )
+ (func $f32x4.splat (param $0 f32) (result v128)
+ (f32x4.splat
+ (local.get $0)
+ )
+ )
+ (func $f64x2.splat (param $0 f64) (result v128)
+ (f64x2.splat
+ (local.get $0)
+ )
+ )
(func $i8x16.extract_lane_s (param $0 v128) (result i32)
(i8x16.extract_lane_s 0
(local.get $0)
@@ -74,11 +144,6 @@
(local.get $1)
)
)
- (func $i16x8.splat (param $0 i32) (result v128)
- (i16x8.splat
- (local.get $0)
- )
- )
(func $i16x8.extract_lane_s (param $0 v128) (result i32)
(i16x8.extract_lane_s 0
(local.get $0)
@@ -95,11 +160,6 @@
(local.get $1)
)
)
- (func $i32x4.splat (param $0 i32) (result v128)
- (i32x4.splat
- (local.get $0)
- )
- )
(func $i32x4.extract_lane (param $0 v128) (result i32)
(i32x4.extract_lane 0
(local.get $0)
@@ -122,11 +182,6 @@
(local.get $1)
)
)
- (func $f32x4.splat (param $0 f32) (result v128)
- (f32x4.splat
- (local.get $0)
- )
- )
(func $f32x4.extract_lane (param $0 v128) (result f32)
(f32x4.extract_lane 0
(local.get $0)
@@ -138,11 +193,6 @@
(local.get $1)
)
)
- (func $f64x2.splat (param $0 f64) (result v128)
- (f64x2.splat
- (local.get $0)
- )
- )
(func $f64x2.extract_lane (param $0 v128) (result f64)
(f64x2.extract_lane 0
(local.get $0)
@@ -334,12 +384,6 @@
(local.get $1)
)
)
- (func $i64x2.eq (param $0 v128) (param $1 v128) (result v128)
- (i64x2.eq
- (local.get $0)
- (local.get $1)
- )
- )
(func $f32x4.eq (param $0 v128) (param $1 v128) (result v128)
(f32x4.eq
(local.get $0)
@@ -423,57 +467,34 @@
(local.get $1)
)
)
- (func $v128.or (param $0 v128) (param $1 v128) (result v128)
- (v128.or
- (local.get $0)
- (local.get $1)
- )
- )
- (func $v128.xor (param $0 v128) (param $1 v128) (result v128)
- (v128.xor
- (local.get $0)
- (local.get $1)
- )
- )
(func $v128.andnot (param $0 v128) (param $1 v128) (result v128)
(v128.andnot
(local.get $0)
(local.get $1)
)
)
- (func $v128.bitselect (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v128.bitselect
+ (func $v128.or (param $0 v128) (param $1 v128) (result v128)
+ (v128.or
(local.get $0)
(local.get $1)
- (local.get $2)
)
)
- (func $v8x16.signselect (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v8x16.signselect
+ (func $v128.xor (param $0 v128) (param $1 v128) (result v128)
+ (v128.xor
(local.get $0)
(local.get $1)
- (local.get $2)
)
)
- (func $v16x8.signselect (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v16x8.signselect
+ (func $v128.bitselect (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
+ (v128.bitselect
(local.get $0)
(local.get $1)
(local.get $2)
)
)
- (func $v32x4.signselect (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v32x4.signselect
+ (func $v128.any_true (param $0 v128) (result i32)
+ (v128.any_true
(local.get $0)
- (local.get $1)
- (local.get $2)
- )
- )
- (func $v64x2.signselect (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v64x2.signselect
- (local.get $0)
- (local.get $1)
- (local.get $2)
)
)
(func $v128.load8_lane (param $0 i32) (param $1 v128) (result v128)
@@ -560,8 +581,23 @@
(local.get $1)
)
)
- (func $i8x16.popcnt (param $0 v128) (result v128)
- (i8x16.popcnt
+ (func $v128.load32_zero (param $0 i32) (result v128)
+ (v128.load32_zero
+ (local.get $0)
+ )
+ )
+ (func $v128.load64_zero (param $0 i32) (result v128)
+ (v128.load64_zero
+ (local.get $0)
+ )
+ )
+ (func $f32x4.demote_f64x2_zero (param $0 v128) (result v128)
+ (f32x4.demote_f64x2_zero
+ (local.get $0)
+ )
+ )
+ (func $f64x2.promote_low_f32x4 (param $0 v128) (result v128)
+ (f64x2.promote_low_f32x4
(local.get $0)
)
)
@@ -575,8 +611,8 @@
(local.get $0)
)
)
- (func $i8x16.any_true (param $0 v128) (result i32)
- (i8x16.any_true
+ (func $i8x16.popcnt (param $0 v128) (result v128)
+ (i8x16.popcnt
(local.get $0)
)
)
@@ -590,6 +626,38 @@
(local.get $0)
)
)
+ (func $i8x16.narrow_i16x8_s (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.narrow_i16x8_s
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i8x16.narrow_i16x8_u (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.narrow_i16x8_u
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $f32x4.ceil (param $0 v128) (result v128)
+ (f32x4.ceil
+ (local.get $0)
+ )
+ )
+ (func $f32x4.floor (param $0 v128) (result v128)
+ (f32x4.floor
+ (local.get $0)
+ )
+ )
+ (func $f32x4.trunc (param $0 v128) (result v128)
+ (f32x4.trunc
+ (local.get $0)
+ )
+ )
+ (func $f32x4.nearest (param $0 v128) (result v128)
+ (f32x4.nearest
+ (local.get $0)
+ )
+ )
(func $i8x16.shl (param $0 v128) (param $1 i32) (result v128)
(i8x16.shl
(local.get $0)
@@ -614,14 +682,14 @@
(local.get $1)
)
)
- (func $i8x16.add_saturate_s (param $0 v128) (param $1 v128) (result v128)
- (i8x16.add_saturate_s
+ (func $i8x16.add_sat_s (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.add_sat_s
(local.get $0)
(local.get $1)
)
)
- (func $i8x16.add_saturate_u (param $0 v128) (param $1 v128) (result v128)
- (i8x16.add_saturate_u
+ (func $i8x16.add_sat_u (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.add_sat_u
(local.get $0)
(local.get $1)
)
@@ -632,22 +700,26 @@
(local.get $1)
)
)
- (func $i8x16.sub_saturate_s (param $0 v128) (param $1 v128) (result v128)
- (i8x16.sub_saturate_s
+ (func $i8x16.sub_sat_s (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.sub_sat_s
(local.get $0)
(local.get $1)
)
)
- (func $i8x16.sub_saturate_u (param $0 v128) (param $1 v128) (result v128)
- (i8x16.sub_saturate_u
+ (func $i8x16.sub_sat_u (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.sub_sat_u
(local.get $0)
(local.get $1)
)
)
- (func $i8x16.mul (param $0 v128) (param $1 v128) (result v128)
- (i8x16.mul
+ (func $f64x2.ceil (param $0 v128) (result v128)
+ (f64x2.ceil
+ (local.get $0)
+ )
+ )
+ (func $f64x2.floor (param $0 v128) (result v128)
+ (f64x2.floor
(local.get $0)
- (local.get $1)
)
)
(func $i8x16.min_s (param $0 v128) (param $1 v128) (result v128)
@@ -674,12 +746,37 @@
(local.get $1)
)
)
+ (func $f64x2.trunc (param $0 v128) (result v128)
+ (f64x2.trunc
+ (local.get $0)
+ )
+ )
(func $i8x16.avgr_u (param $0 v128) (param $1 v128) (result v128)
(i8x16.avgr_u
(local.get $0)
(local.get $1)
)
)
+ (func $i16x8.extadd_pairwise_i8x16_s (param $0 v128) (result v128)
+ (i16x8.extadd_pairwise_i8x16_s
+ (local.get $0)
+ )
+ )
+ (func $i16x8.extadd_pairwise_i8x16_u (param $0 v128) (result v128)
+ (i16x8.extadd_pairwise_i8x16_u
+ (local.get $0)
+ )
+ )
+ (func $i32x4.extadd_pairwise_i16x8_s (param $0 v128) (result v128)
+ (i32x4.extadd_pairwise_i16x8_s
+ (local.get $0)
+ )
+ )
+ (func $i32x4.extadd_pairwise_i16x8_u (param $0 v128) (result v128)
+ (i32x4.extadd_pairwise_i16x8_u
+ (local.get $0)
+ )
+ )
(func $i16x8.abs (param $0 v128) (result v128)
(i16x8.abs
(local.get $0)
@@ -690,9 +787,10 @@
(local.get $0)
)
)
- (func $i16x8.any_true (param $0 v128) (result i32)
- (i16x8.any_true
+ (func $i16x8.q15mulr_sat_s (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.q15mulr_sat_s
(local.get $0)
+ (local.get $1)
)
)
(func $i16x8.all_true (param $0 v128) (result i32)
@@ -705,6 +803,38 @@
(local.get $0)
)
)
+ (func $i16x8.narrow_i32x4_s (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.narrow_i32x4_s
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i16x8.narrow_i32x4_u (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.narrow_i32x4_u
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i16x8.extend_low_i8x16_s (param $0 v128) (result v128)
+ (i16x8.extend_low_i8x16_s
+ (local.get $0)
+ )
+ )
+ (func $i16x8.extend_high_i8x16_s (param $0 v128) (result v128)
+ (i16x8.extend_high_i8x16_s
+ (local.get $0)
+ )
+ )
+ (func $i16x8.extend_low_i8x16_u (param $0 v128) (result v128)
+ (i16x8.extend_low_i8x16_u
+ (local.get $0)
+ )
+ )
+ (func $i16x8.extend_high_i8x16_u (param $0 v128) (result v128)
+ (i16x8.extend_high_i8x16_u
+ (local.get $0)
+ )
+ )
(func $i16x8.shl (param $0 v128) (param $1 i32) (result v128)
(i16x8.shl
(local.get $0)
@@ -729,14 +859,14 @@
(local.get $1)
)
)
- (func $i16x8.add_saturate_s (param $0 v128) (param $1 v128) (result v128)
- (i16x8.add_saturate_s
+ (func $i16x8.add_sat_s (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.add_sat_s
(local.get $0)
(local.get $1)
)
)
- (func $i16x8.add_saturate_u (param $0 v128) (param $1 v128) (result v128)
- (i16x8.add_saturate_u
+ (func $i16x8.add_sat_u (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.add_sat_u
(local.get $0)
(local.get $1)
)
@@ -747,18 +877,23 @@
(local.get $1)
)
)
- (func $i16x8.sub_saturate_s (param $0 v128) (param $1 v128) (result v128)
- (i16x8.sub_saturate_s
+ (func $i16x8.sub_sat_s (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.sub_sat_s
(local.get $0)
(local.get $1)
)
)
- (func $i16x8.sub_saturate_u (param $0 v128) (param $1 v128) (result v128)
- (i16x8.sub_saturate_u
+ (func $i16x8.sub_sat_u (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.sub_sat_u
(local.get $0)
(local.get $1)
)
)
+ (func $f64x2.nearest (param $0 v128) (result v128)
+ (f64x2.nearest
+ (local.get $0)
+ )
+ )
(func $i16x8.mul (param $0 v128) (param $1 v128) (result v128)
(i16x8.mul
(local.get $0)
@@ -795,12 +930,6 @@
(local.get $1)
)
)
- (func $i16x8.q15mulr_sat_s (param $0 v128) (param $1 v128) (result v128)
- (i16x8.q15mulr_sat_s
- (local.get $0)
- (local.get $1)
- )
- )
(func $i16x8.extmul_low_i8x16_s (param $0 v128) (param $1 v128) (result v128)
(i16x8.extmul_low_i8x16_s
(local.get $0)
@@ -835,11 +964,6 @@
(local.get $0)
)
)
- (func $i32x4.any_true (param $0 v128) (result i32)
- (i32x4.any_true
- (local.get $0)
- )
- )
(func $i32x4.all_true (param $0 v128) (result i32)
(i32x4.all_true
(local.get $0)
@@ -850,6 +974,26 @@
(local.get $0)
)
)
+ (func $i32x4.extend_low_i16x8_s (param $0 v128) (result v128)
+ (i32x4.extend_low_i16x8_s
+ (local.get $0)
+ )
+ )
+ (func $i32x4.extend_high_i16x8_s (param $0 v128) (result v128)
+ (i32x4.extend_high_i16x8_s
+ (local.get $0)
+ )
+ )
+ (func $i32x4.extend_low_i16x8_u (param $0 v128) (result v128)
+ (i32x4.extend_low_i16x8_u
+ (local.get $0)
+ )
+ )
+ (func $i32x4.extend_high_i16x8_u (param $0 v128) (result v128)
+ (i32x4.extend_high_i16x8_u
+ (local.get $0)
+ )
+ )
(func $i32x4.shl (param $0 v128) (param $1 i32) (result v128)
(i32x4.shl
(local.get $0)
@@ -940,16 +1084,46 @@
(local.get $1)
)
)
+ (func $i64x2.abs (param $0 v128) (result v128)
+ (i64x2.abs
+ (local.get $0)
+ )
+ )
(func $i64x2.neg (param $0 v128) (result v128)
(i64x2.neg
(local.get $0)
)
)
+ (func $i64x2.all_true (param $0 v128) (result i32)
+ (i64x2.all_true
+ (local.get $0)
+ )
+ )
(func $i64x2.bitmask (param $0 v128) (result i32)
(i64x2.bitmask
(local.get $0)
)
)
+ (func $i64x2.extend_low_i32x4_s (param $0 v128) (result v128)
+ (i64x2.extend_low_i32x4_s
+ (local.get $0)
+ )
+ )
+ (func $i64x2.extend_high_i32x4_s (param $0 v128) (result v128)
+ (i64x2.extend_high_i32x4_s
+ (local.get $0)
+ )
+ )
+ (func $i64x2.extend_low_i32x4_u (param $0 v128) (result v128)
+ (i64x2.extend_low_i32x4_u
+ (local.get $0)
+ )
+ )
+ (func $i64x2.extend_high_i32x4_u (param $0 v128) (result v128)
+ (i64x2.extend_high_i32x4_u
+ (local.get $0)
+ )
+ )
(func $i64x2.shl (param $0 v128) (param $1 i32) (result v128)
(i64x2.shl
(local.get $0)
@@ -986,6 +1160,42 @@
(local.get $1)
)
)
+ (func $i64x2.eq (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.eq
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i64x2.ne (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.ne
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i64x2.lt_s (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.lt_s
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i64x2.gt_s (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.gt_s
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i64x2.le_s (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.le_s
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i64x2.ge_s (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.ge_s
+ (local.get $0)
+ (local.get $1)
+ )
+ )
(func $i64x2.extmul_low_i32x4_s (param $0 v128) (param $1 v128) (result v128)
(i64x2.extmul_low_i32x4_s
(local.get $0)
@@ -1010,6 +1220,21 @@
(local.get $1)
)
)
+ (func $f32x4.abs (param $0 v128) (result v128)
+ (f32x4.abs
+ (local.get $0)
+ )
+ )
+ (func $f32x4.neg (param $0 v128) (result v128)
+ (f32x4.neg
+ (local.get $0)
+ )
+ )
+ (func $f32x4.sqrt (param $0 v128) (result v128)
+ (f32x4.sqrt
+ (local.get $0)
+ )
+ )
(func $f32x4.add (param $0 v128) (param $1 v128) (result v128)
(f32x4.add
(local.get $0)
@@ -1058,53 +1283,19 @@
(local.get $1)
)
)
- (func $f32x4.ceil (param $0 v128) (result v128)
- (f32x4.ceil
- (local.get $0)
- )
- )
- (func $f32x4.floor (param $0 v128) (result v128)
- (f32x4.floor
- (local.get $0)
- )
- )
- (func $f32x4.trunc (param $0 v128) (result v128)
- (f32x4.trunc
- (local.get $0)
- )
- )
- (func $f32x4.nearest (param $0 v128) (result v128)
- (f32x4.nearest
- (local.get $0)
- )
- )
- (func $f32x4.abs (param $0 v128) (result v128)
- (f32x4.abs
- (local.get $0)
- )
- )
- (func $f32x4.neg (param $0 v128) (result v128)
- (f32x4.neg
+ (func $f64x2.abs (param $0 v128) (result v128)
+ (f64x2.abs
(local.get $0)
)
)
- (func $f32x4.sqrt (param $0 v128) (result v128)
- (f32x4.sqrt
+ (func $f64x2.neg (param $0 v128) (result v128)
+ (f64x2.neg
(local.get $0)
)
)
- (func $f32x4.qfma (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (f32x4.qfma
+ (func $f64x2.sqrt (param $0 v128) (result v128)
+ (f64x2.sqrt
(local.get $0)
- (local.get $1)
- (local.get $2)
- )
- )
- (func $f32x4.qfms (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (f32x4.qfms
- (local.get $0)
- (local.get $1)
- (local.get $2)
)
)
(func $f64x2.add (param $0 v128) (param $1 v128) (result v128)
@@ -1155,75 +1346,6 @@
(local.get $1)
)
)
- (func $f64x2.ceil (param $0 v128) (result v128)
- (f64x2.ceil
- (local.get $0)
- )
- )
- (func $f64x2.floor (param $0 v128) (result v128)
- (f64x2.floor
- (local.get $0)
- )
- )
- (func $f64x2.trunc (param $0 v128) (result v128)
- (f64x2.trunc
- (local.get $0)
- )
- )
- (func $f64x2.nearest (param $0 v128) (result v128)
- (f64x2.nearest
- (local.get $0)
- )
- )
- (func $f64x2.abs (param $0 v128) (result v128)
- (f64x2.abs
- (local.get $0)
- )
- )
- (func $f64x2.neg (param $0 v128) (result v128)
- (f64x2.neg
- (local.get $0)
- )
- )
- (func $f64x2.sqrt (param $0 v128) (result v128)
- (f64x2.sqrt
- (local.get $0)
- )
- )
- (func $f64x2.qfma (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (f64x2.qfma
- (local.get $0)
- (local.get $1)
- (local.get $2)
- )
- )
- (func $f64x2.qfms (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (f64x2.qfms
- (local.get $0)
- (local.get $1)
- (local.get $2)
- )
- )
- (func $i16x8.extadd_pairwise_i8x16_s (param $0 v128) (result v128)
- (i16x8.extadd_pairwise_i8x16_s
- (local.get $0)
- )
- )
- (func $i16x8.extadd_pairwise_i8x16_u (param $0 v128) (result v128)
- (i16x8.extadd_pairwise_i8x16_u
- (local.get $0)
- )
- )
- (func $i32x4.extadd_pairwise_i16x8_s (param $0 v128) (result v128)
- (i32x4.extadd_pairwise_i16x8_s
- (local.get $0)
- )
- )
- (func $i32x4.extadd_pairwise_i16x8_u (param $0 v128) (result v128)
- (i32x4.extadd_pairwise_i16x8_u
- (local.get $0)
- )
- )
(func $i32x4.trunc_sat_f32x4_s (param $0 v128) (result v128)
(i32x4.trunc_sat_f32x4_s
(local.get $0)
@@ -1234,16 +1356,6 @@
(local.get $0)
)
)
- (func $i64x2.trunc_sat_f64x2_s (param $0 v128) (result v128)
- (i64x2.trunc_sat_f64x2_s
- (local.get $0)
- )
- )
- (func $i64x2.trunc_sat_f64x2_u (param $0 v128) (result v128)
- (i64x2.trunc_sat_f64x2_u
- (local.get $0)
- )
- )
(func $f32x4.convert_i32x4_s (param $0 v128) (result v128)
(f32x4.convert_i32x4_s
(local.get $0)
@@ -1254,173 +1366,13 @@
(local.get $0)
)
)
- (func $f64x2.convert_i64x2_s (param $0 v128) (result v128)
- (f64x2.convert_i64x2_s
- (local.get $0)
- )
- )
- (func $f64x2.convert_i64x2_u (param $0 v128) (result v128)
- (f64x2.convert_i64x2_u
- (local.get $0)
- )
- )
- (func $v8x16.load_splat (param $0 i32) (result v128)
- (v8x16.load_splat
- (local.get $0)
- )
- )
- (func $v16x8.load_splat (param $0 i32) (result v128)
- (v16x8.load_splat
+ (func $i32x4.trunc_sat_f64x2_s_zero (param $0 v128) (result v128)
+ (i32x4.trunc_sat_f64x2_s_zero
(local.get $0)
)
)
- (func $v32x4.load_splat (param $0 i32) (result v128)
- (v32x4.load_splat
- (local.get $0)
- )
- )
- (func $v64x2.load_splat (param $0 i32) (result v128)
- (v64x2.load_splat
- (local.get $0)
- )
- )
- (func $i8x16.narrow_i16x8_s (param $0 v128) (param $1 v128) (result v128)
- (i8x16.narrow_i16x8_s
- (local.get $0)
- (local.get $1)
- )
- )
- (func $i8x16.narrow_i16x8_u (param $0 v128) (param $1 v128) (result v128)
- (i8x16.narrow_i16x8_u
- (local.get $0)
- (local.get $1)
- )
- )
- (func $i16x8.narrow_i32x4_s (param $0 v128) (param $1 v128) (result v128)
- (i16x8.narrow_i32x4_s
- (local.get $0)
- (local.get $1)
- )
- )
- (func $i16x8.narrow_i32x4_u (param $0 v128) (param $1 v128) (result v128)
- (i16x8.narrow_i32x4_u
- (local.get $0)
- (local.get $1)
- )
- )
- (func $i16x8.widen_low_i8x16_s (param $0 v128) (result v128)
- (i16x8.widen_low_i8x16_s
- (local.get $0)
- )
- )
- (func $i16x8.widen_high_i8x16_s (param $0 v128) (result v128)
- (i16x8.widen_high_i8x16_s
- (local.get $0)
- )
- )
- (func $i16x8.widen_low_i8x16_u (param $0 v128) (result v128)
- (i16x8.widen_low_i8x16_u
- (local.get $0)
- )
- )
- (func $i16x8.widen_high_i8x16_u (param $0 v128) (result v128)
- (i16x8.widen_high_i8x16_u
- (local.get $0)
- )
- )
- (func $i32x4.widen_low_i16x8_s (param $0 v128) (result v128)
- (i32x4.widen_low_i16x8_s
- (local.get $0)
- )
- )
- (func $i32x4.widen_high_i16x8_s (param $0 v128) (result v128)
- (i32x4.widen_high_i16x8_s
- (local.get $0)
- )
- )
- (func $i32x4.widen_low_i16x8_u (param $0 v128) (result v128)
- (i32x4.widen_low_i16x8_u
- (local.get $0)
- )
- )
- (func $i32x4.widen_high_i16x8_u (param $0 v128) (result v128)
- (i32x4.widen_high_i16x8_u
- (local.get $0)
- )
- )
- (func $i64x2.widen_low_i32x4_s (param $0 v128) (result v128)
- (i64x2.widen_low_i32x4_s
- (local.get $0)
- )
- )
- (func $i64x2.widen_high_i32x4_s (param $0 v128) (result v128)
- (i64x2.widen_high_i32x4_s
- (local.get $0)
- )
- )
- (func $i64x2.widen_low_i32x4_u (param $0 v128) (result v128)
- (i64x2.widen_low_i32x4_u
- (local.get $0)
- )
- )
- (func $i64x2.widen_high_i32x4_u (param $0 v128) (result v128)
- (i64x2.widen_high_i32x4_u
- (local.get $0)
- )
- )
- (func $i16x8.load8x8_u (param $0 i32) (result v128)
- (i16x8.load8x8_u
- (local.get $0)
- )
- )
- (func $i16x8.load8x8_s (param $0 i32) (result v128)
- (i16x8.load8x8_s
- (local.get $0)
- )
- )
- (func $i32x4.load16x4_s (param $0 i32) (result v128)
- (i32x4.load16x4_s
- (local.get $0)
- )
- )
- (func $i32x4.load16x4_u (param $0 i32) (result v128)
- (i32x4.load16x4_u
- (local.get $0)
- )
- )
- (func $i64x2.load32x2_s (param $0 i32) (result v128)
- (i64x2.load32x2_s
- (local.get $0)
- )
- )
- (func $i64x2.load32x2_u (param $0 i32) (result v128)
- (i64x2.load32x2_u
- (local.get $0)
- )
- )
- (func $v128.load32_zero (param $0 i32) (result v128)
- (v128.load32_zero
- (local.get $0)
- )
- )
- (func $v128.load64_zero (param $0 i32) (result v128)
- (v128.load64_zero
- (local.get $0)
- )
- )
- (func $v8x16.swizzle (param $0 v128) (param $1 v128) (result v128)
- (v8x16.swizzle
- (local.get $0)
- (local.get $1)
- )
- )
- (func $prefetch.t (param $0 i32)
- (prefetch.t offset=3 align=2
- (local.get $0)
- )
- )
- (func $prefetch.nt (param $0 i32)
- (prefetch.nt offset=3 align=2
+ (func $i32x4.trunc_sat_f64x2_u_zero (param $0 v128) (result v128)
+ (i32x4.trunc_sat_f64x2_u_zero
(local.get $0)
)
)
@@ -1434,34 +1386,4 @@
(local.get $0)
)
)
- (func $i32x4.trunc_sat_f64x2_zero_s (param $0 v128) (result v128)
- (i32x4.trunc_sat_f64x2_zero_s
- (local.get $0)
- )
- )
- (func $i32x4.trunc_sat_f64x2_zero_u (param $0 v128) (result v128)
- (i32x4.trunc_sat_f64x2_zero_u
- (local.get $0)
- )
- )
- (func $f32x4.demote_f64x2_zero (param $0 v128) (result v128)
- (f32x4.demote_f64x2_zero
- (local.get $0)
- )
- )
- (func $f64x2.promote_low_f32x4 (param $0 v128) (result v128)
- (f64x2.promote_low_f32x4
- (local.get $0)
- )
- )
- (func $i32x4.widen_i8x16_s (param $0 v128) (result v128)
- (i32x4.widen_i8x16_s 0
- (local.get $0)
- )
- )
- (func $i32x4.widen_i8x16_u (param $0 v128) (result v128)
- (i32x4.widen_i8x16_u 0
- (local.get $0)
- )
- )
)
diff --git a/test/simd.wast.fromBinary b/test/simd.wast.fromBinary
index 4c84b5b25..56f3fa2cc 100644
--- a/test/simd.wast.fromBinary
+++ b/test/simd.wast.fromBinary
@@ -2,13 +2,11 @@
(type $v128_v128_=>_v128 (func (param v128 v128) (result v128)))
(type $v128_=>_v128 (func (param v128) (result v128)))
(type $i32_=>_v128 (func (param i32) (result v128)))
- (type $v128_=>_i32 (func (param v128) (result i32)))
(type $v128_i32_=>_v128 (func (param v128 i32) (result v128)))
- (type $v128_v128_v128_=>_v128 (func (param v128 v128 v128) (result v128)))
+ (type $v128_=>_i32 (func (param v128) (result i32)))
(type $i32_v128_=>_none (func (param i32 v128)))
(type $i32_v128_=>_v128 (func (param i32 v128) (result v128)))
(type $none_=>_v128 (func (result v128)))
- (type $i32_=>_none (func (param i32)))
(type $v128_=>_i64 (func (param v128) (result i64)))
(type $v128_=>_f32 (func (param v128) (result f32)))
(type $v128_=>_f64 (func (param v128) (result f64)))
@@ -17,12 +15,63 @@
(type $v128_i64_=>_v128 (func (param v128 i64) (result v128)))
(type $v128_f32_=>_v128 (func (param v128 f32) (result v128)))
(type $v128_f64_=>_v128 (func (param v128 f64) (result v128)))
+ (type $v128_v128_v128_=>_v128 (func (param v128 v128 v128) (result v128)))
(memory $0 1 1)
(func $v128.load (param $0 i32) (result v128)
(v128.load
(local.get $0)
)
)
+ (func $v128.load8x8_s (param $0 i32) (result v128)
+ (v128.load8x8_s
+ (local.get $0)
+ )
+ )
+ (func $v128.load8x8_u (param $0 i32) (result v128)
+ (v128.load8x8_u
+ (local.get $0)
+ )
+ )
+ (func $v128.load16x4_s (param $0 i32) (result v128)
+ (v128.load16x4_s
+ (local.get $0)
+ )
+ )
+ (func $v128.load16x4_u (param $0 i32) (result v128)
+ (v128.load16x4_u
+ (local.get $0)
+ )
+ )
+ (func $v128.load32x2_s (param $0 i32) (result v128)
+ (v128.load32x2_s
+ (local.get $0)
+ )
+ )
+ (func $v128.load32x2_u (param $0 i32) (result v128)
+ (v128.load32x2_u
+ (local.get $0)
+ )
+ )
+ (func $v128.load8_splat (param $0 i32) (result v128)
+ (v128.load8_splat
+ (local.get $0)
+ )
+ )
+ (func $v128.load16_splat (param $0 i32) (result v128)
+ (v128.load16_splat
+ (local.get $0)
+ )
+ )
+ (func $v128.load32_splat (param $0 i32) (result v128)
+ (v128.load32_splat
+ (local.get $0)
+ )
+ )
+ (func $v128.load64_splat (param $0 i32) (result v128)
+ (v128.load64_splat
+ (local.get $0)
+ )
+ )
(func $v128.store (param $0 i32) (param $1 v128)
(v128.store
(local.get $0)
@@ -47,8 +96,14 @@
(func $v128.const.f64x2 (result v128)
(v128.const i32x4 0x00000000 0x3ff00000 0x00000000 0x40000000)
)
- (func $v128.shuffle (param $0 v128) (param $1 v128) (result v128)
- (v8x16.shuffle 0 17 2 19 4 21 6 23 8 25 10 27 12 29 14 31
+ (func $i8x16.shuffle (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.shuffle 0 17 2 19 4 21 6 23 8 25 10 27 12 29 14 31
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i8x16.swizzle (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.swizzle
(local.get $0)
(local.get $1)
)
@@ -58,6 +113,21 @@
(local.get $0)
)
)
+ (func $i16x8.splat (param $0 i32) (result v128)
+ (i16x8.splat
+ (local.get $0)
+ )
+ )
+ (func $f32x4.splat (param $0 f32) (result v128)
+ (f32x4.splat
+ (local.get $0)
+ )
+ )
+ (func $f64x2.splat (param $0 f64) (result v128)
+ (f64x2.splat
+ (local.get $0)
+ )
+ )
(func $i8x16.extract_lane_s (param $0 v128) (result i32)
(i8x16.extract_lane_s 0
(local.get $0)
@@ -74,11 +144,6 @@
(local.get $1)
)
)
- (func $i16x8.splat (param $0 i32) (result v128)
- (i16x8.splat
- (local.get $0)
- )
- )
(func $i16x8.extract_lane_s (param $0 v128) (result i32)
(i16x8.extract_lane_s 0
(local.get $0)
@@ -95,11 +160,6 @@
(local.get $1)
)
)
- (func $i32x4.splat (param $0 i32) (result v128)
- (i32x4.splat
- (local.get $0)
- )
- )
(func $i32x4.extract_lane (param $0 v128) (result i32)
(i32x4.extract_lane 0
(local.get $0)
@@ -122,11 +182,6 @@
(local.get $1)
)
)
- (func $f32x4.splat (param $0 f32) (result v128)
- (f32x4.splat
- (local.get $0)
- )
- )
(func $f32x4.extract_lane (param $0 v128) (result f32)
(f32x4.extract_lane 0
(local.get $0)
@@ -138,11 +193,6 @@
(local.get $1)
)
)
- (func $f64x2.splat (param $0 f64) (result v128)
- (f64x2.splat
- (local.get $0)
- )
- )
(func $f64x2.extract_lane (param $0 v128) (result f64)
(f64x2.extract_lane 0
(local.get $0)
@@ -334,12 +384,6 @@
(local.get $1)
)
)
- (func $i64x2.eq (param $0 v128) (param $1 v128) (result v128)
- (i64x2.eq
- (local.get $0)
- (local.get $1)
- )
- )
(func $f32x4.eq (param $0 v128) (param $1 v128) (result v128)
(f32x4.eq
(local.get $0)
@@ -423,57 +467,34 @@
(local.get $1)
)
)
- (func $v128.or (param $0 v128) (param $1 v128) (result v128)
- (v128.or
- (local.get $0)
- (local.get $1)
- )
- )
- (func $v128.xor (param $0 v128) (param $1 v128) (result v128)
- (v128.xor
- (local.get $0)
- (local.get $1)
- )
- )
(func $v128.andnot (param $0 v128) (param $1 v128) (result v128)
(v128.andnot
(local.get $0)
(local.get $1)
)
)
- (func $v128.bitselect (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v128.bitselect
+ (func $v128.or (param $0 v128) (param $1 v128) (result v128)
+ (v128.or
(local.get $0)
(local.get $1)
- (local.get $2)
)
)
- (func $v8x16.signselect (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v8x16.signselect
+ (func $v128.xor (param $0 v128) (param $1 v128) (result v128)
+ (v128.xor
(local.get $0)
(local.get $1)
- (local.get $2)
)
)
- (func $v16x8.signselect (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v16x8.signselect
+ (func $v128.bitselect (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
+ (v128.bitselect
(local.get $0)
(local.get $1)
(local.get $2)
)
)
- (func $v32x4.signselect (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v32x4.signselect
+ (func $v128.any_true (param $0 v128) (result i32)
+ (v128.any_true
(local.get $0)
- (local.get $1)
- (local.get $2)
- )
- )
- (func $v64x2.signselect (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v64x2.signselect
- (local.get $0)
- (local.get $1)
- (local.get $2)
)
)
(func $v128.load8_lane (param $0 i32) (param $1 v128) (result v128)
@@ -560,8 +581,23 @@
(local.get $1)
)
)
- (func $i8x16.popcnt (param $0 v128) (result v128)
- (i8x16.popcnt
+ (func $v128.load32_zero (param $0 i32) (result v128)
+ (v128.load32_zero
+ (local.get $0)
+ )
+ )
+ (func $v128.load64_zero (param $0 i32) (result v128)
+ (v128.load64_zero
+ (local.get $0)
+ )
+ )
+ (func $f32x4.demote_f64x2_zero (param $0 v128) (result v128)
+ (f32x4.demote_f64x2_zero
+ (local.get $0)
+ )
+ )
+ (func $f64x2.promote_low_f32x4 (param $0 v128) (result v128)
+ (f64x2.promote_low_f32x4
(local.get $0)
)
)
@@ -575,8 +611,8 @@
(local.get $0)
)
)
- (func $i8x16.any_true (param $0 v128) (result i32)
- (i8x16.any_true
+ (func $i8x16.popcnt (param $0 v128) (result v128)
+ (i8x16.popcnt
(local.get $0)
)
)
@@ -590,6 +626,38 @@
(local.get $0)
)
)
+ (func $i8x16.narrow_i16x8_s (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.narrow_i16x8_s
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i8x16.narrow_i16x8_u (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.narrow_i16x8_u
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $f32x4.ceil (param $0 v128) (result v128)
+ (f32x4.ceil
+ (local.get $0)
+ )
+ )
+ (func $f32x4.floor (param $0 v128) (result v128)
+ (f32x4.floor
+ (local.get $0)
+ )
+ )
+ (func $f32x4.trunc (param $0 v128) (result v128)
+ (f32x4.trunc
+ (local.get $0)
+ )
+ )
+ (func $f32x4.nearest (param $0 v128) (result v128)
+ (f32x4.nearest
+ (local.get $0)
+ )
+ )
(func $i8x16.shl (param $0 v128) (param $1 i32) (result v128)
(i8x16.shl
(local.get $0)
@@ -614,14 +682,14 @@
(local.get $1)
)
)
- (func $i8x16.add_saturate_s (param $0 v128) (param $1 v128) (result v128)
- (i8x16.add_saturate_s
+ (func $i8x16.add_sat_s (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.add_sat_s
(local.get $0)
(local.get $1)
)
)
- (func $i8x16.add_saturate_u (param $0 v128) (param $1 v128) (result v128)
- (i8x16.add_saturate_u
+ (func $i8x16.add_sat_u (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.add_sat_u
(local.get $0)
(local.get $1)
)
@@ -632,22 +700,26 @@
(local.get $1)
)
)
- (func $i8x16.sub_saturate_s (param $0 v128) (param $1 v128) (result v128)
- (i8x16.sub_saturate_s
+ (func $i8x16.sub_sat_s (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.sub_sat_s
(local.get $0)
(local.get $1)
)
)
- (func $i8x16.sub_saturate_u (param $0 v128) (param $1 v128) (result v128)
- (i8x16.sub_saturate_u
+ (func $i8x16.sub_sat_u (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.sub_sat_u
(local.get $0)
(local.get $1)
)
)
- (func $i8x16.mul (param $0 v128) (param $1 v128) (result v128)
- (i8x16.mul
+ (func $f64x2.ceil (param $0 v128) (result v128)
+ (f64x2.ceil
+ (local.get $0)
+ )
+ )
+ (func $f64x2.floor (param $0 v128) (result v128)
+ (f64x2.floor
(local.get $0)
- (local.get $1)
)
)
(func $i8x16.min_s (param $0 v128) (param $1 v128) (result v128)
@@ -674,12 +746,37 @@
(local.get $1)
)
)
+ (func $f64x2.trunc (param $0 v128) (result v128)
+ (f64x2.trunc
+ (local.get $0)
+ )
+ )
(func $i8x16.avgr_u (param $0 v128) (param $1 v128) (result v128)
(i8x16.avgr_u
(local.get $0)
(local.get $1)
)
)
+ (func $i16x8.extadd_pairwise_i8x16_s (param $0 v128) (result v128)
+ (i16x8.extadd_pairwise_i8x16_s
+ (local.get $0)
+ )
+ )
+ (func $i16x8.extadd_pairwise_i8x16_u (param $0 v128) (result v128)
+ (i16x8.extadd_pairwise_i8x16_u
+ (local.get $0)
+ )
+ )
+ (func $i32x4.extadd_pairwise_i16x8_s (param $0 v128) (result v128)
+ (i32x4.extadd_pairwise_i16x8_s
+ (local.get $0)
+ )
+ )
+ (func $i32x4.extadd_pairwise_i16x8_u (param $0 v128) (result v128)
+ (i32x4.extadd_pairwise_i16x8_u
+ (local.get $0)
+ )
+ )
(func $i16x8.abs (param $0 v128) (result v128)
(i16x8.abs
(local.get $0)
@@ -690,9 +787,10 @@
(local.get $0)
)
)
- (func $i16x8.any_true (param $0 v128) (result i32)
- (i16x8.any_true
+ (func $i16x8.q15mulr_sat_s (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.q15mulr_sat_s
(local.get $0)
+ (local.get $1)
)
)
(func $i16x8.all_true (param $0 v128) (result i32)
@@ -705,6 +803,38 @@
(local.get $0)
)
)
+ (func $i16x8.narrow_i32x4_s (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.narrow_i32x4_s
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i16x8.narrow_i32x4_u (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.narrow_i32x4_u
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i16x8.extend_low_i8x16_s (param $0 v128) (result v128)
+ (i16x8.extend_low_i8x16_s
+ (local.get $0)
+ )
+ )
+ (func $i16x8.extend_high_i8x16_s (param $0 v128) (result v128)
+ (i16x8.extend_high_i8x16_s
+ (local.get $0)
+ )
+ )
+ (func $i16x8.extend_low_i8x16_u (param $0 v128) (result v128)
+ (i16x8.extend_low_i8x16_u
+ (local.get $0)
+ )
+ )
+ (func $i16x8.extend_high_i8x16_u (param $0 v128) (result v128)
+ (i16x8.extend_high_i8x16_u
+ (local.get $0)
+ )
+ )
(func $i16x8.shl (param $0 v128) (param $1 i32) (result v128)
(i16x8.shl
(local.get $0)
@@ -729,14 +859,14 @@
(local.get $1)
)
)
- (func $i16x8.add_saturate_s (param $0 v128) (param $1 v128) (result v128)
- (i16x8.add_saturate_s
+ (func $i16x8.add_sat_s (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.add_sat_s
(local.get $0)
(local.get $1)
)
)
- (func $i16x8.add_saturate_u (param $0 v128) (param $1 v128) (result v128)
- (i16x8.add_saturate_u
+ (func $i16x8.add_sat_u (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.add_sat_u
(local.get $0)
(local.get $1)
)
@@ -747,18 +877,23 @@
(local.get $1)
)
)
- (func $i16x8.sub_saturate_s (param $0 v128) (param $1 v128) (result v128)
- (i16x8.sub_saturate_s
+ (func $i16x8.sub_sat_s (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.sub_sat_s
(local.get $0)
(local.get $1)
)
)
- (func $i16x8.sub_saturate_u (param $0 v128) (param $1 v128) (result v128)
- (i16x8.sub_saturate_u
+ (func $i16x8.sub_sat_u (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.sub_sat_u
(local.get $0)
(local.get $1)
)
)
+ (func $f64x2.nearest (param $0 v128) (result v128)
+ (f64x2.nearest
+ (local.get $0)
+ )
+ )
(func $i16x8.mul (param $0 v128) (param $1 v128) (result v128)
(i16x8.mul
(local.get $0)
@@ -795,12 +930,6 @@
(local.get $1)
)
)
- (func $i16x8.q15mulr_sat_s (param $0 v128) (param $1 v128) (result v128)
- (i16x8.q15mulr_sat_s
- (local.get $0)
- (local.get $1)
- )
- )
(func $i16x8.extmul_low_i8x16_s (param $0 v128) (param $1 v128) (result v128)
(i16x8.extmul_low_i8x16_s
(local.get $0)
@@ -835,11 +964,6 @@
(local.get $0)
)
)
- (func $i32x4.any_true (param $0 v128) (result i32)
- (i32x4.any_true
- (local.get $0)
- )
- )
(func $i32x4.all_true (param $0 v128) (result i32)
(i32x4.all_true
(local.get $0)
@@ -850,6 +974,26 @@
(local.get $0)
)
)
+ (func $i32x4.extend_low_i16x8_s (param $0 v128) (result v128)
+ (i32x4.extend_low_i16x8_s
+ (local.get $0)
+ )
+ )
+ (func $i32x4.extend_high_i16x8_s (param $0 v128) (result v128)
+ (i32x4.extend_high_i16x8_s
+ (local.get $0)
+ )
+ )
+ (func $i32x4.extend_low_i16x8_u (param $0 v128) (result v128)
+ (i32x4.extend_low_i16x8_u
+ (local.get $0)
+ )
+ )
+ (func $i32x4.extend_high_i16x8_u (param $0 v128) (result v128)
+ (i32x4.extend_high_i16x8_u
+ (local.get $0)
+ )
+ )
(func $i32x4.shl (param $0 v128) (param $1 i32) (result v128)
(i32x4.shl
(local.get $0)
@@ -940,16 +1084,46 @@
(local.get $1)
)
)
+ (func $i64x2.abs (param $0 v128) (result v128)
+ (i64x2.abs
+ (local.get $0)
+ )
+ )
(func $i64x2.neg (param $0 v128) (result v128)
(i64x2.neg
(local.get $0)
)
)
+ (func $i64x2.all_true (param $0 v128) (result i32)
+ (i64x2.all_true
+ (local.get $0)
+ )
+ )
(func $i64x2.bitmask (param $0 v128) (result i32)
(i64x2.bitmask
(local.get $0)
)
)
+ (func $i64x2.extend_low_i32x4_s (param $0 v128) (result v128)
+ (i64x2.extend_low_i32x4_s
+ (local.get $0)
+ )
+ )
+ (func $i64x2.extend_high_i32x4_s (param $0 v128) (result v128)
+ (i64x2.extend_high_i32x4_s
+ (local.get $0)
+ )
+ )
+ (func $i64x2.extend_low_i32x4_u (param $0 v128) (result v128)
+ (i64x2.extend_low_i32x4_u
+ (local.get $0)
+ )
+ )
+ (func $i64x2.extend_high_i32x4_u (param $0 v128) (result v128)
+ (i64x2.extend_high_i32x4_u
+ (local.get $0)
+ )
+ )
(func $i64x2.shl (param $0 v128) (param $1 i32) (result v128)
(i64x2.shl
(local.get $0)
@@ -986,6 +1160,42 @@
(local.get $1)
)
)
+ (func $i64x2.eq (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.eq
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i64x2.ne (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.ne
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i64x2.lt_s (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.lt_s
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i64x2.gt_s (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.gt_s
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i64x2.le_s (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.le_s
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $i64x2.ge_s (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.ge_s
+ (local.get $0)
+ (local.get $1)
+ )
+ )
(func $i64x2.extmul_low_i32x4_s (param $0 v128) (param $1 v128) (result v128)
(i64x2.extmul_low_i32x4_s
(local.get $0)
@@ -1010,6 +1220,21 @@
(local.get $1)
)
)
+ (func $f32x4.abs (param $0 v128) (result v128)
+ (f32x4.abs
+ (local.get $0)
+ )
+ )
+ (func $f32x4.neg (param $0 v128) (result v128)
+ (f32x4.neg
+ (local.get $0)
+ )
+ )
+ (func $f32x4.sqrt (param $0 v128) (result v128)
+ (f32x4.sqrt
+ (local.get $0)
+ )
+ )
(func $f32x4.add (param $0 v128) (param $1 v128) (result v128)
(f32x4.add
(local.get $0)
@@ -1058,53 +1283,19 @@
(local.get $1)
)
)
- (func $f32x4.ceil (param $0 v128) (result v128)
- (f32x4.ceil
- (local.get $0)
- )
- )
- (func $f32x4.floor (param $0 v128) (result v128)
- (f32x4.floor
- (local.get $0)
- )
- )
- (func $f32x4.trunc (param $0 v128) (result v128)
- (f32x4.trunc
- (local.get $0)
- )
- )
- (func $f32x4.nearest (param $0 v128) (result v128)
- (f32x4.nearest
- (local.get $0)
- )
- )
- (func $f32x4.abs (param $0 v128) (result v128)
- (f32x4.abs
- (local.get $0)
- )
- )
- (func $f32x4.neg (param $0 v128) (result v128)
- (f32x4.neg
+ (func $f64x2.abs (param $0 v128) (result v128)
+ (f64x2.abs
(local.get $0)
)
)
- (func $f32x4.sqrt (param $0 v128) (result v128)
- (f32x4.sqrt
+ (func $f64x2.neg (param $0 v128) (result v128)
+ (f64x2.neg
(local.get $0)
)
)
- (func $f32x4.qfma (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (f32x4.qfma
+ (func $f64x2.sqrt (param $0 v128) (result v128)
+ (f64x2.sqrt
(local.get $0)
- (local.get $1)
- (local.get $2)
- )
- )
- (func $f32x4.qfms (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (f32x4.qfms
- (local.get $0)
- (local.get $1)
- (local.get $2)
)
)
(func $f64x2.add (param $0 v128) (param $1 v128) (result v128)
@@ -1155,75 +1346,6 @@
(local.get $1)
)
)
- (func $f64x2.ceil (param $0 v128) (result v128)
- (f64x2.ceil
- (local.get $0)
- )
- )
- (func $f64x2.floor (param $0 v128) (result v128)
- (f64x2.floor
- (local.get $0)
- )
- )
- (func $f64x2.trunc (param $0 v128) (result v128)
- (f64x2.trunc
- (local.get $0)
- )
- )
- (func $f64x2.nearest (param $0 v128) (result v128)
- (f64x2.nearest
- (local.get $0)
- )
- )
- (func $f64x2.abs (param $0 v128) (result v128)
- (f64x2.abs
- (local.get $0)
- )
- )
- (func $f64x2.neg (param $0 v128) (result v128)
- (f64x2.neg
- (local.get $0)
- )
- )
- (func $f64x2.sqrt (param $0 v128) (result v128)
- (f64x2.sqrt
- (local.get $0)
- )
- )
- (func $f64x2.qfma (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (f64x2.qfma
- (local.get $0)
- (local.get $1)
- (local.get $2)
- )
- )
- (func $f64x2.qfms (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (f64x2.qfms
- (local.get $0)
- (local.get $1)
- (local.get $2)
- )
- )
- (func $i16x8.extadd_pairwise_i8x16_s (param $0 v128) (result v128)
- (i16x8.extadd_pairwise_i8x16_s
- (local.get $0)
- )
- )
- (func $i16x8.extadd_pairwise_i8x16_u (param $0 v128) (result v128)
- (i16x8.extadd_pairwise_i8x16_u
- (local.get $0)
- )
- )
- (func $i32x4.extadd_pairwise_i16x8_s (param $0 v128) (result v128)
- (i32x4.extadd_pairwise_i16x8_s
- (local.get $0)
- )
- )
- (func $i32x4.extadd_pairwise_i16x8_u (param $0 v128) (result v128)
- (i32x4.extadd_pairwise_i16x8_u
- (local.get $0)
- )
- )
(func $i32x4.trunc_sat_f32x4_s (param $0 v128) (result v128)
(i32x4.trunc_sat_f32x4_s
(local.get $0)
@@ -1234,16 +1356,6 @@
(local.get $0)
)
)
- (func $i64x2.trunc_sat_f64x2_s (param $0 v128) (result v128)
- (i64x2.trunc_sat_f64x2_s
- (local.get $0)
- )
- )
- (func $i64x2.trunc_sat_f64x2_u (param $0 v128) (result v128)
- (i64x2.trunc_sat_f64x2_u
- (local.get $0)
- )
- )
(func $f32x4.convert_i32x4_s (param $0 v128) (result v128)
(f32x4.convert_i32x4_s
(local.get $0)
@@ -1254,173 +1366,13 @@
(local.get $0)
)
)
- (func $f64x2.convert_i64x2_s (param $0 v128) (result v128)
- (f64x2.convert_i64x2_s
- (local.get $0)
- )
- )
- (func $f64x2.convert_i64x2_u (param $0 v128) (result v128)
- (f64x2.convert_i64x2_u
- (local.get $0)
- )
- )
- (func $v8x16.load_splat (param $0 i32) (result v128)
- (v8x16.load_splat
- (local.get $0)
- )
- )
- (func $v16x8.load_splat (param $0 i32) (result v128)
- (v16x8.load_splat
+ (func $i32x4.trunc_sat_f64x2_s_zero (param $0 v128) (result v128)
+ (i32x4.trunc_sat_f64x2_s_zero
(local.get $0)
)
)
- (func $v32x4.load_splat (param $0 i32) (result v128)
- (v32x4.load_splat
- (local.get $0)
- )
- )
- (func $v64x2.load_splat (param $0 i32) (result v128)
- (v64x2.load_splat
- (local.get $0)
- )
- )
- (func $i8x16.narrow_i16x8_s (param $0 v128) (param $1 v128) (result v128)
- (i8x16.narrow_i16x8_s
- (local.get $0)
- (local.get $1)
- )
- )
- (func $i8x16.narrow_i16x8_u (param $0 v128) (param $1 v128) (result v128)
- (i8x16.narrow_i16x8_u
- (local.get $0)
- (local.get $1)
- )
- )
- (func $i16x8.narrow_i32x4_s (param $0 v128) (param $1 v128) (result v128)
- (i16x8.narrow_i32x4_s
- (local.get $0)
- (local.get $1)
- )
- )
- (func $i16x8.narrow_i32x4_u (param $0 v128) (param $1 v128) (result v128)
- (i16x8.narrow_i32x4_u
- (local.get $0)
- (local.get $1)
- )
- )
- (func $i16x8.widen_low_i8x16_s (param $0 v128) (result v128)
- (i16x8.widen_low_i8x16_s
- (local.get $0)
- )
- )
- (func $i16x8.widen_high_i8x16_s (param $0 v128) (result v128)
- (i16x8.widen_high_i8x16_s
- (local.get $0)
- )
- )
- (func $i16x8.widen_low_i8x16_u (param $0 v128) (result v128)
- (i16x8.widen_low_i8x16_u
- (local.get $0)
- )
- )
- (func $i16x8.widen_high_i8x16_u (param $0 v128) (result v128)
- (i16x8.widen_high_i8x16_u
- (local.get $0)
- )
- )
- (func $i32x4.widen_low_i16x8_s (param $0 v128) (result v128)
- (i32x4.widen_low_i16x8_s
- (local.get $0)
- )
- )
- (func $i32x4.widen_high_i16x8_s (param $0 v128) (result v128)
- (i32x4.widen_high_i16x8_s
- (local.get $0)
- )
- )
- (func $i32x4.widen_low_i16x8_u (param $0 v128) (result v128)
- (i32x4.widen_low_i16x8_u
- (local.get $0)
- )
- )
- (func $i32x4.widen_high_i16x8_u (param $0 v128) (result v128)
- (i32x4.widen_high_i16x8_u
- (local.get $0)
- )
- )
- (func $i64x2.widen_low_i32x4_s (param $0 v128) (result v128)
- (i64x2.widen_low_i32x4_s
- (local.get $0)
- )
- )
- (func $i64x2.widen_high_i32x4_s (param $0 v128) (result v128)
- (i64x2.widen_high_i32x4_s
- (local.get $0)
- )
- )
- (func $i64x2.widen_low_i32x4_u (param $0 v128) (result v128)
- (i64x2.widen_low_i32x4_u
- (local.get $0)
- )
- )
- (func $i64x2.widen_high_i32x4_u (param $0 v128) (result v128)
- (i64x2.widen_high_i32x4_u
- (local.get $0)
- )
- )
- (func $i16x8.load8x8_u (param $0 i32) (result v128)
- (i16x8.load8x8_u
- (local.get $0)
- )
- )
- (func $i16x8.load8x8_s (param $0 i32) (result v128)
- (i16x8.load8x8_s
- (local.get $0)
- )
- )
- (func $i32x4.load16x4_s (param $0 i32) (result v128)
- (i32x4.load16x4_s
- (local.get $0)
- )
- )
- (func $i32x4.load16x4_u (param $0 i32) (result v128)
- (i32x4.load16x4_u
- (local.get $0)
- )
- )
- (func $i64x2.load32x2_s (param $0 i32) (result v128)
- (i64x2.load32x2_s
- (local.get $0)
- )
- )
- (func $i64x2.load32x2_u (param $0 i32) (result v128)
- (i64x2.load32x2_u
- (local.get $0)
- )
- )
- (func $v128.load32_zero (param $0 i32) (result v128)
- (v128.load32_zero
- (local.get $0)
- )
- )
- (func $v128.load64_zero (param $0 i32) (result v128)
- (v128.load64_zero
- (local.get $0)
- )
- )
- (func $v8x16.swizzle (param $0 v128) (param $1 v128) (result v128)
- (v8x16.swizzle
- (local.get $0)
- (local.get $1)
- )
- )
- (func $prefetch.t (param $0 i32)
- (prefetch.t offset=3 align=2
- (local.get $0)
- )
- )
- (func $prefetch.nt (param $0 i32)
- (prefetch.nt offset=3 align=2
+ (func $i32x4.trunc_sat_f64x2_u_zero (param $0 v128) (result v128)
+ (i32x4.trunc_sat_f64x2_u_zero
(local.get $0)
)
)
@@ -1434,35 +1386,5 @@
(local.get $0)
)
)
- (func $i32x4.trunc_sat_f64x2_zero_s (param $0 v128) (result v128)
- (i32x4.trunc_sat_f64x2_zero_s
- (local.get $0)
- )
- )
- (func $i32x4.trunc_sat_f64x2_zero_u (param $0 v128) (result v128)
- (i32x4.trunc_sat_f64x2_zero_u
- (local.get $0)
- )
- )
- (func $f32x4.demote_f64x2_zero (param $0 v128) (result v128)
- (f32x4.demote_f64x2_zero
- (local.get $0)
- )
- )
- (func $f64x2.promote_low_f32x4 (param $0 v128) (result v128)
- (f64x2.promote_low_f32x4
- (local.get $0)
- )
- )
- (func $i32x4.widen_i8x16_s (param $0 v128) (result v128)
- (i32x4.widen_i8x16_s 0
- (local.get $0)
- )
- )
- (func $i32x4.widen_i8x16_u (param $0 v128) (result v128)
- (i32x4.widen_i8x16_u 0
- (local.get $0)
- )
- )
)
diff --git a/test/simd.wast.fromBinary.noDebugInfo b/test/simd.wast.fromBinary.noDebugInfo
index bb5cdd5e0..78a6576d5 100644
--- a/test/simd.wast.fromBinary.noDebugInfo
+++ b/test/simd.wast.fromBinary.noDebugInfo
@@ -2,13 +2,11 @@
(type $v128_v128_=>_v128 (func (param v128 v128) (result v128)))
(type $v128_=>_v128 (func (param v128) (result v128)))
(type $i32_=>_v128 (func (param i32) (result v128)))
- (type $v128_=>_i32 (func (param v128) (result i32)))
(type $v128_i32_=>_v128 (func (param v128 i32) (result v128)))
- (type $v128_v128_v128_=>_v128 (func (param v128 v128 v128) (result v128)))
+ (type $v128_=>_i32 (func (param v128) (result i32)))
(type $i32_v128_=>_none (func (param i32 v128)))
(type $i32_v128_=>_v128 (func (param i32 v128) (result v128)))
(type $none_=>_v128 (func (result v128)))
- (type $i32_=>_none (func (param i32)))
(type $v128_=>_i64 (func (param v128) (result i64)))
(type $v128_=>_f32 (func (param v128) (result f32)))
(type $v128_=>_f64 (func (param v128) (result f64)))
@@ -17,1452 +15,1376 @@
(type $v128_i64_=>_v128 (func (param v128 i64) (result v128)))
(type $v128_f32_=>_v128 (func (param v128 f32) (result v128)))
(type $v128_f64_=>_v128 (func (param v128 f64) (result v128)))
+ (type $v128_v128_v128_=>_v128 (func (param v128 v128 v128) (result v128)))
(memory $0 1 1)
(func $0 (param $0 i32) (result v128)
(v128.load
(local.get $0)
)
)
- (func $1 (param $0 i32) (param $1 v128)
+ (func $1 (param $0 i32) (result v128)
+ (v128.load8x8_s
+ (local.get $0)
+ )
+ )
+ (func $2 (param $0 i32) (result v128)
+ (v128.load8x8_u
+ (local.get $0)
+ )
+ )
+ (func $3 (param $0 i32) (result v128)
+ (v128.load16x4_s
+ (local.get $0)
+ )
+ )
+ (func $4 (param $0 i32) (result v128)
+ (v128.load16x4_u
+ (local.get $0)
+ )
+ )
+ (func $5 (param $0 i32) (result v128)
+ (v128.load32x2_s
+ (local.get $0)
+ )
+ )
+ (func $6 (param $0 i32) (result v128)
+ (v128.load32x2_u
+ (local.get $0)
+ )
+ )
+ (func $7 (param $0 i32) (result v128)
+ (v128.load8_splat
+ (local.get $0)
+ )
+ )
+ (func $8 (param $0 i32) (result v128)
+ (v128.load16_splat
+ (local.get $0)
+ )
+ )
+ (func $9 (param $0 i32) (result v128)
+ (v128.load32_splat
+ (local.get $0)
+ )
+ )
+ (func $10 (param $0 i32) (result v128)
+ (v128.load64_splat
+ (local.get $0)
+ )
+ )
+ (func $11 (param $0 i32) (param $1 v128)
(v128.store
(local.get $0)
(local.get $1)
)
)
- (func $2 (result v128)
+ (func $12 (result v128)
(v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d)
)
- (func $3 (result v128)
+ (func $13 (result v128)
(v128.const i32x4 0x00020001 0x00040003 0x00060005 0x00080007)
)
- (func $4 (result v128)
+ (func $14 (result v128)
(v128.const i32x4 0x00000001 0x00000002 0x00000003 0x00000004)
)
- (func $5 (result v128)
+ (func $15 (result v128)
(v128.const i32x4 0x00000001 0x00000000 0x00000002 0x00000000)
)
- (func $6 (result v128)
+ (func $16 (result v128)
(v128.const i32x4 0x3f800000 0x40000000 0x40400000 0x40800000)
)
- (func $7 (result v128)
+ (func $17 (result v128)
(v128.const i32x4 0x00000000 0x3ff00000 0x00000000 0x40000000)
)
- (func $8 (param $0 v128) (param $1 v128) (result v128)
- (v8x16.shuffle 0 17 2 19 4 21 6 23 8 25 10 27 12 29 14 31
+ (func $18 (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.shuffle 0 17 2 19 4 21 6 23 8 25 10 27 12 29 14 31
(local.get $0)
(local.get $1)
)
)
- (func $9 (param $0 i32) (result v128)
+ (func $19 (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.swizzle
+ (local.get $0)
+ (local.get $1)
+ )
+ )
+ (func $20 (param $0 i32) (result v128)
(i8x16.splat
(local.get $0)
)
)
- (func $10 (param $0 v128) (result i32)
+ (func $21 (param $0 i32) (result v128)
+ (i16x8.splat
+ (local.get $0)
+ )
+ )
+ (func $22 (param $0 f32) (result v128)
+ (f32x4.splat
+ (local.get $0)
+ )
+ )
+ (func $23 (param $0 f64) (result v128)
+ (f64x2.splat
+ (local.get $0)
+ )
+ )
+ (func $24 (param $0 v128) (result i32)
(i8x16.extract_lane_s 0
(local.get $0)
)
)
- (func $11 (param $0 v128) (result i32)
+ (func $25 (param $0 v128) (result i32)
(i8x16.extract_lane_u 0
(local.get $0)
)
)
- (func $12 (param $0 v128) (param $1 i32) (result v128)
+ (func $26 (param $0 v128) (param $1 i32) (result v128)
(i8x16.replace_lane 0
(local.get $0)
(local.get $1)
)
)
- (func $13 (param $0 i32) (result v128)
- (i16x8.splat
- (local.get $0)
- )
- )
- (func $14 (param $0 v128) (result i32)
+ (func $27 (param $0 v128) (result i32)
(i16x8.extract_lane_s 0
(local.get $0)
)
)
- (func $15 (param $0 v128) (result i32)
+ (func $28 (param $0 v128) (result i32)
(i16x8.extract_lane_u 0
(local.get $0)
)
)
- (func $16 (param $0 v128) (param $1 i32) (result v128)
+ (func $29 (param $0 v128) (param $1 i32) (result v128)
(i16x8.replace_lane 0
(local.get $0)
(local.get $1)
)
)
- (func $17 (param $0 i32) (result v128)
- (i32x4.splat
- (local.get $0)
- )
- )
- (func $18 (param $0 v128) (result i32)
+ (func $30 (param $0 v128) (result i32)
(i32x4.extract_lane 0
(local.get $0)
)
)
- (func $19 (param $0 v128) (param $1 i32) (result v128)
+ (func $31 (param $0 v128) (param $1 i32) (result v128)
(i32x4.replace_lane 0
(local.get $0)
(local.get $1)
)
)
- (func $20 (param $0 v128) (result i64)
+ (func $32 (param $0 v128) (result i64)
(i64x2.extract_lane 0
(local.get $0)
)
)
- (func $21 (param $0 v128) (param $1 i64) (result v128)
+ (func $33 (param $0 v128) (param $1 i64) (result v128)
(i64x2.replace_lane 0
(local.get $0)
(local.get $1)
)
)
- (func $22 (param $0 f32) (result v128)
- (f32x4.splat
- (local.get $0)
- )
- )
- (func $23 (param $0 v128) (result f32)
+ (func $34 (param $0 v128) (result f32)
(f32x4.extract_lane 0
(local.get $0)
)
)
- (func $24 (param $0 v128) (param $1 f32) (result v128)
+ (func $35 (param $0 v128) (param $1 f32) (result v128)
(f32x4.replace_lane 0
(local.get $0)
(local.get $1)
)
)
- (func $25 (param $0 f64) (result v128)
- (f64x2.splat
- (local.get $0)
- )
- )
- (func $26 (param $0 v128) (result f64)
+ (func $36 (param $0 v128) (result f64)
(f64x2.extract_lane 0
(local.get $0)
)
)
- (func $27 (param $0 v128) (param $1 f64) (result v128)
+ (func $37 (param $0 v128) (param $1 f64) (result v128)
(f64x2.replace_lane 0
(local.get $0)
(local.get $1)
)
)
- (func $28 (param $0 v128) (param $1 v128) (result v128)
+ (func $38 (param $0 v128) (param $1 v128) (result v128)
(i8x16.eq
(local.get $0)
(local.get $1)
)
)
- (func $29 (param $0 v128) (param $1 v128) (result v128)
+ (func $39 (param $0 v128) (param $1 v128) (result v128)
(i8x16.ne
(local.get $0)
(local.get $1)
)
)
- (func $30 (param $0 v128) (param $1 v128) (result v128)
+ (func $40 (param $0 v128) (param $1 v128) (result v128)
(i8x16.lt_s
(local.get $0)
(local.get $1)
)
)
- (func $31 (param $0 v128) (param $1 v128) (result v128)
+ (func $41 (param $0 v128) (param $1 v128) (result v128)
(i8x16.lt_u
(local.get $0)
(local.get $1)
)
)
- (func $32 (param $0 v128) (param $1 v128) (result v128)
+ (func $42 (param $0 v128) (param $1 v128) (result v128)
(i8x16.gt_s
(local.get $0)
(local.get $1)
)
)
- (func $33 (param $0 v128) (param $1 v128) (result v128)
+ (func $43 (param $0 v128) (param $1 v128) (result v128)
(i8x16.gt_u
(local.get $0)
(local.get $1)
)
)
- (func $34 (param $0 v128) (param $1 v128) (result v128)
+ (func $44 (param $0 v128) (param $1 v128) (result v128)
(i8x16.le_s
(local.get $0)
(local.get $1)
)
)
- (func $35 (param $0 v128) (param $1 v128) (result v128)
+ (func $45 (param $0 v128) (param $1 v128) (result v128)
(i8x16.le_u
(local.get $0)
(local.get $1)
)
)
- (func $36 (param $0 v128) (param $1 v128) (result v128)
+ (func $46 (param $0 v128) (param $1 v128) (result v128)
(i8x16.ge_s
(local.get $0)
(local.get $1)
)
)
- (func $37 (param $0 v128) (param $1 v128) (result v128)
+ (func $47 (param $0 v128) (param $1 v128) (result v128)
(i8x16.ge_u
(local.get $0)
(local.get $1)
)
)
- (func $38 (param $0 v128) (param $1 v128) (result v128)
+ (func $48 (param $0 v128) (param $1 v128) (result v128)
(i16x8.eq
(local.get $0)
(local.get $1)
)
)
- (func $39 (param $0 v128) (param $1 v128) (result v128)
+ (func $49 (param $0 v128) (param $1 v128) (result v128)
(i16x8.ne
(local.get $0)
(local.get $1)
)
)
- (func $40 (param $0 v128) (param $1 v128) (result v128)
+ (func $50 (param $0 v128) (param $1 v128) (result v128)
(i16x8.lt_s
(local.get $0)
(local.get $1)
)
)
- (func $41 (param $0 v128) (param $1 v128) (result v128)
+ (func $51 (param $0 v128) (param $1 v128) (result v128)
(i16x8.lt_u
(local.get $0)
(local.get $1)
)
)
- (func $42 (param $0 v128) (param $1 v128) (result v128)
+ (func $52 (param $0 v128) (param $1 v128) (result v128)
(i16x8.gt_s
(local.get $0)
(local.get $1)
)
)
- (func $43 (param $0 v128) (param $1 v128) (result v128)
+ (func $53 (param $0 v128) (param $1 v128) (result v128)
(i16x8.gt_u
(local.get $0)
(local.get $1)
)
)
- (func $44 (param $0 v128) (param $1 v128) (result v128)
+ (func $54 (param $0 v128) (param $1 v128) (result v128)
(i16x8.le_s
(local.get $0)
(local.get $1)
)
)
- (func $45 (param $0 v128) (param $1 v128) (result v128)
+ (func $55 (param $0 v128) (param $1 v128) (result v128)
(i16x8.le_u
(local.get $0)
(local.get $1)
)
)
- (func $46 (param $0 v128) (param $1 v128) (result v128)
+ (func $56 (param $0 v128) (param $1 v128) (result v128)
(i16x8.ge_s
(local.get $0)
(local.get $1)
)
)
- (func $47 (param $0 v128) (param $1 v128) (result v128)
+ (func $57 (param $0 v128) (param $1 v128) (result v128)
(i16x8.ge_u
(local.get $0)
(local.get $1)
)
)
- (func $48 (param $0 v128) (param $1 v128) (result v128)
+ (func $58 (param $0 v128) (param $1 v128) (result v128)
(i32x4.eq
(local.get $0)
(local.get $1)
)
)
- (func $49 (param $0 v128) (param $1 v128) (result v128)
+ (func $59 (param $0 v128) (param $1 v128) (result v128)
(i32x4.ne
(local.get $0)
(local.get $1)
)
)
- (func $50 (param $0 v128) (param $1 v128) (result v128)
+ (func $60 (param $0 v128) (param $1 v128) (result v128)
(i32x4.lt_s
(local.get $0)
(local.get $1)
)
)
- (func $51 (param $0 v128) (param $1 v128) (result v128)
+ (func $61 (param $0 v128) (param $1 v128) (result v128)
(i32x4.lt_u
(local.get $0)
(local.get $1)
)
)
- (func $52 (param $0 v128) (param $1 v128) (result v128)
+ (func $62 (param $0 v128) (param $1 v128) (result v128)
(i32x4.gt_s
(local.get $0)
(local.get $1)
)
)
- (func $53 (param $0 v128) (param $1 v128) (result v128)
+ (func $63 (param $0 v128) (param $1 v128) (result v128)
(i32x4.gt_u
(local.get $0)
(local.get $1)
)
)
- (func $54 (param $0 v128) (param $1 v128) (result v128)
+ (func $64 (param $0 v128) (param $1 v128) (result v128)
(i32x4.le_s
(local.get $0)
(local.get $1)
)
)
- (func $55 (param $0 v128) (param $1 v128) (result v128)
+ (func $65 (param $0 v128) (param $1 v128) (result v128)
(i32x4.le_u
(local.get $0)
(local.get $1)
)
)
- (func $56 (param $0 v128) (param $1 v128) (result v128)
+ (func $66 (param $0 v128) (param $1 v128) (result v128)
(i32x4.ge_s
(local.get $0)
(local.get $1)
)
)
- (func $57 (param $0 v128) (param $1 v128) (result v128)
+ (func $67 (param $0 v128) (param $1 v128) (result v128)
(i32x4.ge_u
(local.get $0)
(local.get $1)
)
)
- (func $58 (param $0 v128) (param $1 v128) (result v128)
- (i64x2.eq
- (local.get $0)
- (local.get $1)
- )
- )
- (func $59 (param $0 v128) (param $1 v128) (result v128)
+ (func $68 (param $0 v128) (param $1 v128) (result v128)
(f32x4.eq
(local.get $0)
(local.get $1)
)
)
- (func $60 (param $0 v128) (param $1 v128) (result v128)
+ (func $69 (param $0 v128) (param $1 v128) (result v128)
(f32x4.ne
(local.get $0)
(local.get $1)
)
)
- (func $61 (param $0 v128) (param $1 v128) (result v128)
+ (func $70 (param $0 v128) (param $1 v128) (result v128)
(f32x4.lt
(local.get $0)
(local.get $1)
)
)
- (func $62 (param $0 v128) (param $1 v128) (result v128)
+ (func $71 (param $0 v128) (param $1 v128) (result v128)
(f32x4.gt
(local.get $0)
(local.get $1)
)
)
- (func $63 (param $0 v128) (param $1 v128) (result v128)
+ (func $72 (param $0 v128) (param $1 v128) (result v128)
(f32x4.le
(local.get $0)
(local.get $1)
)
)
- (func $64 (param $0 v128) (param $1 v128) (result v128)
+ (func $73 (param $0 v128) (param $1 v128) (result v128)
(f32x4.ge
(local.get $0)
(local.get $1)
)
)
- (func $65 (param $0 v128) (param $1 v128) (result v128)
+ (func $74 (param $0 v128) (param $1 v128) (result v128)
(f64x2.eq
(local.get $0)
(local.get $1)
)
)
- (func $66 (param $0 v128) (param $1 v128) (result v128)
+ (func $75 (param $0 v128) (param $1 v128) (result v128)
(f64x2.ne
(local.get $0)
(local.get $1)
)
)
- (func $67 (param $0 v128) (param $1 v128) (result v128)
+ (func $76 (param $0 v128) (param $1 v128) (result v128)
(f64x2.lt
(local.get $0)
(local.get $1)
)
)
- (func $68 (param $0 v128) (param $1 v128) (result v128)
+ (func $77 (param $0 v128) (param $1 v128) (result v128)
(f64x2.gt
(local.get $0)
(local.get $1)
)
)
- (func $69 (param $0 v128) (param $1 v128) (result v128)
+ (func $78 (param $0 v128) (param $1 v128) (result v128)
(f64x2.le
(local.get $0)
(local.get $1)
)
)
- (func $70 (param $0 v128) (param $1 v128) (result v128)
+ (func $79 (param $0 v128) (param $1 v128) (result v128)
(f64x2.ge
(local.get $0)
(local.get $1)
)
)
- (func $71 (param $0 v128) (result v128)
+ (func $80 (param $0 v128) (result v128)
(v128.not
(local.get $0)
)
)
- (func $72 (param $0 v128) (param $1 v128) (result v128)
+ (func $81 (param $0 v128) (param $1 v128) (result v128)
(v128.and
(local.get $0)
(local.get $1)
)
)
- (func $73 (param $0 v128) (param $1 v128) (result v128)
- (v128.or
- (local.get $0)
- (local.get $1)
- )
- )
- (func $74 (param $0 v128) (param $1 v128) (result v128)
- (v128.xor
- (local.get $0)
- (local.get $1)
- )
- )
- (func $75 (param $0 v128) (param $1 v128) (result v128)
+ (func $82 (param $0 v128) (param $1 v128) (result v128)
(v128.andnot
(local.get $0)
(local.get $1)
)
)
- (func $76 (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v128.bitselect
- (local.get $0)
- (local.get $1)
- (local.get $2)
- )
- )
- (func $77 (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v8x16.signselect
+ (func $83 (param $0 v128) (param $1 v128) (result v128)
+ (v128.or
(local.get $0)
(local.get $1)
- (local.get $2)
)
)
- (func $78 (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v16x8.signselect
+ (func $84 (param $0 v128) (param $1 v128) (result v128)
+ (v128.xor
(local.get $0)
(local.get $1)
- (local.get $2)
)
)
- (func $79 (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v32x4.signselect
+ (func $85 (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
+ (v128.bitselect
(local.get $0)
(local.get $1)
(local.get $2)
)
)
- (func $80 (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v64x2.signselect
+ (func $86 (param $0 v128) (result i32)
+ (v128.any_true
(local.get $0)
- (local.get $1)
- (local.get $2)
)
)
- (func $81 (param $0 i32) (param $1 v128) (result v128)
+ (func $87 (param $0 i32) (param $1 v128) (result v128)
(v128.load8_lane 0
(local.get $0)
(local.get $1)
)
)
- (func $82 (param $0 i32) (param $1 v128) (result v128)
+ (func $88 (param $0 i32) (param $1 v128) (result v128)
(v128.load16_lane 0
(local.get $0)
(local.get $1)
)
)
- (func $83 (param $0 i32) (param $1 v128) (result v128)
+ (func $89 (param $0 i32) (param $1 v128) (result v128)
(v128.load32_lane 0
(local.get $0)
(local.get $1)
)
)
- (func $84 (param $0 i32) (param $1 v128) (result v128)
+ (func $90 (param $0 i32) (param $1 v128) (result v128)
(v128.load64_lane 0
(local.get $0)
(local.get $1)
)
)
- (func $85 (param $0 i32) (param $1 v128) (result v128)
+ (func $91 (param $0 i32) (param $1 v128) (result v128)
(v128.load64_lane align=1 0
(local.get $0)
(local.get $1)
)
)
- (func $86 (param $0 i32) (param $1 v128) (result v128)
+ (func $92 (param $0 i32) (param $1 v128) (result v128)
(v128.load64_lane offset=32 0
(local.get $0)
(local.get $1)
)
)
- (func $87 (param $0 i32) (param $1 v128) (result v128)
+ (func $93 (param $0 i32) (param $1 v128) (result v128)
(v128.load64_lane offset=32 align=1 0
(local.get $0)
(local.get $1)
)
)
- (func $88 (param $0 i32) (param $1 v128)
+ (func $94 (param $0 i32) (param $1 v128)
(v128.store8_lane 0
(local.get $0)
(local.get $1)
)
)
- (func $89 (param $0 i32) (param $1 v128)
+ (func $95 (param $0 i32) (param $1 v128)
(v128.store16_lane 0
(local.get $0)
(local.get $1)
)
)
- (func $90 (param $0 i32) (param $1 v128)
+ (func $96 (param $0 i32) (param $1 v128)
(v128.store32_lane 0
(local.get $0)
(local.get $1)
)
)
- (func $91 (param $0 i32) (param $1 v128)
+ (func $97 (param $0 i32) (param $1 v128)
(v128.store64_lane 0
(local.get $0)
(local.get $1)
)
)
- (func $92 (param $0 i32) (param $1 v128)
+ (func $98 (param $0 i32) (param $1 v128)
(v128.store64_lane align=1 0
(local.get $0)
(local.get $1)
)
)
- (func $93 (param $0 i32) (param $1 v128)
+ (func $99 (param $0 i32) (param $1 v128)
(v128.store64_lane offset=32 0
(local.get $0)
(local.get $1)
)
)
- (func $94 (param $0 i32) (param $1 v128)
+ (func $100 (param $0 i32) (param $1 v128)
(v128.store64_lane offset=32 align=1 0
(local.get $0)
(local.get $1)
)
)
- (func $95 (param $0 v128) (result v128)
- (i8x16.popcnt
- (local.get $0)
- )
- )
- (func $96 (param $0 v128) (result v128)
- (i8x16.abs
+ (func $101 (param $0 i32) (result v128)
+ (v128.load32_zero
(local.get $0)
)
)
- (func $97 (param $0 v128) (result v128)
- (i8x16.neg
+ (func $102 (param $0 i32) (result v128)
+ (v128.load64_zero
(local.get $0)
)
)
- (func $98 (param $0 v128) (result i32)
- (i8x16.any_true
+ (func $103 (param $0 v128) (result v128)
+ (f32x4.demote_f64x2_zero
(local.get $0)
)
)
- (func $99 (param $0 v128) (result i32)
- (i8x16.all_true
+ (func $104 (param $0 v128) (result v128)
+ (f64x2.promote_low_f32x4
(local.get $0)
)
)
- (func $100 (param $0 v128) (result i32)
- (i8x16.bitmask
+ (func $105 (param $0 v128) (result v128)
+ (i8x16.abs
(local.get $0)
)
)
- (func $101 (param $0 v128) (param $1 i32) (result v128)
- (i8x16.shl
+ (func $106 (param $0 v128) (result v128)
+ (i8x16.neg
(local.get $0)
- (local.get $1)
)
)
- (func $102 (param $0 v128) (param $1 i32) (result v128)
- (i8x16.shr_s
+ (func $107 (param $0 v128) (result v128)
+ (i8x16.popcnt
(local.get $0)
- (local.get $1)
)
)
- (func $103 (param $0 v128) (param $1 i32) (result v128)
- (i8x16.shr_u
+ (func $108 (param $0 v128) (result i32)
+ (i8x16.all_true
(local.get $0)
- (local.get $1)
)
)
- (func $104 (param $0 v128) (param $1 v128) (result v128)
- (i8x16.add
+ (func $109 (param $0 v128) (result i32)
+ (i8x16.bitmask
(local.get $0)
- (local.get $1)
)
)
- (func $105 (param $0 v128) (param $1 v128) (result v128)
- (i8x16.add_saturate_s
+ (func $110 (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.narrow_i16x8_s
(local.get $0)
(local.get $1)
)
)
- (func $106 (param $0 v128) (param $1 v128) (result v128)
- (i8x16.add_saturate_u
+ (func $111 (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.narrow_i16x8_u
(local.get $0)
(local.get $1)
)
)
- (func $107 (param $0 v128) (param $1 v128) (result v128)
- (i8x16.sub
+ (func $112 (param $0 v128) (result v128)
+ (f32x4.ceil
(local.get $0)
- (local.get $1)
)
)
- (func $108 (param $0 v128) (param $1 v128) (result v128)
- (i8x16.sub_saturate_s
+ (func $113 (param $0 v128) (result v128)
+ (f32x4.floor
(local.get $0)
- (local.get $1)
)
)
- (func $109 (param $0 v128) (param $1 v128) (result v128)
- (i8x16.sub_saturate_u
+ (func $114 (param $0 v128) (result v128)
+ (f32x4.trunc
(local.get $0)
- (local.get $1)
)
)
- (func $110 (param $0 v128) (param $1 v128) (result v128)
- (i8x16.mul
+ (func $115 (param $0 v128) (result v128)
+ (f32x4.nearest
(local.get $0)
- (local.get $1)
)
)
- (func $111 (param $0 v128) (param $1 v128) (result v128)
- (i8x16.min_s
+ (func $116 (param $0 v128) (param $1 i32) (result v128)
+ (i8x16.shl
(local.get $0)
(local.get $1)
)
)
- (func $112 (param $0 v128) (param $1 v128) (result v128)
- (i8x16.min_u
+ (func $117 (param $0 v128) (param $1 i32) (result v128)
+ (i8x16.shr_s
(local.get $0)
(local.get $1)
)
)
- (func $113 (param $0 v128) (param $1 v128) (result v128)
- (i8x16.max_s
+ (func $118 (param $0 v128) (param $1 i32) (result v128)
+ (i8x16.shr_u
(local.get $0)
(local.get $1)
)
)
- (func $114 (param $0 v128) (param $1 v128) (result v128)
- (i8x16.max_u
+ (func $119 (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.add
(local.get $0)
(local.get $1)
)
)
- (func $115 (param $0 v128) (param $1 v128) (result v128)
- (i8x16.avgr_u
+ (func $120 (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.add_sat_s
(local.get $0)
(local.get $1)
)
)
- (func $116 (param $0 v128) (result v128)
- (i16x8.abs
- (local.get $0)
- )
- )
- (func $117 (param $0 v128) (result v128)
- (i16x8.neg
- (local.get $0)
- )
- )
- (func $118 (param $0 v128) (result i32)
- (i16x8.any_true
- (local.get $0)
- )
- )
- (func $119 (param $0 v128) (result i32)
- (i16x8.all_true
- (local.get $0)
- )
- )
- (func $120 (param $0 v128) (result i32)
- (i16x8.bitmask
- (local.get $0)
- )
- )
- (func $121 (param $0 v128) (param $1 i32) (result v128)
- (i16x8.shl
+ (func $121 (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.add_sat_u
(local.get $0)
(local.get $1)
)
)
- (func $122 (param $0 v128) (param $1 i32) (result v128)
- (i16x8.shr_s
+ (func $122 (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.sub
(local.get $0)
(local.get $1)
)
)
- (func $123 (param $0 v128) (param $1 i32) (result v128)
- (i16x8.shr_u
+ (func $123 (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.sub_sat_s
(local.get $0)
(local.get $1)
)
)
(func $124 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.add
+ (i8x16.sub_sat_u
(local.get $0)
(local.get $1)
)
)
- (func $125 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.add_saturate_s
+ (func $125 (param $0 v128) (result v128)
+ (f64x2.ceil
(local.get $0)
- (local.get $1)
)
)
- (func $126 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.add_saturate_u
+ (func $126 (param $0 v128) (result v128)
+ (f64x2.floor
(local.get $0)
- (local.get $1)
)
)
(func $127 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.sub
+ (i8x16.min_s
(local.get $0)
(local.get $1)
)
)
(func $128 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.sub_saturate_s
+ (i8x16.min_u
(local.get $0)
(local.get $1)
)
)
(func $129 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.sub_saturate_u
+ (i8x16.max_s
(local.get $0)
(local.get $1)
)
)
(func $130 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.mul
+ (i8x16.max_u
(local.get $0)
(local.get $1)
)
)
- (func $131 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.min_s
+ (func $131 (param $0 v128) (result v128)
+ (f64x2.trunc
(local.get $0)
- (local.get $1)
)
)
(func $132 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.min_u
+ (i8x16.avgr_u
(local.get $0)
(local.get $1)
)
)
- (func $133 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.max_s
+ (func $133 (param $0 v128) (result v128)
+ (i16x8.extadd_pairwise_i8x16_s
(local.get $0)
- (local.get $1)
)
)
- (func $134 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.max_u
+ (func $134 (param $0 v128) (result v128)
+ (i16x8.extadd_pairwise_i8x16_u
(local.get $0)
- (local.get $1)
)
)
- (func $135 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.avgr_u
+ (func $135 (param $0 v128) (result v128)
+ (i32x4.extadd_pairwise_i16x8_s
(local.get $0)
- (local.get $1)
)
)
- (func $136 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.q15mulr_sat_s
+ (func $136 (param $0 v128) (result v128)
+ (i32x4.extadd_pairwise_i16x8_u
(local.get $0)
- (local.get $1)
)
)
- (func $137 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.extmul_low_i8x16_s
+ (func $137 (param $0 v128) (result v128)
+ (i16x8.abs
(local.get $0)
- (local.get $1)
)
)
- (func $138 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.extmul_high_i8x16_s
+ (func $138 (param $0 v128) (result v128)
+ (i16x8.neg
(local.get $0)
- (local.get $1)
)
)
(func $139 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.extmul_low_i8x16_u
+ (i16x8.q15mulr_sat_s
(local.get $0)
(local.get $1)
)
)
- (func $140 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.extmul_high_i8x16_u
+ (func $140 (param $0 v128) (result i32)
+ (i16x8.all_true
(local.get $0)
- (local.get $1)
)
)
- (func $141 (param $0 v128) (result v128)
- (i32x4.abs
+ (func $141 (param $0 v128) (result i32)
+ (i16x8.bitmask
(local.get $0)
)
)
- (func $142 (param $0 v128) (result v128)
- (i32x4.neg
+ (func $142 (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.narrow_i32x4_s
(local.get $0)
+ (local.get $1)
)
)
- (func $143 (param $0 v128) (result i32)
- (i32x4.any_true
+ (func $143 (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.narrow_i32x4_u
(local.get $0)
+ (local.get $1)
)
)
- (func $144 (param $0 v128) (result i32)
- (i32x4.all_true
+ (func $144 (param $0 v128) (result v128)
+ (i16x8.extend_low_i8x16_s
(local.get $0)
)
)
- (func $145 (param $0 v128) (result i32)
- (i32x4.bitmask
+ (func $145 (param $0 v128) (result v128)
+ (i16x8.extend_high_i8x16_s
(local.get $0)
)
)
- (func $146 (param $0 v128) (param $1 i32) (result v128)
- (i32x4.shl
+ (func $146 (param $0 v128) (result v128)
+ (i16x8.extend_low_i8x16_u
(local.get $0)
- (local.get $1)
)
)
- (func $147 (param $0 v128) (param $1 i32) (result v128)
- (i32x4.shr_s
+ (func $147 (param $0 v128) (result v128)
+ (i16x8.extend_high_i8x16_u
(local.get $0)
- (local.get $1)
)
)
(func $148 (param $0 v128) (param $1 i32) (result v128)
- (i32x4.shr_u
+ (i16x8.shl
(local.get $0)
(local.get $1)
)
)
- (func $149 (param $0 v128) (param $1 v128) (result v128)
- (i32x4.add
+ (func $149 (param $0 v128) (param $1 i32) (result v128)
+ (i16x8.shr_s
(local.get $0)
(local.get $1)
)
)
- (func $150 (param $0 v128) (param $1 v128) (result v128)
- (i32x4.sub
+ (func $150 (param $0 v128) (param $1 i32) (result v128)
+ (i16x8.shr_u
(local.get $0)
(local.get $1)
)
)
(func $151 (param $0 v128) (param $1 v128) (result v128)
- (i32x4.mul
+ (i16x8.add
(local.get $0)
(local.get $1)
)
)
(func $152 (param $0 v128) (param $1 v128) (result v128)
- (i32x4.min_s
+ (i16x8.add_sat_s
(local.get $0)
(local.get $1)
)
)
(func $153 (param $0 v128) (param $1 v128) (result v128)
- (i32x4.min_u
+ (i16x8.add_sat_u
(local.get $0)
(local.get $1)
)
)
(func $154 (param $0 v128) (param $1 v128) (result v128)
- (i32x4.max_s
+ (i16x8.sub
(local.get $0)
(local.get $1)
)
)
(func $155 (param $0 v128) (param $1 v128) (result v128)
- (i32x4.max_u
+ (i16x8.sub_sat_s
(local.get $0)
(local.get $1)
)
)
(func $156 (param $0 v128) (param $1 v128) (result v128)
- (i32x4.dot_i16x8_s
+ (i16x8.sub_sat_u
(local.get $0)
(local.get $1)
)
)
- (func $157 (param $0 v128) (param $1 v128) (result v128)
- (i32x4.extmul_low_i16x8_s
+ (func $157 (param $0 v128) (result v128)
+ (f64x2.nearest
(local.get $0)
- (local.get $1)
)
)
(func $158 (param $0 v128) (param $1 v128) (result v128)
- (i32x4.extmul_high_i16x8_s
+ (i16x8.mul
(local.get $0)
(local.get $1)
)
)
(func $159 (param $0 v128) (param $1 v128) (result v128)
- (i32x4.extmul_low_i16x8_u
+ (i16x8.min_s
(local.get $0)
(local.get $1)
)
)
(func $160 (param $0 v128) (param $1 v128) (result v128)
- (i32x4.extmul_high_i16x8_u
+ (i16x8.min_u
(local.get $0)
(local.get $1)
)
)
- (func $161 (param $0 v128) (result v128)
- (i64x2.neg
+ (func $161 (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.max_s
(local.get $0)
+ (local.get $1)
)
)
- (func $162 (param $0 v128) (result i32)
- (i64x2.bitmask
+ (func $162 (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.max_u
(local.get $0)
+ (local.get $1)
)
)
- (func $163 (param $0 v128) (param $1 i32) (result v128)
- (i64x2.shl
+ (func $163 (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.avgr_u
(local.get $0)
(local.get $1)
)
)
- (func $164 (param $0 v128) (param $1 i32) (result v128)
- (i64x2.shr_s
+ (func $164 (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.extmul_low_i8x16_s
(local.get $0)
(local.get $1)
)
)
- (func $165 (param $0 v128) (param $1 i32) (result v128)
- (i64x2.shr_u
+ (func $165 (param $0 v128) (param $1 v128) (result v128)
+ (i16x8.extmul_high_i8x16_s
(local.get $0)
(local.get $1)
)
)
(func $166 (param $0 v128) (param $1 v128) (result v128)
- (i64x2.add
+ (i16x8.extmul_low_i8x16_u
(local.get $0)
(local.get $1)
)
)
(func $167 (param $0 v128) (param $1 v128) (result v128)
- (i64x2.sub
+ (i16x8.extmul_high_i8x16_u
(local.get $0)
(local.get $1)
)
)
- (func $168 (param $0 v128) (param $1 v128) (result v128)
- (i64x2.mul
+ (func $168 (param $0 v128) (result v128)
+ (i32x4.abs
(local.get $0)
- (local.get $1)
)
)
- (func $169 (param $0 v128) (param $1 v128) (result v128)
- (i64x2.extmul_low_i32x4_s
+ (func $169 (param $0 v128) (result v128)
+ (i32x4.neg
(local.get $0)
- (local.get $1)
)
)
- (func $170 (param $0 v128) (param $1 v128) (result v128)
- (i64x2.extmul_high_i32x4_s
+ (func $170 (param $0 v128) (result i32)
+ (i32x4.all_true
(local.get $0)
- (local.get $1)
)
)
- (func $171 (param $0 v128) (param $1 v128) (result v128)
- (i64x2.extmul_low_i32x4_u
+ (func $171 (param $0 v128) (result i32)
+ (i32x4.bitmask
(local.get $0)
- (local.get $1)
)
)
- (func $172 (param $0 v128) (param $1 v128) (result v128)
- (i64x2.extmul_high_i32x4_u
+ (func $172 (param $0 v128) (result v128)
+ (i32x4.extend_low_i16x8_s
(local.get $0)
- (local.get $1)
)
)
- (func $173 (param $0 v128) (param $1 v128) (result v128)
- (f32x4.add
+ (func $173 (param $0 v128) (result v128)
+ (i32x4.extend_high_i16x8_s
(local.get $0)
- (local.get $1)
)
)
- (func $174 (param $0 v128) (param $1 v128) (result v128)
- (f32x4.sub
+ (func $174 (param $0 v128) (result v128)
+ (i32x4.extend_low_i16x8_u
(local.get $0)
- (local.get $1)
)
)
- (func $175 (param $0 v128) (param $1 v128) (result v128)
- (f32x4.mul
+ (func $175 (param $0 v128) (result v128)
+ (i32x4.extend_high_i16x8_u
(local.get $0)
- (local.get $1)
)
)
- (func $176 (param $0 v128) (param $1 v128) (result v128)
- (f32x4.div
+ (func $176 (param $0 v128) (param $1 i32) (result v128)
+ (i32x4.shl
(local.get $0)
(local.get $1)
)
)
- (func $177 (param $0 v128) (param $1 v128) (result v128)
- (f32x4.min
+ (func $177 (param $0 v128) (param $1 i32) (result v128)
+ (i32x4.shr_s
(local.get $0)
(local.get $1)
)
)
- (func $178 (param $0 v128) (param $1 v128) (result v128)
- (f32x4.max
+ (func $178 (param $0 v128) (param $1 i32) (result v128)
+ (i32x4.shr_u
(local.get $0)
(local.get $1)
)
)
(func $179 (param $0 v128) (param $1 v128) (result v128)
- (f32x4.pmin
+ (i32x4.add
(local.get $0)
(local.get $1)
)
)
(func $180 (param $0 v128) (param $1 v128) (result v128)
- (f32x4.pmax
+ (i32x4.sub
(local.get $0)
(local.get $1)
)
)
- (func $181 (param $0 v128) (result v128)
- (f32x4.ceil
+ (func $181 (param $0 v128) (param $1 v128) (result v128)
+ (i32x4.mul
(local.get $0)
+ (local.get $1)
)
)
- (func $182 (param $0 v128) (result v128)
- (f32x4.floor
+ (func $182 (param $0 v128) (param $1 v128) (result v128)
+ (i32x4.min_s
(local.get $0)
+ (local.get $1)
)
)
- (func $183 (param $0 v128) (result v128)
- (f32x4.trunc
+ (func $183 (param $0 v128) (param $1 v128) (result v128)
+ (i32x4.min_u
(local.get $0)
+ (local.get $1)
)
)
- (func $184 (param $0 v128) (result v128)
- (f32x4.nearest
+ (func $184 (param $0 v128) (param $1 v128) (result v128)
+ (i32x4.max_s
(local.get $0)
+ (local.get $1)
)
)
- (func $185 (param $0 v128) (result v128)
- (f32x4.abs
+ (func $185 (param $0 v128) (param $1 v128) (result v128)
+ (i32x4.max_u
(local.get $0)
+ (local.get $1)
)
)
- (func $186 (param $0 v128) (result v128)
- (f32x4.neg
+ (func $186 (param $0 v128) (param $1 v128) (result v128)
+ (i32x4.dot_i16x8_s
(local.get $0)
+ (local.get $1)
)
)
- (func $187 (param $0 v128) (result v128)
- (f32x4.sqrt
+ (func $187 (param $0 v128) (param $1 v128) (result v128)
+ (i32x4.extmul_low_i16x8_s
(local.get $0)
+ (local.get $1)
)
)
- (func $188 (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (f32x4.qfma
+ (func $188 (param $0 v128) (param $1 v128) (result v128)
+ (i32x4.extmul_high_i16x8_s
(local.get $0)
(local.get $1)
- (local.get $2)
)
)
- (func $189 (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (f32x4.qfms
+ (func $189 (param $0 v128) (param $1 v128) (result v128)
+ (i32x4.extmul_low_i16x8_u
(local.get $0)
(local.get $1)
- (local.get $2)
)
)
(func $190 (param $0 v128) (param $1 v128) (result v128)
- (f64x2.add
+ (i32x4.extmul_high_i16x8_u
(local.get $0)
(local.get $1)
)
)
- (func $191 (param $0 v128) (param $1 v128) (result v128)
- (f64x2.sub
+ (func $191 (param $0 v128) (result v128)
+ (i64x2.abs
(local.get $0)
- (local.get $1)
)
)
- (func $192 (param $0 v128) (param $1 v128) (result v128)
- (f64x2.mul
+ (func $192 (param $0 v128) (result v128)
+ (i64x2.neg
(local.get $0)
- (local.get $1)
)
)
- (func $193 (param $0 v128) (param $1 v128) (result v128)
- (f64x2.div
+ (func $193 (param $0 v128) (result i32)
+ (i64x2.all_true
(local.get $0)
- (local.get $1)
)
)
- (func $194 (param $0 v128) (param $1 v128) (result v128)
- (f64x2.min
+ (func $194 (param $0 v128) (result i32)
+ (i64x2.bitmask
(local.get $0)
- (local.get $1)
)
)
- (func $195 (param $0 v128) (param $1 v128) (result v128)
- (f64x2.max
+ (func $195 (param $0 v128) (result v128)
+ (i64x2.extend_low_i32x4_s
(local.get $0)
- (local.get $1)
)
)
- (func $196 (param $0 v128) (param $1 v128) (result v128)
- (f64x2.pmin
+ (func $196 (param $0 v128) (result v128)
+ (i64x2.extend_high_i32x4_s
(local.get $0)
- (local.get $1)
)
)
- (func $197 (param $0 v128) (param $1 v128) (result v128)
- (f64x2.pmax
+ (func $197 (param $0 v128) (result v128)
+ (i64x2.extend_low_i32x4_u
(local.get $0)
- (local.get $1)
)
)
(func $198 (param $0 v128) (result v128)
- (f64x2.ceil
+ (i64x2.extend_high_i32x4_u
(local.get $0)
)
)
- (func $199 (param $0 v128) (result v128)
- (f64x2.floor
+ (func $199 (param $0 v128) (param $1 i32) (result v128)
+ (i64x2.shl
(local.get $0)
+ (local.get $1)
)
)
- (func $200 (param $0 v128) (result v128)
- (f64x2.trunc
+ (func $200 (param $0 v128) (param $1 i32) (result v128)
+ (i64x2.shr_s
(local.get $0)
+ (local.get $1)
)
)
- (func $201 (param $0 v128) (result v128)
- (f64x2.nearest
+ (func $201 (param $0 v128) (param $1 i32) (result v128)
+ (i64x2.shr_u
(local.get $0)
+ (local.get $1)
)
)
- (func $202 (param $0 v128) (result v128)
- (f64x2.abs
+ (func $202 (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.add
(local.get $0)
+ (local.get $1)
)
)
- (func $203 (param $0 v128) (result v128)
- (f64x2.neg
+ (func $203 (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.sub
(local.get $0)
+ (local.get $1)
)
)
- (func $204 (param $0 v128) (result v128)
- (f64x2.sqrt
+ (func $204 (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.mul
(local.get $0)
+ (local.get $1)
)
)
- (func $205 (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (f64x2.qfma
+ (func $205 (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.eq
(local.get $0)
(local.get $1)
- (local.get $2)
)
)
- (func $206 (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (f64x2.qfms
+ (func $206 (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.ne
(local.get $0)
(local.get $1)
- (local.get $2)
)
)
- (func $207 (param $0 v128) (result v128)
- (i16x8.extadd_pairwise_i8x16_s
+ (func $207 (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.lt_s
(local.get $0)
+ (local.get $1)
)
)
- (func $208 (param $0 v128) (result v128)
- (i16x8.extadd_pairwise_i8x16_u
+ (func $208 (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.gt_s
(local.get $0)
+ (local.get $1)
)
)
- (func $209 (param $0 v128) (result v128)
- (i32x4.extadd_pairwise_i16x8_s
+ (func $209 (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.le_s
(local.get $0)
+ (local.get $1)
)
)
- (func $210 (param $0 v128) (result v128)
- (i32x4.extadd_pairwise_i16x8_u
+ (func $210 (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.ge_s
(local.get $0)
+ (local.get $1)
)
)
- (func $211 (param $0 v128) (result v128)
- (i32x4.trunc_sat_f32x4_s
+ (func $211 (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.extmul_low_i32x4_s
(local.get $0)
+ (local.get $1)
)
)
- (func $212 (param $0 v128) (result v128)
- (i32x4.trunc_sat_f32x4_u
+ (func $212 (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.extmul_high_i32x4_s
(local.get $0)
+ (local.get $1)
)
)
- (func $213 (param $0 v128) (result v128)
- (i64x2.trunc_sat_f64x2_s
+ (func $213 (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.extmul_low_i32x4_u
(local.get $0)
+ (local.get $1)
)
)
- (func $214 (param $0 v128) (result v128)
- (i64x2.trunc_sat_f64x2_u
+ (func $214 (param $0 v128) (param $1 v128) (result v128)
+ (i64x2.extmul_high_i32x4_u
(local.get $0)
+ (local.get $1)
)
)
(func $215 (param $0 v128) (result v128)
- (f32x4.convert_i32x4_s
+ (f32x4.abs
(local.get $0)
)
)
(func $216 (param $0 v128) (result v128)
- (f32x4.convert_i32x4_u
+ (f32x4.neg
(local.get $0)
)
)
(func $217 (param $0 v128) (result v128)
- (f64x2.convert_i64x2_s
+ (f32x4.sqrt
(local.get $0)
)
)
- (func $218 (param $0 v128) (result v128)
- (f64x2.convert_i64x2_u
+ (func $218 (param $0 v128) (param $1 v128) (result v128)
+ (f32x4.add
(local.get $0)
+ (local.get $1)
)
)
- (func $219 (param $0 i32) (result v128)
- (v8x16.load_splat
+ (func $219 (param $0 v128) (param $1 v128) (result v128)
+ (f32x4.sub
(local.get $0)
+ (local.get $1)
)
)
- (func $220 (param $0 i32) (result v128)
- (v16x8.load_splat
+ (func $220 (param $0 v128) (param $1 v128) (result v128)
+ (f32x4.mul
(local.get $0)
+ (local.get $1)
)
)
- (func $221 (param $0 i32) (result v128)
- (v32x4.load_splat
+ (func $221 (param $0 v128) (param $1 v128) (result v128)
+ (f32x4.div
(local.get $0)
+ (local.get $1)
)
)
- (func $222 (param $0 i32) (result v128)
- (v64x2.load_splat
+ (func $222 (param $0 v128) (param $1 v128) (result v128)
+ (f32x4.min
(local.get $0)
+ (local.get $1)
)
)
(func $223 (param $0 v128) (param $1 v128) (result v128)
- (i8x16.narrow_i16x8_s
+ (f32x4.max
(local.get $0)
(local.get $1)
)
)
(func $224 (param $0 v128) (param $1 v128) (result v128)
- (i8x16.narrow_i16x8_u
+ (f32x4.pmin
(local.get $0)
(local.get $1)
)
)
(func $225 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.narrow_i32x4_s
+ (f32x4.pmax
(local.get $0)
(local.get $1)
)
)
- (func $226 (param $0 v128) (param $1 v128) (result v128)
- (i16x8.narrow_i32x4_u
+ (func $226 (param $0 v128) (result v128)
+ (f64x2.abs
(local.get $0)
- (local.get $1)
)
)
(func $227 (param $0 v128) (result v128)
- (i16x8.widen_low_i8x16_s
+ (f64x2.neg
(local.get $0)
)
)
(func $228 (param $0 v128) (result v128)
- (i16x8.widen_high_i8x16_s
+ (f64x2.sqrt
(local.get $0)
)
)
- (func $229 (param $0 v128) (result v128)
- (i16x8.widen_low_i8x16_u
+ (func $229 (param $0 v128) (param $1 v128) (result v128)
+ (f64x2.add
(local.get $0)
+ (local.get $1)
)
)
- (func $230 (param $0 v128) (result v128)
- (i16x8.widen_high_i8x16_u
+ (func $230 (param $0 v128) (param $1 v128) (result v128)
+ (f64x2.sub
(local.get $0)
+ (local.get $1)
)
)
- (func $231 (param $0 v128) (result v128)
- (i32x4.widen_low_i16x8_s
+ (func $231 (param $0 v128) (param $1 v128) (result v128)
+ (f64x2.mul
(local.get $0)
+ (local.get $1)
)
)
- (func $232 (param $0 v128) (result v128)
- (i32x4.widen_high_i16x8_s
+ (func $232 (param $0 v128) (param $1 v128) (result v128)
+ (f64x2.div
(local.get $0)
+ (local.get $1)
)
)
- (func $233 (param $0 v128) (result v128)
- (i32x4.widen_low_i16x8_u
+ (func $233 (param $0 v128) (param $1 v128) (result v128)
+ (f64x2.min
(local.get $0)
+ (local.get $1)
)
)
- (func $234 (param $0 v128) (result v128)
- (i32x4.widen_high_i16x8_u
+ (func $234 (param $0 v128) (param $1 v128) (result v128)
+ (f64x2.max
(local.get $0)
+ (local.get $1)
)
)
- (func $235 (param $0 v128) (result v128)
- (i64x2.widen_low_i32x4_s
+ (func $235 (param $0 v128) (param $1 v128) (result v128)
+ (f64x2.pmin
(local.get $0)
+ (local.get $1)
)
)
- (func $236 (param $0 v128) (result v128)
- (i64x2.widen_high_i32x4_s
+ (func $236 (param $0 v128) (param $1 v128) (result v128)
+ (f64x2.pmax
(local.get $0)
+ (local.get $1)
)
)
(func $237 (param $0 v128) (result v128)
- (i64x2.widen_low_i32x4_u
+ (i32x4.trunc_sat_f32x4_s
(local.get $0)
)
)
(func $238 (param $0 v128) (result v128)
- (i64x2.widen_high_i32x4_u
- (local.get $0)
- )
- )
- (func $239 (param $0 i32) (result v128)
- (i16x8.load8x8_u
- (local.get $0)
- )
- )
- (func $240 (param $0 i32) (result v128)
- (i16x8.load8x8_s
- (local.get $0)
- )
- )
- (func $241 (param $0 i32) (result v128)
- (i32x4.load16x4_s
- (local.get $0)
- )
- )
- (func $242 (param $0 i32) (result v128)
- (i32x4.load16x4_u
- (local.get $0)
- )
- )
- (func $243 (param $0 i32) (result v128)
- (i64x2.load32x2_s
- (local.get $0)
- )
- )
- (func $244 (param $0 i32) (result v128)
- (i64x2.load32x2_u
- (local.get $0)
- )
- )
- (func $245 (param $0 i32) (result v128)
- (v128.load32_zero
+ (i32x4.trunc_sat_f32x4_u
(local.get $0)
)
)
- (func $246 (param $0 i32) (result v128)
- (v128.load64_zero
+ (func $239 (param $0 v128) (result v128)
+ (f32x4.convert_i32x4_s
(local.get $0)
)
)
- (func $247 (param $0 v128) (param $1 v128) (result v128)
- (v8x16.swizzle
+ (func $240 (param $0 v128) (result v128)
+ (f32x4.convert_i32x4_u
(local.get $0)
- (local.get $1)
)
)
- (func $248 (param $0 i32)
- (prefetch.t offset=3 align=2
+ (func $241 (param $0 v128) (result v128)
+ (i32x4.trunc_sat_f64x2_s_zero
(local.get $0)
)
)
- (func $249 (param $0 i32)
- (prefetch.nt offset=3 align=2
+ (func $242 (param $0 v128) (result v128)
+ (i32x4.trunc_sat_f64x2_u_zero
(local.get $0)
)
)
- (func $250 (param $0 v128) (result v128)
+ (func $243 (param $0 v128) (result v128)
(f64x2.convert_low_i32x4_s
(local.get $0)
)
)
- (func $251 (param $0 v128) (result v128)
+ (func $244 (param $0 v128) (result v128)
(f64x2.convert_low_i32x4_u
(local.get $0)
)
)
- (func $252 (param $0 v128) (result v128)
- (i32x4.trunc_sat_f64x2_zero_s
- (local.get $0)
- )
- )
- (func $253 (param $0 v128) (result v128)
- (i32x4.trunc_sat_f64x2_zero_u
- (local.get $0)
- )
- )
- (func $254 (param $0 v128) (result v128)
- (f32x4.demote_f64x2_zero
- (local.get $0)
- )
- )
- (func $255 (param $0 v128) (result v128)
- (f64x2.promote_low_f32x4
- (local.get $0)
- )
- )
- (func $256 (param $0 v128) (result v128)
- (i32x4.widen_i8x16_s 0
- (local.get $0)
- )
- )
- (func $257 (param $0 v128) (result v128)
- (i32x4.widen_i8x16_u 0
- (local.get $0)
- )
- )
)
diff --git a/test/simd64.wast b/test/simd64.wast
index 65b1ef8b7..f5cf85055 100644
--- a/test/simd64.wast
+++ b/test/simd64.wast
@@ -11,53 +11,53 @@
(local.get $1)
)
)
- (func $v8x16.load_splat (param $0 i64) (result v128)
- (v8x16.load_splat
+ (func $v128.load8_splat (param $0 i64) (result v128)
+ (v128.load8_splat
(local.get $0)
)
)
- (func $v16x8.load_splat (param $0 i64) (result v128)
- (v16x8.load_splat
+ (func $v128.load16_splat (param $0 i64) (result v128)
+ (v128.load16_splat
(local.get $0)
)
)
- (func $v32x4.load_splat (param $0 i64) (result v128)
- (v32x4.load_splat
+ (func $v128.load32_splat (param $0 i64) (result v128)
+ (v128.load32_splat
(local.get $0)
)
)
- (func $v64x2.load_splat (param $0 i64) (result v128)
- (v64x2.load_splat
+ (func $v128.load64_splat (param $0 i64) (result v128)
+ (v128.load64_splat
(local.get $0)
)
)
- (func $i16x8.load8x8_u (param $0 i64) (result v128)
- (i16x8.load8x8_u
+ (func $v128.load8x8_u (param $0 i64) (result v128)
+ (v128.load8x8_u
(local.get $0)
)
)
- (func $i16x8.load8x8_s (param $0 i64) (result v128)
- (i16x8.load8x8_s
+ (func $v128.load8x8_s (param $0 i64) (result v128)
+ (v128.load8x8_s
(local.get $0)
)
)
- (func $i32x4.load16x4_s (param $0 i64) (result v128)
- (i32x4.load16x4_s
+ (func $v128.load16x4_s (param $0 i64) (result v128)
+ (v128.load16x4_s
(local.get $0)
)
)
- (func $i32x4.load16x4_u (param $0 i64) (result v128)
- (i32x4.load16x4_u
+ (func $v128.load16x4_u (param $0 i64) (result v128)
+ (v128.load16x4_u
(local.get $0)
)
)
- (func $i64x2.load32x2_s (param $0 i64) (result v128)
- (i64x2.load32x2_s
+ (func $v128.load32x2_s (param $0 i64) (result v128)
+ (v128.load32x2_s
(local.get $0)
)
)
- (func $i64x2.load32x2_u (param $0 i64) (result v128)
- (i64x2.load32x2_u
+ (func $v128.load32x2_u (param $0 i64) (result v128)
+ (v128.load32x2_u
(local.get $0)
)
)
diff --git a/test/simd64.wast.from-wast b/test/simd64.wast.from-wast
index cd08c79ed..a12e5d08f 100644
--- a/test/simd64.wast.from-wast
+++ b/test/simd64.wast.from-wast
@@ -13,53 +13,53 @@
(local.get $1)
)
)
- (func $v8x16.load_splat (param $0 i64) (result v128)
- (v8x16.load_splat
+ (func $v128.load8_splat (param $0 i64) (result v128)
+ (v128.load8_splat
(local.get $0)
)
)
- (func $v16x8.load_splat (param $0 i64) (result v128)
- (v16x8.load_splat
+ (func $v128.load16_splat (param $0 i64) (result v128)
+ (v128.load16_splat
(local.get $0)
)
)
- (func $v32x4.load_splat (param $0 i64) (result v128)
- (v32x4.load_splat
+ (func $v128.load32_splat (param $0 i64) (result v128)
+ (v128.load32_splat
(local.get $0)
)
)
- (func $v64x2.load_splat (param $0 i64) (result v128)
- (v64x2.load_splat
+ (func $v128.load64_splat (param $0 i64) (result v128)
+ (v128.load64_splat
(local.get $0)
)
)
- (func $i16x8.load8x8_u (param $0 i64) (result v128)
- (i16x8.load8x8_u
+ (func $v128.load8x8_u (param $0 i64) (result v128)
+ (v128.load8x8_u
(local.get $0)
)
)
- (func $i16x8.load8x8_s (param $0 i64) (result v128)
- (i16x8.load8x8_s
+ (func $v128.load8x8_s (param $0 i64) (result v128)
+ (v128.load8x8_s
(local.get $0)
)
)
- (func $i32x4.load16x4_s (param $0 i64) (result v128)
- (i32x4.load16x4_s
+ (func $v128.load16x4_s (param $0 i64) (result v128)
+ (v128.load16x4_s
(local.get $0)
)
)
- (func $i32x4.load16x4_u (param $0 i64) (result v128)
- (i32x4.load16x4_u
+ (func $v128.load16x4_u (param $0 i64) (result v128)
+ (v128.load16x4_u
(local.get $0)
)
)
- (func $i64x2.load32x2_s (param $0 i64) (result v128)
- (i64x2.load32x2_s
+ (func $v128.load32x2_s (param $0 i64) (result v128)
+ (v128.load32x2_s
(local.get $0)
)
)
- (func $i64x2.load32x2_u (param $0 i64) (result v128)
- (i64x2.load32x2_u
+ (func $v128.load32x2_u (param $0 i64) (result v128)
+ (v128.load32x2_u
(local.get $0)
)
)
diff --git a/test/simd64.wast.fromBinary b/test/simd64.wast.fromBinary
index 6202b7c06..988f678b8 100644
--- a/test/simd64.wast.fromBinary
+++ b/test/simd64.wast.fromBinary
@@ -13,53 +13,53 @@
(local.get $1)
)
)
- (func $v8x16.load_splat (param $0 i64) (result v128)
- (v8x16.load_splat
+ (func $v128.load8_splat (param $0 i64) (result v128)
+ (v128.load8_splat
(local.get $0)
)
)
- (func $v16x8.load_splat (param $0 i64) (result v128)
- (v16x8.load_splat
+ (func $v128.load16_splat (param $0 i64) (result v128)
+ (v128.load16_splat
(local.get $0)
)
)
- (func $v32x4.load_splat (param $0 i64) (result v128)
- (v32x4.load_splat
+ (func $v128.load32_splat (param $0 i64) (result v128)
+ (v128.load32_splat
(local.get $0)
)
)
- (func $v64x2.load_splat (param $0 i64) (result v128)
- (v64x2.load_splat
+ (func $v128.load64_splat (param $0 i64) (result v128)
+ (v128.load64_splat
(local.get $0)
)
)
- (func $i16x8.load8x8_u (param $0 i64) (result v128)
- (i16x8.load8x8_u
+ (func $v128.load8x8_u (param $0 i64) (result v128)
+ (v128.load8x8_u
(local.get $0)
)
)
- (func $i16x8.load8x8_s (param $0 i64) (result v128)
- (i16x8.load8x8_s
+ (func $v128.load8x8_s (param $0 i64) (result v128)
+ (v128.load8x8_s
(local.get $0)
)
)
- (func $i32x4.load16x4_s (param $0 i64) (result v128)
- (i32x4.load16x4_s
+ (func $v128.load16x4_s (param $0 i64) (result v128)
+ (v128.load16x4_s
(local.get $0)
)
)
- (func $i32x4.load16x4_u (param $0 i64) (result v128)
- (i32x4.load16x4_u
+ (func $v128.load16x4_u (param $0 i64) (result v128)
+ (v128.load16x4_u
(local.get $0)
)
)
- (func $i64x2.load32x2_s (param $0 i64) (result v128)
- (i64x2.load32x2_s
+ (func $v128.load32x2_s (param $0 i64) (result v128)
+ (v128.load32x2_s
(local.get $0)
)
)
- (func $i64x2.load32x2_u (param $0 i64) (result v128)
- (i64x2.load32x2_u
+ (func $v128.load32x2_u (param $0 i64) (result v128)
+ (v128.load32x2_u
(local.get $0)
)
)
diff --git a/test/simd64.wast.fromBinary.noDebugInfo b/test/simd64.wast.fromBinary.noDebugInfo
index 67ce14b29..2e96dfe64 100644
--- a/test/simd64.wast.fromBinary.noDebugInfo
+++ b/test/simd64.wast.fromBinary.noDebugInfo
@@ -14,52 +14,52 @@
)
)
(func $2 (param $0 i64) (result v128)
- (v8x16.load_splat
+ (v128.load8_splat
(local.get $0)
)
)
(func $3 (param $0 i64) (result v128)
- (v16x8.load_splat
+ (v128.load16_splat
(local.get $0)
)
)
(func $4 (param $0 i64) (result v128)
- (v32x4.load_splat
+ (v128.load32_splat
(local.get $0)
)
)
(func $5 (param $0 i64) (result v128)
- (v64x2.load_splat
+ (v128.load64_splat
(local.get $0)
)
)
(func $6 (param $0 i64) (result v128)
- (i16x8.load8x8_u
+ (v128.load8x8_u
(local.get $0)
)
)
(func $7 (param $0 i64) (result v128)
- (i16x8.load8x8_s
+ (v128.load8x8_s
(local.get $0)
)
)
(func $8 (param $0 i64) (result v128)
- (i32x4.load16x4_s
+ (v128.load16x4_s
(local.get $0)
)
)
(func $9 (param $0 i64) (result v128)
- (i32x4.load16x4_u
+ (v128.load16x4_u
(local.get $0)
)
)
(func $10 (param $0 i64) (result v128)
- (i64x2.load32x2_s
+ (v128.load32x2_s
(local.get $0)
)
)
(func $11 (param $0 i64) (result v128)
- (i64x2.load32x2_u
+ (v128.load32x2_u
(local.get $0)
)
)
diff --git a/test/spec/simd.wast b/test/spec/simd.wast
index 87bbb96cb..4876a032d 100644
--- a/test/spec/simd.wast
+++ b/test/spec/simd.wast
@@ -14,11 +14,11 @@
(func (export "v128.const.i64x2") (result v128) (v128.const i64x2 1 2))
(func (export "v128.const.f32x4") (result v128) (v128.const f32x4 1.0 2 3 4))
(func (export "v128.const.f64x2") (result v128) (v128.const f64x2 1.0 2))
- (func (export "v128.shuffle_interleave_bytes") (param $0 v128) (param $1 v128) (result v128)
- (v8x16.shuffle 0 17 2 19 4 21 6 23 8 25 10 27 12 29 14 31 (local.get $0) (local.get $1))
+ (func (export "i8x16.shuffle_interleave_bytes") (param $0 v128) (param $1 v128) (result v128)
+ (i8x16.shuffle 0 17 2 19 4 21 6 23 8 25 10 27 12 29 14 31 (local.get $0) (local.get $1))
)
- (func (export "v128.shuffle_reverse_i32s") (param $0 v128) (result v128)
- (v8x16.shuffle 12 13 14 15 8 9 10 11 4 5 6 7 0 1 2 3 (local.get $0) (local.get $0))
+ (func (export "i8x16.shuffle_reverse_i32s") (param $0 v128) (result v128)
+ (i8x16.shuffle 12 13 14 15 8 9 10 11 4 5 6 7 0 1 2 3 (local.get $0) (local.get $0))
)
(func (export "i8x16.splat") (param $0 i32) (result v128) (i8x16.splat (local.get $0)))
(func (export "i8x16.extract_lane_s_first") (param $0 v128) (result i32) (i8x16.extract_lane_s 0 (local.get $0)))
@@ -105,18 +105,6 @@
(func (export "v128.bitselect") (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
(v128.bitselect (local.get $0) (local.get $1) (local.get $2))
)
- (func (export "v8x16.signselect") (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v8x16.signselect (local.get $0) (local.get $1) (local.get $2))
- )
- (func (export "v16x8.signselect") (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v16x8.signselect (local.get $0) (local.get $1) (local.get $2))
- )
- (func (export "v32x4.signselect") (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v32x4.signselect (local.get $0) (local.get $1) (local.get $2))
- )
- (func (export "v64x2.signselect") (param $0 v128) (param $1 v128) (param $2 v128) (result v128)
- (v64x2.signselect (local.get $0) (local.get $1) (local.get $2))
- )
(func (export "v128.load8_lane") (param $0 i32) (param $1 v128) (result v128) (v128.load8_lane 0 (local.get $0) (local.get $1)))
(func (export "v128.load16_lane") (param $0 i32) (param $1 v128) (result v128) (v128.load16_lane 0 (local.get $0) (local.get $1)))
(func (export "v128.load32_lane") (param $0 i32) (param $1 v128) (result v128) (v128.load32_lane 0 (local.get $0) (local.get $1)))
@@ -128,19 +116,17 @@
(func (export "i8x16.popcnt") (param $0 v128) (result v128) (i8x16.popcnt (local.get $0)))
(func (export "i8x16.abs") (param $0 v128) (result v128) (i8x16.abs (local.get $0)))
(func (export "i8x16.neg") (param $0 v128) (result v128) (i8x16.neg (local.get $0)))
- (func (export "i8x16.any_true") (param $0 v128) (result i32) (i8x16.any_true (local.get $0)))
(func (export "i8x16.all_true") (param $0 v128) (result i32) (i8x16.all_true (local.get $0)))
(func (export "i8x16.bitmask") (param $0 v128) (result i32) (i8x16.bitmask (local.get $0)))
(func (export "i8x16.shl") (param $0 v128) (param $1 i32) (result v128) (i8x16.shl (local.get $0) (local.get $1)))
(func (export "i8x16.shr_s") (param $0 v128) (param $1 i32) (result v128) (i8x16.shr_s (local.get $0) (local.get $1)))
(func (export "i8x16.shr_u") (param $0 v128) (param $1 i32) (result v128) (i8x16.shr_u (local.get $0) (local.get $1)))
(func (export "i8x16.add") (param $0 v128) (param $1 v128) (result v128) (i8x16.add (local.get $0) (local.get $1)))
- (func (export "i8x16.add_saturate_s") (param $0 v128) (param $1 v128) (result v128) (i8x16.add_saturate_s (local.get $0) (local.get $1)))
- (func (export "i8x16.add_saturate_u") (param $0 v128) (param $1 v128) (result v128) (i8x16.add_saturate_u (local.get $0) (local.get $1)))
+ (func (export "i8x16.add_sat_s") (param $0 v128) (param $1 v128) (result v128) (i8x16.add_sat_s (local.get $0) (local.get $1)))
+ (func (export "i8x16.add_sat_u") (param $0 v128) (param $1 v128) (result v128) (i8x16.add_sat_u (local.get $0) (local.get $1)))
(func (export "i8x16.sub") (param $0 v128) (param $1 v128) (result v128) (i8x16.sub (local.get $0) (local.get $1)))
- (func (export "i8x16.sub_saturate_s") (param $0 v128) (param $1 v128) (result v128) (i8x16.sub_saturate_s (local.get $0) (local.get $1)))
- (func (export "i8x16.sub_saturate_u") (param $0 v128) (param $1 v128) (result v128) (i8x16.sub_saturate_u (local.get $0) (local.get $1)))
- (func (export "i8x16.mul") (param $0 v128) (param $1 v128) (result v128) (i8x16.mul (local.get $0) (local.get $1)))
+ (func (export "i8x16.sub_sat_s") (param $0 v128) (param $1 v128) (result v128) (i8x16.sub_sat_s (local.get $0) (local.get $1)))
+ (func (export "i8x16.sub_sat_u") (param $0 v128) (param $1 v128) (result v128) (i8x16.sub_sat_u (local.get $0) (local.get $1)))
(func (export "i8x16.min_s") (param $0 v128) (param $1 v128) (result v128) (i8x16.min_s (local.get $0) (local.get $1)))
(func (export "i8x16.min_u") (param $0 v128) (param $1 v128) (result v128) (i8x16.min_u (local.get $0) (local.get $1)))
(func (export "i8x16.max_s") (param $0 v128) (param $1 v128) (result v128) (i8x16.max_s (local.get $0) (local.get $1)))
@@ -148,18 +134,17 @@
(func (export "i8x16.avgr_u") (param $0 v128) (param $1 v128) (result v128) (i8x16.avgr_u (local.get $0) (local.get $1)))
(func (export "i16x8.abs") (param $0 v128) (result v128) (i16x8.abs (local.get $0)))
(func (export "i16x8.neg") (param $0 v128) (result v128) (i16x8.neg (local.get $0)))
- (func (export "i16x8.any_true") (param $0 v128) (result i32) (i16x8.any_true (local.get $0)))
(func (export "i16x8.all_true") (param $0 v128) (result i32) (i16x8.all_true (local.get $0)))
(func (export "i16x8.bitmask") (param $0 v128) (result i32) (i16x8.bitmask (local.get $0)))
(func (export "i16x8.shl") (param $0 v128) (param $1 i32) (result v128) (i16x8.shl (local.get $0) (local.get $1)))
(func (export "i16x8.shr_s") (param $0 v128) (param $1 i32) (result v128) (i16x8.shr_s (local.get $0) (local.get $1)))
(func (export "i16x8.shr_u") (param $0 v128) (param $1 i32) (result v128) (i16x8.shr_u (local.get $0) (local.get $1)))
(func (export "i16x8.add") (param $0 v128) (param $1 v128) (result v128) (i16x8.add (local.get $0) (local.get $1)))
- (func (export "i16x8.add_saturate_s") (param $0 v128) (param $1 v128) (result v128) (i16x8.add_saturate_s (local.get $0) (local.get $1)))
- (func (export "i16x8.add_saturate_u") (param $0 v128) (param $1 v128) (result v128) (i16x8.add_saturate_u (local.get $0) (local.get $1)))
+ (func (export "i16x8.add_sat_s") (param $0 v128) (param $1 v128) (result v128) (i16x8.add_sat_s (local.get $0) (local.get $1)))
+ (func (export "i16x8.add_sat_u") (param $0 v128) (param $1 v128) (result v128) (i16x8.add_sat_u (local.get $0) (local.get $1)))
(func (export "i16x8.sub") (param $0 v128) (param $1 v128) (result v128) (i16x8.sub (local.get $0) (local.get $1)))
- (func (export "i16x8.sub_saturate_s") (param $0 v128) (param $1 v128) (result v128) (i16x8.sub_saturate_s (local.get $0) (local.get $1)))
- (func (export "i16x8.sub_saturate_u") (param $0 v128) (param $1 v128) (result v128) (i16x8.sub_saturate_u (local.get $0) (local.get $1)))
+ (func (export "i16x8.sub_sat_s") (param $0 v128) (param $1 v128) (result v128) (i16x8.sub_sat_s (local.get $0) (local.get $1)))
+ (func (export "i16x8.sub_sat_u") (param $0 v128) (param $1 v128) (result v128) (i16x8.sub_sat_u (local.get $0) (local.get $1)))
(func (export "i16x8.mul") (param $0 v128) (param $1 v128) (result v128) (i16x8.mul (local.get $0) (local.get $1)))
(func (export "i16x8.min_s") (param $0 v128) (param $1 v128) (result v128) (i16x8.min_s (local.get $0) (local.get $1)))
(func (export "i16x8.min_u") (param $0 v128) (param $1 v128) (result v128) (i16x8.min_u (local.get $0) (local.get $1)))
@@ -170,7 +155,6 @@
;; TODO: extending multiplications
(func (export "i32x4.abs") (param $0 v128) (result v128) (i32x4.abs (local.get $0)))
(func (export "i32x4.neg") (param $0 v128) (result v128) (i32x4.neg (local.get $0)))
- (func (export "i32x4.any_true") (param $0 v128) (result i32) (i32x4.any_true (local.get $0)))
(func (export "i32x4.all_true") (param $0 v128) (result i32) (i32x4.all_true (local.get $0)))
(func (export "i32x4.bitmask") (param $0 v128) (result i32) (i32x4.bitmask (local.get $0)))
(func (export "i32x4.shl") (param $0 v128) (param $1 i32) (result v128) (i32x4.shl (local.get $0) (local.get $1)))
@@ -195,8 +179,6 @@
(func (export "f32x4.abs") (param $0 v128) (result v128) (f32x4.abs (local.get $0)))
(func (export "f32x4.neg") (param $0 v128) (result v128) (f32x4.neg (local.get $0)))
(func (export "f32x4.sqrt") (param $0 v128) (result v128) (f32x4.sqrt (local.get $0)))
- (func (export "f32x4.qfma") (param $0 v128) (param $1 v128) (param $2 v128) (result v128) (f32x4.qfma (local.get $0) (local.get $1) (local.get $2)))
- (func (export "f32x4.qfms") (param $0 v128) (param $1 v128) (param $2 v128) (result v128) (f32x4.qfms (local.get $0) (local.get $1) (local.get $2)))
(func (export "f32x4.add") (param $0 v128) (param $1 v128) (result v128) (f32x4.add (local.get $0) (local.get $1)))
(func (export "f32x4.sub") (param $0 v128) (param $1 v128) (result v128) (f32x4.sub (local.get $0) (local.get $1)))
(func (export "f32x4.mul") (param $0 v128) (param $1 v128) (result v128) (f32x4.mul (local.get $0) (local.get $1)))
@@ -212,8 +194,6 @@
(func (export "f64x2.abs") (param $0 v128) (result v128) (f64x2.abs (local.get $0)))
(func (export "f64x2.neg") (param $0 v128) (result v128) (f64x2.neg (local.get $0)))
(func (export "f64x2.sqrt") (param $0 v128) (result v128) (f64x2.sqrt (local.get $0)))
- (func (export "f64x2.qfma") (param $0 v128) (param $1 v128) (param $2 v128) (result v128) (f64x2.qfma (local.get $0) (local.get $1) (local.get $2)))
- (func (export "f64x2.qfms") (param $0 v128) (param $1 v128) (param $2 v128) (result v128) (f64x2.qfms (local.get $0) (local.get $1) (local.get $2)))
(func (export "f64x2.add") (param $0 v128) (param $1 v128) (result v128) (f64x2.add (local.get $0) (local.get $1)))
(func (export "f64x2.sub") (param $0 v128) (param $1 v128) (result v128) (f64x2.sub (local.get $0) (local.get $1)))
(func (export "f64x2.mul") (param $0 v128) (param $1 v128) (result v128) (f64x2.mul (local.get $0) (local.get $1)))
@@ -229,51 +209,47 @@
;; TODO: Extending pairwise adds once they have interpreter support
(func (export "i32x4.trunc_sat_f32x4_s") (param $0 v128) (result v128) (i32x4.trunc_sat_f32x4_s (local.get $0)))
(func (export "i32x4.trunc_sat_f32x4_u") (param $0 v128) (result v128) (i32x4.trunc_sat_f32x4_u (local.get $0)))
- (func (export "i64x2.trunc_sat_f64x2_s") (param $0 v128) (result v128) (i64x2.trunc_sat_f64x2_s (local.get $0)))
- (func (export "i64x2.trunc_sat_f64x2_u") (param $0 v128) (result v128) (i64x2.trunc_sat_f64x2_u (local.get $0)))
(func (export "f32x4.convert_i32x4_s") (param $0 v128) (result v128) (f32x4.convert_i32x4_s (local.get $0)))
(func (export "f32x4.convert_i32x4_u") (param $0 v128) (result v128) (f32x4.convert_i32x4_u (local.get $0)))
- (func (export "f64x2.convert_i64x2_s") (param $0 v128) (result v128) (f64x2.convert_i64x2_s (local.get $0)))
- (func (export "f64x2.convert_i64x2_u") (param $0 v128) (result v128) (f64x2.convert_i64x2_u (local.get $0)))
- (func (export "v8x16.load_splat") (param $0 i32) (result v128) (v8x16.load_splat (local.get $0)))
- (func (export "v16x8.load_splat") (param $0 i32) (result v128) (v16x8.load_splat (local.get $0)))
- (func (export "v32x4.load_splat") (param $0 i32) (result v128) (v32x4.load_splat (local.get $0)))
- (func (export "v64x2.load_splat") (param $0 i32) (result v128) (v64x2.load_splat (local.get $0)))
+ (func (export "v128.load8_splat") (param $0 i32) (result v128) (v128.load8_splat (local.get $0)))
+ (func (export "v128.load16_splat") (param $0 i32) (result v128) (v128.load16_splat (local.get $0)))
+ (func (export "v128.load32_splat") (param $0 i32) (result v128) (v128.load32_splat (local.get $0)))
+ (func (export "v128.load64_splat") (param $0 i32) (result v128) (v128.load64_splat (local.get $0)))
(func (export "i8x16.narrow_i16x8_s") (param $0 v128) (param $1 v128) (result v128) (i8x16.narrow_i16x8_s (local.get $0) (local.get $1)))
(func (export "i8x16.narrow_i16x8_u") (param $0 v128) (param $1 v128) (result v128) (i8x16.narrow_i16x8_u (local.get $0) (local.get $1)))
(func (export "i16x8.narrow_i32x4_s") (param $0 v128) (param $1 v128) (result v128) (i16x8.narrow_i32x4_s (local.get $0) (local.get $1)))
(func (export "i16x8.narrow_i32x4_u") (param $0 v128) (param $1 v128) (result v128) (i16x8.narrow_i32x4_u (local.get $0) (local.get $1)))
- (func (export "i16x8.widen_low_i8x16_s") (param $0 v128) (result v128) (i16x8.widen_low_i8x16_s (local.get $0)))
- (func (export "i16x8.widen_high_i8x16_s") (param $0 v128) (result v128) (i16x8.widen_high_i8x16_s (local.get $0)))
- (func (export "i16x8.widen_low_i8x16_u") (param $0 v128) (result v128) (i16x8.widen_low_i8x16_u (local.get $0)))
- (func (export "i16x8.widen_high_i8x16_u") (param $0 v128) (result v128) (i16x8.widen_high_i8x16_u (local.get $0)))
- (func (export "i32x4.widen_low_i16x8_s") (param $0 v128) (result v128) (i32x4.widen_low_i16x8_s (local.get $0)))
- (func (export "i32x4.widen_high_i16x8_s") (param $0 v128) (result v128) (i32x4.widen_high_i16x8_s (local.get $0)))
- (func (export "i32x4.widen_low_i16x8_u") (param $0 v128) (result v128) (i32x4.widen_low_i16x8_u (local.get $0)))
- (func (export "i32x4.widen_high_i16x8_u") (param $0 v128) (result v128) (i32x4.widen_high_i16x8_u (local.get $0)))
- (func (export "i64x2.widen_low_i32x4_s") (param $0 v128) (result v128) (i64x2.widen_low_i32x4_s (local.get $0)))
- (func (export "i64x2.widen_high_i32x4_s") (param $0 v128) (result v128) (i64x2.widen_high_i32x4_s (local.get $0)))
- (func (export "i64x2.widen_low_i32x4_u") (param $0 v128) (result v128) (i64x2.widen_low_i32x4_u (local.get $0)))
- (func (export "i64x2.widen_high_i32x4_u") (param $0 v128) (result v128) (i64x2.widen_high_i32x4_u (local.get $0)))
- (func (export "i16x8.load8x8_u") (param $0 i32) (result v128) (i16x8.load8x8_u (local.get $0)))
- (func (export "i16x8.load8x8_s") (param $0 i32) (result v128) (i16x8.load8x8_s (local.get $0)))
- (func (export "i32x4.load16x4_u") (param $0 i32) (result v128) (i32x4.load16x4_u (local.get $0)))
- (func (export "i32x4.load16x4_s") (param $0 i32) (result v128) (i32x4.load16x4_s (local.get $0)))
- (func (export "i64x2.load32x2_u") (param $0 i32) (result v128) (i64x2.load32x2_u (local.get $0)))
- (func (export "i64x2.load32x2_s") (param $0 i32) (result v128) (i64x2.load32x2_s (local.get $0)))
+ (func (export "i16x8.extend_low_i8x16_s") (param $0 v128) (result v128) (i16x8.extend_low_i8x16_s (local.get $0)))
+ (func (export "i16x8.extend_high_i8x16_s") (param $0 v128) (result v128) (i16x8.extend_high_i8x16_s (local.get $0)))
+ (func (export "i16x8.extend_low_i8x16_u") (param $0 v128) (result v128) (i16x8.extend_low_i8x16_u (local.get $0)))
+ (func (export "i16x8.extend_high_i8x16_u") (param $0 v128) (result v128) (i16x8.extend_high_i8x16_u (local.get $0)))
+ (func (export "i32x4.extend_low_i16x8_s") (param $0 v128) (result v128) (i32x4.extend_low_i16x8_s (local.get $0)))
+ (func (export "i32x4.extend_high_i16x8_s") (param $0 v128) (result v128) (i32x4.extend_high_i16x8_s (local.get $0)))
+ (func (export "i32x4.extend_low_i16x8_u") (param $0 v128) (result v128) (i32x4.extend_low_i16x8_u (local.get $0)))
+ (func (export "i32x4.extend_high_i16x8_u") (param $0 v128) (result v128) (i32x4.extend_high_i16x8_u (local.get $0)))
+ (func (export "i64x2.extend_low_i32x4_s") (param $0 v128) (result v128) (i64x2.extend_low_i32x4_s (local.get $0)))
+ (func (export "i64x2.extend_high_i32x4_s") (param $0 v128) (result v128) (i64x2.extend_high_i32x4_s (local.get $0)))
+ (func (export "i64x2.extend_low_i32x4_u") (param $0 v128) (result v128) (i64x2.extend_low_i32x4_u (local.get $0)))
+ (func (export "i64x2.extend_high_i32x4_u") (param $0 v128) (result v128) (i64x2.extend_high_i32x4_u (local.get $0)))
+ (func (export "v128.load8x8_u") (param $0 i32) (result v128) (v128.load8x8_u (local.get $0)))
+ (func (export "v128.load8x8_s") (param $0 i32) (result v128) (v128.load8x8_s (local.get $0)))
+ (func (export "v128.load16x4_u") (param $0 i32) (result v128) (v128.load16x4_u (local.get $0)))
+ (func (export "v128.load16x4_s") (param $0 i32) (result v128) (v128.load16x4_s (local.get $0)))
+ (func (export "v128.load32x2_u") (param $0 i32) (result v128) (v128.load32x2_u (local.get $0)))
+ (func (export "v128.load32x2_s") (param $0 i32) (result v128) (v128.load32x2_s (local.get $0)))
(func (export "v128.load32_zero") (param $0 i32) (result v128) (v128.load32_zero (local.get $0)))
(func (export "v128.load64_zero") (param $0 i32) (result v128) (v128.load64_zero (local.get $0)))
- (func (export "v8x16.swizzle") (param $0 v128) (param $1 v128) (result v128) (v8x16.swizzle (local.get $0) (local.get $1)))
+ (func (export "i8x16.swizzle") (param $0 v128) (param $1 v128) (result v128) (i8x16.swizzle (local.get $0) (local.get $1)))
)
;; TODO: Additional f64x2 conversions if specified
;; Basic v128 manipulation
(assert_return (invoke "v128.load" (i32.const 128)) (v128.const i8x16 87 65 83 77 83 73 77 68 71 79 69 83 70 65 83 84))
(assert_return (invoke "v128.store" (i32.const 16) (v128.const i32x4 1 2 3 4)) (v128.const i32x4 1 2 3 4))
-(assert_return (invoke "v8x16.load_splat" (i32.const 128)) (v128.const i8x16 87 87 87 87 87 87 87 87 87 87 87 87 87 87 87 87))
-(assert_return (invoke "v16x8.load_splat" (i32.const 128)) (v128.const i8x16 87 65 87 65 87 65 87 65 87 65 87 65 87 65 87 65))
-(assert_return (invoke "v32x4.load_splat" (i32.const 128)) (v128.const i8x16 87 65 83 77 87 65 83 77 87 65 83 77 87 65 83 77))
-(assert_return (invoke "v64x2.load_splat" (i32.const 128)) (v128.const i8x16 87 65 83 77 83 73 77 68 87 65 83 77 83 73 77 68))
+(assert_return (invoke "v128.load8_splat" (i32.const 128)) (v128.const i8x16 87 87 87 87 87 87 87 87 87 87 87 87 87 87 87 87))
+(assert_return (invoke "v128.load16_splat" (i32.const 128)) (v128.const i8x16 87 65 87 65 87 65 87 65 87 65 87 65 87 65 87 65))
+(assert_return (invoke "v128.load32_splat" (i32.const 128)) (v128.const i8x16 87 65 83 77 87 65 83 77 87 65 83 77 87 65 83 77))
+(assert_return (invoke "v128.load64_splat" (i32.const 128)) (v128.const i8x16 87 65 83 77 83 73 77 68 87 65 83 77 83 73 77 68))
(assert_return (invoke "v128.const.i8x16") (v128.const i32x4 0x04030201 0x08070605 0x0c0b0a09 0x100f0e0d))
(assert_return (invoke "v128.const.i16x8") (v128.const i8x16 01 00 02 00 03 00 04 00 05 00 06 00 07 00 08 00))
(assert_return (invoke "v128.const.i32x4") (v128.const i8x16 01 00 00 00 02 00 00 00 03 00 00 00 04 00 00 00))
@@ -281,13 +257,13 @@
(assert_return (invoke "v128.const.f32x4") (v128.const f32x4 1 2 3 4))
(assert_return (invoke "v128.const.f64x2") (v128.const f64x2 1 2))
(assert_return
- (invoke "v128.shuffle_interleave_bytes"
+ (invoke "i8x16.shuffle_interleave_bytes"
(v128.const i8x16 1 0 3 0 5 0 7 0 9 0 11 0 13 0 15 0)
(v128.const i8x16 0 2 0 4 0 6 0 8 0 10 0 12 0 14 0 16)
)
(v128.const i8x16 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16)
)
-(assert_return (invoke "v128.shuffle_reverse_i32s" (v128.const i32x4 1 2 3 4)) (v128.const i32x4 4 3 2 1))
+(assert_return (invoke "i8x16.shuffle_reverse_i32s" (v128.const i32x4 1 2 3 4)) (v128.const i32x4 4 3 2 1))
;; i8x16 lane accesses
(assert_return (invoke "i8x16.splat" (i32.const 5)) (v128.const i8x16 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5))
@@ -604,10 +580,6 @@
(assert_return (invoke "i8x16.neg" (v128.const i32x4 0 1 42 -3 -56 127 -128 -126 0 -1 -42 3 56 -127 -128 126))
(v128.const i32x4 0 -1 -42 3 56 -127 -128 126 0 1 42 -3 -56 127 -128 -126)
)
-(assert_return (invoke "i8x16.any_true" (v128.const i32x4 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0)) (i32.const 0))
-(assert_return (invoke "i8x16.any_true" (v128.const i32x4 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0)) (i32.const 1))
-(assert_return (invoke "i8x16.any_true" (v128.const i32x4 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1)) (i32.const 1))
-(assert_return (invoke "i8x16.any_true" (v128.const i32x4 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) (i32.const 1))
(assert_return (invoke "i8x16.all_true" (v128.const i32x4 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0)) (i32.const 0))
(assert_return (invoke "i8x16.all_true" (v128.const i32x4 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0)) (i32.const 0))
(assert_return (invoke "i8x16.all_true" (v128.const i32x4 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1)) (i32.const 0))
@@ -639,14 +611,14 @@
(v128.const i32x4 3 17 0 0 0 135 109 46 145 225 48 184 17 249 128 215)
)
(assert_return
- (invoke "i8x16.add_saturate_s"
+ (invoke "i8x16.add_sat_s"
(v128.const i32x4 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
(v128.const i32x4 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
)
(v128.const i32x4 3 17 0 128 0 135 109 46 127 225 48 184 17 249 127 215)
)
(assert_return
- (invoke "i8x16.add_saturate_u"
+ (invoke "i8x16.add_sat_u"
(v128.const i32x4 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
(v128.const i32x4 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
)
@@ -660,27 +632,20 @@
(v128.const i32x4 253 67 254 0 254 123 159 12 61 167 158 100 17 251 130 187)
)
(assert_return
- (invoke "i8x16.sub_saturate_s"
+ (invoke "i8x16.sub_sat_s"
(v128.const i32x4 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
(v128.const i32x4 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
)
(v128.const i32x4 253 67 254 0 127 128 159 12 61 167 158 128 17 251 130 127)
)
(assert_return
- (invoke "i8x16.sub_saturate_u"
+ (invoke "i8x16.sub_sat_u"
(v128.const i32x4 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
(v128.const i32x4 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
)
(v128.const i32x4 0 0 254 0 0 123 0 12 61 167 158 100 17 0 0 0)
)
(assert_return
- (invoke "i8x16.mul"
- (v128.const i32x4 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
- (v128.const i32x4 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
- )
- (v128.const i32x4 0 230 255 0 255 6 106 237 230 52 223 76 0 6 127 126)
-)
-(assert_return
(invoke "i8x16.min_s"
(v128.const i32x4 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
(v128.const i32x4 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
@@ -723,10 +688,6 @@
(assert_return (invoke "i16x8.neg" (v128.const i32x4 0 1 42 -3 -56 32767 -32768 32766))
(v128.const i32x4 0 -1 -42 3 56 -32767 -32768 -32766)
)
-(assert_return (invoke "i16x8.any_true" (v128.const i32x4 0 0 0 0 0 0 0 0)) (i32.const 0))
-(assert_return (invoke "i16x8.any_true" (v128.const i32x4 0 0 1 0 0 0 0 0)) (i32.const 1))
-(assert_return (invoke "i16x8.any_true" (v128.const i32x4 1 1 1 1 1 0 1 1)) (i32.const 1))
-(assert_return (invoke "i16x8.any_true" (v128.const i32x4 1 1 1 1 1 1 1 1)) (i32.const 1))
(assert_return (invoke "i16x8.all_true" (v128.const i32x4 0 0 0 0 0 0 0 0)) (i32.const 0))
(assert_return (invoke "i16x8.all_true" (v128.const i32x4 0 0 1 0 0 0 0 0)) (i32.const 0))
(assert_return (invoke "i16x8.all_true" (v128.const i32x4 1 1 1 1 1 0 1 1)) (i32.const 0))
@@ -746,14 +707,14 @@
(v128.const i32x4 768 65281 0 0 34560 12288 63744 32768)
)
(assert_return
- (invoke "i16x8.add_saturate_s"
+ (invoke "i16x8.add_sat_s"
(v128.const i32x4 0 65280 32768 32512 33024 59136 64000 32766)
(v128.const i32x4 768 1 32768 33024 1536 18688 65280 2)
)
(v128.const i32x4 768 65281 32768 0 34560 12288 63744 32767)
)
(assert_return
- (invoke "i16x8.add_saturate_u"
+ (invoke "i16x8.add_sat_u"
(v128.const i32x4 0 65280 32768 32512 33024 59136 64000 32766)
(v128.const i32x4 768 1 32768 33024 1536 18688 65280 2)
)
@@ -767,14 +728,14 @@
(v128.const i32x4 64768 65279 0 65024 31488 40448 64256 32764)
)
(assert_return
- (invoke "i16x8.sub_saturate_s"
+ (invoke "i16x8.sub_sat_s"
(v128.const i32x4 0 65280 32768 32512 33024 59136 64000 32766)
(v128.const i32x4 768 1 32768 33024 1536 18688 65280 2)
)
(v128.const i32x4 64768 65279 0 32767 32768 40448 64256 32764)
)
(assert_return
- (invoke "i16x8.sub_saturate_u"
+ (invoke "i16x8.sub_sat_u"
(v128.const i32x4 0 65280 32768 32512 33024 59136 64000 32766)
(v128.const i32x4 768 1 32768 33024 1536 18688 65280 2)
)
@@ -826,10 +787,6 @@
;; i32x4 arithmetic
(assert_return (invoke "i32x4.abs" (v128.const i32x4 0 1 0x80000000 0x80000001)) (v128.const i32x4 0 1 0x80000000 0x7fffffff))
(assert_return (invoke "i32x4.neg" (v128.const i32x4 0 1 0x80000000 0x80000001)) (v128.const i32x4 0 -1 0x80000000 0x7fffffff))
-(assert_return (invoke "i32x4.any_true" (v128.const i32x4 0 0 0 0)) (i32.const 0))
-(assert_return (invoke "i32x4.any_true" (v128.const i32x4 0 0 1 0)) (i32.const 1))
-(assert_return (invoke "i32x4.any_true" (v128.const i32x4 1 0 1 1)) (i32.const 1))
-(assert_return (invoke "i32x4.any_true" (v128.const i32x4 1 1 1 1)) (i32.const 1))
(assert_return (invoke "i32x4.all_true" (v128.const i32x4 0 0 0 0)) (i32.const 0))
(assert_return (invoke "i32x4.all_true" (v128.const i32x4 0 0 1 0)) (i32.const 0))
(assert_return (invoke "i32x4.all_true" (v128.const i32x4 1 0 1 1)) (i32.const 0))
@@ -882,7 +839,6 @@
(assert_return (invoke "f32x4.abs" (v128.const f32x4 -0 nan -infinity 5)) (v128.const f32x4 0 nan infinity 5))
(assert_return (invoke "f32x4.neg" (v128.const f32x4 -0 nan -infinity 5)) (v128.const f32x4 0 -nan infinity -5))
(assert_return (invoke "f32x4.sqrt" (v128.const f32x4 -0 nan infinity 4)) (v128.const f32x4 -0 nan infinity 2))
-;; TODO: qfma/qfms tests
(assert_return (invoke "f32x4.add" (v128.const f32x4 nan -nan infinity 42) (v128.const f32x4 42 infinity infinity 1)) (v128.const f32x4 nan nan infinity 43))
(assert_return (invoke "f32x4.sub" (v128.const f32x4 nan -nan infinity 42) (v128.const f32x4 42 infinity -infinity 1)) (v128.const f32x4 nan nan infinity 41))
(assert_return (invoke "f32x4.mul" (v128.const f32x4 nan -nan infinity 42) (v128.const f32x4 42 infinity infinity 2)) (v128.const f32x4 nan nan infinity 84))
@@ -911,7 +867,6 @@
(assert_return (invoke "f64x2.neg" (v128.const f64x2 -infinity 5)) (v128.const f64x2 infinity -5))
(assert_return (invoke "f64x2.sqrt" (v128.const f64x2 -0 nan)) (v128.const f64x2 -0 nan))
(assert_return (invoke "f64x2.sqrt" (v128.const f64x2 infinity 4)) (v128.const f64x2 infinity 2))
-;; TODO: qfma/qfms tests
(assert_return (invoke "f64x2.add" (v128.const f64x2 nan -nan) (v128.const f64x2 42 infinity)) (v128.const f64x2 nan nan))
(assert_return (invoke "f64x2.add" (v128.const f64x2 infinity 42) (v128.const f64x2 infinity 1)) (v128.const f64x2 infinity 43))
(assert_return (invoke "f64x2.sub" (v128.const f64x2 nan -nan) (v128.const f64x2 42 infinity)) (v128.const f64x2 nan nan))
@@ -956,16 +911,8 @@
;; conversions
(assert_return (invoke "i32x4.trunc_sat_f32x4_s" (v128.const f32x4 42 nan infinity -infinity)) (v128.const i32x4 42 0 2147483647 -2147483648))
(assert_return (invoke "i32x4.trunc_sat_f32x4_u" (v128.const f32x4 42 nan infinity -infinity)) (v128.const i32x4 42 0 4294967295 0))
-(assert_return (invoke "i64x2.trunc_sat_f64x2_s" (v128.const f64x2 42 nan)) (v128.const i64x2 42 0))
-(assert_return (invoke "i64x2.trunc_sat_f64x2_s" (v128.const f64x2 infinity -infinity)) (v128.const i64x2 9223372036854775807 -9223372036854775808))
-(assert_return (invoke "i64x2.trunc_sat_f64x2_u" (v128.const f64x2 42 nan)) (v128.const i64x2 42 0))
-(assert_return (invoke "i64x2.trunc_sat_f64x2_u" (v128.const f64x2 infinity -infinity)) (v128.const i64x2 18446744073709551615 0))
(assert_return (invoke "f32x4.convert_i32x4_s" (v128.const i32x4 0 -1 2147483647 -2147483648)) (v128.const f32x4 0 -1 2147483648 -2147483648))
(assert_return (invoke "f32x4.convert_i32x4_u" (v128.const i32x4 0 -1 2147483647 -2147483648)) (v128.const f32x4 0 4294967296 2147483648 2147483648))
-(assert_return (invoke "f64x2.convert_i64x2_s" (v128.const i64x2 0 -1)) (v128.const f64x2 0 -1))
-(assert_return (invoke "f64x2.convert_i64x2_s" (v128.const i64x2 9223372036854775807 -9223372036854775808)) (v128.const f64x2 9223372036854775807 -9223372036854775808))
-(assert_return (invoke "f64x2.convert_i64x2_u" (v128.const i64x2 0 -1)) (v128.const f64x2 0 18446744073709551616))
-(assert_return (invoke "f64x2.convert_i64x2_u" (v128.const i64x2 9223372036854775807 -9223372036854775808)) (v128.const f64x2 9223372036854775807 9223372036854775808))
(assert_return
(invoke "i8x16.narrow_i16x8_s"
(v128.const i16x8 129 127 -32767 32767 -32768 -1 1 0)
@@ -995,44 +942,43 @@
(v128.const i16x8 32769 32767 0 65535 0 1 0 0)
)
(assert_return
- (invoke "i16x8.widen_low_i8x16_s"
+ (invoke "i16x8.extend_low_i8x16_s"
(v128.const i8x16 0 1 -1 -128 127 129 64 -64 -64 64 129 127 -128 -1 1 0)
)
(v128.const i16x8 0 1 -1 -128 127 -127 64 -64)
)
(assert_return
- (invoke "i16x8.widen_high_i8x16_s"
+ (invoke "i16x8.extend_high_i8x16_s"
(v128.const i8x16 0 1 -1 -128 127 129 64 -64 -64 64 129 127 -128 -1 1 0)
)
(v128.const i16x8 -64 64 -127 127 -128 -1 1 0)
)
(assert_return
- (invoke "i16x8.widen_low_i8x16_u"
+ (invoke "i16x8.extend_low_i8x16_u"
(v128.const i8x16 0 1 -1 -128 127 129 64 -64 -64 64 129 127 -128 -1 1 0)
)
(v128.const i16x8 0 1 255 128 127 129 64 192)
)
(assert_return
- (invoke "i16x8.widen_high_i8x16_u"
+ (invoke "i16x8.extend_high_i8x16_u"
(v128.const i8x16 0 1 -1 -128 127 129 64 -64 -64 64 129 127 -128 -1 1 0)
)
(v128.const i16x8 192 64 129 127 128 255 1 0)
)
-(assert_return (invoke "i32x4.widen_low_i16x8_s" (v128.const i16x8 0 1 -1 32768 32767 32769 16384 -16384)) (v128.const i32x4 0 1 -1 -32768))
-(assert_return (invoke "i32x4.widen_high_i16x8_s" (v128.const i16x8 0 1 -1 32768 32767 32769 16384 -16384)) (v128.const i32x4 32767 -32767 16384 -16384))
-(assert_return (invoke "i32x4.widen_low_i16x8_u" (v128.const i16x8 0 1 -1 32768 32767 32769 16384 -16384)) (v128.const i32x4 0 1 65535 32768))
-(assert_return (invoke "i32x4.widen_high_i16x8_u" (v128.const i16x8 0 1 -1 32768 32767 32769 16384 -16384)) (v128.const i32x4 32767 32769 16384 49152))
-;; TODO: test i64x2 widens
-(assert_return (invoke "i16x8.load8x8_s" (i32.const 256)) (v128.const i16x8 0xff80 0xff90 0xffa0 0xffb0 0xffc0 0xffd0 0xffe0 0xfff0))
-(assert_return (invoke "i16x8.load8x8_u" (i32.const 256)) (v128.const i16x8 0x0080 0x0090 0x00a0 0x00b0 0x00c0 0x00d0 0x00e0 0x00f0))
-(assert_return (invoke "i32x4.load16x4_s" (i32.const 256)) (v128.const i32x4 0xffff9080 0xffffb0a0 0xffffd0c0 0xfffff0e0))
-(assert_return (invoke "i32x4.load16x4_u" (i32.const 256)) (v128.const i32x4 0x00009080 0x0000b0a0 0x0000d0c0 0x0000f0e0))
-(assert_return (invoke "i64x2.load32x2_s" (i32.const 256)) (v128.const i64x2 0xffffffffb0a09080 0xfffffffff0e0d0c0))
-(assert_return (invoke "i64x2.load32x2_u" (i32.const 256)) (v128.const i64x2 0x00000000b0a09080 0x00000000f0e0d0c0))
+(assert_return (invoke "i32x4.extend_low_i16x8_s" (v128.const i16x8 0 1 -1 32768 32767 32769 16384 -16384)) (v128.const i32x4 0 1 -1 -32768))
+(assert_return (invoke "i32x4.extend_high_i16x8_s" (v128.const i16x8 0 1 -1 32768 32767 32769 16384 -16384)) (v128.const i32x4 32767 -32767 16384 -16384))
+(assert_return (invoke "i32x4.extend_low_i16x8_u" (v128.const i16x8 0 1 -1 32768 32767 32769 16384 -16384)) (v128.const i32x4 0 1 65535 32768))
+(assert_return (invoke "i32x4.extend_high_i16x8_u" (v128.const i16x8 0 1 -1 32768 32767 32769 16384 -16384)) (v128.const i32x4 32767 32769 16384 49152))
+(assert_return (invoke "v128.load8x8_s" (i32.const 256)) (v128.const i16x8 0xff80 0xff90 0xffa0 0xffb0 0xffc0 0xffd0 0xffe0 0xfff0))
+(assert_return (invoke "v128.load8x8_u" (i32.const 256)) (v128.const i16x8 0x0080 0x0090 0x00a0 0x00b0 0x00c0 0x00d0 0x00e0 0x00f0))
+(assert_return (invoke "v128.load16x4_s" (i32.const 256)) (v128.const i32x4 0xffff9080 0xffffb0a0 0xffffd0c0 0xfffff0e0))
+(assert_return (invoke "v128.load16x4_u" (i32.const 256)) (v128.const i32x4 0x00009080 0x0000b0a0 0x0000d0c0 0x0000f0e0))
+(assert_return (invoke "v128.load32x2_s" (i32.const 256)) (v128.const i64x2 0xffffffffb0a09080 0xfffffffff0e0d0c0))
+(assert_return (invoke "v128.load32x2_u" (i32.const 256)) (v128.const i64x2 0x00000000b0a09080 0x00000000f0e0d0c0))
(assert_return (invoke "v128.load32_zero" (i32.const 256)) (v128.const i32x4 0xb0a09080 0 0 0))
(assert_return (invoke "v128.load64_zero" (i32.const 256)) (v128.const i64x2 0xf0e0d0c0b0a09080 0))
(assert_return
- (invoke "v8x16.swizzle"
+ (invoke "i8x16.swizzle"
(v128.const i8x16 0xf0 0xf1 0xf2 0xf3 0xf4 0xf5 0xf6 0xf7 0xf8 0xf9 0xfa 0xfb 0xfc 0xfd 0xfe 0xff)
(v128.const i8x16 0 4 8 12 16 255 129 128 127 17 15 13 12 8 4 0)
)