Diffstat (limited to 'src')
-rw-r--r--  src/wasm-binary.h        | 122
-rw-r--r--  src/wasm/wasm-binary.cpp | 120
-rw-r--r--  src/wasm/wasm-stack.cpp  | 120
3 files changed, 181 insertions(+), 181 deletions(-)
diff --git a/src/wasm-binary.h b/src/wasm-binary.h index 804df5811..b2f681b6e 100644 --- a/src/wasm-binary.h +++ b/src/wasm-binary.h @@ -735,21 +735,21 @@ enum ASTNodes { // SIMD opcodes V128Load = 0x00, - I16x8LoadExtSVec8x8 = 0x01, - I16x8LoadExtUVec8x8 = 0x02, - I32x4LoadExtSVec16x4 = 0x03, - I32x4LoadExtUVec16x4 = 0x04, - I64x2LoadExtSVec32x2 = 0x05, - I64x2LoadExtUVec32x2 = 0x06, - V8x16LoadSplat = 0x07, - V16x8LoadSplat = 0x08, - V32x4LoadSplat = 0x09, - V64x2LoadSplat = 0x0a, + V128Load8x8S = 0x01, + V128Load8x8U = 0x02, + V128Load16x4S = 0x03, + V128Load16x4U = 0x04, + V128Load32x2S = 0x05, + V128Load32x2U = 0x06, + V128Load8Splat = 0x07, + V128Load16Splat = 0x08, + V128Load32Splat = 0x09, + V128Load64Splat = 0x0a, V128Store = 0x0b, V128Const = 0x0c, - V8x16Shuffle = 0x0d, - V8x16Swizzle = 0x0e, + I8x16Shuffle = 0x0d, + I8x16Swizzle = 0x0e, I8x16Splat = 0x0f, I16x8Splat = 0x10, @@ -818,7 +818,7 @@ enum ASTNodes { V128Not = 0x4d, V128And = 0x4e, - V128AndNot = 0x4f, + V128Andnot = 0x4f, V128Or = 0x50, V128Xor = 0x51, V128Bitselect = 0x52, @@ -835,7 +835,7 @@ enum ASTNodes { V128Load32Zero = 0x5c, V128Load64Zero = 0x5d, - F32x4DemoteZeroF64x2 = 0x5e, + F32x4DemoteF64x2Zero = 0x5e, F64x2PromoteLowF32x4 = 0x5f, I8x16Abs = 0x60, @@ -843,8 +843,8 @@ enum ASTNodes { I8x16Popcnt = 0x62, I8x16AllTrue = 0x63, I8x16Bitmask = 0x64, - I8x16NarrowSI16x8 = 0x65, - I8x16NarrowUI16x8 = 0x66, + I8x16NarrowI16x8S = 0x65, + I8x16NarrowI16x8U = 0x66, F32x4Ceil = 0x67, F32x4Floor = 0x68, F32x4Trunc = 0x69, @@ -866,22 +866,22 @@ enum ASTNodes { I8x16MaxU = 0x79, F64x2Trunc = 0x7a, I8x16AvgrU = 0x7b, - I16x8ExtAddPairWiseSI8x16 = 0x7c, - I16x8ExtAddPairWiseUI8x16 = 0x7d, - I32x4ExtAddPairWiseSI16x8 = 0x7e, - I32x4ExtAddPairWiseUI16x8 = 0x7f, + I16x8ExtaddPairwiseI8x16S = 0x7c, + I16x8ExtaddPairwiseI8x16U = 0x7d, + I32x4ExtaddPairwiseI16x8S = 0x7e, + I32x4ExtaddPairwiseI16x8U = 0x7f, I16x8Abs = 0x80, I16x8Neg = 0x81, - I16x8Q15MulrSatS = 0x82, + I16x8Q15mulrSatS = 0x82, I16x8AllTrue = 0x83, I16x8Bitmask = 0x84, - I16x8NarrowSI32x4 = 0x85, - I16x8NarrowUI32x4 = 0x86, - I16x8ExtendLowSI8x16 = 0x87, - I16x8ExtendHighSI8x16 = 0x88, - I16x8ExtendLowUI8x16 = 0x89, - I16x8ExtendHighUI8x16 = 0x8a, + I16x8NarrowI32x4S = 0x85, + I16x8NarrowI32x4U = 0x86, + I16x8ExtendLowI8x16S = 0x87, + I16x8ExtendHighI8x16S = 0x88, + I16x8ExtendLowI8x16U = 0x89, + I16x8ExtendHighI8x16U = 0x8a, I16x8Shl = 0x8b, I16x8ShrS = 0x8c, I16x8ShrU = 0x8d, @@ -899,10 +899,10 @@ enum ASTNodes { I16x8MaxU = 0x99, // 0x9a unused I16x8AvgrU = 0x9b, - I16x8ExtMulLowSI8x16 = 0x9c, - I16x8ExtMulHighSI8x16 = 0x9d, - I16x8ExtMulLowUI8x16 = 0x9e, - I16x8ExtMulHighUI8x16 = 0x9f, + I16x8ExtmulLowI8x16S = 0x9c, + I16x8ExtmulHighI8x16S = 0x9d, + I16x8ExtmulLowI8x16U = 0x9e, + I16x8ExtmulHighI8x16U = 0x9f, I32x4Abs = 0xa0, I32x4Neg = 0xa1, @@ -911,10 +911,10 @@ enum ASTNodes { I32x4Bitmask = 0xa4, // 0xa5 unused // 0xa6 unused - I32x4ExtendLowSI16x8 = 0xa7, - I32x4ExtendHighSI16x8 = 0xa8, - I32x4ExtendLowUI16x8 = 0xa9, - I32x4ExtendHighUI16x8 = 0xaa, + I32x4ExtendLowI16x8S = 0xa7, + I32x4ExtendHighI16x8S = 0xa8, + I32x4ExtendLowI16x8U = 0xa9, + I32x4ExtendHighI16x8U = 0xaa, I32x4Shl = 0xab, I32x4ShrS = 0xac, I32x4ShrU = 0xad, @@ -930,12 +930,12 @@ enum ASTNodes { I32x4MinU = 0xb7, I32x4MaxS = 0xb8, I32x4MaxU = 0xb9, - I32x4DotSVecI16x8 = 0xba, + I32x4DotI16x8S = 0xba, // 0xbb unused - I32x4ExtMulLowSI16x8 = 0xbc, - I32x4ExtMulHighSI16x8 = 0xbd, - I32x4ExtMulLowUI16x8 = 0xbe, - I32x4ExtMulHighUI16x8 = 0xbf, + I32x4ExtmulLowI16x8S = 0xbc, + 
I32x4ExtmulHighI16x8S = 0xbd, + I32x4ExtmulLowI16x8U = 0xbe, + I32x4ExtmulHighI16x8U = 0xbf, I64x2Abs = 0xc0, I64x2Neg = 0xc1, @@ -944,10 +944,10 @@ enum ASTNodes { I64x2Bitmask = 0xc4, // 0xc5 unused // 0xc6 unused - I64x2ExtendLowSI32x4 = 0xc7, - I64x2ExtendHighSI32x4 = 0xc8, - I64x2ExtendLowUI32x4 = 0xc9, - I64x2ExtendHighUI32x4 = 0xca, + I64x2ExtendLowI32x4S = 0xc7, + I64x2ExtendHighI32x4S = 0xc8, + I64x2ExtendLowI32x4U = 0xc9, + I64x2ExtendHighI32x4U = 0xca, I64x2Shl = 0xcb, I64x2ShrS = 0xcc, I64x2ShrU = 0xcd, @@ -965,10 +965,10 @@ enum ASTNodes { I64x2GtS = 0xd9, I64x2LeS = 0xda, I64x2GeS = 0xdb, - I64x2ExtMulLowSI32x4 = 0xdc, - I64x2ExtMulHighSI32x4 = 0xdd, - I64x2ExtMulLowUI32x4 = 0xde, - I64x2ExtMulHighUI32x4 = 0xdf, + I64x2ExtmulLowI32x4S = 0xdc, + I64x2ExtmulHighI32x4S = 0xdd, + I64x2ExtmulLowI32x4U = 0xde, + I64x2ExtmulHighI32x4U = 0xdf, F32x4Abs = 0xe0, F32x4Neg = 0xe1, @@ -980,8 +980,8 @@ enum ASTNodes { F32x4Div = 0xe7, F32x4Min = 0xe8, F32x4Max = 0xe9, - F32x4PMin = 0xea, - F32x4PMax = 0xeb, + F32x4Pmin = 0xea, + F32x4Pmax = 0xeb, F64x2Abs = 0xec, F64x2Neg = 0xed, @@ -993,17 +993,17 @@ enum ASTNodes { F64x2Div = 0xf3, F64x2Min = 0xf4, F64x2Max = 0xf5, - F64x2PMin = 0xf6, - F64x2PMax = 0xf7, - - I32x4TruncSatSF32x4 = 0xf8, - I32x4TruncSatUF32x4 = 0xf9, - F32x4ConvertSI32x4 = 0xfa, - F32x4ConvertUI32x4 = 0xfb, - I32x4TruncSatZeroSF64x2 = 0xfc, - I32x4TruncSatZeroUF64x2 = 0xfd, - F64x2ConvertLowSI32x4 = 0xfe, - F64x2ConvertLowUI32x4 = 0xff, + F64x2Pmin = 0xf6, + F64x2Pmax = 0xf7, + + I32x4TruncSatF32x4S = 0xf8, + I32x4TruncSatF32x4U = 0xf9, + F32x4ConvertI32x4S = 0xfa, + F32x4ConvertI32x4U = 0xfb, + I32x4TruncSatF64x2SZero = 0xfc, + I32x4TruncSatF64x2UZero = 0xfd, + F64x2ConvertLowI32x4S = 0xfe, + F64x2ConvertLowI32x4U = 0xff, // bulk memory opcodes diff --git a/src/wasm/wasm-binary.cpp b/src/wasm/wasm-binary.cpp index 9f675ce6c..4ca434bdf 100644 --- a/src/wasm/wasm-binary.cpp +++ b/src/wasm/wasm-binary.cpp @@ -5044,7 +5044,7 @@ bool WasmBinaryBuilder::maybeVisitSIMDBinary(Expression*& out, uint32_t code) { curr = allocator.alloc<Binary>(); curr->op = XorVec128; break; - case BinaryConsts::V128AndNot: + case BinaryConsts::V128Andnot: curr = allocator.alloc<Binary>(); curr->op = AndNotVec128; break; @@ -5140,23 +5140,23 @@ bool WasmBinaryBuilder::maybeVisitSIMDBinary(Expression*& out, uint32_t code) { curr = allocator.alloc<Binary>(); curr->op = AvgrUVecI16x8; break; - case BinaryConsts::I16x8Q15MulrSatS: + case BinaryConsts::I16x8Q15mulrSatS: curr = allocator.alloc<Binary>(); curr->op = Q15MulrSatSVecI16x8; break; - case BinaryConsts::I16x8ExtMulLowSI8x16: + case BinaryConsts::I16x8ExtmulLowI8x16S: curr = allocator.alloc<Binary>(); curr->op = ExtMulLowSVecI16x8; break; - case BinaryConsts::I16x8ExtMulHighSI8x16: + case BinaryConsts::I16x8ExtmulHighI8x16S: curr = allocator.alloc<Binary>(); curr->op = ExtMulHighSVecI16x8; break; - case BinaryConsts::I16x8ExtMulLowUI8x16: + case BinaryConsts::I16x8ExtmulLowI8x16U: curr = allocator.alloc<Binary>(); curr->op = ExtMulLowUVecI16x8; break; - case BinaryConsts::I16x8ExtMulHighUI8x16: + case BinaryConsts::I16x8ExtmulHighI8x16U: curr = allocator.alloc<Binary>(); curr->op = ExtMulHighUVecI16x8; break; @@ -5188,23 +5188,23 @@ bool WasmBinaryBuilder::maybeVisitSIMDBinary(Expression*& out, uint32_t code) { curr = allocator.alloc<Binary>(); curr->op = MaxUVecI32x4; break; - case BinaryConsts::I32x4DotSVecI16x8: + case BinaryConsts::I32x4DotI16x8S: curr = allocator.alloc<Binary>(); curr->op = DotSVecI16x8ToVecI32x4; break; - case 
BinaryConsts::I32x4ExtMulLowSI16x8: + case BinaryConsts::I32x4ExtmulLowI16x8S: curr = allocator.alloc<Binary>(); curr->op = ExtMulLowSVecI32x4; break; - case BinaryConsts::I32x4ExtMulHighSI16x8: + case BinaryConsts::I32x4ExtmulHighI16x8S: curr = allocator.alloc<Binary>(); curr->op = ExtMulHighSVecI32x4; break; - case BinaryConsts::I32x4ExtMulLowUI16x8: + case BinaryConsts::I32x4ExtmulLowI16x8U: curr = allocator.alloc<Binary>(); curr->op = ExtMulLowUVecI32x4; break; - case BinaryConsts::I32x4ExtMulHighUI16x8: + case BinaryConsts::I32x4ExtmulHighI16x8U: curr = allocator.alloc<Binary>(); curr->op = ExtMulHighUVecI32x4; break; @@ -5220,19 +5220,19 @@ bool WasmBinaryBuilder::maybeVisitSIMDBinary(Expression*& out, uint32_t code) { curr = allocator.alloc<Binary>(); curr->op = MulVecI64x2; break; - case BinaryConsts::I64x2ExtMulLowSI32x4: + case BinaryConsts::I64x2ExtmulLowI32x4S: curr = allocator.alloc<Binary>(); curr->op = ExtMulLowSVecI64x2; break; - case BinaryConsts::I64x2ExtMulHighSI32x4: + case BinaryConsts::I64x2ExtmulHighI32x4S: curr = allocator.alloc<Binary>(); curr->op = ExtMulHighSVecI64x2; break; - case BinaryConsts::I64x2ExtMulLowUI32x4: + case BinaryConsts::I64x2ExtmulLowI32x4U: curr = allocator.alloc<Binary>(); curr->op = ExtMulLowUVecI64x2; break; - case BinaryConsts::I64x2ExtMulHighUI32x4: + case BinaryConsts::I64x2ExtmulHighI32x4U: curr = allocator.alloc<Binary>(); curr->op = ExtMulHighUVecI64x2; break; @@ -5260,11 +5260,11 @@ bool WasmBinaryBuilder::maybeVisitSIMDBinary(Expression*& out, uint32_t code) { curr = allocator.alloc<Binary>(); curr->op = MaxVecF32x4; break; - case BinaryConsts::F32x4PMin: + case BinaryConsts::F32x4Pmin: curr = allocator.alloc<Binary>(); curr->op = PMinVecF32x4; break; - case BinaryConsts::F32x4PMax: + case BinaryConsts::F32x4Pmax: curr = allocator.alloc<Binary>(); curr->op = PMaxVecF32x4; break; @@ -5292,31 +5292,31 @@ bool WasmBinaryBuilder::maybeVisitSIMDBinary(Expression*& out, uint32_t code) { curr = allocator.alloc<Binary>(); curr->op = MaxVecF64x2; break; - case BinaryConsts::F64x2PMin: + case BinaryConsts::F64x2Pmin: curr = allocator.alloc<Binary>(); curr->op = PMinVecF64x2; break; - case BinaryConsts::F64x2PMax: + case BinaryConsts::F64x2Pmax: curr = allocator.alloc<Binary>(); curr->op = PMaxVecF64x2; break; - case BinaryConsts::I8x16NarrowSI16x8: + case BinaryConsts::I8x16NarrowI16x8S: curr = allocator.alloc<Binary>(); curr->op = NarrowSVecI16x8ToVecI8x16; break; - case BinaryConsts::I8x16NarrowUI16x8: + case BinaryConsts::I8x16NarrowI16x8U: curr = allocator.alloc<Binary>(); curr->op = NarrowUVecI16x8ToVecI8x16; break; - case BinaryConsts::I16x8NarrowSI32x4: + case BinaryConsts::I16x8NarrowI32x4S: curr = allocator.alloc<Binary>(); curr->op = NarrowSVecI32x4ToVecI16x8; break; - case BinaryConsts::I16x8NarrowUI32x4: + case BinaryConsts::I16x8NarrowI32x4U: curr = allocator.alloc<Binary>(); curr->op = NarrowUVecI32x4ToVecI16x8; break; - case BinaryConsts::V8x16Swizzle: + case BinaryConsts::I8x16Swizzle: curr = allocator.alloc<Binary>(); curr->op = SwizzleVec8x16; break; @@ -5489,103 +5489,103 @@ bool WasmBinaryBuilder::maybeVisitSIMDUnary(Expression*& out, uint32_t code) { curr = allocator.alloc<Unary>(); curr->op = NearestVecF64x2; break; - case BinaryConsts::I16x8ExtAddPairWiseSI8x16: + case BinaryConsts::I16x8ExtaddPairwiseI8x16S: curr = allocator.alloc<Unary>(); curr->op = ExtAddPairwiseSVecI8x16ToI16x8; break; - case BinaryConsts::I16x8ExtAddPairWiseUI8x16: + case BinaryConsts::I16x8ExtaddPairwiseI8x16U: curr = allocator.alloc<Unary>(); 
curr->op = ExtAddPairwiseUVecI8x16ToI16x8; break; - case BinaryConsts::I32x4ExtAddPairWiseSI16x8: + case BinaryConsts::I32x4ExtaddPairwiseI16x8S: curr = allocator.alloc<Unary>(); curr->op = ExtAddPairwiseSVecI16x8ToI32x4; break; - case BinaryConsts::I32x4ExtAddPairWiseUI16x8: + case BinaryConsts::I32x4ExtaddPairwiseI16x8U: curr = allocator.alloc<Unary>(); curr->op = ExtAddPairwiseUVecI16x8ToI32x4; break; - case BinaryConsts::I32x4TruncSatSF32x4: + case BinaryConsts::I32x4TruncSatF32x4S: curr = allocator.alloc<Unary>(); curr->op = TruncSatSVecF32x4ToVecI32x4; break; - case BinaryConsts::I32x4TruncSatUF32x4: + case BinaryConsts::I32x4TruncSatF32x4U: curr = allocator.alloc<Unary>(); curr->op = TruncSatUVecF32x4ToVecI32x4; break; - case BinaryConsts::F32x4ConvertSI32x4: + case BinaryConsts::F32x4ConvertI32x4S: curr = allocator.alloc<Unary>(); curr->op = ConvertSVecI32x4ToVecF32x4; break; - case BinaryConsts::F32x4ConvertUI32x4: + case BinaryConsts::F32x4ConvertI32x4U: curr = allocator.alloc<Unary>(); curr->op = ConvertUVecI32x4ToVecF32x4; break; - case BinaryConsts::I16x8ExtendLowSI8x16: + case BinaryConsts::I16x8ExtendLowI8x16S: curr = allocator.alloc<Unary>(); curr->op = ExtendLowSVecI8x16ToVecI16x8; break; - case BinaryConsts::I16x8ExtendHighSI8x16: + case BinaryConsts::I16x8ExtendHighI8x16S: curr = allocator.alloc<Unary>(); curr->op = ExtendHighSVecI8x16ToVecI16x8; break; - case BinaryConsts::I16x8ExtendLowUI8x16: + case BinaryConsts::I16x8ExtendLowI8x16U: curr = allocator.alloc<Unary>(); curr->op = ExtendLowUVecI8x16ToVecI16x8; break; - case BinaryConsts::I16x8ExtendHighUI8x16: + case BinaryConsts::I16x8ExtendHighI8x16U: curr = allocator.alloc<Unary>(); curr->op = ExtendHighUVecI8x16ToVecI16x8; break; - case BinaryConsts::I32x4ExtendLowSI16x8: + case BinaryConsts::I32x4ExtendLowI16x8S: curr = allocator.alloc<Unary>(); curr->op = ExtendLowSVecI16x8ToVecI32x4; break; - case BinaryConsts::I32x4ExtendHighSI16x8: + case BinaryConsts::I32x4ExtendHighI16x8S: curr = allocator.alloc<Unary>(); curr->op = ExtendHighSVecI16x8ToVecI32x4; break; - case BinaryConsts::I32x4ExtendLowUI16x8: + case BinaryConsts::I32x4ExtendLowI16x8U: curr = allocator.alloc<Unary>(); curr->op = ExtendLowUVecI16x8ToVecI32x4; break; - case BinaryConsts::I32x4ExtendHighUI16x8: + case BinaryConsts::I32x4ExtendHighI16x8U: curr = allocator.alloc<Unary>(); curr->op = ExtendHighUVecI16x8ToVecI32x4; break; - case BinaryConsts::I64x2ExtendLowSI32x4: + case BinaryConsts::I64x2ExtendLowI32x4S: curr = allocator.alloc<Unary>(); curr->op = ExtendLowSVecI32x4ToVecI64x2; break; - case BinaryConsts::I64x2ExtendHighSI32x4: + case BinaryConsts::I64x2ExtendHighI32x4S: curr = allocator.alloc<Unary>(); curr->op = ExtendHighSVecI32x4ToVecI64x2; break; - case BinaryConsts::I64x2ExtendLowUI32x4: + case BinaryConsts::I64x2ExtendLowI32x4U: curr = allocator.alloc<Unary>(); curr->op = ExtendLowUVecI32x4ToVecI64x2; break; - case BinaryConsts::I64x2ExtendHighUI32x4: + case BinaryConsts::I64x2ExtendHighI32x4U: curr = allocator.alloc<Unary>(); curr->op = ExtendHighUVecI32x4ToVecI64x2; break; - case BinaryConsts::F64x2ConvertLowSI32x4: + case BinaryConsts::F64x2ConvertLowI32x4S: curr = allocator.alloc<Unary>(); curr->op = ConvertLowSVecI32x4ToVecF64x2; break; - case BinaryConsts::F64x2ConvertLowUI32x4: + case BinaryConsts::F64x2ConvertLowI32x4U: curr = allocator.alloc<Unary>(); curr->op = ConvertLowUVecI32x4ToVecF64x2; break; - case BinaryConsts::I32x4TruncSatZeroSF64x2: + case BinaryConsts::I32x4TruncSatF64x2SZero: curr = allocator.alloc<Unary>(); curr->op 
= TruncSatZeroSVecF64x2ToVecI32x4; break; - case BinaryConsts::I32x4TruncSatZeroUF64x2: + case BinaryConsts::I32x4TruncSatF64x2UZero: curr = allocator.alloc<Unary>(); curr->op = TruncSatZeroUVecF64x2ToVecI32x4; break; - case BinaryConsts::F32x4DemoteZeroF64x2: + case BinaryConsts::F32x4DemoteF64x2Zero: curr = allocator.alloc<Unary>(); curr->op = DemoteZeroVecF64x2ToVecF32x4; break; @@ -5725,7 +5725,7 @@ bool WasmBinaryBuilder::maybeVisitSIMDReplace(Expression*& out, uint32_t code) { } bool WasmBinaryBuilder::maybeVisitSIMDShuffle(Expression*& out, uint32_t code) { - if (code != BinaryConsts::V8x16Shuffle) { + if (code != BinaryConsts::I8x16Shuffle) { return false; } auto* curr = allocator.alloc<SIMDShuffle>(); @@ -5832,43 +5832,43 @@ bool WasmBinaryBuilder::maybeVisitSIMDLoad(Expression*& out, uint32_t code) { } SIMDLoad* curr; switch (code) { - case BinaryConsts::V8x16LoadSplat: + case BinaryConsts::V128Load8Splat: curr = allocator.alloc<SIMDLoad>(); curr->op = Load8SplatVec128; break; - case BinaryConsts::V16x8LoadSplat: + case BinaryConsts::V128Load16Splat: curr = allocator.alloc<SIMDLoad>(); curr->op = Load16SplatVec128; break; - case BinaryConsts::V32x4LoadSplat: + case BinaryConsts::V128Load32Splat: curr = allocator.alloc<SIMDLoad>(); curr->op = Load32SplatVec128; break; - case BinaryConsts::V64x2LoadSplat: + case BinaryConsts::V128Load64Splat: curr = allocator.alloc<SIMDLoad>(); curr->op = Load64SplatVec128; break; - case BinaryConsts::I16x8LoadExtSVec8x8: + case BinaryConsts::V128Load8x8S: curr = allocator.alloc<SIMDLoad>(); curr->op = LoadExtSVec8x8ToVecI16x8; break; - case BinaryConsts::I16x8LoadExtUVec8x8: + case BinaryConsts::V128Load8x8U: curr = allocator.alloc<SIMDLoad>(); curr->op = LoadExtUVec8x8ToVecI16x8; break; - case BinaryConsts::I32x4LoadExtSVec16x4: + case BinaryConsts::V128Load16x4S: curr = allocator.alloc<SIMDLoad>(); curr->op = LoadExtSVec16x4ToVecI32x4; break; - case BinaryConsts::I32x4LoadExtUVec16x4: + case BinaryConsts::V128Load16x4U: curr = allocator.alloc<SIMDLoad>(); curr->op = LoadExtUVec16x4ToVecI32x4; break; - case BinaryConsts::I64x2LoadExtSVec32x2: + case BinaryConsts::V128Load32x2S: curr = allocator.alloc<SIMDLoad>(); curr->op = LoadExtSVec32x2ToVecI64x2; break; - case BinaryConsts::I64x2LoadExtUVec32x2: + case BinaryConsts::V128Load32x2U: curr = allocator.alloc<SIMDLoad>(); curr->op = LoadExtUVec32x2ToVecI64x2; break; diff --git a/src/wasm/wasm-stack.cpp b/src/wasm/wasm-stack.cpp index 0965ca0fe..1b56c1389 100644 --- a/src/wasm/wasm-stack.cpp +++ b/src/wasm/wasm-stack.cpp @@ -534,7 +534,7 @@ void BinaryInstWriter::visitSIMDReplace(SIMDReplace* curr) { } void BinaryInstWriter::visitSIMDShuffle(SIMDShuffle* curr) { - o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::V8x16Shuffle); + o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16Shuffle); for (uint8_t m : curr->mask) { o << m; } @@ -595,34 +595,34 @@ void BinaryInstWriter::visitSIMDLoad(SIMDLoad* curr) { o << int8_t(BinaryConsts::SIMDPrefix); switch (curr->op) { case Load8SplatVec128: - o << U32LEB(BinaryConsts::V8x16LoadSplat); + o << U32LEB(BinaryConsts::V128Load8Splat); break; case Load16SplatVec128: - o << U32LEB(BinaryConsts::V16x8LoadSplat); + o << U32LEB(BinaryConsts::V128Load16Splat); break; case Load32SplatVec128: - o << U32LEB(BinaryConsts::V32x4LoadSplat); + o << U32LEB(BinaryConsts::V128Load32Splat); break; case Load64SplatVec128: - o << U32LEB(BinaryConsts::V64x2LoadSplat); + o << U32LEB(BinaryConsts::V128Load64Splat); break; case LoadExtSVec8x8ToVecI16x8: - 
o << U32LEB(BinaryConsts::I16x8LoadExtSVec8x8); + o << U32LEB(BinaryConsts::V128Load8x8S); break; case LoadExtUVec8x8ToVecI16x8: - o << U32LEB(BinaryConsts::I16x8LoadExtUVec8x8); + o << U32LEB(BinaryConsts::V128Load8x8U); break; case LoadExtSVec16x4ToVecI32x4: - o << U32LEB(BinaryConsts::I32x4LoadExtSVec16x4); + o << U32LEB(BinaryConsts::V128Load16x4S); break; case LoadExtUVec16x4ToVecI32x4: - o << U32LEB(BinaryConsts::I32x4LoadExtUVec16x4); + o << U32LEB(BinaryConsts::V128Load16x4U); break; case LoadExtSVec32x2ToVecI64x2: - o << U32LEB(BinaryConsts::I64x2LoadExtSVec32x2); + o << U32LEB(BinaryConsts::V128Load32x2S); break; case LoadExtUVec32x2ToVecI64x2: - o << U32LEB(BinaryConsts::I64x2LoadExtUVec32x2); + o << U32LEB(BinaryConsts::V128Load32x2U); break; case Load32ZeroVec128: o << U32LEB(BinaryConsts::V128Load32Zero); @@ -1051,103 +1051,103 @@ void BinaryInstWriter::visitUnary(Unary* curr) { break; case ExtAddPairwiseSVecI8x16ToI16x8: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I16x8ExtAddPairWiseSI8x16); + << U32LEB(BinaryConsts::I16x8ExtaddPairwiseI8x16S); break; case ExtAddPairwiseUVecI8x16ToI16x8: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I16x8ExtAddPairWiseUI8x16); + << U32LEB(BinaryConsts::I16x8ExtaddPairwiseI8x16U); break; case ExtAddPairwiseSVecI16x8ToI32x4: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I32x4ExtAddPairWiseSI16x8); + << U32LEB(BinaryConsts::I32x4ExtaddPairwiseI16x8S); break; case ExtAddPairwiseUVecI16x8ToI32x4: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I32x4ExtAddPairWiseUI16x8); + << U32LEB(BinaryConsts::I32x4ExtaddPairwiseI16x8U); break; case TruncSatSVecF32x4ToVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I32x4TruncSatSF32x4); + << U32LEB(BinaryConsts::I32x4TruncSatF32x4S); break; case TruncSatUVecF32x4ToVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I32x4TruncSatUF32x4); + << U32LEB(BinaryConsts::I32x4TruncSatF32x4U); break; case ConvertSVecI32x4ToVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::F32x4ConvertSI32x4); + << U32LEB(BinaryConsts::F32x4ConvertI32x4S); break; case ConvertUVecI32x4ToVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::F32x4ConvertUI32x4); + << U32LEB(BinaryConsts::F32x4ConvertI32x4U); break; case ExtendLowSVecI8x16ToVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I16x8ExtendLowSI8x16); + << U32LEB(BinaryConsts::I16x8ExtendLowI8x16S); break; case ExtendHighSVecI8x16ToVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I16x8ExtendHighSI8x16); + << U32LEB(BinaryConsts::I16x8ExtendHighI8x16S); break; case ExtendLowUVecI8x16ToVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I16x8ExtendLowUI8x16); + << U32LEB(BinaryConsts::I16x8ExtendLowI8x16U); break; case ExtendHighUVecI8x16ToVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I16x8ExtendHighUI8x16); + << U32LEB(BinaryConsts::I16x8ExtendHighI8x16U); break; case ExtendLowSVecI16x8ToVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I32x4ExtendLowSI16x8); + << U32LEB(BinaryConsts::I32x4ExtendLowI16x8S); break; case ExtendHighSVecI16x8ToVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I32x4ExtendHighSI16x8); + << U32LEB(BinaryConsts::I32x4ExtendHighI16x8S); break; case ExtendLowUVecI16x8ToVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) - << 
U32LEB(BinaryConsts::I32x4ExtendLowUI16x8); + << U32LEB(BinaryConsts::I32x4ExtendLowI16x8U); break; case ExtendHighUVecI16x8ToVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I32x4ExtendHighUI16x8); + << U32LEB(BinaryConsts::I32x4ExtendHighI16x8U); break; case ExtendLowSVecI32x4ToVecI64x2: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I64x2ExtendLowSI32x4); + << U32LEB(BinaryConsts::I64x2ExtendLowI32x4S); break; case ExtendHighSVecI32x4ToVecI64x2: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I64x2ExtendHighSI32x4); + << U32LEB(BinaryConsts::I64x2ExtendHighI32x4S); break; case ExtendLowUVecI32x4ToVecI64x2: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I64x2ExtendLowUI32x4); + << U32LEB(BinaryConsts::I64x2ExtendLowI32x4U); break; case ExtendHighUVecI32x4ToVecI64x2: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I64x2ExtendHighUI32x4); + << U32LEB(BinaryConsts::I64x2ExtendHighI32x4U); break; case ConvertLowSVecI32x4ToVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::F64x2ConvertLowSI32x4); + << U32LEB(BinaryConsts::F64x2ConvertLowI32x4S); break; case ConvertLowUVecI32x4ToVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::F64x2ConvertLowUI32x4); + << U32LEB(BinaryConsts::F64x2ConvertLowI32x4U); break; case TruncSatZeroSVecF64x2ToVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I32x4TruncSatZeroSF64x2); + << U32LEB(BinaryConsts::I32x4TruncSatF64x2SZero); break; case TruncSatZeroUVecF64x2ToVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I32x4TruncSatZeroUF64x2); + << U32LEB(BinaryConsts::I32x4TruncSatF64x2UZero); break; case DemoteZeroVecF64x2ToVecF32x4: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::F32x4DemoteZeroF64x2); + << U32LEB(BinaryConsts::F32x4DemoteF64x2Zero); break; case PromoteLowVecF32x4ToVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) @@ -1546,7 +1546,7 @@ void BinaryInstWriter::visitBinary(Binary* curr) { o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::V128Xor); break; case AndNotVec128: - o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::V128AndNot); + o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::V128Andnot); break; case AddVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I8x16Add); @@ -1627,23 +1627,23 @@ void BinaryInstWriter::visitBinary(Binary* curr) { break; case Q15MulrSatSVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I16x8Q15MulrSatS); + << U32LEB(BinaryConsts::I16x8Q15mulrSatS); break; case ExtMulLowSVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I16x8ExtMulLowSI8x16); + << U32LEB(BinaryConsts::I16x8ExtmulLowI8x16S); break; case ExtMulHighSVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I16x8ExtMulHighSI8x16); + << U32LEB(BinaryConsts::I16x8ExtmulHighI8x16S); break; case ExtMulLowUVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I16x8ExtMulLowUI8x16); + << U32LEB(BinaryConsts::I16x8ExtmulLowI8x16U); break; case ExtMulHighUVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I16x8ExtMulHighUI8x16); + << U32LEB(BinaryConsts::I16x8ExtmulHighI8x16U); break; case AddVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I32x4Add); @@ -1668,23 +1668,23 @@ void BinaryInstWriter::visitBinary(Binary* curr) { break; case DotSVecI16x8ToVecI32x4: o << 
int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I32x4DotSVecI16x8); + << U32LEB(BinaryConsts::I32x4DotI16x8S); break; case ExtMulLowSVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I32x4ExtMulLowSI16x8); + << U32LEB(BinaryConsts::I32x4ExtmulLowI16x8S); break; case ExtMulHighSVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I32x4ExtMulHighSI16x8); + << U32LEB(BinaryConsts::I32x4ExtmulHighI16x8S); break; case ExtMulLowUVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I32x4ExtMulLowUI16x8); + << U32LEB(BinaryConsts::I32x4ExtmulLowI16x8U); break; case ExtMulHighUVecI32x4: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I32x4ExtMulHighUI16x8); + << U32LEB(BinaryConsts::I32x4ExtmulHighI16x8U); break; case AddVecI64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::I64x2Add); @@ -1697,19 +1697,19 @@ void BinaryInstWriter::visitBinary(Binary* curr) { break; case ExtMulLowSVecI64x2: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I64x2ExtMulLowSI32x4); + << U32LEB(BinaryConsts::I64x2ExtmulLowI32x4S); break; case ExtMulHighSVecI64x2: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I64x2ExtMulHighSI32x4); + << U32LEB(BinaryConsts::I64x2ExtmulHighI32x4S); break; case ExtMulLowUVecI64x2: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I64x2ExtMulLowUI32x4); + << U32LEB(BinaryConsts::I64x2ExtmulLowI32x4U); break; case ExtMulHighUVecI64x2: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I64x2ExtMulHighUI32x4); + << U32LEB(BinaryConsts::I64x2ExtmulHighI32x4U); break; case AddVecF32x4: @@ -1731,10 +1731,10 @@ void BinaryInstWriter::visitBinary(Binary* curr) { o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Max); break; case PMinVecF32x4: - o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4PMin); + o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Pmin); break; case PMaxVecF32x4: - o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4PMax); + o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F32x4Pmax); break; case AddVecF64x2: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Add); @@ -1755,32 +1755,32 @@ void BinaryInstWriter::visitBinary(Binary* curr) { o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Max); break; case PMinVecF64x2: - o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2PMin); + o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Pmin); break; case PMaxVecF64x2: - o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2PMax); + o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::F64x2Pmax); break; case NarrowSVecI16x8ToVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I8x16NarrowSI16x8); + << U32LEB(BinaryConsts::I8x16NarrowI16x8S); break; case NarrowUVecI16x8ToVecI8x16: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I8x16NarrowUI16x8); + << U32LEB(BinaryConsts::I8x16NarrowI16x8U); break; case NarrowSVecI32x4ToVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I16x8NarrowSI32x4); + << U32LEB(BinaryConsts::I16x8NarrowI32x4S); break; case NarrowUVecI32x4ToVecI16x8: o << int8_t(BinaryConsts::SIMDPrefix) - << U32LEB(BinaryConsts::I16x8NarrowUI32x4); + << U32LEB(BinaryConsts::I16x8NarrowI32x4U); break; case SwizzleVec8x16: o << int8_t(BinaryConsts::SIMDPrefix) - << 
U32LEB(BinaryConsts::V8x16Swizzle); + << U32LEB(BinaryConsts::I8x16Swizzle); break; case InvalidBinary: |