author | Thomas Lively <7121787+tlively@users.noreply.github.com> | 2020-01-07 11:16:44 -0800 |
---|---|---|
committer | GitHub <noreply@github.com> | 2020-01-07 11:16:44 -0800 |
commit | e8f9d207427bda2f6e22c28ff0210b294b1f70e1 (patch) | |
tree | 503b20fb06274e38af7e25e3a1a4106827c52693 /src | |
parent | f73b40c7873dbd2dd46a962f3afe5b97a7fc8b0a (diff) | |
download | binaryen-e8f9d207427bda2f6e22c28ff0210b294b1f70e1.tar.gz binaryen-e8f9d207427bda2f6e22c28ff0210b294b1f70e1.tar.bz2 binaryen-e8f9d207427bda2f6e22c28ff0210b294b1f70e1.zip |
[NFC] Enforce use of `Type::` on type names (#2434)
Diffstat (limited to 'src')
76 files changed, 2204 insertions, 2049 deletions
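The change is purely mechanical: every bare type-name constant (`i32`, `i64`, `f32`, `f64`, `v128`, `none`, `unreachable`, and the reference types) is spelled with an explicit `Type::` qualifier, with no behavioral change. Below is a minimal standalone sketch of the pattern only — the `Type` enum here is a stand-in for Binaryen's actual `wasm::Type` class, which is richer than shown, and the function merely mirrors the shape of `getSig()` in `src/asmjs/asm_v_wasm.cpp` rather than reproducing it.

```cpp
#include <cassert>

// Stand-in for wasm::Type; the real class carries more state. This enum only
// illustrates the naming style the commit enforces.
enum class Type { none, i32, i64, f32, f64, unreachable };

// Sketch in the shape of getSig() from src/asmjs/asm_v_wasm.cpp. With the
// qualified spelling, Type::i32 cannot collide with any other `i32`
// identifier in scope, which is the point of the cleanup.
static char getSig(Type type) {
  switch (type) {
    case Type::i32:
      return 'i';
    case Type::i64:
      return 'j';
    case Type::f32:
      return 'f';
    case Type::f64:
      return 'd';
    case Type::none:
      return 'v';
    case Type::unreachable:
      return '?'; // the real function treats this case as unreachable
  }
  return '?';
}

int main() {
  assert(getSig(Type::f64) == 'd');
  assert(getSig(Type::i32) == 'i');
  return 0;
}
```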
diff --git a/src/abi/js.h b/src/abi/js.h index 89e3f0087..de74899b6 100644 --- a/src/abi/js.h +++ b/src/abi/js.h @@ -67,14 +67,14 @@ ensureScratchMemoryHelpers(Module* wasm, wasm->addFunction(std::move(func)); }; - ensureImport(SCRATCH_LOAD_I32, {i32}, i32); - ensureImport(SCRATCH_STORE_I32, {i32, i32}, none); - ensureImport(SCRATCH_LOAD_I64, {}, i64); - ensureImport(SCRATCH_STORE_I64, {i64}, none); - ensureImport(SCRATCH_LOAD_F32, {}, f32); - ensureImport(SCRATCH_STORE_F32, {f32}, none); - ensureImport(SCRATCH_LOAD_F64, {}, f64); - ensureImport(SCRATCH_STORE_F64, {f64}, none); + ensureImport(SCRATCH_LOAD_I32, {Type::i32}, Type::i32); + ensureImport(SCRATCH_STORE_I32, {Type::i32, Type::i32}, Type::none); + ensureImport(SCRATCH_LOAD_I64, {}, Type::i64); + ensureImport(SCRATCH_STORE_I64, {Type::i64}, Type::none); + ensureImport(SCRATCH_LOAD_F32, {}, Type::f32); + ensureImport(SCRATCH_STORE_F32, {Type::f32}, Type::none); + ensureImport(SCRATCH_LOAD_F64, {}, Type::f64); + ensureImport(SCRATCH_STORE_F64, {Type::f64}, Type::none); } inline bool isScratchMemoryHelper(cashew::IString name) { diff --git a/src/abi/stack.h b/src/abi/stack.h index 265a7af6e..c06caa2d0 100644 --- a/src/abi/stack.h +++ b/src/abi/stack.h @@ -88,7 +88,7 @@ getStackSpace(Index local, Function* func, Index size, Module& wasm) { local, builder.makeGlobalGet(stackPointer->name, PointerType))); // TODO: add stack max check Expression* added; - if (PointerType == i32) { + if (PointerType == Type::i32) { added = builder.makeBinary(AddInt32, builder.makeLocalGet(local, PointerType), builder.makeConst(Literal(int32_t(size)))); @@ -104,7 +104,7 @@ getStackSpace(Index local, Function* func, Index size, Module& wasm) { FindAllPointers<Return> finder(func->body); for (auto** ptr : finder.list) { auto* ret = (*ptr)->cast<Return>(); - if (ret->value && ret->value->type != unreachable) { + if (ret->value && ret->value->type != Type::unreachable) { // handle the returned value auto* block = builder.makeBlock(); auto temp = builder.addVar(func, ret->value->type); @@ -120,10 +120,10 @@ getStackSpace(Index local, Function* func, Index size, Module& wasm) { } } // add stack restores to the body - if (func->body->type == none) { + if (func->body->type == Type::none) { block->list.push_back(func->body); block->list.push_back(makeStackRestore()); - } else if (func->body->type == unreachable) { + } else if (func->body->type == Type::unreachable) { block->list.push_back(func->body); // no need to restore the old stack value, we're gone anyhow } else { diff --git a/src/asm2wasm.h b/src/asm2wasm.h index b30a1f6da..1f1805379 100644 --- a/src/asm2wasm.h +++ b/src/asm2wasm.h @@ -430,7 +430,7 @@ public: // zero bool import; IString module, base; - MappedGlobal() : type(none), import(false) {} + MappedGlobal() : type(Type::none), import(false) {} MappedGlobal(Type type) : type(type), import(false) {} MappedGlobal(Type type, bool import, IString module, IString base) : type(type), import(import), module(module), base(base) {} @@ -456,7 +456,7 @@ public: private: void allocateGlobal(IString name, Type type, Literal value = Literal()) { assert(mappedGlobals.find(name) == mappedGlobals.end()); - if (value.type == none) { + if (value.type == Type::none) { value = Literal::makeZero(type); } mappedGlobals.emplace(name, MappedGlobal(type)); @@ -529,12 +529,8 @@ private: // ok since in JS, double can contain everything i32 and f32 can). for (size_t i = 0; i < params.size(); i++) { if (mergedParams.size() > i) { - // TODO: Is this dead? 
- // if (mergedParams[i] == Type::none) { - // mergedParams[i] = params[i]; // use a more concrete type - // } else if (mergedParams[i] != params[i]) { - mergedParams[i] = f64; // overloaded type, make it a double + mergedParams[i] = Type::f64; // overloaded type, make it a double } } else { mergedParams.push_back(params[i]); // add a new param @@ -557,7 +553,7 @@ private: } Type getResultTypeOfCallUsingParent(Ref parent, AsmData* data) { - auto result = none; + Type result = Type::none; if (!!parent) { // if the parent is a seq, we cannot be the last element in it (we would // have a coercion, which would be the parent), so we must be (us, @@ -642,18 +638,18 @@ private: if (op == PLUS) { return isInteger ? BinaryOp::AddInt32 - : (leftType == f32 ? BinaryOp::AddFloat32 - : BinaryOp::AddFloat64); + : (leftType == Type::f32 ? BinaryOp::AddFloat32 + : BinaryOp::AddFloat64); } if (op == MINUS) { return isInteger ? BinaryOp::SubInt32 - : (leftType == f32 ? BinaryOp::SubFloat32 - : BinaryOp::SubFloat64); + : (leftType == Type::f32 ? BinaryOp::SubFloat32 + : BinaryOp::SubFloat64); } if (op == MUL) { return isInteger ? BinaryOp::MulInt32 - : (leftType == f32 ? BinaryOp::MulFloat32 - : BinaryOp::MulFloat64); + : (leftType == Type::f32 ? BinaryOp::MulFloat32 + : BinaryOp::MulFloat64); } if (op == AND) { return BinaryOp::AndInt32; @@ -674,14 +670,14 @@ private: return BinaryOp::ShrUInt32; } if (op == EQ) { - return isInteger - ? BinaryOp::EqInt32 - : (leftType == f32 ? BinaryOp::EqFloat32 : BinaryOp::EqFloat64); + return isInteger ? BinaryOp::EqInt32 + : (leftType == Type::f32 ? BinaryOp::EqFloat32 + : BinaryOp::EqFloat64); } if (op == NE) { - return isInteger - ? BinaryOp::NeInt32 - : (leftType == f32 ? BinaryOp::NeFloat32 : BinaryOp::NeFloat64); + return isInteger ? BinaryOp::NeInt32 + : (leftType == Type::f32 ? BinaryOp::NeFloat32 + : BinaryOp::NeFloat64); } bool isUnsigned = isUnsignedCoercion(left) || isUnsignedCoercion(right); @@ -690,7 +686,8 @@ private: if (isInteger) { return isUnsigned ? BinaryOp::DivUInt32 : BinaryOp::DivSInt32; } - return leftType == f32 ? BinaryOp::DivFloat32 : BinaryOp::DivFloat64; + return leftType == Type::f32 ? BinaryOp::DivFloat32 + : BinaryOp::DivFloat64; } if (op == MOD) { if (isInteger) { @@ -703,25 +700,25 @@ private: if (isInteger) { return isUnsigned ? BinaryOp::GeUInt32 : BinaryOp::GeSInt32; } - return leftType == f32 ? BinaryOp::GeFloat32 : BinaryOp::GeFloat64; + return leftType == Type::f32 ? BinaryOp::GeFloat32 : BinaryOp::GeFloat64; } if (op == GT) { if (isInteger) { return isUnsigned ? BinaryOp::GtUInt32 : BinaryOp::GtSInt32; } - return leftType == f32 ? BinaryOp::GtFloat32 : BinaryOp::GtFloat64; + return leftType == Type::f32 ? BinaryOp::GtFloat32 : BinaryOp::GtFloat64; } if (op == LE) { if (isInteger) { return isUnsigned ? BinaryOp::LeUInt32 : BinaryOp::LeSInt32; } - return leftType == f32 ? BinaryOp::LeFloat32 : BinaryOp::LeFloat64; + return leftType == Type::f32 ? BinaryOp::LeFloat32 : BinaryOp::LeFloat64; } if (op == LT) { if (isInteger) { return isUnsigned ? BinaryOp::LtUInt32 : BinaryOp::LtSInt32; } - return leftType == f32 ? BinaryOp::LtFloat32 : BinaryOp::LtFloat64; + return leftType == Type::f32 ? 
BinaryOp::LtFloat32 : BinaryOp::LtFloat64; } abort_on("bad wasm binary op", op); abort(); // avoid warning @@ -785,7 +782,7 @@ private: Literal getLiteral(Ref ast) { Literal ret = checkLiteral(ast); - assert(ret.type != none); + assert(ret.type != Type::none); return ret; } @@ -805,15 +802,15 @@ private: if (base == ABS) { assert(operands && operands->size() == 1); Type type = (*operands)[0]->type; - if (type == i32) { + if (type == Type::i32) { sig = Signature(Type::i32, Type::i32); return true; } - if (type == f32) { + if (type == Type::f32) { sig = Signature(Type::f32, Type::f32); return true; } - if (type == f64) { + if (type == Type::f64) { sig = Signature(Type::f64, Type::f64); return true; } @@ -838,7 +835,7 @@ private: } Expression* truncateToInt32(Expression* value) { - if (value->type == i64) { + if (value->type == Type::i64) { return builder.makeUnary(UnaryOp::WrapInt64, value); } // either i32, or a call_import whose type we don't know yet (but would be @@ -895,7 +892,7 @@ void Asm2WasmBuilder::processAsm(Ref ast) { import->name = MEMORY_BASE; import->module = "env"; import->base = MEMORY_BASE; - import->type = i32; + import->type = Type::i32; wasm.addGlobal(import); } @@ -905,7 +902,7 @@ void Asm2WasmBuilder::processAsm(Ref ast) { import->name = TABLE_BASE; import->module = "env"; import->base = TABLE_BASE; - import->type = i32; + import->type = Type::i32; wasm.addGlobal(import); } @@ -1277,7 +1274,7 @@ void Asm2WasmBuilder::processAsm(Ref ast) { // when function pointer casts are emulated. if (wasm.table.segments.size() == 0) { wasm.table.segments.emplace_back( - builder.makeGlobalGet(Name(TABLE_BASE), i32)); + builder.makeGlobalGet(Name(TABLE_BASE), Type::i32)); } auto& segment = wasm.table.segments[0]; functionTableStarts[name] = @@ -1332,7 +1329,7 @@ void Asm2WasmBuilder::processAsm(Ref ast) { auto value = pair[1]->getInteger(); auto* global = builder.makeGlobal(key, - i32, + Type::i32, builder.makeConst(Literal(int32_t(value))), Builder::Immutable); wasm.addGlobal(global); @@ -1512,11 +1509,11 @@ void Asm2WasmBuilder::processAsm(Ref ast) { curr->operands[i]->type == Type::unreachable); // overloaded, upgrade to f64 switch (curr->operands[i]->type) { - case i32: + case Type::i32: curr->operands[i] = parent->builder.makeUnary( ConvertSInt32ToFloat64, curr->operands[i]); break; - case f32: + case Type::f32: curr->operands[i] = parent->builder.makeUnary(PromoteFloat32, curr->operands[i]); break; @@ -1533,18 +1530,18 @@ void Asm2WasmBuilder::processAsm(Ref ast) { // we use a JS f64 value which is the most general, and convert to // it switch (old) { - case i32: { + case Type::i32: { Unary* trunc = parent->builder.makeUnary(TruncSFloat64ToInt32, curr); replaceCurrent( makeTrappingUnary(trunc, parent->trappingFunctions)); break; } - case f32: { + case Type::f32: { replaceCurrent(parent->builder.makeUnary(DemoteFloat64, curr)); break; } - case none: { + case Type::none: { // this function returns a value, but we are not using it, so it // must be dropped. autodrop will do that for us. break; @@ -1553,7 +1550,7 @@ void Asm2WasmBuilder::processAsm(Ref ast) { WASM_UNREACHABLE("unexpected type"); } } else { - assert(old == none); + assert(old == Type::none); // we don't want a return value here, but the import does provide // one autodrop will do that for us. 
} @@ -1651,8 +1648,8 @@ void Asm2WasmBuilder::processAsm(Ref ast) { i > 0 && (expressionStack[i - 1]->is<Block>() || expressionStack[i - 1]->is<Loop>() || expressionStack[i - 1]->is<If>()); - if (i == 0 || parentIsStructure || exp->type == none || - exp->type == unreachable) { + if (i == 0 || parentIsStructure || exp->type == Type::none || + exp->type == Type::unreachable) { if (debugLocations.count(exp) > 0) { // already present, so look back up i++; @@ -1746,33 +1743,34 @@ void Asm2WasmBuilder::processAsm(Ref ast) { // returns x / y auto* func = wasm.getFunction(udivmoddi4); Builder::clearLocals(func); - Index xl = Builder::addParam(func, "xl", i32), - xh = Builder::addParam(func, "xh", i32), - yl = Builder::addParam(func, "yl", i32), - yh = Builder::addParam(func, "yh", i32), - r = Builder::addParam(func, "r", i32), - x64 = Builder::addVar(func, "x64", i64), - y64 = Builder::addVar(func, "y64", i64); + Index xl = Builder::addParam(func, "xl", Type::i32), + xh = Builder::addParam(func, "xh", Type::i32), + yl = Builder::addParam(func, "yl", Type::i32), + yh = Builder::addParam(func, "yh", Type::i32), + r = Builder::addParam(func, "r", Type::i32), + x64 = Builder::addVar(func, "x64", Type::i64), + y64 = Builder::addVar(func, "y64", Type::i64); auto* body = allocator.alloc<Block>(); body->list.push_back( builder.makeLocalSet(x64, I64Utilities::recreateI64(builder, xl, xh))); body->list.push_back( builder.makeLocalSet(y64, I64Utilities::recreateI64(builder, yl, yh))); - body->list.push_back(builder.makeIf( - builder.makeLocalGet(r, i32), - builder.makeStore(8, - 0, - 8, - builder.makeLocalGet(r, i32), - builder.makeBinary(RemUInt64, - builder.makeLocalGet(x64, i64), - builder.makeLocalGet(y64, i64)), - i64))); body->list.push_back( - builder.makeLocalSet(x64, - builder.makeBinary(DivUInt64, - builder.makeLocalGet(x64, i64), - builder.makeLocalGet(y64, i64)))); + builder.makeIf(builder.makeLocalGet(r, Type::i32), + builder.makeStore( + 8, + 0, + 8, + builder.makeLocalGet(r, Type::i32), + builder.makeBinary(RemUInt64, + builder.makeLocalGet(x64, Type::i64), + builder.makeLocalGet(y64, Type::i64)), + Type::i64))); + body->list.push_back(builder.makeLocalSet( + x64, + builder.makeBinary(DivUInt64, + builder.makeLocalGet(x64, Type::i64), + builder.makeLocalGet(y64, Type::i64)))); body->list.push_back( builder.makeGlobalSet(tempRet0, I64Utilities::getI64High(builder, x64))); body->list.push_back(I64Utilities::getI64Low(builder, x64)); @@ -1841,7 +1839,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { return; } addedI32Temp = true; - Builder::addVar(function, I32_TEMP, i32); + Builder::addVar(function, I32_TEMP, Type::i32); functionVariables.insert(I32_TEMP); asmData.addVar(I32_TEMP, ASM_INT); }; @@ -1867,7 +1865,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { if (name == DEBUGGER) { Call* call = allocator.alloc<Call>(); call->target = DEBUGGER; - call->type = none; + call->type = Type::none; static bool addedImport = false; if (!addedImport) { addedImport = true; @@ -1945,13 +1943,14 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { if (ret->valueType != ret->value->type) { // in asm.js we have some implicit coercions that we must do explicitly // here - if (ret->valueType == f32 && ret->value->type == f64) { + if (ret->valueType == Type::f32 && ret->value->type == Type::f64) { auto conv = allocator.alloc<Unary>(); conv->op = DemoteFloat64; conv->value = ret->value; conv->type = Type::f32; ret->value = conv; - } else if (ret->valueType == f64 && ret->value->type == f32) 
{ + } else if (ret->valueType == Type::f64 && + ret->value->type == Type::f32) { ret->value = ensureDouble(ret->value); } else { abort_on("bad sub[] types", ast); @@ -1963,9 +1962,9 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { if (what == BINARY) { if ((ast[1] == OR || ast[1] == TRSHIFT) && ast[3]->isNumber() && ast[3]->getNumber() == 0) { - auto ret = - process(ast[2]); // just look through the ()|0 or ()>>>0 coercion - fixCallType(ret, i32); + // just look through the ()|0 or ()>>>0 coercion + auto ret = process(ast[2]); + fixCallType(ret, Type::i32); return ret; } auto ret = allocator.alloc<Binary>(); @@ -1981,7 +1980,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { call->target = F64_REM; call->operands.push_back(ensureDouble(ret->left)); call->operands.push_back(ensureDouble(ret->right)); - call->type = f64; + call->type = Type::f64; static bool addedImport = false; if (!addedImport) { addedImport = true; @@ -2013,22 +2012,22 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { } else if (what == UNARY_PREFIX) { if (ast[1] == PLUS) { Literal literal = checkLiteral(ast); - if (literal.type != none) { + if (literal.type != Type::none) { return builder.makeConst(literal); } auto ret = process(ast[2]); // we are a +() coercion - if (ret->type == i32) { + if (ret->type == Type::i32) { auto conv = allocator.alloc<Unary>(); conv->op = isUnsignedCoercion(ast[2]) ? ConvertUInt32ToFloat64 : ConvertSInt32ToFloat64; conv->value = ret; - conv->type = Type::f64; + conv->type = Type::Type::f64; return conv; } - if (ret->type == f32) { + if (ret->type == Type::f32) { return ensureDouble(ret); } - fixCallType(ret, f64); + fixCallType(ret, Type::f64); return ret; } else if (ast[1] == MINUS) { if (ast[2]->isNumber() || @@ -2067,7 +2066,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { // if we have an unsigned coercion on us, it is an unsigned op Expression* expr = process(ast[2][2]); bool isSigned = !isParentUnsignedCoercion(astStackHelper.getParent()); - bool isF64 = expr->type == f64; + bool isF64 = expr->type == Type::f64; UnaryOp op; if (isSigned && isF64) { op = UnaryOp::TruncSFloat64ToInt32; @@ -2092,7 +2091,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { auto ret = allocator.alloc<Unary>(); ret->op = EqZInt32; ret->value = process(ast[2]); - ret->type = i32; + ret->type = Type::i32; return ret; } abort_on("bad unary", ast); @@ -2125,34 +2124,34 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { if (name == Math_fround) { assert(ast[2]->size() == 1); Literal lit = checkLiteral(ast[2][0], false /* raw is float */); - if (lit.type == f64) { + if (lit.type == Type::f64) { return builder.makeConst(Literal((float)lit.getf64())); } auto ret = allocator.alloc<Unary>(); ret->value = process(ast[2][0]); - if (ret->value->type == f64) { + if (ret->value->type == Type::f64) { ret->op = DemoteFloat64; - } else if (ret->value->type == i32) { + } else if (ret->value->type == Type::i32) { if (isUnsignedCoercion(ast[2][0])) { ret->op = ConvertUInt32ToFloat32; } else { ret->op = ConvertSInt32ToFloat32; } - } else if (ret->value->type == f32) { + } else if (ret->value->type == Type::f32) { return ret->value; - } else if (ret->value->type == none) { // call, etc. - ret->value->type = f32; + } else if (ret->value->type == Type::none) { // call, etc. 
+ ret->value->type = Type::f32; return ret->value; } else { abort_on("confusing fround target", ast[2][0]); } - ret->type = f32; + ret->type = Type::f32; return ret; } if (name == Math_abs) { // overloaded on type: i32, f32 or f64 Expression* value = process(ast[2][0]); - if (value->type == i32) { + if (value->type == Type::i32) { // No wasm support, so use a temp local ensureI32Temp(); auto set = allocator.alloc<LocalSet>(); @@ -2163,7 +2162,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { auto get = [&]() { auto ret = allocator.alloc<LocalGet>(); ret->index = function->getLocalIndex(I32_TEMP); - ret->type = i32; + ret->type = Type::i32; return ret; }; auto isNegative = allocator.alloc<Binary>(); @@ -2177,18 +2176,18 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { flip->op = SubInt32; flip->left = builder.makeConst(Literal(0)); flip->right = get(); - flip->type = i32; + flip->type = Type::i32; auto select = allocator.alloc<Select>(); select->ifTrue = flip; select->ifFalse = get(); select->condition = isNegative; - select->type = i32; + select->type = Type::i32; block->list.push_back(select); block->finalize(); return block; - } else if (value->type == f32 || value->type == f64) { + } else if (value->type == Type::f32 || value->type == Type::f64) { auto ret = allocator.alloc<Unary>(); - ret->op = value->type == f32 ? AbsFloat32 : AbsFloat64; + ret->op = value->type == Type::f32 ? AbsFloat32 : AbsFloat64; ret->value = value; ret->type = value->type; return ret; @@ -2201,12 +2200,12 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { Expression* value = process(ast[2][0]); auto ret = allocator.alloc<Unary>(); ret->value = value; - if (value->type == f32) { + if (value->type == Type::f32) { ret->op = name == Math_floor ? FloorFloat32 : name == Math_ceil ? CeilFloat32 : SqrtFloat32; ret->type = value->type; - } else if (value->type == f64) { + } else if (value->type == Type::f64) { ret->op = name == Math_floor ? FloorFloat64 : name == Math_ceil ? CeilFloat64 : SqrtFloat64; @@ -2223,9 +2222,9 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { auto ret = allocator.alloc<Binary>(); ret->left = process(ast[2][0]); ret->right = process(ast[2][1]); - if (ret->left->type == f32) { + if (ret->left->type == Type::f32) { ret->op = name == Math_max ? MaxFloat32 : MinFloat32; - } else if (ret->left->type == f64) { + } else if (ret->left->type == Type::f64) { ret->op = name == Math_max ? MaxFloat64 : MinFloat64; } else { Fatal() << "min/max only work on float/double in asm.js and wasm"; @@ -2335,27 +2334,28 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { case 'l': { auto align = num == 2 ? 
ast[2][1]->getInteger() : 0; if (name == LOAD1) { - return builder.makeLoad(1, true, 0, 1, process(ast[2][0]), i32); + return builder.makeLoad( + 1, true, 0, 1, process(ast[2][0]), Type::i32); } if (name == LOAD2) { return builder.makeLoad( - 2, true, 0, indexOr(align, 2), process(ast[2][0]), i32); + 2, true, 0, indexOr(align, 2), process(ast[2][0]), Type::i32); } if (name == LOAD4) { return builder.makeLoad( - 4, true, 0, indexOr(align, 4), process(ast[2][0]), i32); + 4, true, 0, indexOr(align, 4), process(ast[2][0]), Type::i32); } if (name == LOAD8) { return builder.makeLoad( - 8, true, 0, indexOr(align, 8), process(ast[2][0]), i64); + 8, true, 0, indexOr(align, 8), process(ast[2][0]), Type::i64); } if (name == LOADF) { return builder.makeLoad( - 4, true, 0, indexOr(align, 4), process(ast[2][0]), f32); + 4, true, 0, indexOr(align, 4), process(ast[2][0]), Type::f32); } if (name == LOADD) { return builder.makeLoad( - 8, true, 0, indexOr(align, 8), process(ast[2][0]), f64); + 8, true, 0, indexOr(align, 8), process(ast[2][0]), Type::f64); } break; } @@ -2363,7 +2363,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { auto align = num == 3 ? ast[2][2]->getInteger() : 0; if (name == STORE1) { return builder.makeStore( - 1, 0, 1, process(ast[2][0]), process(ast[2][1]), i32); + 1, 0, 1, process(ast[2][0]), process(ast[2][1]), Type::i32); } if (name == STORE2) { return builder.makeStore(2, @@ -2371,7 +2371,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { indexOr(align, 2), process(ast[2][0]), process(ast[2][1]), - i32); + Type::i32); } if (name == STORE4) { return builder.makeStore(4, @@ -2379,7 +2379,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { indexOr(align, 4), process(ast[2][0]), process(ast[2][1]), - i32); + Type::i32); } if (name == STORE8) { return builder.makeStore(8, @@ -2387,17 +2387,21 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { indexOr(align, 8), process(ast[2][0]), process(ast[2][1]), - i64); + Type::i64); } if (name == STOREF) { auto* value = process(ast[2][1]); - if (value->type == f64) { + if (value->type == Type::f64) { // asm.js allows storing a double to HEAPF32, we must cast // here value = builder.makeUnary(DemoteFloat64, value); } - return builder.makeStore( - 4, 0, indexOr(align, 4), process(ast[2][0]), value, f32); + return builder.makeStore(4, + 0, + indexOr(align, 4), + process(ast[2][0]), + value, + Type::f32); } if (name == STORED) { return builder.makeStore(8, @@ -2405,7 +2409,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { indexOr(align, 8), process(ast[2][0]), process(ast[2][1]), - f64); + Type::f64); } break; } @@ -2415,11 +2419,11 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { if (name == I64) { // no-op "coercion" / "cast", although we also tolerate i64(0) // for constants that fit in i32 - if (value->type == i32) { + if (value->type == Type::i32) { return builder.makeConst( Literal(int64_t(value->cast<Const>()->value.geti32()))); } else { - fixCallType(value, i64); + fixCallType(value, Type::i64); return value; } } @@ -2497,7 +2501,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { return builder.makeUnary(UnaryOp::PopcntInt64, value); } if (name == I64_ATOMICS_LOAD) { - return builder.makeAtomicLoad(8, 0, value, i64); + return builder.makeAtomicLoad(8, 0, value, Type::i64); } } else if (num == 2) { // 2 params,binary if (name == I64_CONST) { @@ -2587,37 +2591,37 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { // atomics if (name == I64_ATOMICS_STORE) { wasm.memory.shared = true; - 
return builder.makeAtomicStore(8, 0, left, right, i64); + return builder.makeAtomicStore(8, 0, left, right, Type::i64); } if (name == I64_ATOMICS_ADD) { wasm.memory.shared = true; return builder.makeAtomicRMW( - AtomicRMWOp::Add, 8, 0, left, right, i64); + AtomicRMWOp::Add, 8, 0, left, right, Type::i64); } if (name == I64_ATOMICS_SUB) { wasm.memory.shared = true; return builder.makeAtomicRMW( - AtomicRMWOp::Sub, 8, 0, left, right, i64); + AtomicRMWOp::Sub, 8, 0, left, right, Type::i64); } if (name == I64_ATOMICS_AND) { wasm.memory.shared = true; return builder.makeAtomicRMW( - AtomicRMWOp::And, 8, 0, left, right, i64); + AtomicRMWOp::And, 8, 0, left, right, Type::i64); } if (name == I64_ATOMICS_OR) { wasm.memory.shared = true; return builder.makeAtomicRMW( - AtomicRMWOp::Or, 8, 0, left, right, i64); + AtomicRMWOp::Or, 8, 0, left, right, Type::i64); } if (name == I64_ATOMICS_XOR) { wasm.memory.shared = true; return builder.makeAtomicRMW( - AtomicRMWOp::Xor, 8, 0, left, right, i64); + AtomicRMWOp::Xor, 8, 0, left, right, Type::i64); } if (name == I64_ATOMICS_EXCHANGE) { wasm.memory.shared = true; return builder.makeAtomicRMW( - AtomicRMWOp::Xchg, 8, 0, left, right, i64); + AtomicRMWOp::Xchg, 8, 0, left, right, Type::i64); } } else if (num == 3) { if (name == I64_ATOMICS_COMPAREEXCHANGE) { @@ -2627,7 +2631,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { process(ast[2][0]), process(ast[2][1]), process(ast[2][2]), - i64); + Type::i64); } } break; @@ -2723,13 +2727,13 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { // we don't know the table offset yet. emit target = target + // callImport(tableName), which we fix up later when we know how asm // function tables are layed out inside the wasm table. - ret->target = - builder.makeBinary(BinaryOp::AddInt32, - ret->target, - builder.makeCall(target[1]->getIString(), {}, i32)); + ret->target = builder.makeBinary( + BinaryOp::AddInt32, + ret->target, + builder.makeCall(target[1]->getIString(), {}, Type::i32)); return ret; } else if (what == RETURN) { - Type type = !!ast[1] ? detectWasmType(ast[1], &asmData) : none; + Type type = !!ast[1] ? detectWasmType(ast[1], &asmData) : Type::none; if (seenReturn) { assert(function->sig.results == type); } else { @@ -3004,7 +3008,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { auto conv = allocator.alloc<Unary>(); conv->op = ReinterpretFloat32; conv->value = process(writtenValue); - if (conv->value->type == f64) { + if (conv->value->type == Type::f64) { // this has an implicit f64->f32 in the write to memory conv->value = builder.makeUnary(DemoteFloat64, conv->value); } @@ -3070,26 +3074,26 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { // outside if not Break* breakWhenNotMatching = nullptr; - if (br->condition->type == i32) { + if (br->condition->type == Type::i32) { Binary* offsetor = allocator.alloc<Binary>(); offsetor->op = BinaryOp::SubInt32; offsetor->left = br->condition; offsetor->right = builder.makeConst(Literal(int32_t(min))); - offsetor->type = i32; + offsetor->type = Type::i32; br->condition = offsetor; } else { - assert(br->condition->type == i64); + assert(br->condition->type == Type::i64); // 64-bit condition. 
after offsetting it must be in a reasonable // range, but the offsetting itself must be 64-bit Binary* offsetor = allocator.alloc<Binary>(); offsetor->op = BinaryOp::SubInt64; offsetor->left = br->condition; offsetor->right = builder.makeConst(Literal(int64_t(min))); - offsetor->type = i64; + offsetor->type = Type::i64; // the switch itself can be 32-bit, as the range is in a reasonable // range. so after offsetting, we need to make sure there are no high // bits, then we can just look at the lower 32 bits - auto temp = Builder::addVar(function, i64); + auto temp = Builder::addVar(function, Type::i64); auto* block = builder.makeBlock(); block->list.push_back(builder.makeLocalSet(temp, offsetor)); // if high bits, we can break to the default (we'll fill in the name @@ -3100,10 +3104,10 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { builder.makeUnary( UnaryOp::WrapInt64, builder.makeBinary(BinaryOp::ShrUInt64, - builder.makeLocalGet(temp, i64), + builder.makeLocalGet(temp, Type::i64), builder.makeConst(Literal(int64_t(32)))))); block->list.push_back(breakWhenNotMatching); - block->list.push_back(builder.makeLocalGet(temp, i64)); + block->list.push_back(builder.makeLocalGet(temp, Type::i64)); block->finalize(); br->condition = builder.makeUnary(UnaryOp::WrapInt64, block); } @@ -3173,7 +3177,8 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { } else { name = nameMapper.pushLabelName("switch-case"); auto* iff = builder.makeIf( - builder.makeBinary(br->condition->type == i32 ? EqInt32 : EqInt64, + builder.makeBinary(br->condition->type == Type::i32 ? EqInt32 + : EqInt64, builder.makeLocalGet(var, br->condition->type), builder.makeConst(getLiteral(condition))), builder.makeBreak(name), diff --git a/src/asmjs/asm_v_wasm.cpp b/src/asmjs/asm_v_wasm.cpp index 5959db43e..b499bd6e2 100644 --- a/src/asmjs/asm_v_wasm.cpp +++ b/src/asmjs/asm_v_wasm.cpp @@ -43,24 +43,24 @@ Type asmToWasmType(AsmType asmType) { AsmType wasmToAsmType(Type type) { switch (type) { - case i32: + case Type::i32: return ASM_INT; - case f32: + case Type::f32: return ASM_FLOAT; - case f64: + case Type::f64: return ASM_DOUBLE; - case i64: + case Type::i64: return ASM_INT64; - case v128: + case Type::v128: assert(false && "v128 not implemented yet"); - case funcref: - case anyref: - case nullref: - case exnref: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: assert(false && "reference types are not supported by asm2wasm"); - case none: + case Type::none: return ASM_NONE; - case unreachable: + case Type::unreachable: WASM_UNREACHABLE("invalid type"); } WASM_UNREACHABLE("invalid type"); @@ -68,27 +68,27 @@ AsmType wasmToAsmType(Type type) { char getSig(Type type) { switch (type) { - case i32: + case Type::i32: return 'i'; - case i64: + case Type::i64: return 'j'; - case f32: + case Type::f32: return 'f'; - case f64: + case Type::f64: return 'd'; - case v128: + case Type::v128: return 'V'; - case funcref: + case Type::funcref: return 'F'; - case anyref: + case Type::anyref: return 'A'; - case nullref: + case Type::nullref: return 'N'; - case exnref: + case Type::exnref: return 'E'; - case none: + case Type::none: return 'v'; - case unreachable: + case Type::unreachable: WASM_UNREACHABLE("invalid type"); } WASM_UNREACHABLE("invalid type"); @@ -109,14 +109,14 @@ std::string getSig(Type results, Type params) { } Expression* ensureDouble(Expression* expr, MixedArena& allocator) { - if (expr->type == f32) { + if (expr->type == Type::f32) { auto conv = allocator.alloc<Unary>(); conv->op = 
PromoteFloat32; conv->value = expr; conv->type = Type::f64; return conv; } - assert(expr->type == f64); + assert(expr->type == Type::f64); return expr; } diff --git a/src/binaryen-c.cpp b/src/binaryen-c.cpp index a98ff3407..44830cd75 100644 --- a/src/binaryen-c.cpp +++ b/src/binaryen-c.cpp @@ -272,17 +272,17 @@ extern "C" { // Core types -BinaryenType BinaryenTypeNone(void) { return none; } -BinaryenType BinaryenTypeInt32(void) { return i32; } -BinaryenType BinaryenTypeInt64(void) { return i64; } -BinaryenType BinaryenTypeFloat32(void) { return f32; } -BinaryenType BinaryenTypeFloat64(void) { return f64; } -BinaryenType BinaryenTypeVec128(void) { return v128; } -BinaryenType BinaryenTypeFuncref(void) { return funcref; } -BinaryenType BinaryenTypeAnyref(void) { return anyref; } -BinaryenType BinaryenTypeNullref(void) { return nullref; } -BinaryenType BinaryenTypeExnref(void) { return exnref; } -BinaryenType BinaryenTypeUnreachable(void) { return unreachable; } +BinaryenType BinaryenTypeNone(void) { return Type::none; } +BinaryenType BinaryenTypeInt32(void) { return Type::i32; } +BinaryenType BinaryenTypeInt64(void) { return Type::i64; } +BinaryenType BinaryenTypeFloat32(void) { return Type::f32; } +BinaryenType BinaryenTypeFloat64(void) { return Type::f64; } +BinaryenType BinaryenTypeVec128(void) { return Type::v128; } +BinaryenType BinaryenTypeFuncref(void) { return Type::funcref; } +BinaryenType BinaryenTypeAnyref(void) { return Type::anyref; } +BinaryenType BinaryenTypeNullref(void) { return Type::nullref; } +BinaryenType BinaryenTypeExnref(void) { return Type::exnref; } +BinaryenType BinaryenTypeUnreachable(void) { return Type::unreachable; } BinaryenType BinaryenTypeAuto(void) { return uint32_t(-1); } BinaryenType BinaryenTypeCreate(BinaryenType* types, uint32_t numTypes) { @@ -321,11 +321,11 @@ void BinaryenTypeExpand(BinaryenType t, BinaryenType* buf) { } } -WASM_DEPRECATED BinaryenType BinaryenNone(void) { return none; } -WASM_DEPRECATED BinaryenType BinaryenInt32(void) { return i32; } -WASM_DEPRECATED BinaryenType BinaryenInt64(void) { return i64; } -WASM_DEPRECATED BinaryenType BinaryenFloat32(void) { return f32; } -WASM_DEPRECATED BinaryenType BinaryenFloat64(void) { return f64; } +WASM_DEPRECATED BinaryenType BinaryenNone(void) { return Type::none; } +WASM_DEPRECATED BinaryenType BinaryenInt32(void) { return Type::i32; } +WASM_DEPRECATED BinaryenType BinaryenInt64(void) { return Type::i64; } +WASM_DEPRECATED BinaryenType BinaryenFloat32(void) { return Type::f32; } +WASM_DEPRECATED BinaryenType BinaryenFloat64(void) { return Type::f64; } WASM_DEPRECATED BinaryenType BinaryenUndefined(void) { return uint32_t(-1); } // Expression ids diff --git a/src/cfg/Relooper.cpp b/src/cfg/Relooper.cpp index d46b39a8e..0c9b581bd 100644 --- a/src/cfg/Relooper.cpp +++ b/src/cfg/Relooper.cpp @@ -378,10 +378,10 @@ wasm::Expression* Block::Render(RelooperBuilder& Builder, bool InLoop) { // breaking on Outer leads to the content in NextOuter Outer->name = CurrName; NextOuter->list.push_back(CurrContent); - // if this is not a dead end, also need to break to the outside - // this is both an optimization, and avoids incorrectness as adding - // a brak in unreachable code can make a place look reachable that isn't - if (CurrContent->type != wasm::unreachable) { + // if this is not a dead end, also need to break to the outside this is + // both an optimization, and avoids incorrectness as adding a break in + // unreachable code can make a place look reachable that isn't + if (CurrContent->type != 
wasm::Type::unreachable) { NextOuter->list.push_back(Builder.makeBreak(SwitchLeave)); } // prepare for more nesting @@ -835,7 +835,7 @@ private: } } NewList.push_back(Curr); - if (Curr->type == wasm::unreachable) { + if (Curr->type == wasm::Type::unreachable) { SeenUnreachableType = true; } }; diff --git a/src/cfg/Relooper.h b/src/cfg/Relooper.h index 6b38816d0..f13d02711 100644 --- a/src/cfg/Relooper.h +++ b/src/cfg/Relooper.h @@ -50,7 +50,7 @@ public: : wasm::Builder(wasm), labelHelper(labelHelper) {} wasm::LocalGet* makeGetLabel() { - return makeLocalGet(labelHelper, wasm::i32); + return makeLocalGet(labelHelper, wasm::Type::i32); } wasm::LocalSet* makeSetLabel(wasm::Index value) { return makeLocalSet(labelHelper, makeConst(wasm::Literal(int32_t(value)))); diff --git a/src/dataflow/node.h b/src/dataflow/node.h index 612a2f613..bb537afc2 100644 --- a/src/dataflow/node.h +++ b/src/dataflow/node.h @@ -163,7 +163,7 @@ struct Node { case Zext: return getValue(0)->getWasmType(); case Bad: - return unreachable; + return wasm::Type::unreachable; default: WASM_UNREACHABLE("invalid node type"); } diff --git a/src/gen-s-parser.inc b/src/gen-s-parser.inc index eb5626b3f..eab7be468 100644 --- a/src/gen-s-parser.inc +++ b/src/gen-s-parser.inc @@ -10,7 +10,7 @@ switch (op[0]) { case 'a': { switch (op[1]) { case 'n': - if (strcmp(op, "anyref.pop") == 0) { return makePop(anyref); } + if (strcmp(op, "anyref.pop") == 0) { return makePop(Type::anyref); } goto parse_error; case 't': { switch (op[7]) { @@ -84,7 +84,7 @@ switch (op[0]) { if (strcmp(op, "else") == 0) { return makeThenOrElse(s); } goto parse_error; case 'x': - if (strcmp(op, "exnref.pop") == 0) { return makePop(exnref); } + if (strcmp(op, "exnref.pop") == 0) { return makePop(Type::exnref); } goto parse_error; default: goto parse_error; } @@ -116,7 +116,7 @@ switch (op[0]) { case 'n': { switch (op[7]) { case 's': - if (strcmp(op, "f32.const") == 0) { return makeConst(s, f32); } + if (strcmp(op, "f32.const") == 0) { return makeConst(s, Type::f32); } goto parse_error; case 'v': { switch (op[13]) { @@ -191,7 +191,7 @@ switch (op[0]) { if (strcmp(op, "f32.le") == 0) { return makeBinary(s, BinaryOp::LeFloat32); } goto parse_error; case 'o': - if (strcmp(op, "f32.load") == 0) { return makeLoad(s, f32, /*isAtomic=*/false); } + if (strcmp(op, "f32.load") == 0) { return makeLoad(s, Type::f32, /*isAtomic=*/false); } goto parse_error; case 't': if (strcmp(op, "f32.lt") == 0) { return makeBinary(s, BinaryOp::LtFloat32); } @@ -228,7 +228,7 @@ switch (op[0]) { } } case 'p': - if (strcmp(op, "f32.pop") == 0) { return makePop(f32); } + if (strcmp(op, "f32.pop") == 0) { return makePop(Type::f32); } goto parse_error; case 'r': if (strcmp(op, "f32.reinterpret_i32") == 0) { return makeUnary(s, UnaryOp::ReinterpretInt32); } @@ -239,7 +239,7 @@ switch (op[0]) { if (strcmp(op, "f32.sqrt") == 0) { return makeUnary(s, UnaryOp::SqrtFloat32); } goto parse_error; case 't': - if (strcmp(op, "f32.store") == 0) { return makeStore(s, f32, /*isAtomic=*/false); } + if (strcmp(op, "f32.store") == 0) { return makeStore(s, Type::f32, /*isAtomic=*/false); } goto parse_error; case 'u': if (strcmp(op, "f32.sub") == 0) { return makeBinary(s, BinaryOp::SubFloat32); } @@ -397,7 +397,7 @@ switch (op[0]) { case 'n': { switch (op[7]) { case 's': - if (strcmp(op, "f64.const") == 0) { return makeConst(s, f64); } + if (strcmp(op, "f64.const") == 0) { return makeConst(s, Type::f64); } goto parse_error; case 'v': { switch (op[13]) { @@ -464,7 +464,7 @@ switch (op[0]) { if (strcmp(op, "f64.le") 
== 0) { return makeBinary(s, BinaryOp::LeFloat64); } goto parse_error; case 'o': - if (strcmp(op, "f64.load") == 0) { return makeLoad(s, f64, /*isAtomic=*/false); } + if (strcmp(op, "f64.load") == 0) { return makeLoad(s, Type::f64, /*isAtomic=*/false); } goto parse_error; case 't': if (strcmp(op, "f64.lt") == 0) { return makeBinary(s, BinaryOp::LtFloat64); } @@ -503,7 +503,7 @@ switch (op[0]) { case 'p': { switch (op[5]) { case 'o': - if (strcmp(op, "f64.pop") == 0) { return makePop(f64); } + if (strcmp(op, "f64.pop") == 0) { return makePop(Type::f64); } goto parse_error; case 'r': if (strcmp(op, "f64.promote_f32") == 0) { return makeUnary(s, UnaryOp::PromoteFloat32); } @@ -520,7 +520,7 @@ switch (op[0]) { if (strcmp(op, "f64.sqrt") == 0) { return makeUnary(s, UnaryOp::SqrtFloat64); } goto parse_error; case 't': - if (strcmp(op, "f64.store") == 0) { return makeStore(s, f64, /*isAtomic=*/false); } + if (strcmp(op, "f64.store") == 0) { return makeStore(s, Type::f64, /*isAtomic=*/false); } goto parse_error; case 'u': if (strcmp(op, "f64.sub") == 0) { return makeBinary(s, BinaryOp::SubFloat64); } @@ -654,7 +654,7 @@ switch (op[0]) { } } case 'u': - if (strcmp(op, "funcref.pop") == 0) { return makePop(funcref); } + if (strcmp(op, "funcref.pop") == 0) { return makePop(Type::funcref); } goto parse_error; default: goto parse_error; } @@ -944,13 +944,13 @@ switch (op[0]) { case 'l': { switch (op[15]) { case '\0': - if (strcmp(op, "i32.atomic.load") == 0) { return makeLoad(s, i32, /*isAtomic=*/true); } + if (strcmp(op, "i32.atomic.load") == 0) { return makeLoad(s, Type::i32, /*isAtomic=*/true); } goto parse_error; case '1': - if (strcmp(op, "i32.atomic.load16_u") == 0) { return makeLoad(s, i32, /*isAtomic=*/true); } + if (strcmp(op, "i32.atomic.load16_u") == 0) { return makeLoad(s, Type::i32, /*isAtomic=*/true); } goto parse_error; case '8': - if (strcmp(op, "i32.atomic.load8_u") == 0) { return makeLoad(s, i32, /*isAtomic=*/true); } + if (strcmp(op, "i32.atomic.load8_u") == 0) { return makeLoad(s, Type::i32, /*isAtomic=*/true); } goto parse_error; default: goto parse_error; } @@ -962,30 +962,30 @@ switch (op[0]) { case 'a': { switch (op[16]) { case 'd': - if (strcmp(op, "i32.atomic.rmw.add") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw.add") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; case 'n': - if (strcmp(op, "i32.atomic.rmw.and") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw.and") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; default: goto parse_error; } } case 'c': - if (strcmp(op, "i32.atomic.rmw.cmpxchg") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw.cmpxchg") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; case 'o': - if (strcmp(op, "i32.atomic.rmw.or") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw.or") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; case 's': - if (strcmp(op, "i32.atomic.rmw.sub") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw.sub") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; case 'x': { switch (op[16]) { case 'c': - if (strcmp(op, "i32.atomic.rmw.xchg") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw.xchg") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; case 'o': - if (strcmp(op, "i32.atomic.rmw.xor") == 
0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw.xor") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; default: goto parse_error; } @@ -998,30 +998,30 @@ switch (op[0]) { case 'a': { switch (op[18]) { case 'd': - if (strcmp(op, "i32.atomic.rmw16.add_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw16.add_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; case 'n': - if (strcmp(op, "i32.atomic.rmw16.and_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw16.and_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; default: goto parse_error; } } case 'c': - if (strcmp(op, "i32.atomic.rmw16.cmpxchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw16.cmpxchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; case 'o': - if (strcmp(op, "i32.atomic.rmw16.or_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw16.or_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; case 's': - if (strcmp(op, "i32.atomic.rmw16.sub_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw16.sub_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; case 'x': { switch (op[18]) { case 'c': - if (strcmp(op, "i32.atomic.rmw16.xchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw16.xchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; case 'o': - if (strcmp(op, "i32.atomic.rmw16.xor_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw16.xor_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; default: goto parse_error; } @@ -1034,30 +1034,30 @@ switch (op[0]) { case 'a': { switch (op[17]) { case 'd': - if (strcmp(op, "i32.atomic.rmw8.add_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw8.add_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; case 'n': - if (strcmp(op, "i32.atomic.rmw8.and_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw8.and_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; default: goto parse_error; } } case 'c': - if (strcmp(op, "i32.atomic.rmw8.cmpxchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw8.cmpxchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; case 'o': - if (strcmp(op, "i32.atomic.rmw8.or_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw8.or_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; case 's': - if (strcmp(op, "i32.atomic.rmw8.sub_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw8.sub_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; case 'x': { switch (op[17]) { case 'c': - if (strcmp(op, "i32.atomic.rmw8.xchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw8.xchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; case 'o': - if (strcmp(op, "i32.atomic.rmw8.xor_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i32); } + if (strcmp(op, "i32.atomic.rmw8.xor_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i32); } goto parse_error; default: goto 
parse_error; } @@ -1071,19 +1071,19 @@ switch (op[0]) { case 's': { switch (op[16]) { case '\0': - if (strcmp(op, "i32.atomic.store") == 0) { return makeStore(s, i32, /*isAtomic=*/true); } + if (strcmp(op, "i32.atomic.store") == 0) { return makeStore(s, Type::i32, /*isAtomic=*/true); } goto parse_error; case '1': - if (strcmp(op, "i32.atomic.store16") == 0) { return makeStore(s, i32, /*isAtomic=*/true); } + if (strcmp(op, "i32.atomic.store16") == 0) { return makeStore(s, Type::i32, /*isAtomic=*/true); } goto parse_error; case '8': - if (strcmp(op, "i32.atomic.store8") == 0) { return makeStore(s, i32, /*isAtomic=*/true); } + if (strcmp(op, "i32.atomic.store8") == 0) { return makeStore(s, Type::i32, /*isAtomic=*/true); } goto parse_error; default: goto parse_error; } } case 'w': - if (strcmp(op, "i32.atomic.wait") == 0) { return makeAtomicWait(s, i32); } + if (strcmp(op, "i32.atomic.wait") == 0) { return makeAtomicWait(s, Type::i32); } goto parse_error; default: goto parse_error; } @@ -1097,7 +1097,7 @@ switch (op[0]) { if (strcmp(op, "i32.clz") == 0) { return makeUnary(s, UnaryOp::ClzInt32); } goto parse_error; case 'o': - if (strcmp(op, "i32.const") == 0) { return makeConst(s, i32); } + if (strcmp(op, "i32.const") == 0) { return makeConst(s, Type::i32); } goto parse_error; case 't': if (strcmp(op, "i32.ctz") == 0) { return makeUnary(s, UnaryOp::CtzInt32); } @@ -1186,15 +1186,15 @@ switch (op[0]) { case 'o': { switch (op[8]) { case '\0': - if (strcmp(op, "i32.load") == 0) { return makeLoad(s, i32, /*isAtomic=*/false); } + if (strcmp(op, "i32.load") == 0) { return makeLoad(s, Type::i32, /*isAtomic=*/false); } goto parse_error; case '1': { switch (op[11]) { case 's': - if (strcmp(op, "i32.load16_s") == 0) { return makeLoad(s, i32, /*isAtomic=*/false); } + if (strcmp(op, "i32.load16_s") == 0) { return makeLoad(s, Type::i32, /*isAtomic=*/false); } goto parse_error; case 'u': - if (strcmp(op, "i32.load16_u") == 0) { return makeLoad(s, i32, /*isAtomic=*/false); } + if (strcmp(op, "i32.load16_u") == 0) { return makeLoad(s, Type::i32, /*isAtomic=*/false); } goto parse_error; default: goto parse_error; } @@ -1202,10 +1202,10 @@ switch (op[0]) { case '8': { switch (op[10]) { case 's': - if (strcmp(op, "i32.load8_s") == 0) { return makeLoad(s, i32, /*isAtomic=*/false); } + if (strcmp(op, "i32.load8_s") == 0) { return makeLoad(s, Type::i32, /*isAtomic=*/false); } goto parse_error; case 'u': - if (strcmp(op, "i32.load8_u") == 0) { return makeLoad(s, i32, /*isAtomic=*/false); } + if (strcmp(op, "i32.load8_u") == 0) { return makeLoad(s, Type::i32, /*isAtomic=*/false); } goto parse_error; default: goto parse_error; } @@ -1239,7 +1239,7 @@ switch (op[0]) { case 'p': { switch (op[7]) { case '\0': - if (strcmp(op, "i32.pop") == 0) { return makePop(i32); } + if (strcmp(op, "i32.pop") == 0) { return makePop(Type::i32); } goto parse_error; case 'c': if (strcmp(op, "i32.popcnt") == 0) { return makeUnary(s, UnaryOp::PopcntInt32); } @@ -1306,13 +1306,13 @@ switch (op[0]) { case 't': { switch (op[9]) { case '\0': - if (strcmp(op, "i32.store") == 0) { return makeStore(s, i32, /*isAtomic=*/false); } + if (strcmp(op, "i32.store") == 0) { return makeStore(s, Type::i32, /*isAtomic=*/false); } goto parse_error; case '1': - if (strcmp(op, "i32.store16") == 0) { return makeStore(s, i32, /*isAtomic=*/false); } + if (strcmp(op, "i32.store16") == 0) { return makeStore(s, Type::i32, /*isAtomic=*/false); } goto parse_error; case '8': - if (strcmp(op, "i32.store8") == 0) { return makeStore(s, i32, /*isAtomic=*/false); } + if 
(strcmp(op, "i32.store8") == 0) { return makeStore(s, Type::i32, /*isAtomic=*/false); } goto parse_error; default: goto parse_error; } @@ -1621,16 +1621,16 @@ switch (op[0]) { case 'l': { switch (op[15]) { case '\0': - if (strcmp(op, "i64.atomic.load") == 0) { return makeLoad(s, i64, /*isAtomic=*/true); } + if (strcmp(op, "i64.atomic.load") == 0) { return makeLoad(s, Type::i64, /*isAtomic=*/true); } goto parse_error; case '1': - if (strcmp(op, "i64.atomic.load16_u") == 0) { return makeLoad(s, i64, /*isAtomic=*/true); } + if (strcmp(op, "i64.atomic.load16_u") == 0) { return makeLoad(s, Type::i64, /*isAtomic=*/true); } goto parse_error; case '3': - if (strcmp(op, "i64.atomic.load32_u") == 0) { return makeLoad(s, i64, /*isAtomic=*/true); } + if (strcmp(op, "i64.atomic.load32_u") == 0) { return makeLoad(s, Type::i64, /*isAtomic=*/true); } goto parse_error; case '8': - if (strcmp(op, "i64.atomic.load8_u") == 0) { return makeLoad(s, i64, /*isAtomic=*/true); } + if (strcmp(op, "i64.atomic.load8_u") == 0) { return makeLoad(s, Type::i64, /*isAtomic=*/true); } goto parse_error; default: goto parse_error; } @@ -1642,30 +1642,30 @@ switch (op[0]) { case 'a': { switch (op[16]) { case 'd': - if (strcmp(op, "i64.atomic.rmw.add") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw.add") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 'n': - if (strcmp(op, "i64.atomic.rmw.and") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw.and") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; default: goto parse_error; } } case 'c': - if (strcmp(op, "i64.atomic.rmw.cmpxchg") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw.cmpxchg") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 'o': - if (strcmp(op, "i64.atomic.rmw.or") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw.or") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 's': - if (strcmp(op, "i64.atomic.rmw.sub") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw.sub") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 'x': { switch (op[16]) { case 'c': - if (strcmp(op, "i64.atomic.rmw.xchg") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw.xchg") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 'o': - if (strcmp(op, "i64.atomic.rmw.xor") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw.xor") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; default: goto parse_error; } @@ -1678,30 +1678,30 @@ switch (op[0]) { case 'a': { switch (op[18]) { case 'd': - if (strcmp(op, "i64.atomic.rmw16.add_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw16.add_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 'n': - if (strcmp(op, "i64.atomic.rmw16.and_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw16.and_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; default: goto parse_error; } } case 'c': - if (strcmp(op, "i64.atomic.rmw16.cmpxchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw16.cmpxchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 'o': - if (strcmp(op, 
"i64.atomic.rmw16.or_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw16.or_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 's': - if (strcmp(op, "i64.atomic.rmw16.sub_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw16.sub_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 'x': { switch (op[18]) { case 'c': - if (strcmp(op, "i64.atomic.rmw16.xchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw16.xchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 'o': - if (strcmp(op, "i64.atomic.rmw16.xor_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw16.xor_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; default: goto parse_error; } @@ -1714,30 +1714,30 @@ switch (op[0]) { case 'a': { switch (op[18]) { case 'd': - if (strcmp(op, "i64.atomic.rmw32.add_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw32.add_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 'n': - if (strcmp(op, "i64.atomic.rmw32.and_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw32.and_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; default: goto parse_error; } } case 'c': - if (strcmp(op, "i64.atomic.rmw32.cmpxchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw32.cmpxchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 'o': - if (strcmp(op, "i64.atomic.rmw32.or_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw32.or_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 's': - if (strcmp(op, "i64.atomic.rmw32.sub_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw32.sub_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 'x': { switch (op[18]) { case 'c': - if (strcmp(op, "i64.atomic.rmw32.xchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw32.xchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 'o': - if (strcmp(op, "i64.atomic.rmw32.xor_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw32.xor_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; default: goto parse_error; } @@ -1750,30 +1750,30 @@ switch (op[0]) { case 'a': { switch (op[17]) { case 'd': - if (strcmp(op, "i64.atomic.rmw8.add_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw8.add_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 'n': - if (strcmp(op, "i64.atomic.rmw8.and_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw8.and_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; default: goto parse_error; } } case 'c': - if (strcmp(op, "i64.atomic.rmw8.cmpxchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw8.cmpxchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 'o': - if (strcmp(op, "i64.atomic.rmw8.or_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw8.or_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } 
goto parse_error; case 's': - if (strcmp(op, "i64.atomic.rmw8.sub_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw8.sub_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 'x': { switch (op[17]) { case 'c': - if (strcmp(op, "i64.atomic.rmw8.xchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw8.xchg_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; case 'o': - if (strcmp(op, "i64.atomic.rmw8.xor_u") == 0) { return makeAtomicRMWOrCmpxchg(s, i64); } + if (strcmp(op, "i64.atomic.rmw8.xor_u") == 0) { return makeAtomicRMWOrCmpxchg(s, Type::i64); } goto parse_error; default: goto parse_error; } @@ -1787,22 +1787,22 @@ switch (op[0]) { case 's': { switch (op[16]) { case '\0': - if (strcmp(op, "i64.atomic.store") == 0) { return makeStore(s, i64, /*isAtomic=*/true); } + if (strcmp(op, "i64.atomic.store") == 0) { return makeStore(s, Type::i64, /*isAtomic=*/true); } goto parse_error; case '1': - if (strcmp(op, "i64.atomic.store16") == 0) { return makeStore(s, i64, /*isAtomic=*/true); } + if (strcmp(op, "i64.atomic.store16") == 0) { return makeStore(s, Type::i64, /*isAtomic=*/true); } goto parse_error; case '3': - if (strcmp(op, "i64.atomic.store32") == 0) { return makeStore(s, i64, /*isAtomic=*/true); } + if (strcmp(op, "i64.atomic.store32") == 0) { return makeStore(s, Type::i64, /*isAtomic=*/true); } goto parse_error; case '8': - if (strcmp(op, "i64.atomic.store8") == 0) { return makeStore(s, i64, /*isAtomic=*/true); } + if (strcmp(op, "i64.atomic.store8") == 0) { return makeStore(s, Type::i64, /*isAtomic=*/true); } goto parse_error; default: goto parse_error; } } case 'w': - if (strcmp(op, "i64.atomic.wait") == 0) { return makeAtomicWait(s, i64); } + if (strcmp(op, "i64.atomic.wait") == 0) { return makeAtomicWait(s, Type::i64); } goto parse_error; default: goto parse_error; } @@ -1816,7 +1816,7 @@ switch (op[0]) { if (strcmp(op, "i64.clz") == 0) { return makeUnary(s, UnaryOp::ClzInt64); } goto parse_error; case 'o': - if (strcmp(op, "i64.const") == 0) { return makeConst(s, i64); } + if (strcmp(op, "i64.const") == 0) { return makeConst(s, Type::i64); } goto parse_error; case 't': if (strcmp(op, "i64.ctz") == 0) { return makeUnary(s, UnaryOp::CtzInt64); } @@ -1919,15 +1919,15 @@ switch (op[0]) { case 'o': { switch (op[8]) { case '\0': - if (strcmp(op, "i64.load") == 0) { return makeLoad(s, i64, /*isAtomic=*/false); } + if (strcmp(op, "i64.load") == 0) { return makeLoad(s, Type::i64, /*isAtomic=*/false); } goto parse_error; case '1': { switch (op[11]) { case 's': - if (strcmp(op, "i64.load16_s") == 0) { return makeLoad(s, i64, /*isAtomic=*/false); } + if (strcmp(op, "i64.load16_s") == 0) { return makeLoad(s, Type::i64, /*isAtomic=*/false); } goto parse_error; case 'u': - if (strcmp(op, "i64.load16_u") == 0) { return makeLoad(s, i64, /*isAtomic=*/false); } + if (strcmp(op, "i64.load16_u") == 0) { return makeLoad(s, Type::i64, /*isAtomic=*/false); } goto parse_error; default: goto parse_error; } @@ -1935,10 +1935,10 @@ switch (op[0]) { case '3': { switch (op[11]) { case 's': - if (strcmp(op, "i64.load32_s") == 0) { return makeLoad(s, i64, /*isAtomic=*/false); } + if (strcmp(op, "i64.load32_s") == 0) { return makeLoad(s, Type::i64, /*isAtomic=*/false); } goto parse_error; case 'u': - if (strcmp(op, "i64.load32_u") == 0) { return makeLoad(s, i64, /*isAtomic=*/false); } + if (strcmp(op, "i64.load32_u") == 0) { return makeLoad(s, Type::i64, /*isAtomic=*/false); } goto 
parse_error; default: goto parse_error; } @@ -1946,10 +1946,10 @@ switch (op[0]) { case '8': { switch (op[10]) { case 's': - if (strcmp(op, "i64.load8_s") == 0) { return makeLoad(s, i64, /*isAtomic=*/false); } + if (strcmp(op, "i64.load8_s") == 0) { return makeLoad(s, Type::i64, /*isAtomic=*/false); } goto parse_error; case 'u': - if (strcmp(op, "i64.load8_u") == 0) { return makeLoad(s, i64, /*isAtomic=*/false); } + if (strcmp(op, "i64.load8_u") == 0) { return makeLoad(s, Type::i64, /*isAtomic=*/false); } goto parse_error; default: goto parse_error; } @@ -1983,7 +1983,7 @@ switch (op[0]) { case 'p': { switch (op[7]) { case '\0': - if (strcmp(op, "i64.pop") == 0) { return makePop(i64); } + if (strcmp(op, "i64.pop") == 0) { return makePop(Type::i64); } goto parse_error; case 'c': if (strcmp(op, "i64.popcnt") == 0) { return makeUnary(s, UnaryOp::PopcntInt64); } @@ -2050,16 +2050,16 @@ switch (op[0]) { case 't': { switch (op[9]) { case '\0': - if (strcmp(op, "i64.store") == 0) { return makeStore(s, i64, /*isAtomic=*/false); } + if (strcmp(op, "i64.store") == 0) { return makeStore(s, Type::i64, /*isAtomic=*/false); } goto parse_error; case '1': - if (strcmp(op, "i64.store16") == 0) { return makeStore(s, i64, /*isAtomic=*/false); } + if (strcmp(op, "i64.store16") == 0) { return makeStore(s, Type::i64, /*isAtomic=*/false); } goto parse_error; case '3': - if (strcmp(op, "i64.store32") == 0) { return makeStore(s, i64, /*isAtomic=*/false); } + if (strcmp(op, "i64.store32") == 0) { return makeStore(s, Type::i64, /*isAtomic=*/false); } goto parse_error; case '8': - if (strcmp(op, "i64.store8") == 0) { return makeStore(s, i64, /*isAtomic=*/false); } + if (strcmp(op, "i64.store8") == 0) { return makeStore(s, Type::i64, /*isAtomic=*/false); } goto parse_error; default: goto parse_error; } @@ -2489,7 +2489,7 @@ switch (op[0]) { if (strcmp(op, "nop") == 0) { return makeNop(); } goto parse_error; case 'u': - if (strcmp(op, "nullref.pop") == 0) { return makePop(nullref); } + if (strcmp(op, "nullref.pop") == 0) { return makePop(Type::nullref); } goto parse_error; default: goto parse_error; } @@ -2589,10 +2589,10 @@ switch (op[0]) { if (strcmp(op, "v128.bitselect") == 0) { return makeSIMDTernary(s, SIMDTernaryOp::Bitselect); } goto parse_error; case 'c': - if (strcmp(op, "v128.const") == 0) { return makeConst(s, v128); } + if (strcmp(op, "v128.const") == 0) { return makeConst(s, Type::v128); } goto parse_error; case 'l': - if (strcmp(op, "v128.load") == 0) { return makeLoad(s, v128, /*isAtomic=*/false); } + if (strcmp(op, "v128.load") == 0) { return makeLoad(s, Type::v128, /*isAtomic=*/false); } goto parse_error; case 'n': if (strcmp(op, "v128.not") == 0) { return makeUnary(s, UnaryOp::NotVec128); } @@ -2601,10 +2601,10 @@ switch (op[0]) { if (strcmp(op, "v128.or") == 0) { return makeBinary(s, BinaryOp::OrVec128); } goto parse_error; case 'p': - if (strcmp(op, "v128.pop") == 0) { return makePop(v128); } + if (strcmp(op, "v128.pop") == 0) { return makePop(Type::v128); } goto parse_error; case 's': - if (strcmp(op, "v128.store") == 0) { return makeStore(s, v128, /*isAtomic=*/false); } + if (strcmp(op, "v128.store") == 0) { return makeStore(s, Type::v128, /*isAtomic=*/false); } goto parse_error; case 'x': if (strcmp(op, "v128.xor") == 0) { return makeBinary(s, BinaryOp::XorVec128); } diff --git a/src/ir/ExpressionAnalyzer.cpp b/src/ir/ExpressionAnalyzer.cpp index 4b9869ddd..a61af9ce5 100644 --- a/src/ir/ExpressionAnalyzer.cpp +++ b/src/ir/ExpressionAnalyzer.cpp @@ -151,7 +151,8 @@ template<typename T> void 
visitImmediates(Expression* curr, T& visitor) { } void visitLoad(Load* curr) { visitor.visitInt(curr->bytes); - if (curr->type != unreachable && curr->bytes < curr->type.getByteSize()) { + if (curr->type != Type::unreachable && + curr->bytes < curr->type.getByteSize()) { visitor.visitInt(curr->signed_); } visitor.visitAddress(curr->offset); diff --git a/src/ir/ReFinalize.cpp b/src/ir/ReFinalize.cpp index 9243869a1..5e72850e6 100644 --- a/src/ir/ReFinalize.cpp +++ b/src/ir/ReFinalize.cpp @@ -21,7 +21,7 @@ namespace wasm { static Type getValueType(Expression* value) { - return value ? value->type : none; + return value ? value->type : Type::none; } namespace { @@ -41,7 +41,7 @@ void handleBranchForVisitBlock(T* curr, Name name, Module* module) { void ReFinalize::visitBlock(Block* curr) { if (curr->list.size() == 0) { - curr->type = none; + curr->type = Type::none; return; } // Get the least upper bound type of the last element and all branch return @@ -54,14 +54,14 @@ void ReFinalize::visitBlock(Block* curr) { return; } } - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { return; } // type is none, but we might be unreachable - if (curr->type == none) { + if (curr->type == Type::none) { for (auto* child : curr->list) { - if (child->type == unreachable) { - curr->type = unreachable; + if (child->type == Type::unreachable) { + curr->type = Type::unreachable; break; } } @@ -72,7 +72,7 @@ void ReFinalize::visitLoop(Loop* curr) { curr->finalize(); } void ReFinalize::visitBreak(Break* curr) { curr->finalize(); auto valueType = getValueType(curr->value); - if (valueType == unreachable) { + if (valueType == Type::unreachable) { replaceUntaken(curr->value, curr->condition); } else { updateBreakValueType(curr->name, valueType); @@ -81,7 +81,7 @@ void ReFinalize::visitBreak(Break* curr) { void ReFinalize::visitSwitch(Switch* curr) { curr->finalize(); auto valueType = getValueType(curr->value); - if (valueType == unreachable) { + if (valueType == Type::unreachable) { replaceUntaken(curr->value, curr->condition); } else { for (auto target : curr->targets) { @@ -164,7 +164,7 @@ void ReFinalize::updateBreakValueType(Name name, Type type) { // Replace an untaken branch/switch with an unreachable value. // A condition may also exist and may or may not be unreachable. void ReFinalize::replaceUntaken(Expression* value, Expression* condition) { - assert(value->type == unreachable); + assert(value->type == Type::unreachable); auto* replacement = value; if (condition) { Builder builder(*getModule()); diff --git a/src/ir/abstract.h b/src/ir/abstract.h index 76215d07f..06a1cd41c 100644 --- a/src/ir/abstract.h +++ b/src/ir/abstract.h @@ -53,13 +53,13 @@ enum Op { // addition, AddInt32. If the op does not exist, it returns Invalid. 
inline UnaryOp getUnary(Type type, Op op) { switch (type) { - case i32: { + case Type::i32: { return InvalidUnary; } - case i64: { + case Type::i64: { return InvalidUnary; } - case f32: { + case Type::f32: { switch (op) { case Neg: return NegFloat32; @@ -68,7 +68,7 @@ inline UnaryOp getUnary(Type type, Op op) { } break; } - case f64: { + case Type::f64: { switch (op) { case Neg: return NegFloat64; @@ -77,15 +77,15 @@ inline UnaryOp getUnary(Type type, Op op) { } break; } - case v128: { + case Type::v128: { WASM_UNREACHABLE("v128 not implemented yet"); } - case funcref: - case anyref: - case nullref: - case exnref: - case none: - case unreachable: { + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: + case Type::none: + case Type::unreachable: { return InvalidUnary; } } @@ -94,7 +94,7 @@ inline UnaryOp getUnary(Type type, Op op) { inline BinaryOp getBinary(Type type, Op op) { switch (type) { - case i32: { + case Type::i32: { switch (op) { case Add: return AddInt32; @@ -131,7 +131,7 @@ inline BinaryOp getBinary(Type type, Op op) { } break; } - case i64: { + case Type::i64: { switch (op) { case Add: return AddInt64; @@ -168,7 +168,7 @@ inline BinaryOp getBinary(Type type, Op op) { } break; } - case f32: { + case Type::f32: { switch (op) { case Add: return AddFloat32; @@ -189,7 +189,7 @@ inline BinaryOp getBinary(Type type, Op op) { } break; } - case f64: { + case Type::f64: { switch (op) { case Add: return AddFloat64; @@ -210,15 +210,15 @@ inline BinaryOp getBinary(Type type, Op op) { } break; } - case v128: { + case Type::v128: { WASM_UNREACHABLE("v128 not implemented yet"); } - case funcref: - case anyref: - case nullref: - case exnref: - case none: - case unreachable: { + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: + case Type::none: + case Type::unreachable: { return InvalidBinary; } } diff --git a/src/ir/bits.h b/src/ir/bits.h index cbac70d77..6b0697280 100644 --- a/src/ir/bits.h +++ b/src/ir/bits.h @@ -55,9 +55,9 @@ struct Bits { // gets the number of effective shifts a shift operation does. In // wasm, only 5 bits matter for 32-bit shifts, and 6 for 64. static Index getEffectiveShifts(Index amount, Type type) { - if (type == i32) { + if (type == Type::i32) { return amount & 31; - } else if (type == i64) { + } else if (type == Type::i64) { return amount & 63; } WASM_UNREACHABLE("unexpected type"); @@ -65,37 +65,41 @@ struct Bits { static Index getEffectiveShifts(Expression* expr) { auto* amount = expr->cast<Const>(); - if (amount->type == i32) { - return getEffectiveShifts(amount->value.geti32(), i32); - } else if (amount->type == i64) { - return getEffectiveShifts(amount->value.geti64(), i64); + if (amount->type == Type::i32) { + return getEffectiveShifts(amount->value.geti32(), Type::i32); + } else if (amount->type == Type::i64) { + return getEffectiveShifts(amount->value.geti64(), Type::i64); } WASM_UNREACHABLE("unexpected type"); } static Expression* makeSignExt(Expression* value, Index bytes, Module& wasm) { - if (value->type == i32) { + if (value->type == Type::i32) { if (bytes == 1 || bytes == 2) { auto shifts = bytes == 1 ? 
24 : 16; Builder builder(wasm); return builder.makeBinary( ShrSInt32, builder.makeBinary( - ShlInt32, value, LiteralUtils::makeFromInt32(shifts, i32, wasm)), - LiteralUtils::makeFromInt32(shifts, i32, wasm)); + ShlInt32, + value, + LiteralUtils::makeFromInt32(shifts, Type::i32, wasm)), + LiteralUtils::makeFromInt32(shifts, Type::i32, wasm)); } assert(bytes == 4); return value; // nothing to do } else { - assert(value->type == i64); + assert(value->type == Type::i64); if (bytes == 1 || bytes == 2 || bytes == 4) { auto shifts = bytes == 1 ? 56 : (bytes == 2 ? 48 : 32); Builder builder(wasm); return builder.makeBinary( ShrSInt64, builder.makeBinary( - ShlInt64, value, LiteralUtils::makeFromInt32(shifts, i64, wasm)), - LiteralUtils::makeFromInt32(shifts, i64, wasm)); + ShlInt64, + value, + LiteralUtils::makeFromInt32(shifts, Type::i64, wasm)), + LiteralUtils::makeFromInt32(shifts, Type::i64, wasm)); } assert(bytes == 8); return value; // nothing to do diff --git a/src/ir/block-utils.h b/src/ir/block-utils.h index 153dd45b3..d3a4e0a64 100644 --- a/src/ir/block-utils.h +++ b/src/ir/block-utils.h @@ -51,7 +51,7 @@ simplifyToContents(Block* block, T* parent, bool allowTypeChange = false) { // inside is unreachable (if both concrete, must match, and since no name // on block, we can't be branched to, so if singleton is unreachable, so // is the block) - assert(block->type.isConcrete() && singleton->type == unreachable); + assert(block->type.isConcrete() && singleton->type == Type::unreachable); // we could replace with unreachable, but would need to update all // the parent's types } diff --git a/src/ir/branch-utils.h b/src/ir/branch-utils.h index a22301e54..363a9c9e2 100644 --- a/src/ir/branch-utils.h +++ b/src/ir/branch-utils.h @@ -28,17 +28,17 @@ namespace BranchUtils { // (unreachable))) inline bool isBranchReachable(Break* br) { - return !(br->value && br->value->type == unreachable) && - !(br->condition && br->condition->type == unreachable); + return !(br->value && br->value->type == Type::unreachable) && + !(br->condition && br->condition->type == Type::unreachable); } inline bool isBranchReachable(Switch* sw) { - return !(sw->value && sw->value->type == unreachable) && - sw->condition->type != unreachable; + return !(sw->value && sw->value->type == Type::unreachable) && + sw->condition->type != Type::unreachable; } inline bool isBranchReachable(BrOnExn* br) { - return br->exnref->type != unreachable; + return br->exnref->type != Type::unreachable; } inline bool isBranchReachable(Expression* expr) { @@ -159,14 +159,16 @@ struct BranchSeeker : public PostWalker<BranchSeeker> { BranchSeeker(Name target) : target(target) {} - void noteFound(Expression* value) { noteFound(value ? value->type : none); } + void noteFound(Expression* value) { + noteFound(value ? value->type : Type::none); + } void noteFound(Type type) { found++; if (found == 1) { - valueType = unreachable; + valueType = Type::unreachable; } - if (type != unreachable) { + if (type != Type::unreachable) { valueType = type; } } diff --git a/src/ir/effects.h b/src/ir/effects.h index 6eb2da91d..9c8887333 100644 --- a/src/ir/effects.h +++ b/src/ir/effects.h @@ -209,7 +209,7 @@ struct EffectAnalyzer // loop top, but no way to get out, then it is an infinite loop, and we // consider that a branching side effect (note how the same logic does // not apply to blocks). 
- if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { branches = true; } } diff --git a/src/ir/literal-utils.h b/src/ir/literal-utils.h index 4bc79eee9..45119f2bd 100644 --- a/src/ir/literal-utils.h +++ b/src/ir/literal-utils.h @@ -34,7 +34,7 @@ inline Expression* makeFromInt32(int32_t x, Type type, Module& wasm) { inline Expression* makeZero(Type type, Module& wasm) { // TODO: Switch to using v128.const once V8 supports it // (https://bugs.chromium.org/p/v8/issues/detail?id=8460) - if (type == v128) { + if (type == Type::v128) { Builder builder(wasm); return builder.makeUnary(SplatVecI32x4, builder.makeConst(Literal(int32_t(0)))); diff --git a/src/ir/load-utils.h b/src/ir/load-utils.h index a3bf79c60..62c31ef4e 100644 --- a/src/ir/load-utils.h +++ b/src/ir/load-utils.h @@ -28,7 +28,7 @@ namespace LoadUtils { // fill in bits either signed or unsigned wise) inline bool isSignRelevant(Load* load) { auto type = load->type; - if (load->type == unreachable) { + if (load->type == Type::unreachable) { return false; } return !type.isFloat() && load->bytes < type.getByteSize(); diff --git a/src/ir/memory-utils.h b/src/ir/memory-utils.h index c6b2fad18..13b356ba8 100644 --- a/src/ir/memory-utils.h +++ b/src/ir/memory-utils.h @@ -174,7 +174,7 @@ inline bool ensureLimitedSegments(Module& module) { // create the segment and add in all the data auto* c = module.allocator.alloc<Const>(); c->value = Literal(int32_t(start)); - c->type = i32; + c->type = Type::i32; Memory::Segment combined(c); for (Index j = i; j < memory.segments.size(); j++) { diff --git a/src/ir/properties.h b/src/ir/properties.h index f4c9686b6..01e78563d 100644 --- a/src/ir/properties.h +++ b/src/ir/properties.h @@ -156,7 +156,7 @@ inline Index getZeroExtBits(Expression* curr) { inline Expression* getFallthrough(Expression* curr) { // If the current node is unreachable, there is no value // falling through. - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { return curr; } if (auto* set = curr->dynCast<LocalSet>()) { @@ -173,9 +173,9 @@ inline Expression* getFallthrough(Expression* curr) { } else if (auto* iff = curr->dynCast<If>()) { if (iff->ifFalse) { // Perhaps just one of the two actually returns. - if (iff->ifTrue->type == unreachable) { + if (iff->ifTrue->type == Type::unreachable) { return getFallthrough(iff->ifFalse); - } else if (iff->ifFalse->type == unreachable) { + } else if (iff->ifFalse->type == Type::unreachable) { return getFallthrough(iff->ifTrue); } } diff --git a/src/ir/type-updating.h b/src/ir/type-updating.h index d64ae0158..c028d3b5b 100644 --- a/src/ir/type-updating.h +++ b/src/ir/type-updating.h @@ -172,7 +172,7 @@ struct TypeUpdater // note the addition of a node void noteBreakChange(Name name, int change, Expression* value) { - noteBreakChange(name, change, value ? value->type : none); + noteBreakChange(name, change, value ? value->type : Type::none); } void noteBreakChange(Name name, int change, Type type) { @@ -191,7 +191,7 @@ struct TypeUpdater makeBlockUnreachableIfNoFallThrough(block); } else if (change == 1 && info.numBreaks == 1) { // bumped to 1! 
the block may now be reachable - if (block->type != unreachable) { + if (block->type != Type::unreachable) { return; // was already reachable, had a fallthrough } changeTypeTo(block, type); @@ -217,7 +217,7 @@ struct TypeUpdater // the one thing we need to do here is propagate unreachability, // no other change is possible void propagateTypesUp(Expression* curr) { - if (curr->type != unreachable) { + if (curr->type != Type::unreachable) { return; } while (1) { @@ -227,7 +227,7 @@ struct TypeUpdater return; } // get ready to apply unreachability to this node - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { return; // already unreachable, stop here } // most nodes become unreachable if a child is unreachable, @@ -239,23 +239,23 @@ struct TypeUpdater } // if the block has breaks, it can keep its type if (!block->name.is() || blockInfos[block->name].numBreaks == 0) { - curr->type = unreachable; + curr->type = Type::unreachable; } else { return; // did not turn } } else if (auto* iff = curr->dynCast<If>()) { // may not be unreachable if just one side is iff->finalize(); - if (curr->type != unreachable) { + if (curr->type != Type::unreachable) { return; // did not turn } } else if (auto* tryy = curr->dynCast<Try>()) { tryy->finalize(); - if (curr->type != unreachable) { + if (curr->type != Type::unreachable) { return; // did not turn } } else { - curr->type = unreachable; + curr->type = Type::unreachable; } } } @@ -276,7 +276,7 @@ struct TypeUpdater } void makeBlockUnreachableIfNoFallThrough(Block* curr) { - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { return; // no change possible } if (!curr->list.empty() && curr->list.back()->type.isConcrete()) { @@ -284,9 +284,9 @@ struct TypeUpdater return; } for (auto* child : curr->list) { - if (child->type == unreachable) { + if (child->type == Type::unreachable) { // no fallthrough, and an unreachable, => this block is now unreachable - changeTypeTo(curr, unreachable); + changeTypeTo(curr, Type::unreachable); return; } } @@ -300,7 +300,7 @@ struct TypeUpdater return; // nothing concrete to change to unreachable } curr->finalize(); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { propagateTypesUp(curr); } } @@ -310,7 +310,7 @@ struct TypeUpdater return; // nothing concrete to change to unreachable } curr->finalize(); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { propagateTypesUp(curr); } } diff --git a/src/ir/utils.h b/src/ir/utils.h index 9bd3c9e0b..b7c5b9cba 100644 --- a/src/ir/utils.h +++ b/src/ir/utils.h @@ -281,7 +281,7 @@ struct AutoDrop : public WalkerPass<ExpressionStackWalker<AutoDrop>> { } if (maybeDrop(curr->list.back())) { reFinalize(); - assert(curr->type == none || curr->type == unreachable); + assert(curr->type == Type::none || curr->type == Type::unreachable); } } @@ -297,7 +297,7 @@ struct AutoDrop : public WalkerPass<ExpressionStackWalker<AutoDrop>> { } if (acted) { reFinalize(); - assert(curr->type == none); + assert(curr->type == Type::none); } } @@ -323,20 +323,21 @@ struct I64Utilities { }; static Expression* recreateI64(Builder& builder, Index low, Index high) { - return recreateI64( - builder, builder.makeLocalGet(low, i32), builder.makeLocalGet(high, i32)); + return recreateI64(builder, + builder.makeLocalGet(low, Type::i32), + builder.makeLocalGet(high, Type::i32)); }; static Expression* getI64High(Builder& builder, Index index) { return builder.makeUnary( WrapInt64, builder.makeBinary(ShrUInt64, - 
builder.makeLocalGet(index, i64), + builder.makeLocalGet(index, Type::i64), builder.makeConst(Literal(int64_t(32))))); } static Expression* getI64Low(Builder& builder, Index index) { - return builder.makeUnary(WrapInt64, builder.makeLocalGet(index, i64)); + return builder.makeUnary(WrapInt64, builder.makeLocalGet(index, Type::i64)); } }; diff --git a/src/parsing.h b/src/parsing.h index d64236df3..a75e953b4 100644 --- a/src/parsing.h +++ b/src/parsing.h @@ -85,10 +85,10 @@ parseConst(cashew::IString s, Type type, MixedArena& allocator) { if (type.isFloat()) { if (s == _INFINITY) { switch (type) { - case f32: + case Type::f32: ret->value = Literal(std::numeric_limits<float>::infinity()); break; - case f64: + case Type::f64: ret->value = Literal(std::numeric_limits<double>::infinity()); break; default: @@ -99,10 +99,10 @@ parseConst(cashew::IString s, Type type, MixedArena& allocator) { } if (s == NEG_INFINITY) { switch (type) { - case f32: + case Type::f32: ret->value = Literal(-std::numeric_limits<float>::infinity()); break; - case f64: + case Type::f64: ret->value = Literal(-std::numeric_limits<double>::infinity()); break; default: @@ -113,10 +113,10 @@ parseConst(cashew::IString s, Type type, MixedArena& allocator) { } if (s == _NAN) { switch (type) { - case f32: + case Type::f32: ret->value = Literal(float(std::nan(""))); break; - case f64: + case Type::f64: ret->value = Literal(double(std::nan(""))); break; default: @@ -138,7 +138,7 @@ parseConst(cashew::IString s, Type type, MixedArena& allocator) { throw ParseException("bad nan input"); } switch (type) { - case f32: { + case Type::f32: { uint32_t pattern; if (modifier) { std::istringstream istr(modifier); @@ -159,7 +159,7 @@ parseConst(cashew::IString s, Type type, MixedArena& allocator) { ret->value = Literal(pattern).castToF32(); break; } - case f64: { + case Type::f64: { uint64_t pattern; if (modifier) { std::istringstream istr(modifier); @@ -188,10 +188,10 @@ parseConst(cashew::IString s, Type type, MixedArena& allocator) { } if (s == NEG_NAN) { switch (type) { - case f32: + case Type::f32: ret->value = Literal(float(-std::nan(""))); break; - case f64: + case Type::f64: ret->value = Literal(double(-std::nan(""))); break; default: @@ -202,7 +202,7 @@ parseConst(cashew::IString s, Type type, MixedArena& allocator) { } } switch (type) { - case i32: { + case Type::i32: { if ((str[0] == '0' && str[1] == 'x') || (str[0] == '-' && str[1] == '0' && str[2] == 'x')) { bool negative = str[0] == '-'; @@ -227,7 +227,7 @@ parseConst(cashew::IString s, Type type, MixedArena& allocator) { } break; } - case i64: { + case Type::i64: { if ((str[0] == '0' && str[1] == 'x') || (str[0] == '-' && str[1] == '0' && str[2] == 'x')) { bool negative = str[0] == '-'; @@ -252,24 +252,24 @@ parseConst(cashew::IString s, Type type, MixedArena& allocator) { } break; } - case f32: { + case Type::f32: { char* end; ret->value = Literal(strtof(str, &end)); break; } - case f64: { + case Type::f64: { char* end; ret->value = Literal(strtod(str, &end)); break; } - case v128: - case funcref: - case anyref: - case nullref: - case exnref: + case Type::v128: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: WASM_UNREACHABLE("unexpected const type"); - case none: - case unreachable: { + case Type::none: + case Type::unreachable: { return nullptr; } } diff --git a/src/passes/AlignmentLowering.cpp b/src/passes/AlignmentLowering.cpp index d659fcb69..18fe46398 100644 --- a/src/passes/AlignmentLowering.cpp +++ b/src/passes/AlignmentLowering.cpp 
@@ -32,26 +32,31 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> { return; } Builder builder(*getModule()); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { replaceCurrent(curr->ptr); return; } - assert(curr->type == i32); // TODO: i64, f32, f64 - auto temp = builder.addVar(getFunction(), i32); + assert(curr->type == Type::i32); // TODO: i64, f32, f64 + auto temp = builder.addVar(getFunction(), Type::i32); Expression* ret; if (curr->bytes == 2) { ret = builder.makeBinary( OrInt32, - builder.makeLoad( - 1, false, curr->offset, 1, builder.makeLocalGet(temp, i32), i32), - builder.makeBinary(ShlInt32, - builder.makeLoad(1, - false, - curr->offset + 1, - 1, - builder.makeLocalGet(temp, i32), - i32), - builder.makeConst(Literal(int32_t(8))))); + builder.makeLoad(1, + false, + curr->offset, + 1, + builder.makeLocalGet(temp, Type::i32), + Type::i32), + builder.makeBinary( + ShlInt32, + builder.makeLoad(1, + false, + curr->offset + 1, + 1, + builder.makeLocalGet(temp, Type::i32), + Type::i32), + builder.makeConst(Literal(int32_t(8))))); if (curr->signed_) { ret = Bits::makeSignExt(ret, 2, *getModule()); } @@ -61,47 +66,59 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> { OrInt32, builder.makeBinary( OrInt32, - builder.makeLoad( - 1, false, curr->offset, 1, builder.makeLocalGet(temp, i32), i32), - builder.makeBinary(ShlInt32, - builder.makeLoad(1, - false, - curr->offset + 1, - 1, - builder.makeLocalGet(temp, i32), - i32), - builder.makeConst(Literal(int32_t(8))))), + builder.makeLoad(1, + false, + curr->offset, + 1, + builder.makeLocalGet(temp, Type::i32), + Type::i32), + builder.makeBinary( + ShlInt32, + builder.makeLoad(1, + false, + curr->offset + 1, + 1, + builder.makeLocalGet(temp, Type::i32), + Type::i32), + builder.makeConst(Literal(int32_t(8))))), builder.makeBinary( OrInt32, - builder.makeBinary(ShlInt32, - builder.makeLoad(1, - false, - curr->offset + 2, - 1, - builder.makeLocalGet(temp, i32), - i32), - builder.makeConst(Literal(int32_t(16)))), - builder.makeBinary(ShlInt32, - builder.makeLoad(1, - false, - curr->offset + 3, - 1, - builder.makeLocalGet(temp, i32), - i32), - builder.makeConst(Literal(int32_t(24)))))); + builder.makeBinary( + ShlInt32, + builder.makeLoad(1, + false, + curr->offset + 2, + 1, + builder.makeLocalGet(temp, Type::i32), + Type::i32), + builder.makeConst(Literal(int32_t(16)))), + builder.makeBinary( + ShlInt32, + builder.makeLoad(1, + false, + curr->offset + 3, + 1, + builder.makeLocalGet(temp, Type::i32), + Type::i32), + builder.makeConst(Literal(int32_t(24)))))); } else if (curr->align == 2) { ret = builder.makeBinary( OrInt32, - builder.makeLoad( - 2, false, curr->offset, 2, builder.makeLocalGet(temp, i32), i32), - builder.makeBinary(ShlInt32, - builder.makeLoad(2, - false, - curr->offset + 2, - 2, - builder.makeLocalGet(temp, i32), - i32), - builder.makeConst(Literal(int32_t(16))))); + builder.makeLoad(2, + false, + curr->offset, + 2, + builder.makeLocalGet(temp, Type::i32), + Type::i32), + builder.makeBinary( + ShlInt32, + builder.makeLoad(2, + false, + curr->offset + 2, + 2, + builder.makeLocalGet(temp, Type::i32), + Type::i32), + builder.makeConst(Literal(int32_t(16))))); } else { WASM_UNREACHABLE("invalid alignment"); } @@ -117,14 +134,14 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> { return; } Builder builder(*getModule()); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { replaceCurrent(builder.makeBlock( 
{builder.makeDrop(curr->ptr), builder.makeDrop(curr->value)})); return; } - assert(curr->value->type == i32); // TODO: i64, f32, f64 - auto tempPtr = builder.addVar(getFunction(), i32); - auto tempValue = builder.addVar(getFunction(), i32); + assert(curr->value->type == Type::i32); // TODO: i64, f32, f64 + auto tempPtr = builder.addVar(getFunction(), Type::i32); + auto tempValue = builder.addVar(getFunction(), Type::i32); auto* block = builder.makeBlock({builder.makeLocalSet(tempPtr, curr->ptr), builder.makeLocalSet(tempValue, curr->value)}); @@ -133,71 +150,71 @@ struct AlignmentLowering : public WalkerPass<PostWalker<AlignmentLowering>> { builder.makeStore(1, curr->offset, 1, - builder.makeLocalGet(tempPtr, i32), - builder.makeLocalGet(tempValue, i32), - i32)); + builder.makeLocalGet(tempPtr, Type::i32), + builder.makeLocalGet(tempValue, Type::i32), + Type::i32)); block->list.push_back(builder.makeStore( 1, curr->offset + 1, 1, - builder.makeLocalGet(tempPtr, i32), + builder.makeLocalGet(tempPtr, Type::i32), builder.makeBinary(ShrUInt32, - builder.makeLocalGet(tempValue, i32), + builder.makeLocalGet(tempValue, Type::i32), builder.makeConst(Literal(int32_t(8)))), - i32)); + Type::i32)); } else if (curr->bytes == 4) { if (curr->align == 1) { block->list.push_back( builder.makeStore(1, curr->offset, 1, - builder.makeLocalGet(tempPtr, i32), - builder.makeLocalGet(tempValue, i32), - i32)); + builder.makeLocalGet(tempPtr, Type::i32), + builder.makeLocalGet(tempValue, Type::i32), + Type::i32)); block->list.push_back(builder.makeStore( 1, curr->offset + 1, 1, - builder.makeLocalGet(tempPtr, i32), + builder.makeLocalGet(tempPtr, Type::i32), builder.makeBinary(ShrUInt32, - builder.makeLocalGet(tempValue, i32), + builder.makeLocalGet(tempValue, Type::i32), builder.makeConst(Literal(int32_t(8)))), - i32)); + Type::i32)); block->list.push_back(builder.makeStore( 1, curr->offset + 2, 1, - builder.makeLocalGet(tempPtr, i32), + builder.makeLocalGet(tempPtr, Type::i32), builder.makeBinary(ShrUInt32, - builder.makeLocalGet(tempValue, i32), + builder.makeLocalGet(tempValue, Type::i32), builder.makeConst(Literal(int32_t(16)))), - i32)); + Type::i32)); block->list.push_back(builder.makeStore( 1, curr->offset + 3, 1, - builder.makeLocalGet(tempPtr, i32), + builder.makeLocalGet(tempPtr, Type::i32), builder.makeBinary(ShrUInt32, - builder.makeLocalGet(tempValue, i32), + builder.makeLocalGet(tempValue, Type::i32), builder.makeConst(Literal(int32_t(24)))), - i32)); + Type::i32)); } else if (curr->align == 2) { block->list.push_back( builder.makeStore(2, curr->offset, 2, - builder.makeLocalGet(tempPtr, i32), - builder.makeLocalGet(tempValue, i32), - i32)); + builder.makeLocalGet(tempPtr, Type::i32), + builder.makeLocalGet(tempValue, Type::i32), + Type::i32)); block->list.push_back(builder.makeStore( 2, curr->offset + 2, 2, - builder.makeLocalGet(tempPtr, i32), + builder.makeLocalGet(tempPtr, Type::i32), builder.makeBinary(ShrUInt32, - builder.makeLocalGet(tempValue, i32), + builder.makeLocalGet(tempValue, Type::i32), builder.makeConst(Literal(int32_t(16)))), - i32)); + Type::i32)); } else { WASM_UNREACHABLE("invalid alignment"); } diff --git a/src/passes/Asyncify.cpp b/src/passes/Asyncify.cpp index 8f929d9fe..b7e8c90e0 100644 --- a/src/passes/Asyncify.cpp +++ b/src/passes/Asyncify.cpp @@ -298,10 +298,10 @@ class GlobalHelper { public: GlobalHelper(Module& module) : module(module) { - map[i32] = "asyncify_fake_call_global_i32"; - map[i64] = "asyncify_fake_call_global_i64"; - map[f32] = 
"asyncify_fake_call_global_f32"; - map[f64] = "asyncify_fake_call_global_f64"; + map[Type::i32] = "asyncify_fake_call_global_i32"; + map[Type::i64] = "asyncify_fake_call_global_i64"; + map[Type::f32] = "asyncify_fake_call_global_f32"; + map[Type::f64] = "asyncify_fake_call_global_f64"; Builder builder(module); for (auto& pair : map) { auto type = pair.first; @@ -326,7 +326,7 @@ public: if (iter != rev.end()) { return iter->second; } - return none; + return Type::none; } private: @@ -647,8 +647,8 @@ public: false, int32_t(DataOffset::BStackPos), 4, - makeGlobalGet(ASYNCIFY_DATA, i32), - i32); + makeGlobalGet(ASYNCIFY_DATA, Type::i32), + Type::i32); } Expression* makeIncStackPos(int32_t by) { @@ -659,14 +659,14 @@ public: 4, int32_t(DataOffset::BStackPos), 4, - makeGlobalGet(ASYNCIFY_DATA, i32), + makeGlobalGet(ASYNCIFY_DATA, Type::i32), makeBinary(AddInt32, makeGetStackPos(), makeConst(Literal(by))), - i32); + Type::i32); } Expression* makeStateCheck(State value) { return makeBinary(EqInt32, - makeGlobalGet(ASYNCIFY_STATE, i32), + makeGlobalGet(ASYNCIFY_STATE, Type::i32), makeConst(Literal(int32_t(value)))); } @@ -810,11 +810,11 @@ private: iff->finalize(); return iff; } - auto conditionTemp = builder->addVar(func, i32); + auto conditionTemp = builder->addVar(func, Type::i32); // TODO: can avoid pre if the condition is a get or a const auto* pre = makeMaybeSkip(builder->makeLocalSet(conditionTemp, iff->condition)); - iff->condition = builder->makeLocalGet(conditionTemp, i32); + iff->condition = builder->makeLocalGet(conditionTemp, Type::i32); iff->condition = builder->makeBinary( OrInt32, iff->condition, builder->makeStateCheck(State::Rewinding)); iff->ifTrue = process(iff->ifTrue); @@ -826,7 +826,7 @@ private: builder->makeBinary( OrInt32, builder->makeUnary(EqZInt32, - builder->makeLocalGet(conditionTemp, i32)), + builder->makeLocalGet(conditionTemp, Type::i32)), builder->makeStateCheck(State::Rewinding)), process(otherArm)); otherIf->finalize(); @@ -853,7 +853,7 @@ private: // TODO: stop doing this after code can no longer reach a call that may // change the state assert(doesCall(curr)); - assert(curr->type == none); + assert(curr->type == Type::none); // The case of a set is tricky: we must *not* execute the set when // unwinding, since at that point we have a fake value for the return, // and if we applied it to the local, it would end up saved and then @@ -893,8 +893,9 @@ private: // it when we add its contents, later.) return builder->makeIf( builder->makeStateCheck(State::Unwinding), - builder->makeCall( - ASYNCIFY_UNWIND, {builder->makeConst(Literal(int32_t(index)))}, none), + builder->makeCall(ASYNCIFY_UNWIND, + {builder->makeConst(Literal(int32_t(index)))}, + Type::none), ifNotUnwinding); } @@ -903,13 +904,13 @@ private: // don't want it to be seen by asyncify itself. return builder->makeCall(ASYNCIFY_CHECK_CALL_INDEX, {builder->makeConst(Literal(int32_t(index)))}, - i32); + Type::i32); } Expression* makeCallIndexPop() { // Emit an intrinsic for this, as we store the index into a local, and // don't want it to be seen by asyncify itself. - return builder->makeCall(ASYNCIFY_GET_CALL_INDEX, {}, none); + return builder->makeCall(ASYNCIFY_GET_CALL_INDEX, {}, Type::none); } // Given a function that is not instrumented - because we proved it doesn't @@ -921,11 +922,11 @@ private: // That is, if in an uninstrumented function, a sleep should not begin // from any call. 
void addAssertsInNonInstrumented(Function* func) { - auto oldState = builder->addVar(func, i32); + auto oldState = builder->addVar(func, Type::i32); // Add a check at the function entry. func->body = builder->makeSequence( builder->makeLocalSet(oldState, - builder->makeGlobalGet(ASYNCIFY_STATE, i32)), + builder->makeGlobalGet(ASYNCIFY_STATE, Type::i32)), func->body); // Add a check around every call. struct Walker : PostWalker<Walker> { @@ -944,8 +945,8 @@ private: void handleCall(Expression* call) { auto* check = builder->makeIf( builder->makeBinary(NeInt32, - builder->makeGlobalGet(ASYNCIFY_STATE, i32), - builder->makeLocalGet(oldState, i32)), + builder->makeGlobalGet(ASYNCIFY_STATE, Type::i32), + builder->makeLocalGet(oldState, Type::i32)), builder->makeUnreachable()); Expression* rep; if (call->type.isConcrete()) { @@ -991,11 +992,12 @@ struct AsyncifyLocals : public WalkerPass<PostWalker<AsyncifyLocals>> { builder->makeIncStackPos(-4), builder->makeLocalSet( rewindIndex, - builder->makeLoad(4, false, 0, 4, builder->makeGetStackPos(), i32)))); + builder->makeLoad( + 4, false, 0, 4, builder->makeGetStackPos(), Type::i32)))); } else if (curr->target == ASYNCIFY_CHECK_CALL_INDEX) { replaceCurrent(builder->makeBinary( EqInt32, - builder->makeLocalGet(rewindIndex, i32), + builder->makeLocalGet(rewindIndex, Type::i32), builder->makeConst( Literal(int32_t(curr->operands[0]->cast<Const>()->value.geti32()))))); } @@ -1003,7 +1005,7 @@ struct AsyncifyLocals : public WalkerPass<PostWalker<AsyncifyLocals>> { void visitGlobalSet(GlobalSet* curr) { auto type = analyzer->globals.getTypeOrNone(curr->name); - if (type != none) { + if (type != Type::none) { replaceCurrent( builder->makeLocalSet(getFakeCallLocal(type), curr->value)); } @@ -1011,7 +1013,7 @@ struct AsyncifyLocals : public WalkerPass<PostWalker<AsyncifyLocals>> { void visitGlobalGet(GlobalGet* curr) { auto type = analyzer->globals.getTypeOrNone(curr->name); - if (type != none) { + if (type != Type::none) { replaceCurrent(builder->makeLocalGet(getFakeCallLocal(type), type)); } } @@ -1038,8 +1040,8 @@ struct AsyncifyLocals : public WalkerPass<PostWalker<AsyncifyLocals>> { // well as saving the locals. // An index is needed for getting the unwinding and rewinding call indexes // around TODO: can this be the same index? - auto unwindIndex = builder->addVar(func, i32); - rewindIndex = builder->addVar(func, i32); + auto unwindIndex = builder->addVar(func, Type::i32); + rewindIndex = builder->addVar(func, Type::i32); // Rewrite the function body. 
builder = make_unique<AsyncifyBuilder>(*getModule()); walk(func->body); @@ -1095,7 +1097,7 @@ private: } auto* block = builder->makeBlock(); block->list.push_back(builder->makeIncStackPos(-total)); - auto tempIndex = builder->addVar(func, i32); + auto tempIndex = builder->addVar(func, Type::i32); block->list.push_back( builder->makeLocalSet(tempIndex, builder->makeGetStackPos())); Index offset = 0; @@ -1110,7 +1112,7 @@ private: true, offset, STACK_ALIGN, - builder->makeLocalGet(tempIndex, i32), + builder->makeLocalGet(tempIndex, Type::i32), type))); offset += size; } @@ -1124,7 +1126,7 @@ private: } auto* func = getFunction(); auto* block = builder->makeBlock(); - auto tempIndex = builder->addVar(func, i32); + auto tempIndex = builder->addVar(func, Type::i32); block->list.push_back( builder->makeLocalSet(tempIndex, builder->makeGetStackPos())); Index offset = 0; @@ -1137,7 +1139,7 @@ private: builder->makeStore(size, offset, STACK_ALIGN, - builder->makeLocalGet(tempIndex, i32), + builder->makeLocalGet(tempIndex, Type::i32), builder->makeLocalGet(i, type), type)); offset += size; @@ -1154,8 +1156,8 @@ private: 0, 4, builder->makeGetStackPos(), - builder->makeLocalGet(tempIndex, i32), - i32), + builder->makeLocalGet(tempIndex, Type::i32), + Type::i32), builder->makeIncStackPos(4)); } }; @@ -1281,11 +1283,11 @@ private: void addGlobals(Module* module) { Builder builder(*module); module->addGlobal(builder.makeGlobal(ASYNCIFY_STATE, - i32, + Type::i32, builder.makeConst(Literal(int32_t(0))), Builder::Mutable)); module->addGlobal(builder.makeGlobal(ASYNCIFY_DATA, - i32, + Type::i32, builder.makeConst(Literal(int32_t(0))), Builder::Mutable)); } @@ -1295,14 +1297,14 @@ private: auto makeFunction = [&](Name name, bool setData, State state) { std::vector<Type> params; if (setData) { - params.push_back(i32); + params.push_back(Type::i32); } auto* body = builder.makeBlock(); body->list.push_back(builder.makeGlobalSet( ASYNCIFY_STATE, builder.makeConst(Literal(int32_t(state))))); if (setData) { - body->list.push_back( - builder.makeGlobalSet(ASYNCIFY_DATA, builder.makeLocalGet(0, i32))); + body->list.push_back(builder.makeGlobalSet( + ASYNCIFY_DATA, builder.makeLocalGet(0, Type::i32))); } // Verify the data is valid. auto* stackPos = @@ -1310,15 +1312,15 @@ private: false, int32_t(DataOffset::BStackPos), 4, - builder.makeGlobalGet(ASYNCIFY_DATA, i32), - i32); + builder.makeGlobalGet(ASYNCIFY_DATA, Type::i32), + Type::i32); auto* stackEnd = builder.makeLoad(4, false, int32_t(DataOffset::BStackEnd), 4, - builder.makeGlobalGet(ASYNCIFY_DATA, i32), - i32); + builder.makeGlobalGet(ASYNCIFY_DATA, Type::i32), + Type::i32); body->list.push_back( builder.makeIf(builder.makeBinary(GtUInt32, stackPos, stackEnd), builder.makeUnreachable())); diff --git a/src/passes/AvoidReinterprets.cpp b/src/passes/AvoidReinterprets.cpp index f1b3d96d8..4df75dd15 100644 --- a/src/passes/AvoidReinterprets.cpp +++ b/src/passes/AvoidReinterprets.cpp @@ -115,7 +115,7 @@ struct AvoidReinterprets : public WalkerPass<PostWalker<AvoidReinterprets>> { auto& info = pair.second; if (info.reinterpreted && canReplaceWithReinterpret(load)) { // We should use another load here, to avoid reinterprets. 
- info.ptrLocal = Builder::addVar(func, i32); + info.ptrLocal = Builder::addVar(func, Type::i32); info.reinterpretedLocal = Builder::addVar(func, load->type.reinterpret()); } else { @@ -165,7 +165,7 @@ struct AvoidReinterprets : public WalkerPass<PostWalker<AvoidReinterprets>> { auto& info = iter->second; Builder builder(*module); auto* ptr = curr->ptr; - curr->ptr = builder.makeLocalGet(info.ptrLocal, i32); + curr->ptr = builder.makeLocalGet(info.ptrLocal, Type::i32); // Note that the other load can have its sign set to false - if the // original were an integer, the other is a float anyhow; and if // original were a float, we don't know what sign to use. @@ -173,8 +173,8 @@ struct AvoidReinterprets : public WalkerPass<PostWalker<AvoidReinterprets>> { {builder.makeLocalSet(info.ptrLocal, ptr), builder.makeLocalSet( info.reinterpretedLocal, - makeReinterpretedLoad(curr, - builder.makeLocalGet(info.ptrLocal, i32))), + makeReinterpretedLoad( + curr, builder.makeLocalGet(info.ptrLocal, Type::i32))), curr})); } } diff --git a/src/passes/CodeFolding.cpp b/src/passes/CodeFolding.cpp index 947e64715..9b6e1143d 100644 --- a/src/passes/CodeFolding.cpp +++ b/src/passes/CodeFolding.cpp @@ -201,7 +201,7 @@ struct CodeFolding : public WalkerPass<ControlFlowWalker<CodeFolding>> { // see if there is a fallthrough bool hasFallthrough = true; for (auto* child : curr->list) { - if (child->type == unreachable) { + if (child->type == Type::unreachable) { hasFallthrough = false; } } @@ -685,12 +685,12 @@ private: auto* old = getFunction()->body; auto* inner = builder.makeBlock(); inner->name = innerName; - if (old->type == unreachable) { + if (old->type == Type::unreachable) { // the old body is not flowed out of anyhow, so just put it there inner->list.push_back(old); } else { // otherwise, we must not flow out to the merged code - if (old->type == none) { + if (old->type == Type::none) { inner->list.push_back(old); inner->list.push_back(builder.makeReturn()); } else { @@ -703,7 +703,7 @@ private: if (toplevel) { toplevel->finalize(); } - if (old->type != unreachable) { + if (old->type != Type::unreachable) { inner->list.push_back(builder.makeReturn(old)); } else { inner->list.push_back(old); diff --git a/src/passes/ConstHoisting.cpp b/src/passes/ConstHoisting.cpp index 4e8cd9910..6c79c4215 100644 --- a/src/passes/ConstHoisting.cpp +++ b/src/passes/ConstHoisting.cpp @@ -78,29 +78,29 @@ private: // measure the size of the constant Index size = 0; switch (value.type) { - case i32: { + case Type::i32: { size = getWrittenSize(S32LEB(value.geti32())); break; } - case i64: { + case Type::i64: { size = getWrittenSize(S64LEB(value.geti64())); break; } - case f32: - case f64: { + case Type::f32: + case Type::f64: { size = value.type.getByteSize(); break; } - // not implemented yet - case v128: - case funcref: - case anyref: - case nullref: - case exnref: { + // not implemented yet + case Type::v128: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: { return false; } - case none: - case unreachable: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } // compute the benefit, of replacing the uses with diff --git a/src/passes/DeadArgumentElimination.cpp b/src/passes/DeadArgumentElimination.cpp index 4395fc780..43ebc6721 100644 --- a/src/passes/DeadArgumentElimination.cpp +++ b/src/passes/DeadArgumentElimination.cpp @@ -294,21 +294,21 @@ struct DAE : public Pass { assert(call->operands.size() == numParams); auto* operand = call->operands[i]; if (auto* c = 
operand->dynCast<Const>()) { - if (value.type == none) { + if (value.type == Type::none) { // This is the first value seen. value = c->value; } else if (value != c->value) { // Not identical, give up - value.type = none; + value.type = Type::none; break; } } else { // Not a constant, give up - value.type = none; + value.type = Type::none; break; } } - if (value.type != none) { + if (value.type != Type::none) { // Success! We can just apply the constant in the function, which // makes the parameter value unused, which lets us remove it later. Builder builder(*module); @@ -466,8 +466,8 @@ private: Expression** location = iter->second; *location = call; // Update the call's type. - if (call->type != unreachable) { - call->type = none; + if (call->type != Type::unreachable) { + call->type = Type::none; } } } diff --git a/src/passes/DeadCodeElimination.cpp b/src/passes/DeadCodeElimination.cpp index 7d5385a83..ac984f6cb 100644 --- a/src/passes/DeadCodeElimination.cpp +++ b/src/passes/DeadCodeElimination.cpp @@ -82,10 +82,14 @@ struct DeadCodeElimination } // if a child exists and is unreachable, we can replace ourselves with it - bool isDead(Expression* child) { return child && child->type == unreachable; } + bool isDead(Expression* child) { + return child && child->type == Type::unreachable; + } // a similar check, assumes the child exists - bool isUnreachable(Expression* child) { return child->type == unreachable; } + bool isUnreachable(Expression* child) { + return child->type == Type::unreachable; + } // things that stop control flow @@ -170,7 +174,7 @@ struct DeadCodeElimination if (!reachable && list.size() > 1) { // to do here: nothing to remove after it) for (Index i = 0; i < list.size() - 1; i++) { - if (list[i]->type == unreachable) { + if (list[i]->type == Type::unreachable) { list.resize(i + 1); break; } @@ -393,7 +397,7 @@ struct DeadCodeElimination // we don't need to drop unreachable nodes Expression* drop(Expression* toDrop) { - if (toDrop->type == unreachable) { + if (toDrop->type == Type::unreachable) { return toDrop; } return Builder(*getModule()).makeDrop(toDrop); diff --git a/src/passes/Flatten.cpp b/src/passes/Flatten.cpp index fda8e3f80..13aa985aa 100644 --- a/src/passes/Flatten.cpp +++ b/src/passes/Flatten.cpp @@ -105,7 +105,7 @@ struct Flatten if (last->type.isConcrete()) { last = builder.makeLocalSet(temp, last); } - block->finalize(none); + block->finalize(Type::none); // and we leave just a get of the value auto* rep = builder.makeLocalGet(temp, type); replaceCurrent(rep); @@ -113,7 +113,7 @@ struct Flatten ourPreludes.push_back(block); } // the block now has no return value, and may have become unreachable - block->finalize(none); + block->finalize(Type::none); } else if (auto* iff = curr->dynCast<If>()) { // condition preludes go before the entire if @@ -160,7 +160,7 @@ struct Flatten rep = builder.makeLocalGet(temp, type); // the whole if is now a prelude ourPreludes.push_back(loop); - loop->type = none; + loop->type = Type::none; } loop->body = getPreludesWithExpression(originalBody, loop->body); loop->finalize(); @@ -181,7 +181,7 @@ struct Flatten if (auto* set = curr->dynCast<LocalSet>()) { if (set->isTee()) { // we disallow local.tee - if (set->value->type == unreachable) { + if (set->value->type == Type::unreachable) { replaceCurrent(set->value); // trivial, no set happens } else { // use a set in a prelude + a get @@ -234,14 +234,14 @@ struct Flatten if (br->type.isConcrete()) { replaceCurrent(builder.makeLocalGet(temp, type)); } else { - assert(br->type == 
unreachable); + assert(br->type == Type::unreachable); replaceCurrent(builder.makeUnreachable()); } } br->value = nullptr; br->finalize(); } else { - assert(type == unreachable); + assert(type == Type::unreachable); // we don't need the br at all replaceCurrent(br->value); } @@ -264,7 +264,7 @@ struct Flatten sw->value = nullptr; sw->finalize(); } else { - assert(type == unreachable); + assert(type == Type::unreachable); // we don't need the br at all replaceCurrent(sw->value); } @@ -277,7 +277,7 @@ struct Flatten curr = getCurrent(); // we may have replaced it // we have changed children ReFinalizeNode().visit(curr); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { ourPreludes.push_back(curr); replaceCurrent(builder.makeUnreachable()); } else if (curr->type.isConcrete()) { diff --git a/src/passes/FuncCastEmulation.cpp b/src/passes/FuncCastEmulation.cpp index 9d5109a83..09c87c212 100644 --- a/src/passes/FuncCastEmulation.cpp +++ b/src/passes/FuncCastEmulation.cpp @@ -45,38 +45,39 @@ static const int NUM_PARAMS = 16; static Expression* toABI(Expression* value, Module* module) { Builder builder(*module); switch (value->type) { - case i32: { + case Type::i32: { value = builder.makeUnary(ExtendUInt32, value); break; } - case i64: { + case Type::i64: { // already good break; } - case f32: { + case Type::f32: { value = builder.makeUnary(ExtendUInt32, builder.makeUnary(ReinterpretFloat32, value)); break; } - case f64: { + case Type::f64: { value = builder.makeUnary(ReinterpretFloat64, value); break; } - case v128: { + case Type::v128: { WASM_UNREACHABLE("v128 not implemented yet"); } - case funcref: - case anyref: - case nullref: - case exnref: { + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: { WASM_UNREACHABLE("reference types cannot be converted to i64"); } - case none: { + case Type::none: { // the value is none, but we need a value here - value = builder.makeSequence(value, LiteralUtils::makeZero(i64, *module)); + value = + builder.makeSequence(value, LiteralUtils::makeZero(Type::i64, *module)); break; } - case unreachable: { + case Type::unreachable: { // can leave it, the call isn't taken anyhow break; } @@ -88,36 +89,36 @@ static Expression* toABI(Expression* value, Module* module) { static Expression* fromABI(Expression* value, Type type, Module* module) { Builder builder(*module); switch (type) { - case i32: { + case Type::i32: { value = builder.makeUnary(WrapInt64, value); break; } - case i64: { + case Type::i64: { // already good break; } - case f32: { + case Type::f32: { value = builder.makeUnary(ReinterpretInt32, builder.makeUnary(WrapInt64, value)); break; } - case f64: { + case Type::f64: { value = builder.makeUnary(ReinterpretInt64, value); break; } - case v128: { + case Type::v128: { WASM_UNREACHABLE("v128 not implemented yet"); } - case funcref: - case anyref: - case nullref: - case exnref: { + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: { WASM_UNREACHABLE("reference types cannot be converted from i64"); } - case none: { + case Type::none: { value = builder.makeDrop(value); } - case unreachable: { + case Type::unreachable: { // can leave it, the call isn't taken anyhow break; } @@ -143,12 +144,12 @@ struct ParallelFuncCastEmulation } // Add extra operands as needed. 
while (curr->operands.size() < NUM_PARAMS) { - curr->operands.push_back(LiteralUtils::makeZero(i64, *getModule())); + curr->operands.push_back(LiteralUtils::makeZero(Type::i64, *getModule())); } // Set the new types curr->sig = ABIType; auto oldType = curr->type; - curr->type = i64; + curr->type = Type::i64; curr->finalize(); // may be unreachable // Fix up return value replaceCurrent(fromABI(curr, oldType, getModule())); @@ -203,12 +204,12 @@ private: std::vector<Expression*> callOperands; for (Index i = 0; i < params.size(); i++) { callOperands.push_back( - fromABI(builder.makeLocalGet(i, i64), params[i], module)); + fromABI(builder.makeLocalGet(i, Type::i64), params[i], module)); } auto* call = builder.makeCall(name, callOperands, type); std::vector<Type> thunkParams; for (Index i = 0; i < NUM_PARAMS; i++) { - thunkParams.push_back(i64); + thunkParams.push_back(Type::i64); } auto* thunkFunc = builder.makeFunction(thunk, diff --git a/src/passes/I64ToI32Lowering.cpp b/src/passes/I64ToI32Lowering.cpp index c9a4f46ea..c3ed6fb04 100644 --- a/src/passes/I64ToI32Lowering.cpp +++ b/src/passes/I64ToI32Lowering.cpp @@ -109,13 +109,13 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { // add new globals for high bits for (size_t i = 0, globals = module->globals.size(); i < globals; ++i) { auto* curr = module->globals[i].get(); - if (curr->type != i64) { + if (curr->type != Type::i64) { continue; } originallyI64Globals.insert(curr->name); - curr->type = i32; + curr->type = Type::i32; auto* high = builder->makeGlobal(makeHighName(curr->name), - i32, + Type::i32, builder->makeConst(Literal(int32_t(0))), Builder::Mutable); module->addGlobal(high); @@ -125,21 +125,22 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { if (auto* c = curr->init->dynCast<Const>()) { uint64_t value = c->value.geti64(); c->value = Literal(uint32_t(value)); - c->type = i32; + c->type = Type::i32; high->init = builder->makeConst(Literal(uint32_t(value >> 32))); } else if (auto* get = curr->init->dynCast<GlobalGet>()) { - high->init = builder->makeGlobalGet(makeHighName(get->name), i32); + high->init = + builder->makeGlobalGet(makeHighName(get->name), Type::i32); } else { WASM_UNREACHABLE("unexpected expression type"); } - curr->init->type = i32; + curr->init->type = Type::i32; } } // For functions that return 64-bit values, we use this global variable // to return the high 32 bits. auto* highBits = new Global(); - highBits->type = i32; + highBits->type = Type::i32; highBits->name = INT64_TO_32_HIGH_BITS; highBits->init = builder->makeConst(Literal(int32_t(0))); highBits->mutable_ = true; @@ -173,7 +174,7 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { (i < oldFunc->getVarIndexBase()) ? 
Builder::addParam : static_cast<Index (*)(Function*, Name, Type)>(Builder::addVar); - if (paramType == i64) { + if (paramType == Type::i64) { builderFunc(func, lowName, Type::i32); builderFunc(func, highName, Type::i32); indexMap[i] = newIdx; @@ -199,8 +200,8 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { TempVar lowBits = getTemp(); LocalSet* setLow = builder->makeLocalSet(lowBits, func->body); GlobalSet* setHigh = builder->makeGlobalSet( - INT64_TO_32_HIGH_BITS, builder->makeLocalGet(highBits, i32)); - LocalGet* getLow = builder->makeLocalGet(lowBits, i32); + INT64_TO_32_HIGH_BITS, builder->makeLocalGet(highBits, Type::i32)); + LocalGet* getLow = builder->makeLocalGet(lowBits, Type::i32); func->body = builder->blockify(setLow, setHigh, getLow); } } @@ -224,7 +225,7 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { args.push_back(e); if (hasOutParam(e)) { TempVar argHighBits = fetchOutParam(e); - args.push_back(builder->makeLocalGet(argHighBits, i32)); + args.push_back(builder->makeLocalGet(argHighBits, Type::i32)); fixed = true; } } @@ -238,8 +239,8 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { auto* call = callBuilder(args, Type::i32); LocalSet* doCall = builder->makeLocalSet(lowBits, call); LocalSet* setHigh = builder->makeLocalSet( - highBits, builder->makeGlobalGet(INT64_TO_32_HIGH_BITS, i32)); - LocalGet* getLow = builder->makeLocalGet(lowBits, i32); + highBits, builder->makeGlobalGet(INT64_TO_32_HIGH_BITS, Type::i32)); + LocalGet* getLow = builder->makeLocalGet(lowBits, Type::i32); Block* result = builder->blockify(doCall, setHigh, getLow); setOutParam(result, std::move(highBits)); replaceCurrent(result); @@ -289,13 +290,13 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { // Need to remap the local into the new naming scheme, regardless of // the type of the local. 
curr->index = mappedIndex; - if (curr->type != i64) { + if (curr->type != Type::i64) { return; } - curr->type = i32; + curr->type = Type::i32; TempVar highBits = getTemp(); LocalSet* setHighBits = builder->makeLocalSet( - highBits, builder->makeLocalGet(mappedIndex + 1, i32)); + highBits, builder->makeLocalGet(mappedIndex + 1, Type::i32)); Block* result = builder->blockify(setHighBits, curr); replaceCurrent(result); setOutParam(result, std::move(highBits)); @@ -304,11 +305,11 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { void lowerTee(LocalSet* curr) { TempVar highBits = fetchOutParam(curr->value); TempVar tmp = getTemp(); - curr->type = i32; + curr->type = Type::i32; LocalSet* setLow = builder->makeLocalSet(tmp, curr); LocalSet* setHigh = builder->makeLocalSet( - curr->index + 1, builder->makeLocalGet(highBits, i32)); - LocalGet* getLow = builder->makeLocalGet(tmp, i32); + curr->index + 1, builder->makeLocalGet(highBits, Type::i32)); + LocalGet* getLow = builder->makeLocalGet(tmp, Type::i32); Block* result = builder->blockify(setLow, setHigh, getLow); replaceCurrent(result); setOutParam(result, std::move(highBits)); @@ -327,8 +328,8 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { return; } TempVar highBits = fetchOutParam(curr->value); - auto* setHigh = builder->makeLocalSet(mappedIndex + 1, - builder->makeLocalGet(highBits, i32)); + auto* setHigh = builder->makeLocalSet( + mappedIndex + 1, builder->makeLocalGet(highBits, Type::i32)); Block* result = builder->blockify(curr, setHigh); replaceCurrent(result); } @@ -340,10 +341,10 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { if (!originallyI64Globals.count(curr->name)) { return; } - curr->type = i32; + curr->type = Type::i32; TempVar highBits = getTemp(); LocalSet* setHighBits = builder->makeLocalSet( - highBits, builder->makeGlobalGet(makeHighName(curr->name), i32)); + highBits, builder->makeGlobalGet(makeHighName(curr->name), Type::i32)); Block* result = builder->blockify(setHighBits, curr); replaceCurrent(result); setOutParam(result, std::move(highBits)); @@ -358,12 +359,12 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { } TempVar highBits = fetchOutParam(curr->value); auto* setHigh = builder->makeGlobalSet( - makeHighName(curr->name), builder->makeLocalGet(highBits, i32)); + makeHighName(curr->name), builder->makeLocalGet(highBits, Type::i32)); replaceCurrent(builder->makeSequence(curr, setHigh)); } void visitLoad(Load* curr) { - if (curr->type != i64) { + if (curr->type != Type::i64) { return; } assert(!curr->isAtomic && "atomic load not implemented"); @@ -379,26 +380,27 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { curr->signed_, curr->offset + 4, std::min(uint32_t(curr->align), uint32_t(4)), - builder->makeLocalGet(ptrTemp, i32), - i32)); + builder->makeLocalGet(ptrTemp, Type::i32), + Type::i32)); } else if (curr->signed_) { loadHigh = builder->makeLocalSet( highBits, builder->makeBinary(ShrSInt32, - builder->makeLocalGet(lowBits, i32), + builder->makeLocalGet(lowBits, Type::i32), builder->makeConst(Literal(int32_t(31))))); } else { loadHigh = builder->makeLocalSet(highBits, builder->makeConst(Literal(int32_t(0)))); } - curr->type = i32; + curr->type = Type::i32; curr->bytes = std::min(curr->bytes, uint8_t(4)); curr->align = std::min(uint32_t(curr->align), uint32_t(4)); - curr->ptr = builder->makeLocalGet(ptrTemp, i32); - Block* result = builder->blockify(setPtr, - 
builder->makeLocalSet(lowBits, curr), - loadHigh, - builder->makeLocalGet(lowBits, i32)); + curr->ptr = builder->makeLocalGet(ptrTemp, Type::i32); + Block* result = + builder->blockify(setPtr, + builder->makeLocalSet(lowBits, curr), + loadHigh, + builder->makeLocalGet(lowBits, Type::i32)); replaceCurrent(result); setOutParam(result, std::move(highBits)); } @@ -413,19 +415,19 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { uint8_t bytes = curr->bytes; curr->bytes = std::min(curr->bytes, uint8_t(4)); curr->align = std::min(uint32_t(curr->align), uint32_t(4)); - curr->valueType = i32; + curr->valueType = Type::i32; if (bytes == 8) { TempVar ptrTemp = getTemp(); LocalSet* setPtr = builder->makeLocalSet(ptrTemp, curr->ptr); - curr->ptr = builder->makeLocalGet(ptrTemp, i32); + curr->ptr = builder->makeLocalGet(ptrTemp, Type::i32); curr->finalize(); Store* storeHigh = builder->makeStore(4, curr->offset + 4, std::min(uint32_t(curr->align), uint32_t(4)), - builder->makeLocalGet(ptrTemp, i32), - builder->makeLocalGet(highBits, i32), - i32); + builder->makeLocalGet(ptrTemp, Type::i32), + builder->makeLocalGet(highBits, Type::i32), + Type::i32); replaceCurrent(builder->blockify(setPtr, curr, storeHigh)); } } @@ -442,7 +444,7 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { if (!getFunction()) { return; // if in a global init, skip - we already handled that. } - if (curr->type != i64) { + if (curr->type != Type::i64) { return; } TempVar highBits = getTemp(); @@ -463,7 +465,7 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { auto* result = builder->makeUnary( EqZInt32, builder->makeBinary( - OrInt32, curr->value, builder->makeLocalGet(highBits, i32))); + OrInt32, curr->value, builder->makeLocalGet(highBits, Type::i32))); replaceCurrent(result); } @@ -485,11 +487,11 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { LocalSet* setHigh = builder->makeLocalSet( highBits, builder->makeBinary(ShrSInt32, - builder->makeLocalGet(lowBits, i32), + builder->makeLocalGet(lowBits, Type::i32), builder->makeConst(Literal(int32_t(31))))); - Block* result = - builder->blockify(setLow, setHigh, builder->makeLocalGet(lowBits, i32)); + Block* result = builder->blockify( + setLow, setHigh, builder->makeLocalGet(lowBits, Type::i32)); setOutParam(result, std::move(highBits)); replaceCurrent(result); @@ -506,15 +508,16 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { // our f64 through memory at address 0 TempVar highBits = getTemp(); Block* result = builder->blockify( - builder->makeCall(ABI::wasm2js::SCRATCH_STORE_F64, {curr->value}, none), + builder->makeCall( + ABI::wasm2js::SCRATCH_STORE_F64, {curr->value}, Type::none), builder->makeLocalSet( highBits, builder->makeCall(ABI::wasm2js::SCRATCH_LOAD_I32, {builder->makeConst(Literal(int32_t(1)))}, - i32)), + Type::i32)), builder->makeCall(ABI::wasm2js::SCRATCH_LOAD_I32, {builder->makeConst(Literal(int32_t(0)))}, - i32)); + Type::i32)); setOutParam(result, std::move(highBits)); replaceCurrent(result); MemoryUtils::ensureExists(getModule()->memory); @@ -528,12 +531,12 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { Block* result = builder->blockify( builder->makeCall(ABI::wasm2js::SCRATCH_STORE_I32, {builder->makeConst(Literal(int32_t(0))), curr->value}, - none), + Type::none), builder->makeCall(ABI::wasm2js::SCRATCH_STORE_I32, {builder->makeConst(Literal(int32_t(1))), - builder->makeLocalGet(highBits, 
i32)}, - none), - builder->makeCall(ABI::wasm2js::SCRATCH_LOAD_F64, {}, f64)); + builder->makeLocalGet(highBits, Type::i32)}, + Type::none), + builder->makeCall(ABI::wasm2js::SCRATCH_LOAD_F64, {}, Type::f64)); replaceCurrent(result); MemoryUtils::ensureExists(getModule()->memory); ABI::wasm2js::ensureScratchMemoryHelpers(getModule()); @@ -567,7 +570,7 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { u32Max = Literal(((float)UINT_MAX) + 1); trunc = TruncUFloat32ToInt32; convert = ConvertUInt32ToFloat32; - localType = f32; + localType = Type::f32; abs = AbsFloat32; ge = GeFloat32; gt = GtFloat32; @@ -585,7 +588,7 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { u32Max = Literal(((double)UINT_MAX) + 1); trunc = TruncUFloat64ToInt32; convert = ConvertUInt32ToFloat64; - localType = f64; + localType = Type::f64; abs = AbsFloat64; ge = GeFloat64; gt = GtFloat64; @@ -682,12 +685,12 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { builder->makeBinary( AddFloat64, builder->makeUnary(ConvertUInt32ToFloat64, - builder->makeLocalGet(lowBits, i32)), + builder->makeLocalGet(lowBits, Type::i32)), builder->makeBinary( MulFloat64, builder->makeConst(Literal((double)UINT_MAX + 1)), builder->makeUnary(convertHigh, - builder->makeLocalGet(highBits, i32))))); + builder->makeLocalGet(highBits, Type::i32))))); switch (curr->op) { case ConvertSInt64ToFloat32: @@ -711,20 +714,20 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { TempVar firstResult = getTemp(); LocalSet* setFirst = builder->makeLocalSet( firstResult, - builder->makeUnary(op32, builder->makeLocalGet(first, i32))); + builder->makeUnary(op32, builder->makeLocalGet(first, Type::i32))); Binary* check = builder->makeBinary(EqInt32, - builder->makeLocalGet(firstResult, i32), + builder->makeLocalGet(firstResult, Type::i32), builder->makeConst(Literal(int32_t(32)))); If* conditional = builder->makeIf( check, builder->makeBinary( AddInt32, - builder->makeUnary(op32, builder->makeLocalGet(second, i32)), + builder->makeUnary(op32, builder->makeLocalGet(second, Type::i32)), builder->makeConst(Literal(int32_t(32)))), - builder->makeLocalGet(firstResult, i32)); + builder->makeLocalGet(firstResult, Type::i32)); LocalSet* setHigh = builder->makeLocalSet( highResult, builder->makeConst(Literal(int32_t(0)))); @@ -783,7 +786,8 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { if (handleUnreachable(curr)) { return; } - assert(hasOutParam(curr->value) || curr->type == i64 || curr->type == f64); + assert(hasOutParam(curr->value) || curr->type == Type::i64 || + curr->type == Type::f64); switch (curr->op) { case ClzInt64: case CtzInt64: @@ -837,24 +841,24 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { LocalSet* addLow = builder->makeLocalSet( lowResult, builder->makeBinary(AddInt32, - builder->makeLocalGet(leftLow, i32), - builder->makeLocalGet(rightLow, i32))); + builder->makeLocalGet(leftLow, Type::i32), + builder->makeLocalGet(rightLow, Type::i32))); LocalSet* addHigh = builder->makeLocalSet( highResult, builder->makeBinary(AddInt32, - builder->makeLocalGet(leftHigh, i32), - builder->makeLocalGet(rightHigh, i32))); + builder->makeLocalGet(leftHigh, Type::i32), + builder->makeLocalGet(rightHigh, Type::i32))); LocalSet* carryBit = builder->makeLocalSet( highResult, builder->makeBinary(AddInt32, - builder->makeLocalGet(highResult, i32), + builder->makeLocalGet(highResult, Type::i32), 
builder->makeConst(Literal(int32_t(1))))); - If* checkOverflow = - builder->makeIf(builder->makeBinary(LtUInt32, - builder->makeLocalGet(lowResult, i32), - builder->makeLocalGet(rightLow, i32)), - carryBit); - LocalGet* getLow = builder->makeLocalGet(lowResult, i32); + If* checkOverflow = builder->makeIf( + builder->makeBinary(LtUInt32, + builder->makeLocalGet(lowResult, Type::i32), + builder->makeLocalGet(rightLow, Type::i32)), + carryBit); + LocalGet* getLow = builder->makeLocalGet(lowResult, Type::i32); result = builder->blockify(result, addLow, addHigh, checkOverflow, getLow); setOutParam(result, std::move(highResult)); return result; @@ -871,24 +875,24 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { LocalSet* subLow = builder->makeLocalSet( lowResult, builder->makeBinary(SubInt32, - builder->makeLocalGet(leftLow, i32), - builder->makeLocalGet(rightLow, i32))); + builder->makeLocalGet(leftLow, Type::i32), + builder->makeLocalGet(rightLow, Type::i32))); LocalSet* borrowBit = builder->makeLocalSet( borrow, builder->makeBinary(LtUInt32, - builder->makeLocalGet(leftLow, i32), - builder->makeLocalGet(rightLow, i32))); + builder->makeLocalGet(leftLow, Type::i32), + builder->makeLocalGet(rightLow, Type::i32))); LocalSet* subHigh1 = builder->makeLocalSet( highResult, builder->makeBinary(AddInt32, - builder->makeLocalGet(borrow, i32), - builder->makeLocalGet(rightHigh, i32))); + builder->makeLocalGet(borrow, Type::i32), + builder->makeLocalGet(rightHigh, Type::i32))); LocalSet* subHigh2 = builder->makeLocalSet( highResult, builder->makeBinary(SubInt32, - builder->makeLocalGet(leftHigh, i32), - builder->makeLocalGet(highResult, i32))); - LocalGet* getLow = builder->makeLocalGet(lowResult, i32); + builder->makeLocalGet(leftHigh, Type::i32), + builder->makeLocalGet(highResult, Type::i32))); + LocalGet* getLow = builder->makeLocalGet(lowResult, Type::i32); result = builder->blockify(result, subLow, borrowBit, subHigh1, subHigh2, getLow); setOutParam(result, std::move(highResult)); @@ -920,11 +924,11 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { builder->makeLocalSet( rightHigh, builder->makeBinary(op32, - builder->makeLocalGet(leftHigh, i32), - builder->makeLocalGet(rightHigh, i32))), + builder->makeLocalGet(leftHigh, Type::i32), + builder->makeLocalGet(rightHigh, Type::i32))), builder->makeBinary(op32, - builder->makeLocalGet(leftLow, i32), - builder->makeLocalGet(rightLow, i32))); + builder->makeLocalGet(leftLow, Type::i32), + builder->makeLocalGet(rightLow, Type::i32))); setOutParam(result, std::move(rightHigh)); return result; } @@ -934,8 +938,8 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { builder->makeLocalSet( highBits, builder->makeBinary(ShlInt32, - builder->makeLocalGet(leftLow, i32), - builder->makeLocalGet(shift, i32))), + builder->makeLocalGet(leftLow, Type::i32), + builder->makeLocalGet(shift, Type::i32))), builder->makeConst(Literal(int32_t(0)))); } @@ -950,19 +954,19 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { builder->makeLocalSet( highBits, builder->makeBinary(ShrSInt32, - builder->makeLocalGet(leftHigh, i32), + builder->makeLocalGet(leftHigh, Type::i32), builder->makeConst(Literal(int32_t(31))))), builder->makeBinary(ShrSInt32, - builder->makeLocalGet(leftHigh, i32), - builder->makeLocalGet(shift, i32))); + builder->makeLocalGet(leftHigh, Type::i32), + builder->makeLocalGet(shift, Type::i32))); } Block* makeLargeShrU(Index highBits, Index leftHigh, 
Index shift) { return builder->blockify( builder->makeLocalSet(highBits, builder->makeConst(Literal(int32_t(0)))), builder->makeBinary(ShrUInt32, - builder->makeLocalGet(leftHigh, i32), - builder->makeLocalGet(shift, i32))); + builder->makeLocalGet(leftHigh, Type::i32), + builder->makeLocalGet(shift, Type::i32))); } Block* makeSmallShl(Index highBits, @@ -975,17 +979,17 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { AndInt32, shiftMask, builder->makeBinary( - ShrUInt32, builder->makeLocalGet(leftLow, i32), widthLessShift)); + ShrUInt32, builder->makeLocalGet(leftLow, Type::i32), widthLessShift)); Binary* shiftHigh = builder->makeBinary(ShlInt32, - builder->makeLocalGet(leftHigh, i32), - builder->makeLocalGet(shift, i32)); + builder->makeLocalGet(leftHigh, Type::i32), + builder->makeLocalGet(shift, Type::i32)); return builder->blockify( builder->makeLocalSet( highBits, builder->makeBinary(OrInt32, shiftedInBits, shiftHigh)), builder->makeBinary(ShlInt32, - builder->makeLocalGet(leftLow, i32), - builder->makeLocalGet(shift, i32))); + builder->makeLocalGet(leftLow, Type::i32), + builder->makeLocalGet(shift, Type::i32))); } // a >> b where `b` < 32 @@ -1003,17 +1007,18 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { Binary* shiftedInBits = builder->makeBinary( ShlInt32, builder->makeBinary( - AndInt32, shiftMask, builder->makeLocalGet(leftHigh, i32)), + AndInt32, shiftMask, builder->makeLocalGet(leftHigh, Type::i32)), widthLessShift); - Binary* shiftLow = builder->makeBinary(ShrUInt32, - builder->makeLocalGet(leftLow, i32), - builder->makeLocalGet(shift, i32)); + Binary* shiftLow = + builder->makeBinary(ShrUInt32, + builder->makeLocalGet(leftLow, Type::i32), + builder->makeLocalGet(shift, Type::i32)); return builder->blockify( builder->makeLocalSet( highBits, builder->makeBinary(ShrSInt32, - builder->makeLocalGet(leftHigh, i32), - builder->makeLocalGet(shift, i32))), + builder->makeLocalGet(leftHigh, Type::i32), + builder->makeLocalGet(shift, Type::i32))), builder->makeBinary(OrInt32, shiftedInBits, shiftLow)); } @@ -1026,17 +1031,18 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { Binary* shiftedInBits = builder->makeBinary( ShlInt32, builder->makeBinary( - AndInt32, shiftMask, builder->makeLocalGet(leftHigh, i32)), + AndInt32, shiftMask, builder->makeLocalGet(leftHigh, Type::i32)), widthLessShift); - Binary* shiftLow = builder->makeBinary(ShrUInt32, - builder->makeLocalGet(leftLow, i32), - builder->makeLocalGet(shift, i32)); + Binary* shiftLow = + builder->makeBinary(ShrUInt32, + builder->makeLocalGet(leftLow, Type::i32), + builder->makeLocalGet(shift, Type::i32)); return builder->blockify( builder->makeLocalSet( highBits, builder->makeBinary(ShrUInt32, - builder->makeLocalGet(leftHigh, i32), - builder->makeLocalGet(shift, i32))), + builder->makeLocalGet(leftHigh, Type::i32), + builder->makeLocalGet(shift, Type::i32))), builder->makeBinary(OrInt32, shiftedInBits, shiftLow)); } @@ -1058,13 +1064,13 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { LocalSet* setShift = builder->makeLocalSet( shift, builder->makeBinary(AndInt32, - builder->makeLocalGet(rightLow, i32), + builder->makeLocalGet(rightLow, Type::i32), builder->makeConst(Literal(int32_t(32 - 1))))); Binary* isLargeShift = builder->makeBinary( LeUInt32, builder->makeConst(Literal(int32_t(32))), builder->makeBinary(AndInt32, - builder->makeLocalGet(rightLow, i32), + builder->makeLocalGet(rightLow, Type::i32), 
builder->makeConst(Literal(int32_t(64 - 1))))); Block* largeShiftBlock; switch (op) { @@ -1084,12 +1090,12 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { SubInt32, builder->makeBinary(ShlInt32, builder->makeConst(Literal(int32_t(1))), - builder->makeLocalGet(shift, i32)), + builder->makeLocalGet(shift, Type::i32)), builder->makeConst(Literal(int32_t(1)))); Binary* widthLessShift = builder->makeBinary(SubInt32, builder->makeConst(Literal(int32_t(32))), - builder->makeLocalGet(shift, i32)); + builder->makeLocalGet(shift, Type::i32)); Block* smallShiftBlock; switch (op) { case ShlInt64: { @@ -1127,11 +1133,11 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { builder->makeBinary( AndInt32, builder->makeBinary(EqInt32, - builder->makeLocalGet(leftLow, i32), - builder->makeLocalGet(rightLow, i32)), + builder->makeLocalGet(leftLow, Type::i32), + builder->makeLocalGet(rightLow, Type::i32)), builder->makeBinary(EqInt32, - builder->makeLocalGet(leftHigh, i32), - builder->makeLocalGet(rightHigh, i32)))); + builder->makeLocalGet(leftHigh, Type::i32), + builder->makeLocalGet(rightHigh, Type::i32)))); } Block* lowerNe(Block* result, @@ -1144,11 +1150,11 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { builder->makeBinary( OrInt32, builder->makeBinary(NeInt32, - builder->makeLocalGet(leftLow, i32), - builder->makeLocalGet(rightLow, i32)), + builder->makeLocalGet(leftLow, Type::i32), + builder->makeLocalGet(rightLow, Type::i32)), builder->makeBinary(NeInt32, - builder->makeLocalGet(leftHigh, i32), - builder->makeLocalGet(rightHigh, i32)))); + builder->makeLocalGet(leftHigh, Type::i32), + builder->makeLocalGet(rightHigh, Type::i32)))); } Block* lowerUComp(BinaryOp op, @@ -1180,14 +1186,16 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { } Binary* compHigh = builder->makeBinary(highOp, - builder->makeLocalGet(leftHigh, i32), - builder->makeLocalGet(rightHigh, i32)); - Binary* eqHigh = builder->makeBinary(EqInt32, - builder->makeLocalGet(leftHigh, i32), - builder->makeLocalGet(rightHigh, i32)); - Binary* compLow = builder->makeBinary(lowOp, - builder->makeLocalGet(leftLow, i32), - builder->makeLocalGet(rightLow, i32)); + builder->makeLocalGet(leftHigh, Type::i32), + builder->makeLocalGet(rightHigh, Type::i32)); + Binary* eqHigh = + builder->makeBinary(EqInt32, + builder->makeLocalGet(leftHigh, Type::i32), + builder->makeLocalGet(rightHigh, Type::i32)); + Binary* compLow = + builder->makeBinary(lowOp, + builder->makeLocalGet(leftLow, Type::i32), + builder->makeLocalGet(rightLow, Type::i32)); return builder->blockify( result, builder->makeBinary( @@ -1227,15 +1235,16 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { } Binary* compHigh1 = builder->makeBinary(highOp1, - builder->makeLocalGet(leftHigh, i32), - builder->makeLocalGet(rightHigh, i32)); + builder->makeLocalGet(leftHigh, Type::i32), + builder->makeLocalGet(rightHigh, Type::i32)); Binary* compHigh2 = builder->makeBinary(highOp2, - builder->makeLocalGet(leftHigh, i32), - builder->makeLocalGet(rightHigh, i32)); - Binary* compLow = builder->makeBinary(lowOp, - builder->makeLocalGet(leftLow, i32), - builder->makeLocalGet(rightLow, i32)); + builder->makeLocalGet(leftHigh, Type::i32), + builder->makeLocalGet(rightHigh, Type::i32)); + Binary* compLow = + builder->makeBinary(lowOp, + builder->makeLocalGet(leftLow, Type::i32), + builder->makeLocalGet(rightLow, Type::i32)); If* lowIf = builder->makeIf(compLow, 
builder->makeConst(Literal(int32_t(0))), builder->makeConst(Literal(int32_t(1)))); @@ -1405,14 +1414,14 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { builder->makeLocalSet( lowBits, builder->makeSelect( - builder->makeLocalGet(cond, i32), curr->ifTrue, curr->ifFalse)), + builder->makeLocalGet(cond, Type::i32), curr->ifTrue, curr->ifFalse)), builder->makeLocalSet( highBits, builder->makeSelect( - builder->makeLocalGet(cond, i32), - builder->makeLocalGet(fetchOutParam(curr->ifTrue), i32), - builder->makeLocalGet(fetchOutParam(curr->ifFalse), i32))), - builder->makeLocalGet(lowBits, i32)); + builder->makeLocalGet(cond, Type::i32), + builder->makeLocalGet(fetchOutParam(curr->ifTrue), Type::i32), + builder->makeLocalGet(fetchOutParam(curr->ifFalse), Type::i32))), + builder->makeLocalGet(lowBits, Type::i32)); setOutParam(result, std::move(highBits)); replaceCurrent(result); } @@ -1433,8 +1442,8 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> { TempVar highBits = fetchOutParam(curr->value); LocalSet* setLow = builder->makeLocalSet(lowBits, curr->value); GlobalSet* setHigh = builder->makeGlobalSet( - INT64_TO_32_HIGH_BITS, builder->makeLocalGet(highBits, i32)); - curr->value = builder->makeLocalGet(lowBits, i32); + INT64_TO_32_HIGH_BITS, builder->makeLocalGet(highBits, Type::i32)); + curr->value = builder->makeLocalGet(lowBits, Type::i32); Block* result = builder->blockify(setLow, setHigh, curr); replaceCurrent(result); } @@ -1448,7 +1457,7 @@ private: std::unordered_set<Name> originallyI64Globals; Index nextTemp; - TempVar getTemp(Type ty = i32) { + TempVar getTemp(Type ty = Type::i32) { Index ret; auto& freeList = freeTemps[(int)ty]; if (freeList.size() > 0) { @@ -1486,7 +1495,7 @@ private: // unconditionally before themselves, so it is not valid for an if, // in particular. bool handleUnreachable(Expression* curr) { - if (curr->type != unreachable) { + if (curr->type != Type::unreachable) { return false; } std::vector<Expression*> children; @@ -1494,7 +1503,7 @@ private: for (auto* child : ChildIterator(curr)) { if (child->type.isConcrete()) { child = builder->makeDrop(child); - } else if (child->type == unreachable) { + } else if (child->type == Type::unreachable) { hasUnreachable = true; } children.push_back(child); @@ -1505,7 +1514,7 @@ private: // This has an unreachable child, so we can replace it with // the children. 
auto* block = builder->makeBlock(children); - assert(block->type == unreachable); + assert(block->type == Type::unreachable); replaceCurrent(block); return true; } diff --git a/src/passes/Inlining.cpp b/src/passes/Inlining.cpp index c43d41e7f..5737359a3 100644 --- a/src/passes/Inlining.cpp +++ b/src/passes/Inlining.cpp @@ -154,12 +154,12 @@ struct Planner : public WalkerPass<PostWalker<Planner>> { bool isUnreachable; if (curr->isReturn) { // Tail calls are only actually unreachable if an argument is - isUnreachable = - std::any_of(curr->operands.begin(), - curr->operands.end(), - [](Expression* op) { return op->type == unreachable; }); + isUnreachable = std::any_of( + curr->operands.begin(), curr->operands.end(), [](Expression* op) { + return op->type == Type::unreachable; + }); } else { - isUnreachable = curr->type == unreachable; + isUnreachable = curr->type == Type::unreachable; } if (state->worthInlining.count(curr->target) && !isUnreachable && curr->target != getFunction()->name) { @@ -273,7 +273,7 @@ doInlining(Module* module, Function* into, const InliningAction& action) { // contained void, that is fine too. a bad case is a void function in which // we have unreachable code, so we would be replacing a void call with an // unreachable. - if (contents->type == unreachable && block->type == none) { + if (contents->type == Type::unreachable && block->type == Type::none) { // Make the block reachable by adding a break to it block->list.push_back(builder.makeBreak(block->name)); } diff --git a/src/passes/InstrumentLocals.cpp b/src/passes/InstrumentLocals.cpp index ae35ec2d1..3e3be6244 100644 --- a/src/passes/InstrumentLocals.cpp +++ b/src/passes/InstrumentLocals.cpp @@ -75,33 +75,33 @@ struct InstrumentLocals : public WalkerPass<PostWalker<InstrumentLocals>> { Builder builder(*getModule()); Name import; switch (curr->type) { - case i32: + case Type::i32: import = get_i32; break; - case i64: + case Type::i64: return; // TODO - case f32: + case Type::f32: import = get_f32; break; - case f64: + case Type::f64: import = get_f64; break; - case v128: + case Type::v128: assert(false && "v128 not implemented yet"); - case funcref: + case Type::funcref: import = get_funcref; break; - case anyref: + case Type::anyref: import = get_anyref; break; - case nullref: + case Type::nullref: import = get_nullref; break; - case exnref: + case Type::exnref: import = get_exnref; break; - case none: - case unreachable: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } replaceCurrent( @@ -123,34 +123,34 @@ struct InstrumentLocals : public WalkerPass<PostWalker<InstrumentLocals>> { Builder builder(*getModule()); Name import; switch (curr->value->type) { - case i32: + case Type::i32: import = set_i32; break; - case i64: + case Type::i64: return; // TODO - case f32: + case Type::f32: import = set_f32; break; - case f64: + case Type::f64: import = set_f64; break; - case v128: + case Type::v128: assert(false && "v128 not implemented yet"); - case funcref: + case Type::funcref: import = set_funcref; break; - case anyref: + case Type::anyref: import = set_anyref; break; - case nullref: + case Type::nullref: import = set_nullref; break; - case exnref: + case Type::exnref: import = set_exnref; break; - case unreachable: + case Type::unreachable: return; // nothing to do here - case none: + case Type::none: WASM_UNREACHABLE("unexpected type"); } curr->value = diff --git a/src/passes/InstrumentMemory.cpp b/src/passes/InstrumentMemory.cpp index 9a805b19b..54f763734 100644 --- 
a/src/passes/InstrumentMemory.cpp +++ b/src/passes/InstrumentMemory.cpp @@ -84,19 +84,19 @@ struct InstrumentMemory : public WalkerPass<PostWalker<InstrumentMemory>> { builder.makeConst(Literal(int32_t(curr->bytes))), builder.makeConst(Literal(int32_t(curr->offset.addr))), curr->ptr}, - i32); + Type::i32); Name target; switch (curr->type) { - case i32: + case Type::i32: target = load_val_i32; break; - case i64: + case Type::i64: target = load_val_i64; break; - case f32: + case Type::f32: target = load_val_f32; break; - case f64: + case Type::f64: target = load_val_f64; break; default: @@ -115,19 +115,19 @@ struct InstrumentMemory : public WalkerPass<PostWalker<InstrumentMemory>> { builder.makeConst(Literal(int32_t(curr->bytes))), builder.makeConst(Literal(int32_t(curr->offset.addr))), curr->ptr}, - i32); + Type::i32); Name target; switch (curr->value->type) { - case i32: + case Type::i32: target = store_val_i32; break; - case i64: + case Type::i64: target = store_val_i64; break; - case f32: + case Type::f32: target = store_val_f32; break; - case f64: + case Type::f64: target = store_val_f64; break; default: diff --git a/src/passes/LegalizeJSInterface.cpp b/src/passes/LegalizeJSInterface.cpp index df6651b0d..3a28e1745 100644 --- a/src/passes/LegalizeJSInterface.cpp +++ b/src/passes/LegalizeJSInterface.cpp @@ -244,7 +244,7 @@ private: auto* block = builder.makeBlock(); block->list.push_back(builder.makeLocalSet(index, call)); block->list.push_back(builder.makeCall( - f->name, {I64Utilities::getI64High(builder, index)}, none)); + f->name, {I64Utilities::getI64High(builder, index)}, Type::none)); block->list.push_back(I64Utilities::getI64Low(builder, index)); block->finalize(); legal->body = block; @@ -281,8 +281,8 @@ private: if (imParams[i] == Type::i64) { call->operands.push_back(I64Utilities::getI64Low(builder, i)); call->operands.push_back(I64Utilities::getI64High(builder, i)); - params.push_back(i32); - params.push_back(i32); + params.push_back(Type::i32); + params.push_back(Type::i32); } else { call->operands.push_back(builder.makeLocalGet(i, imParams[i])); params.push_back(imParams[i]); diff --git a/src/passes/LogExecution.cpp b/src/passes/LogExecution.cpp index 611f79dfd..a6caf1c1f 100644 --- a/src/passes/LogExecution.cpp +++ b/src/passes/LogExecution.cpp @@ -72,7 +72,7 @@ private: Builder builder(*getModule()); return builder.makeSequence( builder.makeCall( - LOGGER, {builder.makeConst(Literal(int32_t(id++)))}, none), + LOGGER, {builder.makeConst(Literal(int32_t(id++)))}, Type::none), curr); } }; diff --git a/src/passes/LoopInvariantCodeMotion.cpp b/src/passes/LoopInvariantCodeMotion.cpp index c4880114d..a95f4c8eb 100644 --- a/src/passes/LoopInvariantCodeMotion.cpp +++ b/src/passes/LoopInvariantCodeMotion.cpp @@ -189,7 +189,7 @@ struct LoopInvariantCodeMotion bool interestingToMove(Expression* curr) { // In theory we could consider blocks, but then heavy nesting of // switch patterns would be heavy, and almost always pointless. 
- if (curr->type != none || curr->is<Nop>() || curr->is<Block>() || + if (curr->type != Type::none || curr->is<Nop>() || curr->is<Block>() || curr->is<Loop>()) { return false; } diff --git a/src/passes/MergeBlocks.cpp b/src/passes/MergeBlocks.cpp index 8e04ed47f..276c50d9e 100644 --- a/src/passes/MergeBlocks.cpp +++ b/src/passes/MergeBlocks.cpp @@ -148,7 +148,7 @@ struct BreakValueDropper : public ControlFlowWalker<BreakValueDropper> { if (curr->value && curr->name == origin) { Builder builder(*getModule()); auto* value = curr->value; - if (value->type == unreachable) { + if (value->type == Type::unreachable) { // the break isn't even reached replaceCurrent(value); return; @@ -172,7 +172,7 @@ struct BreakValueDropper : public ControlFlowWalker<BreakValueDropper> { static bool hasUnreachableChild(Block* block) { for (auto* test : block->list) { - if (test->type == unreachable) { + if (test->type == Type::unreachable) { return true; } } @@ -184,7 +184,7 @@ static bool hasDeadCode(Block* block) { auto& list = block->list; auto size = list.size(); for (size_t i = 1; i < size; i++) { - if (list[i - 1]->type == unreachable) { + if (list[i - 1]->type == Type::unreachable) { return true; } } @@ -437,13 +437,13 @@ struct MergeBlocks : public WalkerPass<PostWalker<MergeBlocks>> { if (!block->name.is() && block->list.size() >= 2) { // if we move around unreachable code, type changes could occur. avoid // that, as anyhow it means we should have run dce before getting here - if (curr->type == none && hasUnreachableChild(block)) { + if (curr->type == Type::none && hasUnreachableChild(block)) { // moving the block to the outside would replace a none with an // unreachable return outer; } auto* back = block->list.back(); - if (back->type == unreachable) { + if (back->type == Type::unreachable) { // curr is not reachable, dce could remove it; don't try anything // fancy here return outer; diff --git a/src/passes/OptimizeAddedConstants.cpp b/src/passes/OptimizeAddedConstants.cpp index a34d1b96d..f0c1ec2d9 100644 --- a/src/passes/OptimizeAddedConstants.cpp +++ b/src/passes/OptimizeAddedConstants.cpp @@ -213,7 +213,7 @@ private: index = parent->getHelperIndex(set); } curr->offset = result.total; - curr->ptr = Builder(*module).makeLocalGet(index, i32); + curr->ptr = Builder(*module).makeLocalGet(index, Type::i32); return true; } } @@ -304,7 +304,7 @@ struct OptimizeAddedConstants return iter->second; } return helperIndexes[set] = - Builder(*getModule()).addVar(getFunction(), i32); + Builder(*getModule()).addVar(getFunction(), Type::i32); } bool isPropagatable(LocalSet* set) { return propagatable.count(set); } @@ -387,7 +387,7 @@ private: } auto* value = *target; Builder builder(*module); - *target = builder.makeLocalGet(index, i32); + *target = builder.makeLocalGet(index, Type::i32); replaceCurrent( builder.makeSequence(builder.makeLocalSet(index, value), curr)); } diff --git a/src/passes/OptimizeInstructions.cpp b/src/passes/OptimizeInstructions.cpp index edd6ba2b6..2372df462 100644 --- a/src/passes/OptimizeInstructions.cpp +++ b/src/passes/OptimizeInstructions.cpp @@ -54,9 +54,9 @@ template<typename LocalInfoProvider> Index getMaxBits(Expression* curr, LocalInfoProvider* localInfoProvider) { if (auto* const_ = curr->dynCast<Const>()) { switch (curr->type) { - case i32: + case Type::i32: return 32 - const_->value.countLeadingZeroes().geti32(); - case i64: + case Type::i64: return 64 - const_->value.countLeadingZeroes().geti64(); default: WASM_UNREACHABLE("invalid type"); @@ -179,11 +179,11 @@ Index 
getMaxBits(Expression* curr, LocalInfoProvider* localInfoProvider) { } } switch (curr->type) { - case i32: + case Type::i32: return 32; - case i64: + case Type::i64: return 64; - case unreachable: + case Type::unreachable: return 64; // not interesting, but don't crash default: WASM_UNREACHABLE("invalid type"); @@ -232,7 +232,7 @@ struct LocalScanner : PostWalker<LocalScanner> { return; } auto type = getFunction()->getLocalType(curr->index); - if (type != i32 && type != i64) { + if (type != Type::i32 && type != Type::i64) { return; } // an integer var, worth processing @@ -261,9 +261,9 @@ struct LocalScanner : PostWalker<LocalScanner> { Index getBitsForType(Type type) { switch (type) { - case i32: + case Type::i32: return 32; - case i64: + case Type::i64: return 64; default: return -1; @@ -334,7 +334,7 @@ struct OptimizeInstructions // might change (if might not be unreachable if just one arm is, for // example). this optimization pass focuses on actually executing code. the // only exceptions are control flow changes - if (curr->type == unreachable && !curr->is<Break>() && + if (curr->type == Type::unreachable && !curr->is<Break>() && !curr->is<Switch>() && !curr->is<If>()) { return nullptr; } @@ -567,7 +567,7 @@ struct OptimizeInstructions } } // math operations on a constant power of 2 right side can be optimized - if (right->type == i32) { + if (right->type == Type::i32) { uint32_t c = right->value.geti32(); if (IsPowerOf2(c)) { if (binary->op == MulInt32) { @@ -744,7 +744,7 @@ struct OptimizeInstructions std::swap(iff->ifTrue, iff->ifFalse); } } - if (iff->condition->type != unreachable && + if (iff->condition->type != Type::unreachable && ExpressionAnalyzer::equal(iff->ifTrue, iff->ifFalse)) { // sides are identical, fold // if we can replace the if with one arm, and no side effects in the @@ -763,7 +763,7 @@ struct OptimizeInstructions // the types diff. 
as the condition is reachable, that means the // if must be concrete while the arm is not assert(iff->type.isConcrete() && - iff->ifTrue->type == unreachable); + iff->ifTrue->type == Type::unreachable); // emit a block with a forced type auto* ret = builder.makeBlock(); if (needCondition) { @@ -847,7 +847,7 @@ struct OptimizeInstructions if (auto* binary = store->value->dynCast<Binary>()) { if (binary->op == AndInt32) { if (auto* right = binary->right->dynCast<Const>()) { - if (right->type == i32) { + if (right->type == Type::i32) { auto mask = right->value.geti32(); if ((store->bytes == 1 && mask == 0xff) || (store->bytes == 2 && mask == 0xffff)) { @@ -866,7 +866,7 @@ struct OptimizeInstructions } else if (auto* unary = store->value->dynCast<Unary>()) { if (unary->op == WrapInt64) { // instead of wrapping to 32, just store some of the bits in the i64 - store->valueType = i64; + store->valueType = Type::i64; store->value = unary->value; } } @@ -968,11 +968,11 @@ private: return makeZeroExt(ext, Properties::getSignExtBits(binary)); } } else if (auto* block = boolean->dynCast<Block>()) { - if (block->type == i32 && block->list.size() > 0) { + if (block->type == Type::i32 && block->list.size() > 0) { block->list.back() = optimizeBoolean(block->list.back()); } } else if (auto* iff = boolean->dynCast<If>()) { - if (iff->type == i32) { + if (iff->type == Type::i32) { iff->ifTrue = optimizeBoolean(iff->ifTrue); iff->ifFalse = optimizeBoolean(iff->ifFalse); } @@ -1469,7 +1469,7 @@ private: case LtUInt32: case GtSInt32: case GtUInt32: - return LiteralUtils::makeZero(i32, *getModule()); + return LiteralUtils::makeZero(Type::i32, *getModule()); case AndInt32: case OrInt32: case AndInt64: @@ -1485,7 +1485,7 @@ private: case LeUInt64: case GeSInt64: case GeUInt64: - return LiteralUtils::makeFromInt32(1, i32, *getModule()); + return LiteralUtils::makeFromInt32(1, Type::i32, *getModule()); default: return nullptr; } diff --git a/src/passes/PostAssemblyScript.cpp b/src/passes/PostAssemblyScript.cpp index 4e41c8f08..eeb077ce9 100644 --- a/src/passes/PostAssemblyScript.cpp +++ b/src/passes/PostAssemblyScript.cpp @@ -88,8 +88,8 @@ struct AliasGraph : LocalGraph { // consider a full retain pattern, which must also set a local. static bool isRetainCall(Call* expr) { // __retain(...) - return expr->target == RETAIN && expr->type == i32 && - expr->operands.size() == 1 && expr->operands[0]->type == i32; + return expr->target == RETAIN && expr->type == Type::i32 && + expr->operands.size() == 1 && expr->operands[0]->type == Type::i32; } // Tests if a local.set is considered to be a full retain pattern. @@ -117,8 +117,8 @@ static bool isRetainLocation(Expression** expr) { // consider a full release pattern, which must also get a local. static bool isReleaseCall(Call* expr) { // __release(...) - return expr->target == RELEASE && expr->type == none && - expr->operands.size() == 1 && expr->operands[0]->type == i32; + return expr->target == RELEASE && expr->type == Type::none && + expr->operands.size() == 1 && expr->operands[0]->type == Type::i32; } // Tests if the given location is that of a full release pattern. Note that @@ -138,7 +138,7 @@ static bool isReleaseLocation(Expression** expr) { // Tests if the given call calls any allocation function. static bool isAllocCall(Call* expr) { return (expr->target == ALLOC || expr->target == ALLOCARRAY) && - expr->type == i32; + expr->type == Type::i32; } // A pass that eliminates redundant retain and release calls. 
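Aside on the 64-bit add/sub lowering touched earlier in this diff: the I64ToI32Lowering hunks keep every i64 as a low/high pair of i32 values, and lower 64-bit add and sub to 32-bit operations, detecting the carry or borrow with an unsigned compare of the low halves (the LtUInt32 checks above). A self-contained C++ sketch of the same arithmetic, checked against native 64-bit math (illustrative only, not Binaryen code):

#include <cassert>
#include <cstdint>

// Low/high halves standing in for the pair of i32 locals the pass uses.
struct Pair64 {
  uint32_t lo, hi;
};

static Pair64 split(uint64_t x) { return {uint32_t(x), uint32_t(x >> 32)}; }
static uint64_t join(Pair64 p) { return (uint64_t(p.hi) << 32) | p.lo; }

// Add: carry into the high word exactly when the low-word add wrapped,
// i.e. when the 32-bit sum is unsigned-less than one of the addends.
static Pair64 add64(Pair64 a, Pair64 b) {
  Pair64 r;
  r.lo = a.lo + b.lo;
  r.hi = a.hi + b.hi + (r.lo < b.lo ? 1u : 0u);
  return r;
}

// Sub: borrow from the high word exactly when a.lo < b.lo (unsigned).
static Pair64 sub64(Pair64 a, Pair64 b) {
  Pair64 r;
  uint32_t borrow = a.lo < b.lo ? 1u : 0u;
  r.lo = a.lo - b.lo;
  r.hi = a.hi - (b.hi + borrow);
  return r;
}

int main() {
  uint64_t a = 0x00000001ffffffffull;
  uint64_t b = 0x0000000100000001ull;
  assert(join(add64(split(a), split(b))) == a + b);
  assert(join(sub64(split(b), split(a))) == b - a);
  return 0;
}

The makeSmallShl/makeLargeShrU-style helpers in the same hunks play the analogous game for shifts, splitting on whether the shift amount crosses the 32-bit boundary.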
diff --git a/src/passes/Precompute.cpp b/src/passes/Precompute.cpp index 85eb026f9..21393c1cf 100644 --- a/src/passes/Precompute.cpp +++ b/src/passes/Precompute.cpp @@ -199,7 +199,7 @@ struct Precompute // this expression causes a return. if it's already a return, reuse the // node if (auto* ret = curr->dynCast<Return>()) { - if (flow.value.type != none) { + if (flow.value.type != Type::none) { // reuse a const value if there is one if (ret->value) { if (auto* value = ret->value->dynCast<Const>()) { @@ -226,7 +226,7 @@ struct Precompute if (auto* br = curr->dynCast<Break>()) { br->name = flow.breakTo; br->condition = nullptr; - if (flow.value.type != none) { + if (flow.value.type != Type::none) { // reuse a const value if there is one if (br->value) { if (auto* value = br->value->dynCast<Const>()) { @@ -243,10 +243,11 @@ struct Precompute br->finalize(); } else { Builder builder(*getModule()); - replaceCurrent(builder.makeBreak( - flow.breakTo, - flow.value.type != none ? builder.makeConstExpression(flow.value) - : nullptr)); + replaceCurrent( + builder.makeBreak(flow.breakTo, + flow.value.type != Type::none + ? builder.makeConstExpression(flow.value) + : nullptr)); } return; } diff --git a/src/passes/Print.cpp b/src/passes/Print.cpp index 549aa661d..106966ef1 100644 --- a/src/passes/Print.cpp +++ b/src/passes/Print.cpp @@ -89,7 +89,9 @@ std::ostream& operator<<(std::ostream& os, SigName sigName) { // Printing "unreachable" as a instruction prefix type is not valid in wasm text // format. Print something else to make it pass. -static Type forceConcrete(Type type) { return type.isConcrete() ? type : i32; } +static Type forceConcrete(Type type) { + return type.isConcrete() ? type : Type::i32; +} // Prints the internal contents of an expression: everything but // the children. 
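The Precompute.cpp hunks above only substitute a value when the computed flow actually carries one (flow.value.type != Type::none), reusing an existing Const node where possible. As a very rough illustration of the underlying idea on a toy expression tree (this is not Binaryen's IR or its interpreter, just the shape of the transform):

#include <iostream>
#include <memory>
#include <optional>

// Toy AST: a node is either a literal or an addition of two children.
struct Expr {
  std::optional<int> lit;
  std::unique_ptr<Expr> left, right;
};

std::unique_ptr<Expr> literal(int v) {
  auto e = std::make_unique<Expr>();
  e->lit = v;
  return e;
}

std::unique_ptr<Expr> add(std::unique_ptr<Expr> l, std::unique_ptr<Expr> r) {
  auto e = std::make_unique<Expr>();
  e->left = std::move(l);
  e->right = std::move(r);
  return e;
}

// Evaluate if every leaf is constant; nullopt stands in for "not precomputable".
std::optional<int> eval(const Expr& e) {
  if (e.lit) {
    return e.lit;
  }
  auto l = eval(*e.left);
  auto r = eval(*e.right);
  if (l && r) {
    return *l + *r;
  }
  return std::nullopt;
}

// Precompute: collapse the whole subtree into a single literal when possible.
void precompute(Expr& e) {
  if (auto v = eval(e)) {
    e.lit = v;
    e.left.reset();
    e.right.reset();
  }
}

int main() {
  auto e = add(literal(2), add(literal(3), literal(4)));
  precompute(*e);
  std::cout << *e->lit << "\n"; // 9
}

The real pass runs the full interpreter over the expression and, as the hunks show, also handles breaks and returns that carry values.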
@@ -186,7 +188,8 @@ struct PrintExpressionContents o << ".atomic"; } o << ".load"; - if (curr->type != unreachable && curr->bytes < curr->type.getByteSize()) { + if (curr->type != Type::unreachable && + curr->bytes < curr->type.getByteSize()) { if (curr->bytes == 1) { o << '8'; } else if (curr->bytes == 2) { @@ -212,7 +215,7 @@ struct PrintExpressionContents o << ".atomic"; } o << ".store"; - if (curr->bytes < 4 || (curr->valueType == i64 && curr->bytes < 8)) { + if (curr->bytes < 4 || (curr->valueType == Type::i64 && curr->bytes < 8)) { if (curr->bytes == 1) { o << '8'; } else if (curr->bytes == 2) { @@ -233,7 +236,7 @@ struct PrintExpressionContents } static void printRMWSize(std::ostream& o, Type type, uint8_t bytes) { prepareColor(o) << forceConcrete(type) << ".atomic.rmw"; - if (type != unreachable && bytes != type.getByteSize()) { + if (type != Type::unreachable && bytes != type.getByteSize()) { if (bytes == 1) { o << '8'; } else if (bytes == 2) { @@ -269,7 +272,8 @@ struct PrintExpressionContents o << "xchg"; break; } - if (curr->type != unreachable && curr->bytes != curr->type.getByteSize()) { + if (curr->type != Type::unreachable && + curr->bytes != curr->type.getByteSize()) { o << "_u"; } restoreNormalColor(o); @@ -281,7 +285,8 @@ struct PrintExpressionContents prepareColor(o); printRMWSize(o, curr->type, curr->bytes); o << "cmpxchg"; - if (curr->type != unreachable && curr->bytes != curr->type.getByteSize()) { + if (curr->type != Type::unreachable && + curr->bytes != curr->type.getByteSize()) { o << "_u"; } restoreNormalColor(o); diff --git a/src/passes/ReReloop.cpp b/src/passes/ReReloop.cpp index 53a232dc1..8f83f8a8f 100644 --- a/src/passes/ReReloop.cpp +++ b/src/passes/ReReloop.cpp @@ -320,7 +320,7 @@ struct ReReloop final : public Pass { // anywhere. add a return as needed for (auto* cfgBlock : relooper->Blocks) { auto* block = cfgBlock->Code->cast<Block>(); - if (cfgBlock->BranchesOut.empty() && block->type != unreachable) { + if (cfgBlock->BranchesOut.empty() && block->type != Type::unreachable) { block->list.push_back(function->sig.results == Type::none ? (Expression*)builder->makeReturn() : (Expression*)builder->makeUnreachable()); @@ -345,7 +345,7 @@ struct ReReloop final : public Pass { relooper->Calculate(entry); // render { - auto temp = builder->addVar(function, i32); + auto temp = builder->addVar(function, Type::i32); CFG::RelooperBuilder builder(*module, temp); function->body = relooper->Render(builder); // if the function has a result, and the relooper emitted diff --git a/src/passes/RemoveNonJSOps.cpp b/src/passes/RemoveNonJSOps.cpp index eebd97ae1..d758440f2 100644 --- a/src/passes/RemoveNonJSOps.cpp +++ b/src/passes/RemoveNonJSOps.cpp @@ -158,12 +158,12 @@ struct RemoveNonJSOpsPass : public WalkerPass<PostWalker<RemoveNonJSOpsPass>> { // can actually implement) and then use reinterpretation to get the float // back out. switch (curr->type) { - case f32: - curr->type = i32; + case Type::f32: + curr->type = Type::i32; replaceCurrent(builder->makeUnary(ReinterpretInt32, curr)); break; - case f64: - curr->type = i64; + case Type::f64: + curr->type = Type::i64; replaceCurrent(builder->makeUnary(ReinterpretInt64, curr)); break; default: @@ -180,12 +180,12 @@ struct RemoveNonJSOpsPass : public WalkerPass<PostWalker<RemoveNonJSOpsPass>> { // we can actually implement) and then use reinterpretation to store the // right value. 
switch (curr->valueType) { - case f32: - curr->valueType = i32; + case Type::f32: + curr->valueType = Type::i32; curr->value = builder->makeUnary(ReinterpretFloat32, curr->value); break; - case f64: - curr->valueType = i64; + case Type::f64: + curr->valueType = Type::i64; curr->value = builder->makeUnary(ReinterpretFloat64, curr->value); break; default: diff --git a/src/passes/RemoveUnusedBrs.cpp b/src/passes/RemoveUnusedBrs.cpp index e0174934a..786fcaf67 100644 --- a/src/passes/RemoveUnusedBrs.cpp +++ b/src/passes/RemoveUnusedBrs.cpp @@ -37,7 +37,7 @@ static bool canTurnIfIntoBrIf(Expression* ifCondition, Expression* brValue, PassOptions& options) { // if the if isn't even reached, this is all dead code anyhow - if (ifCondition->type == unreachable) { + if (ifCondition->type == Type::unreachable) { return false; } if (!brValue) { @@ -104,7 +104,7 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> { flows.push_back(currp); } else if (curr->is<If>()) { auto* iff = curr->cast<If>(); - if (iff->condition->type == unreachable) { + if (iff->condition->type == Type::unreachable) { // avoid trying to optimize this, we never reach it anyhow self->stopFlow(); return; @@ -118,7 +118,8 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> { // there is no way to emit a proper type for one arm being // none and the other flowing a value; and there is no way // to flow a value from a none. - if (iff->ifTrue->type == none || iff->ifFalse->type == none) { + if (iff->ifTrue->type == Type::none || + iff->ifFalse->type == Type::none) { self->removeValueFlow(ifTrueFlows); self->stopValueFlow(); } @@ -166,7 +167,7 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> { // invalid to represent as such. auto size = list.size(); for (Index i = 0; i < size; i++) { - if (i != size - 1 && list[i]->type == unreachable) { + if (i != size - 1 && list[i]->type == Type::unreachable) { // No value flows out of this block. self->stopValueFlow(); break; @@ -283,17 +284,18 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> { } // great, we are in that case, optimize Builder builder(*getModule()); - auto temp = builder.addVar(getFunction(), i32); + auto temp = builder.addVar(getFunction(), Type::i32); Expression* z; replaceCurrent( z = builder.makeIf( - builder.makeLocalTee(temp, curr->condition, i32), - builder.makeIf(builder.makeBinary(EqInt32, - builder.makeLocalGet(temp, i32), - builder.makeConst(Literal(int32_t( - curr->targets.size() - 1)))), - builder.makeBreak(curr->targets.back()), - builder.makeBreak(curr->default_)), + builder.makeLocalTee(temp, curr->condition, Type::i32), + builder.makeIf( + builder.makeBinary( + EqInt32, + builder.makeLocalGet(temp, Type::i32), + builder.makeConst(Literal(int32_t(curr->targets.size() - 1)))), + builder.makeBreak(curr->targets.back()), + builder.makeBreak(curr->default_)), builder.makeBreak(curr->targets.front()))); } } @@ -318,7 +320,7 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> { // avoid one branch. // If running the br's condition unconditionally is too expensive, // give up. 
- auto* zero = LiteralUtils::makeZero(i32, *getModule()); + auto* zero = LiteralUtils::makeZero(Type::i32, *getModule()); if (tooCostlyToRunUnconditionally( getPassOptions(), br->condition, zero)) { return; @@ -353,7 +355,7 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> { auto* iff = (*currp)->dynCast<If>(); if (iff) { - if (iff->condition->type == unreachable) { + if (iff->condition->type == Type::unreachable) { // avoid trying to optimize this, we never reach it anyhow return; } @@ -424,7 +426,7 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> { if (!iff->ifFalse) { // we need the ifTrue to break, so it cannot reach the code we want to // move - if (iff->ifTrue->type == unreachable) { + if (iff->ifTrue->type == Type::unreachable) { iff->ifFalse = builder.stealSlice(block, i + 1, list.size()); iff->finalize(); block->finalize(); @@ -468,13 +470,13 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> { return block; }; - if (iff->ifTrue->type == unreachable) { + if (iff->ifTrue->type == Type::unreachable) { iff->ifFalse = blockifyMerge( iff->ifFalse, builder.stealSlice(block, i + 1, list.size())); iff->finalize(); block->finalize(); return true; - } else if (iff->ifFalse->type == unreachable) { + } else if (iff->ifFalse->type == Type::unreachable) { iff->ifTrue = blockifyMerge( iff->ifTrue, builder.stealSlice(block, i + 1, list.size())); iff->finalize(); @@ -781,7 +783,7 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> { auto* br1 = list[i]->dynCast<Break>(); // avoid unreachable brs, as they are dead code anyhow, and after // merging them the outer scope could need type changes - if (!br1 || !br1->condition || br1->type == unreachable) { + if (!br1 || !br1->condition || br1->type == Type::unreachable) { continue; } assert(!br1->value); @@ -792,7 +794,7 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> { assert(!br2->value); // same target as previous, which has no value // a br_if and then a br[_if] with the same target right after it if (br2->condition) { - if (shrink && br2->type != unreachable) { + if (shrink && br2->type != Type::unreachable) { // Join adjacent br_ifs to the same target, making one br_if // with a "selectified" condition that executes both. if (!EffectAnalyzer(passOptions, br2->condition) @@ -872,7 +874,7 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> { // type is unreachable that means it is not actually reached, which we // can ignore. if (br && br->condition && br->name == curr->name && - br->type != unreachable) { + br->type != Type::unreachable) { if (BranchUtils::BranchSeeker::count(curr, curr->name) == 1) { // no other breaks to that name, so we can do this if (!drop) { @@ -977,7 +979,8 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> { } auto tryToOptimize = [&](Expression* one, Expression* two, bool flipCondition) { - if (one->type == unreachable && two->type != unreachable) { + if (one->type == Type::unreachable && + two->type != Type::unreachable) { if (auto* br = one->dynCast<Break>()) { if (ExpressionAnalyzer::isSimple(br)) { // Wonderful, do it! @@ -1130,7 +1133,7 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> { if (!br->condition || br->value) { return nullptr; } - if (br->type != none) { + if (br->type != Type::none) { // no value, so can be unreachable or none. 
ignore unreachable ones, // dce will clean it up return nullptr; diff --git a/src/passes/SafeHeap.cpp b/src/passes/SafeHeap.cpp index fc6706f3e..a9ad62820 100644 --- a/src/passes/SafeHeap.cpp +++ b/src/passes/SafeHeap.cpp @@ -71,7 +71,7 @@ struct AccessInstrumenter : public WalkerPass<PostWalker<AccessInstrumenter>> { AccessInstrumenter* create() override { return new AccessInstrumenter; } void visitLoad(Load* curr) { - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { return; } Builder builder(*getModule()); @@ -85,7 +85,7 @@ struct AccessInstrumenter : public WalkerPass<PostWalker<AccessInstrumenter>> { } void visitStore(Store* curr) { - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { return; } Builder builder(*getModule()); @@ -96,7 +96,7 @@ struct AccessInstrumenter : public WalkerPass<PostWalker<AccessInstrumenter>> { builder.makeConst(Literal(int32_t(curr->offset))), curr->value, }, - none)); + Type::none)); } }; @@ -169,15 +169,16 @@ struct SafeHeap : public Pass { void addGlobals(Module* module, FeatureSet features) { // load funcs Load load; - for (auto type : {i32, i64, f32, f64, v128}) { - if (type == v128 && !features.hasSIMD()) { + for (Type type : {Type::i32, Type::i64, Type::f32, Type::f64, Type::v128}) { + if (type == Type::v128 && !features.hasSIMD()) { continue; } load.type = type; for (Index bytes : {1, 2, 4, 8, 16}) { load.bytes = bytes; - if (bytes > type.getByteSize() || (type == f32 && bytes != 4) || - (type == f64 && bytes != 8) || (type == v128 && bytes != 16)) { + if (bytes > type.getByteSize() || (type == Type::f32 && bytes != 4) || + (type == Type::f64 && bytes != 8) || + (type == Type::v128 && bytes != 16)) { continue; } for (auto signed_ : {true, false}) { @@ -204,18 +205,19 @@ struct SafeHeap : public Pass { } // store funcs Store store; - for (auto valueType : {i32, i64, f32, f64, v128}) { - if (valueType == v128 && !features.hasSIMD()) { + for (Type valueType : + {Type::i32, Type::i64, Type::f32, Type::f64, Type::v128}) { + if (valueType == Type::v128 && !features.hasSIMD()) { continue; } store.valueType = valueType; - store.type = none; + store.type = Type::none; for (Index bytes : {1, 2, 4, 8, 16}) { store.bytes = bytes; if (bytes > valueType.getByteSize() || - (valueType == f32 && bytes != 4) || - (valueType == f64 && bytes != 8) || - (valueType == v128 && bytes != 16)) { + (valueType == Type::f32 && bytes != 4) || + (valueType == Type::f64 && bytes != 8) || + (valueType == Type::v128 && bytes != 16)) { continue; } for (Index align : {1, 2, 4, 8, 16}) { @@ -246,13 +248,14 @@ struct SafeHeap : public Pass { func->name = name; // pointer, offset func->sig = Signature({Type::i32, Type::i32}, style.type); - func->vars.push_back(i32); // pointer + offset + func->vars.push_back(Type::i32); // pointer + offset Builder builder(*module); auto* block = builder.makeBlock(); block->list.push_back(builder.makeLocalSet( 2, - builder.makeBinary( - AddInt32, builder.makeLocalGet(0, i32), builder.makeLocalGet(1, i32)))); + builder.makeBinary(AddInt32, + builder.makeLocalGet(0, Type::i32), + builder.makeLocalGet(1, Type::i32)))); // check for reading past valid memory: if pointer + offset + bytes block->list.push_back(makeBoundsCheck(style.type, builder, 2, style.bytes)); // check proper alignment @@ -262,7 +265,7 @@ struct SafeHeap : public Pass { // do the load auto* load = module->allocator.alloc<Load>(); *load = style; // basically the same as the template we are given! 
- load->ptr = builder.makeLocalGet(2, i32); + load->ptr = builder.makeLocalGet(2, Type::i32); Expression* last = load; if (load->isAtomic && load->signed_) { // atomic loads cannot be signed, manually sign it @@ -285,13 +288,14 @@ struct SafeHeap : public Pass { func->name = name; // pointer, offset, value func->sig = Signature({Type::i32, Type::i32, style.valueType}, Type::none); - func->vars.push_back(i32); // pointer + offset + func->vars.push_back(Type::i32); // pointer + offset Builder builder(*module); auto* block = builder.makeBlock(); block->list.push_back(builder.makeLocalSet( 3, - builder.makeBinary( - AddInt32, builder.makeLocalGet(0, i32), builder.makeLocalGet(1, i32)))); + builder.makeBinary(AddInt32, + builder.makeLocalGet(0, Type::i32), + builder.makeLocalGet(1, Type::i32)))); // check for reading past valid memory: if pointer + offset + bytes block->list.push_back( makeBoundsCheck(style.valueType, builder, 3, style.bytes)); @@ -302,10 +306,10 @@ struct SafeHeap : public Pass { // do the store auto* store = module->allocator.alloc<Store>(); *store = style; // basically the same as the template we are given! - store->ptr = builder.makeLocalGet(3, i32); + store->ptr = builder.makeLocalGet(3, Type::i32); store->value = builder.makeLocalGet(2, style.valueType); block->list.push_back(store); - block->finalize(none); + block->finalize(Type::none); func->body = block; module->addFunction(func); } @@ -313,9 +317,9 @@ struct SafeHeap : public Pass { Expression* makeAlignCheck(Address align, Builder& builder, Index local) { return builder.makeIf( builder.makeBinary(AndInt32, - builder.makeLocalGet(local, i32), + builder.makeLocalGet(local, Type::i32), builder.makeConst(Literal(int32_t(align - 1)))), - builder.makeCall(alignfault, {}, none)); + builder.makeCall(alignfault, {}, Type::none)); } Expression* @@ -324,30 +328,30 @@ struct SafeHeap : public Pass { auto upperBound = options.lowMemoryUnused ? PassOptions::LowMemoryBound : 0; Expression* brkLocation; if (sbrk.is()) { - brkLocation = - builder.makeCall(sbrk, {builder.makeConst(Literal(int32_t(0)))}, i32); + brkLocation = builder.makeCall( + sbrk, {builder.makeConst(Literal(int32_t(0)))}, Type::i32); } else { Expression* sbrkPtr; if (dynamicTopPtr.is()) { - sbrkPtr = builder.makeGlobalGet(dynamicTopPtr, i32); + sbrkPtr = builder.makeGlobalGet(dynamicTopPtr, Type::i32); } else { - sbrkPtr = builder.makeCall(getSbrkPtr, {}, i32); + sbrkPtr = builder.makeCall(getSbrkPtr, {}, Type::i32); } - brkLocation = builder.makeLoad(4, false, 0, 4, sbrkPtr, i32); + brkLocation = builder.makeLoad(4, false, 0, 4, sbrkPtr, Type::i32); } return builder.makeIf( builder.makeBinary( OrInt32, builder.makeBinary(upperOp, - builder.makeLocalGet(local, i32), + builder.makeLocalGet(local, Type::i32), builder.makeConst(Literal(int32_t(upperBound)))), builder.makeBinary( GtUInt32, builder.makeBinary(AddInt32, - builder.makeLocalGet(local, i32), + builder.makeLocalGet(local, Type::i32), builder.makeConst(Literal(int32_t(bytes)))), brkLocation)), - builder.makeCall(segfault, {}, none)); + builder.makeCall(segfault, {}, Type::none)); } }; diff --git a/src/passes/SimplifyLocals.cpp b/src/passes/SimplifyLocals.cpp index a952f8a38..f7558aa83 100644 --- a/src/passes/SimplifyLocals.cpp +++ b/src/passes/SimplifyLocals.cpp @@ -421,7 +421,7 @@ struct SimplifyLocals void optimizeLoopReturn(Loop* loop) { // If there is a sinkable thing in an eligible loop, we can optimize // it in a trivial way to the outside of the loop. 
- if (loop->type != none) { + if (loop->type != Type::none) { return; } if (sinkables.empty()) { @@ -442,7 +442,7 @@ struct SimplifyLocals block->list[block->list.size() - 1] = set->value; *item = builder.makeNop(); block->finalize(); - assert(block->type != none); + assert(block->type != Type::none); loop->finalize(); set->value = loop; set->finalize(); @@ -584,7 +584,7 @@ struct SimplifyLocals assert(iff->ifFalse); // if this if already has a result, or is unreachable code, we have // nothing to do - if (iff->type != none) { + if (iff->type != Type::none) { return; } // We now have the sinkables from both sides of the if, and can look @@ -606,14 +606,16 @@ struct SimplifyLocals Sinkables& ifFalse = sinkables; Index goodIndex = -1; bool found = false; - if (iff->ifTrue->type == unreachable) { - assert(iff->ifFalse->type != unreachable); // since the if type is none + if (iff->ifTrue->type == Type::unreachable) { + // since the if type is none + assert(iff->ifFalse->type != Type::unreachable); if (!ifFalse.empty()) { goodIndex = ifFalse.begin()->first; found = true; } - } else if (iff->ifFalse->type == unreachable) { - assert(iff->ifTrue->type != unreachable); // since the if type is none + } else if (iff->ifFalse->type == Type::unreachable) { + // since the if type is none + assert(iff->ifTrue->type != Type::unreachable); if (!ifTrue.empty()) { goodIndex = ifTrue.begin()->first; found = true; @@ -636,7 +638,7 @@ struct SimplifyLocals // ensure we have a place to write the return values for, if not, we // need another cycle auto* ifTrueBlock = iff->ifTrue->dynCast<Block>(); - if (iff->ifTrue->type != unreachable) { + if (iff->ifTrue->type != Type::unreachable) { if (!ifTrueBlock || ifTrueBlock->name.is() || ifTrueBlock->list.size() == 0 || !ifTrueBlock->list.back()->is<Nop>()) { @@ -645,7 +647,7 @@ struct SimplifyLocals } } auto* ifFalseBlock = iff->ifFalse->dynCast<Block>(); - if (iff->ifFalse->type != unreachable) { + if (iff->ifFalse->type != Type::unreachable) { if (!ifFalseBlock || ifFalseBlock->name.is() || ifFalseBlock->list.size() == 0 || !ifFalseBlock->list.back()->is<Nop>()) { @@ -654,24 +656,24 @@ struct SimplifyLocals } } // all set, go - if (iff->ifTrue->type != unreachable) { + if (iff->ifTrue->type != Type::unreachable) { auto* ifTrueItem = ifTrue.at(goodIndex).item; ifTrueBlock->list[ifTrueBlock->list.size() - 1] = (*ifTrueItem)->template cast<LocalSet>()->value; ExpressionManipulator::nop(*ifTrueItem); ifTrueBlock->finalize(); - assert(ifTrueBlock->type != none); + assert(ifTrueBlock->type != Type::none); } - if (iff->ifFalse->type != unreachable) { + if (iff->ifFalse->type != Type::unreachable) { auto* ifFalseItem = ifFalse.at(goodIndex).item; ifFalseBlock->list[ifFalseBlock->list.size() - 1] = (*ifFalseItem)->template cast<LocalSet>()->value; ExpressionManipulator::nop(*ifFalseItem); ifFalseBlock->finalize(); - assert(ifFalseBlock->type != none); + assert(ifFalseBlock->type != Type::none); } iff->finalize(); // update type - assert(iff->type != none); + assert(iff->type != Type::none); // finally, create a local.set on the iff itself auto* newLocalSet = Builder(*this->getModule()).makeLocalSet(goodIndex, iff); @@ -703,7 +705,7 @@ struct SimplifyLocals // arm into a one-sided if. void optimizeIfReturn(If* iff, Expression** currp) { // If this if is unreachable code, we have nothing to do. - if (iff->type != none || iff->ifTrue->type != none) { + if (iff->type != Type::none || iff->ifTrue->type != Type::none) { return; } // Anything sinkable is good for us. 
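Stepping back to the SafeHeap.cpp hunks a little earlier in this diff: each generated helper computes ptr + offset into a scratch local, calls the imported alignfault handler if the address is misaligned, and calls segfault if the access would run past the current heap top (read via sbrk or a dynamic-top pointer). A rough C++ rendering of the check each helper performs; heapTop here is a placeholder for that heap-top source, and the two handlers merely model the imported ones:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Placeholder for what the instrumented module reads at runtime
// (sbrk(0) or the dynamic-top global in the real pass).
static uint32_t heapTop = 64 * 1024;

[[noreturn]] static void alignfault() { std::fputs("alignfault\n", stderr); std::abort(); }
[[noreturn]] static void segfault() { std::fputs("segfault\n", stderr); std::abort(); }

// Shape of the generated check: effective address = ptr + offset, then verify
// alignment and that [addr, addr + bytes) stays within the currently valid heap.
static uint32_t checkedAddress(uint32_t ptr, uint32_t offset, uint32_t bytes,
                               uint32_t align) {
  uint32_t addr = ptr + offset;   // the local.set the pass emits first
  if (addr & (align - 1)) {       // cf. makeAlignCheck
    alignfault();
  }
  if (addr + bytes > heapTop) {   // cf. the upper half of makeBoundsCheck
    segfault();
  }
  return addr;                    // the real helper then performs the load/store
}

int main() {
  std::printf("%u\n", checkedAddress(1024, 8, 8, 8)); // prints 1032
  return 0;
}

The real makeBoundsCheck additionally has an optional lower-bound test when lowMemoryUnused is set, which this sketch omits.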
@@ -726,14 +728,14 @@ struct SimplifyLocals ifTrueBlock->list[ifTrueBlock->list.size() - 1] = set->value; *item = builder.makeNop(); ifTrueBlock->finalize(); - assert(ifTrueBlock->type != none); + assert(ifTrueBlock->type != Type::none); // Update the ifFalse side. iff->ifFalse = builder.makeLocalGet( set->index, this->getFunction()->getLocalType(set->index)); iff->finalize(); // update type // Update the get count. getCounter.num[set->index]++; - assert(iff->type != none); + assert(iff->type != Type::none); // Finally, reuse the local.set on the iff itself. set->value = iff; set->finalize(); diff --git a/src/passes/SpillPointers.cpp b/src/passes/SpillPointers.cpp index 7458f5fbd..453510592 100644 --- a/src/passes/SpillPointers.cpp +++ b/src/passes/SpillPointers.cpp @@ -155,7 +155,7 @@ struct SpillPointers Function* func, Module* module) { auto* call = *origin; - if (call->type == unreachable) { + if (call->type == Type::unreachable) { return; // the call is never reached anyhow, ignore } Builder builder(*module); diff --git a/src/passes/StackIR.cpp b/src/passes/StackIR.cpp index e4aa55d68..a628468fb 100644 --- a/src/passes/StackIR.cpp +++ b/src/passes/StackIR.cpp @@ -88,7 +88,7 @@ private: // We can remove this. removeAt(i); } - } else if (inst->type == unreachable) { + } else if (inst->type == Type::unreachable) { inUnreachableCode = true; } } @@ -221,7 +221,7 @@ private: // This is an actual regular value on the value stack. values.push_back(null); } - } else if (inst->origin->is<LocalSet>() && inst->type == none) { + } else if (inst->origin->is<LocalSet>() && inst->type == Type::none) { // This set is potentially optimizable later, add to stack. values.push_back(i); } diff --git a/src/passes/TrapMode.cpp b/src/passes/TrapMode.cpp index 9c855d8c1..f3cf3c63c 100644 --- a/src/passes/TrapMode.cpp +++ b/src/passes/TrapMode.cpp @@ -99,7 +99,7 @@ bool isTruncOpSigned(UnaryOp op) { Function* generateBinaryFunc(Module& wasm, Binary* curr) { BinaryOp op = curr->op; Type type = curr->type; - bool isI64 = type == i64; + bool isI64 = type == Type::i64; Builder builder(wasm); Expression* result = builder.makeBinary( op, builder.makeLocalGet(0, type), builder.makeLocalGet(1, type)); @@ -145,7 +145,7 @@ Function* generateUnaryFunc(Module& wasm, Unary* curr) { Type type = curr->value->type; Type retType = curr->type; UnaryOp truncOp = curr->op; - bool isF64 = type == f64; + bool isF64 = type == Type::f64; Builder builder(wasm); @@ -276,12 +276,12 @@ Expression* makeTrappingUnary(Unary* curr, // slow ffi If i64, there is no "JS" way to handle this, as no i64s in JS, so // always clamp if we don't allow traps asm.js doesn't have unsigned // f64-to-int, so just use the signed one. 
- if (curr->type != i64 && mode == TrapMode::JS) { + if (curr->type != Type::i64 && mode == TrapMode::JS) { // WebAssembly traps on float-to-int overflows, but asm.js wouldn't, so we // must emulate that ensureF64ToI64JSImport(trappingFunctions); Expression* f64Value = ensureDouble(curr->value, wasm.allocator); - return builder.makeCall(F64_TO_INT, {f64Value}, i32); + return builder.makeCall(F64_TO_INT, {f64Value}, Type::i32); } ensureUnaryFunc(curr, wasm, trappingFunctions); diff --git a/src/passes/Untee.cpp b/src/passes/Untee.cpp index 6e5fb489b..848e88a65 100644 --- a/src/passes/Untee.cpp +++ b/src/passes/Untee.cpp @@ -35,7 +35,7 @@ struct Untee : public WalkerPass<PostWalker<Untee>> { void visitLocalSet(LocalSet* curr) { if (curr->isTee()) { - if (curr->value->type == unreachable) { + if (curr->value->type == Type::unreachable) { // we don't reach the tee, just remove it replaceCurrent(curr->value); } else { diff --git a/src/passes/Vacuum.cpp b/src/passes/Vacuum.cpp index 48a55ed89..a222b0159 100644 --- a/src/passes/Vacuum.cpp +++ b/src/passes/Vacuum.cpp @@ -56,7 +56,7 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> { Expression* optimize(Expression* curr, bool resultUsed, bool typeMatters) { auto type = curr->type; // An unreachable node must not be changed. - if (type == unreachable) { + if (type == Type::unreachable) { return curr; } // We iterate on possible replacements. If a replacement changes the type, @@ -226,7 +226,7 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> { // example, code-folding can merge out identical zeros at the end of // if arms). optimized = LiteralUtils::makeZero(child->type, *getModule()); - } else if (child->type == unreachable) { + } else if (child->type == Type::unreachable) { // Don't try to optimize out an unreachable child (dce can do that // properly). 
optimized = child; @@ -245,7 +245,7 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> { list[z] = nullptr; } // if this is unreachable, the rest is dead code - if (list[z - skip]->type == unreachable && z < size - 1) { + if (list[z - skip]->type == Type::unreachable && z < size - 1) { for (Index i = z - skip + 1; i < list.size(); i++) { auto* remove = list[i]; if (remove) { @@ -292,7 +292,7 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> { return; } // if the condition is unreachable, just return it - if (curr->condition->type == unreachable) { + if (curr->condition->type == Type::unreachable) { typeUpdater.noteRecursiveRemoval(curr->ifTrue); if (curr->ifFalse) { typeUpdater.noteRecursiveRemoval(curr->ifFalse); @@ -366,14 +366,14 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> { BranchUtils::BranchSeeker seeker(block->name); Expression* temp = block; seeker.walk(temp); - if (seeker.found && seeker.valueType != none) { + if (seeker.found && seeker.valueType != Type::none) { canPop = false; } } if (canPop) { block->list.back() = last; block->list.pop_back(); - block->type = none; + block->type = Type::none; // we don't need the drop anymore, let's see what we have left in // the block if (block->list.size() > 1) { @@ -394,16 +394,17 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> { auto* iff = curr->value->dynCast<If>(); if (iff && iff->ifFalse && iff->type.isConcrete()) { // reuse the drop in both cases - if (iff->ifTrue->type == unreachable && iff->ifFalse->type.isConcrete()) { + if (iff->ifTrue->type == Type::unreachable && + iff->ifFalse->type.isConcrete()) { curr->value = iff->ifFalse; iff->ifFalse = curr; - iff->type = none; + iff->type = Type::none; replaceCurrent(iff); - } else if (iff->ifFalse->type == unreachable && + } else if (iff->ifFalse->type == Type::unreachable && iff->ifTrue->type.isConcrete()) { curr->value = iff->ifTrue; iff->ifTrue = curr; - iff->type = none; + iff->type = Type::none; replaceCurrent(iff); } } diff --git a/src/shell-interface.h b/src/shell-interface.h index 75f8e81b8..cd5e5210b 100644 --- a/src/shell-interface.h +++ b/src/shell-interface.h @@ -100,28 +100,28 @@ struct ShellExternalInterface : ModuleInstance::ExternalInterface { ModuleUtils::iterImportedGlobals(wasm, [&](Global* import) { if (import->module == SPECTEST && import->base.startsWith(GLOBAL)) { switch (import->type) { - case i32: + case Type::i32: globals[import->name] = Literal(int32_t(666)); break; - case i64: + case Type::i64: globals[import->name] = Literal(int64_t(666)); break; - case f32: + case Type::f32: globals[import->name] = Literal(float(666.6)); break; - case f64: + case Type::f64: globals[import->name] = Literal(double(666.6)); break; - case v128: + case Type::v128: assert(false && "v128 not implemented yet"); - case funcref: - case anyref: - case nullref: - case exnref: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: globals[import->name] = Literal::makeNullref(); break; - case none: - case unreachable: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } } diff --git a/src/tools/asm2wasm.cpp b/src/tools/asm2wasm.cpp index b54dd8cd8..6b20d1bcb 100644 --- a/src/tools/asm2wasm.cpp +++ b/src/tools/asm2wasm.cpp @@ -227,7 +227,7 @@ int main(int argc, const char* argv[]) { Expression* init; const auto& memBase = options.extra.find("mem base"); if (memBase == options.extra.end()) { - init = Builder(wasm).makeGlobalGet(MEMORY_BASE, i32); + 
init = Builder(wasm).makeGlobalGet(MEMORY_BASE, Type::i32); } else { init = Builder(wasm).makeConst( Literal(int32_t(atoi(memBase->second.c_str())))); diff --git a/src/tools/fuzzing.h b/src/tools/fuzzing.h index af5773e80..81bd87581 100644 --- a/src/tools/fuzzing.h +++ b/src/tools/fuzzing.h @@ -378,16 +378,20 @@ private: builder.makeBinary( AddInt32, builder.makeBinary(ShlInt32, - builder.makeLocalGet(0, i32), + builder.makeLocalGet(0, Type::i32), builder.makeConst(Literal(uint32_t(5)))), - builder.makeLocalGet(0, i32)), - builder.makeLoad( - 1, false, i, 1, builder.makeConst(Literal(uint32_t(0))), i32)))); - } - contents.push_back(builder.makeLocalGet(0, i32)); + builder.makeLocalGet(0, Type::i32)), + builder.makeLoad(1, + false, + i, + 1, + builder.makeConst(Literal(uint32_t(0))), + Type::i32)))); + } + contents.push_back(builder.makeLocalGet(0, Type::i32)); auto* body = builder.makeBlock(contents); auto* hasher = wasm.addFunction(builder.makeFunction( - "hashMemory", Signature(Type::none, Type::i32), {i32}, body)); + "hashMemory", Signature(Type::none, Type::i32), {Type::i32}, body)); wasm.addExport( builder.makeExport(hasher->name, hasher->name, ExternalKind::Function)); // Export memory so JS fuzzing can use it @@ -446,7 +450,7 @@ private: void addHangLimitSupport() { auto* glob = builder.makeGlobal(HANG_LIMIT_GLOBAL, - i32, + Type::i32, builder.makeConst(Literal(int32_t(HANG_LIMIT))), Builder::Mutable); wasm.addGlobal(glob); @@ -481,12 +485,12 @@ private: return builder.makeSequence( builder.makeIf( builder.makeUnary(UnaryOp::EqZInt32, - builder.makeGlobalGet(HANG_LIMIT_GLOBAL, i32)), - makeTrivial(unreachable)), + builder.makeGlobalGet(HANG_LIMIT_GLOBAL, Type::i32)), + makeTrivial(Type::unreachable)), builder.makeGlobalSet( HANG_LIMIT_GLOBAL, builder.makeBinary(BinaryOp::SubInt32, - builder.makeGlobalGet(HANG_LIMIT_GLOBAL, i32), + builder.makeGlobalGet(HANG_LIMIT_GLOBAL, Type::i32), builder.makeConst(Literal(int32_t(1)))))); } @@ -502,18 +506,18 @@ private: builder.makeConst(literal)); wasm.addFunction(func); }; - add("deNan32", f32, Literal(float(0)), EqFloat32); - add("deNan64", f64, Literal(double(0)), EqFloat64); + add("deNan32", Type::f32, Literal(float(0)), EqFloat32); + add("deNan64", Type::f64, Literal(double(0)), EqFloat64); } Expression* makeDeNanOp(Expression* expr) { if (allowNaNs) { return expr; } - if (expr->type == f32) { - return builder.makeCall("deNan32", {expr}, f32); - } else if (expr->type == f64) { - return builder.makeCall("deNan64", {expr}, f64); + if (expr->type == Type::f32) { + return builder.makeCall("deNan32", {expr}, Type::f32); + } else if (expr->type == Type::f64) { + return builder.makeCall("deNan64", {expr}, Type::f64); } return expr; // unreachable etc. 
is fine } @@ -558,7 +562,7 @@ private: // with small chance, make the body unreachable auto bodyType = func->sig.results; if (oneIn(10)) { - bodyType = unreachable; + bodyType = Type::unreachable; } // with reasonable chance make the body a block if (oneIn(2)) { @@ -844,34 +848,34 @@ private: } else { return makeLocalGet(type); } - } else if (type == none) { + } else if (type == Type::none) { if (oneIn(2)) { return makeNop(type); } else { return makeLocalSet(type); } } - assert(type == unreachable); + assert(type == Type::unreachable); return makeTrivial(type); } nesting++; Expression* ret = nullptr; switch (type) { - case i32: - case i64: - case f32: - case f64: - case v128: - case funcref: - case anyref: - case nullref: - case exnref: + case Type::i32: + case Type::i64: + case Type::f32: + case Type::f64: + case Type::v128: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: ret = _makeConcrete(type); break; - case none: + case Type::none: ret = _makenone(); break; - case unreachable: + case Type::unreachable: ret = _makeunreachable(); break; } @@ -942,19 +946,19 @@ private: } choice = upTo(100); if (choice < 50) { - return makeLocalSet(none); + return makeLocalSet(Type::none); } if (choice < 60) { - return makeBlock(none); + return makeBlock(Type::none); } if (choice < 70) { - return makeIf(none); + return makeIf(Type::none); } if (choice < 80) { - return makeLoop(none); + return makeLoop(Type::none); } if (choice < 90) { - return makeBreak(none); + return makeBreak(Type::none); } using Self = TranslateToFuzzReader; auto options = FeatureOptions<Expression* (Self::*)(Type)>() @@ -972,41 +976,41 @@ private: &Self::makeGlobalSet) .add(FeatureSet::BulkMemory, &Self::makeBulkMemory) .add(FeatureSet::Atomics, &Self::makeAtomic); - return (this->*pick(options))(none); + return (this->*pick(options))(Type::none); } Expression* _makeunreachable() { switch (upTo(15)) { case 0: - return makeBlock(unreachable); + return makeBlock(Type::unreachable); case 1: - return makeIf(unreachable); + return makeIf(Type::unreachable); case 2: - return makeLoop(unreachable); + return makeLoop(Type::unreachable); case 3: - return makeBreak(unreachable); + return makeBreak(Type::unreachable); case 4: - return makeCall(unreachable); + return makeCall(Type::unreachable); case 5: - return makeCallIndirect(unreachable); + return makeCallIndirect(Type::unreachable); case 6: - return makeLocalSet(unreachable); + return makeLocalSet(Type::unreachable); case 7: - return makeStore(unreachable); + return makeStore(Type::unreachable); case 8: - return makeUnary(unreachable); + return makeUnary(Type::unreachable); case 9: - return makeBinary(unreachable); + return makeBinary(Type::unreachable); case 10: - return makeSelect(unreachable); + return makeSelect(Type::unreachable); case 11: - return makeSwitch(unreachable); + return makeSwitch(Type::unreachable); case 12: - return makeDrop(unreachable); + return makeDrop(Type::unreachable); case 13: - return makeReturn(unreachable); + return makeReturn(Type::unreachable); case 14: - return makeUnreachable(unreachable); + return makeUnreachable(Type::unreachable); } WASM_UNREACHABLE("unexpected value"); } @@ -1019,10 +1023,10 @@ private: } else { return makeConst(type); } - } else if (type == none) { + } else if (type == Type::none) { return makeNop(type); } - assert(type == unreachable); + assert(type == Type::unreachable); Expression* ret = nullptr; if (func->sig.results.isConcrete()) { ret = makeTrivial(func->sig.results); @@ -1051,13
+1055,13 @@ private: num++; } while (num > 0 && !finishedInput) { - ret->list.push_back(make(none)); + ret->list.push_back(make(Type::none)); num--; } // give a chance to make the final element an unreachable break, instead // of concrete - a common pattern (branch to the top of a loop etc.) if (!finishedInput && type.isConcrete() && oneIn(2)) { - ret->list.push_back(makeBreak(unreachable)); + ret->list.push_back(makeBreak(Type::unreachable)); } else { ret->list.push_back(make(type)); } @@ -1069,8 +1073,8 @@ private: } if (ret->type != type) { // e.g. we might want an unreachable block, but a child breaks to it - assert(type == unreachable && ret->type == none); - return builder.makeSequence(ret, make(unreachable)); + assert(type == Type::unreachable && ret->type == Type::none); + return builder.makeSequence(ret, make(Type::unreachable)); } return ret; } @@ -1087,7 +1091,7 @@ private: } else { // ensure a branch back. also optionally create some loop vars std::vector<Expression*> list; - list.push_back(makeMaybeBlock(none)); // primary contents + list.push_back(makeMaybeBlock(Type::none)); // primary contents // possible branch back list.push_back(builder.makeBreak(ret->name, nullptr, makeCondition())); list.push_back(make(type)); // final element, so we have the right type @@ -1103,7 +1107,7 @@ private: // we want a 50-50 chance for the condition to be taken, for interesting // execution paths. by itself, there is bias (e.g. most consts are "yes") // so even that out with noise - auto* ret = make(i32); + auto* ret = make(Type::i32); if (oneIn(2)) { ret = builder.makeUnary(UnaryOp::EqZInt32, ret); } @@ -1138,7 +1142,7 @@ private: return makeTrivial(type); } Expression* condition = nullptr; - if (type != unreachable) { + if (type != Type::unreachable) { hangStack.push_back(nullptr); condition = makeCondition(); } @@ -1157,8 +1161,8 @@ private: auto* ret = builder.makeBreak(name, make(type), condition); hangStack.pop_back(); return ret; - } else if (type == none) { - if (valueType != none) { + } else if (type == Type::none) { + if (valueType != Type::none) { // we need to break to a proper place continue; } @@ -1166,8 +1170,8 @@ private: hangStack.pop_back(); return ret; } else { - assert(type == unreachable); - if (valueType != none) { + assert(type == Type::unreachable); + if (valueType != Type::none) { // we need to break to a proper place continue; } @@ -1210,7 +1214,7 @@ private: } } // we failed to find something - if (type != unreachable) { + if (type != Type::unreachable) { hangStack.pop_back(); } return makeTrivial(type); @@ -1254,7 +1258,7 @@ private: while (1) { // TODO: handle unreachable targetFn = wasm.getFunction(data[i]); - isReturn = type == unreachable && wasm.features.hasTailCall() && + isReturn = type == Type::unreachable && wasm.features.hasTailCall() && func->sig.results == targetFn->sig.results; if (targetFn->sig.results == type || isReturn) { break; @@ -1273,7 +1277,7 @@ private: if (!allowOOB || !oneIn(10)) { target = builder.makeConst(Literal(int32_t(i))); } else { - target = make(i32); + target = make(Type::i32); } std::vector<Expression*> args; for (auto type : targetFn->sig.params.expand()) { @@ -1291,7 +1295,7 @@ private: } Expression* makeLocalSet(Type type) { - bool tee = type != none; + bool tee = type != Type::none; Type valueType; if (tee) { valueType = type; @@ -1319,18 +1323,18 @@ private: } Expression* makeGlobalSet(Type type) { - assert(type == none); + assert(type == Type::none); type = getConcreteType(); auto& globals =
globalsByType[type]; if (globals.empty()) { - return makeTrivial(none); + return makeTrivial(Type::none); } auto* value = make(type); return builder.makeGlobalSet(pick(globals), value); } Expression* makePointer() { - auto* ret = make(i32); + auto* ret = make(Type::i32); // with high probability, mask the pointer so it's in a reasonable // range. otherwise, most pointers are going to be out of range and // most memory ops will just trap @@ -1345,7 +1349,7 @@ private: auto offset = logify(get()); auto ptr = makePointer(); switch (type) { - case i32: { + case Type::i32: { bool signed_ = get() & 1; switch (upTo(3)) { case 0: @@ -1358,7 +1362,7 @@ private: } WASM_UNREACHABLE("unexpected value"); } - case i64: { + case Type::i64: { bool signed_ = get() & 1; switch (upTo(4)) { case 0: @@ -1374,25 +1378,25 @@ private: } WASM_UNREACHABLE("unexpected value"); } - case f32: { + case Type::f32: { return builder.makeLoad(4, false, offset, pick(1, 2, 4), ptr, type); } - case f64: { + case Type::f64: { return builder.makeLoad(8, false, offset, pick(1, 2, 4, 8), ptr, type); } - case v128: { + case Type::v128: { if (!wasm.features.hasSIMD()) { return makeTrivial(type); } return builder.makeLoad( 16, false, offset, pick(1, 2, 4, 8, 16), ptr, type); } - case funcref: - case anyref: - case nullref: - case exnref: - case none: - case unreachable: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("invalid type"); } WASM_UNREACHABLE("invalid type"); @@ -1404,7 +1408,7 @@ private: return makeTrivial(type); } auto* ret = makeNonAtomicLoad(type); - if (type != i32 && type != i64) { + if (type != Type::i32 && type != Type::i64) { return ret; } if (!wasm.features.hasAtomics() || oneIn(2)) { @@ -1420,7 +1424,7 @@ private: } Expression* makeNonAtomicStore(Type type) { - if (type == unreachable) { + if (type == Type::unreachable) { // make a normal store, then make it unreachable auto* ret = makeNonAtomicStore(getStorableType()); auto* store = ret->dynCast<Store>(); @@ -1429,14 +1433,14 @@ private: } switch (upTo(3)) { case 0: - store->ptr = make(unreachable); + store->ptr = make(Type::unreachable); break; case 1: - store->value = make(unreachable); + store->value = make(Type::unreachable); break; case 2: - store->ptr = make(unreachable); - store->value = make(unreachable); + store->ptr = make(Type::unreachable); + store->value = make(Type::unreachable); break; } store->finalize(); @@ -1444,14 +1448,14 @@ private: } // the type is none or unreachable. we also need to pick the value // type. 
- if (type == none) { + if (type == Type::none) { type = getStorableType(); } auto offset = logify(get()); auto ptr = makePointer(); auto value = make(type); switch (type) { - case i32: { + case Type::i32: { switch (upTo(3)) { case 0: return builder.makeStore(1, offset, 1, ptr, value, type); @@ -1463,7 +1467,7 @@ private: } WASM_UNREACHABLE("invalid value"); } - case i64: { + case Type::i64: { switch (upTo(4)) { case 0: return builder.makeStore(1, offset, 1, ptr, value, type); @@ -1478,25 +1482,25 @@ private: } WASM_UNREACHABLE("invalid value"); } - case f32: { + case Type::f32: { return builder.makeStore(4, offset, pick(1, 2, 4), ptr, value, type); } - case f64: { + case Type::f64: { return builder.makeStore(8, offset, pick(1, 2, 4, 8), ptr, value, type); } - case v128: { + case Type::v128: { if (!wasm.features.hasSIMD()) { return makeTrivial(type); } return builder.makeStore( 16, offset, pick(1, 2, 4, 8, 16), ptr, value, type); } - case funcref: - case anyref: - case nullref: - case exnref: - case none: - case unreachable: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("invalid type"); } WASM_UNREACHABLE("invalid type"); @@ -1511,7 +1515,7 @@ private: if (!store) { return ret; } - if (store->value->type != i32 && store->value->type != i64) { + if (store->value->type != Type::i32 && store->value->type != Type::i64) { return store; } if (!wasm.features.hasAtomics() || oneIn(2)) { @@ -1525,51 +1529,51 @@ private: } Literal makeArbitraryLiteral(Type type) { - if (type == v128) { + if (type == Type::v128) { // generate each lane individually for random lane interpretation switch (upTo(6)) { case 0: - return Literal(std::array<Literal, 16>{{makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32)}}); + return Literal(std::array<Literal, 16>{{makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32)}}); case 1: - return Literal(std::array<Literal, 8>{{makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32)}}); + return Literal(std::array<Literal, 8>{{makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32)}}); case 2: - return Literal(std::array<Literal, 4>{{makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32), - makeLiteral(i32)}}); + return Literal(std::array<Literal, 4>{{makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32), + makeLiteral(Type::i32)}}); case 3: - return Literal( - std::array<Literal, 2>{{makeLiteral(i64), makeLiteral(i64)}}); + return Literal(std::array<Literal, 2>{ + {makeLiteral(Type::i64), makeLiteral(Type::i64)}}); case 4: - return Literal(std::array<Literal, 4>{{makeLiteral(f32), - makeLiteral(f32), 
- makeLiteral(f32), - makeLiteral(f32)}}); + return Literal(std::array<Literal, 4>{{makeLiteral(Type::f32), + makeLiteral(Type::f32), + makeLiteral(Type::f32), + makeLiteral(Type::f32)}}); case 5: - return Literal( - std::array<Literal, 2>{{makeLiteral(f64), makeLiteral(f64)}}); + return Literal(std::array<Literal, 2>{ + {makeLiteral(Type::f64), makeLiteral(Type::f64)}}); default: WASM_UNREACHABLE("unexpected value"); } @@ -1579,21 +1583,21 @@ private: case 0: { // totally random, entire range switch (type) { - case i32: + case Type::i32: return Literal(get32()); - case i64: + case Type::i64: return Literal(get64()); - case f32: + case Type::f32: return Literal(getFloat()); - case f64: + case Type::f64: return Literal(getDouble()); - case v128: - case funcref: - case anyref: - case nullref: - case exnref: - case none: - case unreachable: + case Type::v128: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("invalid type"); } break; @@ -1624,21 +1628,21 @@ private: WASM_UNREACHABLE("invalid value"); } switch (type) { - case i32: + case Type::i32: return Literal(int32_t(small)); - case i64: + case Type::i64: return Literal(int64_t(small)); - case f32: + case Type::f32: return Literal(float(small)); - case f64: + case Type::f64: return Literal(double(small)); - case v128: - case funcref: - case anyref: - case nullref: - case exnref: - case none: - case unreachable: + case Type::v128: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } break; @@ -1647,7 +1651,7 @@ private: // special values Literal value; switch (type) { - case i32: + case Type::i32: value = Literal(pick<int32_t>(0, std::numeric_limits<int8_t>::min(), @@ -1660,7 +1664,7 @@ private: std::numeric_limits<uint16_t>::max(), std::numeric_limits<uint32_t>::max())); break; - case i64: + case Type::i64: value = Literal(pick<int64_t>(0, std::numeric_limits<int8_t>::min(), @@ -1676,7 +1680,7 @@ private: std::numeric_limits<uint32_t>::max(), std::numeric_limits<uint64_t>::max())); break; - case f32: + case Type::f32: value = Literal(pick<float>(0, std::numeric_limits<float>::min(), std::numeric_limits<float>::max(), @@ -1687,7 +1691,7 @@ private: std::numeric_limits<uint32_t>::max(), std::numeric_limits<uint64_t>::max())); break; - case f64: + case Type::f64: value = Literal(pick<double>(0, std::numeric_limits<float>::min(), std::numeric_limits<float>::max(), @@ -1700,13 +1704,13 @@ private: std::numeric_limits<uint32_t>::max(), std::numeric_limits<uint64_t>::max())); break; - case v128: - case funcref: - case anyref: - case nullref: - case exnref: - case none: - case unreachable: + case Type::v128: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } // tweak around special values @@ -1722,25 +1726,25 @@ private: // powers of 2 Literal value; switch (type) { - case i32: + case Type::i32: value = Literal(int32_t(1) << upTo(32)); break; - case i64: + case Type::i64: value = Literal(int64_t(1) << upTo(64)); break; - case f32: + case Type::f32: value = Literal(float(int64_t(1) << upTo(64))); break; - case f64: + case Type::f64: value = Literal(double(int64_t(1) << upTo(64))); break; - case v128: - case funcref: - case anyref: - case nullref: - case exnref: - case none: - case unreachable: + case Type::v128: + case 
Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } // maybe negative @@ -1790,9 +1794,10 @@ private: } Expression* makeUnary(Type type) { - if (type == unreachable) { + if (type == Type::unreachable) { if (auto* unary = makeUnary(getConcreteType())->dynCast<Unary>()) { - return makeDeNanOp(builder.makeUnary(unary->op, make(unreachable))); + return makeDeNanOp( + builder.makeUnary(unary->op, make(Type::unreachable))); } // give up return makeTrivial(type); @@ -1803,18 +1808,18 @@ private: } switch (type) { - case i32: { + case Type::i32: { switch (getConcreteType()) { - case i32: { + case Type::i32: { auto op = pick( FeatureOptions<UnaryOp>() .add(FeatureSet::MVP, EqZInt32, ClzInt32, CtzInt32, PopcntInt32) .add(FeatureSet::SignExt, ExtendS8Int32, ExtendS16Int32)); - return buildUnary({op, make(i32)}); + return buildUnary({op, make(Type::i32)}); } - case i64: - return buildUnary({pick(EqZInt64, WrapInt64), make(i64)}); - case f32: { + case Type::i64: + return buildUnary({pick(EqZInt64, WrapInt64), make(Type::i64)}); + case Type::f32: { auto op = pick(FeatureOptions<UnaryOp>() .add(FeatureSet::MVP, TruncSFloat32ToInt32, @@ -1823,9 +1828,9 @@ private: .add(FeatureSet::TruncSat, TruncSatSFloat32ToInt32, TruncSatUFloat32ToInt32)); - return buildUnary({op, make(f32)}); + return buildUnary({op, make(Type::f32)}); } - case f64: { + case Type::f64: { auto op = pick(FeatureOptions<UnaryOp>() .add(FeatureSet::MVP, TruncSFloat64ToInt32, @@ -1833,9 +1838,9 @@ private: .add(FeatureSet::TruncSat, TruncSatSFloat64ToInt32, TruncSatUFloat64ToInt32)); - return buildUnary({op, make(f64)}); + return buildUnary({op, make(Type::f64)}); } - case v128: { + case Type::v128: { assert(wasm.features.hasSIMD()); return buildUnary({pick(AnyTrueVecI8x16, AllTrueVecI8x16, @@ -1845,20 +1850,20 @@ private: AllTrueVecI32x4, AnyTrueVecI64x2, AllTrueVecI64x2), - make(v128)}); + make(Type::v128)}); } - case funcref: - case anyref: - case nullref: - case exnref: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: return makeTrivial(type); - case none: - case unreachable: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } WASM_UNREACHABLE("invalid type"); } - case i64: { + case Type::i64: { switch (upTo(4)) { case 0: { auto op = @@ -1868,10 +1873,11 @@ private: ExtendS8Int64, ExtendS16Int64, ExtendS32Int64)); - return buildUnary({op, make(i64)}); + return buildUnary({op, make(Type::i64)}); } case 1: - return buildUnary({pick(ExtendSInt32, ExtendUInt32), make(i32)}); + return buildUnary( + {pick(ExtendSInt32, ExtendUInt32), make(Type::i32)}); case 2: { auto op = pick(FeatureOptions<UnaryOp>() .add(FeatureSet::MVP, @@ -1880,7 +1886,7 @@ private: .add(FeatureSet::TruncSat, TruncSatSFloat32ToInt64, TruncSatUFloat32ToInt64)); - return buildUnary({op, make(f32)}); + return buildUnary({op, make(Type::f32)}); } case 3: { auto op = pick(FeatureOptions<UnaryOp>() @@ -1891,12 +1897,12 @@ private: .add(FeatureSet::TruncSat, TruncSatSFloat64ToInt64, TruncSatUFloat64ToInt64)); - return buildUnary({op, make(f64)}); + return buildUnary({op, make(Type::f64)}); } } WASM_UNREACHABLE("invalid value"); } - case f32: { + case Type::f32: { switch (upTo(4)) { case 0: return makeDeNanOp(buildUnary({pick(NegFloat32, @@ -1906,22 +1912,22 @@ private: TruncFloat32, NearestFloat32, SqrtFloat32), - make(f32)})); + make(Type::f32)})); case 1: return 
makeDeNanOp(buildUnary({pick(ConvertUInt32ToFloat32, ConvertSInt32ToFloat32, ReinterpretInt32), - make(i32)})); + make(Type::i32)})); case 2: return makeDeNanOp( buildUnary({pick(ConvertUInt64ToFloat32, ConvertSInt64ToFloat32), - make(i64)})); + make(Type::i64)})); case 3: - return makeDeNanOp(buildUnary({DemoteFloat64, make(f64)})); + return makeDeNanOp(buildUnary({DemoteFloat64, make(Type::f64)})); } WASM_UNREACHABLE("invalid value"); } - case f64: { + case Type::f64: { switch (upTo(4)) { case 0: return makeDeNanOp(buildUnary({pick(NegFloat64, @@ -1931,33 +1937,34 @@ private: TruncFloat64, NearestFloat64, SqrtFloat64), - make(f64)})); + make(Type::f64)})); case 1: return makeDeNanOp( buildUnary({pick(ConvertUInt32ToFloat64, ConvertSInt32ToFloat64), - make(i32)})); + make(Type::i32)})); case 2: return makeDeNanOp(buildUnary({pick(ConvertUInt64ToFloat64, ConvertSInt64ToFloat64, ReinterpretInt64), - make(i64)})); + make(Type::i64)})); case 3: - return makeDeNanOp(buildUnary({PromoteFloat32, make(f32)})); + return makeDeNanOp(buildUnary({PromoteFloat32, make(Type::f32)})); } WASM_UNREACHABLE("invalid value"); } - case v128: { + case Type::v128: { assert(wasm.features.hasSIMD()); switch (upTo(5)) { case 0: return buildUnary( - {pick(SplatVecI8x16, SplatVecI16x8, SplatVecI32x4), make(i32)}); + {pick(SplatVecI8x16, SplatVecI16x8, SplatVecI32x4), + make(Type::i32)}); case 1: - return buildUnary({SplatVecI64x2, make(i64)}); + return buildUnary({SplatVecI64x2, make(Type::i64)}); case 2: - return buildUnary({SplatVecF32x4, make(f32)}); + return buildUnary({SplatVecF32x4, make(Type::f32)}); case 3: - return buildUnary({SplatVecF64x2, make(f64)}); + return buildUnary({SplatVecF64x2, make(Type::f64)}); case 4: return buildUnary({pick(NotVec128, NegVecI8x16, @@ -1986,16 +1993,16 @@ private: WidenHighSVecI16x8ToVecI32x4, WidenLowUVecI16x8ToVecI32x4, WidenHighUVecI16x8ToVecI32x4), - make(v128)}); + make(Type::v128)}); } WASM_UNREACHABLE("invalid value"); } - case funcref: - case anyref: - case nullref: - case exnref: - case none: - case unreachable: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } WASM_UNREACHABLE("invalid type"); @@ -2006,10 +2013,10 @@ private: } Expression* makeBinary(Type type) { - if (type == unreachable) { + if (type == Type::unreachable) { if (auto* binary = makeBinary(getConcreteType())->dynCast<Binary>()) { - return makeDeNanOp( - buildBinary({binary->op, make(unreachable), make(unreachable)})); + return makeDeNanOp(buildBinary( + {binary->op, make(Type::unreachable), make(Type::unreachable)})); } // give up return makeTrivial(type); @@ -2020,7 +2027,7 @@ private: } switch (type) { - case i32: { + case Type::i32: { switch (upTo(4)) { case 0: return buildBinary({pick(AddInt32, @@ -2048,8 +2055,8 @@ private: GtUInt32, GeSInt32, GeUInt32), - make(i32), - make(i32)}); + make(Type::i32), + make(Type::i32)}); case 1: return buildBinary({pick(EqInt64, NeInt64, @@ -2061,8 +2068,8 @@ private: GtUInt64, GeSInt64, GeUInt64), - make(i64), - make(i64)}); + make(Type::i64), + make(Type::i64)}); case 2: return buildBinary({pick(EqFloat32, NeFloat32, @@ -2070,8 +2077,8 @@ private: LeFloat32, GtFloat32, GeFloat32), - make(f32), - make(f32)}); + make(Type::f32), + make(Type::f32)}); case 3: return buildBinary({pick(EqFloat64, NeFloat64, @@ -2079,12 +2086,12 @@ private: LeFloat64, GtFloat64, GeFloat64), - make(f64), - make(f64)}); + make(Type::f64), + make(Type::f64)}); } 
WASM_UNREACHABLE("invalid value"); } - case i64: { + case Type::i64: { return buildBinary({pick(AddInt64, SubInt64, MulInt64, @@ -2100,10 +2107,10 @@ private: ShrSInt64, RotLInt64, RotRInt64), - make(i64), - make(i64)}); + make(Type::i64), + make(Type::i64)}); } - case f32: { + case Type::f32: { return makeDeNanOp(buildBinary({pick(AddFloat32, SubFloat32, MulFloat32, @@ -2111,10 +2118,10 @@ private: CopySignFloat32, MinFloat32, MaxFloat32), - make(f32), - make(f32)})); + make(Type::f32), + make(Type::f32)})); } - case f64: { + case Type::f64: { return makeDeNanOp(buildBinary({pick(AddFloat64, SubFloat64, MulFloat64, @@ -2122,10 +2129,10 @@ private: CopySignFloat64, MinFloat64, MaxFloat64), - make(f64), - make(f64)})); + make(Type::f64), + make(Type::f64)})); } - case v128: { + case Type::v128: { assert(wasm.features.hasSIMD()); return buildBinary({pick(EqVecI8x16, NeVecI8x16, @@ -2222,15 +2229,15 @@ private: NarrowSVecI32x4ToVecI16x8, NarrowUVecI32x4ToVecI16x8, SwizzleVec8x16), - make(v128), - make(v128)}); - } - case funcref: - case anyref: - case nullref: - case exnref: - case none: - case unreachable: + make(Type::v128), + make(Type::v128)}); + } + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } WASM_UNREACHABLE("invalid type"); @@ -2244,18 +2251,18 @@ private: Type subType1 = pick(getSubTypes(type)); Type subType2 = pick(getSubTypes(type)); return makeDeNanOp( - buildSelect({make(i32), make(subType1), make(subType2)}, type)); + buildSelect({make(Type::i32), make(subType1), make(subType2)}, type)); } Expression* makeSwitch(Type type) { - assert(type == unreachable); + assert(type == Type::unreachable); if (breakableStack.empty()) { return make(type); } // we need to find proper targets to break to; try a bunch int tries = TRIES; std::vector<Name> names; - Type valueType = unreachable; + Type valueType = Type::Type::unreachable; while (tries-- > 0) { auto* target = pick(breakableStack); auto name = getTargetName(target); @@ -2275,14 +2282,14 @@ private: } auto default_ = names.back(); names.pop_back(); - auto temp1 = make(i32), + auto temp1 = make(Type::i32), temp2 = valueType.isConcrete() ? make(valueType) : nullptr; return builder.makeSwitch(names, default_, temp1, temp2); } Expression* makeDrop(Type type) { return builder.makeDrop( - make(type == unreachable ? type : getConcreteType())); + make(type == Type::unreachable ? 
type : getConcreteType())); } Expression* makeReturn(Type type) { @@ -2291,12 +2298,12 @@ private: } Expression* makeNop(Type type) { - assert(type == none); + assert(type == Type::none); return builder.makeNop(); } Expression* makeUnreachable(Type type) { - assert(type == unreachable); + assert(type == Type::unreachable); return builder.makeUnreachable(); } @@ -2306,26 +2313,26 @@ private: return makeTrivial(type); } wasm.memory.shared = true; - if (type == none) { + if (type == Type::none) { return builder.makeAtomicFence(); } - if (type == i32 && oneIn(2)) { + if (type == Type::i32 && oneIn(2)) { if (ATOMIC_WAITS && oneIn(2)) { auto* ptr = makePointer(); - auto expectedType = pick(i32, i64); + auto expectedType = pick(Type::i32, Type::i64); auto* expected = make(expectedType); - auto* timeout = make(i64); + auto* timeout = make(Type::i64); return builder.makeAtomicWait( ptr, expected, timeout, expectedType, logify(get())); } else { auto* ptr = makePointer(); - auto* count = make(i32); + auto* count = make(Type::i32); return builder.makeAtomicNotify(ptr, count, logify(get())); } } Index bytes; switch (type) { - case i32: { + case Type::i32: { switch (upTo(3)) { case 0: bytes = 1; @@ -2341,7 +2348,7 @@ private: } break; } - case i64: { + case Type::i64: { switch (upTo(4)) { case 0: bytes = 1; @@ -2391,14 +2398,14 @@ private: if (type.isRef()) { return makeTrivial(type); } - if (type != v128) { + if (type != Type::v128) { return makeSIMDExtract(type); } switch (upTo(7)) { case 0: - return makeUnary(v128); + return makeUnary(Type::v128); case 1: - return makeBinary(v128); + return makeBinary(Type::v128); case 2: return makeSIMDReplace(); case 3: @@ -2416,32 +2423,32 @@ private: Expression* makeSIMDExtract(Type type) { auto op = static_cast<SIMDExtractOp>(0); switch (type) { - case i32: + case Type::i32: op = pick(ExtractLaneSVecI8x16, ExtractLaneUVecI8x16, ExtractLaneSVecI16x8, ExtractLaneUVecI16x8, ExtractLaneVecI32x4); break; - case i64: + case Type::i64: op = ExtractLaneVecI64x2; break; - case f32: + case Type::f32: op = ExtractLaneVecF32x4; break; - case f64: + case Type::f64: op = ExtractLaneVecF64x2; break; - case v128: - case funcref: - case anyref: - case nullref: - case exnref: - case none: - case unreachable: + case Type::v128: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } - Expression* vec = make(v128); + Expression* vec = make(Type::v128); uint8_t index = 0; switch (op) { case ExtractLaneSVecI8x16: @@ -2471,33 +2478,33 @@ private: ReplaceLaneVecI64x2, ReplaceLaneVecF32x4, ReplaceLaneVecF64x2); - Expression* vec = make(v128); + Expression* vec = make(Type::v128); uint8_t index; Type lane_t; switch (op) { case ReplaceLaneVecI8x16: index = upTo(16); - lane_t = i32; + lane_t = Type::i32; break; case ReplaceLaneVecI16x8: index = upTo(8); - lane_t = i32; + lane_t = Type::i32; break; case ReplaceLaneVecI32x4: index = upTo(4); - lane_t = i32; + lane_t = Type::i32; break; case ReplaceLaneVecI64x2: index = upTo(2); - lane_t = i64; + lane_t = Type::i64; break; case ReplaceLaneVecF32x4: index = upTo(4); - lane_t = f32; + lane_t = Type::f32; break; case ReplaceLaneVecF64x2: index = upTo(2); - lane_t = f64; + lane_t = Type::f64; break; default: WASM_UNREACHABLE("unexpected op"); @@ -2507,8 +2514,8 @@ private: } Expression* makeSIMDShuffle() { - Expression* left = make(v128); - Expression* right = make(v128); + Expression* left = make(Type::v128); + Expression* right = 
make(Type::v128); std::array<uint8_t, 16> mask; for (size_t i = 0; i < 16; ++i) { mask[i] = upTo(32); @@ -2524,9 +2531,9 @@ private: // QFMAF64x2, // QFMSF64x2); SIMDTernaryOp op = Bitselect; - Expression* a = make(v128); - Expression* b = make(v128); - Expression* c = make(v128); + Expression* a = make(Type::v128); + Expression* b = make(Type::v128); + Expression* c = make(Type::v128); return builder.makeSIMDTernary(op, a, b, c); } @@ -2543,8 +2550,8 @@ private: ShlVecI64x2, ShrSVecI64x2, ShrUVecI64x2); - Expression* vec = make(v128); - Expression* shift = make(i32); + Expression* vec = make(Type::v128); + Expression* shift = make(Type::i32); return builder.makeSIMDShift(op, vec, shift); } @@ -2590,7 +2597,7 @@ private: return makeTrivial(type); } assert(wasm.features.hasBulkMemory()); - assert(type == none); + assert(type == Type::none); switch (upTo(4)) { case 0: return makeMemoryInit(); @@ -2618,7 +2625,7 @@ private: Expression* makeMemoryInit() { if (!allowMemory) { - return makeTrivial(none); + return makeTrivial(Type::none); } uint32_t segment = upTo(wasm.memory.segments.size()); size_t totalSize = wasm.memory.segments[segment].data.size(); @@ -2632,28 +2639,28 @@ private: Expression* makeDataDrop() { if (!allowMemory) { - return makeTrivial(none); + return makeTrivial(Type::none); } return builder.makeDataDrop(upTo(wasm.memory.segments.size())); } Expression* makeMemoryCopy() { if (!allowMemory) { - return makeTrivial(none); + return makeTrivial(Type::none); } Expression* dest = makePointer(); Expression* source = makePointer(); - Expression* size = make(i32); + Expression* size = make(Type::i32); return builder.makeMemoryCopy(dest, source, size); } Expression* makeMemoryFill() { if (!allowMemory) { - return makeTrivial(none); + return makeTrivial(Type::none); } Expression* dest = makePointer(); Expression* value = makePointer(); - Expression* size = make(i32); + Expression* size = make(Type::i32); return builder.makeMemoryFill(dest, value, size); } @@ -2662,12 +2669,12 @@ private: Expression* makeLogging() { auto type = getLoggableType(); return builder.makeCall( - std::string("log-") + type.toString(), {make(type)}, none); + std::string("log-") + type.toString(), {make(type)}, Type::none); } Expression* makeMemoryHashLogging() { - auto* hash = builder.makeCall(std::string("hashMemory"), {}, i32); - return builder.makeCall(std::string("log-i32"), {hash}, none); + auto* hash = builder.makeCall(std::string("hashMemory"), {}, Type::i32); + return builder.makeCall(std::string("log-i32"), {hash}, Type::none); } // special getters @@ -2850,7 +2857,7 @@ private: if (auto* block = target->dynCast<Block>()) { return block->type; } else if (target->is<Loop>()) { - return none; + return Type::none; } WASM_UNREACHABLE("unexpected expr type"); } diff --git a/src/tools/js-wrapper.h b/src/tools/js-wrapper.h index a2d481f42..a787d4d2c 100644 --- a/src/tools/js-wrapper.h +++ b/src/tools/js-wrapper.h @@ -107,7 +107,7 @@ static std::string generateJSWrapper(Module& wasm) { ret += ", "; } ret += "0"; - if (param == i64) { + if (param == Type::i64) { ret += ", 0"; } } diff --git a/src/tools/spec-wrapper.h b/src/tools/spec-wrapper.h index f59291e55..2bad602f2 100644 --- a/src/tools/spec-wrapper.h +++ b/src/tools/spec-wrapper.h @@ -33,29 +33,29 @@ static std::string generateSpecWrapper(Module& wasm) { for (Type param : func->sig.params.expand()) { // zeros in arguments TODO more? 
switch (param) { - case i32: + case Type::i32: ret += "(i32.const 0)"; break; - case i64: + case Type::i64: ret += "(i64.const 0)"; break; - case f32: + case Type::f32: ret += "(f32.const 0)"; break; - case f64: + case Type::f64: ret += "(f64.const 0)"; break; - case v128: + case Type::v128: ret += "(v128.const i32x4 0 0 0 0)"; break; - case funcref: - case anyref: - case nullref: - case exnref: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: ret += "(ref.null)"; break; - case none: - case unreachable: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } ret += " "; diff --git a/src/tools/wasm-reduce.cpp b/src/tools/wasm-reduce.cpp index 6adb1e174..5a05fc7d2 100644 --- a/src/tools/wasm-reduce.cpp +++ b/src/tools/wasm-reduce.cpp @@ -462,7 +462,7 @@ struct Reducer void visitExpression(Expression* curr) { // type-based reductions - if (curr->type == none) { + if (curr->type == Type::none) { if (tryToReduceCurrentToNop()) { return; } @@ -471,14 +471,14 @@ struct Reducer return; } } else { - assert(curr->type == unreachable); + assert(curr->type == Type::unreachable); if (tryToReduceCurrentToUnreachable()) { return; } } // specific reductions if (auto* iff = curr->dynCast<If>()) { - if (iff->type == none) { + if (iff->type == Type::none) { // perhaps we need just the condition? if (tryToReplaceCurrent(builder->makeDrop(iff->condition))) { return; @@ -556,7 +556,7 @@ struct Reducer } // Finally, try to replace with a child. for (auto* child : ChildIterator(curr)) { - if (child->type.isConcrete() && curr->type == none) { + if (child->type.isConcrete() && curr->type == Type::none) { if (tryToReplaceCurrent(builder->makeDrop(child))) { return; } @@ -578,114 +578,114 @@ struct Reducer } Expression* fixed = nullptr; switch (curr->type) { - case i32: { + case Type::i32: { switch (child->type) { - case i32: + case Type::i32: WASM_UNREACHABLE("invalid type"); - case i64: + case Type::i64: fixed = builder->makeUnary(WrapInt64, child); break; - case f32: + case Type::f32: fixed = builder->makeUnary(TruncSFloat32ToInt32, child); break; - case f64: + case Type::f64: fixed = builder->makeUnary(TruncSFloat64ToInt32, child); break; - case v128: - case funcref: - case anyref: - case nullref: - case exnref: + case Type::v128: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: continue; // not implemented yet - case none: - case unreachable: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } break; } - case i64: { + case Type::i64: { switch (child->type) { - case i32: + case Type::i32: fixed = builder->makeUnary(ExtendSInt32, child); break; - case i64: + case Type::i64: WASM_UNREACHABLE("invalid type"); - case f32: + case Type::f32: fixed = builder->makeUnary(TruncSFloat32ToInt64, child); break; - case f64: + case Type::f64: fixed = builder->makeUnary(TruncSFloat64ToInt64, child); break; - case v128: - case funcref: - case anyref: - case nullref: - case exnref: + case Type::v128: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: continue; // not implemented yet - case none: - case unreachable: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } break; } - case f32: { + case Type::f32: { switch (child->type) { - case i32: + case Type::i32: fixed = builder->makeUnary(ConvertSInt32ToFloat32, child); break; - case i64: + case Type::i64: fixed = builder->makeUnary(ConvertSInt64ToFloat32, child); break; - case f32: + 
case Type::f32: WASM_UNREACHABLE("unexpected type"); - case f64: + case Type::f64: fixed = builder->makeUnary(DemoteFloat64, child); break; - case v128: - case funcref: - case anyref: - case nullref: - case exnref: + case Type::v128: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: continue; // not implemented yet - case none: - case unreachable: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } break; } - case f64: { + case Type::f64: { switch (child->type) { - case i32: + case Type::i32: fixed = builder->makeUnary(ConvertSInt32ToFloat64, child); break; - case i64: + case Type::i64: fixed = builder->makeUnary(ConvertSInt64ToFloat64, child); break; - case f32: + case Type::f32: fixed = builder->makeUnary(PromoteFloat32, child); break; - case f64: + case Type::f64: WASM_UNREACHABLE("unexpected type"); - case v128: - case funcref: - case anyref: - case nullref: - case exnref: + case Type::v128: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: continue; // not implemented yet - case none: - case unreachable: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } break; } - case v128: - case funcref: - case anyref: - case nullref: - case exnref: + case Type::v128: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: continue; // not implemented yet - case none: - case unreachable: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } assert(fixed->type == curr->type); @@ -879,7 +879,7 @@ struct Reducer auto funcSig = func->sig; auto* funcBody = func->body; for (auto* child : ChildIterator(func->body)) { - if (!(child->type.isConcrete() || child->type == none)) { + if (!(child->type.isConcrete() || child->type == Type::none)) { continue; // not something a function can return } // Try to replace the body with the child, fixing up the function diff --git a/src/tools/wasm2js.cpp b/src/tools/wasm2js.cpp index 7fa56c810..fabcf5522 100644 --- a/src/tools/wasm2js.cpp +++ b/src/tools/wasm2js.cpp @@ -565,7 +565,7 @@ Ref AssertionEmitter::emitAssertReturnFunc(Builder& wasmBuilder, Expression* actual = sexpBuilder.parseExpression(e[1]); Expression* body = nullptr; if (e.size() == 2) { - if (actual->type == none) { + if (actual->type == Type::none) { body = wasmBuilder.blockify(actual, wasmBuilder.makeConst(Literal(uint32_t(1)))); } else { @@ -576,25 +576,25 @@ Ref AssertionEmitter::emitAssertReturnFunc(Builder& wasmBuilder, Type resType = expected->type; actual->type = resType; switch (resType) { - case i32: + case Type::i32: body = wasmBuilder.makeBinary(EqInt32, actual, expected); break; - case i64: + case Type::i64: body = wasmBuilder.makeCall( "i64Equal", {actual, - wasmBuilder.makeCall(WASM_FETCH_HIGH_BITS, {}, i32), + wasmBuilder.makeCall(WASM_FETCH_HIGH_BITS, {}, Type::i32), expected}, - i32); + Type::i32); break; - case f32: { - body = wasmBuilder.makeCall("f32Equal", {actual, expected}, i32); + case Type::f32: { + body = wasmBuilder.makeCall("f32Equal", {actual, expected}, Type::i32); break; } - case f64: { - body = wasmBuilder.makeCall("f64Equal", {actual, expected}, i32); + case Type::f64: { + body = wasmBuilder.makeCall("f64Equal", {actual, expected}, Type::i32); break; } @@ -623,7 +623,7 @@ Ref AssertionEmitter::emitAssertReturnNanFunc(Builder& wasmBuilder, Name testFuncName, Name asmModule) { Expression* actual = sexpBuilder.parseExpression(e[1]); - Expression* body = 
wasmBuilder.makeCall("isNaN", {actual}, i32); + Expression* body = wasmBuilder.makeCall("isNaN", {actual}, Type::i32); std::unique_ptr<Function> testFunc( wasmBuilder.makeFunction(testFuncName, std::vector<NameType>{}, diff --git a/src/wasm-binary.h b/src/wasm-binary.h index f019d0792..6f8aa5744 100644 --- a/src/wasm-binary.h +++ b/src/wasm-binary.h @@ -905,37 +905,37 @@ inline S32LEB binaryType(Type type) { int ret = 0; switch (type) { // None only used for block signatures. TODO: Separate out? - case none: + case Type::none: ret = BinaryConsts::EncodedType::Empty; break; - case i32: + case Type::i32: ret = BinaryConsts::EncodedType::i32; break; - case i64: + case Type::i64: ret = BinaryConsts::EncodedType::i64; break; - case f32: + case Type::f32: ret = BinaryConsts::EncodedType::f32; break; - case f64: + case Type::f64: ret = BinaryConsts::EncodedType::f64; break; - case v128: + case Type::v128: ret = BinaryConsts::EncodedType::v128; break; - case funcref: + case Type::funcref: ret = BinaryConsts::EncodedType::funcref; break; - case anyref: + case Type::anyref: ret = BinaryConsts::EncodedType::anyref; break; - case nullref: + case Type::nullref: ret = BinaryConsts::EncodedType::nullref; break; - case exnref: + case Type::exnref: ret = BinaryConsts::EncodedType::exnref; break; - case unreachable: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } return S32LEB(ret); diff --git a/src/wasm-builder.h b/src/wasm-builder.h index 38009cb8a..a625d7e54 100644 --- a/src/wasm-builder.h +++ b/src/wasm-builder.h @@ -27,7 +27,7 @@ namespace wasm { struct NameType { Name name; Type type; - NameType() : name(nullptr), type(none) {} + NameType() : name(nullptr), type(Type::none) {} NameType(Name name, Type type) : name(name), type(type) {} }; @@ -774,32 +774,32 @@ public: Literal value; // TODO: reuse node conditionally when possible for literals switch (curr->type) { - case i32: + case Type::i32: value = Literal(int32_t(0)); break; - case i64: + case Type::i64: value = Literal(int64_t(0)); break; - case f32: + case Type::f32: value = Literal(float(0)); break; - case f64: + case Type::f64: value = Literal(double(0)); break; - case v128: { + case Type::v128: { std::array<uint8_t, 16> bytes; bytes.fill(0); value = Literal(bytes.data()); break; } - case funcref: - case anyref: - case nullref: - case exnref: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: return ExpressionManipulator::refNull(curr); - case none: + case Type::none: return ExpressionManipulator::nop(curr); - case unreachable: + case Type::unreachable: return ExpressionManipulator::unreachable(curr); } return makeConst(value); diff --git a/src/wasm-interpreter.h b/src/wasm-interpreter.h index f37a6edd6..a29af2216 100644 --- a/src/wasm-interpreter.h +++ b/src/wasm-interpreter.h @@ -1041,8 +1041,8 @@ public: if (std::isnan(val)) { trap("truncSFloat of nan"); } - if (curr->type == i32) { - if (value.type == f32) { + if (curr->type == Type::i32) { + if (value.type == Type::f32) { if (!isInRangeI32TruncS(value.reinterpreti32())) { trap("i32.truncSFloat overflow"); } @@ -1053,7 +1053,7 @@ public: } return Literal(int32_t(val)); } else { - if (value.type == f32) { + if (value.type == Type::f32) { if (!isInRangeI64TruncS(value.reinterpreti32())) { trap("i64.truncSFloat overflow"); } @@ -1071,8 +1071,8 @@ public: if (std::isnan(val)) { trap("truncUFloat of nan"); } - if (curr->type == i32) { - if (value.type == f32) { + if (curr->type == Type::i32) { + if (value.type == Type::f32) { if 
(!isInRangeI32TruncU(value.reinterpreti32())) { trap("i32.truncUFloat overflow"); } @@ -1083,7 +1083,7 @@ public: } return Literal(uint32_t(val)); } else { - if (value.type == f32) { + if (value.type == Type::f32) { if (!isInRangeI64TruncU(value.reinterpreti32())) { trap("i64.truncUFloat overflow"); } @@ -1135,7 +1135,7 @@ public: } Literal value = flow.value; NOTE_EVAL1(value); - return Literal(value.type == nullref); + return Literal(value.type == Type::nullref); } Flow visitRefFunc(RefFunc* curr) { NOTE_ENTER("RefFunc"); @@ -1198,7 +1198,7 @@ public: // customize load/store, or the sub-functions which they call virtual Literal load(Load* load, Address addr) { switch (load->type) { - case i32: { + case Type::i32: { switch (load->bytes) { case 1: return load->signed_ ? Literal((int32_t)load8s(addr)) @@ -1213,7 +1213,7 @@ public: } break; } - case i64: { + case Type::i64: { switch (load->bytes) { case 1: return load->signed_ ? Literal((int64_t)load8s(addr)) @@ -1231,25 +1231,25 @@ public: } break; } - case f32: + case Type::f32: return Literal(load32u(addr)).castToF32(); - case f64: + case Type::f64: return Literal(load64u(addr)).castToF64(); - case v128: + case Type::v128: return Literal(load128(addr).data()); - case funcref: - case anyref: - case nullref: - case exnref: - case none: - case unreachable: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } WASM_UNREACHABLE("invalid type"); } virtual void store(Store* store, Address addr, Literal value) { switch (store->valueType) { - case i32: { + case Type::i32: { switch (store->bytes) { case 1: store8(addr, value.geti32()); @@ -1265,7 +1265,7 @@ public: } break; } - case i64: { + case Type::i64: { switch (store->bytes) { case 1: store8(addr, value.geti64()); @@ -1285,21 +1285,21 @@ public: break; } // write floats carefully, ensuring all bits reach memory - case f32: + case Type::f32: store32(addr, value.reinterpreti32()); break; - case f64: + case Type::f64: store64(addr, value.reinterpreti64()); break; - case v128: + case Type::v128: store128(addr, value.getv128()); break; - case funcref: - case anyref: - case nullref: - case exnref: - case none: - case unreachable: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } } @@ -1790,7 +1790,7 @@ private: } Flow visitSIMDLoadSplat(SIMDLoad* curr) { Load load; - load.type = i32; + load.type = Type::i32; load.bytes = curr->getMemBytes(); load.signed_ = false; load.offset = curr->offset; @@ -1809,7 +1809,7 @@ private: splat = &Literal::splatI32x4; break; case LoadSplatVec64x2: - load.type = i64; + load.type = Type::i64; splat = &Literal::splatI64x2; break; default: @@ -2125,7 +2125,7 @@ protected: template<class LS> Address getFinalAddress(LS* curr, Literal ptr) { Address memorySizeBytes = memorySize * Memory::kPageSize; - uint64_t addr = ptr.type == i32 ? ptr.geti32() : ptr.geti64(); + uint64_t addr = ptr.type == Type::i32 ? ptr.geti32() : ptr.geti64(); trapIfGt(curr->offset, memorySizeBytes, "offset > memory"); trapIfGt(addr, memorySizeBytes - curr->offset, "final > memory"); addr += curr->offset; @@ -2136,7 +2136,7 @@ protected: Address getFinalAddress(Literal ptr, Index bytes) { Address memorySizeBytes = memorySize * Memory::kPageSize; - uint64_t addr = ptr.type == i32 ? ptr.geti32() : ptr.geti64(); + uint64_t addr = ptr.type == Type::i32 ? 
ptr.geti32() : ptr.geti64(); trapIfGt(addr, memorySizeBytes - bytes, "highest > memory"); return addr; } @@ -2150,7 +2150,7 @@ protected: checkLoadAddress(addr, bytes); Const ptr; ptr.value = Literal(int32_t(addr)); - ptr.type = i32; + ptr.type = Type::i32; Load load; load.bytes = bytes; load.signed_ = true; @@ -2164,7 +2164,7 @@ protected: void doAtomicStore(Address addr, Index bytes, Literal toStore) { Const ptr; ptr.value = Literal(int32_t(addr)); - ptr.type = i32; + ptr.type = Type::i32; Const value; value.value = toStore; value.type = toStore.type; diff --git a/src/wasm-s-parser.h b/src/wasm-s-parser.h index 8cdcb88f4..e6b40eb08 100644 --- a/src/wasm-s-parser.h +++ b/src/wasm-s-parser.h @@ -161,7 +161,9 @@ private: Type stringToType(const char* str, bool allowError = false, bool prefix = false); Type stringToLaneType(const char* str); - bool isType(cashew::IString str) { return stringToType(str, true) != none; } + bool isType(cashew::IString str) { + return stringToType(str, true) != Type::none; + } public: Expression* parseExpression(Element* s) { return parseExpression(*s); } diff --git a/src/wasm-stack.h b/src/wasm-stack.h index 5f0bf12af..eeb675a3d 100644 --- a/src/wasm-stack.h +++ b/src/wasm-stack.h @@ -260,7 +260,8 @@ void BinaryenIRWriter<SubType>::visitPossibleBlockContents(Expression* curr) { for (auto* child : block->list) { visit(child); } - if (block->type == unreachable && block->list.back()->type != unreachable) { + if (block->type == Type::unreachable && + block->list.back()->type != Type::unreachable) { // similar to in visitBlock, here we could skip emitting the block itself, // but must still end the 'block' (the contents, really) with an unreachable emitUnreachable(); @@ -283,7 +284,7 @@ void BinaryenIRWriter<SubType>::visitBlock(Block* curr) { }; auto afterChildren = [this](Block* curr) { - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { // an unreachable block is one that cannot be exited. We cannot encode // this directly in wasm, where blocks must be none,i32,i64,f32,f64. Since // the block cannot be exited, we can emit an unreachable at the end, and @@ -291,7 +292,7 @@ void BinaryenIRWriter<SubType>::visitBlock(Block* curr) { emitUnreachable(); } emitScopeEnd(curr); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { // and emit an unreachable *outside* the block too, so later things can // pop anything emitUnreachable(); @@ -331,7 +332,7 @@ void BinaryenIRWriter<SubType>::visitBlock(Block* curr) { template<typename SubType> void BinaryenIRWriter<SubType>::visitIf(If* curr) { visit(curr->condition); - if (curr->condition->type == unreachable) { + if (curr->condition->type == Type::unreachable) { // this if-else is unreachable because of the condition, i.e., the condition // does not exit. So don't emit the if (but do consume the condition) emitUnreachable(); @@ -346,7 +347,7 @@ template<typename SubType> void BinaryenIRWriter<SubType>::visitIf(If* curr) { } emitScopeEnd(curr); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { // we already handled the case of the condition being unreachable. // otherwise, we may still be unreachable, if we are an if-else with both // sides unreachable. 
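The pattern repeated across these writer and interpreter hunks, comparing an expression's type against Type::unreachable instead of a bare unreachable, is exactly what the commit enforces once the namespace-level aliases are removed later in this diff. Below is a minimal, self-contained sketch of the effect; it uses a plain enum class rather than Binaryen's real wasm::Type (which is a richer wrapper class), and the helper name is hypothetical.

// Hypothetical, self-contained sketch, not Binaryen's actual headers.
// With the unscoped aliases gone, bare `unreachable` no longer names
// anything, so every comparison must be written as Type::unreachable.
#include <cassert>

enum class Type { none, i32, i64, f32, f64, unreachable };

// Same shape as the writer logic above: an unreachable block needs an
// extra `unreachable` opcode emitted so the emitted binary stays valid.
bool needsTrailingUnreachable(Type blockType) {
  // return blockType == unreachable;   // no longer compiles
  return blockType == Type::unreachable;
}

int main() {
  assert(needsTrailingUnreachable(Type::unreachable));
  assert(!needsTrailingUnreachable(Type::i32));
  return 0;
}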
wasm does not allow this to be emitted directly, so we @@ -361,13 +362,13 @@ template<typename SubType> void BinaryenIRWriter<SubType>::visitLoop(Loop* curr) { emit(curr); visitPossibleBlockContents(curr->body); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { // we emitted a loop without a return type, and the body might be block // contents, so ensure it is not consumed emitUnreachable(); } emitScopeEnd(curr); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { // we emitted a loop without a return type, so it must not be consumed emitUnreachable(); } @@ -382,7 +383,7 @@ void BinaryenIRWriter<SubType>::visitBreak(Break* curr) { visit(curr->condition); } emit(curr); - if (curr->condition && curr->type == unreachable) { + if (curr->condition && curr->type == Type::unreachable) { // a br_if is normally none or emits a value. if it is unreachable, then // either the condition or the value is unreachable, which is extremely // rare, and may require us to make the stack polymorphic (if the block we @@ -436,7 +437,7 @@ void BinaryenIRWriter<SubType>::visitCall(Call* curr) { // the current value (here i32.eqz). // // The same applies for other expressions. - if (curr->type == unreachable && !curr->isReturn) { + if (curr->type == Type::unreachable && !curr->isReturn) { emitUnreachable(); return; } @@ -449,7 +450,7 @@ void BinaryenIRWriter<SubType>::visitCallIndirect(CallIndirect* curr) { visit(operand); } visit(curr->target); - if (curr->type == unreachable && !curr->isReturn) { + if (curr->type == Type::unreachable && !curr->isReturn) { emitUnreachable(); return; } @@ -464,7 +465,7 @@ void BinaryenIRWriter<SubType>::visitLocalGet(LocalGet* curr) { template<typename SubType> void BinaryenIRWriter<SubType>::visitLocalSet(LocalSet* curr) { visit(curr->value); - if (curr->isTee() && curr->type == unreachable) { + if (curr->isTee() && curr->type == Type::unreachable) { emitUnreachable(); return; } @@ -485,7 +486,7 @@ void BinaryenIRWriter<SubType>::visitGlobalSet(GlobalSet* curr) { template<typename SubType> void BinaryenIRWriter<SubType>::visitLoad(Load* curr) { visit(curr->ptr); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { emitUnreachable(); return; } @@ -503,7 +504,7 @@ template<typename SubType> void BinaryenIRWriter<SubType>::visitAtomicRMW(AtomicRMW* curr) { visit(curr->ptr); visit(curr->value); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { emitUnreachable(); return; } @@ -515,7 +516,7 @@ void BinaryenIRWriter<SubType>::visitAtomicCmpxchg(AtomicCmpxchg* curr) { visit(curr->ptr); visit(curr->expected); visit(curr->replacement); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { emitUnreachable(); return; } @@ -527,7 +528,7 @@ void BinaryenIRWriter<SubType>::visitAtomicWait(AtomicWait* curr) { visit(curr->ptr); visit(curr->expected); visit(curr->timeout); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { emitUnreachable(); return; } @@ -538,7 +539,7 @@ template<typename SubType> void BinaryenIRWriter<SubType>::visitAtomicNotify(AtomicNotify* curr) { visit(curr->ptr); visit(curr->notifyCount); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { emitUnreachable(); return; } @@ -553,7 +554,7 @@ void BinaryenIRWriter<SubType>::visitAtomicFence(AtomicFence* curr) { template<typename SubType> void BinaryenIRWriter<SubType>::visitSIMDExtract(SIMDExtract* curr) { visit(curr->vec); - if (curr->type == unreachable) { + if 
(curr->type == Type::unreachable) { emitUnreachable(); return; } @@ -564,7 +565,7 @@ template<typename SubType> void BinaryenIRWriter<SubType>::visitSIMDReplace(SIMDReplace* curr) { visit(curr->vec); visit(curr->value); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { emitUnreachable(); return; } @@ -575,7 +576,7 @@ template<typename SubType> void BinaryenIRWriter<SubType>::visitSIMDShuffle(SIMDShuffle* curr) { visit(curr->left); visit(curr->right); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { emitUnreachable(); return; } @@ -587,7 +588,7 @@ void BinaryenIRWriter<SubType>::visitSIMDTernary(SIMDTernary* curr) { visit(curr->a); visit(curr->b); visit(curr->c); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { emitUnreachable(); return; } @@ -598,7 +599,7 @@ template<typename SubType> void BinaryenIRWriter<SubType>::visitSIMDShift(SIMDShift* curr) { visit(curr->vec); visit(curr->shift); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { emitUnreachable(); return; } @@ -608,7 +609,7 @@ void BinaryenIRWriter<SubType>::visitSIMDShift(SIMDShift* curr) { template<typename SubType> void BinaryenIRWriter<SubType>::visitSIMDLoad(SIMDLoad* curr) { visit(curr->ptr); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { emitUnreachable(); return; } @@ -652,7 +653,7 @@ void BinaryenIRWriter<SubType>::visitConst(Const* curr) { template<typename SubType> void BinaryenIRWriter<SubType>::visitUnary(Unary* curr) { visit(curr->value); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { emitUnreachable(); return; } @@ -663,7 +664,7 @@ template<typename SubType> void BinaryenIRWriter<SubType>::visitBinary(Binary* curr) { visit(curr->left); visit(curr->right); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { emitUnreachable(); return; } @@ -675,7 +676,7 @@ void BinaryenIRWriter<SubType>::visitSelect(Select* curr) { visit(curr->ifTrue); visit(curr->ifFalse); visit(curr->condition); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { emitUnreachable(); return; } @@ -734,7 +735,7 @@ template<typename SubType> void BinaryenIRWriter<SubType>::visitTry(Try* curr) { emitCatch(curr); visitPossibleBlockContents(curr->catchBody); emitScopeEnd(curr); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { emitUnreachable(); } } @@ -757,7 +758,7 @@ template<typename SubType> void BinaryenIRWriter<SubType>::visitBrOnExn(BrOnExn* curr) { visit(curr->exnref); emit(curr); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { emitUnreachable(); } } diff --git a/src/wasm-type.h b/src/wasm-type.h index 668ac3e4d..18c429e95 100644 --- a/src/wasm-type.h +++ b/src/wasm-type.h @@ -148,18 +148,6 @@ std::ostream& operator<<(std::ostream& os, ParamType t); std::ostream& operator<<(std::ostream& os, ResultType t); std::ostream& operator<<(std::ostream& os, Signature t); -constexpr Type none = Type::none; -constexpr Type i32 = Type::i32; -constexpr Type i64 = Type::i64; -constexpr Type f32 = Type::f32; -constexpr Type f64 = Type::f64; -constexpr Type v128 = Type::v128; -constexpr Type funcref = Type::funcref; -constexpr Type anyref = Type::anyref; -constexpr Type nullref = Type::nullref; -constexpr Type exnref = Type::exnref; -constexpr Type unreachable = Type::unreachable; - } // namespace wasm template<> class std::hash<wasm::Signature> { diff --git a/src/wasm.h b/src/wasm.h index 
c4dbd2f3f..77bf75545 100644 --- a/src/wasm.h +++ b/src/wasm.h @@ -543,7 +543,7 @@ public: Id _id; // the type of the expression: its *output*, not necessarily its input(s) - Type type = none; + Type type = Type::none; Expression(Id id) : _id(id) {} @@ -655,7 +655,7 @@ public: class Break : public SpecificExpression<Expression::BreakId> { public: Break() : value(nullptr), condition(nullptr) {} - Break(MixedArena& allocator) : Break() { type = unreachable; } + Break(MixedArena& allocator) : Break() { type = Type::unreachable; } Name name; Expression* value; @@ -666,7 +666,9 @@ public: class Switch : public SpecificExpression<Expression::SwitchId> { public: - Switch(MixedArena& allocator) : targets(allocator) { type = unreachable; } + Switch(MixedArena& allocator) : targets(allocator) { + type = Type::unreachable; + } ArenaVector<Name> targets; Name default_; @@ -1028,7 +1030,7 @@ public: class Return : public SpecificExpression<Expression::ReturnId> { public: - Return() { type = unreachable; } + Return() { type = Type::unreachable; } Return(MixedArena& allocator) : Return() {} Expression* value = nullptr; @@ -1047,7 +1049,7 @@ public: class Unreachable : public SpecificExpression<Expression::UnreachableId> { public: - Unreachable() { type = unreachable; } + Unreachable() { type = Type::unreachable; } Unreachable(MixedArena& allocator) : Unreachable() {} }; @@ -1134,7 +1136,7 @@ public: class BrOnExn : public SpecificExpression<Expression::BrOnExnId> { public: - BrOnExn() { type = unreachable; } + BrOnExn() { type = Type::unreachable; } BrOnExn(MixedArena& allocator) : BrOnExn() {} Name name; diff --git a/src/wasm/wasm-binary.cpp b/src/wasm/wasm-binary.cpp index ba5a8d3dd..4b748a5af 100644 --- a/src/wasm/wasm-binary.cpp +++ b/src/wasm/wasm-binary.cpp @@ -1048,25 +1048,25 @@ Type WasmBinaryBuilder::getType() { switch (type) { // None only used for block signatures. TODO: Separate out? case BinaryConsts::EncodedType::Empty: - return none; + return Type::none; case BinaryConsts::EncodedType::i32: - return i32; + return Type::i32; case BinaryConsts::EncodedType::i64: - return i64; + return Type::i64; case BinaryConsts::EncodedType::f32: - return f32; + return Type::f32; case BinaryConsts::EncodedType::f64: - return f64; + return Type::f64; case BinaryConsts::EncodedType::v128: - return v128; + return Type::v128; case BinaryConsts::EncodedType::funcref: - return funcref; + return Type::funcref; case BinaryConsts::EncodedType::anyref: - return anyref; + return Type::anyref; case BinaryConsts::EncodedType::nullref: - return nullref; + return Type::nullref; case BinaryConsts::EncodedType::exnref: - return exnref; + return Type::exnref; default: throwError("invalid wasm type: " + std::to_string(type)); } @@ -1648,7 +1648,7 @@ void WasmBinaryBuilder::processExpressions() { return; } expressionStack.push_back(curr); - if (curr->type == unreachable) { + if (curr->type == Type::unreachable) { // once we see something unreachable, we don't want to add anything else // to the stack, as it could be stacky code that is non-representable in // our AST. 
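The binaryType switch earlier in this diff and WasmBinaryBuilder::getType above form an encode/decode pair, and both now spell out Type:: on every case label. The sketch below only illustrates keeping such a pair in sync; it is not Binaryen's code, the enum is reduced to a handful of types, and while the byte values follow the well-known wasm type encodings, the authoritative constants live in BinaryConsts::EncodedType.

// Hedged sketch of an encode/decode pair kept in sync, mirroring
// binaryType() and WasmBinaryBuilder::getType() above.
#include <cassert>
#include <cstdint>
#include <initializer_list>
#include <stdexcept>

enum class Type { none, i32, i64, f32, f64 };

int8_t encode(Type t) {
  switch (t) {
    case Type::none: return 0x40;
    case Type::i32:  return 0x7F;
    case Type::i64:  return 0x7E;
    case Type::f32:  return 0x7D;
    case Type::f64:  return 0x7C;
  }
  throw std::logic_error("unexpected type");
}

Type decode(int8_t b) {
  switch (b) {
    case 0x40: return Type::none;
    case 0x7F: return Type::i32;
    case 0x7E: return Type::i64;
    case 0x7D: return Type::f32;
    case 0x7C: return Type::f64;
    default: throw std::logic_error("invalid encoded type");
  }
}

int main() {
  for (Type t : {Type::none, Type::i32, Type::i64, Type::f32, Type::f64}) {
    assert(decode(encode(t)) == t); // the two switches stay consistent
  }
  return 0;
}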
but we do need to skip it @@ -1730,7 +1730,7 @@ Expression* WasmBinaryBuilder::popExpression() { Expression* WasmBinaryBuilder::popNonVoidExpression() { auto* ret = popExpression(); - if (ret->type != none) { + if (ret->type != Type::none) { return ret; } // we found a void, so this is stacky code that we must handle carefully @@ -1741,7 +1741,7 @@ Expression* WasmBinaryBuilder::popNonVoidExpression() { while (1) { auto* curr = popExpression(); expressions.push_back(curr); - if (curr->type != none) { + if (curr->type != Type::none) { break; } } @@ -1757,7 +1757,7 @@ Expression* WasmBinaryBuilder::popNonVoidExpression() { block->list[0] = builder.makeLocalSet(local, block->list[0]); block->list.push_back(builder.makeLocalGet(local, type)); } else { - assert(type == unreachable); + assert(type == Type::unreachable); // nothing to do here - unreachable anyhow } block->finalize(); @@ -2318,7 +2318,7 @@ void WasmBinaryBuilder::pushBlockElements(Block* curr, } expressionStack.resize(start); // if we have a consumable item and need it, use it - if (consumable != NONE && curr->list.back()->type == none) { + if (consumable != NONE && curr->list.back()->type == Type::none) { requireFunctionContext( "need an extra var in a non-function context, invalid wasm"); Builder builder(wasm); @@ -2338,7 +2338,7 @@ void WasmBinaryBuilder::visitBlock(Block* curr) { while (1) { curr->type = getType(); curr->name = getNextLabel(); - breakStack.push_back({curr->name, curr->type != none}); + breakStack.push_back({curr->name, curr->type != Type::none}); stack.push_back(curr); if (more() && input[pos] == BinaryConsts::Block) { // a recursion @@ -2386,12 +2386,13 @@ void WasmBinaryBuilder::visitBlock(Block* curr) { Expression* WasmBinaryBuilder::getBlockOrSingleton(Type type, unsigned numPops) { Name label = getNextLabel(); - breakStack.push_back({label, type != none && type != unreachable}); + breakStack.push_back( + {label, type != Type::none && type != Type::unreachable}); auto start = expressionStack.size(); Builder builder(wasm); for (unsigned i = 0; i < numPops; i++) { - auto* pop = builder.makePop(exnref); + auto* pop = builder.makePop(Type::exnref); expressionStack.push_back(pop); } @@ -2615,82 +2616,82 @@ bool WasmBinaryBuilder::maybeVisitLoad(Expression*& out, case BinaryConsts::I32LoadMem8S: curr = allocator.alloc<Load>(); curr->bytes = 1; - curr->type = i32; + curr->type = Type::i32; curr->signed_ = true; break; case BinaryConsts::I32LoadMem8U: curr = allocator.alloc<Load>(); curr->bytes = 1; - curr->type = i32; + curr->type = Type::i32; curr->signed_ = false; break; case BinaryConsts::I32LoadMem16S: curr = allocator.alloc<Load>(); curr->bytes = 2; - curr->type = i32; + curr->type = Type::i32; curr->signed_ = true; break; case BinaryConsts::I32LoadMem16U: curr = allocator.alloc<Load>(); curr->bytes = 2; - curr->type = i32; + curr->type = Type::i32; curr->signed_ = false; break; case BinaryConsts::I32LoadMem: curr = allocator.alloc<Load>(); curr->bytes = 4; - curr->type = i32; + curr->type = Type::i32; break; case BinaryConsts::I64LoadMem8S: curr = allocator.alloc<Load>(); curr->bytes = 1; - curr->type = i64; + curr->type = Type::i64; curr->signed_ = true; break; case BinaryConsts::I64LoadMem8U: curr = allocator.alloc<Load>(); curr->bytes = 1; - curr->type = i64; + curr->type = Type::i64; curr->signed_ = false; break; case BinaryConsts::I64LoadMem16S: curr = allocator.alloc<Load>(); curr->bytes = 2; - curr->type = i64; + curr->type = Type::i64; curr->signed_ = true; break; case BinaryConsts::I64LoadMem16U: 
curr = allocator.alloc<Load>(); curr->bytes = 2; - curr->type = i64; + curr->type = Type::i64; curr->signed_ = false; break; case BinaryConsts::I64LoadMem32S: curr = allocator.alloc<Load>(); curr->bytes = 4; - curr->type = i64; + curr->type = Type::i64; curr->signed_ = true; break; case BinaryConsts::I64LoadMem32U: curr = allocator.alloc<Load>(); curr->bytes = 4; - curr->type = i64; + curr->type = Type::i64; curr->signed_ = false; break; case BinaryConsts::I64LoadMem: curr = allocator.alloc<Load>(); curr->bytes = 8; - curr->type = i64; + curr->type = Type::i64; break; case BinaryConsts::F32LoadMem: curr = allocator.alloc<Load>(); curr->bytes = 4; - curr->type = f32; + curr->type = Type::f32; break; case BinaryConsts::F64LoadMem: curr = allocator.alloc<Load>(); curr->bytes = 8; - curr->type = f64; + curr->type = Type::f64; break; default: return false; @@ -2701,37 +2702,37 @@ bool WasmBinaryBuilder::maybeVisitLoad(Expression*& out, case BinaryConsts::I32AtomicLoad8U: curr = allocator.alloc<Load>(); curr->bytes = 1; - curr->type = i32; + curr->type = Type::i32; break; case BinaryConsts::I32AtomicLoad16U: curr = allocator.alloc<Load>(); curr->bytes = 2; - curr->type = i32; + curr->type = Type::i32; break; case BinaryConsts::I32AtomicLoad: curr = allocator.alloc<Load>(); curr->bytes = 4; - curr->type = i32; + curr->type = Type::i32; break; case BinaryConsts::I64AtomicLoad8U: curr = allocator.alloc<Load>(); curr->bytes = 1; - curr->type = i64; + curr->type = Type::i64; break; case BinaryConsts::I64AtomicLoad16U: curr = allocator.alloc<Load>(); curr->bytes = 2; - curr->type = i64; + curr->type = Type::i64; break; case BinaryConsts::I64AtomicLoad32U: curr = allocator.alloc<Load>(); curr->bytes = 4; - curr->type = i64; + curr->type = Type::i64; break; case BinaryConsts::I64AtomicLoad: curr = allocator.alloc<Load>(); curr->bytes = 8; - curr->type = i64; + curr->type = Type::i64; break; default: return false; @@ -2757,47 +2758,47 @@ bool WasmBinaryBuilder::maybeVisitStore(Expression*& out, case BinaryConsts::I32StoreMem8: curr = allocator.alloc<Store>(); curr->bytes = 1; - curr->valueType = i32; + curr->valueType = Type::i32; break; case BinaryConsts::I32StoreMem16: curr = allocator.alloc<Store>(); curr->bytes = 2; - curr->valueType = i32; + curr->valueType = Type::i32; break; case BinaryConsts::I32StoreMem: curr = allocator.alloc<Store>(); curr->bytes = 4; - curr->valueType = i32; + curr->valueType = Type::i32; break; case BinaryConsts::I64StoreMem8: curr = allocator.alloc<Store>(); curr->bytes = 1; - curr->valueType = i64; + curr->valueType = Type::i64; break; case BinaryConsts::I64StoreMem16: curr = allocator.alloc<Store>(); curr->bytes = 2; - curr->valueType = i64; + curr->valueType = Type::i64; break; case BinaryConsts::I64StoreMem32: curr = allocator.alloc<Store>(); curr->bytes = 4; - curr->valueType = i64; + curr->valueType = Type::i64; break; case BinaryConsts::I64StoreMem: curr = allocator.alloc<Store>(); curr->bytes = 8; - curr->valueType = i64; + curr->valueType = Type::i64; break; case BinaryConsts::F32StoreMem: curr = allocator.alloc<Store>(); curr->bytes = 4; - curr->valueType = f32; + curr->valueType = Type::f32; break; case BinaryConsts::F64StoreMem: curr = allocator.alloc<Store>(); curr->bytes = 8; - curr->valueType = f64; + curr->valueType = Type::f64; break; default: return false; @@ -2807,37 +2808,37 @@ bool WasmBinaryBuilder::maybeVisitStore(Expression*& out, case BinaryConsts::I32AtomicStore8: curr = allocator.alloc<Store>(); curr->bytes = 1; - curr->valueType = i32; + 
curr->valueType = Type::i32; break; case BinaryConsts::I32AtomicStore16: curr = allocator.alloc<Store>(); curr->bytes = 2; - curr->valueType = i32; + curr->valueType = Type::i32; break; case BinaryConsts::I32AtomicStore: curr = allocator.alloc<Store>(); curr->bytes = 4; - curr->valueType = i32; + curr->valueType = Type::i32; break; case BinaryConsts::I64AtomicStore8: curr = allocator.alloc<Store>(); curr->bytes = 1; - curr->valueType = i64; + curr->valueType = Type::i64; break; case BinaryConsts::I64AtomicStore16: curr = allocator.alloc<Store>(); curr->bytes = 2; - curr->valueType = i64; + curr->valueType = Type::i64; break; case BinaryConsts::I64AtomicStore32: curr = allocator.alloc<Store>(); curr->bytes = 4; - curr->valueType = i64; + curr->valueType = Type::i64; break; case BinaryConsts::I64AtomicStore: curr = allocator.alloc<Store>(); curr->bytes = 8; - curr->valueType = i64; + curr->valueType = Type::i64; break; default: return false; @@ -2870,25 +2871,25 @@ bool WasmBinaryBuilder::maybeVisitAtomicRMW(Expression*& out, uint8_t code) { // Handle the cases for all the valid types for a particular opcode #define SET_FOR_OP(Op) \ case BinaryConsts::I32AtomicRMW##Op: \ - SET(Op, i32, 4); \ + SET(Op, Type::i32, 4); \ break; \ case BinaryConsts::I32AtomicRMW##Op##8U: \ - SET(Op, i32, 1); \ + SET(Op, Type::i32, 1); \ break; \ case BinaryConsts::I32AtomicRMW##Op##16U: \ - SET(Op, i32, 2); \ + SET(Op, Type::i32, 2); \ break; \ case BinaryConsts::I64AtomicRMW##Op: \ - SET(Op, i64, 8); \ + SET(Op, Type::i64, 8); \ break; \ case BinaryConsts::I64AtomicRMW##Op##8U: \ - SET(Op, i64, 1); \ + SET(Op, Type::i64, 1); \ break; \ case BinaryConsts::I64AtomicRMW##Op##16U: \ - SET(Op, i64, 2); \ + SET(Op, Type::i64, 2); \ break; \ case BinaryConsts::I64AtomicRMW##Op##32U: \ - SET(Op, i64, 4); \ + SET(Op, Type::i64, 4); \ break; switch (code) { @@ -2932,25 +2933,25 @@ bool WasmBinaryBuilder::maybeVisitAtomicCmpxchg(Expression*& out, switch (code) { case BinaryConsts::I32AtomicCmpxchg: - SET(i32, 4); + SET(Type::i32, 4); break; case BinaryConsts::I64AtomicCmpxchg: - SET(i64, 8); + SET(Type::i64, 8); break; case BinaryConsts::I32AtomicCmpxchg8U: - SET(i32, 1); + SET(Type::i32, 1); break; case BinaryConsts::I32AtomicCmpxchg16U: - SET(i32, 2); + SET(Type::i32, 2); break; case BinaryConsts::I64AtomicCmpxchg8U: - SET(i64, 1); + SET(Type::i64, 1); break; case BinaryConsts::I64AtomicCmpxchg16U: - SET(i64, 2); + SET(Type::i64, 2); break; case BinaryConsts::I64AtomicCmpxchg32U: - SET(i64, 4); + SET(Type::i64, 4); break; default: WASM_UNREACHABLE("unexpected opcode"); @@ -2979,15 +2980,15 @@ bool WasmBinaryBuilder::maybeVisitAtomicWait(Expression*& out, uint8_t code) { switch (code) { case BinaryConsts::I32AtomicWait: - curr->expectedType = i32; + curr->expectedType = Type::i32; break; case BinaryConsts::I64AtomicWait: - curr->expectedType = i64; + curr->expectedType = Type::i64; break; default: WASM_UNREACHABLE("unexpected opcode"); } - curr->type = i32; + curr->type = Type::i32; BYN_TRACE("zz node: AtomicWait\n"); curr->timeout = popNonVoidExpression(); curr->expected = popNonVoidExpression(); @@ -3009,7 +3010,7 @@ bool WasmBinaryBuilder::maybeVisitAtomicNotify(Expression*& out, uint8_t code) { auto* curr = allocator.alloc<AtomicNotify>(); BYN_TRACE("zz node: AtomicNotify\n"); - curr->type = i32; + curr->type = Type::i32; curr->notifyCount = popNonVoidExpression(); curr->ptr = popNonVoidExpression(); Address readAlign; @@ -4067,7 +4068,7 @@ bool WasmBinaryBuilder::maybeVisitSIMDStore(Expression*& out, uint32_t code) 
{ } auto* curr = allocator.alloc<Store>(); curr->bytes = 16; - curr->valueType = v128; + curr->valueType = Type::v128; readMemoryAccess(curr->align, curr->offset); curr->isAtomic = false; curr->value = popNonVoidExpression(); @@ -4285,7 +4286,7 @@ bool WasmBinaryBuilder::maybeVisitSIMDShift(Expression*& out, uint32_t code) { bool WasmBinaryBuilder::maybeVisitSIMDLoad(Expression*& out, uint32_t code) { if (code == BinaryConsts::V128Load) { auto* curr = allocator.alloc<Load>(); - curr->type = v128; + curr->type = Type::v128; curr->bytes = 16; readMemoryAccess(curr->align, curr->offset); curr->isAtomic = false; diff --git a/src/wasm/wasm-emscripten.cpp b/src/wasm/wasm-emscripten.cpp index 3d65f9de2..8357983b2 100644 --- a/src/wasm/wasm-emscripten.cpp +++ b/src/wasm/wasm-emscripten.cpp @@ -89,13 +89,13 @@ Expression* EmscriptenGlueGenerator::generateLoadStackPointer() { /* offset =*/stackPointerOffset, /* align =*/4, /* ptr =*/builder.makeConst(Literal(0)), - /* type =*/i32); + /* type =*/Type::i32); } Global* stackPointer = getStackPointerGlobal(); if (!stackPointer) { Fatal() << "stack pointer global not found"; } - return builder.makeGlobalGet(stackPointer->name, i32); + return builder.makeGlobalGet(stackPointer->name, Type::i32); } inline Expression* stackBoundsCheck(Builder& builder, @@ -111,7 +111,7 @@ inline Expression* stackBoundsCheck(Builder& builder, // Otherwise, just trap. Expression* handler; if (handlerName.is()) { - handler = builder.makeCall(handlerName, {}, none); + handler = builder.makeCall(handlerName, {}, Type::none); } else { handler = builder.makeUnreachable(); } @@ -139,7 +139,7 @@ EmscriptenGlueGenerator::generateStoreStackPointer(Function* func, /* align =*/4, /* ptr =*/builder.makeConst(Literal(0)), /* value =*/value, - /* type =*/i32); + /* type =*/Type::i32); } Global* stackPointer = getStackPointerGlobal(); if (!stackPointer) { @@ -160,7 +160,7 @@ void EmscriptenGlueGenerator::generateStackSaveFunction() { BYN_TRACE("generateStackSaveFunction\n"); std::vector<NameType> params{}; Function* function = - builder.makeFunction(STACK_SAVE, std::move(params), i32, {}); + builder.makeFunction(STACK_SAVE, std::move(params), Type::i32, {}); function->body = generateLoadStackPointer(); @@ -169,24 +169,24 @@ void EmscriptenGlueGenerator::generateStackSaveFunction() { void EmscriptenGlueGenerator::generateStackAllocFunction() { BYN_TRACE("generateStackAllocFunction\n"); - std::vector<NameType> params{{"0", i32}}; - Function* function = - builder.makeFunction(STACK_ALLOC, std::move(params), i32, {{"1", i32}}); + std::vector<NameType> params{{"0", Type::i32}}; + Function* function = builder.makeFunction( + STACK_ALLOC, std::move(params), Type::i32, {{"1", Type::i32}}); Expression* loadStack = generateLoadStackPointer(); - LocalGet* getSizeArg = builder.makeLocalGet(0, i32); + LocalGet* getSizeArg = builder.makeLocalGet(0, Type::i32); Binary* sub = builder.makeBinary(SubInt32, loadStack, getSizeArg); const static uint32_t bitAlignment = 16; const static uint32_t bitMask = bitAlignment - 1; Const* subConst = builder.makeConst(Literal(~bitMask)); Binary* maskedSub = builder.makeBinary(AndInt32, sub, subConst); - LocalSet* teeStackLocal = builder.makeLocalTee(1, maskedSub, i32); + LocalSet* teeStackLocal = builder.makeLocalTee(1, maskedSub, Type::i32); Expression* storeStack = generateStoreStackPointer(function, teeStackLocal); Block* block = builder.makeBlock(); block->list.push_back(storeStack); - LocalGet* getStackLocal2 = builder.makeLocalGet(1, i32); + LocalGet* getStackLocal2 
= builder.makeLocalGet(1, Type::i32); block->list.push_back(getStackLocal2); - block->type = i32; + block->type = Type::i32; function->body = block; addExportedFunction(wasm, function); @@ -194,10 +194,10 @@ void EmscriptenGlueGenerator::generateStackAllocFunction() { void EmscriptenGlueGenerator::generateStackRestoreFunction() { BYN_TRACE("generateStackRestoreFunction\n"); - std::vector<NameType> params{{"0", i32}}; + std::vector<NameType> params{{"0", Type::i32}}; Function* function = - builder.makeFunction(STACK_RESTORE, std::move(params), none, {}); - LocalGet* getArg = builder.makeLocalGet(0, i32); + builder.makeFunction(STACK_RESTORE, std::move(params), Type::none, {}); + LocalGet* getArg = builder.makeLocalGet(0, Type::i32); Expression* store = generateStoreStackPointer(function, getArg); function->body = store; @@ -264,15 +264,15 @@ Function* EmscriptenGlueGenerator::generateAssignGOTEntriesFunction() { return nullptr; } - Function* assignFunc = - builder.makeFunction(ASSIGN_GOT_ENTIRES, std::vector<NameType>{}, none, {}); + Function* assignFunc = builder.makeFunction( + ASSIGN_GOT_ENTIRES, std::vector<NameType>{}, Type::none, {}); Block* block = builder.makeBlock(); assignFunc->body = block; for (Global* g : gotMemEntries) { Name getter(std::string("g$") + g->base.c_str()); ensureFunctionImport(&wasm, getter, Signature(Type::none, Type::i32)); - Expression* call = builder.makeCall(getter, {}, i32); + Expression* call = builder.makeCall(getter, {}, Type::i32); GlobalSet* globalSet = builder.makeGlobalSet(g->name, call); block->list.push_back(globalSet); } @@ -298,7 +298,7 @@ Function* EmscriptenGlueGenerator::generateAssignGOTEntriesFunction() { (std::string("fp$") + g->base.c_str() + std::string("$") + getSig(f)) .c_str()); ensureFunctionImport(&wasm, getter, Signature(Type::none, Type::i32)); - Expression* call = builder.makeCall(getter, {}, i32); + Expression* call = builder.makeCall(getter, {}, Type::i32); GlobalSet* globalSet = builder.makeGlobalSet(g->name, call); block->list.push_back(globalSet); } @@ -322,13 +322,13 @@ Function* EmscriptenGlueGenerator::generateAssignGOTEntriesFunction() { void EmscriptenGlueGenerator::generatePostInstantiateFunction() { BYN_TRACE("generatePostInstantiateFunction\n"); Builder builder(wasm); - Function* post_instantiate = - builder.makeFunction(POST_INSTANTIATE, std::vector<NameType>{}, none, {}); + Function* post_instantiate = builder.makeFunction( + POST_INSTANTIATE, std::vector<NameType>{}, Type::none, {}); wasm.addFunction(post_instantiate); if (Function* F = generateAssignGOTEntriesFunction()) { // call __assign_got_enties from post_instantiate - Expression* call = builder.makeCall(F->name, {}, none); + Expression* call = builder.makeCall(F->name, {}, Type::none); post_instantiate->body = builder.blockify(post_instantiate->body, call); } @@ -336,7 +336,7 @@ void EmscriptenGlueGenerator::generatePostInstantiateFunction() { // expected by emscripten. 
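The STACK_ALLOC glue in the hunk above builds IR that subtracts the requested size from the stack pointer and masks the result with ~bitMask to round it down to a 16-byte boundary. A quick standalone check of that arithmetic, independent of Binaryen (the function name here is just for illustration):

// Standalone check of the stack-alignment arithmetic used by the
// generated STACK_ALLOC body: (sp - size) & ~15 aligns down to 16 bytes.
#include <cassert>
#include <cstdint>

uint32_t stackAlloc(uint32_t stackPointer, uint32_t size) {
  const uint32_t bitAlignment = 16;
  const uint32_t bitMask = bitAlignment - 1;
  return (stackPointer - size) & ~bitMask;
}

int main() {
  assert(stackAlloc(1024, 16) == 1008); // already 16-byte aligned
  assert(stackAlloc(1024, 20) == 992);  // 1004 rounded down to 992
  assert(stackAlloc(1024, 1) == 1008);  // 1023 rounded down to 1008
  return 0;
}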
// TODO(sbc): Unify these if (auto* e = wasm.getExportOrNull(WASM_CALL_CTORS)) { - Expression* call = builder.makeCall(e->value, {}, none); + Expression* call = builder.makeCall(e->value, {}, Type::none); post_instantiate->body = builder.blockify(post_instantiate->body, call); wasm.removeExport(WASM_CALL_CTORS); } @@ -350,11 +350,11 @@ void EmscriptenGlueGenerator::generatePostInstantiateFunction() { Function* EmscriptenGlueGenerator::generateMemoryGrowthFunction() { Name name(GROW_WASM_MEMORY); - std::vector<NameType> params{{NEW_SIZE, i32}}; + std::vector<NameType> params{{NEW_SIZE, Type::i32}}; Function* growFunction = - builder.makeFunction(name, std::move(params), i32, {}); + builder.makeFunction(name, std::move(params), Type::i32, {}); growFunction->body = - builder.makeHost(MemoryGrow, Name(), {builder.makeLocalGet(0, i32)}); + builder.makeHost(MemoryGrow, Name(), {builder.makeLocalGet(0, Type::i32)}); addExportedFunction(wasm, growFunction); @@ -384,14 +384,14 @@ void EmscriptenGlueGenerator::generateDynCallThunk(Signature sig) { return; // module already contains this dyncall } std::vector<NameType> params; - params.emplace_back("fptr", i32); // function pointer param + params.emplace_back("fptr", Type::i32); // function pointer param int p = 0; const std::vector<Type>& paramTypes = sig.params.expand(); for (const auto& ty : paramTypes) { params.emplace_back(std::to_string(p++), ty); } Function* f = builder.makeFunction(name, std::move(params), sig.results, {}); - Expression* fptr = builder.makeLocalGet(0, i32); + Expression* fptr = builder.makeLocalGet(0, Type::i32); std::vector<Expression*> args; for (unsigned i = 0; i < paramTypes.size(); ++i) { args.push_back(builder.makeLocalGet(i + 1, paramTypes[i])); @@ -423,7 +423,7 @@ struct RemoveStackPointer : public PostWalker<RemoveStackPointer> { if (!builder) { builder = make_unique<Builder>(*getModule()); } - replaceCurrent(builder->makeCall(STACK_SAVE, {}, i32)); + replaceCurrent(builder->makeCall(STACK_SAVE, {}, Type::i32)); } } @@ -433,7 +433,8 @@ struct RemoveStackPointer : public PostWalker<RemoveStackPointer> { if (!builder) { builder = make_unique<Builder>(*getModule()); } - replaceCurrent(builder->makeCall(STACK_RESTORE, {curr->value}, none)); + replaceCurrent( + builder->makeCall(STACK_RESTORE, {curr->value}, Type::none)); } } @@ -548,7 +549,7 @@ void EmscriptenGlueGenerator::enforceStackLimit() { void EmscriptenGlueGenerator::generateSetStackLimitFunction() { Function* function = builder.makeFunction(SET_STACK_LIMIT, Signature(Type::i32, Type::none), {}); - LocalGet* getArg = builder.makeLocalGet(0, i32); + LocalGet* getArg = builder.makeLocalGet(0, Type::i32); Expression* store = builder.makeGlobalSet(STACK_LIMIT, getArg); function->body = store; addExportedFunction(wasm, function); @@ -1227,7 +1228,7 @@ std::string EmscriptenGlueGenerator::generateEmscriptenMetadata( for (const auto& ex : wasm.exports) { if (ex->kind == ExternalKind::Global) { const Global* g = wasm.getGlobal(ex->value); - assert(g->type == i32); + assert(g->type == Type::i32); Const* init = g->init->cast<Const>(); uint32_t addr = init->value.geti32(); meta << nextElement() << '"' << ex->name.str << "\" : \"" << addr @@ -1313,10 +1314,11 @@ void EmscriptenGlueGenerator::exportWasiStart() { } BYN_TRACE("exportWasiStart\n"); Builder builder(wasm); - auto* body = builder.makeDrop(builder.makeCall( - main, - {LiteralUtils::makeZero(i32, wasm), LiteralUtils::makeZero(i32, wasm)}, - i32)); + auto* body = + builder.makeDrop(builder.makeCall(main, + 
{LiteralUtils::makeZero(Type::i32, wasm), + LiteralUtils::makeZero(Type::i32, wasm)}, + Type::i32)); auto* func = builder.makeFunction(_start, Signature(Type::none, Type::none), {}, body); wasm.addFunction(func); diff --git a/src/wasm/wasm-s-parser.cpp b/src/wasm/wasm-s-parser.cpp index 7ff946200..573df9dfa 100644 --- a/src/wasm/wasm-s-parser.cpp +++ b/src/wasm/wasm-s-parser.cpp @@ -833,64 +833,64 @@ Type SExpressionWasmBuilder::stringToType(const char* str, bool prefix) { if (str[0] == 'i') { if (str[1] == '3' && str[2] == '2' && (prefix || str[3] == 0)) { - return i32; + return Type::i32; } if (str[1] == '6' && str[2] == '4' && (prefix || str[3] == 0)) { - return i64; + return Type::i64; } } if (str[0] == 'f') { if (str[1] == '3' && str[2] == '2' && (prefix || str[3] == 0)) { - return f32; + return Type::f32; } if (str[1] == '6' && str[2] == '4' && (prefix || str[3] == 0)) { - return f64; + return Type::f64; } } if (str[0] == 'v') { if (str[1] == '1' && str[2] == '2' && str[3] == '8' && (prefix || str[4] == 0)) { - return v128; + return Type::v128; } } if (strncmp(str, "funcref", 7) == 0 && (prefix || str[7] == 0)) { - return funcref; + return Type::funcref; } if (strncmp(str, "anyref", 6) == 0 && (prefix || str[6] == 0)) { - return anyref; + return Type::anyref; } if (strncmp(str, "nullref", 7) == 0 && (prefix || str[7] == 0)) { - return nullref; + return Type::nullref; } if (strncmp(str, "exnref", 6) == 0 && (prefix || str[6] == 0)) { - return exnref; + return Type::exnref; } if (allowError) { - return none; + return Type::none; } throw ParseException(std::string("invalid wasm type: ") + str); } Type SExpressionWasmBuilder::stringToLaneType(const char* str) { if (strcmp(str, "i8x16") == 0) { - return i32; + return Type::i32; } if (strcmp(str, "i16x8") == 0) { - return i32; + return Type::i32; } if (strcmp(str, "i32x4") == 0) { - return i32; + return Type::i32; } if (strcmp(str, "i64x2") == 0) { - return i64; + return Type::i64; } if (strcmp(str, "f32x4") == 0) { - return f32; + return Type::f32; } if (strcmp(str, "f64x2") == 0) { - return f64; + return Type::f64; } - return none; + return Type::none; } Function::DebugLocation @@ -1067,7 +1067,7 @@ Expression* SExpressionWasmBuilder::makeBlock(Element& s) { if (i < s.size() && s[i]->isStr()) { // could be a name or a type if (s[i]->dollared() || - stringToType(s[i]->str(), true /* allowError */) == none) { + stringToType(s[i]->str(), true /* allowError */) == Type::none) { sName = s[i++]->str(); } else { sName = "block"; @@ -1152,7 +1152,7 @@ static Literal makeLanes(Element& s, MixedArena& allocator, Type lane_t) { } Expression* SExpressionWasmBuilder::makeConst(Element& s, Type type) { - if (type != v128) { + if (type != Type::v128) { auto ret = parseConst(s[1]->str(), type, allocator); if (!ret) { throw ParseException("bad const", s[1]->line, s[1]->col); @@ -1165,7 +1165,7 @@ Expression* SExpressionWasmBuilder::makeConst(Element& s, Type type) { size_t lanes = s.size() - 2; switch (lanes) { case 2: { - if (lane_t != i64 && lane_t != f64) { + if (lane_t != Type::i64 && lane_t != Type::f64) { throw ParseException( "Unexpected v128 literal lane type", s[1]->line, s[1]->col); } @@ -1173,7 +1173,7 @@ Expression* SExpressionWasmBuilder::makeConst(Element& s, Type type) { break; } case 4: { - if (lane_t != i32 && lane_t != f32) { + if (lane_t != Type::i32 && lane_t != Type::f32) { throw ParseException( "Unexpected v128 literal lane type", s[1]->line, s[1]->col); } @@ -1181,7 +1181,7 @@ Expression* SExpressionWasmBuilder::makeConst(Element& 
s, Type type) { break; } case 8: { - if (lane_t != i32) { + if (lane_t != Type::i32) { throw ParseException( "Unexpected v128 literal lane type", s[1]->line, s[1]->col); } @@ -1189,7 +1189,7 @@ Expression* SExpressionWasmBuilder::makeConst(Element& s, Type type) { break; } case 16: { - if (lane_t != i32) { + if (lane_t != Type::i32) { throw ParseException( "Unexpected v128 literal lane type", s[1]->line, s[1]->col); } @@ -1626,7 +1626,7 @@ SExpressionWasmBuilder::makeMaybeBlock(Element& s, size_t i, Type type) { Type SExpressionWasmBuilder::parseOptionalResultType(Element& s, Index& i) { if (s.size() == i) { - return none; + return Type::none; } // TODO(sbc): Remove support for old result syntax (bare streing) once the @@ -1638,7 +1638,7 @@ Type SExpressionWasmBuilder::parseOptionalResultType(Element& s, Index& i) { Element& params = *s[i]; IString id = params[0]->str(); if (id != RESULT) { - return none; + return Type::none; } i++; @@ -2003,7 +2003,7 @@ void SExpressionWasmBuilder::parseMemory(Element& s, bool preParseImport) { } const char* input = curr[j]->c_str(); auto* offset = allocator.alloc<Const>(); - offset->type = i32; + offset->type = Type::i32; offset->value = Literal(int32_t(offsetValue)); if (auto size = strlen(input)) { std::vector<char> data; @@ -2245,7 +2245,7 @@ void SExpressionWasmBuilder::parseGlobal(Element& s, bool preParseImport) { globalCounter++; globalNames.push_back(global->name); bool mutable_ = false; - Type type = none; + Type type = Type::none; bool exported = false; Name importModule, importBase; while (i < s.size() && s[i]->isList()) { @@ -2276,7 +2276,7 @@ void SExpressionWasmBuilder::parseGlobal(Element& s, bool preParseImport) { if (exported && mutable_) { throw ParseException("cannot export a mutable global", s.line, s.col); } - if (type == none) { + if (type == Type::none) { type = stringToType(s[i++]->str()); } if (importModule.is()) { diff --git a/src/wasm/wasm-stack.cpp b/src/wasm/wasm-stack.cpp index af1e8907c..551b1f132 100644 --- a/src/wasm/wasm-stack.cpp +++ b/src/wasm/wasm-stack.cpp @@ -21,7 +21,7 @@ namespace wasm { void BinaryInstWriter::visitBlock(Block* curr) { breakStack.push_back(curr->name); o << int8_t(BinaryConsts::Block); - o << binaryType(curr->type != unreachable ? curr->type : none); + o << binaryType(curr->type != Type::unreachable ? curr->type : Type::none); } void BinaryInstWriter::visitIf(If* curr) { @@ -30,7 +30,7 @@ void BinaryInstWriter::visitIf(If* curr) { // instead) breakStack.emplace_back(IMPOSSIBLE_CONTINUE); o << int8_t(BinaryConsts::If); - o << binaryType(curr->type != unreachable ? curr->type : none); + o << binaryType(curr->type != Type::unreachable ? curr->type : Type::none); } void BinaryInstWriter::emitIfElse() { @@ -43,7 +43,7 @@ void BinaryInstWriter::emitIfElse() { void BinaryInstWriter::visitLoop(Loop* curr) { breakStack.push_back(curr->name); o << int8_t(BinaryConsts::Loop); - o << binaryType(curr->type != unreachable ? curr->type : none); + o << binaryType(curr->type != Type::unreachable ? curr->type : Type::none); } void BinaryInstWriter::visitBreak(Break* curr) { @@ -94,7 +94,7 @@ void BinaryInstWriter::visitGlobalSet(GlobalSet* curr) { void BinaryInstWriter::visitLoad(Load* curr) { if (!curr->isAtomic) { switch (curr->type) { - case i32: { + case Type::i32: { switch (curr->bytes) { case 1: o << int8_t(curr->signed_ ? 
BinaryConsts::I32LoadMem8S @@ -112,7 +112,7 @@ void BinaryInstWriter::visitLoad(Load* curr) { } break; } - case i64: { + case Type::i64: { switch (curr->bytes) { case 1: o << int8_t(curr->signed_ ? BinaryConsts::I64LoadMem8S @@ -134,30 +134,30 @@ void BinaryInstWriter::visitLoad(Load* curr) { } break; } - case f32: + case Type::f32: o << int8_t(BinaryConsts::F32LoadMem); break; - case f64: + case Type::f64: o << int8_t(BinaryConsts::F64LoadMem); break; - case v128: + case Type::v128: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::V128Load); break; - case unreachable: + case Type::unreachable: // the pointer is unreachable, so we are never reached; just don't emit // a load return; - case funcref: - case anyref: - case nullref: - case exnref: - case none: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: + case Type::none: WASM_UNREACHABLE("unexpected type"); } } else { o << int8_t(BinaryConsts::AtomicPrefix); switch (curr->type) { - case i32: { + case Type::i32: { switch (curr->bytes) { case 1: o << int8_t(BinaryConsts::I32AtomicLoad8U); @@ -173,7 +173,7 @@ void BinaryInstWriter::visitLoad(Load* curr) { } break; } - case i64: { + case Type::i64: { switch (curr->bytes) { case 1: o << int8_t(BinaryConsts::I64AtomicLoad8U); @@ -192,7 +192,7 @@ void BinaryInstWriter::visitLoad(Load* curr) { } break; } - case unreachable: + case Type::unreachable: return; default: WASM_UNREACHABLE("unexpected type"); @@ -204,7 +204,7 @@ void BinaryInstWriter::visitLoad(Load* curr) { void BinaryInstWriter::visitStore(Store* curr) { if (!curr->isAtomic) { switch (curr->valueType) { - case i32: { + case Type::i32: { switch (curr->bytes) { case 1: o << int8_t(BinaryConsts::I32StoreMem8); @@ -220,7 +220,7 @@ void BinaryInstWriter::visitStore(Store* curr) { } break; } - case i64: { + case Type::i64: { switch (curr->bytes) { case 1: o << int8_t(BinaryConsts::I64StoreMem8); @@ -239,28 +239,28 @@ void BinaryInstWriter::visitStore(Store* curr) { } break; } - case f32: + case Type::f32: o << int8_t(BinaryConsts::F32StoreMem); break; - case f64: + case Type::f64: o << int8_t(BinaryConsts::F64StoreMem); break; - case v128: + case Type::v128: o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::V128Store); break; - case funcref: - case anyref: - case nullref: - case exnref: - case none: - case unreachable: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } } else { o << int8_t(BinaryConsts::AtomicPrefix); switch (curr->valueType) { - case i32: { + case Type::i32: { switch (curr->bytes) { case 1: o << int8_t(BinaryConsts::I32AtomicStore8); @@ -276,7 +276,7 @@ void BinaryInstWriter::visitStore(Store* curr) { } break; } - case i64: { + case Type::i64: { switch (curr->bytes) { case 1: o << int8_t(BinaryConsts::I64AtomicStore8); @@ -308,7 +308,7 @@ void BinaryInstWriter::visitAtomicRMW(AtomicRMW* curr) { #define CASE_FOR_OP(Op) \ case Op: \ switch (curr->type) { \ - case i32: \ + case Type::i32: \ switch (curr->bytes) { \ case 1: \ o << int8_t(BinaryConsts::I32AtomicRMW##Op##8U); \ @@ -323,7 +323,7 @@ void BinaryInstWriter::visitAtomicRMW(AtomicRMW* curr) { WASM_UNREACHABLE("invalid rmw size"); \ } \ break; \ - case i64: \ + case Type::i64: \ switch (curr->bytes) { \ case 1: \ o << int8_t(BinaryConsts::I64AtomicRMW##Op##8U); \ @@ -364,7 +364,7 @@ void BinaryInstWriter::visitAtomicRMW(AtomicRMW* curr) { void 
BinaryInstWriter::visitAtomicCmpxchg(AtomicCmpxchg* curr) { o << int8_t(BinaryConsts::AtomicPrefix); switch (curr->type) { - case i32: + case Type::i32: switch (curr->bytes) { case 1: o << int8_t(BinaryConsts::I32AtomicCmpxchg8U); @@ -379,7 +379,7 @@ void BinaryInstWriter::visitAtomicCmpxchg(AtomicCmpxchg* curr) { WASM_UNREACHABLE("invalid size"); } break; - case i64: + case Type::i64: switch (curr->bytes) { case 1: o << int8_t(BinaryConsts::I64AtomicCmpxchg8U); @@ -406,12 +406,12 @@ void BinaryInstWriter::visitAtomicCmpxchg(AtomicCmpxchg* curr) { void BinaryInstWriter::visitAtomicWait(AtomicWait* curr) { o << int8_t(BinaryConsts::AtomicPrefix); switch (curr->expectedType) { - case i32: { + case Type::i32: { o << int8_t(BinaryConsts::I32AtomicWait); emitMemoryAccess(4, 4, curr->offset); break; } - case i64: { + case Type::i64: { o << int8_t(BinaryConsts::I64AtomicWait); emitMemoryAccess(8, 8, curr->offset); break; @@ -622,23 +622,23 @@ void BinaryInstWriter::visitMemoryFill(MemoryFill* curr) { void BinaryInstWriter::visitConst(Const* curr) { switch (curr->type) { - case i32: { + case Type::i32: { o << int8_t(BinaryConsts::I32Const) << S32LEB(curr->value.geti32()); break; } - case i64: { + case Type::i64: { o << int8_t(BinaryConsts::I64Const) << S64LEB(curr->value.geti64()); break; } - case f32: { + case Type::f32: { o << int8_t(BinaryConsts::F32Const) << curr->value.reinterpreti32(); break; } - case f64: { + case Type::f64: { o << int8_t(BinaryConsts::F64Const) << curr->value.reinterpreti64(); break; } - case v128: { + case Type::v128: { o << int8_t(BinaryConsts::SIMDPrefix) << U32LEB(BinaryConsts::V128Const); std::array<uint8_t, 16> v = curr->value.getv128(); for (size_t i = 0; i < 16; ++i) { @@ -646,12 +646,12 @@ void BinaryInstWriter::visitConst(Const* curr) { } break; } - case funcref: - case anyref: - case nullref: - case exnref: - case none: - case unreachable: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: + case Type::none: + case Type::unreachable: WASM_UNREACHABLE("unexpected type"); } } @@ -1592,7 +1592,7 @@ void BinaryInstWriter::visitRefFunc(RefFunc* curr) { void BinaryInstWriter::visitTry(Try* curr) { breakStack.emplace_back(IMPOSSIBLE_CONTINUE); o << int8_t(BinaryConsts::Try); - o << binaryType(curr->type != unreachable ? curr->type : none); + o << binaryType(curr->type != Type::unreachable ? 
curr->type : Type::none); } void BinaryInstWriter::emitCatch() { @@ -1661,85 +1661,88 @@ void BinaryInstWriter::mapLocalsAndEmitHeader() { Type type = func->getLocalType(i); // increment now for simplicity, must decrement it in returns currLocalsByType[type]++; - if (type == i32) { - mappedLocals[i] = index + currLocalsByType[i32] - 1; + if (type == Type::i32) { + mappedLocals[i] = index + currLocalsByType[Type::i32] - 1; continue; } - index += numLocalsByType[i32]; - if (type == i64) { - mappedLocals[i] = index + currLocalsByType[i64] - 1; + index += numLocalsByType[Type::i32]; + if (type == Type::i64) { + mappedLocals[i] = index + currLocalsByType[Type::i64] - 1; continue; } - index += numLocalsByType[i64]; - if (type == f32) { - mappedLocals[i] = index + currLocalsByType[f32] - 1; + index += numLocalsByType[Type::i64]; + if (type == Type::f32) { + mappedLocals[i] = index + currLocalsByType[Type::f32] - 1; continue; } - index += numLocalsByType[f32]; - if (type == f64) { - mappedLocals[i] = index + currLocalsByType[f64] - 1; + index += numLocalsByType[Type::f32]; + if (type == Type::f64) { + mappedLocals[i] = index + currLocalsByType[Type::f64] - 1; continue; } - index += numLocalsByType[f64]; - if (type == v128) { - mappedLocals[i] = index + currLocalsByType[v128] - 1; + index += numLocalsByType[Type::f64]; + if (type == Type::v128) { + mappedLocals[i] = index + currLocalsByType[Type::v128] - 1; continue; } - index += numLocalsByType[v128]; - if (type == funcref) { - mappedLocals[i] = index + currLocalsByType[funcref] - 1; + index += numLocalsByType[Type::v128]; + if (type == Type::funcref) { + mappedLocals[i] = index + currLocalsByType[Type::funcref] - 1; continue; } - index += numLocalsByType[funcref]; - if (type == anyref) { - mappedLocals[i] = index + currLocalsByType[anyref] - 1; + index += numLocalsByType[Type::funcref]; + if (type == Type::anyref) { + mappedLocals[i] = index + currLocalsByType[Type::anyref] - 1; continue; } - index += numLocalsByType[anyref]; - if (type == nullref) { - mappedLocals[i] = index + currLocalsByType[nullref] - 1; + index += numLocalsByType[Type::anyref]; + if (type == Type::nullref) { + mappedLocals[i] = index + currLocalsByType[Type::nullref] - 1; continue; } - index += numLocalsByType[nullref]; - if (type == exnref) { - mappedLocals[i] = index + currLocalsByType[exnref] - 1; + index += numLocalsByType[Type::nullref]; + if (type == Type::exnref) { + mappedLocals[i] = index + currLocalsByType[Type::exnref] - 1; continue; } WASM_UNREACHABLE("unexpected type"); } // Emit them. - o << U32LEB( - (numLocalsByType[i32] ? 1 : 0) + (numLocalsByType[i64] ? 1 : 0) + - (numLocalsByType[f32] ? 1 : 0) + (numLocalsByType[f64] ? 1 : 0) + - (numLocalsByType[v128] ? 1 : 0) + (numLocalsByType[funcref] ? 1 : 0) + - (numLocalsByType[anyref] ? 1 : 0) + (numLocalsByType[nullref] ? 1 : 0) + - (numLocalsByType[exnref] ? 1 : 0)); - if (numLocalsByType[i32]) { - o << U32LEB(numLocalsByType[i32]) << binaryType(i32); + o << U32LEB((numLocalsByType[Type::i32] ? 1 : 0) + + (numLocalsByType[Type::i64] ? 1 : 0) + + (numLocalsByType[Type::f32] ? 1 : 0) + + (numLocalsByType[Type::f64] ? 1 : 0) + + (numLocalsByType[Type::v128] ? 1 : 0) + + (numLocalsByType[Type::funcref] ? 1 : 0) + + (numLocalsByType[Type::anyref] ? 1 : 0) + + (numLocalsByType[Type::nullref] ? 1 : 0) + + (numLocalsByType[Type::exnref] ? 
1 : 0)); + if (numLocalsByType[Type::i32]) { + o << U32LEB(numLocalsByType[Type::i32]) << binaryType(Type::i32); } - if (numLocalsByType[i64]) { - o << U32LEB(numLocalsByType[i64]) << binaryType(i64); + if (numLocalsByType[Type::i64]) { + o << U32LEB(numLocalsByType[Type::i64]) << binaryType(Type::i64); } - if (numLocalsByType[f32]) { - o << U32LEB(numLocalsByType[f32]) << binaryType(f32); + if (numLocalsByType[Type::f32]) { + o << U32LEB(numLocalsByType[Type::f32]) << binaryType(Type::f32); } - if (numLocalsByType[f64]) { - o << U32LEB(numLocalsByType[f64]) << binaryType(f64); + if (numLocalsByType[Type::f64]) { + o << U32LEB(numLocalsByType[Type::f64]) << binaryType(Type::f64); } - if (numLocalsByType[v128]) { - o << U32LEB(numLocalsByType[v128]) << binaryType(v128); + if (numLocalsByType[Type::v128]) { + o << U32LEB(numLocalsByType[Type::v128]) << binaryType(Type::v128); } - if (numLocalsByType[funcref]) { - o << U32LEB(numLocalsByType[funcref]) << binaryType(funcref); + if (numLocalsByType[Type::funcref]) { + o << U32LEB(numLocalsByType[Type::funcref]) << binaryType(Type::funcref); } - if (numLocalsByType[anyref]) { - o << U32LEB(numLocalsByType[anyref]) << binaryType(anyref); + if (numLocalsByType[Type::anyref]) { + o << U32LEB(numLocalsByType[Type::anyref]) << binaryType(Type::anyref); } - if (numLocalsByType[nullref]) { - o << U32LEB(numLocalsByType[nullref]) << binaryType(nullref); + if (numLocalsByType[Type::nullref]) { + o << U32LEB(numLocalsByType[Type::nullref]) << binaryType(Type::nullref); } - if (numLocalsByType[exnref]) { - o << U32LEB(numLocalsByType[exnref]) << binaryType(exnref); + if (numLocalsByType[Type::exnref]) { + o << U32LEB(numLocalsByType[Type::exnref]) << binaryType(Type::exnref); } } @@ -1799,17 +1802,17 @@ StackInst* StackIRGenerator::makeStackInst(StackInst::Op op, auto stackType = origin->type; if (origin->is<Block>() || origin->is<Loop>() || origin->is<If>() || origin->is<Try>()) { - if (stackType == unreachable) { + if (stackType == Type::unreachable) { // There are no unreachable blocks, loops, or ifs. we emit extra // unreachables to fix that up, so that they are valid as having none // type. - stackType = none; + stackType = Type::none; } else if (op != StackInst::BlockEnd && op != StackInst::IfEnd && op != StackInst::LoopEnd && op != StackInst::TryEnd) { // If a concrete type is returned, we mark the end of the construct has // having that type (as it is pushed to the value stack at that point), // other parts are marked as none). 
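The validator hunks that follow wrap bare enumerators as Type(Type::none), Type(Type::i32), and so on rather than passing Type::none directly. A plausible reading: Binaryen's Type is (approximately) a wrapper class with a nested ValueType enum, and helpers such as shouldBeEqual are templates that deduce one parameter type from both arguments, so mixing the wrapper with a raw enumerator would fail deduction. The class and helper below are simplified stand-ins for illustration only, not the real wasm::Type or the validator's actual helper.

// Hedged sketch of why the call sites construct a Type explicitly.
#include <iostream>

struct Type {
  enum ValueType { none, i32, unreachable };
  constexpr Type(ValueType v) : v(v) {}
  ValueType v;
};
constexpr bool operator==(Type a, Type b) { return a.v == b.v; }

// Deduces a single S from both arguments, like the validator helper.
template<typename S> bool shouldBeEqual(S left, S right) {
  return left == right;
}

int main() {
  Type blockType = Type::i32;
  // shouldBeEqual(blockType, Type::none);   // would not compile: S is
  //                                         // deduced as both Type and
  //                                         // Type::ValueType
  std::cout << shouldBeEqual(blockType, Type(Type::none)) << '\n'; // 0
  std::cout << shouldBeEqual(blockType, Type(Type::i32)) << '\n';  // 1
  return 0;
}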
- stackType = none; + stackType = Type::none; } } ret->type = stackType; diff --git a/src/wasm/wasm-validator.cpp b/src/wasm/wasm-validator.cpp index b19ed096f..30e182f0b 100644 --- a/src/wasm/wasm-validator.cpp +++ b/src/wasm/wasm-validator.cpp @@ -147,7 +147,7 @@ struct ValidationInfo { template<typename T, typename S> bool shouldBeEqualOrFirstIsUnreachable( S left, S right, T curr, const char* text, Function* func = nullptr) { - if (left != unreachable && left != right) { + if (left != Type::unreachable && left != right) { std::ostringstream ss; ss << left << " != " << right << ": " << text; fail(ss.str(), curr, func); @@ -173,9 +173,9 @@ struct ValidationInfo { const char* text, Function* func = nullptr) { switch (ty) { - case i32: - case i64: - case unreachable: { + case Type::i32: + case Type::i64: + case Type::unreachable: { break; } default: @@ -411,7 +411,8 @@ void FunctionValidator::visitBlock(Block* curr) { curr, "block+breaks must have right type if breaks return a value"); } - if (curr->type.isConcrete() && info.arity && info.type != unreachable) { + if (curr->type.isConcrete() && info.arity && + info.type != Type::unreachable) { shouldBeSubType( info.type, curr->type, @@ -422,7 +423,7 @@ void FunctionValidator::visitBlock(Block* curr) { info.arity != BreakInfo::PoisonArity, curr, "break arities must match"); if (curr->list.size() > 0) { auto last = curr->list.back()->type; - if (last == none) { + if (last == Type::none) { shouldBeTrue(info.arity == Index(0), curr, "if block ends with a none, breaks cannot send a value " @@ -463,7 +464,7 @@ void FunctionValidator::visitBlock(Block* curr) { } else { shouldBeUnequal( backType, - none, + Type(Type::none), curr, "block with value must not have last element that is none"); } @@ -487,7 +488,7 @@ void FunctionValidator::visitLoop(Loop* curr) { } breakInfos.erase(iter); } - if (curr->type == none) { + if (curr->type == Type::none) { shouldBeFalse(curr->body->type.isConcrete(), curr, "bad body for a loop that has no value"); @@ -513,22 +514,22 @@ void FunctionValidator::visitLoop(Loop* curr) { } void FunctionValidator::visitIf(If* curr) { - shouldBeTrue(curr->condition->type == unreachable || - curr->condition->type == i32, + shouldBeTrue(curr->condition->type == Type::unreachable || + curr->condition->type == Type::i32, curr, "if condition must be valid"); if (!curr->ifFalse) { shouldBeFalse(curr->ifTrue->type.isConcrete(), curr, "if without else must not return a value in body"); - if (curr->condition->type != unreachable) { + if (curr->condition->type != Type::unreachable) { shouldBeEqual(curr->type, - none, + Type(Type::none), curr, "if without else and reachable condition must be none"); } } else { - if (curr->type != unreachable) { + if (curr->type != Type::unreachable) { shouldBeSubTypeOrFirstIsUnreachable( curr->ifTrue->type, curr->type, @@ -540,13 +541,13 @@ void FunctionValidator::visitIf(If* curr) { curr, "returning if-else's false must have right type"); } else { - if (curr->condition->type != unreachable) { + if (curr->condition->type != Type::unreachable) { shouldBeEqual(curr->ifTrue->type, - unreachable, + Type(Type::unreachable), curr, "unreachable if-else must have unreachable true"); shouldBeEqual(curr->ifFalse->type, - unreachable, + Type(Type::unreachable), curr, "unreachable if-else must have unreachable false"); } @@ -570,14 +571,15 @@ void FunctionValidator::noteBreak(Name name, Expression* value, Expression* curr) { if (value) { - shouldBeUnequal(value->type, none, curr, "breaks must have a valid value"); + 
shouldBeUnequal( + value->type, Type(Type::none), curr, "breaks must have a valid value"); } - noteBreak(name, value ? value->type : none, curr); + noteBreak(name, value ? value->type : Type::none, curr); } void FunctionValidator::noteBreak(Name name, Type valueType, Expression* curr) { Index arity = 0; - if (valueType != none) { + if (valueType != Type::none) { arity = 1; } auto iter = breakInfos.find(name); @@ -598,12 +600,13 @@ void FunctionValidator::noteBreak(Name name, Type valueType, Expression* curr) { void FunctionValidator::visitBreak(Break* curr) { noteBreak(curr->name, curr->value, curr); if (curr->value) { - shouldBeTrue( - curr->value->type != none, curr, "break value must not have none type"); + shouldBeTrue(curr->value->type != Type::none, + curr, + "break value must not have none type"); } if (curr->condition) { - shouldBeTrue(curr->condition->type == unreachable || - curr->condition->type == i32, + shouldBeTrue(curr->condition->type == Type::unreachable || + curr->condition->type == Type::i32, curr, "break condition must be i32"); } @@ -614,8 +617,8 @@ void FunctionValidator::visitSwitch(Switch* curr) { noteBreak(target, curr->value, curr); } noteBreak(curr->default_, curr->value, curr); - shouldBeTrue(curr->condition->type == unreachable || - curr->condition->type == i32, + shouldBeTrue(curr->condition->type == Type::unreachable || + curr->condition->type == Type::i32, curr, "br_table condition must be i32"); } @@ -648,7 +651,7 @@ void FunctionValidator::visitCall(Call* curr) { } if (curr->isReturn) { shouldBeEqual(curr->type, - unreachable, + Type(Type::unreachable), curr, "return_call should have unreachable type"); shouldBeEqual( @@ -657,11 +660,11 @@ void FunctionValidator::visitCall(Call* curr) { curr, "return_call callee return type must match caller return type"); } else { - if (curr->type == unreachable) { - bool hasUnreachableOperand = - std::any_of(curr->operands.begin(), - curr->operands.end(), - [](Expression* op) { return op->type == unreachable; }); + if (curr->type == Type::unreachable) { + bool hasUnreachableOperand = std::any_of( + curr->operands.begin(), curr->operands.end(), [](Expression* op) { + return op->type == Type::unreachable; + }); shouldBeTrue( hasUnreachableOperand, curr, @@ -683,8 +686,10 @@ void FunctionValidator::visitCallIndirect(CallIndirect* curr) { return; } const std::vector<Type>& params = curr->sig.params.expand(); - shouldBeEqualOrFirstIsUnreachable( - curr->target->type, i32, curr, "indirect call target must be an i32"); + shouldBeEqualOrFirstIsUnreachable(curr->target->type, + Type(Type::i32), + curr, + "indirect call target must be an i32"); if (!shouldBeTrue(curr->operands.size() == params.size(), curr, "call param number must match")) { @@ -701,7 +706,7 @@ void FunctionValidator::visitCallIndirect(CallIndirect* curr) { } if (curr->isReturn) { shouldBeEqual(curr->type, - unreachable, + Type(Type::unreachable), curr, "return_call_indirect should have unreachable type"); shouldBeEqual( @@ -710,12 +715,12 @@ void FunctionValidator::visitCallIndirect(CallIndirect* curr) { curr, "return_call_indirect callee return type must match caller return type"); } else { - if (curr->type == unreachable) { - if (curr->target->type != unreachable) { - bool hasUnreachableOperand = - std::any_of(curr->operands.begin(), - curr->operands.end(), - [](Expression* op) { return op->type == unreachable; }); + if (curr->type == Type::unreachable) { + if (curr->target->type != Type::unreachable) { + bool hasUnreachableOperand = std::any_of( + 
curr->operands.begin(), curr->operands.end(), [](Expression* op) { + return op->type == Type::unreachable; + }); shouldBeTrue(hasUnreachableOperand, curr, "call_indirects may only be unreachable if they have " @@ -754,8 +759,8 @@ void FunctionValidator::visitLocalSet(LocalSet* curr) { if (shouldBeTrue(curr->index < getFunction()->getNumLocals(), curr, "local.set index must be small enough")) { - if (curr->value->type != unreachable) { - if (curr->type != none) { // tee is ok anyhow + if (curr->value->type != Type::unreachable) { + if (curr->type != Type::none) { // tee is ok anyhow shouldBeEqual(getFunction()->getLocalType(curr->index), curr->type, curr, @@ -803,12 +808,12 @@ void FunctionValidator::visitLoad(Load* curr) { shouldBeTrue(getModule()->features.hasAtomics(), curr, "Atomic operation (atomics are disabled)"); - shouldBeTrue(curr->type == i32 || curr->type == i64 || - curr->type == unreachable, + shouldBeTrue(curr->type == Type::i32 || curr->type == Type::i64 || + curr->type == Type::unreachable, curr, "Atomic load should be i32 or i64"); } - if (curr->type == v128) { + if (curr->type == Type::v128) { shouldBeTrue(getModule()->features.hasSIMD(), curr, "SIMD operation (SIMD is disabled)"); @@ -819,7 +824,7 @@ void FunctionValidator::visitLoad(Load* curr) { validateMemBytes(curr->bytes, curr->type, curr); validateAlignment(curr->align, curr->type, curr->bytes, curr->isAtomic, curr); shouldBeEqualOrFirstIsUnreachable( - curr->ptr->type, i32, curr, "load pointer type must be i32"); + curr->ptr->type, Type(Type::i32), curr, "load pointer type must be i32"); if (curr->isAtomic) { shouldBeFalse(curr->signed_, curr, "atomic loads must be unsigned"); shouldBeIntOrUnreachable( @@ -834,12 +839,12 @@ void FunctionValidator::visitStore(Store* curr) { shouldBeTrue(getModule()->features.hasAtomics(), curr, "Atomic operation (atomics are disabled)"); - shouldBeTrue(curr->valueType == i32 || curr->valueType == i64 || - curr->valueType == unreachable, + shouldBeTrue(curr->valueType == Type::i32 || curr->valueType == Type::i64 || + curr->valueType == Type::unreachable, curr, "Atomic store should be i32 or i64"); } - if (curr->valueType == v128) { + if (curr->valueType == Type::v128) { shouldBeTrue(getModule()->features.hasSIMD(), curr, "SIMD operation (SIMD is disabled)"); @@ -851,9 +856,11 @@ void FunctionValidator::visitStore(Store* curr) { validateAlignment( curr->align, curr->valueType, curr->bytes, curr->isAtomic, curr); shouldBeEqualOrFirstIsUnreachable( - curr->ptr->type, i32, curr, "store pointer type must be i32"); - shouldBeUnequal( - curr->value->type, none, curr, "store value type must not be none"); + curr->ptr->type, Type(Type::i32), curr, "store pointer type must be i32"); + shouldBeUnequal(curr->value->type, + Type(Type::none), + curr, + "store value type must not be none"); shouldBeEqualOrFirstIsUnreachable( curr->value->type, curr->valueType, curr, "store value type must match"); if (curr->isAtomic) { @@ -872,8 +879,10 @@ void FunctionValidator::visitAtomicRMW(AtomicRMW* curr) { curr, "Atomic operation with non-shared memory"); validateMemBytes(curr->bytes, curr->type, curr); - shouldBeEqualOrFirstIsUnreachable( - curr->ptr->type, i32, curr, "AtomicRMW pointer type must be i32"); + shouldBeEqualOrFirstIsUnreachable(curr->ptr->type, + Type(Type::i32), + curr, + "AtomicRMW pointer type must be i32"); shouldBeEqualOrFirstIsUnreachable(curr->type, curr->value->type, curr, @@ -893,9 +902,9 @@ void FunctionValidator::visitAtomicCmpxchg(AtomicCmpxchg* curr) { "Atomic operation with 
non-shared memory"); validateMemBytes(curr->bytes, curr->type, curr); shouldBeEqualOrFirstIsUnreachable( - curr->ptr->type, i32, curr, "cmpxchg pointer type must be i32"); - if (curr->expected->type != unreachable && - curr->replacement->type != unreachable) { + curr->ptr->type, Type(Type::i32), curr, "cmpxchg pointer type must be i32"); + if (curr->expected->type != Type::unreachable && + curr->replacement->type != Type::unreachable) { shouldBeEqual(curr->expected->type, curr->replacement->type, curr, @@ -925,9 +934,11 @@ void FunctionValidator::visitAtomicWait(AtomicWait* curr) { curr, "Atomic operation with non-shared memory"); shouldBeEqualOrFirstIsUnreachable( - curr->type, i32, curr, "AtomicWait must have type i32"); - shouldBeEqualOrFirstIsUnreachable( - curr->ptr->type, i32, curr, "AtomicWait pointer type must be i32"); + curr->type, Type(Type::i32), curr, "AtomicWait must have type i32"); + shouldBeEqualOrFirstIsUnreachable(curr->ptr->type, + Type(Type::i32), + curr, + "AtomicWait pointer type must be i32"); shouldBeIntOrUnreachable( curr->expected->type, curr, "AtomicWait expected type must be int"); shouldBeEqualOrFirstIsUnreachable( @@ -935,8 +946,10 @@ void FunctionValidator::visitAtomicWait(AtomicWait* curr) { curr->expectedType, curr, "AtomicWait expected type must match operand"); - shouldBeEqualOrFirstIsUnreachable( - curr->timeout->type, i64, curr, "AtomicWait timeout type must be i64"); + shouldBeEqualOrFirstIsUnreachable(curr->timeout->type, + Type(Type::i64), + curr, + "AtomicWait timeout type must be i64"); } void FunctionValidator::visitAtomicNotify(AtomicNotify* curr) { @@ -949,12 +962,14 @@ void FunctionValidator::visitAtomicNotify(AtomicNotify* curr) { curr, "Atomic operation with non-shared memory"); shouldBeEqualOrFirstIsUnreachable( - curr->type, i32, curr, "AtomicNotify must have type i32"); - shouldBeEqualOrFirstIsUnreachable( - curr->ptr->type, i32, curr, "AtomicNotify pointer type must be i32"); + curr->type, Type(Type::i32), curr, "AtomicNotify must have type i32"); + shouldBeEqualOrFirstIsUnreachable(curr->ptr->type, + Type(Type::i32), + curr, + "AtomicNotify pointer type must be i32"); shouldBeEqualOrFirstIsUnreachable( curr->notifyCount->type, - i32, + Type(Type::i32), curr, "AtomicNotify notifyCount type must be i32"); } @@ -977,35 +992,37 @@ void FunctionValidator::visitAtomicFence(AtomicFence* curr) { void FunctionValidator::visitSIMDExtract(SIMDExtract* curr) { shouldBeTrue( getModule()->features.hasSIMD(), curr, "SIMD operation (SIMD is disabled)"); - shouldBeEqualOrFirstIsUnreachable( - curr->vec->type, v128, curr, "extract_lane must operate on a v128"); - Type lane_t = none; + shouldBeEqualOrFirstIsUnreachable(curr->vec->type, + Type(Type::v128), + curr, + "extract_lane must operate on a v128"); + Type lane_t = Type::none; size_t lanes = 0; switch (curr->op) { case ExtractLaneSVecI8x16: case ExtractLaneUVecI8x16: - lane_t = i32; + lane_t = Type::i32; lanes = 16; break; case ExtractLaneSVecI16x8: case ExtractLaneUVecI16x8: - lane_t = i32; + lane_t = Type::i32; lanes = 8; break; case ExtractLaneVecI32x4: - lane_t = i32; + lane_t = Type::i32; lanes = 4; break; case ExtractLaneVecI64x2: - lane_t = i64; + lane_t = Type::i64; lanes = 2; break; case ExtractLaneVecF32x4: - lane_t = f32; + lane_t = Type::f32; lanes = 4; break; case ExtractLaneVecF64x2: - lane_t = f64; + lane_t = Type::f64; lanes = 2; break; } @@ -1021,34 +1038,36 @@ void FunctionValidator::visitSIMDReplace(SIMDReplace* curr) { shouldBeTrue( getModule()->features.hasSIMD(), curr, "SIMD 
operation (SIMD is disabled)"); shouldBeEqualOrFirstIsUnreachable( - curr->type, v128, curr, "replace_lane must have type v128"); - shouldBeEqualOrFirstIsUnreachable( - curr->vec->type, v128, curr, "replace_lane must operate on a v128"); - Type lane_t = none; + curr->type, Type(Type::v128), curr, "replace_lane must have type v128"); + shouldBeEqualOrFirstIsUnreachable(curr->vec->type, + Type(Type::v128), + curr, + "replace_lane must operate on a v128"); + Type lane_t = Type::none; size_t lanes = 0; switch (curr->op) { case ReplaceLaneVecI8x16: - lane_t = i32; + lane_t = Type::i32; lanes = 16; break; case ReplaceLaneVecI16x8: - lane_t = i32; + lane_t = Type::i32; lanes = 8; break; case ReplaceLaneVecI32x4: - lane_t = i32; + lane_t = Type::i32; lanes = 4; break; case ReplaceLaneVecI64x2: - lane_t = i64; + lane_t = Type::i64; lanes = 2; break; case ReplaceLaneVecF32x4: - lane_t = f32; + lane_t = Type::f32; lanes = 4; break; case ReplaceLaneVecF64x2: - lane_t = f64; + lane_t = Type::f64; lanes = 2; break; } @@ -1061,11 +1080,11 @@ void FunctionValidator::visitSIMDShuffle(SIMDShuffle* curr) { shouldBeTrue( getModule()->features.hasSIMD(), curr, "SIMD operation (SIMD is disabled)"); shouldBeEqualOrFirstIsUnreachable( - curr->type, v128, curr, "v128.shuffle must have type v128"); + curr->type, Type(Type::v128), curr, "v128.shuffle must have type v128"); shouldBeEqualOrFirstIsUnreachable( - curr->left->type, v128, curr, "expected operand of type v128"); + curr->left->type, Type(Type::v128), curr, "expected operand of type v128"); shouldBeEqualOrFirstIsUnreachable( - curr->right->type, v128, curr, "expected operand of type v128"); + curr->right->type, Type(Type::v128), curr, "expected operand of type v128"); for (uint8_t index : curr->mask) { shouldBeTrue(index < 32, curr, "Invalid lane index in mask"); } @@ -1075,24 +1094,26 @@ void FunctionValidator::visitSIMDTernary(SIMDTernary* curr) { shouldBeTrue( getModule()->features.hasSIMD(), curr, "SIMD operation (SIMD is disabled)"); shouldBeEqualOrFirstIsUnreachable( - curr->type, v128, curr, "SIMD ternary must have type v128"); + curr->type, Type(Type::v128), curr, "SIMD ternary must have type v128"); shouldBeEqualOrFirstIsUnreachable( - curr->a->type, v128, curr, "expected operand of type v128"); + curr->a->type, Type(Type::v128), curr, "expected operand of type v128"); shouldBeEqualOrFirstIsUnreachable( - curr->b->type, v128, curr, "expected operand of type v128"); + curr->b->type, Type(Type::v128), curr, "expected operand of type v128"); shouldBeEqualOrFirstIsUnreachable( - curr->c->type, v128, curr, "expected operand of type v128"); + curr->c->type, Type(Type::v128), curr, "expected operand of type v128"); } void FunctionValidator::visitSIMDShift(SIMDShift* curr) { shouldBeTrue( getModule()->features.hasSIMD(), curr, "SIMD operation (SIMD is disabled)"); shouldBeEqualOrFirstIsUnreachable( - curr->type, v128, curr, "vector shift must have type v128"); + curr->type, Type(Type::v128), curr, "vector shift must have type v128"); shouldBeEqualOrFirstIsUnreachable( - curr->vec->type, v128, curr, "expected operand of type v128"); - shouldBeEqualOrFirstIsUnreachable( - curr->shift->type, i32, curr, "expected shift amount to have type i32"); + curr->vec->type, Type(Type::v128), curr, "expected operand of type v128"); + shouldBeEqualOrFirstIsUnreachable(curr->shift->type, + Type(Type::i32), + curr, + "expected shift amount to have type i32"); } void FunctionValidator::visitSIMDLoad(SIMDLoad* curr) { @@ -1101,15 +1122,17 @@ void 
FunctionValidator::visitSIMDLoad(SIMDLoad* curr) { shouldBeTrue( getModule()->features.hasSIMD(), curr, "SIMD operation (SIMD is disabled)"); shouldBeEqualOrFirstIsUnreachable( - curr->type, v128, curr, "load_splat must have type v128"); - shouldBeEqualOrFirstIsUnreachable( - curr->ptr->type, i32, curr, "load_splat address must have type i32"); - Type memAlignType = none; + curr->type, Type(Type::v128), curr, "load_splat must have type v128"); + shouldBeEqualOrFirstIsUnreachable(curr->ptr->type, + Type(Type::i32), + curr, + "load_splat address must have type i32"); + Type memAlignType = Type::none; switch (curr->op) { case LoadSplatVec8x16: case LoadSplatVec16x8: case LoadSplatVec32x4: - memAlignType = i32; + memAlignType = Type::i32; break; case LoadSplatVec64x2: case LoadExtSVec8x8ToVecI16x8: @@ -1118,7 +1141,7 @@ void FunctionValidator::visitSIMDLoad(SIMDLoad* curr) { case LoadExtUVec16x4ToVecI32x4: case LoadExtSVec32x2ToVecI64x2: case LoadExtUVec32x2ToVecI64x2: - memAlignType = i64; + memAlignType = Type::i64; break; } Index bytes = curr->getMemBytes(); @@ -1130,13 +1153,15 @@ void FunctionValidator::visitMemoryInit(MemoryInit* curr) { curr, "Bulk memory operation (bulk memory is disabled)"); shouldBeEqualOrFirstIsUnreachable( - curr->type, none, curr, "memory.init must have type none"); - shouldBeEqualOrFirstIsUnreachable( - curr->dest->type, i32, curr, "memory.init dest must be an i32"); + curr->type, Type(Type::none), curr, "memory.init must have type none"); shouldBeEqualOrFirstIsUnreachable( - curr->offset->type, i32, curr, "memory.init offset must be an i32"); + curr->dest->type, Type(Type::i32), curr, "memory.init dest must be an i32"); + shouldBeEqualOrFirstIsUnreachable(curr->offset->type, + Type(Type::i32), + curr, + "memory.init offset must be an i32"); shouldBeEqualOrFirstIsUnreachable( - curr->size->type, i32, curr, "memory.init size must be an i32"); + curr->size->type, Type(Type::i32), curr, "memory.init size must be an i32"); if (!shouldBeTrue(getModule()->memory.exists, curr, "Memory operations require a memory")) { @@ -1152,7 +1177,7 @@ void FunctionValidator::visitDataDrop(DataDrop* curr) { curr, "Bulk memory operation (bulk memory is disabled)"); shouldBeEqualOrFirstIsUnreachable( - curr->type, none, curr, "data.drop must have type none"); + curr->type, Type(Type::none), curr, "data.drop must have type none"); if (!shouldBeTrue(getModule()->memory.exists, curr, "Memory operations require a memory")) { @@ -1168,13 +1193,15 @@ void FunctionValidator::visitMemoryCopy(MemoryCopy* curr) { curr, "Bulk memory operation (bulk memory is disabled)"); shouldBeEqualOrFirstIsUnreachable( - curr->type, none, curr, "memory.copy must have type none"); - shouldBeEqualOrFirstIsUnreachable( - curr->dest->type, i32, curr, "memory.copy dest must be an i32"); + curr->type, Type(Type::none), curr, "memory.copy must have type none"); shouldBeEqualOrFirstIsUnreachable( - curr->source->type, i32, curr, "memory.copy source must be an i32"); + curr->dest->type, Type(Type::i32), curr, "memory.copy dest must be an i32"); + shouldBeEqualOrFirstIsUnreachable(curr->source->type, + Type(Type::i32), + curr, + "memory.copy source must be an i32"); shouldBeEqualOrFirstIsUnreachable( - curr->size->type, i32, curr, "memory.copy size must be an i32"); + curr->size->type, Type(Type::i32), curr, "memory.copy size must be an i32"); shouldBeTrue( getModule()->memory.exists, curr, "Memory operations require a memory"); } @@ -1184,13 +1211,15 @@ void FunctionValidator::visitMemoryFill(MemoryFill* curr) { curr, 
"Bulk memory operation (bulk memory is disabled)"); shouldBeEqualOrFirstIsUnreachable( - curr->type, none, curr, "memory.fill must have type none"); - shouldBeEqualOrFirstIsUnreachable( - curr->dest->type, i32, curr, "memory.fill dest must be an i32"); + curr->type, Type(Type::none), curr, "memory.fill must have type none"); shouldBeEqualOrFirstIsUnreachable( - curr->value->type, i32, curr, "memory.fill value must be an i32"); + curr->dest->type, Type(Type::i32), curr, "memory.fill dest must be an i32"); + shouldBeEqualOrFirstIsUnreachable(curr->value->type, + Type(Type::i32), + curr, + "memory.fill value must be an i32"); shouldBeEqualOrFirstIsUnreachable( - curr->size->type, i32, curr, "memory.fill size must be an i32"); + curr->size->type, Type(Type::i32), curr, "memory.fill size must be an i32"); shouldBeTrue( getModule()->memory.exists, curr, "Memory operations require a memory"); } @@ -1199,41 +1228,42 @@ void FunctionValidator::validateMemBytes(uint8_t bytes, Type type, Expression* curr) { switch (type) { - case i32: + case Type::i32: shouldBeTrue(bytes == 1 || bytes == 2 || bytes == 4, curr, "expected i32 operation to touch 1, 2, or 4 bytes"); break; - case i64: + case Type::i64: shouldBeTrue(bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8, curr, "expected i64 operation to touch 1, 2, 4, or 8 bytes"); break; - case f32: + case Type::f32: shouldBeEqual( bytes, uint8_t(4), curr, "expected f32 operation to touch 4 bytes"); break; - case f64: + case Type::f64: shouldBeEqual( bytes, uint8_t(8), curr, "expected f64 operation to touch 8 bytes"); break; - case v128: + case Type::v128: shouldBeEqual( bytes, uint8_t(16), curr, "expected v128 operation to touch 16 bytes"); break; - case unreachable: + case Type::unreachable: break; - case funcref: - case anyref: - case nullref: - case exnref: - case none: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: + case Type::none: WASM_UNREACHABLE("unexpected type"); } } void FunctionValidator::visitBinary(Binary* curr) { - if (curr->left->type != unreachable && curr->right->type != unreachable) { + if (curr->left->type != Type::unreachable && + curr->right->type != Type::unreachable) { shouldBeEqual(curr->left->type, curr->right->type, curr, @@ -1265,7 +1295,8 @@ void FunctionValidator::visitBinary(Binary* curr) { case GtUInt32: case GeSInt32: case GeUInt32: { - shouldBeEqualOrFirstIsUnreachable(curr->left->type, i32, curr, "i32 op"); + shouldBeEqualOrFirstIsUnreachable( + curr->left->type, Type(Type::i32), curr, "i32 op"); break; } case AddInt64: @@ -1293,7 +1324,8 @@ void FunctionValidator::visitBinary(Binary* curr) { case GtUInt64: case GeSInt64: case GeUInt64: { - shouldBeEqualOrFirstIsUnreachable(curr->left->type, i64, curr, "i64 op"); + shouldBeEqualOrFirstIsUnreachable( + curr->left->type, Type(Type::i64), curr, "i64 op"); break; } case AddFloat32: @@ -1309,7 +1341,8 @@ void FunctionValidator::visitBinary(Binary* curr) { case LeFloat32: case GtFloat32: case GeFloat32: { - shouldBeEqualOrFirstIsUnreachable(curr->left->type, f32, curr, "f32 op"); + shouldBeEqualOrFirstIsUnreachable( + curr->left->type, Type(Type::f32), curr, "f32 op"); break; } case AddFloat64: @@ -1325,7 +1358,8 @@ void FunctionValidator::visitBinary(Binary* curr) { case LeFloat64: case GtFloat64: case GeFloat64: { - shouldBeEqualOrFirstIsUnreachable(curr->left->type, f64, curr, "f64 op"); + shouldBeEqualOrFirstIsUnreachable( + curr->left->type, Type(Type::f64), curr, "f64 op"); break; } case EqVecI8x16: @@ -1426,9 +1460,9 @@ void 
FunctionValidator::visitBinary(Binary* curr) { case NarrowUVecI32x4ToVecI16x8: case SwizzleVec8x16: { shouldBeEqualOrFirstIsUnreachable( - curr->left->type, v128, curr, "v128 op"); + curr->left->type, Type(Type::v128), curr, "v128 op"); shouldBeEqualOrFirstIsUnreachable( - curr->right->type, v128, curr, "v128 op"); + curr->right->type, Type(Type::v128), curr, "v128 op"); break; } case InvalidBinary: @@ -1441,25 +1475,29 @@ void FunctionValidator::visitBinary(Binary* curr) { void FunctionValidator::visitUnary(Unary* curr) { shouldBeUnequal(curr->value->type, - none, + Type(Type::none), curr, "unaries must not receive a none as their input"); - if (curr->value->type == unreachable) { + if (curr->value->type == Type::unreachable) { return; // nothing to check } switch (curr->op) { case ClzInt32: case CtzInt32: case PopcntInt32: { - shouldBeEqual( - curr->value->type, i32, curr, "i32 unary value type must be correct"); + shouldBeEqual(curr->value->type, + Type(Type::i32), + curr, + "i32 unary value type must be correct"); break; } case ClzInt64: case CtzInt64: case PopcntInt64: { - shouldBeEqual( - curr->value->type, i64, curr, "i64 unary value type must be correct"); + shouldBeEqual(curr->value->type, + Type(Type::i64), + curr, + "i64 unary value type must be correct"); break; } case NegFloat32: @@ -1469,8 +1507,10 @@ void FunctionValidator::visitUnary(Unary* curr) { case TruncFloat32: case NearestFloat32: case SqrtFloat32: { - shouldBeEqual( - curr->value->type, f32, curr, "f32 unary value type must be correct"); + shouldBeEqual(curr->value->type, + Type(Type::f32), + curr, + "f32 unary value type must be correct"); break; } case NegFloat64: @@ -1480,128 +1520,166 @@ void FunctionValidator::visitUnary(Unary* curr) { case TruncFloat64: case NearestFloat64: case SqrtFloat64: { - shouldBeEqual( - curr->value->type, f64, curr, "f64 unary value type must be correct"); + shouldBeEqual(curr->value->type, + Type(Type::f64), + curr, + "f64 unary value type must be correct"); break; } case EqZInt32: { - shouldBeTrue(curr->value->type == i32, curr, "i32.eqz input must be i32"); + shouldBeTrue( + curr->value->type == Type::i32, curr, "i32.eqz input must be i32"); break; } case EqZInt64: { - shouldBeTrue(curr->value->type == i64, curr, "i64.eqz input must be i64"); + shouldBeTrue(curr->value->type == Type(Type::i64), + curr, + "i64.eqz input must be i64"); break; } case ExtendSInt32: case ExtendUInt32: case ExtendS8Int32: case ExtendS16Int32: { - shouldBeEqual( - curr->value->type, i32, curr, "extend type must be correct"); + shouldBeEqual(curr->value->type, + Type(Type::i32), + curr, + "extend type must be correct"); break; } case ExtendS8Int64: case ExtendS16Int64: case ExtendS32Int64: { - shouldBeEqual( - curr->value->type, i64, curr, "extend type must be correct"); + shouldBeEqual(curr->value->type, + Type(Type::i64), + curr, + "extend type must be correct"); break; } case WrapInt64: { - shouldBeEqual(curr->value->type, i64, curr, "wrap type must be correct"); + shouldBeEqual( + curr->value->type, Type(Type::i64), curr, "wrap type must be correct"); break; } case TruncSFloat32ToInt32: case TruncSFloat32ToInt64: case TruncUFloat32ToInt32: case TruncUFloat32ToInt64: { - shouldBeEqual(curr->value->type, f32, curr, "trunc type must be correct"); + shouldBeEqual( + curr->value->type, Type(Type::f32), curr, "trunc type must be correct"); break; } case TruncSatSFloat32ToInt32: case TruncSatSFloat32ToInt64: case TruncSatUFloat32ToInt32: case TruncSatUFloat32ToInt64: { - shouldBeEqual(curr->value->type, f32, 
curr, "trunc type must be correct"); + shouldBeEqual( + curr->value->type, Type(Type::f32), curr, "trunc type must be correct"); break; } case TruncSFloat64ToInt32: case TruncSFloat64ToInt64: case TruncUFloat64ToInt32: case TruncUFloat64ToInt64: { - shouldBeEqual(curr->value->type, f64, curr, "trunc type must be correct"); + shouldBeEqual( + curr->value->type, Type(Type::f64), curr, "trunc type must be correct"); break; } case TruncSatSFloat64ToInt32: case TruncSatSFloat64ToInt64: case TruncSatUFloat64ToInt32: case TruncSatUFloat64ToInt64: { - shouldBeEqual(curr->value->type, f64, curr, "trunc type must be correct"); + shouldBeEqual( + curr->value->type, Type(Type::f64), curr, "trunc type must be correct"); break; } case ReinterpretFloat32: { - shouldBeEqual( - curr->value->type, f32, curr, "reinterpret/f32 type must be correct"); + shouldBeEqual(curr->value->type, + Type(Type::f32), + curr, + "reinterpret/f32 type must be correct"); break; } case ReinterpretFloat64: { - shouldBeEqual( - curr->value->type, f64, curr, "reinterpret/f64 type must be correct"); + shouldBeEqual(curr->value->type, + Type(Type::f64), + curr, + "reinterpret/f64 type must be correct"); break; } case ConvertUInt32ToFloat32: case ConvertUInt32ToFloat64: case ConvertSInt32ToFloat32: case ConvertSInt32ToFloat64: { - shouldBeEqual( - curr->value->type, i32, curr, "convert type must be correct"); + shouldBeEqual(curr->value->type, + Type(Type::i32), + curr, + "convert type must be correct"); break; } case ConvertUInt64ToFloat32: case ConvertUInt64ToFloat64: case ConvertSInt64ToFloat32: case ConvertSInt64ToFloat64: { - shouldBeEqual( - curr->value->type, i64, curr, "convert type must be correct"); + shouldBeEqual(curr->value->type, + Type(Type::i64), + curr, + "convert type must be correct"); break; } case PromoteFloat32: { - shouldBeEqual( - curr->value->type, f32, curr, "promote type must be correct"); + shouldBeEqual(curr->value->type, + Type(Type::f32), + curr, + "promote type must be correct"); break; } case DemoteFloat64: { - shouldBeEqual( - curr->value->type, f64, curr, "demote type must be correct"); + shouldBeEqual(curr->value->type, + Type(Type::f64), + curr, + "demote type must be correct"); break; } case ReinterpretInt32: { - shouldBeEqual( - curr->value->type, i32, curr, "reinterpret/i32 type must be correct"); + shouldBeEqual(curr->value->type, + Type(Type::i32), + curr, + "reinterpret/i32 type must be correct"); break; } case ReinterpretInt64: { - shouldBeEqual( - curr->value->type, i64, curr, "reinterpret/i64 type must be correct"); + shouldBeEqual(curr->value->type, + Type(Type::i64), + curr, + "reinterpret/i64 type must be correct"); break; } case SplatVecI8x16: case SplatVecI16x8: case SplatVecI32x4: - shouldBeEqual(curr->type, v128, curr, "expected splat to have v128 type"); - shouldBeEqual(curr->value->type, i32, curr, "expected i32 splat value"); + shouldBeEqual( + curr->type, Type(Type::v128), curr, "expected splat to have v128 type"); + shouldBeEqual( + curr->value->type, Type(Type::i32), curr, "expected i32 splat value"); break; case SplatVecI64x2: - shouldBeEqual(curr->type, v128, curr, "expected splat to have v128 type"); - shouldBeEqual(curr->value->type, i64, curr, "expected i64 splat value"); + shouldBeEqual( + curr->type, Type(Type::v128), curr, "expected splat to have v128 type"); + shouldBeEqual( + curr->value->type, Type(Type::i64), curr, "expected i64 splat value"); break; case SplatVecF32x4: - shouldBeEqual(curr->type, v128, curr, "expected splat to have v128 type"); - 
shouldBeEqual(curr->value->type, f32, curr, "expected f32 splat value"); + shouldBeEqual( + curr->type, Type(Type::v128), curr, "expected splat to have v128 type"); + shouldBeEqual( + curr->value->type, Type(Type::f32), curr, "expected f32 splat value"); break; case SplatVecF64x2: - shouldBeEqual(curr->type, v128, curr, "expected splat to have v128 type"); - shouldBeEqual(curr->value->type, f64, curr, "expected i64 splat value"); + shouldBeEqual( + curr->type, Type(Type::v128), curr, "expected splat to have v128 type"); + shouldBeEqual( + curr->value->type, Type(Type::f64), curr, "expected f64 splat value"); break; case NotVec128: case NegVecI8x16: @@ -1630,8 +1708,9 @@ void FunctionValidator::visitUnary(Unary* curr) { case WidenHighSVecI16x8ToVecI32x4: case WidenLowUVecI16x8ToVecI32x4: case WidenHighUVecI16x8ToVecI32x4: - shouldBeEqual(curr->type, v128, curr, "expected v128 type"); - shouldBeEqual(curr->value->type, v128, curr, "expected v128 operand"); + shouldBeEqual(curr->type, Type(Type::v128), curr, "expected v128 type"); + shouldBeEqual( + curr->value->type, Type(Type::v128), curr, "expected v128 operand"); break; case AnyTrueVecI8x16: case AllTrueVecI8x16: @@ -1641,9 +1720,12 @@ void FunctionValidator::visitUnary(Unary* curr) { case AllTrueVecI32x4: case AnyTrueVecI64x2: case AllTrueVecI64x2: + shouldBeEqual(curr->type, + Type(Type::i32), + curr, + "expected boolean reduction to have i32 type"); shouldBeEqual( - curr->type, i32, curr, "expected boolean reduction to have i32 type"); - shouldBeEqual(curr->value->type, v128, curr, "expected v128 operand"); + curr->value->type, Type(Type::v128), curr, "expected v128 operand"); break; case InvalidUnary: WASM_UNREACHABLE("invalid unary op"); @@ -1654,15 +1736,15 @@ void FunctionValidator::visitUnary(Unary* curr) { } void FunctionValidator::visitSelect(Select* curr) { - shouldBeUnequal(curr->ifTrue->type, none, curr, "select left must be valid"); shouldBeUnequal( - curr->ifFalse->type, none, curr, "select right must be valid"); - shouldBeUnequal(curr->type, none, curr, "select type must be valid"); - shouldBeTrue(curr->condition->type == unreachable || - curr->condition->type == i32, + curr->ifFalse->type, Type(Type::none), curr, "select right must be valid"); + shouldBeUnequal( + curr->type, Type(Type::none), curr, "select type must be valid"); + shouldBeTrue(curr->condition->type == Type::unreachable || + curr->condition->type == Type::i32, curr, "select condition must be valid"); - if (curr->type != unreachable) { + if (curr->type != Type::unreachable) { shouldBeTrue(Type::isSubType(curr->ifTrue->type, curr->type), curr, "select's left expression must be subtype of select's type"); @@ -1674,7 +1756,7 @@ void FunctionValidator::visitSelect(Select* curr) { void FunctionValidator::visitDrop(Drop* curr) { shouldBeTrue(curr->value->type.isConcrete() || - curr->value->type == unreachable, + curr->value->type == Type::unreachable, curr, "can only drop a valid value"); } @@ -1693,7 +1775,7 @@ void FunctionValidator::visitHost(Host* curr) { curr, "memory.grow must have 1 operand"); shouldBeEqualOrFirstIsUnreachable(curr->operands[0]->type, - i32, + Type(Type::i32), curr, "memory.grow must have i32 operand"); break; @@ -1716,7 +1798,7 @@ void FunctionValidator::visitRefFunc(RefFunc* curr) { } void FunctionValidator::visitTry(Try* curr) { - if (curr->type != unreachable) { + if (curr->type != Type::unreachable) { shouldBeSubTypeOrFirstIsUnreachable( curr->body->type, curr->type, @@ -1729,11 +1811,11 @@ void FunctionValidator::visitTry(Try* curr) 
{ "try's type does not match catch's body type"); } else { shouldBeEqual(curr->body->type, - unreachable, + Type(Type::unreachable), curr, "unreachable try-catch must have unreachable try body"); shouldBeEqual(curr->catchBody->type, - unreachable, + Type(Type::unreachable), curr, "unreachable try-catch must have unreachable catch body"); } @@ -1743,8 +1825,10 @@ void FunctionValidator::visitThrow(Throw* curr) { if (!info.validateGlobally) { return; } - shouldBeEqual( - curr->type, unreachable, curr, "throw's type must be unreachable"); + shouldBeEqual(curr->type, + Type(Type::unreachable), + curr, + "throw's type must be unreachable"); auto* event = getModule()->getEventOrNull(curr->event); if (!shouldBeTrue(!!event, curr, "throw's event must exist")) { return; @@ -1767,8 +1851,10 @@ void FunctionValidator::visitThrow(Throw* curr) { } void FunctionValidator::visitRethrow(Rethrow* curr) { - shouldBeEqual( - curr->type, unreachable, curr, "rethrow's type must be unreachable"); + shouldBeEqual(curr->type, + Type(Type::unreachable), + curr, + "rethrow's type must be unreachable"); shouldBeSubTypeOrFirstIsUnreachable( curr->exnref->type, Type::exnref, @@ -1788,13 +1874,13 @@ void FunctionValidator::visitBrOnExn(BrOnExn* curr) { Type::exnref, curr, "br_on_exn's argument must be unreachable or exnref type or its subtype"); - if (curr->exnref->type == unreachable) { - shouldBeTrue(curr->type == unreachable, + if (curr->exnref->type == Type::unreachable) { + shouldBeTrue(curr->type == Type::unreachable, curr, "If exnref argument's type is unreachable, br_on_exn should " "be unreachable too"); } else { - shouldBeTrue(curr->type == exnref, + shouldBeTrue(curr->type == Type::exnref, curr, "br_on_exn's type should be exnref unless its exnref argument " "is unreachable"); @@ -1890,24 +1976,24 @@ void FunctionValidator::validateAlignment( } shouldBeTrue(align <= bytes, curr, "alignment must not exceed natural"); switch (type) { - case i32: - case f32: { + case Type::i32: + case Type::f32: { shouldBeTrue(align <= 4, curr, "alignment must not exceed natural"); break; } - case i64: - case f64: { + case Type::i64: + case Type::f64: { shouldBeTrue(align <= 8, curr, "alignment must not exceed natural"); break; } - case v128: - case unreachable: + case Type::v128: + case Type::unreachable: break; - case funcref: - case anyref: - case nullref: - case exnref: - case none: + case Type::funcref: + case Type::anyref: + case Type::nullref: + case Type::exnref: + case Type::none: WASM_UNREACHABLE("invalid type"); } } @@ -1970,13 +2056,13 @@ static void validateImports(Module& module, ValidationInfo& info) { if (info.validateWeb) { for (Type param : curr->sig.params.expand()) { info.shouldBeUnequal(param, - i64, + Type(Type::i64), curr->name, "Imported function must not have i64 parameters"); } for (Type result : curr->sig.results.expand()) { info.shouldBeUnequal(result, - i64, + Type(Type::i64), curr->name, "Imported function must not have i64 results"); } @@ -1998,13 +2084,13 @@ static void validateExports(Module& module, ValidationInfo& info) { for (auto param : f->sig.params.expand()) { info.shouldBeUnequal( param, - i64, + Type(Type::i64), f->name, "Exported function must not have i64 parameters"); } for (auto result : f->sig.results.expand()) { info.shouldBeUnequal(result, - i64, + Type(Type::i64), f->name, "Exported function must not have i64 results"); } @@ -2103,7 +2189,7 @@ static void validateMemory(Module& module, ValidationInfo& info) { "passive segment should not have an offset"); } else { if 
(!info.shouldBeEqual(segment.offset->type, - i32, + Type(Type::i32), segment.offset, "segment offset should be i32")) { continue; @@ -2136,7 +2222,7 @@ static void validateTable(Module& module, ValidationInfo& info) { auto& curr = module.table; for (auto& segment : curr.segments) { info.shouldBeEqual(segment.offset->type, - i32, + Type(Type::i32), segment.offset, "segment offset should be i32"); info.shouldBeTrue(checkOffset(segment.offset, diff --git a/src/wasm/wasm.cpp b/src/wasm/wasm.cpp index 11d203835..a3dc80896 100644 --- a/src/wasm/wasm.cpp +++ b/src/wasm/wasm.cpp @@ -220,18 +220,18 @@ struct TypeSeeker : public PostWalker<TypeSeeker> { void visitBreak(Break* curr) { if (curr->name == targetName) { - types.push_back(curr->value ? curr->value->type : none); + types.push_back(curr->value ? curr->value->type : Type::none); } } void visitSwitch(Switch* curr) { for (auto name : curr->targets) { if (name == targetName) { - types.push_back(curr->value ? curr->value->type : none); + types.push_back(curr->value ? curr->value->type : Type::none); } } if (curr->default_ == targetName) { - types.push_back(curr->value ? curr->value->type : none); + types.push_back(curr->value ? curr->value->type : Type::none); } } @@ -246,7 +246,7 @@ struct TypeSeeker : public PostWalker<TypeSeeker> { if (curr->list.size() > 0) { types.push_back(curr->list.back()->type); } else { - types.push_back(none); + types.push_back(Type::none); } } else if (curr->name == targetName) { // ignore all breaks til now, they were captured by someone with the same @@ -271,7 +271,7 @@ struct TypeSeeker : public PostWalker<TypeSeeker> { static void handleUnreachable(Block* block, bool breakabilityKnown = false, bool hasBreak = false) { - if (block->type == unreachable) { + if (block->type == Type::unreachable) { return; // nothing to do } if (block->list.size() == 0) { @@ -285,14 +285,14 @@ static void handleUnreachable(Block* block, } // look for an unreachable child for (auto* child : block->list) { - if (child->type == unreachable) { + if (child->type == Type::unreachable) { // there is an unreachable child, so we are unreachable, unless we have a // break if (!breakabilityKnown) { hasBreak = BranchUtils::BranchSeeker::has(block, block->name); } if (!hasBreak) { - block->type = unreachable; + block->type = Type::unreachable; } return; } @@ -315,19 +315,19 @@ void Block::finalize() { return; } // if we are unreachable, we are done - if (type == unreachable) { + if (type == Type::unreachable) { return; } // we may still be unreachable if we have an unreachable // child for (auto* child : list) { - if (child->type == unreachable) { - type = unreachable; + if (child->type == Type::unreachable) { + type = Type::unreachable; return; } } } else { - type = none; + type = Type::none; } return; } @@ -339,24 +339,24 @@ void Block::finalize() { void Block::finalize(Type type_) { type = type_; - if (type == none && list.size() > 0) { + if (type == Type::none && list.size() > 0) { handleUnreachable(this); } } void Block::finalize(Type type_, bool hasBreak) { type = type_; - if (type == none && list.size() > 0) { + if (type == Type::none && list.size() > 0) { handleUnreachable(this, true, hasBreak); } } void If::finalize(Type type_) { type = type_; - if (type == none && (condition->type == unreachable || - (ifFalse && ifTrue->type == unreachable && - ifFalse->type == unreachable))) { - type = unreachable; + if (type == Type::none && (condition->type == Type::unreachable || + (ifFalse && ifTrue->type == Type::unreachable && + ifFalse->type == 
Type::unreachable))) { + type = Type::unreachable; } } @@ -371,15 +371,15 @@ void If::finalize() { // (i32.const 20 // ) // otherwise, if the condition is unreachable, so is the if - if (type == none && condition->type == unreachable) { - type = unreachable; + if (type == Type::none && condition->type == Type::unreachable) { + type = Type::unreachable; } } void Loop::finalize(Type type_) { type = type_; - if (type == none && body->type == unreachable) { - type = unreachable; + if (type == Type::none && body->type == Type::unreachable) { + type = Type::unreachable; } } @@ -387,24 +387,24 @@ void Loop::finalize() { type = body->type; } void Break::finalize() { if (condition) { - if (condition->type == unreachable) { - type = unreachable; + if (condition->type == Type::unreachable) { + type = Type::unreachable; } else if (value) { type = value->type; } else { - type = none; + type = Type::none; } } else { - type = unreachable; + type = Type::unreachable; } } -void Switch::finalize() { type = unreachable; } +void Switch::finalize() { type = Type::unreachable; } template<typename T> void handleUnreachableOperands(T* curr) { for (auto* child : curr->operands) { - if (child->type == unreachable) { - curr->type = unreachable; + if (child->type == Type::unreachable) { + curr->type = Type::unreachable; break; } } @@ -413,7 +413,7 @@ template<typename T> void handleUnreachableOperands(T* curr) { void Call::finalize() { handleUnreachableOperands(this); if (isReturn) { - type = unreachable; + type = Type::unreachable; } } @@ -421,14 +421,14 @@ void CallIndirect::finalize() { type = sig.results; handleUnreachableOperands(this); if (isReturn) { - type = unreachable; + type = Type::unreachable; } - if (target->type == unreachable) { - type = unreachable; + if (target->type == Type::unreachable) { + type = Type::unreachable; } } -bool LocalSet::isTee() const { return type != none; } +bool LocalSet::isTee() const { return type != Type::none; } // Changes to local.tee. The type of the local should be given. void LocalSet::makeTee(Type type_) { @@ -438,62 +438,63 @@ void LocalSet::makeTee(Type type_) { // Changes to local.set. 
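The wasm.cpp hunks here all apply the same finalize() idiom: if any child expression has type Type::unreachable, the parent's type becomes Type::unreachable; otherwise the node keeps its normal result type, now spelled with the Type:: prefix. A minimal sketch of that idiom follows; the Type, Expression, and Call classes below are simplified stand-ins for illustration only, not Binaryen's real definitions.

  // Simplified stand-ins for illustration only; not Binaryen's real classes.
  #include <vector>

  struct Type {
    enum ValueType { none, unreachable, i32 };
    ValueType id;
    Type(ValueType id) : id(id) {}                    // implicit on purpose
    bool operator==(ValueType v) const { return id == v; }
  };

  struct Expression {
    Type type = Type::none;
  };

  // Mirrors the handleUnreachableOperands / Call::finalize idiom above:
  // any unreachable child (or a return_call) makes the whole node unreachable.
  struct Call : Expression {
    std::vector<Expression*> operands;
    bool isReturn = false;

    void finalize(Type results) {
      type = results;
      for (auto* operand : operands) {
        if (operand->type == Type::unreachable) {
          type = Type::unreachable;
          break;
        }
      }
      if (isReturn) {
        type = Type::unreachable;
      }
    }
  };
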
void LocalSet::makeSet() { - type = none; + type = Type::none; finalize(); // type may need to be unreachable } void LocalSet::finalize() { - if (value->type == unreachable) { - type = unreachable; + if (value->type == Type::unreachable) { + type = Type::unreachable; } } void GlobalSet::finalize() { - if (value->type == unreachable) { - type = unreachable; + if (value->type == Type::unreachable) { + type = Type::unreachable; } } void Load::finalize() { - if (ptr->type == unreachable) { - type = unreachable; + if (ptr->type == Type::unreachable) { + type = Type::unreachable; } } void Store::finalize() { - assert(valueType != none); // must be set - if (ptr->type == unreachable || value->type == unreachable) { - type = unreachable; + assert(valueType != Type::none); // must be set + if (ptr->type == Type::unreachable || value->type == Type::unreachable) { + type = Type::unreachable; } else { - type = none; + type = Type::none; } } void AtomicRMW::finalize() { - if (ptr->type == unreachable || value->type == unreachable) { - type = unreachable; + if (ptr->type == Type::unreachable || value->type == Type::unreachable) { + type = Type::unreachable; } } void AtomicCmpxchg::finalize() { - if (ptr->type == unreachable || expected->type == unreachable || - replacement->type == unreachable) { - type = unreachable; + if (ptr->type == Type::unreachable || expected->type == Type::unreachable || + replacement->type == Type::unreachable) { + type = Type::unreachable; } } void AtomicWait::finalize() { - type = i32; - if (ptr->type == unreachable || expected->type == unreachable || - timeout->type == unreachable) { - type = unreachable; + type = Type::i32; + if (ptr->type == Type::unreachable || expected->type == Type::unreachable || + timeout->type == Type::unreachable) { + type = Type::unreachable; } } void AtomicNotify::finalize() { - type = i32; - if (ptr->type == unreachable || notifyCount->type == unreachable) { - type = unreachable; + type = Type::i32; + if (ptr->type == Type::unreachable || + notifyCount->type == Type::unreachable) { + type = Type::unreachable; } } @@ -505,92 +506,92 @@ void SIMDExtract::finalize() { case ExtractLaneSVecI16x8: case ExtractLaneUVecI16x8: case ExtractLaneVecI32x4: - type = i32; + type = Type::i32; break; case ExtractLaneVecI64x2: - type = i64; + type = Type::i64; break; case ExtractLaneVecF32x4: - type = f32; + type = Type::f32; break; case ExtractLaneVecF64x2: - type = f64; + type = Type::f64; break; default: WASM_UNREACHABLE("unexpected op"); } - if (vec->type == unreachable) { - type = unreachable; + if (vec->type == Type::unreachable) { + type = Type::unreachable; } } void SIMDReplace::finalize() { assert(vec && value); - type = v128; - if (vec->type == unreachable || value->type == unreachable) { - type = unreachable; + type = Type::v128; + if (vec->type == Type::unreachable || value->type == Type::unreachable) { + type = Type::unreachable; } } void SIMDShuffle::finalize() { assert(left && right); - type = v128; - if (left->type == unreachable || right->type == unreachable) { - type = unreachable; + type = Type::v128; + if (left->type == Type::unreachable || right->type == Type::unreachable) { + type = Type::unreachable; } } void SIMDTernary::finalize() { assert(a && b && c); - type = v128; - if (a->type == unreachable || b->type == unreachable || - c->type == unreachable) { - type = unreachable; + type = Type::v128; + if (a->type == Type::unreachable || b->type == Type::unreachable || + c->type == Type::unreachable) { + type = Type::unreachable; } } void 
MemoryInit::finalize() { assert(dest && offset && size); - type = none; - if (dest->type == unreachable || offset->type == unreachable || - size->type == unreachable) { - type = unreachable; + type = Type::none; + if (dest->type == Type::unreachable || offset->type == Type::unreachable || + size->type == Type::unreachable) { + type = Type::unreachable; } } -void DataDrop::finalize() { type = none; } +void DataDrop::finalize() { type = Type::none; } void MemoryCopy::finalize() { assert(dest && source && size); - type = none; - if (dest->type == unreachable || source->type == unreachable || - size->type == unreachable) { - type = unreachable; + type = Type::none; + if (dest->type == Type::unreachable || source->type == Type::unreachable || + size->type == Type::unreachable) { + type = Type::unreachable; } } void MemoryFill::finalize() { assert(dest && value && size); - type = none; - if (dest->type == unreachable || value->type == unreachable || - size->type == unreachable) { - type = unreachable; + type = Type::none; + if (dest->type == Type::unreachable || value->type == Type::unreachable || + size->type == Type::unreachable) { + type = Type::unreachable; } } void SIMDShift::finalize() { assert(vec && shift); - type = v128; - if (vec->type == unreachable || shift->type == unreachable) { - type = unreachable; + type = Type::v128; + if (vec->type == Type::unreachable || shift->type == Type::unreachable) { + type = Type::unreachable; } } void SIMDLoad::finalize() { assert(ptr); - type = v128; - if (ptr->type == unreachable) { - type = unreachable; + type = Type::v128; + if (ptr->type == Type::unreachable) { + type = Type::unreachable; } } @@ -625,8 +626,8 @@ void Const::finalize() { type = value.type; } bool Unary::isRelational() { return op == EqZInt32 || op == EqZInt64; } void Unary::finalize() { - if (value->type == unreachable) { - type = unreachable; + if (value->type == Type::unreachable) { + type = Type::unreachable; return; } switch (op) { @@ -654,27 +655,27 @@ void Unary::finalize() { break; case EqZInt32: case EqZInt64: - type = i32; + type = Type::i32; break; case ExtendS8Int32: case ExtendS16Int32: - type = i32; + type = Type::i32; break; case ExtendSInt32: case ExtendUInt32: case ExtendS8Int64: case ExtendS16Int64: case ExtendS32Int64: - type = i64; + type = Type::i64; break; case WrapInt64: - type = i32; + type = Type::i32; break; case PromoteFloat32: - type = f64; + type = Type::f64; break; case DemoteFloat64: - type = f32; + type = Type::f32; break; case TruncSFloat32ToInt32: case TruncUFloat32ToInt32: @@ -685,7 +686,7 @@ void Unary::finalize() { case TruncSatSFloat64ToInt32: case TruncSatUFloat64ToInt32: case ReinterpretFloat32: - type = i32; + type = Type::i32; break; case TruncSFloat32ToInt64: case TruncUFloat32ToInt64: @@ -696,21 +697,21 @@ void Unary::finalize() { case TruncSatSFloat64ToInt64: case TruncSatUFloat64ToInt64: case ReinterpretFloat64: - type = i64; + type = Type::i64; break; case ReinterpretInt32: case ConvertSInt32ToFloat32: case ConvertUInt32ToFloat32: case ConvertSInt64ToFloat32: case ConvertUInt64ToFloat32: - type = f32; + type = Type::f32; break; case ReinterpretInt64: case ConvertSInt32ToFloat64: case ConvertUInt32ToFloat64: case ConvertSInt64ToFloat64: case ConvertUInt64ToFloat64: - type = f64; + type = Type::f64; break; case SplatVecI8x16: case SplatVecI16x8: @@ -745,7 +746,7 @@ void Unary::finalize() { case WidenHighSVecI16x8ToVecI32x4: case WidenLowUVecI16x8ToVecI32x4: case WidenHighUVecI16x8ToVecI32x4: - type = v128; + type = Type::v128; break; 
case AnyTrueVecI8x16: case AllTrueVecI8x16: @@ -755,7 +756,7 @@ void Unary::finalize() { case AllTrueVecI32x4: case AnyTrueVecI64x2: case AllTrueVecI64x2: - type = i32; + type = Type::i32; break; case InvalidUnary: @@ -805,10 +806,10 @@ bool Binary::isRelational() { void Binary::finalize() { assert(left && right); - if (left->type == unreachable || right->type == unreachable) { - type = unreachable; + if (left->type == Type::unreachable || right->type == Type::unreachable) { + type = Type::unreachable; } else if (isRelational()) { - type = i32; + type = Type::i32; } else { type = left->type; } @@ -818,34 +819,34 @@ void Select::finalize(Type type_) { type = type_; } void Select::finalize() { assert(ifTrue && ifFalse); - if (ifTrue->type == unreachable || ifFalse->type == unreachable || - condition->type == unreachable) { - type = unreachable; + if (ifTrue->type == Type::unreachable || ifFalse->type == Type::unreachable || + condition->type == Type::unreachable) { + type = Type::unreachable; } else { type = Type::getLeastUpperBound(ifTrue->type, ifFalse->type); } } void Drop::finalize() { - if (value->type == unreachable) { - type = unreachable; + if (value->type == Type::unreachable) { + type = Type::unreachable; } else { - type = none; + type = Type::none; } } void Host::finalize() { switch (op) { case MemorySize: { - type = i32; + type = Type::i32; break; } case MemoryGrow: { // if the single operand is not reachable, so are we - if (operands[0]->type == unreachable) { - type = unreachable; + if (operands[0]->type == Type::unreachable) { + type = Type::unreachable; } else { - type = i32; + type = Type::i32; } break; } @@ -870,29 +871,29 @@ void Try::finalize() { void Try::finalize(Type type_) { type = type_; - if (type == none && body->type == unreachable && - catchBody->type == unreachable) { - type = unreachable; + if (type == Type::none && body->type == Type::unreachable && + catchBody->type == Type::unreachable) { + type = Type::unreachable; } } -void Throw::finalize() { type = unreachable; } +void Throw::finalize() { type = Type::unreachable; } -void Rethrow::finalize() { type = unreachable; } +void Rethrow::finalize() { type = Type::unreachable; } void BrOnExn::finalize() { - if (exnref->type == unreachable) { - type = unreachable; + if (exnref->type == Type::unreachable) { + type = Type::unreachable; } else { type = Type::exnref; } } void Push::finalize() { - if (value->type == unreachable) { - type = unreachable; + if (value->type == Type::unreachable) { + type = Type::unreachable; } else { - type = none; + type = Type::none; } } diff --git a/src/wasm2js.h b/src/wasm2js.h index 9f6c19338..609373cca 100644 --- a/src/wasm2js.h +++ b/src/wasm2js.h @@ -415,7 +415,7 @@ Ref Wasm2JSBuilder::processWasm(Module* wasm, Name funcName) { Signature(Type::none, Type::i32), {}, builder.makeReturn(builder.makeGlobalGet( - INT64_TO_32_HIGH_BITS, i32))))); + INT64_TO_32_HIGH_BITS, Type::i32))))); auto e = new Export(); e->name = WASM_FETCH_HIGH_BITS; e->value = WASM_FETCH_HIGH_BITS; @@ -517,7 +517,7 @@ void Wasm2JSBuilder::addGlobalImport(Ref ast, Global* import) { Ref module = ValueBuilder::makeName(ENV); Ref value = ValueBuilder::makeDot(module, fromName(import->base, NameScope::Top)); - if (import->type == i32) { + if (import->type == Type::i32) { value = makeAsmCoercion(value, ASM_INT); } ValueBuilder::appendToVar( @@ -677,10 +677,10 @@ Ref Wasm2JSBuilder::processFunction(Module* m, Names::ensureNames(func); Ref ret = ValueBuilder::makeFunction(fromName(func->name, NameScope::Top)); 
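The next hunk touches Wasm2JSBuilder::processFunction, where the frees and temps vectors are sized and indexed directly with the enumerators (Type::i32, Type::f32, Type::f64). The rename stays purely textual there, presumably because the nested, unscoped enum still converts implicitly to an integer index. A small sketch under that assumption follows; the names and element types are placeholders, not Binaryen's.

  // Per-type temp pools indexed by the enumerator value, in the spirit of the
  // frees/temps vectors below (simplified Type; placeholder element types).
  #include <algorithm>
  #include <cassert>
  #include <cstddef>
  #include <vector>

  struct Type {
    enum ValueType { none, unreachable, i32, i64, f32, f64 };
  };

  int main() {
    std::vector<std::vector<int>> frees; // free temp ids, one pool per type
    std::vector<int> temps;              // number of temps allocated per type
    frees.resize(std::max(Type::i32, std::max(Type::f32, Type::f64)) + 1);
    temps.resize(frees.size());
    temps[Type::i32] = temps[Type::f32] = temps[Type::f64] = 0;
    // At the end of a function, every temp should have been returned:
    assert(frees[Type::i32].size() == static_cast<std::size_t>(temps[Type::i32]));
    return 0;
  }
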
frees.clear(); - frees.resize(std::max(i32, std::max(f32, f64)) + 1); + frees.resize(std::max(Type::i32, std::max(Type::f32, Type::f64)) + 1); temps.clear(); - temps.resize(std::max(i32, std::max(f32, f64)) + 1); - temps[i32] = temps[f32] = temps[f64] = 0; + temps.resize(std::max(Type::i32, std::max(Type::f32, Type::f64)) + 1); + temps[Type::i32] = temps[Type::f32] = temps[Type::f64] = 0; // arguments bool needCoercions = options.optimizeLevel == 0 || standaloneFunction || functionsCallableFromOutside.count(func->name); @@ -711,9 +711,9 @@ Ref Wasm2JSBuilder::processFunction(Module* m, ret[3]->splice(theVarIndex, 1); } // checks: all temp vars should be free at the end - assert(frees[i32].size() == temps[i32]); - assert(frees[f32].size() == temps[f32]); - assert(frees[f64].size() == temps[f64]); + assert(frees[Type::i32].size() == temps[Type::i32]); + assert(frees[Type::f32].size() == temps[Type::f32]); + assert(frees[Type::f64].size() == temps[Type::f64]); return ret; } @@ -980,8 +980,8 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, continueLabels.insert(asmLabel); Ref body = visit(curr->body, result); // if we can reach the end of the block, we must leave the while (1) loop - if (curr->body->type != unreachable) { - assert(curr->body->type == none); // flat IR + if (curr->body->type != Type::Type::unreachable) { + assert(curr->body->type == Type::Type::none); // flat IR body = blockify(body); flattenAppend( body, ValueBuilder::makeBreak(fromName(asmLabel, NameScope::Label))); @@ -1045,7 +1045,7 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, for (auto* c : code) { ValueBuilder::appendCodeToSwitch( theSwitch, blockify(visit(c, NO_RESULT)), false); - hoistedEndsWithUnreachable = c->type == unreachable; + hoistedEndsWithUnreachable = c->type == Type::Type::unreachable; } } // After the hoisted cases, if any remain we must make sure not to @@ -1138,7 +1138,7 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, } if (mustReorder) { Ref ret; - ScopedTemp idx(i32, parent, func); + ScopedTemp idx(Type::i32, parent, func); std::vector<ScopedTemp*> temps; // TODO: utility class, with destructor? for (auto* operand : curr->operands) { temps.push_back(new ScopedTemp(operand->type, parent, func)); @@ -1211,7 +1211,7 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, Ref ptr = makePointer(curr->ptr, curr->offset); Ref ret; switch (curr->type) { - case i32: { + case Type::i32: { switch (curr->bytes) { case 1: ret = ValueBuilder::makeSub( @@ -1239,11 +1239,11 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, } break; } - case f32: + case Type::f32: ret = ValueBuilder::makeSub(ValueBuilder::makeName(HEAPF32), ValueBuilder::makePtrShift(ptr, 2)); break; - case f64: + case Type::f64: ret = ValueBuilder::makeSub(ValueBuilder::makeName(HEAPF64), ValueBuilder::makePtrShift(ptr, 3)); break; @@ -1265,7 +1265,7 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, Ref visitStore(Store* curr) { if (module->memory.initial < module->memory.max && - curr->type != unreachable) { + curr->type != Type::Type::unreachable) { // In JS, if memory grows then it is dangerous to write // HEAP[f()] = .. 
// or @@ -1281,13 +1281,13 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, !FindAll<Host>(curr->ptr).list.empty() || !FindAll<Host>(curr->value).list.empty()) { Ref ret; - ScopedTemp ptr(i32, parent, func); + ScopedTemp ptr(Type::i32, parent, func); sequenceAppend(ret, visitAndAssign(curr->ptr, ptr)); ScopedTemp value(curr->value->type, parent, func); sequenceAppend(ret, visitAndAssign(curr->value, value)); LocalGet getPtr; getPtr.index = func->getLocalIndex(ptr.getName()); - getPtr.type = i32; + getPtr.type = Type::i32; LocalGet getValue; getValue.index = func->getLocalIndex(value.getName()); getValue.type = curr->value->type; @@ -1307,7 +1307,7 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, Ref value = visit(curr->value, EXPRESSION_RESULT); Ref ret; switch (curr->valueType) { - case i32: { + case Type::i32: { switch (curr->bytes) { case 1: ret = ValueBuilder::makeSub(ValueBuilder::makeName(HEAP8), @@ -1326,11 +1326,11 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, } break; } - case f32: + case Type::f32: ret = ValueBuilder::makeSub(ValueBuilder::makeName(HEAPF32), ValueBuilder::makePtrShift(ptr, 2)); break; - case f64: + case Type::f64: ret = ValueBuilder::makeSub(ValueBuilder::makeName(HEAPF64), ValueBuilder::makePtrShift(ptr, 3)); break; @@ -1347,12 +1347,12 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, Ref visitConst(Const* curr) { switch (curr->type) { - case i32: + case Type::i32: return ValueBuilder::makeInt(curr->value.geti32()); // An i64 argument translates to two actual arguments to asm.js // functions, so we do a bit of a hack here to get our one `Ref` to look // like two function arguments. - case i64: { + case Type::i64: { auto lo = (unsigned)curr->value.geti64(); auto hi = (unsigned)(curr->value.geti64() >> 32); std::ostringstream out; @@ -1361,15 +1361,15 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, IString name(os.c_str(), false); return ValueBuilder::makeName(name); } - case f32: { + case Type::f32: { Ref ret = ValueBuilder::makeCall(MATH_FROUND); Const fake(allocator); fake.value = Literal(double(curr->value.getf32())); - fake.type = f64; + fake.type = Type::f64; ret[2]->push_back(visitConst(&fake)); return ret; } - case f64: { + case Type::f64: { double d = curr->value.getf64(); if (d == 0 && std::signbit(d)) { // negative zero return ValueBuilder::makeUnary( @@ -1387,7 +1387,7 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, Ref visitUnary(Unary* curr) { // normal unary switch (curr->type) { - case i32: { + case Type::i32: { switch (curr->op) { case ClzInt32: { return ValueBuilder::makeCall( @@ -1446,8 +1446,8 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, } } } - case f32: - case f64: { + case Type::f32: + case Type::f64: { Ref ret; switch (curr->op) { case NegFloat32: @@ -1528,7 +1528,7 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, default: WASM_UNREACHABLE("unhandled unary float operator"); } - if (curr->type == f32) { // doubles need much less coercing + if (curr->type == Type::f32) { // doubles need much less coercing return makeAsmCoercion(ret, ASM_FLOAT); } return ret; @@ -1546,7 +1546,7 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, Ref right = visit(curr->right, EXPRESSION_RESULT); Ref ret; switch (curr->type) { - case i32: { + case Type::i32: { switch (curr->op) { case AddInt32: ret = ValueBuilder::makeBinary(left, PLUS, right); @@ -1555,7 +1555,7 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, ret = ValueBuilder::makeBinary(left, MINUS, right); break; case MulInt32: { - if (curr->type == 
i32) { + if (curr->type == Type::i32) { // TODO: when one operand is a small int, emit a multiply return ValueBuilder::makeCall(MATH_IMUL, left, right); } else { @@ -1668,8 +1668,8 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, } break; } - case f32: - case f64: + case Type::f32: + case Type::f64: switch (curr->op) { case AddFloat32: case AddFloat64: @@ -1702,7 +1702,7 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, << std::endl; abort(); } - if (curr->type == f32) { + if (curr->type == Type::f32) { return makeAsmCoercion(ret, ASM_FLOAT); } return ret; @@ -1729,7 +1729,7 @@ Ref Wasm2JSBuilder::processFunctionBody(Module* m, if (useLocals) { ScopedTemp tempIfTrue(curr->type, parent, func), tempIfFalse(curr->type, parent, func), - tempCondition(i32, parent, func); + tempCondition(Type::i32, parent, func); Ref ifTrue = visit(curr->ifTrue, EXPRESSION_RESULT); Ref ifFalse = visit(curr->ifFalse, EXPRESSION_RESULT); Ref condition = visit(curr->condition, EXPRESSION_RESULT); |
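One pattern worth spelling out from the validator hunks above: direct comparisons use the bare enumerator (curr->type == Type::i32), while arguments to the templated shouldBe* helpers are wrapped as Type(Type::i32). A plausible reason is template argument deduction: those helpers deduce a single parameter type S from both operands, so mixing a Type value with a raw enumerator would fail to deduce. A minimal sketch under that assumption follows; the Type class and shouldBeEqual below are simplified stand-ins, not Binaryen's real code.

  // Simplified stand-in for wasm::Type; only what is needed to show the
  // call-site pattern (the real class and helpers differ).
  #include <cassert>
  #include <iostream>

  struct Type {
    enum ValueType { none, unreachable, i32, i64, f32, f64, v128 };
    ValueType id;
    Type(ValueType id) : id(id) {}  // implicit, so enumerators convert to Type
    bool operator==(const Type& other) const { return id == other.id; }
    bool operator!=(const Type& other) const { return id != other.id; }
  };

  // In the style of the validator helpers: S is deduced from both operands,
  // so both arguments must already have the same type.
  template<typename S>
  bool shouldBeEqual(S left, S right, const char* text) {
    if (left != right) {
      std::cerr << text << '\n';
      return false;
    }
    return true;
  }

  int main() {
    Type curr(Type::i32);
    // shouldBeEqual(curr, Type::i32, "...");   // would not compile: S deduces
    //                                          // as Type vs Type::ValueType
    shouldBeEqual(curr, Type(Type::i32), "i32 op");  // explicit Type, as in the diff
    assert(curr == Type::i32);  // plain comparisons convert implicitly, so the
                                // bare Type::i32 spelling suffices there
    return 0;
  }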