Diffstat (limited to 'src/asm2wasm.h')
-rw-r--r--  src/asm2wasm.h  281
1 file changed, 217 insertions, 64 deletions
diff --git a/src/asm2wasm.h b/src/asm2wasm.h
index e926e381f..bf42a8a0e 100644
--- a/src/asm2wasm.h
+++ b/src/asm2wasm.h
@@ -40,6 +40,55 @@ namespace wasm {
using namespace cashew;
+// Names
+
+Name I64("i64"),
+ I64_CONST("i64_const"),
+ I64_ADD("i64_add"),
+ I64_SUB("i64_sub"),
+ I64_MUL("i64_mul"),
+ I64_UDIV("i64_udiv"),
+ I64_SDIV("i64_sdiv"),
+ I64_UREM("i64_urem"),
+ I64_SREM("i64_srem"),
+ I64_AND("i64_and"),
+ I64_OR("i64_or"),
+ I64_XOR("i64_xor"),
+ I64_SHL("i64_shl"),
+ I64_ASHR("i64_ashr"),
+ I64_LSHR("i64_lshr"),
+ I64_LOAD("i64_load"),
+ I64_STORE("i64_store"),
+ I64_EQ("i64_eq"),
+ I64_NE("i64_ne"),
+ I64_ULE("i64_ule"),
+ I64_SLE("i64_sle"),
+ I64_UGE("i64_uge"),
+ I64_SGE("i64_sge"),
+ I64_ULT("i64_ult"),
+ I64_SLT("i64_slt"),
+ I64_UGT("i64_ugt"),
+ I64_SGT("i64_sgt"),
+ I64_TRUNC("i64_trunc"),
+ I64_SEXT("i64_sext"),
+ I64_ZEXT("i64_zext"),
+ I64_S2F("i64_s2f"),
+ I64_S2D("i64_s2d"),
+ I64_U2F("i64_u2f"),
+ I64_U2D("i64_u2d"),
+ I64_F2S("i64_f2s"),
+ I64_D2S("i64_d2s"),
+ I64_F2U("i64_f2u"),
+ I64_D2U("i64_d2u"),
+ I64_BC2D("i64_bc2d"),
+ I64_BC2I("i64_bc2i"),
+ I64_CTTZ("i64_cttz"),
+ I64_CTLZ("i64_ctlz"),
+ I64S_REM("i64s-rem"),
+ I64U_REM("i64u-rem"),
+ I64S_DIV("i64s-div"),
+ I64U_DIV("i64u-div");
+
// Utilities
static void abort_on(std::string why, Ref element) {
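The Name list added above covers the i64 intrinsics that a wasm-only fastcomp build emits (see the --wasm-only hint added to FinalizeCalls below); the call handlers added later in this patch match call targets against these names. A rough sketch of the call shapes those handlers assume, inferred from the handler code and written here as C++ comments since the JS side is not part of this patch (illustrative only):

    //   x64 = i64(x);                   // no-op coercion of an i64 value (or an i32 constant)
    //   c   = i64_const(low, high);     // 64-bit constant from two 32-bit halves
    //   z   = i64_add(x64, y64);        // binary ops take two operands
    //   v   = i64_load(ptr, align);     // an align of 0 falls back to the default of 8
    //         i64_store(ptr, value, align);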
@@ -53,6 +102,10 @@ static void abort_on(std::string why, IString element) {
abort();
}
+Index indexOr(Index x, Index y) {
+ return x ? x : y;
+}
+
// useful when we need to see our parent, in an asm.js expression stack
struct AstStackHelper {
static std::vector<Ref> astStack;
@@ -160,6 +213,7 @@ class Asm2WasmBuilder {
bool debug;
bool imprecise;
bool optimize;
+ bool wasmOnly;
public:
std::map<IString, MappedGlobal> mappedGlobals;
@@ -213,15 +267,14 @@ private:
std::map<IString, std::unique_ptr<FunctionType>> importedFunctionTypes;
- void noteImportedFunctionCall(Ref ast, WasmType resultType, AsmData *asmData, CallImport* call) {
+ void noteImportedFunctionCall(Ref ast, WasmType resultType, CallImport* call) {
assert(ast[0] == CALL && ast[1][0] == NAME);
IString importName = ast[1][1]->getIString();
auto type = make_unique<FunctionType>();
type->name = IString((std::string("type$") + importName.str).c_str(), false); // TODO: make a list of such types
type->result = resultType;
- Ref args = ast[2];
- for (unsigned i = 0; i < args->size(); i++) {
- type->params.push_back(detectWasmType(args[i], asmData));
+ for (auto* operand : call->operands) {
+ type->params.push_back(operand->type);
}
// if we already saw this signature, verify it's the same (or else handle that)
if (importedFunctionTypes.find(importName) != importedFunctionTypes.end()) {
@@ -259,14 +312,15 @@ private:
}
public:
- Asm2WasmBuilder(Module& wasm, bool memoryGrowth, bool debug, bool imprecise, bool optimize)
+ Asm2WasmBuilder(Module& wasm, bool memoryGrowth, bool debug, bool imprecise, bool optimize, bool wasmOnly)
: wasm(wasm),
allocator(wasm.allocator),
builder(wasm),
memoryGrowth(memoryGrowth),
debug(debug),
imprecise(imprecise),
- optimize(optimize) {}
+ optimize(optimize),
+ wasmOnly(wasmOnly) {}
void processAsm(Ref ast);
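For orientation, a minimal sketch of driving the builder with the new flag. Only Asm2WasmBuilder, Module, processAsm, and the constructor arguments come from this header; the wrapper function and the asmAst variable are assumptions made for illustration:

    #include "asm2wasm.h"

    // Hypothetical driver: `asmAst` is a cashew Ref to an already-parsed
    // asm.js module; passing wasmOnly=true enables the i64 intrinsic handling.
    void buildWasmOnly(cashew::Ref asmAst) {
      wasm::Module module;
      wasm::Asm2WasmBuilder builder(module,
                                    /*memoryGrowth=*/false,
                                    /*debug=*/false,
                                    /*imprecise=*/false,
                                    /*optimize=*/true,
                                    /*wasmOnly=*/true);
      builder.processAsm(asmAst);
    }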
@@ -286,7 +340,7 @@ private:
return view->second.type;
}
}
- return detectType(ast, data, false, Math_fround);
+ return detectType(ast, data, false, Math_fround, wasmOnly);
}
WasmType detectWasmType(Ref ast, AsmData *data) {
@@ -367,6 +421,10 @@ private:
return -1; // avoid warning
}
+ bool maybeWasmInt64Intrinsic(Name name) {
+ return strncmp(name.str, "i64", 3) == 0;
+ }
+
std::map<unsigned, Ref> tempNums;
Literal checkLiteral(Ref ast) {
@@ -388,6 +446,10 @@ private:
if (ast[1] == MINUS && ast[2][0] == UNARY_PREFIX && ast[2][1] == PLUS && ast[2][2][0] == NUM) {
return Literal((double)-ast[2][2][1]->getNumber());
}
+ } else if (wasmOnly && ast[0] == CALL && ast[1][0] == NAME && ast[1][1] == I64_CONST) {
+ uint64_t low = ast[2][0][1]->getNumber();
+ uint64_t high = ast[2][1][1]->getNumber();
+ return Literal(uint64_t(low + (high << 32)));
}
return Literal();
}
@@ -426,6 +488,44 @@ private:
return ret;
}
+  // Some binary ops might trap, so emit them safely if we are precise
+ Expression* makeDangerousI64Binary(BinaryOp op, Expression* left, Expression* right) {
+ if (imprecise) return builder.makeBinary(op, left, right);
+    // we are precise, and the wasm operation traps on a zero divisor, so generate a safe call
+ auto *call = allocator.alloc<Call>();
+ switch (op) {
+ case BinaryOp::RemSInt64: call->target = I64S_REM; break;
+ case BinaryOp::RemUInt64: call->target = I64U_REM; break;
+ case BinaryOp::DivSInt64: call->target = I64S_DIV; break;
+ case BinaryOp::DivUInt64: call->target = I64U_DIV; break;
+ default: WASM_UNREACHABLE();
+ }
+ call->operands.push_back(left);
+ call->operands.push_back(right);
+ call->type = i64;
+ static std::set<Name> addedFunctions;
+ if (addedFunctions.count(call->target) == 0) {
+ addedFunctions.insert(call->target);
+ auto func = new Function;
+ func->name = call->target;
+ func->params.push_back(i64);
+ func->params.push_back(i64);
+ func->result = i64;
+ func->body = builder.makeIf(
+ builder.makeUnary(EqZInt64,
+ builder.makeGetLocal(1, i64)
+ ),
+ builder.makeConst(Literal(int64_t(0))),
+ builder.makeBinary(op,
+ builder.makeGetLocal(0, i64),
+ builder.makeGetLocal(1, i64)
+ )
+ );
+ wasm.addFunction(func);
+ }
+ return call;
+ }
+
Function* processFunction(Ref ast);
};
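In plain C++ terms, the helper that makeDangerousI64Binary registers behaves roughly like the sketch below (the function name is invented for illustration): only the zero-divisor case is guarded and short-circuits to 0; mirroring the generated wasm, the signed INT64_MIN / -1 overflow case is not handled.

    #include <cstdint>

    // Sketch of the semantics built for i64s-div; i64u-div, i64s-rem and
    // i64u-rem are analogous with the matching unsigned / remainder operator.
    int64_t i64s_div_guarded(int64_t x, int64_t y) {
      if (y == 0) return 0;  // the generated wasm returns i64.const 0 here
      return x / y;          // otherwise perform the real (possibly trapping) operation
    }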
@@ -762,7 +862,14 @@ void Asm2WasmBuilder::processAsm(Ref ast) {
FinalizeCalls(Asm2WasmBuilder* parent) : parent(parent) {}
void visitCall(Call* curr) {
- assert(getModule()->checkFunction(curr->target) ? true : (std::cerr << curr->target << '\n', false));
+ if (!getModule()->checkFunction(curr->target)) {
+ std::cerr << "invalid call target: " << curr->target << '\n';
+ if (parent->maybeWasmInt64Intrinsic(curr->target)) {
+ std::cerr << " - perhaps this is a wasm-only i64() method, and you should run asm2wasm with --wasm-only?\n";
+ if (parent->wasmOnly) std::cerr << " - wait, you *did*. so this is an internal compiler error, please file an issue!\n";
+ }
+ WASM_UNREACHABLE();
+ }
auto result = getModule()->getFunction(curr->target)->result;
if (curr->type != result) {
curr->type = result;
@@ -826,6 +933,10 @@ void Asm2WasmBuilder::processAsm(Ref ast) {
passRunner.add<FinalizeCalls>(this);
passRunner.add<ReFinalize>(); // FinalizeCalls changes call types, need to percolate
passRunner.add<AutoDrop>(); // FinalizeCalls may cause us to require additional drops
+ if (wasmOnly) {
+ // we didn't legalize i64s in fastcomp, and so must legalize the interface to the outside
+ passRunner.add("legalize-js-interface");
+ }
if (optimize) {
// autodrop can add some garbage
passRunner.add("vacuum");
@@ -913,28 +1024,8 @@ void Asm2WasmBuilder::processAsm(Ref ast) {
x64 = Builder::addVar(func, "x64", i64),
y64 = Builder::addVar(func, "y64", i64);
auto* body = allocator.alloc<Block>();
- auto recreateI64 = [&](Index target, Index low, Index high) {
- return builder.makeSetLocal(
- target,
- builder.makeBinary(
- OrInt64,
- builder.makeUnary(
- ExtendUInt32,
- builder.makeGetLocal(low, i32)
- ),
- builder.makeBinary(
- ShlInt64,
- builder.makeUnary(
- ExtendUInt32,
- builder.makeGetLocal(high, i32)
- ),
- builder.makeConst(Literal(int64_t(32)))
- )
- )
- );
- };
- body->list.push_back(recreateI64(x64, xl, xh));
- body->list.push_back(recreateI64(y64, yl, yh));
+ body->list.push_back(builder.makeSetLocal(x64, I64Utilities::recreateI64(builder, xl, xh)));
+ body->list.push_back(builder.makeSetLocal(y64, I64Utilities::recreateI64(builder, yl, yh)));
body->list.push_back(
builder.makeIf(
builder.makeGetLocal(r, i32),
@@ -963,22 +1054,10 @@ void Asm2WasmBuilder::processAsm(Ref ast) {
body->list.push_back(
builder.makeSetGlobal(
tempRet0,
- builder.makeUnary(
- WrapInt64,
- builder.makeBinary(
- ShrUInt64,
- builder.makeGetLocal(x64, i64),
- builder.makeConst(Literal(int64_t(32)))
- )
- )
- )
- );
- body->list.push_back(
- builder.makeUnary(
- WrapInt64,
- builder.makeGetLocal(x64, i64)
+ I64Utilities::getI64High(builder, x64)
)
);
+ body->list.push_back(I64Utilities::getI64Low(builder, x64));
body->finalize();
func->body = body;
}
@@ -1027,7 +1106,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) {
curr = curr[1];
assert(curr[0] == ASSIGN && curr[2][0] == NAME);
IString name = curr[2][1]->getIString();
- AsmType asmType = detectType(curr[3], nullptr, false, Math_fround);
+ AsmType asmType = detectType(curr[3], nullptr, false, Math_fround, wasmOnly);
Builder::addParam(function, name, asmToWasmType(asmType));
functionVariables.insert(name);
asmData.addParam(name, asmType);
@@ -1038,7 +1117,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) {
for (unsigned j = 0; j < curr[1]->size(); j++) {
Ref pair = curr[1][j];
IString name = pair[0]->getIString();
- AsmType asmType = detectType(pair[1], nullptr, true, Math_fround);
+ AsmType asmType = detectType(pair[1], nullptr, true, Math_fround, wasmOnly);
Builder::addVar(function, name, asmToWasmType(asmType));
functionVariables.insert(name);
asmData.addVar(name, asmType);
@@ -1110,7 +1189,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) {
conv->type = WasmType::f32;
ret->value = conv;
} else {
- abort();
+ abort_on("bad subtract types", ast);
}
}
return ret;
@@ -1446,13 +1525,74 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) {
}
return ret;
}
+ if (wasmOnly && maybeWasmInt64Intrinsic(name)) {
+ auto num = ast[2]->size();
+ if (num == 1) {
+ auto* value = process(ast[2][0]);
+ if (name == I64) {
+ // no-op "coercion" / "cast", although we also tolerate i64(0) for constants that fit in i32
+ if (value->type == i32) {
+ return builder.makeConst(Literal(int64_t(value->cast<Const>()->value.geti32())));
+ } else {
+ fixCallType(value, i64);
+ return value;
+ }
+ }
+ if (name == I64_TRUNC) return builder.makeUnary(UnaryOp::WrapInt64, value);
+ if (name == I64_SEXT) return builder.makeUnary(UnaryOp::ExtendSInt32, value);
+ if (name == I64_ZEXT) return builder.makeUnary(UnaryOp::ExtendUInt32, value);
+ if (name == I64_S2F) return builder.makeUnary(UnaryOp::ConvertSInt64ToFloat32, value);
+ if (name == I64_S2D) return builder.makeUnary(UnaryOp::ConvertSInt64ToFloat64, value);
+ if (name == I64_U2F) return builder.makeUnary(UnaryOp::ConvertUInt64ToFloat32, value);
+ if (name == I64_U2D) return builder.makeUnary(UnaryOp::ConvertUInt64ToFloat64, value);
+ if (name == I64_F2S) return builder.makeUnary(UnaryOp::TruncSFloat32ToInt64, value);
+ if (name == I64_D2S) return builder.makeUnary(UnaryOp::TruncSFloat64ToInt64, value);
+ if (name == I64_F2U) return builder.makeUnary(UnaryOp::TruncUFloat32ToInt64, value);
+ if (name == I64_D2U) return builder.makeUnary(UnaryOp::TruncUFloat64ToInt64, value);
+ if (name == I64_BC2D) return builder.makeUnary(UnaryOp::ReinterpretInt64, value);
+ if (name == I64_BC2I) return builder.makeUnary(UnaryOp::ReinterpretFloat64, value);
+ if (name == I64_CTTZ) return builder.makeUnary(UnaryOp::CtzInt64, value);
+ if (name == I64_CTLZ) return builder.makeUnary(UnaryOp::ClzInt64, value);
+        } else if (num == 2) { // 2 params, binary
+ if (name == I64_CONST) return builder.makeConst(getLiteral(ast));
+ if (name == I64_LOAD) return builder.makeLoad(8, true, 0, indexOr(ast[2][1][1]->getInteger(), 8), process(ast[2][0]), i64);
+ auto* left = process(ast[2][0]);
+ auto* right = process(ast[2][1]);
+ // maths
+ if (name == I64_ADD) return builder.makeBinary(BinaryOp::AddInt64, left, right);
+ if (name == I64_SUB) return builder.makeBinary(BinaryOp::SubInt64, left, right);
+ if (name == I64_MUL) return builder.makeBinary(BinaryOp::MulInt64, left, right);
+ if (name == I64_UDIV) return makeDangerousI64Binary(BinaryOp::DivUInt64, left, right);
+ if (name == I64_SDIV) return makeDangerousI64Binary(BinaryOp::DivSInt64, left, right);
+ if (name == I64_UREM) return makeDangerousI64Binary(BinaryOp::RemUInt64, left, right);
+ if (name == I64_SREM) return makeDangerousI64Binary(BinaryOp::RemSInt64, left, right);
+ if (name == I64_AND) return builder.makeBinary(BinaryOp::AndInt64, left, right);
+ if (name == I64_OR) return builder.makeBinary(BinaryOp::OrInt64, left, right);
+ if (name == I64_XOR) return builder.makeBinary(BinaryOp::XorInt64, left, right);
+ if (name == I64_SHL) return builder.makeBinary(BinaryOp::ShlInt64, left, right);
+ if (name == I64_ASHR) return builder.makeBinary(BinaryOp::ShrSInt64, left, right);
+ if (name == I64_LSHR) return builder.makeBinary(BinaryOp::ShrUInt64, left, right);
+ // comps
+ if (name == I64_EQ) return builder.makeBinary(BinaryOp::EqInt64, left, right);
+ if (name == I64_NE) return builder.makeBinary(BinaryOp::NeInt64, left, right);
+ if (name == I64_ULE) return builder.makeBinary(BinaryOp::LeUInt64, left, right);
+ if (name == I64_SLE) return builder.makeBinary(BinaryOp::LeSInt64, left, right);
+ if (name == I64_UGE) return builder.makeBinary(BinaryOp::GeUInt64, left, right);
+ if (name == I64_SGE) return builder.makeBinary(BinaryOp::GeSInt64, left, right);
+ if (name == I64_ULT) return builder.makeBinary(BinaryOp::LtUInt64, left, right);
+ if (name == I64_SLT) return builder.makeBinary(BinaryOp::LtSInt64, left, right);
+ if (name == I64_UGT) return builder.makeBinary(BinaryOp::GtUInt64, left, right);
+ if (name == I64_SGT) return builder.makeBinary(BinaryOp::GtSInt64, left, right);
+ } else if (num == 3) { // 3 params
+ if (name == I64_STORE) return builder.makeStore(8, 0, indexOr(ast[2][2][1]->getInteger(), 8), process(ast[2][0]), process(ast[2][1]), i64);
+ }
+ }
Expression* ret;
ExpressionList* operands;
+ bool import = false;
if (wasm.checkImport(name)) {
- Ref parent = astStackHelper.getParent();
- WasmType type = !!parent ? detectWasmType(parent, &asmData) : none;
+ import = true;
auto specific = allocator.alloc<CallImport>();
- noteImportedFunctionCall(ast, type, &asmData, specific);
specific->target = name;
operands = &specific->operands;
ret = specific;
@@ -1466,6 +1606,11 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) {
for (unsigned i = 0; i < args->size(); i++) {
operands->push_back(process(args[i]));
}
+ if (import) {
+ Ref parent = astStackHelper.getParent();
+ WasmType type = !!parent ? detectWasmType(parent, &asmData) : none;
+ noteImportedFunctionCall(ast, type, ret->cast<CallImport>());
+ }
return ret;
}
// function pointers
@@ -1755,17 +1900,15 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) {
auto br = allocator.alloc<Switch>();
br->condition = process(ast[1]);
- assert(br->condition->type == i32);
Ref cases = ast[2];
bool seen = false;
- int min = 0; // the lowest index we see; we will offset to it
+ int64_t min = 0; // the lowest index we see; we will offset to it
for (unsigned i = 0; i < cases->size(); i++) {
Ref curr = cases[i];
Ref condition = curr[0];
if (!condition->isNull()) {
- assert(condition[0] == NUM || condition[0] == UNARY_PREFIX);
- int32_t index = getLiteral(condition).geti32();
+ int64_t index = getLiteral(condition).getInteger();
if (!seen) {
seen = true;
min = index;
@@ -1774,12 +1917,23 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) {
}
}
}
- Binary* offsetor = allocator.alloc<Binary>();
- offsetor->op = BinaryOp::SubInt32;
- offsetor->left = br->condition;
- offsetor->right = builder.makeConst(Literal(min));
- offsetor->type = i32;
- br->condition = offsetor;
+ if (br->condition->type == i32) {
+ Binary* offsetor = allocator.alloc<Binary>();
+ offsetor->op = BinaryOp::SubInt32;
+ offsetor->left = br->condition;
+ offsetor->right = builder.makeConst(Literal(int32_t(min)));
+ offsetor->type = i32;
+ br->condition = offsetor;
+ } else {
+ assert(br->condition->type == i64);
+ // 64-bit condition. after offsetting it must be in a reasonable range, but the offsetting itself must be 64-bit
+ Binary* offsetor = allocator.alloc<Binary>();
+ offsetor->op = BinaryOp::SubInt64;
+ offsetor->left = br->condition;
+ offsetor->right = builder.makeConst(Literal(int64_t(min)));
+ offsetor->type = i64;
+ br->condition = builder.makeUnary(UnaryOp::WrapInt64, offsetor); // TODO: check this fits in 32 bits
+ }
auto top = allocator.alloc<Block>();
top->list.push_back(br);
@@ -1794,15 +1948,14 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) {
if (condition->isNull()) {
name = br->default_ = getNextId("switch-default");
} else {
- assert(condition[0] == NUM || condition[0] == UNARY_PREFIX);
- int32_t index = getLiteral(condition).geti32();
+ auto index = getLiteral(condition).getInteger();
assert(index >= min);
index -= min;
assert(index >= 0);
- size_t index_s = index;
+ uint64_t index_s = index;
name = getNextId("switch-case");
if (br->targets.size() <= index_s) {
- br->targets.resize(index_s+1);
+ br->targets.resize(index_s + 1);
}
br->targets[index_s] = name;
}