-rw-r--r--  src/binaryen-c.cpp                    2
-rw-r--r--  src/shell-interface.h                12
-rw-r--r--  src/tools/execution-results.h         6
-rw-r--r--  src/tools/wasm-ctor-eval.cpp         47
-rw-r--r--  src/tools/wasm-shell.cpp              8
-rw-r--r--  src/wasm-interpreter.h             1735
-rw-r--r--  test/lit/exec/delegate-vacuum.wast   60
7 files changed, 954 insertions, 916 deletions
diff --git a/src/binaryen-c.cpp b/src/binaryen-c.cpp
index be538ef86..df868dc62 100644
--- a/src/binaryen-c.cpp
+++ b/src/binaryen-c.cpp
@@ -4124,7 +4124,7 @@ BinaryenModuleRef BinaryenModuleRead(char* input, size_t inputSize) {
void BinaryenModuleInterpret(BinaryenModuleRef module) {
ShellExternalInterface interface;
- ModuleInstance instance(*(Module*)module, &interface, {});
+ ModuleRunner instance(*(Module*)module, &interface, {});
}
BinaryenIndex BinaryenModuleAddDebugInfoFileName(BinaryenModuleRef module,
diff --git a/src/shell-interface.h b/src/shell-interface.h
index 4fea7f8a6..d1cf3290e 100644
--- a/src/shell-interface.h
+++ b/src/shell-interface.h
@@ -42,7 +42,7 @@ struct TrapException {};
// GC memory may be allocated, but hosts have limits.)
struct HostLimitException {};
-struct ShellExternalInterface : ModuleInstance::ExternalInterface {
+struct ShellExternalInterface : ModuleRunner::ExternalInterface {
// The underlying memory can be accessed through unaligned pointers which
// isn't well-behaved in C++. WebAssembly nonetheless expects it to behave
// properly. Avoid emitting unaligned load/store by checking for alignment
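For context, a minimal sketch of the alignment-checked access the comment above describes (illustrative names, not part of this commit): check whether the address is suitably aligned for the type and fall back to memcpy otherwise, so no unaligned load or store is ever emitted.

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Returns true if `address` is aligned for a power-of-two-sized T.
    template<typename T> static bool aligned(const char* address) {
      static_assert((sizeof(T) & (sizeof(T) - 1)) == 0, "T must have power-of-2 size");
      return (reinterpret_cast<uintptr_t>(address) & (sizeof(T) - 1)) == 0;
    }

    // Reads a T from linear memory without performing an unaligned load.
    template<typename T> T safeGet(const std::vector<char>& memory, size_t address) {
      if (aligned<T>(&memory[address])) {
        return *reinterpret_cast<const T*>(&memory[address]); // aligned: direct load
      }
      T value;
      std::memcpy(&value, &memory[address], sizeof(T)); // unaligned: byte-wise copy
      return value;
    }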
@@ -95,16 +95,16 @@ struct ShellExternalInterface : ModuleInstance::ExternalInterface {
} memory;
std::unordered_map<Name, std::vector<Literal>> tables;
- std::map<Name, std::shared_ptr<ModuleInstance>> linkedInstances;
+ std::map<Name, std::shared_ptr<ModuleRunner>> linkedInstances;
ShellExternalInterface(
- std::map<Name, std::shared_ptr<ModuleInstance>> linkedInstances_ = {})
+ std::map<Name, std::shared_ptr<ModuleRunner>> linkedInstances_ = {})
: memory() {
linkedInstances.swap(linkedInstances_);
}
virtual ~ShellExternalInterface() = default;
- ModuleInstance* getImportInstance(Importable* import) {
+ ModuleRunner* getImportInstance(Importable* import) {
auto it = linkedInstances.find(import->module);
if (it == linkedInstances.end()) {
Fatal() << "importGlobals: unknown import: " << import->module.str << "."
@@ -113,7 +113,7 @@ struct ShellExternalInterface : ModuleInstance::ExternalInterface {
return it->second.get();
}
- void init(Module& wasm, ModuleInstance& instance) override {
+ void init(Module& wasm, ModuleRunner& instance) override {
if (wasm.memory.exists && !wasm.memory.imported()) {
memory.resize(wasm.memory.initial * wasm::Memory::kPageSize);
}
@@ -155,7 +155,7 @@ struct ShellExternalInterface : ModuleInstance::ExternalInterface {
HeapType sig,
Literals& arguments,
Type results,
- ModuleInstance& instance) override {
+ ModuleRunner& instance) override {
auto it = tables.find(tableName);
if (it == tables.end()) {
diff --git a/src/tools/execution-results.h b/src/tools/execution-results.h
index 78972cfc9..5f2e772f0 100644
--- a/src/tools/execution-results.h
+++ b/src/tools/execution-results.h
@@ -106,7 +106,7 @@ struct ExecutionResults {
void get(Module& wasm) {
LoggingExternalInterface interface(loggings);
try {
- ModuleInstance instance(wasm, &interface);
+ ModuleRunner instance(wasm, &interface);
// execute all exported methods (that are therefore preserved through
// opts)
for (auto& exp : wasm.exports) {
@@ -229,7 +229,7 @@ struct ExecutionResults {
FunctionResult run(Function* func, Module& wasm) {
LoggingExternalInterface interface(loggings);
try {
- ModuleInstance instance(wasm, &interface);
+ ModuleRunner instance(wasm, &interface);
return run(func, wasm, instance);
} catch (const TrapException&) {
// may throw in instance creation (init of offsets)
@@ -237,7 +237,7 @@ struct ExecutionResults {
}
}
- FunctionResult run(Function* func, Module& wasm, ModuleInstance& instance) {
+ FunctionResult run(Function* func, Module& wasm, ModuleRunner& instance) {
try {
Literals arguments;
// init hang support, if present
diff --git a/src/tools/wasm-ctor-eval.cpp b/src/tools/wasm-ctor-eval.cpp
index 5699ae37a..6dbff0c8f 100644
--- a/src/tools/wasm-ctor-eval.cpp
+++ b/src/tools/wasm-ctor-eval.cpp
@@ -120,14 +120,14 @@ public:
}
};
-class EvallingModuleInstance
- : public ModuleInstanceBase<EvallingGlobalManager, EvallingModuleInstance> {
+class EvallingModuleRunner
+ : public ModuleRunnerBase<EvallingGlobalManager, EvallingModuleRunner> {
public:
- EvallingModuleInstance(Module& wasm,
- ExternalInterface* externalInterface,
- std::map<Name, std::shared_ptr<EvallingModuleInstance>>
- linkedInstances_ = {})
- : ModuleInstanceBase(wasm, externalInterface, linkedInstances_) {
+ EvallingModuleRunner(
+ Module& wasm,
+ ExternalInterface* externalInterface,
+ std::map<Name, std::shared_ptr<EvallingModuleRunner>> linkedInstances_ = {})
+ : ModuleRunnerBase(wasm, externalInterface, linkedInstances_) {
// if any global in the module has a non-const constructor, it is using a
// global import, which we don't have, and is illegal to use
ModuleUtils::iterDefinedGlobals(wasm, [&](Global* global) {
@@ -207,16 +207,16 @@ std::unique_ptr<Module> buildEnvModule(Module& wasm) {
// that there are not arguments passed to main, etc.
static bool ignoreExternalInput = false;
-struct CtorEvalExternalInterface : EvallingModuleInstance::ExternalInterface {
+struct CtorEvalExternalInterface : EvallingModuleRunner::ExternalInterface {
Module* wasm;
- EvallingModuleInstance* instance;
- std::map<Name, std::shared_ptr<EvallingModuleInstance>> linkedInstances;
+ EvallingModuleRunner* instance;
+ std::map<Name, std::shared_ptr<EvallingModuleRunner>> linkedInstances;
// A representation of the contents of wasm memory as we execute.
std::vector<char> memory;
CtorEvalExternalInterface(
- std::map<Name, std::shared_ptr<EvallingModuleInstance>> linkedInstances_ =
+ std::map<Name, std::shared_ptr<EvallingModuleRunner>> linkedInstances_ =
{}) {
linkedInstances.swap(linkedInstances_);
}
@@ -232,7 +232,7 @@ struct CtorEvalExternalInterface : EvallingModuleInstance::ExternalInterface {
instance->globals.applyToModule(*wasm);
}
- void init(Module& wasm_, EvallingModuleInstance& instance_) override {
+ void init(Module& wasm_, EvallingModuleRunner& instance_) override {
wasm = &wasm_;
instance = &instance_;
}
@@ -326,7 +326,7 @@ struct CtorEvalExternalInterface : EvallingModuleInstance::ExternalInterface {
HeapType sig,
Literals& arguments,
Type result,
- EvallingModuleInstance& instance) override {
+ EvallingModuleRunner& instance) override {
std::unordered_map<wasm::Name, std::vector<wasm::Name>>::iterator it;
@@ -503,7 +503,7 @@ using EvalCtorOutcome = std::optional<Literals>;
// evaluate the ctor (which means that the caller can proceed to try to eval
// further ctors if there are any), and if we did, the results if the function
// returns any.
-EvalCtorOutcome evalCtor(EvallingModuleInstance& instance,
+EvalCtorOutcome evalCtor(EvallingModuleRunner& instance,
CtorEvalExternalInterface& interface,
Name funcName,
Name exportName) {
@@ -557,23 +557,20 @@ EvalCtorOutcome evalCtor(EvallingModuleInstance& instance,
if (auto* block = func->body->dynCast<Block>()) {
// Go through the items in the block and try to execute them. We do all this
// in a single function scope for all the executions.
- EvallingModuleInstance::FunctionScope scope(func, params);
-
- EvallingModuleInstance::RuntimeExpressionRunner expressionRunner(
- instance, scope, instance.maxDepth);
+ EvallingModuleRunner::FunctionScope scope(func, params, instance);
// After we successfully eval a line we will apply the changes here. This is
// the same idea as applyToModule() - we must only do it after an entire
// atomic "chunk" has been processed, we do not want partial updates from
// an item in the block that we only partially evalled.
- EvallingModuleInstance::FunctionScope appliedScope(func, params);
+ std::vector<Literals> appliedLocals;
Literals results;
Index successes = 0;
for (auto* curr : block->list) {
Flow flow;
try {
- flow = expressionRunner.visit(curr);
+ flow = instance.visit(curr);
} catch (FailToEvalException& fail) {
if (successes == 0) {
std::cout << " ...stopping (in block) since could not eval: "
@@ -588,7 +585,7 @@ EvalCtorOutcome evalCtor(EvallingModuleInstance& instance,
// So far so good! Apply the results.
interface.applyToModule();
- appliedScope = scope;
+ appliedLocals = scope.locals;
successes++;
// Note the values here, if any. If we are exiting the function now then
@@ -634,7 +631,7 @@ EvalCtorOutcome evalCtor(EvallingModuleInstance& instance,
// unnecessary operations.
std::vector<Expression*> localSets;
for (Index i = 0; i < copyFunc->getNumLocals(); i++) {
- auto value = appliedScope.locals[i];
+ auto value = appliedLocals[i];
localSets.push_back(
builder.makeLocalSet(i, builder.makeConstantExpression(value)));
}
@@ -685,19 +682,19 @@ void evalCtors(Module& wasm,
std::unordered_set<std::string> keptExportsSet(keptExports.begin(),
keptExports.end());
- std::map<Name, std::shared_ptr<EvallingModuleInstance>> linkedInstances;
+ std::map<Name, std::shared_ptr<EvallingModuleRunner>> linkedInstances;
// build and link the env module
auto envModule = buildEnvModule(wasm);
CtorEvalExternalInterface envInterface;
auto envInstance =
- std::make_shared<EvallingModuleInstance>(*envModule, &envInterface);
+ std::make_shared<EvallingModuleRunner>(*envModule, &envInterface);
linkedInstances[envModule->name] = envInstance;
CtorEvalExternalInterface interface(linkedInstances);
try {
// create an instance for evalling
- EvallingModuleInstance instance(wasm, &interface, linkedInstances);
+ EvallingModuleRunner instance(wasm, &interface, linkedInstances);
// we should not add new globals from here on; as a result, using
// an imported global will fail, as it is missing and so looks new
instance.globals.seal();
diff --git a/src/tools/wasm-shell.cpp b/src/tools/wasm-shell.cpp
index 4243b00a9..0de93c69e 100644
--- a/src/tools/wasm-shell.cpp
+++ b/src/tools/wasm-shell.cpp
@@ -88,16 +88,16 @@ protected:
std::map<Name, std::shared_ptr<Module>> modules;
std::map<Name, std::shared_ptr<SExpressionWasmBuilder>> builders;
std::map<Name, std::shared_ptr<ShellExternalInterface>> interfaces;
- std::map<Name, std::shared_ptr<ModuleInstance>> instances;
+ std::map<Name, std::shared_ptr<ModuleRunner>> instances;
// used for imports
- std::map<Name, std::shared_ptr<ModuleInstance>> linkedInstances;
+ std::map<Name, std::shared_ptr<ModuleRunner>> linkedInstances;
Name lastModule;
void instantiate(Module* wasm) {
auto tempInterface =
std::make_shared<ShellExternalInterface>(linkedInstances);
- auto tempInstance = std::make_shared<ModuleInstance>(
+ auto tempInstance = std::make_shared<ModuleRunner>(
*wasm, tempInterface.get(), linkedInstances);
interfaces[wasm->name].swap(tempInterface);
instances[wasm->name].swap(tempInstance);
@@ -173,7 +173,7 @@ protected:
if (s[i]->dollared()) {
moduleName = s[i++]->str();
}
- ModuleInstance* instance = instances[moduleName].get();
+ ModuleRunner* instance = instances[moduleName].get();
assert(instance);
Name base = s[i++]->str();
diff --git a/src/wasm-interpreter.h b/src/wasm-interpreter.h
index ce89f045e..2a8df0111 100644
--- a/src/wasm-interpreter.h
+++ b/src/wasm-interpreter.h
@@ -150,6 +150,8 @@ public:
// Execute an expression
template<typename SubType>
class ExpressionRunner : public OverriddenVisitor<SubType, Flow> {
+ SubType* self() { return static_cast<SubType*>(this); }
+
protected:
// Optional module context to search for globals and called functions. NULL if
// we are not interested in any context.
@@ -166,7 +168,7 @@ protected:
NOTE_ENTER_("generateArguments");
arguments.reserve(operands.size());
for (auto expression : operands) {
- Flow flow = this->visit(expression);
+ Flow flow = self()->visit(expression);
if (flow.breaking()) {
return flow;
}
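For context, a sketch (not from the patch) of the CRTP dispatch that the new self() helper enables: routing visits through self()->visit(...) statically dispatches to the most-derived runner, which is what lets ModuleRunnerBase below override instruction handling without virtual calls.

    // Minimal CRTP illustration with hypothetical names.
    template<typename SubType>
    struct RunnerBase {
      SubType* self() { return static_cast<SubType*>(this); }
      // Calls through self() reach the subclass override, if any.
      int run(int x) { return self()->step(x); }
      int step(int x) { return x; } // default behavior
    };

    struct MyRunner : RunnerBase<MyRunner> {
      int step(int x) { return x + 1; } // statically overrides the default
    };

    // MyRunner{}.run(1) == 2: run() in the base reaches MyRunner::step().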
@@ -1020,7 +1022,7 @@ public:
}
Flow visitSIMDExtract(SIMDExtract* curr) {
NOTE_ENTER("SIMDExtract");
- Flow flow = this->visit(curr->vec);
+ Flow flow = self()->visit(curr->vec);
if (flow.breaking()) {
return flow;
}
@@ -1047,12 +1049,12 @@ public:
}
Flow visitSIMDReplace(SIMDReplace* curr) {
NOTE_ENTER("SIMDReplace");
- Flow flow = this->visit(curr->vec);
+ Flow flow = self()->visit(curr->vec);
if (flow.breaking()) {
return flow;
}
Literal vec = flow.getSingleValue();
- flow = this->visit(curr->value);
+ flow = self()->visit(curr->value);
if (flow.breaking()) {
return flow;
}
@@ -1075,12 +1077,12 @@ public:
}
Flow visitSIMDShuffle(SIMDShuffle* curr) {
NOTE_ENTER("SIMDShuffle");
- Flow flow = this->visit(curr->left);
+ Flow flow = self()->visit(curr->left);
if (flow.breaking()) {
return flow;
}
Literal left = flow.getSingleValue();
- flow = this->visit(curr->right);
+ flow = self()->visit(curr->right);
if (flow.breaking()) {
return flow;
}
@@ -1089,17 +1091,17 @@ public:
}
Flow visitSIMDTernary(SIMDTernary* curr) {
NOTE_ENTER("SIMDBitselect");
- Flow flow = this->visit(curr->a);
+ Flow flow = self()->visit(curr->a);
if (flow.breaking()) {
return flow;
}
Literal a = flow.getSingleValue();
- flow = this->visit(curr->b);
+ flow = self()->visit(curr->b);
if (flow.breaking()) {
return flow;
}
Literal b = flow.getSingleValue();
- flow = this->visit(curr->c);
+ flow = self()->visit(curr->c);
if (flow.breaking()) {
return flow;
}
@@ -1127,12 +1129,12 @@ public:
}
Flow visitSIMDShift(SIMDShift* curr) {
NOTE_ENTER("SIMDShift");
- Flow flow = this->visit(curr->vec);
+ Flow flow = self()->visit(curr->vec);
if (flow.breaking()) {
return flow;
}
Literal vec = flow.getSingleValue();
- flow = this->visit(curr->shift);
+ flow = self()->visit(curr->shift);
if (flow.breaking()) {
return flow;
}
@@ -1453,7 +1455,7 @@ public:
};
template<typename T> Cast doCast(T* curr) {
- Flow ref = this->visit(curr->ref);
+ Flow ref = self()->visit(curr->ref);
if (ref.breaking()) {
return typename Cast::Breaking{ref};
}
@@ -1461,7 +1463,7 @@ public:
Literal intendedRtt;
if (curr->rtt) {
// This is a dynamic check with an RTT.
- Flow rtt = this->visit(curr->rtt);
+ Flow rtt = self()->visit(curr->rtt);
if (rtt.breaking()) {
return typename Cast::Breaking{rtt};
}
@@ -1623,7 +1625,7 @@ public:
return Literal::makeCanonicalRtt(curr->type.getHeapType());
}
Flow visitRttSub(RttSub* curr) {
- Flow parent = this->visit(curr->parent);
+ Flow parent = self()->visit(curr->parent);
if (parent.breaking()) {
return parent;
}
@@ -1640,7 +1642,7 @@ public:
NOTE_ENTER("StructNew");
Literal rttVal;
if (curr->rtt) {
- Flow rtt = this->visit(curr->rtt);
+ Flow rtt = self()->visit(curr->rtt);
if (rtt.breaking()) {
return rtt;
}
@@ -1650,7 +1652,7 @@ public:
// We cannot proceed to compute the heap type, as there isn't one. Just
// find why we are unreachable, and stop there.
for (auto* operand : curr->operands) {
- auto value = this->visit(operand);
+ auto value = self()->visit(operand);
if (value.breaking()) {
return value;
}
@@ -1664,7 +1666,7 @@ public:
if (curr->isWithDefault()) {
data[i] = Literal::makeZero(fields[i].type);
} else {
- auto value = this->visit(curr->operands[i]);
+ auto value = self()->visit(curr->operands[i]);
if (value.breaking()) {
return value;
}
@@ -1678,7 +1680,7 @@ public:
}
Flow visitStructGet(StructGet* curr) {
NOTE_ENTER("StructGet");
- Flow ref = this->visit(curr->ref);
+ Flow ref = self()->visit(curr->ref);
if (ref.breaking()) {
return ref;
}
@@ -1691,11 +1693,11 @@ public:
}
Flow visitStructSet(StructSet* curr) {
NOTE_ENTER("StructSet");
- Flow ref = this->visit(curr->ref);
+ Flow ref = self()->visit(curr->ref);
if (ref.breaking()) {
return ref;
}
- Flow value = this->visit(curr->value);
+ Flow value = self()->visit(curr->value);
if (value.breaking()) {
return value;
}
@@ -1719,20 +1721,20 @@ public:
NOTE_ENTER("ArrayNew");
Literal rttVal;
if (curr->rtt) {
- Flow rtt = this->visit(curr->rtt);
+ Flow rtt = self()->visit(curr->rtt);
if (rtt.breaking()) {
return rtt;
}
rttVal = rtt.getSingleValue();
}
- auto size = this->visit(curr->size);
+ auto size = self()->visit(curr->size);
if (size.breaking()) {
return size;
}
if (curr->type == Type::unreachable) {
// We cannot proceed to compute the heap type, as there isn't one. Just
// visit the unreachable child, and stop there.
- auto init = this->visit(curr->init);
+ auto init = self()->visit(curr->init);
assert(init.breaking());
return init;
}
@@ -1748,7 +1750,7 @@ public:
data[i] = Literal::makeZero(element.type);
}
} else {
- auto init = this->visit(curr->init);
+ auto init = self()->visit(curr->init);
if (init.breaking()) {
return init;
}
@@ -1767,7 +1769,7 @@ public:
NOTE_ENTER("ArrayInit");
Literal rttVal;
if (curr->rtt) {
- Flow rtt = this->visit(curr->rtt);
+ Flow rtt = self()->visit(curr->rtt);
if (rtt.breaking()) {
return rtt;
}
@@ -1781,7 +1783,7 @@ public:
// We cannot proceed to compute the heap type, as there isn't one. Just
// find why we are unreachable, and stop there.
for (auto* value : curr->values) {
- auto result = this->visit(value);
+ auto result = self()->visit(value);
if (result.breaking()) {
return result;
}
@@ -1792,7 +1794,7 @@ public:
auto field = heapType.getArray().element;
Literals data(num);
for (Index i = 0; i < num; i++) {
- auto value = this->visit(curr->values[i]);
+ auto value = self()->visit(curr->values[i]);
if (value.breaking()) {
return value;
}
@@ -1805,11 +1807,11 @@ public:
}
Flow visitArrayGet(ArrayGet* curr) {
NOTE_ENTER("ArrayGet");
- Flow ref = this->visit(curr->ref);
+ Flow ref = self()->visit(curr->ref);
if (ref.breaking()) {
return ref;
}
- Flow index = this->visit(curr->index);
+ Flow index = self()->visit(curr->index);
if (index.breaking()) {
return index;
}
@@ -1826,15 +1828,15 @@ public:
}
Flow visitArraySet(ArraySet* curr) {
NOTE_ENTER("ArraySet");
- Flow ref = this->visit(curr->ref);
+ Flow ref = self()->visit(curr->ref);
if (ref.breaking()) {
return ref;
}
- Flow index = this->visit(curr->index);
+ Flow index = self()->visit(curr->index);
if (index.breaking()) {
return index;
}
- Flow value = this->visit(curr->value);
+ Flow value = self()->visit(curr->value);
if (value.breaking()) {
return value;
}
@@ -1852,7 +1854,7 @@ public:
}
Flow visitArrayLen(ArrayLen* curr) {
NOTE_ENTER("ArrayLen");
- Flow ref = this->visit(curr->ref);
+ Flow ref = self()->visit(curr->ref);
if (ref.breaking()) {
return ref;
}
@@ -1864,23 +1866,23 @@ public:
}
Flow visitArrayCopy(ArrayCopy* curr) {
NOTE_ENTER("ArrayCopy");
- Flow destRef = this->visit(curr->destRef);
+ Flow destRef = self()->visit(curr->destRef);
if (destRef.breaking()) {
return destRef;
}
- Flow destIndex = this->visit(curr->destIndex);
+ Flow destIndex = self()->visit(curr->destIndex);
if (destIndex.breaking()) {
return destIndex;
}
- Flow srcRef = this->visit(curr->srcRef);
+ Flow srcRef = self()->visit(curr->srcRef);
if (srcRef.breaking()) {
return srcRef;
}
- Flow srcIndex = this->visit(curr->srcIndex);
+ Flow srcIndex = self()->visit(curr->srcIndex);
if (srcIndex.breaking()) {
return srcIndex;
}
- Flow length = this->visit(curr->length);
+ Flow length = self()->visit(curr->length);
if (length.breaking()) {
return length;
}
@@ -2279,8 +2281,11 @@ public:
};
//
-// An instance of a WebAssembly module, which can execute it via AST
-// interpretation.
+// A runner for a module. Each runner contains the information to execute the
+// module, such as the state of globals, and so forth, so it basically
+// encapsulates an instantiation of the wasm, and implements all the interpreter
+// instructions that use that info (like global.set etc.) that are not declared
+// in ExpressionRunner, which just looks at a single instruction.
//
// To embed this interpreter, you need to provide an ExternalInterface instance
// (see below) which provides the embedding-specific details, that is, how to
@@ -2289,7 +2294,8 @@ public:
// To call into the interpreter, use callExport.
//
-template<typename GlobalManager, typename SubType> class ModuleInstanceBase {
+template<typename GlobalManager, typename SubType>
+class ModuleRunnerBase : public ExpressionRunner<SubType> {
public:
//
// You need to implement one of these to create a concrete interpreter. The
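For context, a minimal embedding sketch (not part of this commit): it mirrors the BinaryenModuleInterpret change at the top of this diff and the callExport entry point named in the comment above; the exact callExport argument form is assumed.

    #include "shell-interface.h"
    #include "wasm-interpreter.h"

    // Instantiates a module with the shell's external interface and runs one export.
    wasm::Literals runExport(wasm::Module& module, wasm::Name exportName) {
      wasm::ShellExternalInterface interface;          // embedding-specific callbacks
      wasm::ModuleRunner instance(module, &interface); // sets up globals, memory, tables
      wasm::Literals noArgs;
      return instance.callExport(exportName, noArgs);  // assumed callExport signature
    }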
@@ -2472,6 +2478,7 @@ public:
SubType* self() { return static_cast<SubType*>(this); }
+ // TODO: this duplicates module in ExpressionRunner, and can be removed
Module& wasm;
// Values of globals
@@ -2480,12 +2487,12 @@ public:
// Multivalue ABI support (see push/pop).
std::vector<Literals> multiValues;
- ModuleInstanceBase(
+ ModuleRunnerBase(
Module& wasm,
ExternalInterface* externalInterface,
std::map<Name, std::shared_ptr<SubType>> linkedInstances_ = {})
- : wasm(wasm), externalInterface(externalInterface),
- linkedInstances(linkedInstances_) {
+ : ExpressionRunner<SubType>(&wasm), wasm(wasm),
+ externalInterface(externalInterface), linkedInstances(linkedInstances_) {
// import globals from the outside
externalInterface->importGlobals(globals, wasm);
// prepare memory
@@ -2587,13 +2594,8 @@ private:
}
ModuleUtils::iterActiveElementSegments(wasm, [&](ElementSegment* segment) {
- Function dummyFunc;
- dummyFunc.type = Signature(Type::none, Type::none);
- FunctionScope dummyScope(&dummyFunc, {});
- RuntimeExpressionRunner runner(*this, dummyScope, maxDepth);
-
Address offset =
- (uint32_t)runner.visit(segment->offset).getSingleValue().geti32();
+ (uint32_t)self()->visit(segment->offset).getSingleValue().geti32();
Table* table = wasm.getTable(segment->table);
ExternalInterface* extInterface = externalInterface;
@@ -2605,7 +2607,7 @@ private:
}
for (Index i = 0; i < segment->data.size(); ++i) {
- Flow ret = runner.visit(segment->data[i]);
+ Flow ret = self()->visit(segment->data[i]);
extInterface->tableStore(tableName, offset + i, ret.getSingleValue());
}
});
@@ -2638,14 +2640,8 @@ private:
drop.segment = i;
drop.finalize();
- // we don't actually have a function, but we need one in order to visit
- // the memory.init and data.drop instructions.
- Function dummyFunc;
- dummyFunc.type = Signature(Type::none, Type::none);
- FunctionScope dummyScope(&dummyFunc, {});
- RuntimeExpressionRunner runner(*this, dummyScope, maxDepth);
- runner.visit(&init);
- runner.visit(&drop);
+ self()->visit(&init);
+ self()->visit(&drop);
}
}
@@ -2654,9 +2650,17 @@ public:
public:
std::vector<Literals> locals;
Function* function;
+ SubType& parent;
+
+ FunctionScope* oldScope;
+
+ FunctionScope(Function* function,
+ const Literals& arguments,
+ SubType& parent)
+ : function(function), parent(parent) {
+ oldScope = parent.scope;
+ parent.scope = this;
- FunctionScope(Function* function, const Literals& arguments)
- : function(function) {
if (function->getParams().size() != arguments.size()) {
std::cerr << "Function `" << function->name << "` expects "
<< function->getParams().size() << " parameters, got "
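For context, a sketch of the RAII scope handling this hunk (and the destructor in the following one) introduces, with illustrative names: the constructor installs the new scope on the runner and remembers the previous one, and the destructor restores it, so nested and recursive calls unwind correctly even when an exception propagates.

    // Hypothetical minimal version of the scope bookkeeping.
    struct Runner;
    struct Scope {
      Runner& parent;
      Scope* oldScope;
      explicit Scope(Runner& parent);
      ~Scope();
    };
    struct Runner {
      Scope* scope = nullptr; // the currently active function scope
    };
    Scope::Scope(Runner& parent) : parent(parent), oldScope(parent.scope) {
      parent.scope = this;   // make this the active scope
    }
    Scope::~Scope() { parent.scope = oldScope; } // restore the caller's scope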
@@ -2680,899 +2684,875 @@ public:
}
}
}
- };
- // Executes expressions with concrete runtime info, the function and module at
- // runtime
- template<typename RERSubType>
- class RuntimeExpressionRunnerBase : public ExpressionRunner<RERSubType> {
- ModuleInstanceBase& instance;
- FunctionScope& scope;
- // Stack of <caught exception, caught catch's try label>
- SmallVector<std::pair<WasmException, Name>, 4> exceptionStack;
+ ~FunctionScope() { parent.scope = oldScope; }
+
// The current delegate target, if delegation of an exception is in
// progress. If no delegation is in progress, this will be an empty Name.
+ // This is on a function scope because it cannot "escape" to the outside,
+ // that is, a delegate target is like a branch target, it operates within
+ // a function.
Name currDelegateTarget;
+ };
- protected:
- // Returns the instance that defines the memory used by this one.
- SubType* getMemoryInstance() {
- auto* inst = instance.self();
- while (inst->wasm.memory.imported()) {
- inst = inst->linkedInstances.at(inst->wasm.memory.module).get();
- }
- return inst;
- }
+private:
+ // This is managed in an RAII manner by the FunctionScope class.
+ FunctionScope* scope = nullptr;
- // Returns a reference to the current value of a potentially imported global
- Literals& getGlobal(Name name) {
- auto* inst = instance.self();
- auto* global = inst->wasm.getGlobal(name);
- while (global->imported()) {
- inst = inst->linkedInstances.at(global->module).get();
- Export* globalExport = inst->wasm.getExport(global->base);
- global = inst->wasm.getGlobal(globalExport->value);
- }
+ // Stack of <caught exception, caught catch's try label>
+ SmallVector<std::pair<WasmException, Name>, 4> exceptionStack;
+
+protected:
+ // Returns the instance that defines the memory used by this one.
+ SubType* getMemoryInstance() {
+ auto* inst = self();
+ while (inst->wasm.memory.imported()) {
+ inst = inst->linkedInstances.at(inst->wasm.memory.module).get();
+ }
+ return inst;
+ }
- return inst->globals[global->name];
+ // Returns a reference to the current value of a potentially imported global
+ Literals& getGlobal(Name name) {
+ auto* inst = self();
+ auto* global = inst->wasm.getGlobal(name);
+ while (global->imported()) {
+ inst = inst->linkedInstances.at(global->module).get();
+ Export* globalExport = inst->wasm.getExport(global->base);
+ global = inst->wasm.getGlobal(globalExport->value);
}
- public:
- RuntimeExpressionRunnerBase(ModuleInstanceBase& instance,
- FunctionScope& scope,
- Index maxDepth)
- : ExpressionRunner<RERSubType>(&instance.wasm, maxDepth),
- instance(instance), scope(scope) {}
-
- Flow visitCall(Call* curr) {
- NOTE_ENTER("Call");
- NOTE_NAME(curr->target);
- Literals arguments;
- Flow flow = this->generateArguments(curr->operands, arguments);
- if (flow.breaking()) {
- return flow;
- }
- auto* func = instance.wasm.getFunction(curr->target);
- Flow ret;
- if (func->imported()) {
- ret.values = instance.externalInterface->callImport(func, arguments);
- } else {
- ret.values = instance.callFunctionInternal(curr->target, arguments);
- }
+ return inst->globals[global->name];
+ }
+
+public:
+ Flow visitCall(Call* curr) {
+ NOTE_ENTER("Call");
+ NOTE_NAME(curr->target);
+ Literals arguments;
+ Flow flow = self()->generateArguments(curr->operands, arguments);
+ if (flow.breaking()) {
+ return flow;
+ }
+ auto* func = wasm.getFunction(curr->target);
+ Flow ret;
+ if (func->imported()) {
+ ret.values = externalInterface->callImport(func, arguments);
+ } else {
+ ret.values = callFunctionInternal(curr->target, arguments);
+ }
#ifdef WASM_INTERPRETER_DEBUG
- std::cout << "(returned to " << scope.function->name << ")\n";
+ std::cout << "(returned to " << scope->function->name << ")\n";
#endif
- // TODO: make this a proper tail call (return first)
- if (curr->isReturn) {
- ret.breakTo = RETURN_FLOW;
- }
- return ret;
+ // TODO: make this a proper tail call (return first)
+ if (curr->isReturn) {
+ ret.breakTo = RETURN_FLOW;
}
+ return ret;
+ }
- Flow visitCallIndirect(CallIndirect* curr) {
- NOTE_ENTER("CallIndirect");
- Literals arguments;
- Flow flow = this->generateArguments(curr->operands, arguments);
- if (flow.breaking()) {
- return flow;
- }
- Flow target = this->visit(curr->target);
- if (target.breaking()) {
- return target;
- }
+ Flow visitCallIndirect(CallIndirect* curr) {
+ NOTE_ENTER("CallIndirect");
+ Literals arguments;
+ Flow flow = self()->generateArguments(curr->operands, arguments);
+ if (flow.breaking()) {
+ return flow;
+ }
+ Flow target = self()->visit(curr->target);
+ if (target.breaking()) {
+ return target;
+ }
- Index index = target.getSingleValue().geti32();
- Type type = curr->isReturn ? scope.function->getResults() : curr->type;
+ Index index = target.getSingleValue().geti32();
+ Type type = curr->isReturn ? scope->function->getResults() : curr->type;
- auto info = instance.getTableInterfaceInfo(curr->table);
- Flow ret = info.interface->callTable(
- info.name, index, curr->heapType, arguments, type, *instance.self());
+ auto info = getTableInterfaceInfo(curr->table);
+ Flow ret = info.interface->callTable(
+ info.name, index, curr->heapType, arguments, type, *self());
- // TODO: make this a proper tail call (return first)
- if (curr->isReturn) {
- ret.breakTo = RETURN_FLOW;
- }
- return ret;
+ // TODO: make this a proper tail call (return first)
+ if (curr->isReturn) {
+ ret.breakTo = RETURN_FLOW;
+ }
+ return ret;
+ }
+ Flow visitCallRef(CallRef* curr) {
+ NOTE_ENTER("CallRef");
+ Literals arguments;
+ Flow flow = self()->generateArguments(curr->operands, arguments);
+ if (flow.breaking()) {
+ return flow;
+ }
+ Flow target = self()->visit(curr->target);
+ if (target.breaking()) {
+ return target;
+ }
+ if (target.getSingleValue().isNull()) {
+ trap("null target in call_ref");
+ }
+ Name funcName = target.getSingleValue().getFunc();
+ auto* func = wasm.getFunction(funcName);
+ Flow ret;
+ if (func->imported()) {
+ ret.values = externalInterface->callImport(func, arguments);
+ } else {
+ ret.values = callFunctionInternal(funcName, arguments);
}
- Flow visitCallRef(CallRef* curr) {
- NOTE_ENTER("CallRef");
- Literals arguments;
- Flow flow = this->generateArguments(curr->operands, arguments);
- if (flow.breaking()) {
- return flow;
- }
- Flow target = this->visit(curr->target);
- if (target.breaking()) {
- return target;
- }
- if (target.getSingleValue().isNull()) {
- trap("null target in call_ref");
- }
- Name funcName = target.getSingleValue().getFunc();
- auto* func = instance.wasm.getFunction(funcName);
- Flow ret;
- if (func->imported()) {
- ret.values = instance.externalInterface->callImport(func, arguments);
- } else {
- ret.values = instance.callFunctionInternal(funcName, arguments);
- }
#ifdef WASM_INTERPRETER_DEBUG
- std::cout << "(returned to " << scope.function->name << ")\n";
+ std::cout << "(returned to " << scope->function->name << ")\n";
#endif
- // TODO: make this a proper tail call (return first)
- if (curr->isReturn) {
- ret.breakTo = RETURN_FLOW;
- }
- return ret;
+ // TODO: make this a proper tail call (return first)
+ if (curr->isReturn) {
+ ret.breakTo = RETURN_FLOW;
}
+ return ret;
+ }
- Flow visitTableGet(TableGet* curr) {
- NOTE_ENTER("TableGet");
- Flow index = this->visit(curr->index);
- if (index.breaking()) {
- return index;
- }
- auto info = instance.getTableInterfaceInfo(curr->table);
- return info.interface->tableLoad(info.name,
- index.getSingleValue().geti32());
- }
- Flow visitTableSet(TableSet* curr) {
- NOTE_ENTER("TableSet");
- Flow indexFlow = this->visit(curr->index);
- if (indexFlow.breaking()) {
- return indexFlow;
- }
- Flow valueFlow = this->visit(curr->value);
- if (valueFlow.breaking()) {
- return valueFlow;
- }
- auto info = instance.getTableInterfaceInfo(curr->table);
- info.interface->tableStore(info.name,
- indexFlow.getSingleValue().geti32(),
- valueFlow.getSingleValue());
- return Flow();
+ Flow visitTableGet(TableGet* curr) {
+ NOTE_ENTER("TableGet");
+ Flow index = self()->visit(curr->index);
+ if (index.breaking()) {
+ return index;
}
+ auto info = getTableInterfaceInfo(curr->table);
+ return info.interface->tableLoad(info.name,
+ index.getSingleValue().geti32());
+ }
+ Flow visitTableSet(TableSet* curr) {
+ NOTE_ENTER("TableSet");
+ Flow indexFlow = self()->visit(curr->index);
+ if (indexFlow.breaking()) {
+ return indexFlow;
+ }
+ Flow valueFlow = self()->visit(curr->value);
+ if (valueFlow.breaking()) {
+ return valueFlow;
+ }
+ auto info = getTableInterfaceInfo(curr->table);
+ info.interface->tableStore(info.name,
+ indexFlow.getSingleValue().geti32(),
+ valueFlow.getSingleValue());
+ return Flow();
+ }
- Flow visitTableSize(TableSize* curr) {
- NOTE_ENTER("TableSize");
- auto info = instance.getTableInterfaceInfo(curr->table);
- Index tableSize = info.interface->tableSize(curr->table);
- return Literal::makeFromInt32(tableSize, Type::i32);
- }
+ Flow visitTableSize(TableSize* curr) {
+ NOTE_ENTER("TableSize");
+ auto info = getTableInterfaceInfo(curr->table);
+ Index tableSize = info.interface->tableSize(curr->table);
+ return Literal::makeFromInt32(tableSize, Type::i32);
+ }
- Flow visitTableGrow(TableGrow* curr) {
- NOTE_ENTER("TableGrow");
- Flow valueFlow = this->visit(curr->value);
- if (valueFlow.breaking()) {
- return valueFlow;
- }
- Flow deltaFlow = this->visit(curr->delta);
- if (deltaFlow.breaking()) {
- return deltaFlow;
- }
- Name tableName = curr->table;
- auto info = instance.getTableInterfaceInfo(tableName);
+ Flow visitTableGrow(TableGrow* curr) {
+ NOTE_ENTER("TableGrow");
+ Flow valueFlow = self()->visit(curr->value);
+ if (valueFlow.breaking()) {
+ return valueFlow;
+ }
+ Flow deltaFlow = self()->visit(curr->delta);
+ if (deltaFlow.breaking()) {
+ return deltaFlow;
+ }
+ Name tableName = curr->table;
+ auto info = getTableInterfaceInfo(tableName);
- Index tableSize = info.interface->tableSize(tableName);
- Flow ret = Literal::makeFromInt32(tableSize, Type::i32);
- Flow fail = Literal::makeFromInt32(-1, Type::i32);
- Index delta = deltaFlow.getSingleValue().geti32();
+ Index tableSize = info.interface->tableSize(tableName);
+ Flow ret = Literal::makeFromInt32(tableSize, Type::i32);
+ Flow fail = Literal::makeFromInt32(-1, Type::i32);
+ Index delta = deltaFlow.getSingleValue().geti32();
- if (tableSize >= uint32_t(-1) - delta) {
- return fail;
- }
- auto maxTableSize = instance.self()->wasm.getTable(tableName)->max;
- if (uint64_t(tableSize) + uint64_t(delta) > uint64_t(maxTableSize)) {
- return fail;
- }
- Index newSize = tableSize + delta;
- if (!info.interface->growTable(
- tableName, valueFlow.getSingleValue(), tableSize, newSize)) {
- // We failed to grow the table in practice, even though it was valid
- // to try to do so.
- return fail;
- }
- return ret;
+ if (tableSize >= uint32_t(-1) - delta) {
+ return fail;
+ }
+ auto maxTableSize = self()->wasm.getTable(tableName)->max;
+ if (uint64_t(tableSize) + uint64_t(delta) > uint64_t(maxTableSize)) {
+ return fail;
}
+ Index newSize = tableSize + delta;
+ if (!info.interface->growTable(
+ tableName, valueFlow.getSingleValue(), tableSize, newSize)) {
+ // We failed to grow the table in practice, even though it was valid
+ // to try to do so.
+ return fail;
+ }
+ return ret;
+ }
- Flow visitLocalGet(LocalGet* curr) {
- NOTE_ENTER("LocalGet");
- auto index = curr->index;
- NOTE_EVAL1(index);
- NOTE_EVAL1(scope.locals[index]);
- return scope.locals[index];
+ Flow visitLocalGet(LocalGet* curr) {
+ NOTE_ENTER("LocalGet");
+ auto index = curr->index;
+ NOTE_EVAL1(index);
+ NOTE_EVAL1(scope->locals[index]);
+ return scope->locals[index];
+ }
+ Flow visitLocalSet(LocalSet* curr) {
+ NOTE_ENTER("LocalSet");
+ auto index = curr->index;
+ Flow flow = self()->visit(curr->value);
+ if (flow.breaking()) {
+ return flow;
}
- Flow visitLocalSet(LocalSet* curr) {
- NOTE_ENTER("LocalSet");
- auto index = curr->index;
- Flow flow = this->visit(curr->value);
- if (flow.breaking()) {
- return flow;
- }
- NOTE_EVAL1(index);
- NOTE_EVAL1(flow.getSingleValue());
- assert(curr->isTee() ? Type::isSubType(flow.getType(), curr->type)
- : true);
- scope.locals[index] = flow.values;
- return curr->isTee() ? flow : Flow();
- }
-
- Flow visitGlobalGet(GlobalGet* curr) {
- NOTE_ENTER("GlobalGet");
- auto name = curr->name;
- NOTE_EVAL1(name);
- return getGlobal(name);
- }
- Flow visitGlobalSet(GlobalSet* curr) {
- NOTE_ENTER("GlobalSet");
- auto name = curr->name;
- Flow flow = this->visit(curr->value);
- if (flow.breaking()) {
- return flow;
- }
- NOTE_EVAL1(name);
- NOTE_EVAL1(flow.getSingleValue());
+ NOTE_EVAL1(index);
+ NOTE_EVAL1(flow.getSingleValue());
+ assert(curr->isTee() ? Type::isSubType(flow.getType(), curr->type) : true);
+ scope->locals[index] = flow.values;
+ return curr->isTee() ? flow : Flow();
+ }
- getGlobal(name) = flow.values;
- return Flow();
+ Flow visitGlobalGet(GlobalGet* curr) {
+ NOTE_ENTER("GlobalGet");
+ auto name = curr->name;
+ NOTE_EVAL1(name);
+ return getGlobal(name);
+ }
+ Flow visitGlobalSet(GlobalSet* curr) {
+ NOTE_ENTER("GlobalSet");
+ auto name = curr->name;
+ Flow flow = self()->visit(curr->value);
+ if (flow.breaking()) {
+ return flow;
}
+ NOTE_EVAL1(name);
+ NOTE_EVAL1(flow.getSingleValue());
- Flow visitLoad(Load* curr) {
- NOTE_ENTER("Load");
- Flow flow = this->visit(curr->ptr);
- if (flow.breaking()) {
- return flow;
- }
- NOTE_EVAL1(flow);
- auto* inst = getMemoryInstance();
- auto addr = inst->getFinalAddress(curr, flow.getSingleValue());
- if (curr->isAtomic) {
- inst->checkAtomicAddress(addr, curr->bytes);
- }
- auto ret = inst->externalInterface->load(curr, addr);
- NOTE_EVAL1(addr);
- NOTE_EVAL1(ret);
- return ret;
+ getGlobal(name) = flow.values;
+ return Flow();
+ }
+
+ Flow visitLoad(Load* curr) {
+ NOTE_ENTER("Load");
+ Flow flow = self()->visit(curr->ptr);
+ if (flow.breaking()) {
+ return flow;
}
- Flow visitStore(Store* curr) {
- NOTE_ENTER("Store");
- Flow ptr = this->visit(curr->ptr);
- if (ptr.breaking()) {
- return ptr;
- }
- Flow value = this->visit(curr->value);
- if (value.breaking()) {
- return value;
- }
- auto* inst = getMemoryInstance();
- auto addr = inst->getFinalAddress(curr, ptr.getSingleValue());
- if (curr->isAtomic) {
- inst->checkAtomicAddress(addr, curr->bytes);
- }
- NOTE_EVAL1(addr);
- NOTE_EVAL1(value);
- inst->externalInterface->store(curr, addr, value.getSingleValue());
- return Flow();
+ NOTE_EVAL1(flow);
+ auto* inst = getMemoryInstance();
+ auto addr = inst->getFinalAddress(curr, flow.getSingleValue());
+ if (curr->isAtomic) {
+ inst->checkAtomicAddress(addr, curr->bytes);
+ }
+ auto ret = inst->externalInterface->load(curr, addr);
+ NOTE_EVAL1(addr);
+ NOTE_EVAL1(ret);
+ return ret;
+ }
+ Flow visitStore(Store* curr) {
+ NOTE_ENTER("Store");
+ Flow ptr = self()->visit(curr->ptr);
+ if (ptr.breaking()) {
+ return ptr;
+ }
+ Flow value = self()->visit(curr->value);
+ if (value.breaking()) {
+ return value;
+ }
+ auto* inst = getMemoryInstance();
+ auto addr = inst->getFinalAddress(curr, ptr.getSingleValue());
+ if (curr->isAtomic) {
+ inst->checkAtomicAddress(addr, curr->bytes);
}
+ NOTE_EVAL1(addr);
+ NOTE_EVAL1(value);
+ inst->externalInterface->store(curr, addr, value.getSingleValue());
+ return Flow();
+ }
- Flow visitAtomicRMW(AtomicRMW* curr) {
- NOTE_ENTER("AtomicRMW");
- Flow ptr = this->visit(curr->ptr);
- if (ptr.breaking()) {
- return ptr;
- }
- auto value = this->visit(curr->value);
- if (value.breaking()) {
- return value;
- }
- NOTE_EVAL1(ptr);
- auto* inst = getMemoryInstance();
- auto addr = inst->getFinalAddress(curr, ptr.getSingleValue());
- NOTE_EVAL1(addr);
- NOTE_EVAL1(value);
- auto loaded = inst->doAtomicLoad(addr, curr->bytes, curr->type);
- NOTE_EVAL1(loaded);
- auto computed = value.getSingleValue();
- switch (curr->op) {
- case RMWAdd:
- computed = loaded.add(computed);
- break;
- case RMWSub:
- computed = loaded.sub(computed);
- break;
- case RMWAnd:
- computed = loaded.and_(computed);
- break;
- case RMWOr:
- computed = loaded.or_(computed);
- break;
- case RMWXor:
- computed = loaded.xor_(computed);
- break;
- case RMWXchg:
- break;
- }
- inst->doAtomicStore(addr, curr->bytes, computed);
- return loaded;
- }
- Flow visitAtomicCmpxchg(AtomicCmpxchg* curr) {
- NOTE_ENTER("AtomicCmpxchg");
- Flow ptr = this->visit(curr->ptr);
- if (ptr.breaking()) {
- return ptr;
- }
- NOTE_EVAL1(ptr);
- auto expected = this->visit(curr->expected);
- if (expected.breaking()) {
- return expected;
- }
- auto replacement = this->visit(curr->replacement);
- if (replacement.breaking()) {
- return replacement;
- }
- auto* inst = getMemoryInstance();
- auto addr = inst->getFinalAddress(curr, ptr.getSingleValue());
- expected =
- Flow(wrapToSmallerSize(expected.getSingleValue(), curr->bytes));
- NOTE_EVAL1(addr);
- NOTE_EVAL1(expected);
- NOTE_EVAL1(replacement);
- auto loaded = inst->doAtomicLoad(addr, curr->bytes, curr->type);
- NOTE_EVAL1(loaded);
- if (loaded == expected.getSingleValue()) {
- inst->doAtomicStore(addr, curr->bytes, replacement.getSingleValue());
- }
- return loaded;
- }
- Flow visitAtomicWait(AtomicWait* curr) {
- NOTE_ENTER("AtomicWait");
- Flow ptr = this->visit(curr->ptr);
- if (ptr.breaking()) {
- return ptr;
- }
- NOTE_EVAL1(ptr);
- auto expected = this->visit(curr->expected);
- NOTE_EVAL1(expected);
- if (expected.breaking()) {
- return expected;
- }
- auto timeout = this->visit(curr->timeout);
- NOTE_EVAL1(timeout);
- if (timeout.breaking()) {
- return timeout;
- }
- auto* inst = getMemoryInstance();
- auto bytes = curr->expectedType.getByteSize();
- auto addr = inst->getFinalAddress(curr, ptr.getSingleValue(), bytes);
- auto loaded = inst->doAtomicLoad(addr, bytes, curr->expectedType);
- NOTE_EVAL1(loaded);
- if (loaded != expected.getSingleValue()) {
- return Literal(int32_t(1)); // not equal
- }
- // TODO: add threads support!
- // for now, just assume we are woken up
- return Literal(int32_t(0)); // woken up
- }
- Flow visitAtomicNotify(AtomicNotify* curr) {
- NOTE_ENTER("AtomicNotify");
- Flow ptr = this->visit(curr->ptr);
- if (ptr.breaking()) {
- return ptr;
- }
- NOTE_EVAL1(ptr);
- auto count = this->visit(curr->notifyCount);
- NOTE_EVAL1(count);
- if (count.breaking()) {
- return count;
- }
- auto* inst = getMemoryInstance();
- auto addr = inst->getFinalAddress(curr, ptr.getSingleValue(), 4);
- // Just check TODO actual threads support
- inst->checkAtomicAddress(addr, 4);
- return Literal(int32_t(0)); // none woken up
- }
- Flow visitSIMDLoad(SIMDLoad* curr) {
- NOTE_ENTER("SIMDLoad");
- switch (curr->op) {
- case Load8SplatVec128:
- case Load16SplatVec128:
- case Load32SplatVec128:
- case Load64SplatVec128:
- return visitSIMDLoadSplat(curr);
- case Load8x8SVec128:
- case Load8x8UVec128:
- case Load16x4SVec128:
- case Load16x4UVec128:
- case Load32x2SVec128:
- case Load32x2UVec128:
- return visitSIMDLoadExtend(curr);
- case Load32ZeroVec128:
- case Load64ZeroVec128:
- return visitSIMDLoadZero(curr);
- }
- WASM_UNREACHABLE("invalid op");
+ Flow visitAtomicRMW(AtomicRMW* curr) {
+ NOTE_ENTER("AtomicRMW");
+ Flow ptr = self()->visit(curr->ptr);
+ if (ptr.breaking()) {
+ return ptr;
}
- Flow visitSIMDLoadSplat(SIMDLoad* curr) {
- Load load;
- load.type = Type::i32;
- load.bytes = curr->getMemBytes();
- load.signed_ = false;
- load.offset = curr->offset;
- load.align = curr->align;
- load.isAtomic = false;
- load.ptr = curr->ptr;
- Literal (Literal::*splat)() const = nullptr;
- switch (curr->op) {
- case Load8SplatVec128:
- splat = &Literal::splatI8x16;
- break;
- case Load16SplatVec128:
- splat = &Literal::splatI16x8;
- break;
- case Load32SplatVec128:
- splat = &Literal::splatI32x4;
- break;
- case Load64SplatVec128:
- load.type = Type::i64;
- splat = &Literal::splatI64x2;
- break;
- default:
- WASM_UNREACHABLE("invalid op");
- }
- load.finalize();
- Flow flow = this->visit(&load);
- if (flow.breaking()) {
- return flow;
- }
- return (flow.getSingleValue().*splat)();
+ auto value = self()->visit(curr->value);
+ if (value.breaking()) {
+ return value;
}
- Flow visitSIMDLoadExtend(SIMDLoad* curr) {
- Flow flow = this->visit(curr->ptr);
- if (flow.breaking()) {
- return flow;
- }
- NOTE_EVAL1(flow);
- Address src(uint32_t(flow.getSingleValue().geti32()));
- auto* inst = getMemoryInstance();
- auto loadLane = [&](Address addr) {
- switch (curr->op) {
- case Load8x8SVec128:
- return Literal(int32_t(inst->externalInterface->load8s(addr)));
- case Load8x8UVec128:
- return Literal(int32_t(inst->externalInterface->load8u(addr)));
- case Load16x4SVec128:
- return Literal(int32_t(inst->externalInterface->load16s(addr)));
- case Load16x4UVec128:
- return Literal(int32_t(inst->externalInterface->load16u(addr)));
- case Load32x2SVec128:
- return Literal(int64_t(inst->externalInterface->load32s(addr)));
- case Load32x2UVec128:
- return Literal(int64_t(inst->externalInterface->load32u(addr)));
- default:
- WASM_UNREACHABLE("unexpected op");
- }
+ NOTE_EVAL1(ptr);
+ auto* inst = getMemoryInstance();
+ auto addr = inst->getFinalAddress(curr, ptr.getSingleValue());
+ NOTE_EVAL1(addr);
+ NOTE_EVAL1(value);
+ auto loaded = inst->doAtomicLoad(addr, curr->bytes, curr->type);
+ NOTE_EVAL1(loaded);
+ auto computed = value.getSingleValue();
+ switch (curr->op) {
+ case RMWAdd:
+ computed = loaded.add(computed);
+ break;
+ case RMWSub:
+ computed = loaded.sub(computed);
+ break;
+ case RMWAnd:
+ computed = loaded.and_(computed);
+ break;
+ case RMWOr:
+ computed = loaded.or_(computed);
+ break;
+ case RMWXor:
+ computed = loaded.xor_(computed);
+ break;
+ case RMWXchg:
+ break;
+ }
+ inst->doAtomicStore(addr, curr->bytes, computed);
+ return loaded;
+ }
+ Flow visitAtomicCmpxchg(AtomicCmpxchg* curr) {
+ NOTE_ENTER("AtomicCmpxchg");
+ Flow ptr = self()->visit(curr->ptr);
+ if (ptr.breaking()) {
+ return ptr;
+ }
+ NOTE_EVAL1(ptr);
+ auto expected = self()->visit(curr->expected);
+ if (expected.breaking()) {
+ return expected;
+ }
+ auto replacement = self()->visit(curr->replacement);
+ if (replacement.breaking()) {
+ return replacement;
+ }
+ auto* inst = getMemoryInstance();
+ auto addr = inst->getFinalAddress(curr, ptr.getSingleValue());
+ expected = Flow(wrapToSmallerSize(expected.getSingleValue(), curr->bytes));
+ NOTE_EVAL1(addr);
+ NOTE_EVAL1(expected);
+ NOTE_EVAL1(replacement);
+ auto loaded = inst->doAtomicLoad(addr, curr->bytes, curr->type);
+ NOTE_EVAL1(loaded);
+ if (loaded == expected.getSingleValue()) {
+ inst->doAtomicStore(addr, curr->bytes, replacement.getSingleValue());
+ }
+ return loaded;
+ }
+ Flow visitAtomicWait(AtomicWait* curr) {
+ NOTE_ENTER("AtomicWait");
+ Flow ptr = self()->visit(curr->ptr);
+ if (ptr.breaking()) {
+ return ptr;
+ }
+ NOTE_EVAL1(ptr);
+ auto expected = self()->visit(curr->expected);
+ NOTE_EVAL1(expected);
+ if (expected.breaking()) {
+ return expected;
+ }
+ auto timeout = self()->visit(curr->timeout);
+ NOTE_EVAL1(timeout);
+ if (timeout.breaking()) {
+ return timeout;
+ }
+ auto* inst = getMemoryInstance();
+ auto bytes = curr->expectedType.getByteSize();
+ auto addr = inst->getFinalAddress(curr, ptr.getSingleValue(), bytes);
+ auto loaded = inst->doAtomicLoad(addr, bytes, curr->expectedType);
+ NOTE_EVAL1(loaded);
+ if (loaded != expected.getSingleValue()) {
+ return Literal(int32_t(1)); // not equal
+ }
+ // TODO: add threads support!
+ // for now, just assume we are woken up
+ return Literal(int32_t(0)); // woken up
+ }
+ Flow visitAtomicNotify(AtomicNotify* curr) {
+ NOTE_ENTER("AtomicNotify");
+ Flow ptr = self()->visit(curr->ptr);
+ if (ptr.breaking()) {
+ return ptr;
+ }
+ NOTE_EVAL1(ptr);
+ auto count = self()->visit(curr->notifyCount);
+ NOTE_EVAL1(count);
+ if (count.breaking()) {
+ return count;
+ }
+ auto* inst = getMemoryInstance();
+ auto addr = inst->getFinalAddress(curr, ptr.getSingleValue(), 4);
+ // Just check TODO actual threads support
+ inst->checkAtomicAddress(addr, 4);
+ return Literal(int32_t(0)); // none woken up
+ }
+ Flow visitSIMDLoad(SIMDLoad* curr) {
+ NOTE_ENTER("SIMDLoad");
+ switch (curr->op) {
+ case Load8SplatVec128:
+ case Load16SplatVec128:
+ case Load32SplatVec128:
+ case Load64SplatVec128:
+ return visitSIMDLoadSplat(curr);
+ case Load8x8SVec128:
+ case Load8x8UVec128:
+ case Load16x4SVec128:
+ case Load16x4UVec128:
+ case Load32x2SVec128:
+ case Load32x2UVec128:
+ return visitSIMDLoadExtend(curr);
+ case Load32ZeroVec128:
+ case Load64ZeroVec128:
+ return visitSIMDLoadZero(curr);
+ }
+ WASM_UNREACHABLE("invalid op");
+ }
+ Flow visitSIMDLoadSplat(SIMDLoad* curr) {
+ Load load;
+ load.type = Type::i32;
+ load.bytes = curr->getMemBytes();
+ load.signed_ = false;
+ load.offset = curr->offset;
+ load.align = curr->align;
+ load.isAtomic = false;
+ load.ptr = curr->ptr;
+ Literal (Literal::*splat)() const = nullptr;
+ switch (curr->op) {
+ case Load8SplatVec128:
+ splat = &Literal::splatI8x16;
+ break;
+ case Load16SplatVec128:
+ splat = &Literal::splatI16x8;
+ break;
+ case Load32SplatVec128:
+ splat = &Literal::splatI32x4;
+ break;
+ case Load64SplatVec128:
+ load.type = Type::i64;
+ splat = &Literal::splatI64x2;
+ break;
+ default:
WASM_UNREACHABLE("invalid op");
- };
- auto fillLanes = [&](auto lanes, size_t laneBytes) {
- for (auto& lane : lanes) {
- lane = loadLane(
- inst->getFinalAddress(curr, Literal(uint32_t(src)), laneBytes));
- src = Address(uint32_t(src) + laneBytes);
- }
- return Literal(lanes);
- };
+ }
+ load.finalize();
+ Flow flow = self()->visit(&load);
+ if (flow.breaking()) {
+ return flow;
+ }
+ return (flow.getSingleValue().*splat)();
+ }
+ Flow visitSIMDLoadExtend(SIMDLoad* curr) {
+ Flow flow = self()->visit(curr->ptr);
+ if (flow.breaking()) {
+ return flow;
+ }
+ NOTE_EVAL1(flow);
+ Address src(uint32_t(flow.getSingleValue().geti32()));
+ auto* inst = getMemoryInstance();
+ auto loadLane = [&](Address addr) {
switch (curr->op) {
case Load8x8SVec128:
- case Load8x8UVec128: {
- std::array<Literal, 8> lanes;
- return fillLanes(lanes, 1);
- }
+ return Literal(int32_t(inst->externalInterface->load8s(addr)));
+ case Load8x8UVec128:
+ return Literal(int32_t(inst->externalInterface->load8u(addr)));
case Load16x4SVec128:
- case Load16x4UVec128: {
- std::array<Literal, 4> lanes;
- return fillLanes(lanes, 2);
- }
+ return Literal(int32_t(inst->externalInterface->load16s(addr)));
+ case Load16x4UVec128:
+ return Literal(int32_t(inst->externalInterface->load16u(addr)));
case Load32x2SVec128:
- case Load32x2UVec128: {
- std::array<Literal, 2> lanes;
- return fillLanes(lanes, 4);
- }
+ return Literal(int64_t(inst->externalInterface->load32s(addr)));
+ case Load32x2UVec128:
+ return Literal(int64_t(inst->externalInterface->load32u(addr)));
default:
WASM_UNREACHABLE("unexpected op");
}
WASM_UNREACHABLE("invalid op");
- }
- Flow visitSIMDLoadZero(SIMDLoad* curr) {
- Flow flow = this->visit(curr->ptr);
- if (flow.breaking()) {
- return flow;
+ };
+ auto fillLanes = [&](auto lanes, size_t laneBytes) {
+ for (auto& lane : lanes) {
+ lane = loadLane(
+ inst->getFinalAddress(curr, Literal(uint32_t(src)), laneBytes));
+ src = Address(uint32_t(src) + laneBytes);
}
- NOTE_EVAL1(flow);
- auto* inst = getMemoryInstance();
- Address src =
- inst->getFinalAddress(curr, flow.getSingleValue(), curr->getMemBytes());
- auto zero =
- Literal::makeZero(curr->op == Load32ZeroVec128 ? Type::i32 : Type::i64);
- if (curr->op == Load32ZeroVec128) {
- auto val = Literal(inst->externalInterface->load32u(src));
- return Literal(std::array<Literal, 4>{{val, zero, zero, zero}});
- } else {
- auto val = Literal(inst->externalInterface->load64u(src));
- return Literal(std::array<Literal, 2>{{val, zero}});
+ return Literal(lanes);
+ };
+ switch (curr->op) {
+ case Load8x8SVec128:
+ case Load8x8UVec128: {
+ std::array<Literal, 8> lanes;
+ return fillLanes(lanes, 1);
}
- }
- Flow visitSIMDLoadStoreLane(SIMDLoadStoreLane* curr) {
- NOTE_ENTER("SIMDLoadStoreLane");
- Flow flow = this->visit(curr->ptr);
- if (flow.breaking()) {
- return flow;
+ case Load16x4SVec128:
+ case Load16x4UVec128: {
+ std::array<Literal, 4> lanes;
+ return fillLanes(lanes, 2);
}
- NOTE_EVAL1(flow);
- auto* inst = getMemoryInstance();
- Address addr =
- inst->getFinalAddress(curr, flow.getSingleValue(), curr->getMemBytes());
- flow = this->visit(curr->vec);
- if (flow.breaking()) {
- return flow;
+ case Load32x2SVec128:
+ case Load32x2UVec128: {
+ std::array<Literal, 2> lanes;
+ return fillLanes(lanes, 4);
}
- Literal vec = flow.getSingleValue();
- switch (curr->op) {
- case Load8LaneVec128:
- case Store8LaneVec128: {
- std::array<Literal, 16> lanes = vec.getLanesUI8x16();
- if (curr->isLoad()) {
- lanes[curr->index] = Literal(inst->externalInterface->load8u(addr));
- return Literal(lanes);
- } else {
- inst->externalInterface->store8(addr, lanes[curr->index].geti32());
- return {};
- }
+ default:
+ WASM_UNREACHABLE("unexpected op");
+ }
+ WASM_UNREACHABLE("invalid op");
+ }
+ Flow visitSIMDLoadZero(SIMDLoad* curr) {
+ Flow flow = self()->visit(curr->ptr);
+ if (flow.breaking()) {
+ return flow;
+ }
+ NOTE_EVAL1(flow);
+ auto* inst = getMemoryInstance();
+ Address src =
+ inst->getFinalAddress(curr, flow.getSingleValue(), curr->getMemBytes());
+ auto zero =
+ Literal::makeZero(curr->op == Load32ZeroVec128 ? Type::i32 : Type::i64);
+ if (curr->op == Load32ZeroVec128) {
+ auto val = Literal(inst->externalInterface->load32u(src));
+ return Literal(std::array<Literal, 4>{{val, zero, zero, zero}});
+ } else {
+ auto val = Literal(inst->externalInterface->load64u(src));
+ return Literal(std::array<Literal, 2>{{val, zero}});
+ }
+ }
+ Flow visitSIMDLoadStoreLane(SIMDLoadStoreLane* curr) {
+ NOTE_ENTER("SIMDLoadStoreLane");
+ Flow flow = self()->visit(curr->ptr);
+ if (flow.breaking()) {
+ return flow;
+ }
+ NOTE_EVAL1(flow);
+ auto* inst = getMemoryInstance();
+ Address addr =
+ inst->getFinalAddress(curr, flow.getSingleValue(), curr->getMemBytes());
+ flow = self()->visit(curr->vec);
+ if (flow.breaking()) {
+ return flow;
+ }
+ Literal vec = flow.getSingleValue();
+ switch (curr->op) {
+ case Load8LaneVec128:
+ case Store8LaneVec128: {
+ std::array<Literal, 16> lanes = vec.getLanesUI8x16();
+ if (curr->isLoad()) {
+ lanes[curr->index] = Literal(inst->externalInterface->load8u(addr));
+ return Literal(lanes);
+ } else {
+ inst->externalInterface->store8(addr, lanes[curr->index].geti32());
+ return {};
}
- case Load16LaneVec128:
- case Store16LaneVec128: {
- std::array<Literal, 8> lanes = vec.getLanesUI16x8();
- if (curr->isLoad()) {
- lanes[curr->index] =
- Literal(inst->externalInterface->load16u(addr));
- return Literal(lanes);
- } else {
- inst->externalInterface->store16(addr, lanes[curr->index].geti32());
- return {};
- }
+ }
+ case Load16LaneVec128:
+ case Store16LaneVec128: {
+ std::array<Literal, 8> lanes = vec.getLanesUI16x8();
+ if (curr->isLoad()) {
+ lanes[curr->index] = Literal(inst->externalInterface->load16u(addr));
+ return Literal(lanes);
+ } else {
+ inst->externalInterface->store16(addr, lanes[curr->index].geti32());
+ return {};
}
- case Load32LaneVec128:
- case Store32LaneVec128: {
- std::array<Literal, 4> lanes = vec.getLanesI32x4();
- if (curr->isLoad()) {
- lanes[curr->index] =
- Literal(inst->externalInterface->load32u(addr));
- return Literal(lanes);
- } else {
- inst->externalInterface->store32(addr, lanes[curr->index].geti32());
- return {};
- }
+ }
+ case Load32LaneVec128:
+ case Store32LaneVec128: {
+ std::array<Literal, 4> lanes = vec.getLanesI32x4();
+ if (curr->isLoad()) {
+ lanes[curr->index] = Literal(inst->externalInterface->load32u(addr));
+ return Literal(lanes);
+ } else {
+ inst->externalInterface->store32(addr, lanes[curr->index].geti32());
+ return {};
}
- case Store64LaneVec128:
- case Load64LaneVec128: {
- std::array<Literal, 2> lanes = vec.getLanesI64x2();
- if (curr->isLoad()) {
- lanes[curr->index] =
- Literal(inst->externalInterface->load64u(addr));
- return Literal(lanes);
- } else {
- inst->externalInterface->store64(addr, lanes[curr->index].geti64());
- return {};
- }
+ }
+ case Store64LaneVec128:
+ case Load64LaneVec128: {
+ std::array<Literal, 2> lanes = vec.getLanesI64x2();
+ if (curr->isLoad()) {
+ lanes[curr->index] = Literal(inst->externalInterface->load64u(addr));
+ return Literal(lanes);
+ } else {
+ inst->externalInterface->store64(addr, lanes[curr->index].geti64());
+ return {};
}
}
- WASM_UNREACHABLE("unexpected op");
}
- Flow visitMemorySize(MemorySize* curr) {
- NOTE_ENTER("MemorySize");
- auto* inst = getMemoryInstance();
- return Literal::makeFromInt64(inst->memorySize,
- inst->wasm.memory.indexType);
+ WASM_UNREACHABLE("unexpected op");
+ }
+ Flow visitMemorySize(MemorySize* curr) {
+ NOTE_ENTER("MemorySize");
+ auto* inst = getMemoryInstance();
+ return Literal::makeFromInt64(inst->memorySize,
+ inst->wasm.memory.indexType);
+ }
+ Flow visitMemoryGrow(MemoryGrow* curr) {
+ NOTE_ENTER("MemoryGrow");
+ auto* inst = getMemoryInstance();
+ auto indexType = inst->wasm.memory.indexType;
+ auto fail = Literal::makeFromInt64(-1, indexType);
+ Flow flow = self()->visit(curr->delta);
+ if (flow.breaking()) {
+ return flow;
}
- Flow visitMemoryGrow(MemoryGrow* curr) {
- NOTE_ENTER("MemoryGrow");
- auto* inst = getMemoryInstance();
- auto indexType = inst->wasm.memory.indexType;
- auto fail = Literal::makeFromInt64(-1, indexType);
- Flow flow = this->visit(curr->delta);
- if (flow.breaking()) {
- return flow;
- }
- Flow ret = Literal::makeFromInt64(inst->memorySize, indexType);
- uint64_t delta = flow.getSingleValue().getUnsigned();
- if (delta > uint32_t(-1) / Memory::kPageSize && indexType == Type::i32) {
- return fail;
- }
- if (inst->memorySize >= uint32_t(-1) - delta && indexType == Type::i32) {
- return fail;
- }
- auto newSize = inst->memorySize + delta;
- if (newSize > inst->wasm.memory.max) {
- return fail;
- }
- if (!inst->externalInterface->growMemory(inst->memorySize *
- Memory::kPageSize,
- newSize * Memory::kPageSize)) {
- // We failed to grow the memory in practice, even though it was valid
- // to try to do so.
- return fail;
- }
- inst->memorySize = newSize;
- return ret;
- }
- Flow visitMemoryInit(MemoryInit* curr) {
- NOTE_ENTER("MemoryInit");
- Flow dest = this->visit(curr->dest);
- if (dest.breaking()) {
- return dest;
- }
- Flow offset = this->visit(curr->offset);
- if (offset.breaking()) {
- return offset;
- }
- Flow size = this->visit(curr->size);
- if (size.breaking()) {
- return size;
- }
- NOTE_EVAL1(dest);
- NOTE_EVAL1(offset);
- NOTE_EVAL1(size);
-
- assert(curr->segment < instance.wasm.memory.segments.size());
- Memory::Segment& segment = instance.wasm.memory.segments[curr->segment];
-
- Address destVal(dest.getSingleValue().getUnsigned());
- Address offsetVal(uint32_t(offset.getSingleValue().geti32()));
- Address sizeVal(uint32_t(size.getSingleValue().geti32()));
-
- if (offsetVal + sizeVal > 0 &&
- instance.droppedSegments.count(curr->segment)) {
- trap("out of bounds segment access in memory.init");
- }
- if ((uint64_t)offsetVal + sizeVal > segment.data.size()) {
- trap("out of bounds segment access in memory.init");
- }
- auto* inst = getMemoryInstance();
- if (destVal + sizeVal > inst->memorySize * Memory::kPageSize) {
- trap("out of bounds memory access in memory.init");
- }
- for (size_t i = 0; i < sizeVal; ++i) {
- Literal addr(destVal + i);
- inst->externalInterface->store8(
- inst->getFinalAddressWithoutOffset(addr, 1),
- segment.data[offsetVal + i]);
- }
- return {};
- }
- Flow visitDataDrop(DataDrop* curr) {
- NOTE_ENTER("DataDrop");
- instance.droppedSegments.insert(curr->segment);
- return {};
- }
- Flow visitMemoryCopy(MemoryCopy* curr) {
- NOTE_ENTER("MemoryCopy");
- Flow dest = this->visit(curr->dest);
- if (dest.breaking()) {
- return dest;
- }
- Flow source = this->visit(curr->source);
- if (source.breaking()) {
- return source;
- }
- Flow size = this->visit(curr->size);
- if (size.breaking()) {
- return size;
- }
- NOTE_EVAL1(dest);
- NOTE_EVAL1(source);
- NOTE_EVAL1(size);
- Address destVal(dest.getSingleValue().getUnsigned());
- Address sourceVal(source.getSingleValue().getUnsigned());
- Address sizeVal(size.getSingleValue().getUnsigned());
-
- auto* inst = getMemoryInstance();
- if (sourceVal + sizeVal > inst->memorySize * Memory::kPageSize ||
- destVal + sizeVal > inst->memorySize * Memory::kPageSize ||
- // FIXME: better/cheaper way to detect wrapping?
- sourceVal + sizeVal < sourceVal || sourceVal + sizeVal < sizeVal ||
- destVal + sizeVal < destVal || destVal + sizeVal < sizeVal) {
- trap("out of bounds segment access in memory.copy");
- }
-
- int64_t start = 0;
- int64_t end = sizeVal;
- int step = 1;
- // Reverse direction if source is below dest
- if (sourceVal < destVal) {
- start = int64_t(sizeVal) - 1;
- end = -1;
- step = -1;
- }
- for (int64_t i = start; i != end; i += step) {
- inst->externalInterface->store8(
- inst->getFinalAddressWithoutOffset(Literal(destVal + i), 1),
- inst->externalInterface->load8s(
- inst->getFinalAddressWithoutOffset(Literal(sourceVal + i), 1)));
- }
- return {};
- }
- Flow visitMemoryFill(MemoryFill* curr) {
- NOTE_ENTER("MemoryFill");
- Flow dest = this->visit(curr->dest);
- if (dest.breaking()) {
- return dest;
- }
- Flow value = this->visit(curr->value);
- if (value.breaking()) {
- return value;
- }
- Flow size = this->visit(curr->size);
- if (size.breaking()) {
- return size;
- }
- NOTE_EVAL1(dest);
- NOTE_EVAL1(value);
- NOTE_EVAL1(size);
- Address destVal(dest.getSingleValue().getUnsigned());
- Address sizeVal(size.getSingleValue().getUnsigned());
-
- auto* inst = getMemoryInstance();
- // FIXME: cheaper wrapping detection?
- if (destVal > inst->memorySize * Memory::kPageSize ||
- sizeVal > inst->memorySize * Memory::kPageSize ||
- destVal + sizeVal > inst->memorySize * Memory::kPageSize) {
- trap("out of bounds memory access in memory.fill");
- }
- uint8_t val(value.getSingleValue().geti32());
- for (size_t i = 0; i < sizeVal; ++i) {
- inst->externalInterface->store8(
- inst->getFinalAddressWithoutOffset(Literal(destVal + i), 1), val);
- }
- return {};
- }
- Flow visitTry(Try* curr) {
- NOTE_ENTER("Try");
- try {
- return this->visit(curr->body);
- } catch (const WasmException& e) {
- // If delegation is in progress and the current try is not the target of
- // the delegation, don't handle it and just rethrow.
- if (currDelegateTarget.is()) {
- if (currDelegateTarget == curr->name) {
- currDelegateTarget.clear();
- } else {
- throw;
- }
- }
+ Flow ret = Literal::makeFromInt64(inst->memorySize, indexType);
+ uint64_t delta = flow.getSingleValue().getUnsigned();
+ if (delta > uint32_t(-1) / Memory::kPageSize && indexType == Type::i32) {
+ return fail;
+ }
+ if (inst->memorySize >= uint32_t(-1) - delta && indexType == Type::i32) {
+ return fail;
+ }
+ auto newSize = inst->memorySize + delta;
+ if (newSize > inst->wasm.memory.max) {
+ return fail;
+ }
+ if (!inst->externalInterface->growMemory(
+ inst->memorySize * Memory::kPageSize, newSize * Memory::kPageSize)) {
+ // We failed to grow the memory in practice, even though it was valid
+ // to try to do so.
+ return fail;
+ }
+ inst->memorySize = newSize;
+ return ret;
+ }
+ Flow visitMemoryInit(MemoryInit* curr) {
+ NOTE_ENTER("MemoryInit");
+ Flow dest = self()->visit(curr->dest);
+ if (dest.breaking()) {
+ return dest;
+ }
+ Flow offset = self()->visit(curr->offset);
+ if (offset.breaking()) {
+ return offset;
+ }
+ Flow size = self()->visit(curr->size);
+ if (size.breaking()) {
+ return size;
+ }
+ NOTE_EVAL1(dest);
+ NOTE_EVAL1(offset);
+ NOTE_EVAL1(size);
- auto processCatchBody = [&](Expression* catchBody) {
- // Push the current exception onto the exceptionStack in case
- // 'rethrow's use it
- exceptionStack.push_back(std::make_pair(e, curr->name));
- // We need to pop exceptionStack in either case: when the catch body
- // exits normally or when a new exception is thrown
- Flow ret;
- try {
- ret = this->visit(catchBody);
- } catch (const WasmException&) {
- exceptionStack.pop_back();
- throw;
- }
- exceptionStack.pop_back();
- return ret;
- };
+ assert(curr->segment < wasm.memory.segments.size());
+ Memory::Segment& segment = wasm.memory.segments[curr->segment];
- for (size_t i = 0; i < curr->catchTags.size(); i++) {
- if (curr->catchTags[i] == e.tag) {
- instance.multiValues.push_back(e.values);
- return processCatchBody(curr->catchBodies[i]);
- }
+ Address destVal(dest.getSingleValue().getUnsigned());
+ Address offsetVal(uint32_t(offset.getSingleValue().geti32()));
+ Address sizeVal(uint32_t(size.getSingleValue().geti32()));
+
+ if (offsetVal + sizeVal > 0 && droppedSegments.count(curr->segment)) {
+ trap("out of bounds segment access in memory.init");
+ }
+ if ((uint64_t)offsetVal + sizeVal > segment.data.size()) {
+ trap("out of bounds segment access in memory.init");
+ }
+ auto* inst = getMemoryInstance();
+ if (destVal + sizeVal > inst->memorySize * Memory::kPageSize) {
+ trap("out of bounds memory access in memory.init");
+ }
+ for (size_t i = 0; i < sizeVal; ++i) {
+ Literal addr(destVal + i);
+ inst->externalInterface->store8(
+ inst->getFinalAddressWithoutOffset(addr, 1),
+ segment.data[offsetVal + i]);
+ }
+ return {};
+ }
+ Flow visitDataDrop(DataDrop* curr) {
+ NOTE_ENTER("DataDrop");
+ droppedSegments.insert(curr->segment);
+ return {};
+ }
+ Flow visitMemoryCopy(MemoryCopy* curr) {
+ NOTE_ENTER("MemoryCopy");
+ Flow dest = self()->visit(curr->dest);
+ if (dest.breaking()) {
+ return dest;
+ }
+ Flow source = self()->visit(curr->source);
+ if (source.breaking()) {
+ return source;
+ }
+ Flow size = self()->visit(curr->size);
+ if (size.breaking()) {
+ return size;
+ }
+ NOTE_EVAL1(dest);
+ NOTE_EVAL1(source);
+ NOTE_EVAL1(size);
+ Address destVal(dest.getSingleValue().getUnsigned());
+ Address sourceVal(source.getSingleValue().getUnsigned());
+ Address sizeVal(size.getSingleValue().getUnsigned());
+
+ auto* inst = getMemoryInstance();
+ if (sourceVal + sizeVal > inst->memorySize * Memory::kPageSize ||
+ destVal + sizeVal > inst->memorySize * Memory::kPageSize ||
+ // FIXME: better/cheaper way to detect wrapping?
+ sourceVal + sizeVal < sourceVal || sourceVal + sizeVal < sizeVal ||
+ destVal + sizeVal < destVal || destVal + sizeVal < sizeVal) {
+ trap("out of bounds segment access in memory.copy");
+ }
+
+ int64_t start = 0;
+ int64_t end = sizeVal;
+ int step = 1;
+ // Reverse direction if source is below dest
+ if (sourceVal < destVal) {
+ start = int64_t(sizeVal) - 1;
+ end = -1;
+ step = -1;
+ }
+ for (int64_t i = start; i != end; i += step) {
+ inst->externalInterface->store8(
+ inst->getFinalAddressWithoutOffset(Literal(destVal + i), 1),
+ inst->externalInterface->load8s(
+ inst->getFinalAddressWithoutOffset(Literal(sourceVal + i), 1)));
+ }
+ return {};
+ }
+ Flow visitMemoryFill(MemoryFill* curr) {
+ NOTE_ENTER("MemoryFill");
+ Flow dest = self()->visit(curr->dest);
+ if (dest.breaking()) {
+ return dest;
+ }
+ Flow value = self()->visit(curr->value);
+ if (value.breaking()) {
+ return value;
+ }
+ Flow size = self()->visit(curr->size);
+ if (size.breaking()) {
+ return size;
+ }
+ NOTE_EVAL1(dest);
+ NOTE_EVAL1(value);
+ NOTE_EVAL1(size);
+ Address destVal(dest.getSingleValue().getUnsigned());
+ Address sizeVal(size.getSingleValue().getUnsigned());
+
+ auto* inst = getMemoryInstance();
+ // FIXME: cheaper wrapping detection?
+ if (destVal > inst->memorySize * Memory::kPageSize ||
+ sizeVal > inst->memorySize * Memory::kPageSize ||
+ destVal + sizeVal > inst->memorySize * Memory::kPageSize) {
+ trap("out of bounds memory access in memory.fill");
+ }
+ uint8_t val(value.getSingleValue().geti32());
+ for (size_t i = 0; i < sizeVal; ++i) {
+ inst->externalInterface->store8(
+ inst->getFinalAddressWithoutOffset(Literal(destVal + i), 1), val);
+ }
+ return {};
+ }
+ Flow visitTry(Try* curr) {
+ NOTE_ENTER("Try");
+ try {
+ return self()->visit(curr->body);
+ } catch (const WasmException& e) {
+ // If delegation is in progress and the current try is not the target of
+ // the delegation, don't handle it and just rethrow.
+ if (scope->currDelegateTarget.is()) {
+ if (scope->currDelegateTarget == curr->name) {
+ scope->currDelegateTarget.clear();
+ } else {
+ throw;
}
- if (curr->hasCatchAll()) {
- return processCatchBody(curr->catchBodies.back());
+ }
+
+ auto processCatchBody = [&](Expression* catchBody) {
+ // Push the current exception onto the exceptionStack in case
+ // 'rethrow's use it
+ exceptionStack.push_back(std::make_pair(e, curr->name));
+ // We need to pop exceptionStack in either case: when the catch body
+ // exits normally or when a new exception is thrown
+ Flow ret;
+ try {
+ ret = self()->visit(catchBody);
+ } catch (const WasmException&) {
+ exceptionStack.pop_back();
+ throw;
}
- if (curr->isDelegate()) {
- currDelegateTarget = curr->delegateTarget;
+ exceptionStack.pop_back();
+ return ret;
+ };
+
+ for (size_t i = 0; i < curr->catchTags.size(); i++) {
+ if (curr->catchTags[i] == e.tag) {
+ multiValues.push_back(e.values);
+ return processCatchBody(curr->catchBodies[i]);
}
- // This exception is not caught by this try-catch. Rethrow it.
- throw;
}
- }
- Flow visitRethrow(Rethrow* curr) {
- for (int i = exceptionStack.size() - 1; i >= 0; i--) {
- if (exceptionStack[i].second == curr->target) {
- throwException(exceptionStack[i].first);
- }
+ if (curr->hasCatchAll()) {
+ return processCatchBody(curr->catchBodies.back());
}
- WASM_UNREACHABLE("rethrow");
+ if (curr->isDelegate()) {
+ scope->currDelegateTarget = curr->delegateTarget;
+ }
+ // This exception is not caught by this try-catch. Rethrow it.
+ throw;
}
- Flow visitPop(Pop* curr) {
- NOTE_ENTER("Pop");
- assert(!instance.multiValues.empty());
- auto ret = instance.multiValues.back();
- assert(curr->type == ret.getType());
- instance.multiValues.pop_back();
- return ret;
+ }
+ Flow visitRethrow(Rethrow* curr) {
+ for (int i = exceptionStack.size() - 1; i >= 0; i--) {
+ if (exceptionStack[i].second == curr->target) {
+ throwException(exceptionStack[i].first);
+ }
}
+ WASM_UNREACHABLE("rethrow");
+ }
+ Flow visitPop(Pop* curr) {
+ NOTE_ENTER("Pop");
+ assert(!multiValues.empty());
+ auto ret = multiValues.back();
+ assert(curr->type == ret.getType());
+ multiValues.pop_back();
+ return ret;
+ }
- void trap(const char* why) override {
- instance.externalInterface->trap(why);
- }
+ void trap(const char* why) override { externalInterface->trap(why); }
- void hostLimit(const char* why) override {
- instance.externalInterface->hostLimit(why);
- }
+ void hostLimit(const char* why) override {
+ externalInterface->hostLimit(why);
+ }
- void throwException(const WasmException& exn) override {
- instance.externalInterface->throwException(exn);
- }
+ void throwException(const WasmException& exn) override {
+ externalInterface->throwException(exn);
+ }
- // Given a value, wrap it to a smaller given number of bytes.
- Literal wrapToSmallerSize(Literal value, Index bytes) {
- if (value.type == Type::i32) {
- switch (bytes) {
- case 1: {
- return value.and_(Literal(uint32_t(0xff)));
- }
- case 2: {
- return value.and_(Literal(uint32_t(0xffff)));
- }
- case 4: {
- break;
- }
- default:
- WASM_UNREACHABLE("unexpected bytes");
+ // Given a value, wrap it to a smaller given number of bytes.
+ Literal wrapToSmallerSize(Literal value, Index bytes) {
+ if (value.type == Type::i32) {
+ switch (bytes) {
+ case 1: {
+ return value.and_(Literal(uint32_t(0xff)));
}
- } else {
- assert(value.type == Type::i64);
- switch (bytes) {
- case 1: {
- return value.and_(Literal(uint64_t(0xff)));
- }
- case 2: {
- return value.and_(Literal(uint64_t(0xffff)));
- }
- case 4: {
- return value.and_(Literal(uint64_t(0xffffffffUL)));
- }
- case 8: {
- break;
- }
- default:
- WASM_UNREACHABLE("unexpected bytes");
+ case 2: {
+ return value.and_(Literal(uint32_t(0xffff)));
}
+ case 4: {
+ break;
+ }
+ default:
+ WASM_UNREACHABLE("unexpected bytes");
+ }
+ } else {
+ assert(value.type == Type::i64);
+ switch (bytes) {
+ case 1: {
+ return value.and_(Literal(uint64_t(0xff)));
+ }
+ case 2: {
+ return value.and_(Literal(uint64_t(0xffff)));
+ }
+ case 4: {
+ return value.and_(Literal(uint64_t(0xffffffffUL)));
+ }
+ case 8: {
+ break;
+ }
+ default:
+ WASM_UNREACHABLE("unexpected bytes");
}
- return value;
}
- };
-
- class RuntimeExpressionRunner
- : public RuntimeExpressionRunnerBase<RuntimeExpressionRunner> {
- public:
- RuntimeExpressionRunner(ModuleInstanceBase& instance,
- FunctionScope& scope,
- Index maxDepth)
- : RuntimeExpressionRunnerBase<RuntimeExpressionRunner>(
- instance, scope, maxDepth) {}
- };
+ return value;
+ }
// Call a function, starting an invocation.
- template<typename Runner = RuntimeExpressionRunner>
Literals callFunction(Name name, const Literals& arguments) {
// if the last call ended in a jump up the stack, it might have left stuff
// for us to clean up here
callDepth = 0;
functionStack.clear();
- return callFunctionInternal<Runner>(name, arguments);
+ return callFunctionInternal(name, arguments);
}
// Internal function call. Must be public so that callTable implementations
// can use it (refactor?)
- template<typename Runner = RuntimeExpressionRunner>
Literals callFunctionInternal(Name name, const Literals& arguments) {
if (callDepth > maxDepth) {
externalInterface->trap("stack limit");
@@ -3584,7 +3564,7 @@ public:
Function* function = wasm.getFunction(name);
assert(function);
- FunctionScope scope(function, arguments);
+ FunctionScope scope(function, arguments, *self());
#ifdef WASM_INTERPRETER_DEBUG
std::cout << "entering " << function->name << "\n with arguments:\n";
@@ -3593,7 +3573,7 @@ public:
}
#endif
- Flow flow = Runner(*this, scope, maxDepth).visit(function->body);
+ Flow flow = self()->visit(function->body);
// cannot still be breaking, it means we missed our stop
assert(!flow.breaking() || flow.breakTo == RETURN_FLOW);
auto type = flow.getType();
@@ -3606,6 +3586,7 @@ public:
// may decrease more than one, if we jumped up the stack
callDepth = previousCallDepth;
// if we jumped up the stack, we also need to pop higher frames
+ // TODO can FunctionScope handle this automatically?
while (functionStack.size() > previousFunctionStackSize) {
functionStack.pop_back();
}
@@ -3706,16 +3687,16 @@ protected:
std::map<Name, std::shared_ptr<SubType>> linkedInstances;
};
-// The default ModuleInstance uses a trivial global manager
+// The default ModuleRunner uses a trivial global manager
using TrivialGlobalManager = std::map<Name, Literals>;
-class ModuleInstance
- : public ModuleInstanceBase<TrivialGlobalManager, ModuleInstance> {
+class ModuleRunner
+ : public ModuleRunnerBase<TrivialGlobalManager, ModuleRunner> {
public:
- ModuleInstance(
+ ModuleRunner(
Module& wasm,
ExternalInterface* externalInterface,
- std::map<Name, std::shared_ptr<ModuleInstance>> linkedInstances = {})
- : ModuleInstanceBase(wasm, externalInterface, linkedInstances) {}
+ std::map<Name, std::shared_ptr<ModuleRunner>> linkedInstances = {})
+ : ModuleRunnerBase(wasm, externalInterface, linkedInstances) {}
};
} // namespace wasm
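
For orientation, this is how the renamed runner is typically driven by an embedder. It is a minimal sketch, not part of the patch, and it assumes some concrete ExternalInterface implementation (the hypothetical MyInterface below); the constructor signature and callFunction match the declarations in the hunk above.

    // Sketch only; MyInterface stands for a hypothetical
    // ModuleRunner::ExternalInterface implementation supplied by the embedder.
    #include "wasm-interpreter.h"

    using namespace wasm;

    Literals runByName(Module& module, MyInterface& interface, Name funcName) {
      // Construct a runner over the module, with no linked instances.
      ModuleRunner instance(module, &interface, {});
      // callFunction resets any leftover call depth / function stack from a
      // previous invocation, then evaluates the named function's body.
      return instance.callFunction(funcName, Literals{});
    }
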
diff --git a/test/lit/exec/delegate-vacuum.wast b/test/lit/exec/delegate-vacuum.wast
new file mode 100644
index 000000000..c92760568
--- /dev/null
+++ b/test/lit/exec/delegate-vacuum.wast
@@ -0,0 +1,60 @@
+;; NOTE: Assertions have been generated by update_lit_checks.py --output=fuzz-exec and should not be edited.
+
+;; RUN: wasm-opt %s --vacuum --fuzz-exec -all -q -o /dev/null 2>&1 | filecheck %s
+;; Test the effect of vacuum on delegation. The delegate target must not
+;; "escape" the current function scope and affect anything external, that is,
+;; it must be cleared on function exit.
+
+(module
+ (tag $tag$0 (param i32))
+ ;; CHECK: [fuzz-exec] calling export-1
+ ;; CHECK-NEXT: [exception thrown: tag$0 0]
+ (func "export-1"
+ (try
+ (do
+ (try
+ (do
+ (throw $tag$0
+ (i32.const 0)
+ )
+ )
+ ;; A delegation that leads to the caller. This sets the delegate field on
+ ;; this function scope.
+ (delegate 1)
+ )
+ )
+ (catch_all
+ (nop)
+ )
+ )
+ )
+ ;; CHECK: [fuzz-exec] calling export-2
+ ;; CHECK-NEXT: [trap unreachable]
+ (func "export-2"
+ (call $inner)
+ (unreachable)
+ )
+ (func $inner
+ ;; This inner function must not notice the delegate field that was set by
+ ;; the call to the previous export (if it does notice it, it would delegate
+ ;; to the caller or something else invalid, and the execution results would
+ ;; differ, causing fuzz-exec to fail).
+ (try
+ (do
+ (throw $tag$0
+ (i32.const 0)
+ )
+ )
+ (catch_all
+ (nop)
+ )
+ )
+ )
+)
+;; CHECK: [fuzz-exec] calling export-1
+;; CHECK-NEXT: [exception thrown: tag$0 0]
+
+;; CHECK: [fuzz-exec] calling export-2
+;; CHECK-NEXT: [trap unreachable]
+;; CHECK-NEXT: [fuzz-exec] comparing export-1
+;; CHECK-NEXT: [fuzz-exec] comparing export-2
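
The behavior this test pins down can be sketched in plain C++ terms (illustrative only, simplified names, not Binaryen code): a delegate target stored on the per-call FunctionScope dies with the call frame, whereas a runner-level field, as before this change, would leak into later calls such as the one export-2 makes into $inner.

    // Sketch only: per-call vs. runner-level delegate-target storage.
    #include <cassert>
    #include <optional>
    #include <string>

    // Simulates a runner-wide field (the old arrangement).
    std::optional<std::string> runnerLevelTarget;

    struct CallScope {
      // Per-call state (the new arrangement); destroyed when the call returns.
      std::optional<std::string> delegateTarget;
    };

    void callExportOne(bool perScope) {
      CallScope scope;
      // A `delegate` that targets the caller records the target somewhere.
      if (perScope) {
        scope.delegateTarget = "caller"; // gone once this function returns
      } else {
        runnerLevelTarget = "caller";    // survives into unrelated later calls
      }
    }

    int main() {
      callExportOne(true);
      assert(!runnerLevelTarget);  // per-scope: nothing escaped the call
      callExportOne(false);
      assert(runnerLevelTarget);   // runner-level: state leaked (the old bug)
    }
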