Diffstat (limited to 'src/passes')
-rw-r--r--  src/passes/CMakeLists.txt          1
-rw-r--r--  src/passes/I64ToI32Lowering.cpp   16
-rw-r--r--  src/passes/MemoryPacking.cpp       2
-rw-r--r--  src/passes/Metrics.cpp             2
-rw-r--r--  src/passes/Print.cpp               5
-rw-r--r--  src/passes/PrintCallGraph.cpp      2
-rw-r--r--  src/passes/RemoveNonJSOps.cpp      6
-rw-r--r--  src/passes/StackIR.cpp           393
-rw-r--r--  src/passes/pass.cpp              138
-rw-r--r--  src/passes/passes.h                3
10 files changed, 558 insertions, 10 deletions
diff --git a/src/passes/CMakeLists.txt b/src/passes/CMakeLists.txt
index bbb2a4610..25a1828dc 100644
--- a/src/passes/CMakeLists.txt
+++ b/src/passes/CMakeLists.txt
@@ -32,6 +32,7 @@ SET(passes_SOURCES
Precompute.cpp
Print.cpp
PrintCallGraph.cpp
+ StackIR.cpp
RedundantSetElimination.cpp
RelooperJumpThreading.cpp
ReReloop.cpp
diff --git a/src/passes/I64ToI32Lowering.cpp b/src/passes/I64ToI32Lowering.cpp
index e501107bd..2986deb9f 100644
--- a/src/passes/I64ToI32Lowering.cpp
+++ b/src/passes/I64ToI32Lowering.cpp
@@ -27,6 +27,7 @@
#include "emscripten-optimizer/istring.h"
#include "support/name.h"
#include "wasm-builder.h"
+#include "ir/module-utils.h"
#include "ir/names.h"
#include "asmjs/shared-constants.h"
@@ -143,19 +144,20 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
highBitVars.clear();
labelHighBitVars.clear();
freeTemps.clear();
- Function oldFunc(*func);
+ Module temp;
+ auto* oldFunc = ModuleUtils::copyFunction(func, temp);
func->params.clear();
func->vars.clear();
func->localNames.clear();
func->localIndices.clear();
Index newIdx = 0;
- Names::ensureNames(&oldFunc);
- for (Index i = 0; i < oldFunc.getNumLocals(); ++i) {
- assert(oldFunc.hasLocalName(i));
- Name lowName = oldFunc.getLocalName(i);
+ Names::ensureNames(oldFunc);
+ for (Index i = 0; i < oldFunc->getNumLocals(); ++i) {
+ assert(oldFunc->hasLocalName(i));
+ Name lowName = oldFunc->getLocalName(i);
Name highName = makeHighName(lowName);
- Type paramType = oldFunc.getLocalType(i);
- auto builderFunc = (i < oldFunc.getVarIndexBase()) ?
+ Type paramType = oldFunc->getLocalType(i);
+ auto builderFunc = (i < oldFunc->getVarIndexBase()) ?
Builder::addParam :
static_cast<Index (*)(Function*, Name, Type)>(Builder::addVar);
if (paramType == i64) {
diff --git a/src/passes/MemoryPacking.cpp b/src/passes/MemoryPacking.cpp
index 1ba004886..c7b20c582 100644
--- a/src/passes/MemoryPacking.cpp
+++ b/src/passes/MemoryPacking.cpp
@@ -24,6 +24,8 @@ namespace wasm {
const Index OVERHEAD = 8;
struct MemoryPacking : public Pass {
+ bool modifiesBinaryenIR() override { return false; }
+
void run(PassRunner* runner, Module* module) override {
if (!module->memory.exists) return;
std::vector<Memory::Segment> packed;
diff --git a/src/passes/Metrics.cpp b/src/passes/Metrics.cpp
index 8cbf96db5..81706042b 100644
--- a/src/passes/Metrics.cpp
+++ b/src/passes/Metrics.cpp
@@ -32,6 +32,8 @@ static Counts lastCounts;
// Prints metrics between optimization passes.
struct Metrics : public WalkerPass<PostWalker<Metrics, UnifiedExpressionVisitor<Metrics>>> {
+ bool modifiesBinaryenIR() override { return false; }
+
bool byFunction;
Counts counts;
diff --git a/src/passes/Print.cpp b/src/passes/Print.cpp
index b1082dcc1..dfdfa46d4 100644
--- a/src/passes/Print.cpp
+++ b/src/passes/Print.cpp
@@ -725,6 +725,9 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
}
o << " (; " << functionIndexes[curr->name] << " ;)";
}
+ if (curr->stackIR && !minify) {
+ o << " (; has Stack IR ;)";
+ }
if (curr->type.is()) {
o << maybeSpace << "(type " << curr->type << ')';
}
@@ -888,6 +891,8 @@ public:
Printer() : o(std::cout) {}
Printer(std::ostream* o) : o(*o) {}
+ bool modifiesBinaryenIR() override { return false; }
+
void run(PassRunner* runner, Module* module) override {
PrintSExpression print(o);
print.visitModule(module);
diff --git a/src/passes/PrintCallGraph.cpp b/src/passes/PrintCallGraph.cpp
index ac11dfb8b..fa58e3859 100644
--- a/src/passes/PrintCallGraph.cpp
+++ b/src/passes/PrintCallGraph.cpp
@@ -29,6 +29,8 @@
namespace wasm {
struct PrintCallGraph : public Pass {
+ bool modifiesBinaryenIR() override { return false; }
+
void run(PassRunner* runner, Module* module) override {
std::ostream &o = std::cout;
o << "digraph call {\n"
diff --git a/src/passes/RemoveNonJSOps.cpp b/src/passes/RemoveNonJSOps.cpp
index 76c9528cb..ef8c7531c 100644
--- a/src/passes/RemoveNonJSOps.cpp
+++ b/src/passes/RemoveNonJSOps.cpp
@@ -89,7 +89,11 @@ struct RemoveNonJSOpsPass : public WalkerPass<PostWalker<RemoveNonJSOpsPass>> {
// copy we then walk the function to rewrite any non-js operations it has
// as well.
for (auto &name : neededFunctions) {
- doWalkFunction(ModuleUtils::copyFunction(intrinsicsModule, *module, name));
+ auto* func = module->getFunctionOrNull(name);
+ if (!func) {
+ func = ModuleUtils::copyFunction(intrinsicsModule.getFunction(name), *module);
+ }
+ doWalkFunction(func);
}
neededFunctions.clear();
}
diff --git a/src/passes/StackIR.cpp b/src/passes/StackIR.cpp
new file mode 100644
index 000000000..43c95608e
--- /dev/null
+++ b/src/passes/StackIR.cpp
@@ -0,0 +1,393 @@
+/*
+ * Copyright 2018 WebAssembly Community Group participants
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// Operations on Stack IR.
+//
+
+#include "wasm.h"
+#include "pass.h"
+#include "wasm-stack.h"
+#include "ir/iteration.h"
+#include "ir/local-graph.h"
+
+namespace wasm {
+
+// Generate Stack IR from Binaryen IR
+
+struct GenerateStackIR : public WalkerPass<PostWalker<GenerateStackIR>> {
+ bool isFunctionParallel() override { return true; }
+
+ Pass* create() override { return new GenerateStackIR; }
+
+ bool modifiesBinaryenIR() override { return false; }
+
+ void doWalkFunction(Function* func) {
+ BufferWithRandomAccess buffer;
+ // a shim for the parent that a stackWriter expects - we don't need
+ // it to do anything, as we are just writing to Stack IR
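+ // (the index lookups below are only needed when emitting an actual
+ // binary, so in Binaryen2Stack mode they should never be reached -
+ // hence the WASM_UNREACHABLEs)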
+ struct Parent {
+ Module* module;
+ Parent(Module* module) : module(module) {}
+
+ Module* getModule() {
+ return module;
+ }
+ void writeDebugLocation(Expression* curr, Function* func) {
+ WASM_UNREACHABLE();
+ }
+ Index getFunctionIndex(Name name) {
+ WASM_UNREACHABLE();
+ }
+ Index getFunctionTypeIndex(Name name) {
+ WASM_UNREACHABLE();
+ }
+ Index getGlobalIndex(Name name) {
+ WASM_UNREACHABLE();
+ }
+ } parent(getModule());
+ StackWriter<StackWriterMode::Binaryen2Stack, Parent> stackWriter(parent, buffer, false);
+ stackWriter.setFunction(func);
+ stackWriter.visitPossibleBlockContents(func->body);
+ func->stackIR = make_unique<StackIR>();
+ func->stackIR->swap(stackWriter.stackIR);
+ }
+};
+
+Pass* createGenerateStackIRPass() {
+ return new GenerateStackIR();
+}
+
+// Print (for debugging purposes)
+
+struct PrintStackIR : public WalkerPass<PostWalker<PrintStackIR>> {
+ // Not parallel: this pass is just for testing and debugging; keep the output
+ // sorted by function order.
+ bool isFunctionParallel() override { return false; }
+
+ Pass* create() override { return new PrintStackIR; }
+
+ bool modifiesBinaryenIR() override { return false; }
+
+ void doWalkFunction(Function* func) {
+ std::cout << func->name << ":\n";
+ if (func->stackIR) {
+ std::cout << *func->stackIR;
+ } else {
+ std::cout << " (no stack ir)";
+ }
+ std::cout << '\n';
+ }
+};
+
+Pass* createPrintStackIRPass() {
+ return new PrintStackIR();
+}
+
+// Optimize
+
+class StackIROptimizer {
+ Function* func;
+ PassOptions& passOptions;
+ StackIR& insts;
+
+public:
+ StackIROptimizer(Function* func, PassOptions& passOptions) :
+ func(func), passOptions(passOptions), insts(*func->stackIR.get()) {
+ assert(func->stackIR);
+ }
+
+ void run() {
+ dce();
+ // FIXME: local2Stack is currently rather slow (due to localGraph),
+ // so for now run it only when really optimizing
+ if (passOptions.optimizeLevel >= 3 || passOptions.shrinkLevel >= 1) {
+ local2Stack();
+ }
+ removeUnneededBlocks();
+ dce();
+ }
+
+private:
+ // Passes.
+
+ // Remove unreachable code.
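+ // For example, once an instruction of unreachable type is seen, everything
+ // up to the next control flow barrier can be dropped:
+ //   unreachable    ;; nothing after this can execute
+ //   i32.const 1    ;; removed
+ //   drop           ;; removed
+ //   end            ;; barrier - normal code may resume after it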
+ void dce() {
+ bool inUnreachableCode = false;
+ for (Index i = 0; i < insts.size(); i++) {
+ auto* inst = insts[i];
+ if (!inst) continue;
+ if (inUnreachableCode) {
+ // Does the unreachable code end here?
+ if (isControlFlowBarrier(inst)) {
+ inUnreachableCode = false;
+ } else {
+ // We can remove this.
+ removeAt(i);
+ }
+ } else if (inst->type == unreachable) {
+ inUnreachableCode = true;
+ }
+ }
+ }
+
+ // If ordered properly, we can avoid a set_local/get_local pair,
+ // and use the value directly from the stack, for example
+ // [..produce a value on the stack..]
+ // set_local $x
+ // [..much code..]
+ // get_local $x
+ // call $foo ;; use the value, foo(value)
+ // As long as the code in between does not modify $x, and has
+ // no control flow branching out, we can remove both the set
+ // and the get.
+ void local2Stack() {
+ // We use the localGraph to tell us if a get-set pair is indeed
+ // a set that is read by that get, and only that get. Note that we run
+ // this on the Binaryen IR, so we are assuming that no previous opt
+ // has changed the interaction of local operations.
+ // TODO: we can do this a lot faster, as we just care about linear
+ // control flow.
+ LocalGraph localGraph(func);
+ localGraph.computeInfluences();
+ // We maintain a stack of relevant values. This contains:
+ // * a null for each actual value that the value stack would have
+ // * an index of each SetLocal that *could* be on the value
+ // stack at that location.
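+ // For example (a rough sketch of the bookkeeping):
+ //   i32.const 1    ;; values: [null] - a plain value
+ //   set_local $x   ;; consumes the null, then pushes this set's index
+ //   get_local $x   ;; the matching set is on top of values, so the
+ //                  ;; set/get pair can be removed and the constant's
+ //                  ;; value is used directly from the stack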
+ const Index null = -1;
+ std::vector<Index> values;
+ // We also maintain a stack of values vectors for control flow,
+ // saving the stack as we enter and restoring it when we exit.
+ std::vector<std::vector<Index>> savedValues;
+#ifdef STACK_OPT_DEBUG
+ std::cout << "func: " << func->name << '\n' << insts << '\n';
+#endif
+ for (Index i = 0; i < insts.size(); i++) {
+ auto* inst = insts[i];
+ if (!inst) continue;
+ // First, consume values from the stack as required.
+ auto consumed = getNumConsumedValues(inst);
+#ifdef STACK_OPT_DEBUG
+ std::cout << " " << i << " : " << *inst << ", " << values.size() << " on stack, will consume " << consumed << "\n ";
+ for (auto s : values) std::cout << s << ' ';
+ std::cout << '\n';
+#endif
+ // TODO: currently we run dce before this, but if we didn't, we'd need
+ // to handle unreachable code here - it's ok to pop multiple values
+ // there even if the stack is at size 0.
+ while (consumed > 0) {
+ assert(values.size() > 0);
+ // Whenever we hit a possible stack value, kill it - it would
+ // be consumed here, so we can never optimize to it.
+ while (values.back() != null) {
+ values.pop_back();
+ assert(values.size() > 0);
+ }
+ // Finally, consume the actual value that is consumed here.
+ values.pop_back();
+ consumed--;
+ }
+ // After consuming, we can see what to do with this. First, handle
+ // control flow.
+ if (isControlFlowBegin(inst)) {
+ // Save the stack for when we end this control flow.
+ savedValues.push_back(values); // TODO: optimize copies
+ values.clear();
+ } else if (isControlFlowEnd(inst)) {
+ assert(!savedValues.empty());
+ values = savedValues.back();
+ savedValues.pop_back();
+ } else if (isControlFlow(inst)) {
+ // Otherwise, in the middle of control flow, just clear it
+ values.clear();
+ }
+ // This is something we should handle, look into it.
+ if (isConcreteType(inst->type)) {
+ bool optimized = false;
+ if (auto* get = inst->origin->dynCast<GetLocal>()) {
+ // This is a potential optimization opportunity! See if we
+ // can reach the set.
+ if (values.size() > 0) {
+ Index j = values.size() - 1;
+ while (1) {
+ // If there's an actual value in the way, we've failed.
+ auto index = values[j];
+ if (index == null) break;
+ auto* set = insts[index]->origin->cast<SetLocal>();
+ if (set->index == get->index) {
+ // This might be a proper set-get pair, where the set is
+ // used by this get and nothing else, check that.
+ auto& sets = localGraph.getSetses[get];
+ if (sets.size() == 1 && *sets.begin() == set) {
+ auto& setInfluences = localGraph.setInfluences[set];
+ if (setInfluences.size() == 1) {
+ assert(*setInfluences.begin() == get);
+ // Do it! The set and the get can go away, the proper
+ // value is on the stack.
+#ifdef STACK_OPT_DEBUG
+ std::cout << " stackify the get\n";
+#endif
+ insts[index] = nullptr;
+ insts[i] = nullptr;
+ // Continuing on from here, replace this on the stack
+ // with a null, representing a regular value. We
+ // keep possible values above us active - they may
+ // be optimized later, as they would be pushed after
+ // us, and used before us, so there is no conflict.
+ values[j] = null;
+ optimized = true;
+ break;
+ }
+ }
+ }
+ // We failed here. Can we look some more?
+ if (j == 0) break;
+ j--;
+ }
+ }
+ }
+ if (!optimized) {
+ // This is an actual regular value on the value stack.
+ values.push_back(null);
+ }
+ } else if (inst->origin->is<SetLocal>() && inst->type == none) {
+ // This set is potentially optimizable later, add to stack.
+ values.push_back(i);
+ }
+ }
+ }
+
+ // There may be unnecessary blocks we can remove: blocks
+ // without branches to them are always ok to remove.
+ // TODO: a branch to a block in an if body can become
+ // a branch to that if body
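+ // (In Stack IR this just means dropping the block's BlockBegin and
+ // BlockEnd markers - both have the Block as their origin - while the
+ // block's contents stay in place in the linear sequence.)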
+ void removeUnneededBlocks() {
+ for (auto*& inst : insts) {
+ if (!inst) continue;
+ if (auto* block = inst->origin->dynCast<Block>()) {
+ if (!BranchUtils::BranchSeeker::hasNamed(block, block->name)) {
+ // TODO optimize, maybe run remove-unused-names
+ inst = nullptr;
+ }
+ }
+ }
+ }
+
+ // Utilities.
+
+ // A control flow "barrier" - a point where stack machine
+ // unreachability ends.
+ bool isControlFlowBarrier(StackInst* inst) {
+ switch (inst->op) {
+ case StackInst::BlockEnd:
+ case StackInst::IfElse:
+ case StackInst::IfEnd:
+ case StackInst::LoopEnd: {
+ return true;
+ }
+ default: {
+ return false;
+ }
+ }
+ }
+
+ // A control flow beginning.
+ bool isControlFlowBegin(StackInst* inst) {
+ switch (inst->op) {
+ case StackInst::BlockBegin:
+ case StackInst::IfBegin:
+ case StackInst::LoopBegin: {
+ return true;
+ }
+ default: {
+ return false;
+ }
+ }
+ }
+
+ // A control flow ending.
+ bool isControlFlowEnd(StackInst* inst) {
+ switch (inst->op) {
+ case StackInst::BlockEnd:
+ case StackInst::IfEnd:
+ case StackInst::LoopEnd: {
+ return true;
+ }
+ default: {
+ return false;
+ }
+ }
+ }
+
+ bool isControlFlow(StackInst* inst) {
+ return inst->op != StackInst::Basic;
+ }
+
+ // Remove the instruction at index i. If the instruction
+ // is control flow, and so has been expanded to multiple
+ // instructions, remove them as well.
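+ // (For example, removing a BlockBegin also removes everything through the
+ // matching BlockEnd, contents included - which is safe here since dce()
+ // only calls this on unreachable code.)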
+ void removeAt(Index i) {
+ auto* inst = insts[i];
+ insts[i] = nullptr;
+ if (inst->op == StackInst::Basic) {
+ return; // that was it
+ }
+ auto* origin = inst->origin;
+ while (1) {
+ i++;
+ assert(i < insts.size());
+ inst = insts[i];
+ insts[i] = nullptr;
+ if (inst && inst->origin == origin && isControlFlowEnd(inst)) {
+ return; // that's it, we removed it all
+ }
+ }
+ }
+
+ Index getNumConsumedValues(StackInst* inst) {
+ if (isControlFlow(inst)) {
+ // Only an If consumes a value here: its condition.
+ if (inst->op == StackInst::IfBegin) {
+ return 1;
+ }
+ return 0;
+ }
+ // Otherwise, for basic instructions, just count the expression children.
+ return ChildIterator(inst->origin).children.size();
+ }
+};
+
+struct OptimizeStackIR : public WalkerPass<PostWalker<OptimizeStackIR>> {
+ bool isFunctionParallel() override { return true; }
+
+ Pass* create() override { return new OptimizeStackIR; }
+
+ bool modifiesBinaryenIR() override { return false; }
+
+ void doWalkFunction(Function* func) {
+ if (!func->stackIR) {
+ return;
+ }
+ StackIROptimizer(func, getPassOptions()).run();
+ }
+};
+
+Pass* createOptimizeStackIRPass() {
+ return new OptimizeStackIR();
+}
+
+} // namespace wasm
+
diff --git a/src/passes/pass.cpp b/src/passes/pass.cpp
index c0354524d..97151f847 100644
--- a/src/passes/pass.cpp
+++ b/src/passes/pass.cpp
@@ -22,6 +22,7 @@
#include <pass.h>
#include <wasm-validator.h>
#include <wasm-io.h>
+#include "ir/hashed.h"
namespace wasm {
@@ -76,6 +77,7 @@ void PassRegistry::registerPasses() {
registerPass("flatten", "flattens out code, removing nesting", createFlattenPass);
registerPass("fpcast-emu", "emulates function pointer casts, allowing incorrect indirect calls to (sometimes) work", createFuncCastEmulationPass);
registerPass("func-metrics", "reports function metrics", createFunctionMetricsPass);
+ registerPass("generate-stack-ir", "generate Stack IR", createGenerateStackIRPass);
registerPass("inlining", "inline functions (you probably want inlining-optimizing)", createInliningPass);
registerPass("inlining-optimizing", "inline functions and optimizes where we inlined", createInliningOptimizingPass);
registerPass("legalize-js-interface", "legalizes i64 types on the import/export boundary", createLegalizeJSInterfacePass);
@@ -90,6 +92,7 @@ void PassRegistry::registerPasses() {
registerPass("metrics", "reports metrics", createMetricsPass);
registerPass("nm", "name list", createNameListPass);
registerPass("optimize-instructions", "optimizes instruction combinations", createOptimizeInstructionsPass);
+ registerPass("optimize-stack-ir", "optimize Stack IR", createOptimizeStackIRPass);
registerPass("pick-load-signs", "pick load signs based on their uses", createPickLoadSignsPass);
registerPass("post-emscripten", "miscellaneous optimizations for Emscripten-generated code", createPostEmscriptenPass);
registerPass("precompute", "computes compile-time evaluatable expressions", createPrecomputePass);
@@ -98,6 +101,7 @@ void PassRegistry::registerPasses() {
registerPass("print-minified", "print in minified s-expression format", createMinifiedPrinterPass);
registerPass("print-full", "print in full s-expression format", createFullPrinterPass);
registerPass("print-call-graph", "print call graph", createPrintCallGraphPass);
+ registerPass("print-stack-ir", "print out Stack IR (useful for internal debugging)", createPrintStackIRPass);
registerPass("relooper-jump-threading", "thread relooper jumps (fastcomp output only)", createRelooperJumpThreadingPass);
registerPass("remove-non-js-ops", "removes operations incompatible with js", createRemoveNonJSOpsPass);
registerPass("remove-imports", "removes imports and replaces them with nops", createRemoveImportsPass);
@@ -201,6 +205,12 @@ void PassRunner::addDefaultGlobalOptimizationPostPasses() {
add("duplicate-function-elimination"); // optimizations show more functions as duplicate
add("remove-unused-module-elements");
add("memory-packing");
+ // perform Stack IR optimizations here, at the very end of the
+ // optimization pipeline
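+ // (with these defaults, -O2/-O3 and -Os/-Oz generate and optimize
+ // Stack IR automatically, while -O0/-O1 skip it)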
+ if (options.optimizeLevel >= 2 || options.shrinkLevel >= 1) {
+ add("generate-stack-ir");
+ add("optimize-stack-ir");
+ }
}
static void dumpWast(Name name, Module* wasm) {
@@ -252,7 +262,7 @@ void PassRunner::run() {
runPassOnFunction(pass, func.get());
}
} else {
- pass->run(this, wasm);
+ runPass(pass);
}
auto after = std::chrono::steady_clock::now();
std::chrono::duration<double> diff = after - before;
@@ -320,7 +330,7 @@ void PassRunner::run() {
stack.push_back(pass);
} else {
flush();
- pass->run(this, wasm);
+ runPass(pass);
}
}
flush();
@@ -347,11 +357,135 @@ void PassRunner::doAdd(Pass* pass) {
pass->prepareToRun(this, wasm);
}
+// Checks that the state is valid before and after a
+// pass runs on a function. We run these extra checks when
+// pass-debug mode is enabled.
+struct AfterEffectFunctionChecker {
+ Function* func;
+ Name name;
+
+ // Check Stack IR state: if the main IR changes, there should be no
+ // stack IR, as the stack IR would be wrong.
+ bool beganWithStackIR;
+ HashType originalFunctionHash;
+
+ // In the creator we can scan the state of the module and function before the
+ // pass runs.
+ AfterEffectFunctionChecker(Function* func) : func(func), name(func->name) {
+ beganWithStackIR = func->stackIR != nullptr;
+ if (beganWithStackIR) {
+ originalFunctionHash = FunctionHasher::hashFunction(func);
+ }
+ }
+
+ // This is called after the pass is run, at which time we can check things.
+ void check() {
+ assert(func->name == name); // no global module changes should have occurred
+ if (beganWithStackIR && func->stackIR) {
+ auto after = FunctionHasher::hashFunction(func);
+ if (after != originalFunctionHash) {
+ Fatal() << "[PassRunner] PASS_DEBUG check failed: had Stack IR before and after the pass ran, and the pass modified the main IR, which invalidates Stack IR - pass should have been marked 'modifiesBinaryenIR'";
+ }
+ }
+ }
+};
+
+// Runs checks on the entire module, in a non-function-parallel pass.
+// In particular, in such a pass functions may be removed or renamed, so we track that here.
+struct AfterEffectModuleChecker {
+ Module* module;
+
+ std::vector<AfterEffectFunctionChecker> checkers;
+
+ bool beganWithAnyStackIR;
+
+ AfterEffectModuleChecker(Module* module) : module(module) {
+ for (auto& func : module->functions) {
+ checkers.emplace_back(func.get());
+ }
+ beganWithAnyStackIR = hasAnyStackIR();
+ }
+
+ void check() {
+ if (beganWithAnyStackIR && hasAnyStackIR()) {
+ // If anything changed to the functions, that's not good.
+ if (checkers.size() != module->functions.size()) {
+ error();
+ }
+ for (Index i = 0; i < checkers.size(); i++) {
+ // Did a pointer change? (a deallocated function could cause that)
+ if (module->functions[i].get() != checkers[i].func ||
+ module->functions[i]->body != checkers[i].func->body) {
+ error();
+ }
+ // Did a name change?
+ if (module->functions[i]->name != checkers[i].name) {
+ error();
+ }
+ }
+ // Global function state appears to not have been changed: the same
+ // functions are there. Look into their contents.
+ for (auto& checker : checkers) {
+ checker.check();
+ }
+ }
+ }
+
+ void error() {
+ Fatal() << "[PassRunner] PASS_DEBUG check failed: had Stack IR before and after the pass ran, and the pass modified global function state - pass should have been marked 'modifiesBinaryenIR'";
+ }
+
+ bool hasAnyStackIR() {
+ for (auto& func : module->functions) {
+ if (func->stackIR) {
+ return true;
+ }
+ }
+ return false;
+ }
+};
+
+void PassRunner::runPass(Pass* pass) {
+ std::unique_ptr<AfterEffectModuleChecker> checker;
+ if (getPassDebug()) {
+ checker = std::unique_ptr<AfterEffectModuleChecker>(
+ new AfterEffectModuleChecker(wasm));
+ }
+ pass->run(this, wasm);
+ handleAfterEffects(pass);
+ if (getPassDebug()) {
+ checker->check();
+ }
+}
+
void PassRunner::runPassOnFunction(Pass* pass, Function* func) {
assert(pass->isFunctionParallel());
// function-parallel passes get a new instance per function
auto instance = std::unique_ptr<Pass>(pass->create());
+ std::unique_ptr<AfterEffectFunctionChecker> checker;
+ if (getPassDebug()) {
+ checker = std::unique_ptr<AfterEffectFunctionChecker>(
+ new AfterEffectFunctionChecker(func));
+ }
instance->runOnFunction(this, wasm, func);
+ handleAfterEffects(pass, func);
+ if (getPassDebug()) {
+ checker->check();
+ }
+}
+
+void PassRunner::handleAfterEffects(Pass* pass, Function* func) {
+ if (pass->modifiesBinaryenIR()) {
+ // If Binaryen IR is modified, Stack IR must be cleared - it would
+ // be out of sync in a potentially dangerous way.
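+ // (Passes that do not touch function bodies - such as the printer,
+ // metrics, call-graph and memory-packing passes updated in this change -
+ // override modifiesBinaryenIR() to return false so Stack IR survives them.)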
+ if (func) {
+ func->stackIR.reset(nullptr);
+ } else {
+ for (auto& func : wasm->functions) {
+ func->stackIR.reset(nullptr);
+ }
+ }
+ }
}
int PassRunner::getPassDebug() {
diff --git a/src/passes/passes.h b/src/passes/passes.h
index 7a96799b3..1e26dc777 100644
--- a/src/passes/passes.h
+++ b/src/passes/passes.h
@@ -34,6 +34,7 @@ Pass* createFlattenPass();
Pass* createFuncCastEmulationPass();
Pass* createFullPrinterPass();
Pass* createFunctionMetricsPass();
+Pass* createGenerateStackIRPass();
Pass* createI64ToI32LoweringPass();
Pass* createInliningPass();
Pass* createInliningOptimizingPass();
@@ -49,12 +50,14 @@ Pass* createMinifiedPrinterPass();
Pass* createMetricsPass();
Pass* createNameListPass();
Pass* createOptimizeInstructionsPass();
+Pass* createOptimizeStackIRPass();
Pass* createPickLoadSignsPass();
Pass* createPostEmscriptenPass();
Pass* createPrecomputePass();
Pass* createPrecomputePropagatePass();
Pass* createPrinterPass();
Pass* createPrintCallGraphPass();
+Pass* createPrintStackIRPass();
Pass* createRelooperJumpThreadingPass();
Pass* createRemoveNonJSOpsPass();
Pass* createRemoveImportsPass();