-rw-r--r--   src/ir/effects.h           | 629
-rw-r--r--   src/passes/CodePushing.cpp |   4
-rw-r--r--   src/passes/Vacuum.cpp      |   4
3 files changed, 336 insertions(+), 301 deletions(-)
diff --git a/src/ir/effects.h b/src/ir/effects.h
index d788d176d..79d44afd3 100644
--- a/src/ir/effects.h
+++ b/src/ir/effects.h
@@ -25,15 +25,15 @@ namespace wasm {
// Look for side effects, including control flow
// TODO: optimize
-struct EffectAnalyzer
- : public PostWalker<EffectAnalyzer, OverriddenVisitor<EffectAnalyzer>> {
+class EffectAnalyzer {
+public:
EffectAnalyzer(const PassOptions& passOptions,
FeatureSet features,
Expression* ast = nullptr)
: ignoreImplicitTraps(passOptions.ignoreImplicitTraps),
debugInfo(passOptions.debugInfo), features(features) {
if (ast) {
- analyze(ast);
+ walk(ast);
}
}
@@ -41,14 +41,18 @@ struct EffectAnalyzer
bool debugInfo;
FeatureSet features;
- void analyze(Expression* ast) {
- breakTargets.clear();
- walk(ast);
- assert(tryDepth == 0);
+ // Walk an expression and all its children.
+ void walk(Expression* ast) {
+ pre();
+ InternalAnalyzer(*this).walk(ast);
+ post();
+ }
- if (ignoreImplicitTraps) {
- implicitTrap = false;
- }
+ // Visit an expression, without any children.
+ void visit(Expression* ast) {
+ pre();
+ InternalAnalyzer(*this).visit(ast);
+ post();
}
// Core effect tracking
@@ -86,38 +90,6 @@ struct EffectAnalyzer
// body. For example, (drop (exnref.pop)) should set this to true.
bool danglingPop = false;
- static void scan(EffectAnalyzer* self, Expression** currp) {
- Expression* curr = *currp;
- // We need to decrement try depth before catch starts, so handle it
- // separately
- if (curr->is<Try>()) {
- self->pushTask(doVisitTry, currp);
- self->pushTask(doEndCatch, currp);
- self->pushTask(scan, &curr->cast<Try>()->catchBody);
- self->pushTask(doStartCatch, currp);
- self->pushTask(scan, &curr->cast<Try>()->body);
- self->pushTask(doStartTry, currp);
- return;
- }
- PostWalker<EffectAnalyzer, OverriddenVisitor<EffectAnalyzer>>::scan(self,
- currp);
- }
-
- static void doStartTry(EffectAnalyzer* self, Expression** currp) {
- self->tryDepth++;
- }
-
- static void doStartCatch(EffectAnalyzer* self, Expression** currp) {
- assert(self->tryDepth > 0 && "try depth cannot be negative");
- self->tryDepth--;
- self->catchDepth++;
- }
-
- static void doEndCatch(EffectAnalyzer* self, Expression** currp) {
- assert(self->catchDepth > 0 && "catch depth cannot be negative");
- self->catchDepth--;
- }
-
// Helper functions to check for various effect types
bool accessesLocal() const {
@@ -209,7 +181,7 @@ struct EffectAnalyzer
// function, so transfersControlFlow would be true) - while we allow the
// reordering of traps with each other, we do not reorder exceptions with
// anything.
- assert(!(implicitTrap && other.throws) && !(throws && other.implicitTrap));
+ assert(!((implicitTrap && other.throws) || (throws && other.implicitTrap)));
// We can't reorder an implicit trap in a way that could alter what global
// state is modified.
if ((implicitTrap && other.writesGlobalState()) ||
@@ -266,282 +238,334 @@ struct EffectAnalyzer
std::set<Name> breakTargets;
- void visitBlock(Block* curr) {
- if (curr->name.is()) {
- breakTargets.erase(curr->name); // these were internal breaks
+private:
+ struct InternalAnalyzer
+ : public PostWalker<InternalAnalyzer, OverriddenVisitor<InternalAnalyzer>> {
+
+ EffectAnalyzer& parent;
+
+ InternalAnalyzer(EffectAnalyzer& parent) : parent(parent) {}
+
+ static void scan(InternalAnalyzer* self, Expression** currp) {
+ Expression* curr = *currp;
+ // We need to decrement try depth before catch starts, so handle it
+ // separately
+ if (curr->is<Try>()) {
+ self->pushTask(doVisitTry, currp);
+ self->pushTask(doEndCatch, currp);
+ self->pushTask(scan, &curr->cast<Try>()->catchBody);
+ self->pushTask(doStartCatch, currp);
+ self->pushTask(scan, &curr->cast<Try>()->body);
+ self->pushTask(doStartTry, currp);
+ return;
+ }
+ PostWalker<InternalAnalyzer, OverriddenVisitor<InternalAnalyzer>>::scan(
+ self, currp);
}
- }
- void visitIf(If* curr) {}
- void visitLoop(Loop* curr) {
- if (curr->name.is()) {
- breakTargets.erase(curr->name); // these were internal breaks
- }
- // if the loop is unreachable, then there is branching control flow:
- // (1) if the body is unreachable because of a (return), uncaught (br)
- // etc., then we already noted branching, so it is ok to mark it again
- // (if we have *caught* (br)s, then they did not lead to the loop body
- // being unreachable). (same logic applies to blocks)
- // (2) if the loop is unreachable because it only has branches up to the
- // loop top, but no way to get out, then it is an infinite loop, and we
- // consider that a branching side effect (note how the same logic does
- // not apply to blocks).
- if (curr->type == Type::unreachable) {
- branchesOut = true;
+
+ static void doStartTry(InternalAnalyzer* self, Expression** currp) {
+ self->parent.tryDepth++;
}
- }
- void visitBreak(Break* curr) { breakTargets.insert(curr->name); }
- void visitSwitch(Switch* curr) {
- for (auto name : curr->targets) {
- breakTargets.insert(name);
+
+ static void doStartCatch(InternalAnalyzer* self, Expression** currp) {
+ assert(self->parent.tryDepth > 0 && "try depth cannot be negative");
+ self->parent.tryDepth--;
+ self->parent.catchDepth++;
}
- breakTargets.insert(curr->default_);
- }
- void visitCall(Call* curr) {
- calls = true;
- // When EH is enabled, any call can throw.
- if (features.hasExceptionHandling() && tryDepth == 0) {
- throws = true;
+ static void doEndCatch(InternalAnalyzer* self, Expression** currp) {
+ assert(self->parent.catchDepth > 0 && "catch depth cannot be negative");
+ self->parent.catchDepth--;
}
- if (curr->isReturn) {
- branchesOut = true;
+
+ void visitBlock(Block* curr) {
+ if (curr->name.is()) {
+ parent.breakTargets.erase(curr->name); // these were internal breaks
+ }
}
- if (debugInfo) {
- // debugInfo call imports must be preserved very strongly, do not
- // move code around them
- // FIXME: we could check if the call is to an import
- branchesOut = true;
+ void visitIf(If* curr) {}
+ void visitLoop(Loop* curr) {
+ if (curr->name.is()) {
+ parent.breakTargets.erase(curr->name); // these were internal breaks
+ }
+ // if the loop is unreachable, then there is branching control flow:
+ // (1) if the body is unreachable because of a (return), uncaught (br)
+ // etc., then we already noted branching, so it is ok to mark it
+ // again (if we have *caught* (br)s, then they did not lead to the
+ // loop body being unreachable). (same logic applies to blocks)
+ // (2) if the loop is unreachable because it only has branches up to the
+ // loop top, but no way to get out, then it is an infinite loop, and
+ // we consider that a branching side effect (note how the same logic
+ // does not apply to blocks).
+ if (curr->type == Type::unreachable) {
+ parent.branchesOut = true;
+ }
}
- }
- void visitCallIndirect(CallIndirect* curr) {
- calls = true;
- if (features.hasExceptionHandling() && tryDepth == 0) {
- throws = true;
+ void visitBreak(Break* curr) { parent.breakTargets.insert(curr->name); }
+ void visitSwitch(Switch* curr) {
+ for (auto name : curr->targets) {
+ parent.breakTargets.insert(name);
+ }
+ parent.breakTargets.insert(curr->default_);
}
- if (curr->isReturn) {
- branchesOut = true;
+
+ void visitCall(Call* curr) {
+ parent.calls = true;
+ // When EH is enabled, any call can throw.
+ if (parent.features.hasExceptionHandling() && parent.tryDepth == 0) {
+ parent.throws = true;
+ }
+ if (curr->isReturn) {
+ parent.branchesOut = true;
+ }
+ if (parent.debugInfo) {
+ // debugInfo call imports must be preserved very strongly, do not
+ // move code around them
+ // FIXME: we could check if the call is to an import
+ parent.branchesOut = true;
+ }
}
- }
- void visitLocalGet(LocalGet* curr) { localsRead.insert(curr->index); }
- void visitLocalSet(LocalSet* curr) { localsWritten.insert(curr->index); }
- void visitGlobalGet(GlobalGet* curr) { globalsRead.insert(curr->name); }
- void visitGlobalSet(GlobalSet* curr) { globalsWritten.insert(curr->name); }
- void visitLoad(Load* curr) {
- readsMemory = true;
- isAtomic |= curr->isAtomic;
- implicitTrap = true;
- }
- void visitStore(Store* curr) {
- writesMemory = true;
- isAtomic |= curr->isAtomic;
- implicitTrap = true;
- }
- void visitAtomicRMW(AtomicRMW* curr) {
- readsMemory = true;
- writesMemory = true;
- isAtomic = true;
- implicitTrap = true;
- }
- void visitAtomicCmpxchg(AtomicCmpxchg* curr) {
- readsMemory = true;
- writesMemory = true;
- isAtomic = true;
- implicitTrap = true;
- }
- void visitAtomicWait(AtomicWait* curr) {
- readsMemory = true;
- // AtomicWait doesn't strictly write memory, but it does modify the waiters
- // list associated with the specified address, which we can think of as a
- // write.
- writesMemory = true;
- isAtomic = true;
- implicitTrap = true;
- }
- void visitAtomicNotify(AtomicNotify* curr) {
- // AtomicNotify doesn't strictly write memory, but it does modify the
- // waiters list associated with the specified address, which we can think of
- // as a write.
- readsMemory = true;
- writesMemory = true;
- isAtomic = true;
- implicitTrap = true;
- }
- void visitAtomicFence(AtomicFence* curr) {
- // AtomicFence should not be reordered with any memory operations, so we set
- // these to true.
- readsMemory = true;
- writesMemory = true;
- isAtomic = true;
- }
- void visitSIMDExtract(SIMDExtract* curr) {}
- void visitSIMDReplace(SIMDReplace* curr) {}
- void visitSIMDShuffle(SIMDShuffle* curr) {}
- void visitSIMDTernary(SIMDTernary* curr) {}
- void visitSIMDShift(SIMDShift* curr) {}
- void visitSIMDLoad(SIMDLoad* curr) {
- readsMemory = true;
- implicitTrap = true;
- }
- void visitSIMDLoadStoreLane(SIMDLoadStoreLane* curr) {
- if (curr->isLoad()) {
- readsMemory = true;
- } else {
- writesMemory = true;
+ void visitCallIndirect(CallIndirect* curr) {
+ parent.calls = true;
+ if (parent.features.hasExceptionHandling() && parent.tryDepth == 0) {
+ parent.throws = true;
+ }
+ if (curr->isReturn) {
+ parent.branchesOut = true;
+ }
}
- implicitTrap = true;
- }
- void visitMemoryInit(MemoryInit* curr) {
- writesMemory = true;
- implicitTrap = true;
- }
- void visitDataDrop(DataDrop* curr) {
- // data.drop does not actually write memory, but it does alter the size of
- // a segment, which can be observed later by memory.init, so we need to
- // mark it as having a global side effect of some kind.
- writesMemory = true;
- implicitTrap = true;
- }
- void visitMemoryCopy(MemoryCopy* curr) {
- readsMemory = true;
- writesMemory = true;
- implicitTrap = true;
- }
- void visitMemoryFill(MemoryFill* curr) {
- writesMemory = true;
- implicitTrap = true;
- }
- void visitConst(Const* curr) {}
- void visitUnary(Unary* curr) {
- switch (curr->op) {
- case TruncSFloat32ToInt32:
- case TruncSFloat32ToInt64:
- case TruncUFloat32ToInt32:
- case TruncUFloat32ToInt64:
- case TruncSFloat64ToInt32:
- case TruncSFloat64ToInt64:
- case TruncUFloat64ToInt32:
- case TruncUFloat64ToInt64: {
- implicitTrap = true;
- break;
+ void visitLocalGet(LocalGet* curr) {
+ parent.localsRead.insert(curr->index);
+ }
+ void visitLocalSet(LocalSet* curr) {
+ parent.localsWritten.insert(curr->index);
+ }
+ void visitGlobalGet(GlobalGet* curr) {
+ parent.globalsRead.insert(curr->name);
+ }
+ void visitGlobalSet(GlobalSet* curr) {
+ parent.globalsWritten.insert(curr->name);
+ }
+ void visitLoad(Load* curr) {
+ parent.readsMemory = true;
+ parent.isAtomic |= curr->isAtomic;
+ parent.implicitTrap = true;
+ }
+ void visitStore(Store* curr) {
+ parent.writesMemory = true;
+ parent.isAtomic |= curr->isAtomic;
+ parent.implicitTrap = true;
+ }
+ void visitAtomicRMW(AtomicRMW* curr) {
+ parent.readsMemory = true;
+ parent.writesMemory = true;
+ parent.isAtomic = true;
+ parent.implicitTrap = true;
+ }
+ void visitAtomicCmpxchg(AtomicCmpxchg* curr) {
+ parent.readsMemory = true;
+ parent.writesMemory = true;
+ parent.isAtomic = true;
+ parent.implicitTrap = true;
+ }
+ void visitAtomicWait(AtomicWait* curr) {
+ parent.readsMemory = true;
+ // AtomicWait doesn't strictly write memory, but it does modify the
+ // waiters list associated with the specified address, which we can think
+ // of as a write.
+ parent.writesMemory = true;
+ parent.isAtomic = true;
+ parent.implicitTrap = true;
+ }
+ void visitAtomicNotify(AtomicNotify* curr) {
+ // AtomicNotify doesn't strictly write memory, but it does modify the
+ // waiters list associated with the specified address, which we can think
+ // of as a write.
+ parent.readsMemory = true;
+ parent.writesMemory = true;
+ parent.isAtomic = true;
+ parent.implicitTrap = true;
+ }
+ void visitAtomicFence(AtomicFence* curr) {
+ // AtomicFence should not be reordered with any memory operations, so we
+ // set these to true.
+ parent.readsMemory = true;
+ parent.writesMemory = true;
+ parent.isAtomic = true;
+ }
+ void visitSIMDExtract(SIMDExtract* curr) {}
+ void visitSIMDReplace(SIMDReplace* curr) {}
+ void visitSIMDShuffle(SIMDShuffle* curr) {}
+ void visitSIMDTernary(SIMDTernary* curr) {}
+ void visitSIMDShift(SIMDShift* curr) {}
+ void visitSIMDLoad(SIMDLoad* curr) {
+ parent.readsMemory = true;
+ parent.implicitTrap = true;
+ }
+ void visitSIMDLoadStoreLane(SIMDLoadStoreLane* curr) {
+ if (curr->isLoad()) {
+ parent.readsMemory = true;
+ } else {
+ parent.writesMemory = true;
}
- default: {
+ parent.implicitTrap = true;
+ }
+ void visitMemoryInit(MemoryInit* curr) {
+ parent.writesMemory = true;
+ parent.implicitTrap = true;
+ }
+ void visitDataDrop(DataDrop* curr) {
+ // data.drop does not actually write memory, but it does alter the size of
+ // a segment, which can be observed later by memory.init, so we need to
+ // mark it as having a global side effect of some kind.
+ parent.writesMemory = true;
+ parent.implicitTrap = true;
+ }
+ void visitMemoryCopy(MemoryCopy* curr) {
+ parent.readsMemory = true;
+ parent.writesMemory = true;
+ parent.implicitTrap = true;
+ }
+ void visitMemoryFill(MemoryFill* curr) {
+ parent.writesMemory = true;
+ parent.implicitTrap = true;
+ }
+ void visitConst(Const* curr) {}
+ void visitUnary(Unary* curr) {
+ switch (curr->op) {
+ case TruncSFloat32ToInt32:
+ case TruncSFloat32ToInt64:
+ case TruncUFloat32ToInt32:
+ case TruncUFloat32ToInt64:
+ case TruncSFloat64ToInt32:
+ case TruncSFloat64ToInt64:
+ case TruncUFloat64ToInt32:
+ case TruncUFloat64ToInt64: {
+ parent.implicitTrap = true;
+ break;
+ }
+ default: {}
}
}
- }
- void visitBinary(Binary* curr) {
- switch (curr->op) {
- case DivSInt32:
- case DivUInt32:
- case RemSInt32:
- case RemUInt32:
- case DivSInt64:
- case DivUInt64:
- case RemSInt64:
- case RemUInt64: {
- // div and rem may contain an implicit trap only if the RHS is
- // non-constant, or is a constant equal to zero (or to -1 for
- // signed divisions). Remainder traps only on a zero
- // divisor.
- if (auto* c = curr->right->dynCast<Const>()) {
- if (c->value.isZero()) {
- implicitTrap = true;
- } else if ((curr->op == DivSInt32 || curr->op == DivSInt64) &&
- c->value.getInteger() == -1LL) {
- implicitTrap = true;
+ void visitBinary(Binary* curr) {
+ switch (curr->op) {
+ case DivSInt32:
+ case DivUInt32:
+ case RemSInt32:
+ case RemUInt32:
+ case DivSInt64:
+ case DivUInt64:
+ case RemSInt64:
+ case RemUInt64: {
+ // div and rem may contain an implicit trap only if the RHS is
+ // non-constant, or is a constant equal to zero (or to -1 for
+ // signed divisions). Remainder traps only on a zero
+ // divisor.
+ if (auto* c = curr->right->dynCast<Const>()) {
+ if (c->value.isZero()) {
+ parent.implicitTrap = true;
+ } else if ((curr->op == DivSInt32 || curr->op == DivSInt64) &&
+ c->value.getInteger() == -1LL) {
+ parent.implicitTrap = true;
+ }
+ } else {
+ parent.implicitTrap = true;
}
- } else {
- implicitTrap = true;
+ break;
}
- break;
+ default: {}
}
- default: {
+ }
+ void visitSelect(Select* curr) {}
+ void visitDrop(Drop* curr) {}
+ void visitReturn(Return* curr) { parent.branchesOut = true; }
+ void visitMemorySize(MemorySize* curr) {
+ // memory.size accesses the size of the memory, and thus can be modeled as
+ // reading memory
+ parent.readsMemory = true;
+ // Atomics are sequentially consistent with memory.size.
+ parent.isAtomic = true;
+ }
+ void visitMemoryGrow(MemoryGrow* curr) {
+ parent.calls = true;
+ // memory.grow technically does a read-modify-write operation on the
+ // memory size in the successful case, modifying the set of valid
+ // addresses, and just a read operation in the failure case
+ parent.readsMemory = true;
+ parent.writesMemory = true;
+ // Atomics are also sequentially consistent with memory.grow.
+ parent.isAtomic = true;
+ }
+ void visitRefNull(RefNull* curr) {}
+ void visitRefIsNull(RefIsNull* curr) {}
+ void visitRefFunc(RefFunc* curr) {}
+ void visitRefEq(RefEq* curr) {}
+ void visitTry(Try* curr) {}
+ void visitThrow(Throw* curr) {
+ if (parent.tryDepth == 0) {
+ parent.throws = true;
}
}
- }
- void visitSelect(Select* curr) {}
- void visitDrop(Drop* curr) {}
- void visitReturn(Return* curr) { branchesOut = true; }
- void visitMemorySize(MemorySize* curr) {
- // memory.size accesses the size of the memory, and thus can be modeled as
- // reading memory
- readsMemory = true;
- // Atomics are sequentially consistent with memory.size.
- isAtomic = true;
- }
- void visitMemoryGrow(MemoryGrow* curr) {
- calls = true;
- // memory.grow technically does a read-modify-write operation on the memory
- // size in the successful case, modifying the set of valid addresses, and
- // just a read operation in the failure case
- readsMemory = true;
- writesMemory = true;
- // Atomics are also sequentially consistent with memory.grow.
- isAtomic = true;
- }
- void visitRefNull(RefNull* curr) {}
- void visitRefIsNull(RefIsNull* curr) {}
- void visitRefFunc(RefFunc* curr) {}
- void visitRefEq(RefEq* curr) {}
- void visitTry(Try* curr) {}
- void visitThrow(Throw* curr) {
- if (tryDepth == 0) {
- throws = true;
+ void visitRethrow(Rethrow* curr) {
+ if (parent.tryDepth == 0) {
+ parent.throws = true;
+ }
+ // rethrow traps when the arg is null
+ parent.implicitTrap = true;
+ }
+ void visitBrOnExn(BrOnExn* curr) {
+ parent.breakTargets.insert(curr->name);
+ // br_on_exn traps when the arg is null
+ parent.implicitTrap = true;
+ }
+ void visitNop(Nop* curr) {}
+ void visitUnreachable(Unreachable* curr) { parent.branchesOut = true; }
+ void visitPop(Pop* curr) {
+ if (parent.catchDepth == 0) {
+ parent.danglingPop = true;
+ }
}
- }
- void visitRethrow(Rethrow* curr) {
- if (tryDepth == 0) {
- throws = true;
+ void visitTupleMake(TupleMake* curr) {}
+ void visitTupleExtract(TupleExtract* curr) {}
+ void visitI31New(I31New* curr) {}
+ void visitI31Get(I31Get* curr) {}
+ void visitRefTest(RefTest* curr) {
+ WASM_UNREACHABLE("TODO (gc): ref.test");
}
- // rethrow traps when the arg is null
- implicitTrap = true;
- }
- void visitBrOnExn(BrOnExn* curr) {
- breakTargets.insert(curr->name);
- // br_on_exn traps when the arg is null
- implicitTrap = true;
- }
- void visitNop(Nop* curr) {}
- void visitUnreachable(Unreachable* curr) { branchesOut = true; }
- void visitPop(Pop* curr) {
- if (catchDepth == 0) {
- danglingPop = true;
+ void visitRefCast(RefCast* curr) {
+ WASM_UNREACHABLE("TODO (gc): ref.cast");
}
- }
- void visitTupleMake(TupleMake* curr) {}
- void visitTupleExtract(TupleExtract* curr) {}
- void visitI31New(I31New* curr) {}
- void visitI31Get(I31Get* curr) {}
- void visitRefTest(RefTest* curr) { WASM_UNREACHABLE("TODO (gc): ref.test"); }
- void visitRefCast(RefCast* curr) { WASM_UNREACHABLE("TODO (gc): ref.cast"); }
- void visitBrOnCast(BrOnCast* curr) {
- WASM_UNREACHABLE("TODO (gc): br_on_cast");
- }
- void visitRttCanon(RttCanon* curr) {
- WASM_UNREACHABLE("TODO (gc): rtt.canon");
- }
- void visitRttSub(RttSub* curr) { WASM_UNREACHABLE("TODO (gc): rtt.sub"); }
- void visitStructNew(StructNew* curr) {
- WASM_UNREACHABLE("TODO (gc): struct.new");
- }
- void visitStructGet(StructGet* curr) {
- WASM_UNREACHABLE("TODO (gc): struct.get");
- }
- void visitStructSet(StructSet* curr) {
- WASM_UNREACHABLE("TODO (gc): struct.set");
- }
- void visitArrayNew(ArrayNew* curr) {
- WASM_UNREACHABLE("TODO (gc): array.new");
- }
- void visitArrayGet(ArrayGet* curr) {
- WASM_UNREACHABLE("TODO (gc): array.get");
- }
- void visitArraySet(ArraySet* curr) {
- WASM_UNREACHABLE("TODO (gc): array.set");
- }
- void visitArrayLen(ArrayLen* curr) {
- WASM_UNREACHABLE("TODO (gc): array.len");
- }
+ void visitBrOnCast(BrOnCast* curr) {
+ WASM_UNREACHABLE("TODO (gc): br_on_cast");
+ }
+ void visitRttCanon(RttCanon* curr) {
+ WASM_UNREACHABLE("TODO (gc): rtt.canon");
+ }
+ void visitRttSub(RttSub* curr) { WASM_UNREACHABLE("TODO (gc): rtt.sub"); }
+ void visitStructNew(StructNew* curr) {
+ WASM_UNREACHABLE("TODO (gc): struct.new");
+ }
+ void visitStructGet(StructGet* curr) {
+ WASM_UNREACHABLE("TODO (gc): struct.get");
+ }
+ void visitStructSet(StructSet* curr) {
+ WASM_UNREACHABLE("TODO (gc): struct.set");
+ }
+ void visitArrayNew(ArrayNew* curr) {
+ WASM_UNREACHABLE("TODO (gc): array.new");
+ }
+ void visitArrayGet(ArrayGet* curr) {
+ WASM_UNREACHABLE("TODO (gc): array.get");
+ }
+ void visitArraySet(ArraySet* curr) {
+ WASM_UNREACHABLE("TODO (gc): array.set");
+ }
+ void visitArrayLen(ArrayLen* curr) {
+ WASM_UNREACHABLE("TODO (gc): array.len");
+ }
+ };
+public:
// Helpers
static bool canReorder(const PassOptions& passOptions,
@@ -616,6 +640,17 @@ struct EffectAnalyzer
branchesOut = false;
breakTargets.clear();
}
+
+private:
+ void pre() { breakTargets.clear(); }
+
+ void post() {
+ assert(tryDepth == 0);
+
+ if (ignoreImplicitTraps) {
+ implicitTrap = false;
+ }
+ }
};
} // namespace wasm
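The effects.h changes above turn EffectAnalyzer into a plain class that owns a private InternalAnalyzer walker. Callers now go through walk() (an expression and all of its children) or visit() (a single node, children ignored); both wrap the traversal in pre()/post(), so breakTargets is cleared up front and the tryDepth assertion and ignoreImplicitTraps handling always run. A minimal sketch of the two entry points from a caller's perspective (the helper functions and surrounding setup are illustrative, not part of this commit):

    #include "ir/effects.h"

    using namespace wasm;

    // Whole-tree analysis: the replacement for the old analyze() call.
    static bool treeHasSideEffects(const PassOptions& passOptions,
                                   FeatureSet features,
                                   Expression* expr) {
      EffectAnalyzer effects(passOptions, features);
      effects.walk(expr); // expr and all of its children
      return effects.hasSideEffects();
    }

    // Shallow analysis of a single node; the replacement for calling
    // visitUnary()/visitBinary() on the analyzer directly.
    static bool nodeHasSideEffects(const PassOptions& passOptions,
                                   FeatureSet features,
                                   Expression* expr) {
      EffectAnalyzer effects(passOptions, features);
      effects.visit(expr); // expr only, children are ignored
      return effects.hasSideEffects();
    }
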
diff --git a/src/passes/CodePushing.cpp b/src/passes/CodePushing.cpp
index 7af192838..55c955539 100644
--- a/src/passes/CodePushing.cpp
+++ b/src/passes/CodePushing.cpp
@@ -160,7 +160,7 @@ private:
firstPushable < pushPoint);
// everything that matters if you want to be pushed past the pushPoint
EffectAnalyzer cumulativeEffects(passOptions, features);
- cumulativeEffects.analyze(list[pushPoint]);
+ cumulativeEffects.walk(list[pushPoint]);
// it is ok to ignore the branching here, that is the crucial point of this
// opt
// TODO: it would be ok to ignore thrown exceptions here, if we know they
@@ -194,7 +194,7 @@ private:
}
} else {
// something that can't be pushed, so it might block further pushing
- cumulativeEffects.analyze(list[i]);
+ cumulativeEffects.walk(list[i]);
}
assert(i > 0);
i--;
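CodePushing deliberately reuses one analyzer: each walk() call clears only breakTargets (via pre()), matching the old analyze(), so the effect flags accumulate across calls. That is what the cumulativeEffects object above relies on. A sketch of the accumulation pattern, with a hypothetical list of blocking expressions:

    // Combine the effects of several expressions into one analyzer.
    // `blockers` is a hypothetical container of Expression* values.
    EffectAnalyzer cumulativeEffects(passOptions, features);
    for (Expression* item : blockers) {
      cumulativeEffects.walk(item); // flags accumulate across calls
    }
    // cumulativeEffects now describes everything a pushed instruction
    // would have to be reordered past.
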
diff --git a/src/passes/Vacuum.cpp b/src/passes/Vacuum.cpp
index fcfa944de..e0c00e0da 100644
--- a/src/passes/Vacuum.cpp
+++ b/src/passes/Vacuum.cpp
@@ -116,7 +116,7 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
// binaries have implicit traps
if (auto* unary = curr->dynCast<Unary>()) {
EffectAnalyzer tester(getPassOptions(), features);
- tester.visitUnary(unary);
+ tester.visit(unary);
if (tester.hasSideEffects()) {
return curr;
}
@@ -129,7 +129,7 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
}
} else if (auto* binary = curr->dynCast<Binary>()) {
EffectAnalyzer tester(getPassOptions(), features);
- tester.visitBinary(binary);
+ tester.visit(binary);
if (tester.hasSideEffects()) {
return curr;
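
The Vacuum hunks are the motivating case for visit(): the pass only needs to know whether a single Unary or Binary can trap by itself, and with the visitor methods now private it can no longer call visitUnary()/visitBinary() directly. One subtle consequence is that visit() also runs post(), so ignoreImplicitTraps is now applied to these shallow checks, which the old direct calls skipped. A small sketch of that difference (the options and the unary expression are illustrative):

    PassOptions options;                 // illustrative options
    options.ignoreImplicitTraps = true;
    FeatureSet features = FeatureSet::MVP;
    EffectAnalyzer tester(options, features);
    tester.visit(unary);                 // e.g. an i32.trunc_f32_s Unary*
    // post() cleared implicitTrap, so hasSideEffects() can report false here,
    // whereas the old direct visitUnary() call never applied that option.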
}