summaryrefslogtreecommitdiff
path: root/src/passes
diff options
context:
space:
mode:
authorAlon Zakai <azakai@google.com>2019-04-26 16:59:41 -0700
committerGitHub <noreply@github.com>2019-04-26 16:59:41 -0700
commitdb9124f1de0478dcac525009b6f1589b44a7edd8 (patch)
treefa26395a0f6cca53cf5cb6e10189f989c5bfa847 /src/passes
parent87636dccd404a340d75acb1d96301581343f29ca (diff)
downloadbinaryen-db9124f1de0478dcac525009b6f1589b44a7edd8.tar.gz
binaryen-db9124f1de0478dcac525009b6f1589b44a7edd8.tar.bz2
binaryen-db9124f1de0478dcac525009b6f1589b44a7edd8.zip
Apply format changes from #2048 (#2059)
Mass change to apply clang-format to everything. We are applying this in a PR by me so the (git) blame is all mine ;) but @aheejin did all the work to get clang-format set up and all the manual work to tidy up some things to make the output nicer in #2048
Diffstat (limited to 'src/passes')
-rw-r--r--src/passes/CoalesceLocals.cpp186
-rw-r--r--src/passes/CodeFolding.cpp253
-rw-r--r--src/passes/CodePushing.cpp87
-rw-r--r--src/passes/ConstHoisting.cpp22
-rw-r--r--src/passes/DataFlowOpts.cpp41
-rw-r--r--src/passes/DeadArgumentElimination.cpp85
-rw-r--r--src/passes/DeadCodeElimination.cpp228
-rw-r--r--src/passes/Directize.cpp40
-rw-r--r--src/passes/DuplicateFunctionElimination.cpp45
-rw-r--r--src/passes/ExtractFunction.cpp11
-rw-r--r--src/passes/Flatten.cpp42
-rw-r--r--src/passes/FuncCastEmulation.cpp52
-rw-r--r--src/passes/I64ToI32Lowering.cpp1170
-rw-r--r--src/passes/Inlining.cpp113
-rw-r--r--src/passes/InstrumentLocals.cpp108
-rw-r--r--src/passes/InstrumentMemory.cpp138
-rw-r--r--src/passes/LegalizeJSInterface.cpp86
-rw-r--r--src/passes/LimitSegments.cpp7
-rw-r--r--src/passes/LocalCSE.cpp23
-rw-r--r--src/passes/LogExecution.cpp32
-rw-r--r--src/passes/LoopInvariantCodeMotion.cpp35
-rw-r--r--src/passes/MemoryPacking.cpp16
-rw-r--r--src/passes/MergeBlocks.cpp138
-rw-r--r--src/passes/MergeLocals.cpp38
-rw-r--r--src/passes/Metrics.cpp52
-rw-r--r--src/passes/MinifyImportsAndExports.cpp28
-rw-r--r--src/passes/NameList.cpp12
-rw-r--r--src/passes/NoExitRuntime.cpp24
-rw-r--r--src/passes/OptimizeAddedConstants.cpp114
-rw-r--r--src/passes/OptimizeInstructions.cpp603
-rw-r--r--src/passes/PickLoadSigns.cpp26
-rw-r--r--src/passes/PostEmscripten.cpp21
-rw-r--r--src/passes/Precompute.cpp149
-rw-r--r--src/passes/Print.cpp1383
-rw-r--r--src/passes/PrintCallGraph.cpp45
-rw-r--r--src/passes/PrintFeatures.cpp8
-rw-r--r--src/passes/ReReloop.cpp96
-rw-r--r--src/passes/RedundantSetElimination.cpp43
-rw-r--r--src/passes/RelooperJumpThreading.cpp109
-rw-r--r--src/passes/RemoveImports.cpp17
-rw-r--r--src/passes/RemoveMemory.cpp6
-rw-r--r--src/passes/RemoveNonJSOps.cpp76
-rw-r--r--src/passes/RemoveUnusedBrs.cpp581
-rw-r--r--src/passes/RemoveUnusedModuleElements.cpp113
-rw-r--r--src/passes/RemoveUnusedNames.cpp32
-rw-r--r--src/passes/ReorderFunctions.cpp38
-rw-r--r--src/passes/ReorderLocals.cpp50
-rw-r--r--src/passes/SSAify.cpp51
-rw-r--r--src/passes/SafeHeap.cpp205
-rw-r--r--src/passes/SimplifyLocals.cpp260
-rw-r--r--src/passes/Souperify.cpp193
-rw-r--r--src/passes/SpillPointers.cpp82
-rw-r--r--src/passes/StackIR.cpp75
-rw-r--r--src/passes/Strip.cpp23
-rw-r--r--src/passes/StripTargetFeatures.cpp4
-rw-r--r--src/passes/TrapMode.cpp237
-rw-r--r--src/passes/Untee.cpp17
-rw-r--r--src/passes/Vacuum.cpp142
-rw-r--r--src/passes/intrinsics-module.h1
-rw-r--r--src/passes/opt-utils.h9
-rw-r--r--src/passes/pass.cpp391
-rw-r--r--src/passes/passes.h2
62 files changed, 4807 insertions, 3507 deletions
diff --git a/src/passes/CoalesceLocals.cpp b/src/passes/CoalesceLocals.cpp
index 621383ca4..a085b61fb 100644
--- a/src/passes/CoalesceLocals.cpp
+++ b/src/passes/CoalesceLocals.cpp
@@ -14,32 +14,31 @@
* limitations under the License.
*/
-
//
// Coalesce locals, in order to reduce the total number of locals. This
// is similar to register allocation, however, there is never any
// spilling, and there isn't a fixed number of locals.
//
-
#include <algorithm>
#include <memory>
#include <unordered_set>
-#include "wasm.h"
-#include "pass.h"
-#include "ir/utils.h"
#include "cfg/liveness-traversal.h"
-#include "wasm-builder.h"
+#include "ir/utils.h"
+#include "pass.h"
#include "support/learning.h"
#include "support/permutations.h"
+#include "wasm-builder.h"
+#include "wasm.h"
#ifdef CFG_PROFILE
#include "support/timing.h"
#endif
namespace wasm {
-struct CoalesceLocals : public WalkerPass<LivenessWalker<CoalesceLocals, Visitor<CoalesceLocals>>> {
+struct CoalesceLocals
+ : public WalkerPass<LivenessWalker<CoalesceLocals, Visitor<CoalesceLocals>>> {
bool isFunctionParallel() override { return true; }
Pass* create() override { return new CoalesceLocals; }
@@ -54,23 +53,30 @@ struct CoalesceLocals : public WalkerPass<LivenessWalker<CoalesceLocals, Visitor
void calculateInterferences(const LocalSet& locals);
- void pickIndicesFromOrder(std::vector<Index>& order, std::vector<Index>& indices);
- void pickIndicesFromOrder(std::vector<Index>& order, std::vector<Index>& indices, Index& removedCopies);
+ void pickIndicesFromOrder(std::vector<Index>& order,
+ std::vector<Index>& indices);
+ void pickIndicesFromOrder(std::vector<Index>& order,
+ std::vector<Index>& indices,
+ Index& removedCopies);
- virtual void pickIndices(std::vector<Index>& indices); // returns a vector of oldIndex => newIndex
+ // returns a vector of oldIndex => newIndex
+ virtual void pickIndices(std::vector<Index>& indices);
void applyIndices(std::vector<Index>& indices, Expression* root);
// interference state
- std::vector<bool> interferences; // canonicalized - accesses should check (low, high)
+ // canonicalized - accesses should check (low, high)
+ std::vector<bool> interferences;
void interfere(Index i, Index j) {
- if (i == j) return;
+ if (i == j)
+ return;
interferences[std::min(i, j) * numLocals + std::max(i, j)] = 1;
}
- void interfereLowHigh(Index low, Index high) { // optimized version where you know that low < high
+ // optimized version where you know that low < high
+ void interfereLowHigh(Index low, Index high) {
assert(low < high);
interferences[low * numLocals + high] = 1;
}
@@ -97,20 +103,25 @@ void CoalesceLocals::doWalkFunction(Function* func) {
applyIndices(indices, func->body);
}
-// A copy on a backedge can be especially costly, forcing us to branch just to do that copy.
-// Add weight to such copies, so we prioritize getting rid of them.
+// A copy on a backedge can be especially costly, forcing us to branch just to
+// do that copy. Add weight to such copies, so we prioritize getting rid of
+// them.
void CoalesceLocals::increaseBackEdgePriorities() {
for (auto* loopTop : loopTops) {
// ignore the first edge, it is the initial entry, we just want backedges
auto& in = loopTop->in;
for (Index i = 1; i < in.size(); i++) {
auto* arrivingBlock = in[i];
- if (arrivingBlock->out.size() > 1) continue; // we just want unconditional branches to the loop top, true phi fragments
+ if (arrivingBlock->out.size() > 1)
+ // we just want unconditional branches to the loop top, true phi
+ // fragments
+ continue;
for (auto& action : arrivingBlock->contents.actions) {
if (action.isSet()) {
auto* set = (*action.origin)->cast<SetLocal>();
if (auto* get = getCopy(set)) {
- // this is indeed a copy, add to the cost (default cost is 2, so this adds 50%, and can mostly break ties)
+ // this is indeed a copy, add to the cost (default cost is 2, so
+ // this adds 50%, and can mostly break ties)
addCopy(set->index, get->index);
}
}
@@ -123,8 +134,10 @@ void CoalesceLocals::calculateInterferences() {
interferences.resize(numLocals * numLocals);
std::fill(interferences.begin(), interferences.end(), false);
for (auto& curr : basicBlocks) {
- if (liveBlocks.count(curr.get()) == 0) continue; // ignore dead blocks
- // everything coming in might interfere, as it might come from a different block
+ if (liveBlocks.count(curr.get()) == 0)
+ continue; // ignore dead blocks
+ // everything coming in might interfere, as it might come from a different
+ // block
auto live = curr->contents.end;
calculateInterferences(live);
// scan through the block itself
@@ -166,18 +179,22 @@ void CoalesceLocals::calculateInterferences(const LocalSet& locals) {
// Indices decision making
-void CoalesceLocals::pickIndicesFromOrder(std::vector<Index>& order, std::vector<Index>& indices) {
+void CoalesceLocals::pickIndicesFromOrder(std::vector<Index>& order,
+ std::vector<Index>& indices) {
Index removedCopies;
pickIndicesFromOrder(order, indices, removedCopies);
}
-void CoalesceLocals::pickIndicesFromOrder(std::vector<Index>& order, std::vector<Index>& indices, Index& removedCopies) {
- // mostly-simple greedy coloring
+void CoalesceLocals::pickIndicesFromOrder(std::vector<Index>& order,
+ std::vector<Index>& indices,
+ Index& removedCopies) {
+// mostly-simple greedy coloring
#if CFG_DEBUG
std::cerr << "\npickIndicesFromOrder on " << getFunction()->name << '\n';
std::cerr << getFunction()->body << '\n';
std::cerr << "order:\n";
- for (auto i : order) std::cerr << i << ' ';
+ for (auto i : order)
+ std::cerr << i << ' ';
std::cerr << '\n';
std::cerr << "interferences:\n";
for (Index i = 0; i < numLocals; i++) {
@@ -204,16 +221,20 @@ void CoalesceLocals::pickIndicesFromOrder(std::vector<Index>& order, std::vector
std::cerr << " $" << i << ": " << totalCopies[i] << '\n';
}
#endif
- // TODO: take into account distribution (99-1 is better than 50-50 with two registers, for gzip)
+ // TODO: take into account distribution (99-1 is better than 50-50 with two
+ // registers, for gzip)
std::vector<Type> types;
- std::vector<bool> newInterferences; // new index * numLocals => list of all interferences of locals merged to it
- std::vector<uint8_t> newCopies; // new index * numLocals => list of all copies of locals merged to it
+ // new index * numLocals => list of all interferences of locals merged to it
+ std::vector<bool> newInterferences;
+ // new index * numLocals => list of all copies of locals merged to it
+ std::vector<uint8_t> newCopies;
indices.resize(numLocals);
types.resize(numLocals);
newInterferences.resize(numLocals * numLocals);
std::fill(newInterferences.begin(), newInterferences.end(), false);
auto numParams = getFunction()->getNumParams();
- newCopies.resize(numParams * numLocals); // start with enough room for the params
+ // start with enough room for the params
+ newCopies.resize(numParams * numLocals);
std::fill(newCopies.begin(), newCopies.end(), 0);
Index nextFree = 0;
removedCopies = 0;
@@ -234,9 +255,12 @@ void CoalesceLocals::pickIndicesFromOrder(std::vector<Index>& order, std::vector
Index found = -1;
uint8_t foundCopies = -1;
for (Index j = 0; j < nextFree; j++) {
- if (!newInterferences[j * numLocals + actual] && getFunction()->getLocalType(actual) == types[j]) {
- // this does not interfere, so it might be what we want. but pick the one eliminating the most copies
- // (we could stop looking forward when there are no more items that have copies anyhow, but it doesn't seem to help)
+ if (!newInterferences[j * numLocals + actual] &&
+ getFunction()->getLocalType(actual) == types[j]) {
+ // this does not interfere, so it might be what we want. but pick the
+ // one eliminating the most copies (we could stop looking forward when
+ // there are no more items that have copies anyhow, but it doesn't seem
+ // to help)
auto currCopies = newCopies[j * numLocals + actual];
if (found == Index(-1) || currCopies > foundCopies) {
indices[actual] = found = j;
@@ -258,46 +282,53 @@ void CoalesceLocals::pickIndicesFromOrder(std::vector<Index>& order, std::vector
#endif
// merge new interferences and copies for the new index
for (Index k = i + 1; k < numLocals; k++) {
- auto j = order[k]; // go in the order, we only need to update for those we will see later
- newInterferences[found * numLocals + j] = newInterferences[found * numLocals + j] | interferes(actual, j);
+ // go in the order, we only need to update for those we will see later
+ auto j = order[k];
+ newInterferences[found * numLocals + j] =
+ newInterferences[found * numLocals + j] | interferes(actual, j);
newCopies[found * numLocals + j] += getCopies(actual, j);
}
}
}
-// given a baseline order, adjust it based on an important order of priorities (higher values
-// are higher priority). The priorities take precedence, unless they are equal and then
-// the original order should be kept.
-std::vector<Index> adjustOrderByPriorities(std::vector<Index>& baseline, std::vector<Index>& priorities) {
+// given a baseline order, adjust it based on an important order of priorities
+// (higher values are higher priority). The priorities take precedence, unless
+// they are equal and then the original order should be kept.
+std::vector<Index> adjustOrderByPriorities(std::vector<Index>& baseline,
+ std::vector<Index>& priorities) {
std::vector<Index> ret = baseline;
std::vector<Index> reversed = makeReversed(baseline);
std::sort(ret.begin(), ret.end(), [&priorities, &reversed](Index x, Index y) {
- return priorities[x] > priorities[y] || (priorities[x] == priorities[y] && reversed[x] < reversed[y]);
+ return priorities[x] > priorities[y] ||
+ (priorities[x] == priorities[y] && reversed[x] < reversed[y]);
});
return ret;
}
void CoalesceLocals::pickIndices(std::vector<Index>& indices) {
- if (numLocals == 0) return;
+ if (numLocals == 0)
+ return;
if (numLocals == 1) {
indices.push_back(0);
return;
}
- // take into account total copies. but we must keep params in place, so give them max priority
+ // take into account total copies. but we must keep params in place, so give
+ // them max priority
auto adjustedTotalCopies = totalCopies;
auto numParams = getFunction()->getNumParams();
for (Index i = 0; i < numParams; i++) {
adjustedTotalCopies[i] = std::numeric_limits<Index>::max();
}
- // first try the natural order. this is less arbitrary than it seems, as the program
- // may have a natural order of locals inherent in it.
+ // first try the natural order. this is less arbitrary than it seems, as the
+ // program may have a natural order of locals inherent in it.
auto order = makeIdentity(numLocals);
order = adjustOrderByPriorities(order, adjustedTotalCopies);
Index removedCopies;
pickIndicesFromOrder(order, indices, removedCopies);
auto maxIndex = *std::max_element(indices.begin(), indices.end());
- // next try the reverse order. this both gives us another chance at something good,
- // and also the very naturalness of the simple order may be quite suboptimal
+ // next try the reverse order. this both gives us another chance at something
+ // good, and also the very naturalness of the simple order may be quite
+ // suboptimal
setIdentity(order);
for (Index i = numParams; i < numLocals; i++) {
order[i] = numParams + numLocals - 1 - i;
@@ -306,15 +337,18 @@ void CoalesceLocals::pickIndices(std::vector<Index>& indices) {
std::vector<Index> reverseIndices;
Index reverseRemovedCopies;
pickIndicesFromOrder(order, reverseIndices, reverseRemovedCopies);
- auto reverseMaxIndex = *std::max_element(reverseIndices.begin(), reverseIndices.end());
- // prefer to remove copies foremost, as it matters more for code size (minus gzip), and
- // improves throughput.
- if (reverseRemovedCopies > removedCopies || (reverseRemovedCopies == removedCopies && reverseMaxIndex < maxIndex)) {
+ auto reverseMaxIndex =
+ *std::max_element(reverseIndices.begin(), reverseIndices.end());
+ // prefer to remove copies foremost, as it matters more for code size (minus
+ // gzip), and improves throughput.
+ if (reverseRemovedCopies > removedCopies ||
+ (reverseRemovedCopies == removedCopies && reverseMaxIndex < maxIndex)) {
indices.swap(reverseIndices);
}
}
-void CoalesceLocals::applyIndices(std::vector<Index>& indices, Expression* root) {
+void CoalesceLocals::applyIndices(std::vector<Index>& indices,
+ Expression* root) {
assert(indices.size() == numLocals);
for (auto& curr : basicBlocks) {
auto& actions = curr->contents.actions;
@@ -325,15 +359,19 @@ void CoalesceLocals::applyIndices(std::vector<Index>& indices, Expression* root)
} else if (action.isSet()) {
auto* set = (*action.origin)->cast<SetLocal>();
set->index = indices[set->index];
- // in addition, we can optimize out redundant copies and ineffective sets
+ // in addition, we can optimize out redundant copies and ineffective
+ // sets
GetLocal* get;
- if ((get = set->value->dynCast<GetLocal>()) && get->index == set->index) {
+ if ((get = set->value->dynCast<GetLocal>()) &&
+ get->index == set->index) {
action.removeCopy();
continue;
}
// remove ineffective actions
if (!action.effective) {
- *action.origin = set->value; // value may have no side effects, further optimizations can eliminate it
+ // value may have no side effects, further optimizations can eliminate
+ // it
+ *action.origin = set->value;
if (!set->isTee()) {
// we need to drop it
Drop* drop = ExpressionManipulator::convert<SetLocal, Drop>(set);
@@ -382,10 +420,12 @@ void CoalesceLocalsWithLearning::pickIndices(std::vector<Index>& indices) {
double getFitness() { return fitness; }
void dump(std::string text) {
std::cout << text + ": ( ";
- for (Index i = 0; i < size(); i++) std::cout << (*this)[i] << " ";
+ for (Index i = 0; i < size(); i++)
+ std::cout << (*this)[i] << " ";
std::cout << ")\n";
std::cout << "of quality: " << getFitness() << "\n";
}
+
private:
double fitness;
};
@@ -405,9 +445,11 @@ void CoalesceLocalsWithLearning::pickIndices(std::vector<Index>& indices) {
// secondarily, it is nice to not reorder locals unnecessarily
double fragment = 1.0 / (2.0 * parent->numLocals);
for (Index i = 0; i < parent->numLocals; i++) {
- if ((*order)[i] == i) fitness += fragment; // boost for each that wasn't moved
+ if ((*order)[i] == i)
+ fitness += fragment; // boost for each that wasn't moved
}
- fitness = (100 * fitness) + removedCopies; // removing copies is a secondary concern
+ // removing copies is a secondary concern
+ fitness = (100 * fitness) + removedCopies;
order->setFitness(fitness);
}
@@ -418,16 +460,19 @@ void CoalesceLocalsWithLearning::pickIndices(std::vector<Index>& indices) {
(*ret)[i] = i;
}
if (first) {
- // as the first guess, use the natural order. this is not arbitrary for two reasons.
- // first, there may be an inherent order in the input (frequent indices are lower,
- // etc.). second, by ensuring we start with the natural order, we ensure we are at
- // least as good as the non-learning variant.
- // TODO: use ::pickIndices from the parent, so we literally get the simpler approach
- // as our first option
+ // as the first guess, use the natural order. this is not arbitrary for
+ // two reasons. first, there may be an inherent order in the input
+ // (frequent indices are lower, etc.). second, by ensuring we start with
+ // the natural order, we ensure we are at least as good as the
+ // non-learning variant.
+ // TODO: use ::pickIndices from the parent, so we literally get the
+ // simpler approach as our first option
first = false;
} else {
// leave params alone, shuffle the rest
- std::shuffle(ret->begin() + parent->getFunction()->getNumParams(), ret->end(), noise);
+ std::shuffle(ret->begin() + parent->getFunction()->getNumParams(),
+ ret->end(),
+ noise);
}
calculateFitness(ret);
#ifdef CFG_LEARN_DEBUG
@@ -455,7 +500,9 @@ void CoalesceLocalsWithLearning::pickIndices(std::vector<Index>& indices) {
// if (i, i + 1) is in reverse order in right, flip them
if (reverseRight[(*ret)[i]] > reverseRight[(*ret)[i + 1]]) {
std::swap((*ret)[i], (*ret)[i + 1]);
- i++; // if we don't skip, we might end up pushing an element all the way to the end, which is not very perturbation-y
+ // if we don't skip, we might end up pushing an element all the way to
+ // the end, which is not very perturbation-y
+ i++;
}
}
calculateFitness(ret);
@@ -475,7 +522,8 @@ void CoalesceLocalsWithLearning::pickIndices(std::vector<Index>& indices) {
std::cout << "[learning for " << getFunction()->name << "]\n";
#endif
auto numVars = this->getFunction()->getNumVars();
- const int GENERATION_SIZE = std::min(Index(numVars * (numVars - 1)), Index(20));
+ const int GENERATION_SIZE =
+ std::min(Index(numVars * (numVars - 1)), Index(20));
Generator generator(this);
GeneticLearner<Order, double, Generator> learner(generator, GENERATION_SIZE);
#ifdef CFG_LEARN_DEBUG
@@ -486,7 +534,8 @@ void CoalesceLocalsWithLearning::pickIndices(std::vector<Index>& indices) {
while (1) {
learner.runGeneration();
auto newBest = learner.getBest()->getFitness();
- if (newBest == oldBest) break; // unlikely we can improve
+ if (newBest == oldBest)
+ break; // unlikely we can improve
oldBest = newBest;
#ifdef CFG_LEARN_DEBUG
learner.getBest()->dump("current best");
@@ -495,16 +544,15 @@ void CoalesceLocalsWithLearning::pickIndices(std::vector<Index>& indices) {
#ifdef CFG_LEARN_DEBUG
learner.getBest()->dump("the best");
#endif
- this->pickIndicesFromOrder(*learner.getBest(), indices); // TODO: cache indices in Orders, at the cost of more memory?
+ // TODO: cache indices in Orders, at the cost of more memory?
+ this->pickIndicesFromOrder(*learner.getBest(), indices);
}
// declare passes
-Pass *createCoalesceLocalsPass() {
- return new CoalesceLocals();
-}
+Pass* createCoalesceLocalsPass() { return new CoalesceLocals(); }
-Pass *createCoalesceLocalsWithLearningPass() {
+Pass* createCoalesceLocalsWithLearningPass() {
return new CoalesceLocalsWithLearning();
}
diff --git a/src/passes/CodeFolding.cpp b/src/passes/CodeFolding.cpp
index a79980cfe..0479472d8 100644
--- a/src/passes/CodeFolding.cpp
+++ b/src/passes/CodeFolding.cpp
@@ -57,28 +57,29 @@
#include <iterator>
-#include "wasm.h"
-#include "pass.h"
-#include "wasm-builder.h"
-#include "ir/utils.h"
#include "ir/branch-utils.h"
#include "ir/effects.h"
#include "ir/label-utils.h"
+#include "ir/utils.h"
+#include "pass.h"
+#include "wasm-builder.h"
+#include "wasm.h"
namespace wasm {
static const Index WORTH_ADDING_BLOCK_TO_REMOVE_THIS_MUCH = 3;
-struct ExpressionMarker : public PostWalker<ExpressionMarker, UnifiedExpressionVisitor<ExpressionMarker>> {
+struct ExpressionMarker
+ : public PostWalker<ExpressionMarker,
+ UnifiedExpressionVisitor<ExpressionMarker>> {
std::set<Expression*>& marked;
- ExpressionMarker(std::set<Expression*>& marked, Expression* expr) : marked(marked) {
+ ExpressionMarker(std::set<Expression*>& marked, Expression* expr)
+ : marked(marked) {
walk(expr);
}
- void visitExpression(Expression* expr) {
- marked.insert(expr);
- }
+ void visitExpression(Expression* expr) { marked.insert(expr); }
};
struct CodeFolding : public WalkerPass<ControlFlowWalker<CodeFolding>> {
@@ -91,15 +92,18 @@ struct CodeFolding : public WalkerPass<ControlFlowWalker<CodeFolding>> {
struct Tail {
Expression* expr; // nullptr if this is a fallthrough
Block* block; // the enclosing block of code we hope to merge at its tail
- Expression** pointer; // for an expr with no parent block, the location it is at, so we can replace it
+ Expression** pointer; // for an expr with no parent block, the location it
+ // is at, so we can replace it
// For a fallthrough
Tail(Block* block) : expr(nullptr), block(block), pointer(nullptr) {}
// For a break
- Tail(Expression* expr, Block* block) : expr(expr), block(block), pointer(nullptr) {
+ Tail(Expression* expr, Block* block)
+ : expr(expr), block(block), pointer(nullptr) {
validate();
}
- Tail(Expression* expr, Expression** pointer) : expr(expr), block(nullptr), pointer(pointer) {}
+ Tail(Expression* expr, Expression** pointer)
+ : expr(expr), block(nullptr), pointer(pointer) {}
bool isFallthrough() const { return expr == nullptr; }
@@ -116,11 +120,13 @@ struct CodeFolding : public WalkerPass<ControlFlowWalker<CodeFolding>> {
// pass state
- std::map<Name, std::vector<Tail>> breakTails; // break target name => tails that reach it
+ std::map<Name, std::vector<Tail>> breakTails; // break target name => tails
+ // that reach it
std::vector<Tail> unreachableTails; // tails leading to (unreachable)
- std::vector<Tail> returnTails; // tails leading to (return)
- std::set<Name> unoptimizables; // break target names that we can't handle
- std::set<Expression*> modifieds; // modified code should not be processed again, wait for next pass
+ std::vector<Tail> returnTails; // tails leading to (return)
+ std::set<Name> unoptimizables; // break target names that we can't handle
+ std::set<Expression*> modifieds; // modified code should not be processed
+ // again, wait for next pass
// walking
@@ -167,20 +173,25 @@ struct CodeFolding : public WalkerPass<ControlFlowWalker<CodeFolding>> {
return;
}
}
- // otherwise, if we have a large value, it might be worth optimizing us as well
+ // otherwise, if we have a large value, it might be worth optimizing us as
+ // well
returnTails.push_back(Tail(curr, getCurrentPointer()));
}
void visitBlock(Block* curr) {
- if (curr->list.empty()) return;
- if (!curr->name.is()) return;
- if (unoptimizables.count(curr->name) > 0) return;
+ if (curr->list.empty())
+ return;
+ if (!curr->name.is())
+ return;
+ if (unoptimizables.count(curr->name) > 0)
+ return;
// we can't optimize a fallthrough value
if (isConcreteType(curr->list.back()->type)) {
return;
}
auto iter = breakTails.find(curr->name);
- if (iter == breakTails.end()) return;
+ if (iter == breakTails.end())
+ return;
// looks promising
auto& tails = iter->second;
// see if there is a fallthrough
@@ -191,23 +202,22 @@ struct CodeFolding : public WalkerPass<ControlFlowWalker<CodeFolding>> {
}
}
if (hasFallthrough) {
- tails.push_back({ Tail(curr) });
+ tails.push_back({Tail(curr)});
}
optimizeExpressionTails(tails, curr);
}
void visitIf(If* curr) {
- if (!curr->ifFalse) return;
+ if (!curr->ifFalse)
+ return;
// if both sides are identical, this is easy to fold
if (ExpressionAnalyzer::equal(curr->ifTrue, curr->ifFalse)) {
Builder builder(*getModule());
// remove if (4 bytes), remove one arm, add drop (1), add block (3),
// so this must be a net savings
markAsModified(curr);
- auto* ret = builder.makeSequence(
- builder.makeDrop(curr->condition),
- curr->ifTrue
- );
+ auto* ret =
+ builder.makeSequence(builder.makeDrop(curr->condition), curr->ifTrue);
// we must ensure we present the same type as the if had
ret->finalize(curr->type);
replaceCurrent(ret);
@@ -237,9 +247,8 @@ struct CodeFolding : public WalkerPass<ControlFlowWalker<CodeFolding>> {
}
// we need nameless blocks, as if there is a name, someone might branch
// to the end, skipping the code we want to merge
- if (left && right &&
- !left->name.is() && !right->name.is()) {
- std::vector<Tail> tails = { Tail(left), Tail(right) };
+ if (left && right && !left->name.is() && !right->name.is()) {
+ std::vector<Tail> tails = {Tail(left), Tail(right)};
optimizeExpressionTails(tails, curr);
}
}
@@ -251,7 +260,8 @@ struct CodeFolding : public WalkerPass<ControlFlowWalker<CodeFolding>> {
anotherPass = false;
super::doWalkFunction(func);
optimizeTerminatingTails(unreachableTails);
- // optimize returns at the end, so we can benefit from a fallthrough if there is a value TODO: separate passes for them?
+ // optimize returns at the end, so we can benefit from a fallthrough if
+ // there is a value TODO: separate passes for them?
optimizeTerminatingTails(returnTails);
// TODO add fallthrough for returns
// TODO optimize returns not in blocks, a big return value can be worth it
@@ -277,7 +287,10 @@ private:
for (auto* item : items) {
auto exiting = BranchUtils::getExitingBranches(item);
std::vector<Name> intersection;
- std::set_intersection(allTargets.begin(), allTargets.end(), exiting.begin(), exiting.end(),
+ std::set_intersection(allTargets.begin(),
+ allTargets.end(),
+ exiting.begin(),
+ exiting.end(),
std::back_inserter(intersection));
if (intersection.size() > 0) {
// anything exiting that is in all targets is something bad
@@ -287,15 +300,18 @@ private:
return true;
}
- // optimize tails that reach the outside of an expression. code that is identical in all
- // paths leading to the block exit can be merged.
+ // optimize tails that reach the outside of an expression. code that is
+ // identical in all paths leading to the block exit can be merged.
template<typename T>
void optimizeExpressionTails(std::vector<Tail>& tails, T* curr) {
- if (tails.size() < 2) return;
+ if (tails.size() < 2)
+ return;
// see if anything is untoward, and we should not do this
for (auto& tail : tails) {
- if (tail.expr && modifieds.count(tail.expr) > 0) return;
- if (modifieds.count(tail.block) > 0) return;
+ if (tail.expr && modifieds.count(tail.expr) > 0)
+ return;
+ if (modifieds.count(tail.block) > 0)
+ return;
// if we were not modified, then we should be valid for processing
tail.validate();
}
@@ -316,7 +332,7 @@ private:
// elements to be worth that extra block (although, there is
// some chance the block would get merged higher up, see later)
std::vector<Expression*> mergeable; // the elements we can merge
- Index num = 0; // how many elements back from the tail to look at
+ Index num = 0; // how many elements back from the tail to look at
Index saved = 0; // how much we can save
while (1) {
// check if this num is still relevant
@@ -329,7 +345,8 @@ private:
break;
}
}
- if (stop) break;
+ if (stop)
+ break;
auto* item = getMergeable(tails[0], num);
for (auto& tail : tails) {
if (!ExpressionAnalyzer::equal(item, getMergeable(tail, num))) {
@@ -338,15 +355,18 @@ private:
break;
}
}
- if (stop) break;
+ if (stop)
+ break;
// we may have found another one we can merge - can we move it?
- if (!canMove({ item }, curr)) break;
+ if (!canMove({item}, curr))
+ break;
// we found another one we can merge
mergeable.push_back(item);
num++;
saved += Measurer::measure(item);
}
- if (saved == 0) return;
+ if (saved == 0)
+ return;
// we may be able to save enough.
if (saved < WORTH_ADDING_BLOCK_TO_REMOVE_THIS_MUCH) {
// it's not obvious we can save enough. see if we get rid
@@ -363,13 +383,16 @@ private:
if (!willEmptyBlock) {
// last chance, if our parent is a block, then it should be
// fine to create a new block here, it will be merged up
- assert(curr == controlFlowStack.back()); // we are an if or a block, at the top
+ // we are an if or a block, at the top
+ assert(curr == controlFlowStack.back());
if (controlFlowStack.size() <= 1) {
return; // no parent at all
- // TODO: if we are the toplevel in the function, then in the binary format
- // we might avoid emitting a block, so the same logic applies here?
+ // TODO: if we are the toplevel in the function, then in the binary
+ // format we might avoid emitting a block, so the same logic
+ // applies here?
}
- auto* parent = controlFlowStack[controlFlowStack.size() - 2]->dynCast<Block>();
+ auto* parent =
+ controlFlowStack[controlFlowStack.size() - 2]->dynCast<Block>();
if (!parent) {
return; // parent is not a block
}
@@ -440,15 +463,23 @@ private:
// deeper merges first.
// returns whether we optimized something.
bool optimizeTerminatingTails(std::vector<Tail>& tails, Index num = 0) {
- if (tails.size() < 2) return false;
- // remove things that are untoward and cannot be optimized
- tails.erase(std::remove_if(tails.begin(), tails.end(), [&](Tail& tail) {
- if (tail.expr && modifieds.count(tail.expr) > 0) return true;
- if (tail.block && modifieds.count(tail.block) > 0) return true;
- // if we were not modified, then we should be valid for processing
- tail.validate();
+ if (tails.size() < 2)
return false;
- }), tails.end());
+ // remove things that are untoward and cannot be optimized
+ tails.erase(
+ std::remove_if(tails.begin(),
+ tails.end(),
+ [&](Tail& tail) {
+ if (tail.expr && modifieds.count(tail.expr) > 0)
+ return true;
+ if (tail.block && modifieds.count(tail.block) > 0)
+ return true;
+ // if we were not modified, then we should be valid for
+ // processing
+ tail.validate();
+ return false;
+ }),
+ tails.end());
// now let's try to find subsets that are mergeable. we don't look hard
// for the most optimal; further passes may find more
// effectiveSize: TODO: special-case fallthrough, matters for returns
@@ -481,7 +512,7 @@ private:
// estimate if a merging is worth the cost
auto worthIt = [&](Index num, std::vector<Tail>& tails) {
auto items = getTailItems(num, tails); // the elements we can merge
- Index saved = 0; // how much we can save
+ Index saved = 0; // how much we can save
for (auto* item : items) {
saved += Measurer::measure(item) * (tails.size() - 1);
}
@@ -496,7 +527,8 @@ private:
cost += WORTH_ADDING_BLOCK_TO_REMOVE_THIS_MUCH;
// if we cannot merge to the end, then we definitely need 2 blocks,
// and a branch
- if (!canMove(items, getFunction()->body)) { // TODO: efficiency, entire body
+ // TODO: efficiency, entire body
+ if (!canMove(items, getFunction()->body)) {
cost += 1 + WORTH_ADDING_BLOCK_TO_REMOVE_THIS_MUCH;
// TODO: to do this, we need to maintain a map of element=>parent,
// so that we can insert the new blocks in the right place
@@ -509,64 +541,86 @@ private:
// let's see if we can merge deeper than num, to num + 1
auto next = tails;
// remove tails that are too short, or that we hit an item we can't handle
- next.erase(std::remove_if(next.begin(), next.end(), [&](Tail& tail) {
- if (effectiveSize(tail) < num + 1) return true;
- auto* newItem = getItem(tail, num);
- // ignore tails that break to outside blocks. we want to move code to
- // the very outermost position, so such code cannot be moved
- // TODO: this should not be a problem in *non*-terminating tails,
- // but double-verify that
- if (EffectAnalyzer(getPassOptions(), newItem).hasExternalBreakTargets()) {
- return true;
- }
- return false;
- }), next.end());
+ next.erase(std::remove_if(next.begin(),
+ next.end(),
+ [&](Tail& tail) {
+ if (effectiveSize(tail) < num + 1)
+ return true;
+ auto* newItem = getItem(tail, num);
+ // ignore tails that break to outside blocks. we
+ // want to move code to the very outermost
+ // position, so such code cannot be moved
+ // TODO: this should not be a problem in
+ // *non*-terminating tails, but
+ // double-verify that
+ if (EffectAnalyzer(getPassOptions(), newItem)
+ .hasExternalBreakTargets()) {
+ return true;
+ }
+ return false;
+ }),
+ next.end());
// if we have enough to investigate, do so
if (next.size() >= 2) {
- // now we want to find a mergeable item - any item that is equal among a subset
+ // now we want to find a mergeable item - any item that is equal among a
+ // subset
std::map<Expression*, HashType> hashes; // expression => hash value
- std::map<HashType, std::vector<Expression*>> hashed; // hash value => expressions with that hash
+ // hash value => expressions with that hash
+ std::map<HashType, std::vector<Expression*>> hashed;
for (auto& tail : next) {
auto* item = getItem(tail, num);
auto hash = hashes[item] = ExpressionAnalyzer::hash(item);
hashed[hash].push_back(item);
}
- // look at each hash value exactly once. we do this in a deterministic order.
+ // look at each hash value exactly once. we do this in a deterministic
+ // order.
std::set<HashType> seen;
for (auto& tail : next) {
auto* item = getItem(tail, num);
auto hash = hashes[item];
- if (seen.count(hash)) continue;
+ if (seen.count(hash))
+ continue;
seen.insert(hash);
auto& items = hashed[hash];
- if (items.size() == 1) continue;
+ if (items.size() == 1)
+ continue;
assert(items.size() > 0);
// look for an item that has another match.
while (items.size() >= 2) {
auto first = items[0];
std::vector<Expression*> others;
- items.erase(std::remove_if(items.begin(), items.end(), [&](Expression* item) {
- if (item == first || // don't bother comparing the first
- ExpressionAnalyzer::equal(item, first)) {
- // equal, keep it
- return false;
- } else {
- // unequal, look at it later
- others.push_back(item);
- return true;
- }
- }), items.end());
+ items.erase(
+ std::remove_if(items.begin(),
+ items.end(),
+ [&](Expression* item) {
+ if (item ==
+ first || // don't bother comparing the first
+ ExpressionAnalyzer::equal(item, first)) {
+ // equal, keep it
+ return false;
+ } else {
+ // unequal, look at it later
+ others.push_back(item);
+ return true;
+ }
+ }),
+ items.end());
if (items.size() >= 2) {
// possible merge here, investigate it
auto* correct = items[0];
auto explore = next;
- explore.erase(std::remove_if(explore.begin(), explore.end(), [&](Tail& tail) {
- auto* item = getItem(tail, num);
- return !ExpressionAnalyzer::equal(item, correct);
- }), explore.end());
- // try to optimize this deeper tail. if we succeed, then stop here, as the
- // changes may influence us. we leave further opts to further passes (as this
- // is rare in practice, it's generally not a perf issue, but TODO optimize)
+ explore.erase(std::remove_if(explore.begin(),
+ explore.end(),
+ [&](Tail& tail) {
+ auto* item = getItem(tail, num);
+ return !ExpressionAnalyzer::equal(
+ item, correct);
+ }),
+ explore.end());
+ // try to optimize this deeper tail. if we succeed, then stop here,
+ // as the changes may influence us. we leave further opts to further
+ // passes (as this is rare in practice, it's generally not a perf
+ // issue, but TODO optimize)
if (optimizeTerminatingTails(explore, num + 1)) {
return true;
}
@@ -578,15 +632,18 @@ private:
// we explored deeper (higher num) options, but perhaps there
// was nothing there while there is something we can do at this level
// but if we are at num == 0, then we found nothing at all
- if (num == 0) return false;
+ if (num == 0)
+ return false;
// if not worth it, stop
- if (!worthIt(num, tails)) return false;
+ if (!worthIt(num, tails))
+ return false;
// this is worth doing, do it!
auto mergeable = getTailItems(num, tails); // the elements we can merge
// since we managed a merge, then it might open up more opportunities later
anotherPass = true;
Builder builder(*getModule());
- LabelUtils::LabelManager labels(getFunction()); // TODO: don't create one per merge, linear in function size
+ // TODO: don't create one per merge, linear in function size
+ LabelUtils::LabelManager labels(getFunction());
Name innerName = labels.getUnique("folding-inner");
for (auto& tail : tails) {
// remove the items we are merging / moving, and add a break
@@ -623,7 +680,8 @@ private:
// rules, and now it won't be toplevel in the function, it can
// change)
auto* toplevel = old->dynCast<Block>();
- if (toplevel) toplevel->finalize();
+ if (toplevel)
+ toplevel->finalize();
if (old->type != unreachable) {
inner->list.push_back(builder.makeReturn(old));
} else {
@@ -649,9 +707,6 @@ private:
}
};
-Pass *createCodeFoldingPass() {
- return new CodeFolding();
-}
+Pass* createCodeFoldingPass() { return new CodeFolding(); }
} // namespace wasm
-
diff --git a/src/passes/CodePushing.cpp b/src/passes/CodePushing.cpp
index 52aab08ad..342cb5182 100644
--- a/src/passes/CodePushing.cpp
+++ b/src/passes/CodePushing.cpp
@@ -19,10 +19,10 @@
// a location behind a condition, where it might not always execute.
//
-#include <wasm.h>
+#include <ir/effects.h>
#include <pass.h>
#include <wasm-builder.h>
-#include <ir/effects.h>
+#include <wasm.h>
namespace wasm {
@@ -50,26 +50,23 @@ struct LocalAnalyzer : public PostWalker<LocalAnalyzer> {
std::fill(sfa.begin() + func->getNumParams(), sfa.end(), true);
walk(func->body);
for (Index i = 0; i < num; i++) {
- if (numSets[i] == 0) sfa[i] = false;
+ if (numSets[i] == 0)
+ sfa[i] = false;
}
}
- bool isSFA(Index i) {
- return sfa[i];
- }
+ bool isSFA(Index i) { return sfa[i]; }
- Index getNumGets(Index i) {
- return numGets[i];
- }
+ Index getNumGets(Index i) { return numGets[i]; }
- void visitGetLocal(GetLocal *curr) {
+ void visitGetLocal(GetLocal* curr) {
if (numSets[curr->index] == 0) {
sfa[curr->index] = false;
}
numGets[curr->index]++;
}
- void visitSetLocal(SetLocal *curr) {
+ void visitSetLocal(SetLocal* curr) {
numSets[curr->index]++;
if (numSets[curr->index] > 1) {
sfa[curr->index] = false;
@@ -86,12 +83,18 @@ class Pusher {
PassOptions& passOptions;
public:
- Pusher(Block* block, LocalAnalyzer& analyzer, std::vector<Index>& numGetsSoFar, PassOptions& passOptions) : list(block->list), analyzer(analyzer), numGetsSoFar(numGetsSoFar), passOptions(passOptions) {
+ Pusher(Block* block,
+ LocalAnalyzer& analyzer,
+ std::vector<Index>& numGetsSoFar,
+ PassOptions& passOptions)
+ : list(block->list), analyzer(analyzer), numGetsSoFar(numGetsSoFar),
+ passOptions(passOptions) {
// Find an optimization segment: from the first pushable thing, to the first
// point past which we want to push. We then push in that range before
// continuing forward.
- Index relevant = list.size() - 1; // we never need to push past a final element, as
- // we couldn't be used after it.
+ // we never need to push past a final element, as we couldn't be used after
+ // it.
+ Index relevant = list.size() - 1;
const Index nothing = -1;
Index i = 0;
Index firstPushable = nothing;
@@ -114,7 +117,8 @@ public:
private:
SetLocal* isPushable(Expression* curr) {
auto* set = curr->dynCast<SetLocal>();
- if (!set) return nullptr;
+ if (!set)
+ return nullptr;
auto index = set->index;
// to be pushable, this must be SFA and the right # of gets,
// but also have no side effects, as it may not execute if pushed.
@@ -133,7 +137,8 @@ private:
if (auto* drop = curr->dynCast<Drop>()) {
curr = drop->value;
}
- if (curr->is<If>()) return true;
+ if (curr->is<If>())
+ return true;
if (auto* br = curr->dynCast<Break>()) {
return !!br->condition;
}
@@ -146,12 +151,14 @@ private:
// forward, that way we can push later things out of the way
// of earlier ones. Once we know all we can push, we push it all
// in one pass, keeping the order of the pushables intact.
- assert(firstPushable != Index(-1) && pushPoint != Index(-1) && firstPushable < pushPoint);
- EffectAnalyzer cumulativeEffects(passOptions); // everything that matters if you want
- // to be pushed past the pushPoint
+ assert(firstPushable != Index(-1) && pushPoint != Index(-1) &&
+ firstPushable < pushPoint);
+ // everything that matters if you want to be pushed past the pushPoint
+ EffectAnalyzer cumulativeEffects(passOptions);
cumulativeEffects.analyze(list[pushPoint]);
- cumulativeEffects.branches = false; // it is ok to ignore the branching here,
- // that is the crucial point of this opt
+ // it is ok to ignore the branching here, that is the crucial point of this
+ // opt
+ cumulativeEffects.branches = false;
std::vector<SetLocal*> toPush;
Index i = pushPoint - 1;
while (1) {
@@ -159,11 +166,11 @@ private:
if (pushable) {
auto iter = pushableEffects.find(pushable);
if (iter == pushableEffects.end()) {
- iter = pushableEffects.emplace(
- std::piecewise_construct,
- std::forward_as_tuple(pushable),
- std::forward_as_tuple(passOptions, pushable)
- ).first;
+ iter = pushableEffects
+ .emplace(std::piecewise_construct,
+ std::forward_as_tuple(pushable),
+ std::forward_as_tuple(passOptions, pushable))
+ .first;
}
auto& effects = iter->second;
if (cumulativeEffects.invalidates(effects)) {
@@ -236,30 +243,26 @@ struct CodePushing : public WalkerPass<PostWalker<CodePushing>> {
walk(func->body);
}
- void visitGetLocal(GetLocal *curr) {
- numGetsSoFar[curr->index]++;
- }
+ void visitGetLocal(GetLocal* curr) { numGetsSoFar[curr->index]++; }
void visitBlock(Block* curr) {
// Pushing code only makes sense if we are size 3 or above: we need
// one element to push, an element to push it past, and an element to use
// what we pushed.
- if (curr->list.size() < 3) return;
- // At this point in the postorder traversal we have gone through all our children.
- // Therefore any variable whose gets seen so far is equal to the total gets must
- // have no further users after this block. And therefore when we see an SFA
- // variable defined here, we know it isn't used before it either, and has just this
- // one assign. So we can push it forward while we don't hit a non-control-flow
- // ordering invalidation issue, since if this isn't a loop, it's fine (we're not
- // used outside), and if it is, we hit the assign before any use (as we can't
- // push it past a use).
+ if (curr->list.size() < 3)
+ return;
+ // At this point in the postorder traversal we have gone through all our
+ // children. Therefore any variable whose gets seen so far is equal to the
+ // total gets must have no further users after this block. And therefore
+ // when we see an SFA variable defined here, we know it isn't used before it
+ // either, and has just this one assign. So we can push it forward while we
+ // don't hit a non-control-flow ordering invalidation issue, since if this
+ // isn't a loop, it's fine (we're not used outside), and if it is, we hit
+ // the assign before any use (as we can't push it past a use).
Pusher pusher(curr, analyzer, numGetsSoFar, getPassOptions());
}
};
-Pass *createCodePushingPass() {
- return new CodePushing();
-}
+Pass* createCodePushingPass() { return new CodePushing(); }
} // namespace wasm
-
diff --git a/src/passes/ConstHoisting.cpp b/src/passes/ConstHoisting.cpp
index 11188a9ba..f67a48645 100644
--- a/src/passes/ConstHoisting.cpp
+++ b/src/passes/ConstHoisting.cpp
@@ -32,10 +32,10 @@
#include <map>
-#include <wasm.h>
#include <pass.h>
#include <wasm-binary.h>
#include <wasm-builder.h>
+#include <wasm.h>
namespace wasm {
@@ -66,16 +66,14 @@ struct ConstHoisting : public WalkerPass<PostWalker<ConstHoisting>> {
if (!prelude.empty()) {
Builder builder(*getModule());
// merge-blocks can optimize this into a single block later in most cases
- curr->body = builder.makeSequence(
- builder.makeBlock(prelude),
- curr->body
- );
+ curr->body = builder.makeSequence(builder.makeBlock(prelude), curr->body);
}
}
private:
bool worthHoisting(Literal value, Index num) {
- if (num < MIN_USES) return false;
+ if (num < MIN_USES)
+ return false;
// measure the size of the constant
Index size = 0;
switch (value.type) {
@@ -112,8 +110,7 @@ private:
return after < before;
}
- template<typename T>
- Index getWrittenSize(const T& thing) {
+ template<typename T> Index getWrittenSize(const T& thing) {
BufferWithRandomAccess buffer;
buffer << thing;
return buffer.size();
@@ -125,10 +122,7 @@ private:
auto type = (*(vec[0]))->type;
Builder builder(*getModule());
auto temp = builder.addVar(getFunction(), type);
- auto* ret = builder.makeSetLocal(
- temp,
- *(vec[0])
- );
+ auto* ret = builder.makeSetLocal(temp, *(vec[0]));
for (auto item : vec) {
*item = builder.makeGetLocal(temp, type);
}
@@ -136,8 +130,6 @@ private:
}
};
-Pass *createConstHoistingPass() {
- return new ConstHoisting();
-}
+Pass* createConstHoistingPass() { return new ConstHoisting(); }
} // namespace wasm
diff --git a/src/passes/DataFlowOpts.cpp b/src/passes/DataFlowOpts.cpp
index 42f01673f..3391359ef 100644
--- a/src/passes/DataFlowOpts.cpp
+++ b/src/passes/DataFlowOpts.cpp
@@ -24,15 +24,15 @@
// --flatten --dfo -Os
//
-#include "wasm.h"
-#include "pass.h"
-#include "wasm-builder.h"
-#include "ir/flat.h"
-#include "ir/utils.h"
-#include "dataflow/node.h"
#include "dataflow/graph.h"
+#include "dataflow/node.h"
#include "dataflow/users.h"
#include "dataflow/utils.h"
+#include "ir/flat.h"
+#include "ir/utils.h"
+#include "pass.h"
+#include "wasm-builder.h"
+#include "wasm.h"
namespace wasm {
@@ -59,8 +59,8 @@ struct DataFlowOpts : public WalkerPass<PostWalker<DataFlowOpts>> {
workLeft.insert(node.get()); // we should try to optimize each node
}
while (!workLeft.empty()) {
- //std::cout << "\n\ndump before work iter\n";
- //dump(graph, std::cout);
+ // std::cout << "\n\ndump before work iter\n";
+ // dump(graph, std::cout);
auto iter = workLeft.begin();
auto* node = *iter;
workLeft.erase(iter);
@@ -81,9 +81,11 @@ struct DataFlowOpts : public WalkerPass<PostWalker<DataFlowOpts>> {
}
void workOn(DataFlow::Node* node) {
- if (node->isConst()) return;
+ if (node->isConst())
+ return;
// If there are no uses, there is no point to work.
- if (nodeUsers.getNumUses(node) == 0) return;
+ if (nodeUsers.getNumUses(node) == 0)
+ return;
// Optimize: Look for nodes that we can easily convert into
// something simpler.
// TODO: we can expressionify and run full normal opts on that,
@@ -110,8 +112,9 @@ struct DataFlowOpts : public WalkerPass<PostWalker<DataFlowOpts>> {
void optimizeExprToConstant(DataFlow::Node* node) {
assert(node->isExpr());
assert(!node->isConst());
- //std::cout << "will optimize an Expr of all constant inputs. before" << '\n';
- //dump(node, std::cout);
+ // std::cout << "will optimize an Expr of all constant inputs. before" <<
+ // '\n';
+ // dump(node, std::cout);
auto* expr = node->expr;
// First, note that some of the expression's children may be
// local.gets that we inferred during SSA analysis as constant.
@@ -132,7 +135,8 @@ struct DataFlowOpts : public WalkerPass<PostWalker<DataFlowOpts>> {
Module temp;
// XXX we should copy expr here, in principle, and definitely will need to
// when we do arbitrarily regenerated expressions
- auto* func = Builder(temp).makeFunction("temp", std::vector<Type>{}, none, std::vector<Type>{}, expr);
+ auto* func = Builder(temp).makeFunction(
+ "temp", std::vector<Type>{}, none, std::vector<Type>{}, expr);
PassRunner runner(&temp);
runner.setIsNested(true);
runner.add("precompute");
@@ -140,7 +144,8 @@ struct DataFlowOpts : public WalkerPass<PostWalker<DataFlowOpts>> {
// Get the optimized thing
auto* result = func->body;
// It may not be a constant, e.g. 0 / 0 does not optimize to 0
- if (!result->is<Const>()) return;
+ if (!result->is<Const>())
+ return;
// All good, copy it.
node->expr = Builder(*getModule()).makeConst(result->cast<Const>()->value);
assert(node->isConst());
@@ -206,7 +211,8 @@ struct DataFlowOpts : public WalkerPass<PostWalker<DataFlowOpts>> {
// should look into TODO
break;
}
- default: WASM_UNREACHABLE();
+ default:
+ WASM_UNREACHABLE();
}
}
// No one is a user of this node after we replaced all the uses.
@@ -244,9 +250,6 @@ struct DataFlowOpts : public WalkerPass<PostWalker<DataFlowOpts>> {
}
};
-Pass *createDataFlowOptsPass() {
- return new DataFlowOpts();
-}
+Pass* createDataFlowOptsPass() { return new DataFlowOpts(); }
} // namespace wasm
-
diff --git a/src/passes/DeadArgumentElimination.cpp b/src/passes/DeadArgumentElimination.cpp
index e4b8eef56..0c6561ef6 100644
--- a/src/passes/DeadArgumentElimination.cpp
+++ b/src/passes/DeadArgumentElimination.cpp
@@ -37,14 +37,14 @@
#include <unordered_map>
#include <unordered_set>
-#include "wasm.h"
-#include "pass.h"
-#include "wasm-builder.h"
#include "cfg/cfg-traversal.h"
#include "ir/effects.h"
#include "ir/module-utils.h"
+#include "pass.h"
#include "passes/opt-utils.h"
#include "support/sorted_vector.h"
+#include "wasm-builder.h"
+#include "wasm.h"
namespace wasm {
@@ -73,14 +73,13 @@ struct DAEBlockInfo {
// If it is both read and written, we just care about the first
// action (if it is read first, that's all the info we are
// looking for; if it is written first, it can't be read later).
- enum LocalUse {
- Read,
- Written
- };
+ enum LocalUse { Read, Written };
std::unordered_map<Index, LocalUse> localUses;
};
-struct DAEScanner : public WalkerPass<CFGWalker<DAEScanner, Visitor<DAEScanner>, DAEBlockInfo>> {
+struct DAEScanner
+ : public WalkerPass<
+ CFGWalker<DAEScanner, Visitor<DAEScanner>, DAEBlockInfo>> {
bool isFunctionParallel() override { return true; }
Pass* create() override { return new DAEScanner(infoMap); }
@@ -131,7 +130,8 @@ struct DAEScanner : public WalkerPass<CFGWalker<DAEScanner, Visitor<DAEScanner>,
void doWalkFunction(Function* func) {
numParams = func->getNumParams();
info = &((*infoMap)[func->name]);
- CFGWalker<DAEScanner, Visitor<DAEScanner>, DAEBlockInfo>::doWalkFunction(func);
+ CFGWalker<DAEScanner, Visitor<DAEScanner>, DAEBlockInfo>::doWalkFunction(
+ func);
// If there are relevant params, check if they are used. (If
// we can't optimize the function anyhow, there's no point.)
if (numParams > 0 && !info->hasUnseenCalls) {
@@ -182,7 +182,8 @@ struct DAEScanner : public WalkerPass<CFGWalker<DAEScanner, Visitor<DAEScanner>,
if (use == DAEBlockInfo::Read) {
usedParams.insert(i);
}
- // Whether it was a read or a write, we can stop looking at that local here.
+ // Whether it was a read or a write, we can stop looking at that local
+ // here.
} else {
remainingIndexes.insert(i);
}
@@ -217,10 +218,10 @@ struct DAE : public Pass {
bool iteration(PassRunner* runner, Module* module) {
DAEFunctionInfoMap infoMap;
- // Ensure they all exist so the parallel threads don't modify the data structure.
- ModuleUtils::iterDefinedFunctions(*module, [&](Function* func) {
- infoMap[func->name];
- });
+ // Ensure they all exist so the parallel threads don't modify the data
+ // structure.
+ ModuleUtils::iterDefinedFunctions(
+ *module, [&](Function* func) { infoMap[func->name]; });
// Check the influence of the table and exports.
for (auto& curr : module->exports) {
if (curr->kind == ExternalKind::Function) {
@@ -287,13 +288,11 @@ struct DAE : public Pass {
}
}
if (value.type != none) {
- // Success! We can just apply the constant in the function, which makes
- // the parameter value unused, which lets us remove it later.
+ // Success! We can just apply the constant in the function, which
+ // makes the parameter value unused, which lets us remove it later.
Builder builder(*module);
func->body = builder.makeSequence(
- builder.makeSetLocal(i, builder.makeConst(value)),
- func->body
- );
+ builder.makeSetLocal(i, builder.makeConst(value)), func->body);
// Mark it as unused, which we know it now is (no point to
// re-scan just for that).
infoMap[name].unusedParams.insert(i);
@@ -308,14 +307,15 @@ struct DAE : public Pass {
auto& calls = pair.second;
auto* func = module->getFunction(name);
auto numParams = func->getNumParams();
- if (numParams == 0) continue;
+ if (numParams == 0)
+ continue;
// Iterate downwards, as we may remove more than one.
Index i = numParams - 1;
while (1) {
if (infoMap[name].unusedParams.has(i)) {
- // Great, it's not used. Check if none of the calls has a param with side
- // effects, as that would prevent us removing them (flattening should
- // have been done earlier).
+ // Great, it's not used. Check if none of the calls has a param with
+ // side effects, as that would prevent us removing them (flattening
+ // should have been done earlier).
bool canRemove = true;
for (auto* call : calls) {
auto* operand = call->operands[i];
@@ -331,13 +331,15 @@ struct DAE : public Pass {
changed.insert(func);
}
}
- if (i == 0) break;
+ if (i == 0)
+ break;
i--;
}
}
- // We can also tell which calls have all their return values dropped. Note that we can't do this
- // if we changed anything so far, as we may have modified allCalls (we can't modify a call site
- // twice in one iteration, once to remove a param, once to drop the return value).
+ // We can also tell which calls have all their return values dropped. Note
+ // that we can't do this if we changed anything so far, as we may have
+ // modified allCalls (we can't modify a call site twice in one iteration,
+ // once to remove a param, once to drop the return value).
if (changed.empty()) {
for (auto& func : module->functions) {
if (func->result == none) {
@@ -363,7 +365,8 @@ struct DAE : public Pass {
continue;
}
removeReturnValue(func.get(), calls, module);
- // TODO Removing a drop may also open optimization opportunities in the callers.
+ // TODO Removing a drop may also open optimization opportunities in the
+ // callers.
changed.insert(func.get());
}
}
@@ -391,15 +394,12 @@ private:
struct LocalUpdater : public PostWalker<LocalUpdater> {
Index removedIndex;
Index newIndex;
- LocalUpdater(Function* func, Index removedIndex, Index newIndex) : removedIndex(removedIndex), newIndex(newIndex) {
+ LocalUpdater(Function* func, Index removedIndex, Index newIndex)
+ : removedIndex(removedIndex), newIndex(newIndex) {
walk(func->body);
}
- void visitGetLocal(GetLocal* curr) {
- updateIndex(curr->index);
- }
- void visitSetLocal(SetLocal* curr) {
- updateIndex(curr->index);
- }
+ void visitGetLocal(GetLocal* curr) { updateIndex(curr->index); }
+ void visitSetLocal(SetLocal* curr) { updateIndex(curr->index); }
void updateIndex(Index& index) {
if (index == removedIndex) {
index = newIndex;
@@ -414,7 +414,8 @@ private:
}
}
- void removeReturnValue(Function* func, std::vector<Call*>& calls, Module* module) {
+ void
+ removeReturnValue(Function* func, std::vector<Call*>& calls, Module* module) {
// Clear the type, which is no longer accurate.
func->type = Name();
func->result = none;
@@ -430,10 +431,7 @@ private:
assert(value);
curr->value = nullptr;
Builder builder(*module);
- replaceCurrent(builder.makeSequence(
- builder.makeDrop(value),
- curr
- ));
+ replaceCurrent(builder.makeSequence(builder.makeDrop(value), curr));
}
} returnUpdater(func, module);
// Remove any value flowing out.
@@ -454,15 +452,12 @@ private:
}
};
-Pass *createDAEPass() {
- return new DAE();
-}
+Pass* createDAEPass() { return new DAE(); }
-Pass *createDAEOptimizingPass() {
+Pass* createDAEOptimizingPass() {
auto* ret = new DAE();
ret->optimize = true;
return ret;
}
} // namespace wasm
-
diff --git a/src/passes/DeadCodeElimination.cpp b/src/passes/DeadCodeElimination.cpp
index a56c88929..d23713060 100644
--- a/src/passes/DeadCodeElimination.cpp
+++ b/src/passes/DeadCodeElimination.cpp
@@ -28,17 +28,18 @@
// have no side effects.
//
-#include <vector>
-#include <wasm.h>
-#include <pass.h>
-#include <wasm-builder.h>
#include <ir/block-utils.h>
#include <ir/branch-utils.h>
#include <ir/type-updating.h>
+#include <pass.h>
+#include <vector>
+#include <wasm-builder.h>
+#include <wasm.h>
namespace wasm {
-struct DeadCodeElimination : public WalkerPass<PostWalker<DeadCodeElimination>> {
+struct DeadCodeElimination
+ : public WalkerPass<PostWalker<DeadCodeElimination>> {
bool isFunctionParallel() override { return true; }
Pass* create() override { return new DeadCodeElimination; }
@@ -48,7 +49,8 @@ struct DeadCodeElimination : public WalkerPass<PostWalker<DeadCodeElimination>>
Expression* replaceCurrent(Expression* expression) {
auto* old = getCurrent();
- if (old == expression) return expression;
+ if (old == expression)
+ return expression;
super::replaceCurrent(expression);
// also update the type updater
typeUpdater.noteReplacement(old, expression);
@@ -79,20 +81,17 @@ struct DeadCodeElimination : public WalkerPass<PostWalker<DeadCodeElimination>>
}
// if a child exists and is unreachable, we can replace ourselves with it
- bool isDead(Expression* child) {
- return child && child->type == unreachable;
- }
+ bool isDead(Expression* child) { return child && child->type == unreachable; }
// a similar check, assumes the child exists
- bool isUnreachable(Expression* child) {
- return child->type == unreachable;
- }
+ bool isUnreachable(Expression* child) { return child->type == unreachable; }
// things that stop control flow
void visitBreak(Break* curr) {
if (isDead(curr->value)) {
- // the condition is evaluated last, so if the value was unreachable, the whole thing is
+ // the condition is evaluated last, so if the value was unreachable, the
+ // whole thing is
replaceCurrent(curr->value);
return;
}
@@ -152,9 +151,7 @@ struct DeadCodeElimination : public WalkerPass<PostWalker<DeadCodeElimination>>
reachable = false;
}
- void visitUnreachable(Unreachable* curr) {
- reachable = false;
- }
+ void visitUnreachable(Unreachable* curr) { reachable = false; }
void visitBlock(Block* curr) {
auto& list = curr->list;
@@ -175,9 +172,11 @@ struct DeadCodeElimination : public WalkerPass<PostWalker<DeadCodeElimination>>
reachableBreaks.erase(curr->name);
}
if (list.size() == 1 && isUnreachable(list[0])) {
- replaceCurrent(BlockUtils::simplifyToContentsWithPossibleTypeChange(curr, this));
+ replaceCurrent(
+ BlockUtils::simplifyToContentsWithPossibleTypeChange(curr, this));
} else {
- // the block may have had a type, but can now be unreachable, which allows more reduction outside
+ // the block may have had a type, but can now be unreachable, which allows
+ // more reduction outside
typeUpdater.maybeUpdateTypeToUnreachable(curr);
}
}
@@ -186,7 +185,8 @@ struct DeadCodeElimination : public WalkerPass<PostWalker<DeadCodeElimination>>
if (curr->name.is()) {
reachableBreaks.erase(curr->name);
}
- if (isUnreachable(curr->body) && !BranchUtils::BranchSeeker::hasNamed(curr->body, curr->name)) {
+ if (isUnreachable(curr->body) &&
+ !BranchUtils::BranchSeeker::hasNamed(curr->body, curr->name)) {
replaceCurrent(curr->body);
return;
}
@@ -194,9 +194,11 @@ struct DeadCodeElimination : public WalkerPass<PostWalker<DeadCodeElimination>>
// ifs need special handling
- std::vector<bool> ifStack; // stack of reachable state, for forking and joining
+ // stack of reachable state, for forking and joining
+ std::vector<bool> ifStack;
- static void doAfterIfCondition(DeadCodeElimination* self, Expression** currp) {
+ static void doAfterIfCondition(DeadCodeElimination* self,
+ Expression** currp) {
self->ifStack.push_back(self->reachable);
}
@@ -209,67 +211,108 @@ struct DeadCodeElimination : public WalkerPass<PostWalker<DeadCodeElimination>>
}
void visitIf(If* curr) {
- // the ifStack has the branch that joins us, either from before if just an if, or the ifTrue if an if-else
+ // the ifStack has the branch that joins us, either from before if just an
+ // if, or the ifTrue if an if-else
reachable = reachable || ifStack.back();
ifStack.pop_back();
if (isUnreachable(curr->condition)) {
replaceCurrent(curr->condition);
}
- // the if may have had a type, but can now be unreachable, which allows more reduction outside
+ // the if may have had a type, but can now be unreachable, which allows more
+ // reduction outside
typeUpdater.maybeUpdateTypeToUnreachable(curr);
}
static void scan(DeadCodeElimination* self, Expression** currp) {
auto* curr = *currp;
if (!self->reachable) {
- // convert to an unreachable safely
- #define DELEGATE(CLASS_TO_VISIT) { \
- auto* parent = self->typeUpdater.parents[curr]; \
- self->typeUpdater.noteRecursiveRemoval(curr); \
- ExpressionManipulator::convert<CLASS_TO_VISIT, Unreachable>(static_cast<CLASS_TO_VISIT*>(curr)); \
- self->typeUpdater.noteAddition(curr, parent); \
- break; \
- }
+// convert to an unreachable safely
+#define DELEGATE(CLASS_TO_VISIT) \
+ { \
+ auto* parent = self->typeUpdater.parents[curr]; \
+ self->typeUpdater.noteRecursiveRemoval(curr); \
+ ExpressionManipulator::convert<CLASS_TO_VISIT, Unreachable>( \
+ static_cast<CLASS_TO_VISIT*>(curr)); \
+ self->typeUpdater.noteAddition(curr, parent); \
+ break; \
+ }
switch (curr->_id) {
- case Expression::Id::BlockId: DELEGATE(Block);
- case Expression::Id::IfId: DELEGATE(If);
- case Expression::Id::LoopId: DELEGATE(Loop);
- case Expression::Id::BreakId: DELEGATE(Break);
- case Expression::Id::SwitchId: DELEGATE(Switch);
- case Expression::Id::CallId: DELEGATE(Call);
- case Expression::Id::CallIndirectId: DELEGATE(CallIndirect);
- case Expression::Id::GetLocalId: DELEGATE(GetLocal);
- case Expression::Id::SetLocalId: DELEGATE(SetLocal);
- case Expression::Id::GetGlobalId: DELEGATE(GetGlobal);
- case Expression::Id::SetGlobalId: DELEGATE(SetGlobal);
- case Expression::Id::LoadId: DELEGATE(Load);
- case Expression::Id::StoreId: DELEGATE(Store);
- case Expression::Id::ConstId: DELEGATE(Const);
- case Expression::Id::UnaryId: DELEGATE(Unary);
- case Expression::Id::BinaryId: DELEGATE(Binary);
- case Expression::Id::SelectId: DELEGATE(Select);
- case Expression::Id::DropId: DELEGATE(Drop);
- case Expression::Id::ReturnId: DELEGATE(Return);
- case Expression::Id::HostId: DELEGATE(Host);
- case Expression::Id::NopId: DELEGATE(Nop);
- case Expression::Id::UnreachableId: break;
- case Expression::Id::AtomicCmpxchgId: DELEGATE(AtomicCmpxchg);
- case Expression::Id::AtomicRMWId: DELEGATE(AtomicRMW);
- case Expression::Id::AtomicWaitId: DELEGATE(AtomicWait);
- case Expression::Id::AtomicNotifyId: DELEGATE(AtomicNotify);
- case Expression::Id::SIMDExtractId: DELEGATE(SIMDExtract);
- case Expression::Id::SIMDReplaceId: DELEGATE(SIMDReplace);
- case Expression::Id::SIMDShuffleId: DELEGATE(SIMDShuffle);
- case Expression::Id::SIMDBitselectId: DELEGATE(SIMDBitselect);
- case Expression::Id::SIMDShiftId: DELEGATE(SIMDShift);
- case Expression::Id::MemoryInitId: DELEGATE(MemoryInit);
- case Expression::Id::DataDropId: DELEGATE(DataDrop);
- case Expression::Id::MemoryCopyId: DELEGATE(MemoryCopy);
- case Expression::Id::MemoryFillId: DELEGATE(MemoryFill);
- case Expression::Id::InvalidId: WASM_UNREACHABLE();
- case Expression::Id::NumExpressionIds: WASM_UNREACHABLE();
+ case Expression::Id::BlockId:
+ DELEGATE(Block);
+ case Expression::Id::IfId:
+ DELEGATE(If);
+ case Expression::Id::LoopId:
+ DELEGATE(Loop);
+ case Expression::Id::BreakId:
+ DELEGATE(Break);
+ case Expression::Id::SwitchId:
+ DELEGATE(Switch);
+ case Expression::Id::CallId:
+ DELEGATE(Call);
+ case Expression::Id::CallIndirectId:
+ DELEGATE(CallIndirect);
+ case Expression::Id::GetLocalId:
+ DELEGATE(GetLocal);
+ case Expression::Id::SetLocalId:
+ DELEGATE(SetLocal);
+ case Expression::Id::GetGlobalId:
+ DELEGATE(GetGlobal);
+ case Expression::Id::SetGlobalId:
+ DELEGATE(SetGlobal);
+ case Expression::Id::LoadId:
+ DELEGATE(Load);
+ case Expression::Id::StoreId:
+ DELEGATE(Store);
+ case Expression::Id::ConstId:
+ DELEGATE(Const);
+ case Expression::Id::UnaryId:
+ DELEGATE(Unary);
+ case Expression::Id::BinaryId:
+ DELEGATE(Binary);
+ case Expression::Id::SelectId:
+ DELEGATE(Select);
+ case Expression::Id::DropId:
+ DELEGATE(Drop);
+ case Expression::Id::ReturnId:
+ DELEGATE(Return);
+ case Expression::Id::HostId:
+ DELEGATE(Host);
+ case Expression::Id::NopId:
+ DELEGATE(Nop);
+ case Expression::Id::UnreachableId:
+ break;
+ case Expression::Id::AtomicCmpxchgId:
+ DELEGATE(AtomicCmpxchg);
+ case Expression::Id::AtomicRMWId:
+ DELEGATE(AtomicRMW);
+ case Expression::Id::AtomicWaitId:
+ DELEGATE(AtomicWait);
+ case Expression::Id::AtomicNotifyId:
+ DELEGATE(AtomicNotify);
+ case Expression::Id::SIMDExtractId:
+ DELEGATE(SIMDExtract);
+ case Expression::Id::SIMDReplaceId:
+ DELEGATE(SIMDReplace);
+ case Expression::Id::SIMDShuffleId:
+ DELEGATE(SIMDShuffle);
+ case Expression::Id::SIMDBitselectId:
+ DELEGATE(SIMDBitselect);
+ case Expression::Id::SIMDShiftId:
+ DELEGATE(SIMDShift);
+ case Expression::Id::MemoryInitId:
+ DELEGATE(MemoryInit);
+ case Expression::Id::DataDropId:
+ DELEGATE(DataDrop);
+ case Expression::Id::MemoryCopyId:
+ DELEGATE(MemoryCopy);
+ case Expression::Id::MemoryFillId:
+ DELEGATE(MemoryFill);
+ case Expression::Id::InvalidId:
+ WASM_UNREACHABLE();
+ case Expression::Id::NumExpressionIds:
+ WASM_UNREACHABLE();
}
- #undef DELEGATE
+#undef DELEGATE
return;
}
if (curr->is<If>()) {
@@ -290,12 +333,12 @@ struct DeadCodeElimination : public WalkerPass<PostWalker<DeadCodeElimination>>
// we don't need to drop unreachable nodes
Expression* drop(Expression* toDrop) {
- if (toDrop->type == unreachable) return toDrop;
+ if (toDrop->type == unreachable)
+ return toDrop;
return Builder(*getModule()).makeDrop(toDrop);
}
- template<typename T>
- Expression* handleCall(T* curr) {
+ template<typename T> Expression* handleCall(T* curr) {
for (Index i = 0; i < curr->operands.size(); i++) {
if (isUnreachable(curr->operands[i])) {
if (i > 0) {
@@ -316,12 +359,11 @@ struct DeadCodeElimination : public WalkerPass<PostWalker<DeadCodeElimination>>
return curr;
}
- void visitCall(Call* curr) {
- handleCall(curr);
- }
+ void visitCall(Call* curr) { handleCall(curr); }
void visitCallIndirect(CallIndirect* curr) {
- if (handleCall(curr) != curr) return;
+ if (handleCall(curr) != curr)
+ return;
if (isUnreachable(curr->target)) {
auto* block = getModule()->allocator.alloc<Block>();
for (auto* operand : curr->operands) {
@@ -356,56 +398,52 @@ struct DeadCodeElimination : public WalkerPass<PostWalker<DeadCodeElimination>>
}
void visitSetLocal(SetLocal* curr) {
- blockifyReachableOperands({ curr->value }, curr->type);
+ blockifyReachableOperands({curr->value}, curr->type);
}
void visitSetGlobal(SetGlobal* curr) {
- blockifyReachableOperands({ curr->value }, curr->type);
+ blockifyReachableOperands({curr->value}, curr->type);
}
void visitLoad(Load* curr) {
- blockifyReachableOperands({ curr->ptr }, curr->type);
+ blockifyReachableOperands({curr->ptr}, curr->type);
}
void visitStore(Store* curr) {
- blockifyReachableOperands({ curr->ptr, curr->value }, curr->type);
+ blockifyReachableOperands({curr->ptr, curr->value}, curr->type);
}
void visitAtomicRMW(AtomicRMW* curr) {
- blockifyReachableOperands({ curr->ptr, curr->value }, curr->type);
+ blockifyReachableOperands({curr->ptr, curr->value}, curr->type);
}
void visitAtomicCmpxchg(AtomicCmpxchg* curr) {
- blockifyReachableOperands({ curr->ptr, curr->expected, curr->replacement }, curr->type);
+ blockifyReachableOperands({curr->ptr, curr->expected, curr->replacement},
+ curr->type);
}
void visitUnary(Unary* curr) {
- blockifyReachableOperands({ curr->value }, curr->type);
+ blockifyReachableOperands({curr->value}, curr->type);
}
void visitBinary(Binary* curr) {
- blockifyReachableOperands({ curr->left, curr->right }, curr->type);
+ blockifyReachableOperands({curr->left, curr->right}, curr->type);
}
void visitSelect(Select* curr) {
- blockifyReachableOperands({ curr->ifTrue, curr->ifFalse, curr->condition }, curr->type);
+ blockifyReachableOperands({curr->ifTrue, curr->ifFalse, curr->condition},
+ curr->type);
}
void visitDrop(Drop* curr) {
- blockifyReachableOperands({ curr->value }, curr->type);
+ blockifyReachableOperands({curr->value}, curr->type);
}
- void visitHost(Host* curr) {
- handleCall(curr);
- }
+ void visitHost(Host* curr) { handleCall(curr); }
- void visitFunction(Function* curr) {
- assert(reachableBreaks.size() == 0);
- }
+ void visitFunction(Function* curr) { assert(reachableBreaks.size() == 0); }
};
-Pass *createDeadCodeEliminationPass() {
- return new DeadCodeElimination();
-}
+Pass* createDeadCodeEliminationPass() { return new DeadCodeElimination(); }
} // namespace wasm
diff --git a/src/passes/Directize.cpp b/src/passes/Directize.cpp
index 3d8fcdfdd..8f75c8f57 100644
--- a/src/passes/Directize.cpp
+++ b/src/passes/Directize.cpp
@@ -22,13 +22,13 @@
#include <unordered_map>
-#include "wasm.h"
-#include "pass.h"
-#include "wasm-builder.h"
-#include "wasm-traversal.h"
#include "asm_v_wasm.h"
#include "ir/table-utils.h"
#include "ir/utils.h"
+#include "pass.h"
+#include "wasm-builder.h"
+#include "wasm-traversal.h"
+#include "wasm.h"
namespace wasm {
@@ -64,11 +64,8 @@ struct FunctionDirectizer : public WalkerPass<PostWalker<FunctionDirectizer>> {
return;
}
// Everything looks good!
- replaceCurrent(Builder(*getModule()).makeCall(
- name,
- curr->operands,
- curr->type
- ));
+ replaceCurrent(
+ Builder(*getModule()).makeCall(name, curr->operands, curr->type));
}
}
@@ -88,25 +85,25 @@ private:
for (auto*& operand : call->operands) {
operand = builder.makeDrop(operand);
}
- replaceCurrent(
- builder.makeSequence(
- builder.makeBlock(call->operands),
- builder.makeUnreachable()
- )
- );
+ replaceCurrent(builder.makeSequence(builder.makeBlock(call->operands),
+ builder.makeUnreachable()));
changedTypes = true;
}
};
struct Directize : public Pass {
void run(PassRunner* runner, Module* module) override {
- if (!module->table.exists) return;
- if (module->table.imported()) return;
+ if (!module->table.exists)
+ return;
+ if (module->table.imported())
+ return;
for (auto& ex : module->exports) {
- if (ex->kind == ExternalKind::Table) return;
+ if (ex->kind == ExternalKind::Table)
+ return;
}
FlatTable flatTable(module->table);
- if (!flatTable.valid) return;
+ if (!flatTable.valid)
+ return;
// The table exists and is constant, so this is possible.
{
PassRunner runner(module);
@@ -119,9 +116,6 @@ struct Directize : public Pass {
} // anonymous namespace
-Pass *createDirectizePass() {
- return new Directize();
-}
+Pass* createDirectizePass() { return new Directize(); }
} // namespace wasm
-
diff --git a/src/passes/DuplicateFunctionElimination.cpp b/src/passes/DuplicateFunctionElimination.cpp
index b7fcb556c..3caa43e1d 100644
--- a/src/passes/DuplicateFunctionElimination.cpp
+++ b/src/passes/DuplicateFunctionElimination.cpp
@@ -20,19 +20,20 @@
// identical when finally lowered into concrete wasm code.
//
-#include "wasm.h"
-#include "pass.h"
-#include "ir/utils.h"
#include "ir/function-utils.h"
#include "ir/hashed.h"
#include "ir/module-utils.h"
+#include "ir/utils.h"
+#include "pass.h"
+#include "wasm.h"
namespace wasm {
struct FunctionReplacer : public WalkerPass<PostWalker<FunctionReplacer>> {
bool isFunctionParallel() override { return true; }
- FunctionReplacer(std::map<Name, Name>* replacements) : replacements(replacements) {}
+ FunctionReplacer(std::map<Name, Name>* replacements)
+ : replacements(replacements) {}
FunctionReplacer* create() override {
return new FunctionReplacer(replacements);
@@ -51,15 +52,17 @@ private:
struct DuplicateFunctionElimination : public Pass {
void run(PassRunner* runner, Module* module) override {
- // Multiple iterations may be necessary: A and B may be identical only after we
- // see the functions C1 and C2 that they call are in fact identical. Rarely, such
- // "chains" can be very long, so we limit how many we do.
+ // Multiple iterations may be necessary: A and B may be identical only after
+ // we see the functions C1 and C2 that they call are in fact identical.
+ // Rarely, such "chains" can be very long, so we limit how many we do.
auto& options = runner->options;
Index limit;
if (options.optimizeLevel >= 3 || options.shrinkLevel >= 1) {
limit = module->functions.size(); // no limit
} else if (options.optimizeLevel >= 2) {
- limit = 10; // 10 passes usually does most of the work, as this is typically logarithmic
+ // 10 passes usually does most of the work, as this is typically
+ // logarithmic
+ limit = 10;
} else {
limit = 1;
}
@@ -82,16 +85,19 @@ struct DuplicateFunctionElimination : public Pass {
for (auto& pair : hashGroups) {
auto& group = pair.second;
Index size = group.size();
- if (size == 1) continue;
- // The groups should be fairly small, and even if a group is large we should
- // have almost all of them identical, so we should not hit actual O(N^2)
- // here unless the hash is quite poor.
+ if (size == 1)
+ continue;
+ // The groups should be fairly small, and even if a group is large we
+ // should have almost all of them identical, so we should not hit actual
+ // O(N^2) here unless the hash is quite poor.
for (Index i = 0; i < size - 1; i++) {
auto* first = group[i];
- if (duplicates.count(first->name)) continue;
+ if (duplicates.count(first->name))
+ continue;
for (Index j = i + 1; j < size; j++) {
auto* second = group[j];
- if (duplicates.count(second->name)) continue;
+ if (duplicates.count(second->name))
+ continue;
if (FunctionUtils::equal(first, second)) {
// great, we can replace the second with the first!
replacements[second->name] = first->name;
@@ -104,9 +110,12 @@ struct DuplicateFunctionElimination : public Pass {
if (replacements.size() > 0) {
// remove the duplicates
auto& v = module->functions;
- v.erase(std::remove_if(v.begin(), v.end(), [&](const std::unique_ptr<Function>& curr) {
- return duplicates.count(curr->name) > 0;
- }), v.end());
+ v.erase(std::remove_if(v.begin(),
+ v.end(),
+ [&](const std::unique_ptr<Function>& curr) {
+ return duplicates.count(curr->name) > 0;
+ }),
+ v.end());
module->updateMaps();
// replace direct calls
PassRunner replacerRunner(module);
@@ -143,7 +152,7 @@ struct DuplicateFunctionElimination : public Pass {
}
};
-Pass *createDuplicateFunctionEliminationPass() {
+Pass* createDuplicateFunctionEliminationPass() {
return new DuplicateFunctionElimination();
}
diff --git a/src/passes/ExtractFunction.cpp b/src/passes/ExtractFunction.cpp
index 8a97ced8e..8942771fe 100644
--- a/src/passes/ExtractFunction.cpp
+++ b/src/passes/ExtractFunction.cpp
@@ -18,14 +18,16 @@
// with (mostly) just the code you want to debug (function-parallel,
// non-lto) passes on.
-#include "wasm.h"
#include "pass.h"
+#include "wasm.h"
namespace wasm {
struct ExtractFunction : public Pass {
void run(PassRunner* runner, Module* module) override {
- Name name = runner->options.getArgument("extract", "ExtractFunction usage: wasm-opt --pass-arg=extract:FUNCTION_NAME");
+ Name name = runner->options.getArgument(
+ "extract",
+ "ExtractFunction usage: wasm-opt --pass-arg=extract:FUNCTION_NAME");
std::cerr << "extracting " << name << "\n";
bool found = false;
for (auto& func : module->functions) {
@@ -58,9 +60,6 @@ struct ExtractFunction : public Pass {
// declare pass
-Pass *createExtractFunctionPass() {
- return new ExtractFunction();
-}
+Pass* createExtractFunctionPass() { return new ExtractFunction(); }
} // namespace wasm
-
diff --git a/src/passes/Flatten.cpp b/src/passes/Flatten.cpp
index df6be947d..a68fc9abe 100644
--- a/src/passes/Flatten.cpp
+++ b/src/passes/Flatten.cpp
@@ -18,13 +18,13 @@
// Flattens code into "Flat IR" form. See ir/flat.h.
//
-#include <wasm.h>
-#include <pass.h>
-#include <wasm-builder.h>
#include <ir/branch-utils.h>
#include <ir/effects.h>
#include <ir/flat.h>
#include <ir/utils.h>
+#include <pass.h>
+#include <wasm-builder.h>
+#include <wasm.h>
namespace wasm {
@@ -43,12 +43,15 @@ namespace wasm {
// Once exception is that we allow an (unreachable) node, which is used
// when we move something unreachable to another place, and need a
// placeholder. We will never reach that (unreachable) anyhow
-struct Flatten : public WalkerPass<ExpressionStackWalker<Flatten, UnifiedExpressionVisitor<Flatten>>> {
+struct Flatten
+ : public WalkerPass<
+ ExpressionStackWalker<Flatten, UnifiedExpressionVisitor<Flatten>>> {
bool isFunctionParallel() override { return true; }
Pass* create() override { return new Flatten; }
- // For each expression, a bunch of expressions that should execute right before it
+ // For each expression, a bunch of expressions that should execute right
+ // before it
std::unordered_map<Expression*, std::vector<Expression*>> preludes;
// Break values are sent through a temp local
@@ -61,7 +64,9 @@ struct Flatten : public WalkerPass<ExpressionStackWalker<Flatten, UnifiedExpress
if (Flat::isControlFlowStructure(curr)) {
// handle control flow explicitly. our children do not have control flow,
// but they do have preludes which we need to set up in the right place
- assert(preludes.find(curr) == preludes.end()); // no one should have given us preludes, they are on the children
+
+ // no one should have given us preludes, they are on the children
+ assert(preludes.find(curr) == preludes.end());
if (auto* block = curr->dynCast<Block>()) {
// make a new list, where each item's preludes are added before it
ExpressionList newList(getModule()->allocator);
@@ -123,7 +128,9 @@ struct Flatten : public WalkerPass<ExpressionStackWalker<Flatten, UnifiedExpress
rep = builder.makeGetLocal(temp, type);
}
iff->ifTrue = getPreludesWithExpression(originalIfTrue, iff->ifTrue);
- if (iff->ifFalse) iff->ifFalse = getPreludesWithExpression(originalIfFalse, iff->ifFalse);
+ if (iff->ifFalse)
+ iff->ifFalse =
+ getPreludesWithExpression(originalIfFalse, iff->ifFalse);
iff->finalize();
if (prelude) {
ReFinalizeNode().visit(prelude);
@@ -204,10 +211,9 @@ struct Flatten : public WalkerPass<ExpressionStackWalker<Flatten, UnifiedExpress
// we don't know which break target will be hit - assign to them all
auto names = BranchUtils::getUniqueTargets(sw);
for (auto name : names) {
- ourPreludes.push_back(builder.makeSetLocal(
- getTempForBreakTarget(name, type),
- builder.makeGetLocal(temp, type)
- ));
+ ourPreludes.push_back(
+ builder.makeSetLocal(getTempForBreakTarget(name, type),
+ builder.makeGetLocal(temp, type)));
}
sw->value = nullptr;
sw->finalize();
@@ -275,9 +281,11 @@ private:
// gets an expression, either by itself, or in a block with some
// preludes (which we use up) for another expression before it
- Expression* getPreludesWithExpression(Expression* preluder, Expression* after) {
+ Expression* getPreludesWithExpression(Expression* preluder,
+ Expression* after) {
auto iter = preludes.find(preluder);
- if (iter == preludes.end()) return after;
+ if (iter == preludes.end())
+ return after;
// we have preludes
auto& thePreludes = iter->second;
auto* ret = Builder(*getModule()).makeBlock(thePreludes);
@@ -294,14 +302,12 @@ private:
if (iter != breakTemps.end()) {
return iter->second;
} else {
- return breakTemps[name] = Builder(*getModule()).addVar(getFunction(), type);
+ return breakTemps[name] =
+ Builder(*getModule()).addVar(getFunction(), type);
}
}
};
-Pass *createFlattenPass() {
- return new Flatten();
-}
+Pass* createFlattenPass() { return new Flatten(); }
} // namespace wasm
-
diff --git a/src/passes/FuncCastEmulation.cpp b/src/passes/FuncCastEmulation.cpp
index 36a2819b2..904b8a202 100644
--- a/src/passes/FuncCastEmulation.cpp
+++ b/src/passes/FuncCastEmulation.cpp
@@ -27,12 +27,12 @@
// This should work even with dynamic linking, however, the number of
// params must be identical, i.e., the "ABI" must match.
-#include <wasm.h>
-#include <wasm-builder.h>
#include <asm_v_wasm.h>
+#include <ir/literal-utils.h>
#include <pass.h>
+#include <wasm-builder.h>
#include <wasm-emscripten.h>
-#include <ir/literal-utils.h>
+#include <wasm.h>
namespace wasm {
@@ -54,10 +54,8 @@ static Expression* toABI(Expression* value, Module* module) {
break;
}
case f32: {
- value = builder.makeUnary(
- ExtendUInt32,
- builder.makeUnary(ReinterpretFloat32, value)
- );
+ value = builder.makeUnary(ExtendUInt32,
+ builder.makeUnary(ReinterpretFloat32, value));
break;
}
case f64: {
@@ -70,10 +68,7 @@ static Expression* toABI(Expression* value, Module* module) {
}
case none: {
// the value is none, but we need a value here
- value = builder.makeSequence(
- value,
- LiteralUtils::makeZero(i64, *module)
- );
+ value = builder.makeSequence(value, LiteralUtils::makeZero(i64, *module));
break;
}
case unreachable: {
@@ -97,10 +92,8 @@ static Expression* fromABI(Expression* value, Type type, Module* module) {
break;
}
case f32: {
- value = builder.makeUnary(
- ReinterpretInt32,
- builder.makeUnary(WrapInt64, value)
- );
+ value = builder.makeUnary(ReinterpretInt32,
+ builder.makeUnary(WrapInt64, value));
break;
}
case f64: {
@@ -122,7 +115,8 @@ static Expression* fromABI(Expression* value, Type type, Module* module) {
return value;
}
-struct ParallelFuncCastEmulation : public WalkerPass<PostWalker<ParallelFuncCastEmulation>> {
+struct ParallelFuncCastEmulation
+ : public WalkerPass<PostWalker<ParallelFuncCastEmulation>> {
bool isFunctionParallel() override { return true; }
Pass* create() override { return new ParallelFuncCastEmulation(ABIType); }
@@ -131,8 +125,8 @@ struct ParallelFuncCastEmulation : public WalkerPass<PostWalker<ParallelFuncCast
void visitCallIndirect(CallIndirect* curr) {
if (curr->operands.size() > NUM_PARAMS) {
- Fatal() << "FuncCastEmulation::NUM_PARAMS needs to be at least " <<
- curr->operands.size();
+ Fatal() << "FuncCastEmulation::NUM_PARAMS needs to be at least "
+ << curr->operands.size();
}
for (Expression*& operand : curr->operands) {
operand = toABI(operand, getModule());
@@ -197,7 +191,8 @@ private:
Name makeThunk(Name name, Module* module) {
Name thunk = std::string("byn$fpcast-emu$") + name.str;
if (module->getFunctionOrNull(thunk)) {
- Fatal() << "FuncCastEmulation::makeThunk seems a thunk name already in use. Was the pass already run on this code?";
+ Fatal() << "FuncCastEmulation::makeThunk seems a thunk name already in "
+ "use. Was the pass already run on this code?";
}
// The item in the table may be a function or a function import.
auto* func = module->getFunction(name);
@@ -206,28 +201,25 @@ private:
Builder builder(*module);
std::vector<Expression*> callOperands;
for (Index i = 0; i < params.size(); i++) {
- callOperands.push_back(fromABI(builder.makeGetLocal(i, i64), params[i], module));
+ callOperands.push_back(
+ fromABI(builder.makeGetLocal(i, i64), params[i], module));
}
auto* call = builder.makeCall(name, callOperands, type);
std::vector<Type> thunkParams;
for (Index i = 0; i < NUM_PARAMS; i++) {
thunkParams.push_back(i64);
}
- auto* thunkFunc = builder.makeFunction(
- thunk,
- std::move(thunkParams),
- i64,
- {}, // no vars
- toABI(call, module)
- );
+ auto* thunkFunc = builder.makeFunction(thunk,
+ std::move(thunkParams),
+ i64,
+ {}, // no vars
+ toABI(call, module));
thunkFunc->type = ABIType;
module->addFunction(thunkFunc);
return thunk;
}
};
-Pass* createFuncCastEmulationPass() {
- return new FuncCastEmulation();
-}
+Pass* createFuncCastEmulationPass() { return new FuncCastEmulation(); }
} // namespace wasm
diff --git a/src/passes/I64ToI32Lowering.cpp b/src/passes/I64ToI32Lowering.cpp
index 731b42d3a..e2d3cc414 100644
--- a/src/passes/I64ToI32Lowering.cpp
+++ b/src/passes/I64ToI32Lowering.cpp
@@ -21,32 +21,31 @@
// global.
//
-#include <algorithm>
-#include "wasm.h"
-#include "pass.h"
-#include "emscripten-optimizer/istring.h"
-#include "support/name.h"
-#include "wasm-builder.h"
#include "abi/js.h"
+#include "asmjs/shared-constants.h"
+#include "emscripten-optimizer/istring.h"
#include "ir/flat.h"
#include "ir/iteration.h"
#include "ir/memory-utils.h"
#include "ir/module-utils.h"
#include "ir/names.h"
-#include "asmjs/shared-constants.h"
+#include "pass.h"
+#include "support/name.h"
+#include "wasm-builder.h"
+#include "wasm.h"
+#include <algorithm>
namespace wasm {
-static Name makeHighName(Name n) {
- return std::string(n.c_str()) + "$hi";
-}
+static Name makeHighName(Name n) { return std::string(n.c_str()) + "$hi"; }
struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
struct TempVar {
- TempVar(Index idx, Type ty, I64ToI32Lowering& pass) :
- idx(idx), pass(pass), moved(false), ty(ty) {}
+ TempVar(Index idx, Type ty, I64ToI32Lowering& pass)
+ : idx(idx), pass(pass), moved(false), ty(ty) {}
- TempVar(TempVar&& other) : idx(other), pass(other.pass), moved(false), ty(other.ty) {
+ TempVar(TempVar&& other)
+ : idx(other), pass(other.pass), moved(false), ty(other.ty) {
assert(!other.moved);
other.moved = true;
}
@@ -54,7 +53,8 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
TempVar& operator=(TempVar&& rhs) {
assert(!rhs.moved);
// free overwritten idx
- if (!moved) freeIdx();
+ if (!moved)
+ freeIdx();
idx = rhs.idx;
rhs.moved = true;
moved = false;
@@ -62,7 +62,8 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
}
~TempVar() {
- if (!moved) freeIdx();
+ if (!moved)
+ freeIdx();
}
bool operator==(const TempVar& rhs) {
@@ -81,8 +82,9 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
private:
void freeIdx() {
- auto &freeList = pass.freeTemps[(int) ty];
- assert(std::find(freeList.begin(), freeList.end(), idx) == freeList.end());
+ auto& freeList = pass.freeTemps[(int)ty];
+ assert(std::find(freeList.begin(), freeList.end(), idx) ==
+ freeList.end());
freeList.push_back(idx);
}
@@ -96,19 +98,22 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
// TODO: allow module-level transformations in parallel passes
bool isFunctionParallel() override { return false; }
- Pass* create() override {
- return new I64ToI32Lowering;
- }
+ Pass* create() override { return new I64ToI32Lowering; }
void doWalkModule(Module* module) {
- if (!builder) builder = make_unique<Builder>(*module);
+ if (!builder)
+ builder = make_unique<Builder>(*module);
// add new globals for high bits
for (size_t i = 0, globals = module->globals.size(); i < globals; ++i) {
auto* curr = module->globals[i].get();
- if (curr->type != i64) continue;
+ if (curr->type != i64)
+ continue;
originallyI64Globals.insert(curr->name);
curr->type = i32;
- auto* high = builder->makeGlobal(makeHighName(curr->name), i32, builder->makeConst(Literal(int32_t(0))), Builder::Mutable);
+ auto* high = builder->makeGlobal(makeHighName(curr->name),
+ i32,
+ builder->makeConst(Literal(int32_t(0))),
+ Builder::Mutable);
module->addGlobal(high);
if (curr->imported()) {
Fatal() << "TODO: imported i64 globals";
@@ -157,7 +162,8 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
void doWalkFunction(Function* func) {
Flat::verifyFlatness(func);
// create builder here if this is first entry to module for this object
- if (!builder) builder = make_unique<Builder>(*getModule());
+ if (!builder)
+ builder = make_unique<Builder>(*getModule());
indexMap.clear();
highBitVars.clear();
freeTemps.clear();
@@ -174,9 +180,10 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
Name lowName = oldFunc->getLocalName(i);
Name highName = makeHighName(lowName);
Type paramType = oldFunc->getLocalType(i);
- auto builderFunc = (i < oldFunc->getVarIndexBase()) ?
- Builder::addParam :
- static_cast<Index (*)(Function*, Name, Type)>(Builder::addVar);
+ auto builderFunc =
+ (i < oldFunc->getVarIndexBase())
+ ? Builder::addParam
+ : static_cast<Index (*)(Function*, Name, Type)>(Builder::addVar);
if (paramType == i64) {
builderFunc(func, lowName, i32);
builderFunc(func, highName, i32);
@@ -201,14 +208,9 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
if (hasOutParam(func->body)) {
TempVar highBits = fetchOutParam(func->body);
TempVar lowBits = getTemp();
- SetLocal* setLow = builder->makeSetLocal(
- lowBits,
- func->body
- );
+ SetLocal* setLow = builder->makeSetLocal(lowBits, func->body);
SetGlobal* setHigh = builder->makeSetGlobal(
- INT64_TO_32_HIGH_BITS,
- builder->makeGetLocal(highBits, i32)
- );
+ INT64_TO_32_HIGH_BITS, builder->makeGetLocal(highBits, i32));
GetLocal* getLow = builder->makeGetLocal(lowBits, i32);
func->body = builder->blockify(setLow, setHigh, getLow);
}
@@ -223,7 +225,8 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
template<typename T>
using BuilderFunc = std::function<T*(std::vector<Expression*>&, Type)>;
- // Fixes up a call. If we performed fixups, returns the call; otherwise returns nullptr;
+ // Fixes up a call. If we performed fixups, returns the call; otherwise
+ // returns nullptr;
template<typename T>
T* visitGenericCall(T* curr, BuilderFunc<T> callBuilder) {
bool fixed = false;
@@ -233,7 +236,7 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
if (hasOutParam(e)) {
TempVar argHighBits = fetchOutParam(e);
args.push_back(builder->makeGetLocal(argHighBits, i32));
- fixed = true;
+ fixed = true;
}
}
if (curr->type != i64) {
@@ -244,14 +247,9 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
TempVar lowBits = getTemp();
TempVar highBits = getTemp();
auto* call = callBuilder(args, i32);
- SetLocal* doCall = builder->makeSetLocal(
- lowBits,
- call
- );
+ SetLocal* doCall = builder->makeSetLocal(lowBits, call);
SetLocal* setHigh = builder->makeSetLocal(
- highBits,
- builder->makeGetGlobal(INT64_TO_32_HIGH_BITS, i32)
- );
+ highBits, builder->makeGetGlobal(INT64_TO_32_HIGH_BITS, i32));
GetLocal* getLow = builder->makeGetLocal(lowBits, i32);
Block* result = builder->blockify(doCall, setHigh, getLow);
setOutParam(result, std::move(highBits));
@@ -260,11 +258,9 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
}
void visitCall(Call* curr) {
auto* fixedCall = visitGenericCall<Call>(
- curr,
- [&](std::vector<Expression*>& args, Type ty) {
+ curr, [&](std::vector<Expression*>& args, Type ty) {
return builder->makeCall(curr->target, args, ty);
- }
- );
+ });
// If this was to an import, we need to call the legal version. This assumes
// that legalize-js-interface has been run before.
if (fixedCall && getModule()->getFunction(fixedCall->target)->imported()) {
@@ -275,16 +271,10 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
void visitCallIndirect(CallIndirect* curr) {
visitGenericCall<CallIndirect>(
- curr,
- [&](std::vector<Expression*>& args, Type ty) {
+ curr, [&](std::vector<Expression*>& args, Type ty) {
return builder->makeCallIndirect(
- curr->fullType,
- curr->target,
- args,
- ty
- );
- }
- );
+ curr->fullType, curr->target, args, ty);
+ });
}
void visitGetLocal(GetLocal* curr) {
@@ -297,13 +287,8 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
}
curr->type = i32;
TempVar highBits = getTemp();
- SetLocal *setHighBits = builder->makeSetLocal(
- highBits,
- builder->makeGetLocal(
- mappedIndex + 1,
- i32
- )
- );
+ SetLocal* setHighBits = builder->makeSetLocal(
+ highBits, builder->makeGetLocal(mappedIndex + 1, i32));
Block* result = builder->blockify(setHighBits, curr);
replaceCurrent(result);
setOutParam(result, std::move(highBits));
@@ -315,9 +300,7 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
curr->type = i32;
SetLocal* setLow = builder->makeSetLocal(tmp, curr);
SetLocal* setHigh = builder->makeSetLocal(
- curr->index + 1,
- builder->makeGetLocal(highBits, i32)
- );
+ curr->index + 1, builder->makeGetLocal(highBits, i32));
GetLocal* getLow = builder->makeGetLocal(tmp, i32);
Block* result = builder->blockify(setLow, setHigh, getLow);
replaceCurrent(result);
@@ -337,44 +320,40 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
return;
}
TempVar highBits = fetchOutParam(curr->value);
- auto* setHigh = builder->makeSetLocal(
- mappedIndex + 1,
- builder->makeGetLocal(highBits, i32)
- );
+ auto* setHigh = builder->makeSetLocal(mappedIndex + 1,
+ builder->makeGetLocal(highBits, i32));
Block* result = builder->blockify(curr, setHigh);
replaceCurrent(result);
}
void visitGetGlobal(GetGlobal* curr) {
- if (!getFunction()) return; // if in a global init, skip - we already handled that.
- if (!originallyI64Globals.count(curr->name)) return;
+ if (!getFunction())
+ return; // if in a global init, skip - we already handled that.
+ if (!originallyI64Globals.count(curr->name))
+ return;
curr->type = i32;
TempVar highBits = getTemp();
- SetLocal *setHighBits = builder->makeSetLocal(
- highBits,
- builder->makeGetGlobal(
- makeHighName(curr->name),
- i32
- )
- );
+ SetLocal* setHighBits = builder->makeSetLocal(
+ highBits, builder->makeGetGlobal(makeHighName(curr->name), i32));
Block* result = builder->blockify(setHighBits, curr);
replaceCurrent(result);
setOutParam(result, std::move(highBits));
}
void visitSetGlobal(SetGlobal* curr) {
- if (!originallyI64Globals.count(curr->name)) return;
- if (handleUnreachable(curr)) return;
+ if (!originallyI64Globals.count(curr->name))
+ return;
+ if (handleUnreachable(curr))
+ return;
TempVar highBits = fetchOutParam(curr->value);
auto* setHigh = builder->makeSetGlobal(
- makeHighName(curr->name),
- builder->makeGetLocal(highBits, i32)
- );
+ makeHighName(curr->name), builder->makeGetLocal(highBits, i32));
replaceCurrent(builder->makeSequence(curr, setHigh));
}
void visitLoad(Load* curr) {
- if (curr->type != i64) return;
+ if (curr->type != i64)
+ return;
assert(!curr->isAtomic && "atomic load not implemented");
TempVar lowBits = getTemp();
TempVar highBits = getTemp();
@@ -384,46 +363,37 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
if (curr->bytes == 8) {
loadHigh = builder->makeSetLocal(
highBits,
- builder->makeLoad(
- 4,
- curr->signed_,
- curr->offset + 4,
- 1,
- builder->makeGetLocal(ptrTemp, i32),
- i32
- )
- );
+ builder->makeLoad(4,
+ curr->signed_,
+ curr->offset + 4,
+ 1,
+ builder->makeGetLocal(ptrTemp, i32),
+ i32));
} else if (curr->signed_) {
loadHigh = builder->makeSetLocal(
highBits,
- builder->makeBinary(
- ShrSInt32,
- builder->makeGetLocal(lowBits, i32),
- builder->makeConst(Literal(int32_t(31)))
- )
- );
+ builder->makeBinary(ShrSInt32,
+ builder->makeGetLocal(lowBits, i32),
+ builder->makeConst(Literal(int32_t(31)))));
} else {
- loadHigh = builder->makeSetLocal(
- highBits,
- builder->makeConst(Literal(int32_t(0)))
- );
+ loadHigh = builder->makeSetLocal(highBits,
+ builder->makeConst(Literal(int32_t(0))));
}
curr->type = i32;
curr->bytes = std::min(curr->bytes, uint8_t(4));
curr->align = std::min(uint32_t(curr->align), uint32_t(4));
curr->ptr = builder->makeGetLocal(ptrTemp, i32);
- Block* result = builder->blockify(
- setPtr,
- builder->makeSetLocal(lowBits, curr),
- loadHigh,
- builder->makeGetLocal(lowBits, i32)
- );
+ Block* result = builder->blockify(setPtr,
+ builder->makeSetLocal(lowBits, curr),
+ loadHigh,
+ builder->makeGetLocal(lowBits, i32));
replaceCurrent(result);
setOutParam(result, std::move(highBits));
}
void visitStore(Store* curr) {
- if (!hasOutParam(curr->value)) return;
+ if (!hasOutParam(curr->value))
+ return;
assert(curr->offset + 4 > curr->offset);
assert(!curr->isAtomic && "atomic store not implemented");
TempVar highBits = fetchOutParam(curr->value);
@@ -436,14 +406,13 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
SetLocal* setPtr = builder->makeSetLocal(ptrTemp, curr->ptr);
curr->ptr = builder->makeGetLocal(ptrTemp, i32);
curr->finalize();
- Store* storeHigh = builder->makeStore(
- 4,
- curr->offset + 4,
- 1,
- builder->makeGetLocal(ptrTemp, i32),
- builder->makeGetLocal(highBits, i32),
- i32
- );
+ Store* storeHigh =
+ builder->makeStore(4,
+ curr->offset + 4,
+ 1,
+ builder->makeGetLocal(ptrTemp, i32),
+ builder->makeGetLocal(highBits, i32),
+ i32);
replaceCurrent(builder->blockify(setPtr, curr, storeHigh));
}
}
@@ -457,18 +426,17 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
}
void visitConst(Const* curr) {
- if (!getFunction()) return; // if in a global init, skip - we already handled that.
- if (curr->type != i64) return;
+ if (!getFunction())
+ return; // if in a global init, skip - we already handled that.
+ if (curr->type != i64)
+ return;
TempVar highBits = getTemp();
- Const* lowVal = builder->makeConst(
- Literal(int32_t(curr->value.geti64() & 0xffffffff))
- );
- SetLocal* setHigh = builder->makeSetLocal(
- highBits,
- builder->makeConst(
- Literal(int32_t(uint64_t(curr->value.geti64()) >> 32))
- )
- );
+ Const* lowVal =
+ builder->makeConst(Literal(int32_t(curr->value.geti64() & 0xffffffff)));
+ SetLocal* setHigh =
+ builder->makeSetLocal(highBits,
+ builder->makeConst(Literal(
+ int32_t(uint64_t(curr->value.geti64()) >> 32))));
Block* result = builder->blockify(setHigh, lowVal);
setOutParam(result, std::move(highBits));
replaceCurrent(result);
@@ -480,11 +448,7 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
auto* result = builder->makeUnary(
EqZInt32,
builder->makeBinary(
- OrInt32,
- curr->value,
- builder->makeGetLocal(highBits, i32)
- )
- );
+ OrInt32, curr->value, builder->makeGetLocal(highBits, i32)));
replaceCurrent(result);
}
@@ -493,8 +457,7 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
TempVar highBits = getTemp();
Block* result = builder->blockify(
builder->makeSetLocal(highBits, builder->makeConst(Literal(int32_t(0)))),
- curr->value
- );
+ curr->value);
setOutParam(result, std::move(highBits));
replaceCurrent(result);
}
@@ -506,18 +469,12 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
SetLocal* setLow = builder->makeSetLocal(lowBits, curr->value);
SetLocal* setHigh = builder->makeSetLocal(
highBits,
- builder->makeBinary(
- ShrSInt32,
- builder->makeGetLocal(lowBits, i32),
- builder->makeConst(Literal(int32_t(31)))
- )
- );
+ builder->makeBinary(ShrSInt32,
+ builder->makeGetLocal(lowBits, i32),
+ builder->makeConst(Literal(int32_t(31)))));
- Block* result = builder->blockify(
- setLow,
- setHigh,
- builder->makeGetLocal(lowBits, i32)
- );
+ Block* result =
+ builder->blockify(setLow, setHigh, builder->makeGetLocal(lowBits, i32));
setOutParam(result, std::move(highBits));
replaceCurrent(result);
@@ -533,14 +490,16 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
// Assume that the wasm file assumes the address 0 is invalid and roundtrip
// our f64 through memory at address 0
TempVar highBits = getTemp();
- Block *result = builder->blockify(
- builder->makeCall(ABI::wasm2js::SCRATCH_STORE_F64, { curr->value }, none),
+ Block* result = builder->blockify(
+ builder->makeCall(ABI::wasm2js::SCRATCH_STORE_F64, {curr->value}, none),
builder->makeSetLocal(
highBits,
- builder->makeCall(ABI::wasm2js::SCRATCH_LOAD_I32, { builder->makeConst(Literal(int32_t(1))) }, i32)
- ),
- builder->makeCall(ABI::wasm2js::SCRATCH_LOAD_I32, { builder->makeConst(Literal(int32_t(0))) }, i32)
- );
+ builder->makeCall(ABI::wasm2js::SCRATCH_LOAD_I32,
+ {builder->makeConst(Literal(int32_t(1)))},
+ i32)),
+ builder->makeCall(ABI::wasm2js::SCRATCH_LOAD_I32,
+ {builder->makeConst(Literal(int32_t(0)))},
+ i32));
setOutParam(result, std::move(highBits));
replaceCurrent(result);
MemoryUtils::ensureExists(getModule()->memory);
@@ -551,17 +510,21 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
// Assume that the wasm file assumes the address 0 is invalid and roundtrip
// our i64 through memory at address 0
TempVar highBits = fetchOutParam(curr->value);
- Block *result = builder->blockify(
- builder->makeCall(ABI::wasm2js::SCRATCH_STORE_I32, { builder->makeConst(Literal(int32_t(0))), curr->value }, none),
- builder->makeCall(ABI::wasm2js::SCRATCH_STORE_I32, { builder->makeConst(Literal(int32_t(1))), builder->makeGetLocal(highBits, i32) }, none),
- builder->makeCall(ABI::wasm2js::SCRATCH_LOAD_F64, {}, f64)
- );
+ Block* result = builder->blockify(
+ builder->makeCall(ABI::wasm2js::SCRATCH_STORE_I32,
+ {builder->makeConst(Literal(int32_t(0))), curr->value},
+ none),
+ builder->makeCall(ABI::wasm2js::SCRATCH_STORE_I32,
+ {builder->makeConst(Literal(int32_t(1))),
+ builder->makeGetLocal(highBits, i32)},
+ none),
+ builder->makeCall(ABI::wasm2js::SCRATCH_LOAD_F64, {}, f64));
replaceCurrent(result);
MemoryUtils::ensureExists(getModule()->memory);
ABI::wasm2js::ensureScratchMemoryHelpers(getModule());
}
- void lowerTruncFloatToInt(Unary *curr) {
+ void lowerTruncFloatToInt(Unary* curr) {
// hiBits = if abs(f) >= 1.0 {
// if f > 0.0 {
// (unsigned) min(
@@ -584,9 +547,9 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
switch (curr->op) {
case TruncSFloat32ToInt64:
case TruncUFloat32ToInt64: {
- litZero = Literal((float) 0);
- litOne = Literal((float) 1);
- u32Max = Literal(((float) UINT_MAX) + 1);
+ litZero = Literal((float)0);
+ litOne = Literal((float)1);
+ u32Max = Literal(((float)UINT_MAX) + 1);
trunc = TruncUFloat32ToInt32;
convert = ConvertUInt32ToFloat32;
localType = f32;
@@ -602,9 +565,9 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
}
case TruncSFloat64ToInt64:
case TruncUFloat64ToInt64: {
- litZero = Literal((double) 0);
- litOne = Literal((double) 1);
- u32Max = Literal(((double) UINT_MAX) + 1);
+ litZero = Literal((double)0);
+ litOne = Literal((double)1);
+ u32Max = Literal(((double)UINT_MAX) + 1);
trunc = TruncUFloat64ToInt32;
convert = ConvertUInt32ToFloat64;
localType = f64;
@@ -618,74 +581,63 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
sub = SubFloat64;
break;
}
- default: abort();
+ default:
+ abort();
}
TempVar f = getTemp(localType);
TempVar highBits = getTemp();
- Expression *gtZeroBranch = builder->makeBinary(
- min,
- builder->makeUnary(
- floor,
- builder->makeBinary(
- div,
- builder->makeGetLocal(f, localType),
- builder->makeConst(u32Max)
- )
- ),
- builder->makeBinary(sub, builder->makeConst(u32Max), builder->makeConst(litOne))
- );
- Expression *ltZeroBranch = builder->makeUnary(
- ceil,
+ Expression* gtZeroBranch = builder->makeBinary(
+ min,
+ builder->makeUnary(
+ floor,
+ builder->makeBinary(div,
+ builder->makeGetLocal(f, localType),
+ builder->makeConst(u32Max))),
+ builder->makeBinary(
+ sub, builder->makeConst(u32Max), builder->makeConst(litOne)));
+ Expression* ltZeroBranch = builder->makeUnary(
+ ceil,
+ builder->makeBinary(
+ div,
builder->makeBinary(
- div,
- builder->makeBinary(
- sub,
- builder->makeGetLocal(f, localType),
- builder->makeUnary(convert,
- builder->makeUnary(trunc, builder->makeGetLocal(f, localType))
- )
- ),
- builder->makeConst(u32Max)
- )
- );
-
- If *highBitsCalc = builder->makeIf(
+ sub,
+ builder->makeGetLocal(f, localType),
+ builder->makeUnary(
+ convert,
+ builder->makeUnary(trunc, builder->makeGetLocal(f, localType)))),
+ builder->makeConst(u32Max)));
+
+ If* highBitsCalc = builder->makeIf(
builder->makeBinary(
- gt,
- builder-> makeGetLocal(f, localType),
- builder->makeConst(litZero)
- ),
+ gt, builder->makeGetLocal(f, localType), builder->makeConst(litZero)),
builder->makeUnary(trunc, gtZeroBranch),
- builder->makeUnary(trunc, ltZeroBranch)
- );
- If *highBitsVal = builder->makeIf(
+ builder->makeUnary(trunc, ltZeroBranch));
+ If* highBitsVal = builder->makeIf(
builder->makeBinary(
ge,
builder->makeUnary(abs, builder->makeGetLocal(f, localType)),
- builder->makeConst(litOne)
- ),
+ builder->makeConst(litOne)),
highBitsCalc,
- builder->makeConst(Literal(int32_t(0)))
- );
- Block *result = builder->blockify(
+ builder->makeConst(Literal(int32_t(0))));
+ Block* result = builder->blockify(
builder->makeSetLocal(f, curr->value),
builder->makeSetLocal(highBits, highBitsVal),
- builder->makeUnary(trunc, builder->makeGetLocal(f, localType))
- );
+ builder->makeUnary(trunc, builder->makeGetLocal(f, localType)));
setOutParam(result, std::move(highBits));
replaceCurrent(result);
}
- void lowerConvertIntToFloat(Unary *curr) {
+ void lowerConvertIntToFloat(Unary* curr) {
// Here the same strategy as `emcc` is taken which takes the two halves of
// the 64-bit integer and creates a mathematical expression using float
// arithmetic to reassemble the final floating point value.
//
// For example for i64 -> f32 we generate:
//
- // ((double) (unsigned) lowBits) + ((double) U32_MAX) * ((double) (int) highBits)
+ // ((double) (unsigned) lowBits) +
+ // ((double) U32_MAX) * ((double) (int) highBits)
//
// Mostly just shuffling things around here with coercions and whatnot!
// Note though that all arithmetic is done with f64 to have as much
@@ -704,31 +656,23 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
case ConvertUInt64ToFloat64:
convertHigh = ConvertUInt32ToFloat64;
break;
- default: abort();
+ default:
+ abort();
}
- Expression *result = builder->blockify(
+ Expression* result = builder->blockify(
builder->makeSetLocal(lowBits, curr->value),
- builder->makeSetLocal(
- highResult,
- builder->makeConst(Literal(int32_t(0)))
- ),
+ builder->makeSetLocal(highResult,
+ builder->makeConst(Literal(int32_t(0)))),
builder->makeBinary(
AddFloat64,
- builder->makeUnary(
- ConvertUInt32ToFloat64,
- builder->makeGetLocal(lowBits, i32)
- ),
+ builder->makeUnary(ConvertUInt32ToFloat64,
+ builder->makeGetLocal(lowBits, i32)),
builder->makeBinary(
MulFloat64,
builder->makeConst(Literal((double)UINT_MAX + 1)),
- builder->makeUnary(
- convertHigh,
- builder->makeGetLocal(highBits, i32)
- )
- )
- )
- );
+ builder->makeUnary(convertHigh,
+ builder->makeGetLocal(highBits, i32)))));
switch (curr->op) {
case ConvertSInt64ToFloat32:
@@ -736,52 +680,43 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
result = builder->makeUnary(DemoteFloat64, result);
break;
}
- default: break;
+ default:
+ break;
}
replaceCurrent(result);
}
void lowerCountZeros(Unary* curr) {
- auto lower = [&](Block* result, UnaryOp op32, TempVar&& first, TempVar&& second) {
+ auto lower = [&](Block* result,
+ UnaryOp op32,
+ TempVar&& first,
+ TempVar&& second) {
TempVar highResult = getTemp();
TempVar firstResult = getTemp();
SetLocal* setFirst = builder->makeSetLocal(
firstResult,
- builder->makeUnary(op32, builder->makeGetLocal(first, i32))
- );
+ builder->makeUnary(op32, builder->makeGetLocal(first, i32)));
- Binary* check = builder->makeBinary(
- EqInt32,
- builder->makeGetLocal(firstResult, i32),
- builder->makeConst(Literal(int32_t(32)))
- );
+ Binary* check =
+ builder->makeBinary(EqInt32,
+ builder->makeGetLocal(firstResult, i32),
+ builder->makeConst(Literal(int32_t(32))));
If* conditional = builder->makeIf(
check,
builder->makeBinary(
AddInt32,
builder->makeUnary(op32, builder->makeGetLocal(second, i32)),
- builder->makeConst(Literal(int32_t(32)))
- ),
- builder->makeGetLocal(firstResult, i32)
- );
+ builder->makeConst(Literal(int32_t(32)))),
+ builder->makeGetLocal(firstResult, i32));
SetLocal* setHigh = builder->makeSetLocal(
- highResult,
- builder->makeConst(Literal(int32_t(0)))
- );
+ highResult, builder->makeConst(Literal(int32_t(0))));
setOutParam(result, std::move(highResult));
- replaceCurrent(
- builder->blockify(
- result,
- setFirst,
- setHigh,
- conditional
- )
- );
+ replaceCurrent(builder->blockify(result, setFirst, setHigh, conditional));
};
TempVar highBits = fetchOutParam(curr->value);
@@ -820,32 +755,54 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
case ConvertSInt64ToFloat64:
case ConvertUInt64ToFloat32:
case ConvertUInt64ToFloat64:
- case ReinterpretInt64: return true;
- default: return false;
+ case ReinterpretInt64:
+ return true;
+ default:
+ return false;
}
}
void visitUnary(Unary* curr) {
- if (!unaryNeedsLowering(curr->op)) return;
- if (handleUnreachable(curr)) return;
+ if (!unaryNeedsLowering(curr->op))
+ return;
+ if (handleUnreachable(curr))
+ return;
assert(hasOutParam(curr->value) || curr->type == i64 || curr->type == f64);
switch (curr->op) {
case ClzInt64:
- case CtzInt64: lowerCountZeros(curr); break;
- case EqZInt64: lowerEqZInt64(curr); break;
- case ExtendSInt32: lowerExtendSInt32(curr); break;
- case ExtendUInt32: lowerExtendUInt32(curr); break;
- case WrapInt64: lowerWrapInt64(curr); break;
- case ReinterpretFloat64: lowerReinterpretFloat64(curr); break;
- case ReinterpretInt64: lowerReinterpretInt64(curr); break;
+ case CtzInt64:
+ lowerCountZeros(curr);
+ break;
+ case EqZInt64:
+ lowerEqZInt64(curr);
+ break;
+ case ExtendSInt32:
+ lowerExtendSInt32(curr);
+ break;
+ case ExtendUInt32:
+ lowerExtendUInt32(curr);
+ break;
+ case WrapInt64:
+ lowerWrapInt64(curr);
+ break;
+ case ReinterpretFloat64:
+ lowerReinterpretFloat64(curr);
+ break;
+ case ReinterpretInt64:
+ lowerReinterpretInt64(curr);
+ break;
case TruncSFloat32ToInt64:
case TruncUFloat32ToInt64:
case TruncSFloat64ToInt64:
- case TruncUFloat64ToInt64: lowerTruncFloatToInt(curr); break;
+ case TruncUFloat64ToInt64:
+ lowerTruncFloatToInt(curr);
+ break;
case ConvertSInt64ToFloat32:
case ConvertSInt64ToFloat64:
case ConvertUInt64ToFloat32:
- case ConvertUInt64ToFloat64: lowerConvertIntToFloat(curr); break;
+ case ConvertUInt64ToFloat64:
+ lowerConvertIntToFloat(curr);
+ break;
case PopcntInt64:
std::cerr << "i64.popcnt should already be removed" << std::endl;
WASM_UNREACHABLE();
@@ -855,117 +812,104 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
}
}
- Block* lowerAdd(Block* result, TempVar&& leftLow, TempVar&& leftHigh,
- TempVar&& rightLow, TempVar&& rightHigh) {
+ Block* lowerAdd(Block* result,
+ TempVar&& leftLow,
+ TempVar&& leftHigh,
+ TempVar&& rightLow,
+ TempVar&& rightHigh) {
TempVar lowResult = getTemp();
TempVar highResult = getTemp();
SetLocal* addLow = builder->makeSetLocal(
lowResult,
- builder->makeBinary(
- AddInt32,
- builder->makeGetLocal(leftLow, i32),
- builder->makeGetLocal(rightLow, i32)
- )
- );
+ builder->makeBinary(AddInt32,
+ builder->makeGetLocal(leftLow, i32),
+ builder->makeGetLocal(rightLow, i32)));
SetLocal* addHigh = builder->makeSetLocal(
highResult,
- builder->makeBinary(
- AddInt32,
- builder->makeGetLocal(leftHigh, i32),
- builder->makeGetLocal(rightHigh, i32)
- )
- );
+ builder->makeBinary(AddInt32,
+ builder->makeGetLocal(leftHigh, i32),
+ builder->makeGetLocal(rightHigh, i32)));
SetLocal* carryBit = builder->makeSetLocal(
highResult,
- builder->makeBinary(
- AddInt32,
- builder->makeGetLocal(highResult, i32),
- builder->makeConst(Literal(int32_t(1)))
- )
- );
- If* checkOverflow = builder->makeIf(
- builder->makeBinary(
- LtUInt32,
- builder->makeGetLocal(lowResult, i32),
- builder->makeGetLocal(rightLow, i32)
- ),
- carryBit
- );
+ builder->makeBinary(AddInt32,
+ builder->makeGetLocal(highResult, i32),
+ builder->makeConst(Literal(int32_t(1)))));
+ If* checkOverflow =
+ builder->makeIf(builder->makeBinary(LtUInt32,
+ builder->makeGetLocal(lowResult, i32),
+ builder->makeGetLocal(rightLow, i32)),
+ carryBit);
GetLocal* getLow = builder->makeGetLocal(lowResult, i32);
result = builder->blockify(result, addLow, addHigh, checkOverflow, getLow);
setOutParam(result, std::move(highResult));
return result;
}
- Block* lowerSub(Block* result, TempVar&& leftLow, TempVar&& leftHigh,
- TempVar&& rightLow, TempVar&& rightHigh) {
+ Block* lowerSub(Block* result,
+ TempVar&& leftLow,
+ TempVar&& leftHigh,
+ TempVar&& rightLow,
+ TempVar&& rightHigh) {
TempVar lowResult = getTemp();
TempVar highResult = getTemp();
TempVar borrow = getTemp();
SetLocal* subLow = builder->makeSetLocal(
lowResult,
- builder->makeBinary(
- SubInt32,
- builder->makeGetLocal(leftLow, i32),
- builder->makeGetLocal(rightLow, i32)
- )
- );
+ builder->makeBinary(SubInt32,
+ builder->makeGetLocal(leftLow, i32),
+ builder->makeGetLocal(rightLow, i32)));
SetLocal* borrowBit = builder->makeSetLocal(
borrow,
- builder->makeBinary(
- LtUInt32,
- builder->makeGetLocal(leftLow, i32),
- builder->makeGetLocal(rightLow, i32)
- )
- );
+ builder->makeBinary(LtUInt32,
+ builder->makeGetLocal(leftLow, i32),
+ builder->makeGetLocal(rightLow, i32)));
SetLocal* subHigh1 = builder->makeSetLocal(
highResult,
- builder->makeBinary(
- AddInt32,
- builder->makeGetLocal(borrow, i32),
- builder->makeGetLocal(rightHigh, i32)
- )
- );
+ builder->makeBinary(AddInt32,
+ builder->makeGetLocal(borrow, i32),
+ builder->makeGetLocal(rightHigh, i32)));
SetLocal* subHigh2 = builder->makeSetLocal(
highResult,
- builder->makeBinary(
- SubInt32,
- builder->makeGetLocal(leftHigh, i32),
- builder->makeGetLocal(highResult, i32)
- )
- );
+ builder->makeBinary(SubInt32,
+ builder->makeGetLocal(leftHigh, i32),
+ builder->makeGetLocal(highResult, i32)));
GetLocal* getLow = builder->makeGetLocal(lowResult, i32);
- result = builder->blockify(result, subLow, borrowBit, subHigh1, subHigh2, getLow);
+ result =
+ builder->blockify(result, subLow, borrowBit, subHigh1, subHigh2, getLow);
setOutParam(result, std::move(highResult));
return result;
}
- Block* lowerBitwise(BinaryOp op, Block* result, TempVar&& leftLow,
- TempVar&& leftHigh, TempVar&& rightLow,
+ Block* lowerBitwise(BinaryOp op,
+ Block* result,
+ TempVar&& leftLow,
+ TempVar&& leftHigh,
+ TempVar&& rightLow,
TempVar&& rightHigh) {
BinaryOp op32;
switch (op) {
- case AndInt64: op32 = AndInt32; break;
- case OrInt64: op32 = OrInt32; break;
- case XorInt64: op32 = XorInt32; break;
- default: abort();
+ case AndInt64:
+ op32 = AndInt32;
+ break;
+ case OrInt64:
+ op32 = OrInt32;
+ break;
+ case XorInt64:
+ op32 = XorInt32;
+ break;
+ default:
+ abort();
}
result = builder->blockify(
result,
builder->makeSetLocal(
rightHigh,
- builder->makeBinary(
- op32,
- builder->makeGetLocal(leftHigh, i32),
- builder->makeGetLocal(rightHigh, i32)
- )
- ),
- builder->makeBinary(
- op32,
- builder->makeGetLocal(leftLow, i32),
- builder->makeGetLocal(rightLow, i32)
- )
- );
+ builder->makeBinary(op32,
+ builder->makeGetLocal(leftHigh, i32),
+ builder->makeGetLocal(rightHigh, i32))),
+ builder->makeBinary(op32,
+ builder->makeGetLocal(leftLow, i32),
+ builder->makeGetLocal(rightLow, i32)));
setOutParam(result, std::move(rightHigh));
return result;
}
@@ -974,14 +918,10 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
return builder->blockify(
builder->makeSetLocal(
highBits,
- builder->makeBinary(
- ShlInt32,
- builder->makeGetLocal(leftLow, i32),
- builder->makeGetLocal(shift, i32)
- )
- ),
- builder->makeConst(Literal(int32_t(0)))
- );
+ builder->makeBinary(ShlInt32,
+ builder->makeGetLocal(leftLow, i32),
+ builder->makeGetLocal(shift, i32))),
+ builder->makeConst(Literal(int32_t(0))));
}
// a >> b where `b` >= 32
@@ -994,58 +934,43 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
return builder->blockify(
builder->makeSetLocal(
highBits,
- builder->makeBinary(
- ShrSInt32,
- builder->makeGetLocal(leftHigh, i32),
- builder->makeConst(Literal(int32_t(31)))
- )
- ),
- builder->makeBinary(
- ShrSInt32,
- builder->makeGetLocal(leftHigh, i32),
- builder->makeGetLocal(shift, i32)
- )
- );
+ builder->makeBinary(ShrSInt32,
+ builder->makeGetLocal(leftHigh, i32),
+ builder->makeConst(Literal(int32_t(31))))),
+ builder->makeBinary(ShrSInt32,
+ builder->makeGetLocal(leftHigh, i32),
+ builder->makeGetLocal(shift, i32)));
}
Block* makeLargeShrU(Index highBits, Index leftHigh, Index shift) {
return builder->blockify(
builder->makeSetLocal(highBits, builder->makeConst(Literal(int32_t(0)))),
- builder->makeBinary(
- ShrUInt32,
- builder->makeGetLocal(leftHigh, i32),
- builder->makeGetLocal(shift, i32)
- )
- );
+ builder->makeBinary(ShrUInt32,
+ builder->makeGetLocal(leftHigh, i32),
+ builder->makeGetLocal(shift, i32)));
}
- Block* makeSmallShl(Index highBits, Index leftLow, Index leftHigh,
- Index shift, Binary* shiftMask, Binary* widthLessShift) {
+ Block* makeSmallShl(Index highBits,
+ Index leftLow,
+ Index leftHigh,
+ Index shift,
+ Binary* shiftMask,
+ Binary* widthLessShift) {
Binary* shiftedInBits = builder->makeBinary(
AndInt32,
shiftMask,
builder->makeBinary(
- ShrUInt32,
- builder->makeGetLocal(leftLow, i32),
- widthLessShift
- )
- );
- Binary* shiftHigh = builder->makeBinary(
- ShlInt32,
- builder->makeGetLocal(leftHigh, i32),
- builder->makeGetLocal(shift, i32)
- );
+ ShrUInt32, builder->makeGetLocal(leftLow, i32), widthLessShift));
+ Binary* shiftHigh =
+ builder->makeBinary(ShlInt32,
+ builder->makeGetLocal(leftHigh, i32),
+ builder->makeGetLocal(shift, i32));
return builder->blockify(
builder->makeSetLocal(
- highBits,
- builder->makeBinary(OrInt32, shiftedInBits, shiftHigh)
- ),
- builder->makeBinary(
- ShlInt32,
- builder->makeGetLocal(leftLow, i32),
- builder->makeGetLocal(shift, i32)
- )
- );
+ highBits, builder->makeBinary(OrInt32, shiftedInBits, shiftHigh)),
+ builder->makeBinary(ShlInt32,
+ builder->makeGetLocal(leftLow, i32),
+ builder->makeGetLocal(shift, i32)));
}
// a >> b where `b` < 32
@@ -1054,66 +979,58 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
//
// hi = leftHigh >> b
// lo = (leftLow >>> b) | (leftHigh << (32 - b))
- Block* makeSmallShrS(Index highBits, Index leftLow, Index leftHigh,
- Index shift, Binary* shiftMask, Binary* widthLessShift) {
+ Block* makeSmallShrS(Index highBits,
+ Index leftLow,
+ Index leftHigh,
+ Index shift,
+ Binary* shiftMask,
+ Binary* widthLessShift) {
Binary* shiftedInBits = builder->makeBinary(
ShlInt32,
builder->makeBinary(
- AndInt32,
- shiftMask,
- builder->makeGetLocal(leftHigh, i32)
- ),
- widthLessShift
- );
- Binary* shiftLow = builder->makeBinary(
- ShrUInt32,
- builder->makeGetLocal(leftLow, i32),
- builder->makeGetLocal(shift, i32)
- );
+ AndInt32, shiftMask, builder->makeGetLocal(leftHigh, i32)),
+ widthLessShift);
+ Binary* shiftLow = builder->makeBinary(ShrUInt32,
+ builder->makeGetLocal(leftLow, i32),
+ builder->makeGetLocal(shift, i32));
return builder->blockify(
builder->makeSetLocal(
highBits,
- builder->makeBinary(
- ShrSInt32,
- builder->makeGetLocal(leftHigh, i32),
- builder->makeGetLocal(shift, i32)
- )
- ),
- builder->makeBinary(OrInt32, shiftedInBits, shiftLow)
- );
+ builder->makeBinary(ShrSInt32,
+ builder->makeGetLocal(leftHigh, i32),
+ builder->makeGetLocal(shift, i32))),
+ builder->makeBinary(OrInt32, shiftedInBits, shiftLow));
}
- Block* makeSmallShrU(Index highBits, Index leftLow, Index leftHigh,
- Index shift, Binary* shiftMask, Binary* widthLessShift) {
+ Block* makeSmallShrU(Index highBits,
+ Index leftLow,
+ Index leftHigh,
+ Index shift,
+ Binary* shiftMask,
+ Binary* widthLessShift) {
Binary* shiftedInBits = builder->makeBinary(
ShlInt32,
builder->makeBinary(
- AndInt32,
- shiftMask,
- builder->makeGetLocal(leftHigh, i32)
- ),
- widthLessShift
- );
- Binary* shiftLow = builder->makeBinary(
- ShrUInt32,
- builder->makeGetLocal(leftLow, i32),
- builder->makeGetLocal(shift, i32)
- );
+ AndInt32, shiftMask, builder->makeGetLocal(leftHigh, i32)),
+ widthLessShift);
+ Binary* shiftLow = builder->makeBinary(ShrUInt32,
+ builder->makeGetLocal(leftLow, i32),
+ builder->makeGetLocal(shift, i32));
return builder->blockify(
builder->makeSetLocal(
highBits,
- builder->makeBinary(
- ShrUInt32,
- builder->makeGetLocal(leftHigh, i32),
- builder->makeGetLocal(shift, i32)
- )
- ),
- builder->makeBinary(OrInt32, shiftedInBits, shiftLow)
- );
+ builder->makeBinary(ShrUInt32,
+ builder->makeGetLocal(leftHigh, i32),
+ builder->makeGetLocal(shift, i32))),
+ builder->makeBinary(OrInt32, shiftedInBits, shiftLow));
}
- Block* lowerShift(BinaryOp op, Block* result, TempVar&& leftLow,
- TempVar&& leftHigh, TempVar&& rightLow, TempVar&& rightHigh) {
+ Block* lowerShift(BinaryOp op,
+ Block* result,
+ TempVar&& leftLow,
+ TempVar&& leftHigh,
+ TempVar&& rightLow,
+ TempVar&& rightHigh) {
assert(op == ShlInt64 || op == ShrUInt64 || op == ShrSInt64);
// shift left lowered as:
// if 32 <= rightLow % 64:
@@ -1125,191 +1042,192 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
TempVar shift = getTemp();
SetLocal* setShift = builder->makeSetLocal(
shift,
- builder->makeBinary(
- AndInt32,
- builder->makeGetLocal(rightLow, i32),
- builder->makeConst(Literal(int32_t(32 - 1)))
- )
- );
+ builder->makeBinary(AndInt32,
+ builder->makeGetLocal(rightLow, i32),
+ builder->makeConst(Literal(int32_t(32 - 1)))));
Binary* isLargeShift = builder->makeBinary(
LeUInt32,
builder->makeConst(Literal(int32_t(32))),
- builder->makeBinary(
- AndInt32,
- builder->makeGetLocal(rightLow, i32),
- builder->makeConst(Literal(int32_t(64 - 1)))
- )
- );
+ builder->makeBinary(AndInt32,
+ builder->makeGetLocal(rightLow, i32),
+ builder->makeConst(Literal(int32_t(64 - 1)))));
Block* largeShiftBlock;
switch (op) {
case ShlInt64:
- largeShiftBlock = makeLargeShl(rightHigh, leftLow, shift); break;
+ largeShiftBlock = makeLargeShl(rightHigh, leftLow, shift);
+ break;
case ShrSInt64:
- largeShiftBlock = makeLargeShrS(rightHigh, leftHigh, shift); break;
+ largeShiftBlock = makeLargeShrS(rightHigh, leftHigh, shift);
+ break;
case ShrUInt64:
- largeShiftBlock = makeLargeShrU(rightHigh, leftHigh, shift); break;
- default: abort();
+ largeShiftBlock = makeLargeShrU(rightHigh, leftHigh, shift);
+ break;
+ default:
+ abort();
}
Binary* shiftMask = builder->makeBinary(
SubInt32,
- builder->makeBinary(
- ShlInt32,
- builder->makeConst(Literal(int32_t(1))),
- builder->makeGetLocal(shift, i32)
- ),
- builder->makeConst(Literal(int32_t(1)))
- );
- Binary* widthLessShift = builder->makeBinary(
- SubInt32,
- builder->makeConst(Literal(int32_t(32))),
- builder->makeGetLocal(shift, i32)
- );
+ builder->makeBinary(ShlInt32,
+ builder->makeConst(Literal(int32_t(1))),
+ builder->makeGetLocal(shift, i32)),
+ builder->makeConst(Literal(int32_t(1))));
+ Binary* widthLessShift =
+ builder->makeBinary(SubInt32,
+ builder->makeConst(Literal(int32_t(32))),
+ builder->makeGetLocal(shift, i32));
Block* smallShiftBlock;
- switch(op) {
+ switch (op) {
case ShlInt64: {
- smallShiftBlock = makeSmallShl(rightHigh, leftLow, leftHigh,
- shift, shiftMask, widthLessShift);
+ smallShiftBlock = makeSmallShl(
+ rightHigh, leftLow, leftHigh, shift, shiftMask, widthLessShift);
break;
}
case ShrSInt64: {
- smallShiftBlock = makeSmallShrS(rightHigh, leftLow, leftHigh,
- shift, shiftMask, widthLessShift);
+ smallShiftBlock = makeSmallShrS(
+ rightHigh, leftLow, leftHigh, shift, shiftMask, widthLessShift);
break;
}
case ShrUInt64: {
- smallShiftBlock = makeSmallShrU(rightHigh, leftLow, leftHigh,
- shift, shiftMask, widthLessShift);
+ smallShiftBlock = makeSmallShrU(
+ rightHigh, leftLow, leftHigh, shift, shiftMask, widthLessShift);
break;
}
- default: abort();
+ default:
+ abort();
}
- If* ifLargeShift = builder->makeIf(
- isLargeShift,
- largeShiftBlock,
- smallShiftBlock
- );
+ If* ifLargeShift =
+ builder->makeIf(isLargeShift, largeShiftBlock, smallShiftBlock);
result = builder->blockify(result, setShift, ifLargeShift);
setOutParam(result, std::move(rightHigh));
return result;
}
- Block* lowerEq(Block* result, TempVar&& leftLow, TempVar&& leftHigh,
- TempVar&& rightLow, TempVar&& rightHigh) {
+ Block* lowerEq(Block* result,
+ TempVar&& leftLow,
+ TempVar&& leftHigh,
+ TempVar&& rightLow,
+ TempVar&& rightHigh) {
return builder->blockify(
result,
builder->makeBinary(
AndInt32,
- builder->makeBinary(
- EqInt32,
- builder->makeGetLocal(leftLow, i32),
- builder->makeGetLocal(rightLow, i32)
- ),
- builder->makeBinary(
- EqInt32,
- builder->makeGetLocal(leftHigh, i32),
- builder->makeGetLocal(rightHigh, i32)
- )
- )
- );
+ builder->makeBinary(EqInt32,
+ builder->makeGetLocal(leftLow, i32),
+ builder->makeGetLocal(rightLow, i32)),
+ builder->makeBinary(EqInt32,
+ builder->makeGetLocal(leftHigh, i32),
+ builder->makeGetLocal(rightHigh, i32))));
}
- Block* lowerNe(Block* result, TempVar&& leftLow, TempVar&& leftHigh,
- TempVar&& rightLow, TempVar&& rightHigh) {
+ Block* lowerNe(Block* result,
+ TempVar&& leftLow,
+ TempVar&& leftHigh,
+ TempVar&& rightLow,
+ TempVar&& rightHigh) {
return builder->blockify(
result,
builder->makeBinary(
OrInt32,
- builder->makeBinary(
- NeInt32,
- builder->makeGetLocal(leftLow, i32),
- builder->makeGetLocal(rightLow, i32)
- ),
- builder->makeBinary(
- NeInt32,
- builder->makeGetLocal(leftHigh, i32),
- builder->makeGetLocal(rightHigh, i32)
- )
- )
- );
+ builder->makeBinary(NeInt32,
+ builder->makeGetLocal(leftLow, i32),
+ builder->makeGetLocal(rightLow, i32)),
+ builder->makeBinary(NeInt32,
+ builder->makeGetLocal(leftHigh, i32),
+ builder->makeGetLocal(rightHigh, i32))));
}
- Block* lowerUComp(BinaryOp op, Block* result, TempVar&& leftLow,
- TempVar&& leftHigh, TempVar&& rightLow,
+ Block* lowerUComp(BinaryOp op,
+ Block* result,
+ TempVar&& leftLow,
+ TempVar&& leftHigh,
+ TempVar&& rightLow,
TempVar&& rightHigh) {
BinaryOp highOp, lowOp;
switch (op) {
- case LtUInt64: highOp = LtUInt32; lowOp = LtUInt32; break;
- case LeUInt64: highOp = LtUInt32; lowOp = LeUInt32; break;
- case GtUInt64: highOp = GtUInt32; lowOp = GtUInt32; break;
- case GeUInt64: highOp = GtUInt32; lowOp = GeUInt32; break;
- default: abort();
+ case LtUInt64:
+ highOp = LtUInt32;
+ lowOp = LtUInt32;
+ break;
+ case LeUInt64:
+ highOp = LtUInt32;
+ lowOp = LeUInt32;
+ break;
+ case GtUInt64:
+ highOp = GtUInt32;
+ lowOp = GtUInt32;
+ break;
+ case GeUInt64:
+ highOp = GtUInt32;
+ lowOp = GeUInt32;
+ break;
+ default:
+ abort();
}
- Binary* compHigh = builder->makeBinary(
- highOp,
- builder->makeGetLocal(leftHigh, i32),
- builder->makeGetLocal(rightHigh, i32)
- );
- Binary* eqHigh = builder->makeBinary(
- EqInt32,
- builder->makeGetLocal(leftHigh, i32),
- builder->makeGetLocal(rightHigh, i32)
- );
- Binary* compLow = builder->makeBinary(
- lowOp,
- builder->makeGetLocal(leftLow, i32),
- builder->makeGetLocal(rightLow, i32)
- );
+ Binary* compHigh =
+ builder->makeBinary(highOp,
+ builder->makeGetLocal(leftHigh, i32),
+ builder->makeGetLocal(rightHigh, i32));
+ Binary* eqHigh = builder->makeBinary(EqInt32,
+ builder->makeGetLocal(leftHigh, i32),
+ builder->makeGetLocal(rightHigh, i32));
+ Binary* compLow = builder->makeBinary(lowOp,
+ builder->makeGetLocal(leftLow, i32),
+ builder->makeGetLocal(rightLow, i32));
return builder->blockify(
result,
builder->makeBinary(
- OrInt32,
- compHigh,
- builder->makeBinary(AndInt32, eqHigh, compLow)
- )
- );
+ OrInt32, compHigh, builder->makeBinary(AndInt32, eqHigh, compLow)));
}
- Block* lowerSComp(BinaryOp op, Block* result, TempVar&& leftLow,
- TempVar&& leftHigh, TempVar&& rightLow,
- TempVar&& rightHigh) {
+ Block* lowerSComp(BinaryOp op,
+ Block* result,
+ TempVar&& leftLow,
+ TempVar&& leftHigh,
+ TempVar&& rightLow,
+ TempVar&& rightHigh) {
BinaryOp highOp1, highOp2, lowOp;
switch (op) {
- case LtSInt64: highOp1 = LtSInt32; highOp2 = LeSInt32; lowOp = GeUInt32; break;
- case LeSInt64: highOp1 = LtSInt32; highOp2 = LeSInt32; lowOp = GtUInt32; break;
- case GtSInt64: highOp1 = GtSInt32; highOp2 = GeSInt32; lowOp = LeUInt32; break;
- case GeSInt64: highOp1 = GtSInt32; highOp2 = GeSInt32; lowOp = LtUInt32; break;
- default: abort();
+ case LtSInt64:
+ highOp1 = LtSInt32;
+ highOp2 = LeSInt32;
+ lowOp = GeUInt32;
+ break;
+ case LeSInt64:
+ highOp1 = LtSInt32;
+ highOp2 = LeSInt32;
+ lowOp = GtUInt32;
+ break;
+ case GtSInt64:
+ highOp1 = GtSInt32;
+ highOp2 = GeSInt32;
+ lowOp = LeUInt32;
+ break;
+ case GeSInt64:
+ highOp1 = GtSInt32;
+ highOp2 = GeSInt32;
+ lowOp = LtUInt32;
+ break;
+ default:
+ abort();
}
- Binary* compHigh1 = builder->makeBinary(
- highOp1,
- builder->makeGetLocal(leftHigh, i32),
- builder->makeGetLocal(rightHigh, i32)
- );
- Binary* compHigh2 = builder->makeBinary(
- highOp2,
- builder->makeGetLocal(leftHigh, i32),
- builder->makeGetLocal(rightHigh, i32)
- );
- Binary* compLow = builder->makeBinary(
- lowOp,
- builder->makeGetLocal(leftLow, i32),
- builder->makeGetLocal(rightLow, i32)
- );
- If* lowIf = builder->makeIf(
- compLow,
- builder->makeConst(Literal(int32_t(0))),
- builder->makeConst(Literal(int32_t(1)))
- );
+ Binary* compHigh1 =
+ builder->makeBinary(highOp1,
+ builder->makeGetLocal(leftHigh, i32),
+ builder->makeGetLocal(rightHigh, i32));
+ Binary* compHigh2 =
+ builder->makeBinary(highOp2,
+ builder->makeGetLocal(leftHigh, i32),
+ builder->makeGetLocal(rightHigh, i32));
+ Binary* compLow = builder->makeBinary(lowOp,
+ builder->makeGetLocal(leftLow, i32),
+ builder->makeGetLocal(rightLow, i32));
+ If* lowIf = builder->makeIf(compLow,
+ builder->makeConst(Literal(int32_t(0))),
+ builder->makeConst(Literal(int32_t(1))));
If* highIf2 = builder->makeIf(
- compHigh2,
- lowIf,
- builder->makeConst(Literal(int32_t(0)))
- );
+ compHigh2, lowIf, builder->makeConst(Literal(int32_t(0))));
If* highIf1 = builder->makeIf(
- compHigh1,
- builder->makeConst(Literal(int32_t(1))),
- highIf2
- );
+ compHigh1, builder->makeConst(Literal(int32_t(1))), highIf2);
return builder->blockify(result, highIf1);
}
@@ -1339,14 +1257,18 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
case GtSInt64:
case GtUInt64:
case GeSInt64:
- case GeUInt64: return true;
- default: return false;
+ case GeUInt64:
+ return true;
+ default:
+ return false;
}
}
void visitBinary(Binary* curr) {
- if (handleUnreachable(curr)) return;
- if (!binaryNeedsLowering(curr->op)) return;
+ if (handleUnreachable(curr))
+ return;
+ if (!binaryNeedsLowering(curr->op))
+ return;
// left and right reachable, lower normally
TempVar leftLow = getTemp();
TempVar leftHigh = fetchOutParam(curr->left);
@@ -1357,15 +1279,19 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
Block* result = builder->blockify(setLeft, setRight);
switch (curr->op) {
case AddInt64: {
- replaceCurrent(
- lowerAdd(result, std::move(leftLow), std::move(leftHigh),
- std::move(rightLow), std::move(rightHigh)));
+ replaceCurrent(lowerAdd(result,
+ std::move(leftLow),
+ std::move(leftHigh),
+ std::move(rightLow),
+ std::move(rightHigh)));
break;
}
case SubInt64: {
- replaceCurrent(
- lowerSub(result, std::move(leftLow), std::move(leftHigh),
- std::move(rightLow), std::move(rightHigh)));
+ replaceCurrent(lowerSub(result,
+ std::move(leftLow),
+ std::move(leftHigh),
+ std::move(rightLow),
+ std::move(rightHigh)));
break;
}
case MulInt64:
@@ -1375,59 +1301,69 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
case RemUInt64:
case RotLInt64:
case RotRInt64:
- std::cerr << "should have been removed by now " << curr->op << std::endl;
+ std::cerr << "should have been removed by now " << curr->op
+ << std::endl;
WASM_UNREACHABLE();
case AndInt64:
case OrInt64:
case XorInt64: {
- replaceCurrent(
- lowerBitwise(curr->op, result, std::move(leftLow),
- std::move(leftHigh), std::move(rightLow),
- std::move(rightHigh))
- );
+ replaceCurrent(lowerBitwise(curr->op,
+ result,
+ std::move(leftLow),
+ std::move(leftHigh),
+ std::move(rightLow),
+ std::move(rightHigh)));
break;
}
case ShlInt64:
case ShrSInt64:
case ShrUInt64: {
- replaceCurrent(
- lowerShift(curr->op, result, std::move(leftLow), std::move(leftHigh),
- std::move(rightLow), std::move(rightHigh))
- );
+ replaceCurrent(lowerShift(curr->op,
+ result,
+ std::move(leftLow),
+ std::move(leftHigh),
+ std::move(rightLow),
+ std::move(rightHigh)));
break;
}
case EqInt64: {
- replaceCurrent(
- lowerEq(result, std::move(leftLow), std::move(leftHigh),
- std::move(rightLow), std::move(rightHigh))
- );
+ replaceCurrent(lowerEq(result,
+ std::move(leftLow),
+ std::move(leftHigh),
+ std::move(rightLow),
+ std::move(rightHigh)));
break;
}
case NeInt64: {
- replaceCurrent(
- lowerNe(result, std::move(leftLow), std::move(leftHigh),
- std::move(rightLow), std::move(rightHigh))
- );
+ replaceCurrent(lowerNe(result,
+ std::move(leftLow),
+ std::move(leftHigh),
+ std::move(rightLow),
+ std::move(rightHigh)));
break;
}
case LtSInt64:
case LeSInt64:
case GtSInt64:
case GeSInt64:
- replaceCurrent(
- lowerSComp(curr->op, result, std::move(leftLow), std::move(leftHigh),
- std::move(rightLow), std::move(rightHigh))
- );
- break;
+ replaceCurrent(lowerSComp(curr->op,
+ result,
+ std::move(leftLow),
+ std::move(leftHigh),
+ std::move(rightLow),
+ std::move(rightHigh)));
+ break;
case LtUInt64:
case LeUInt64:
case GtUInt64:
case GeUInt64: {
- replaceCurrent(
- lowerUComp(curr->op, result, std::move(leftLow), std::move(leftHigh),
- std::move(rightLow), std::move(rightHigh))
- );
+ replaceCurrent(lowerUComp(curr->op,
+ result,
+ std::move(leftLow),
+ std::move(leftHigh),
+ std::move(rightLow),
+ std::move(rightHigh)));
break;
}
default: {
@@ -1438,7 +1374,8 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
}
void visitSelect(Select* curr) {
- if (handleUnreachable(curr)) return;
+ if (handleUnreachable(curr))
+ return;
if (!hasOutParam(curr->ifTrue)) {
assert(!hasOutParam(curr->ifFalse));
return;
@@ -1452,40 +1389,33 @@ struct I64ToI32Lowering : public WalkerPass<PostWalker<I64ToI32Lowering>> {
builder->makeSetLocal(
lowBits,
builder->makeSelect(
- builder->makeGetLocal(cond, i32),
- curr->ifTrue,
- curr->ifFalse
- )
- ),
+ builder->makeGetLocal(cond, i32), curr->ifTrue, curr->ifFalse)),
builder->makeSetLocal(
highBits,
builder->makeSelect(
builder->makeGetLocal(cond, i32),
builder->makeGetLocal(fetchOutParam(curr->ifTrue), i32),
- builder->makeGetLocal(fetchOutParam(curr->ifFalse), i32)
- )
- ),
- builder->makeGetLocal(lowBits, i32)
- );
+ builder->makeGetLocal(fetchOutParam(curr->ifFalse), i32))),
+ builder->makeGetLocal(lowBits, i32));
setOutParam(result, std::move(highBits));
replaceCurrent(result);
}
void visitDrop(Drop* curr) {
- if (!hasOutParam(curr->value)) return;
+ if (!hasOutParam(curr->value))
+ return;
// free temp var
fetchOutParam(curr->value);
}
void visitReturn(Return* curr) {
- if (!hasOutParam(curr->value)) return;
+ if (!hasOutParam(curr->value))
+ return;
TempVar lowBits = getTemp();
TempVar highBits = fetchOutParam(curr->value);
SetLocal* setLow = builder->makeSetLocal(lowBits, curr->value);
SetGlobal* setHigh = builder->makeSetGlobal(
- INT64_TO_32_HIGH_BITS,
- builder->makeGetLocal(highBits, i32)
- );
+ INT64_TO_32_HIGH_BITS, builder->makeGetLocal(highBits, i32));
curr->value = builder->makeGetLocal(lowBits, i32);
Block* result = builder->blockify(setLow, setHigh, curr);
replaceCurrent(result);
@@ -1502,7 +1432,7 @@ private:
TempVar getTemp(Type ty = i32) {
Index ret;
- auto &freeList = freeTemps[(int) ty];
+ auto& freeList = freeTemps[(int)ty];
if (freeList.size() > 0) {
ret = freeList.back();
freeList.pop_back();
@@ -1538,7 +1468,8 @@ private:
// unconditionally before themselves, so it is not valid for an if,
// in particular.
bool handleUnreachable(Expression* curr) {
- if (curr->type != unreachable) return false;
+ if (curr->type != unreachable)
+ return false;
std::vector<Expression*> children;
bool hasUnreachable = false;
for (auto* child : ChildIterator(curr)) {
@@ -1549,7 +1480,8 @@ private:
}
children.push_back(child);
}
- if (!hasUnreachable) return false;
+ if (!hasUnreachable)
+ return false;
// This has an unreachable child, so we can replace it with
// the children.
auto* block = builder->makeBlock(children);
@@ -1559,8 +1491,6 @@ private:
}
};
-Pass *createI64ToI32LoweringPass() {
- return new I64ToI32Lowering();
-}
+Pass* createI64ToI32LoweringPass() { return new I64ToI32Lowering(); }
-}
+} // namespace wasm
diff --git a/src/passes/Inlining.cpp b/src/passes/Inlining.cpp
index f801662e0..681109af8 100644
--- a/src/passes/Inlining.cpp
+++ b/src/passes/Inlining.cpp
@@ -32,14 +32,14 @@
#include <atomic>
-#include "wasm.h"
-#include "pass.h"
-#include "wasm-builder.h"
#include "ir/literal-utils.h"
#include "ir/module-utils.h"
#include "ir/utils.h"
#include "parsing.h"
+#include "pass.h"
#include "passes/opt-utils.h"
+#include "wasm-builder.h"
+#include "wasm.h"
namespace wasm {
@@ -79,25 +79,30 @@ struct FunctionInfo {
bool worthInlining(PassOptions& options) {
// if it's big, it's just not worth doing (TODO: investigate more)
- if (size > FLEXIBLE_SIZE_LIMIT) return false;
+ if (size > FLEXIBLE_SIZE_LIMIT)
+ return false;
// if it's so small we have a guarantee that after we optimize the
// size will not increase, inline it
- if (size <= INLINING_OPTIMIZING_WILL_DECREASE_SIZE_LIMIT) return true;
+ if (size <= INLINING_OPTIMIZING_WILL_DECREASE_SIZE_LIMIT)
+ return true;
// if it has one use, then inlining it would likely reduce code size
// since we are just moving code around, + optimizing, so worth it
// if small enough that we are pretty sure its ok
- if (calls == 1 && !usedGlobally && size <= CAREFUL_SIZE_LIMIT) return true;
+ if (calls == 1 && !usedGlobally && size <= CAREFUL_SIZE_LIMIT)
+ return true;
// more than one use, so we can't eliminate it after inlining,
// so only worth it if we really care about speed and don't care
// about size, and if it's lightweight so a good candidate for
// speeding us up.
- return options.optimizeLevel >= 3 && options.shrinkLevel == 0 && lightweight;
+ return options.optimizeLevel >= 3 && options.shrinkLevel == 0 &&
+ lightweight;
}
};
typedef std::unordered_map<Name, FunctionInfo> NameInfoMap;
-struct FunctionInfoScanner : public WalkerPass<PostWalker<FunctionInfoScanner>> {
+struct FunctionInfoScanner
+ : public WalkerPass<PostWalker<FunctionInfoScanner>> {
bool isFunctionParallel() override { return true; }
FunctionInfoScanner(NameInfoMap* infos) : infos(infos) {}
@@ -112,7 +117,8 @@ struct FunctionInfoScanner : public WalkerPass<PostWalker<FunctionInfoScanner>>
}
void visitCall(Call* curr) {
- assert(infos->count(curr->target) > 0); // can't add a new element in parallel
+ // can't add a new element in parallel
+ assert(infos->count(curr->target) > 0);
(*infos)[curr->target].calls++;
// having a call is not lightweight
(*infos)[getFunction()->name].lightweight = false;
@@ -130,12 +136,14 @@ struct InliningAction {
Expression** callSite;
Function* contents;
- InliningAction(Expression** callSite, Function* contents) : callSite(callSite), contents(contents) {}
+ InliningAction(Expression** callSite, Function* contents)
+ : callSite(callSite), contents(contents) {}
};
struct InliningState {
std::unordered_set<Name> worthInlining;
- std::unordered_map<Name, std::vector<InliningAction>> actionsForFunction; // function name => actions that can be performed in it
+ // function name => actions that can be performed in it
+ std::unordered_map<Name, std::vector<InliningAction>> actionsForFunction;
};
struct Planner : public WalkerPass<PostWalker<Planner>> {
@@ -143,30 +151,27 @@ struct Planner : public WalkerPass<PostWalker<Planner>> {
Planner(InliningState* state) : state(state) {}
- Planner* create() override {
- return new Planner(state);
- }
+ Planner* create() override { return new Planner(state); }
void visitCall(Call* curr) {
// plan to inline if we know this is valid to inline, and if the call is
// actually performed - if it is dead code, it's pointless to inline.
// we also cannot inline ourselves.
- if (state->worthInlining.count(curr->target) &&
- curr->type != unreachable &&
+ if (state->worthInlining.count(curr->target) && curr->type != unreachable &&
curr->target != getFunction()->name) {
- // nest the call in a block. that way the location of the pointer to the call will not
- // change even if we inline multiple times into the same function, otherwise
- // call1(call2()) might be a problem
+ // nest the call in a block. that way the location of the pointer to the
+ // call will not change even if we inline multiple times into the same
+ // function, otherwise call1(call2()) might be a problem
auto* block = Builder(*getModule()).makeBlock(curr);
replaceCurrent(block);
- assert(state->actionsForFunction.count(getFunction()->name) > 0); // can't add a new element in parallel
- state->actionsForFunction[getFunction()->name].emplace_back(&block->list[0], getModule()->getFunction(curr->target));
+ // can't add a new element in parallel
+ assert(state->actionsForFunction.count(getFunction()->name) > 0);
+ state->actionsForFunction[getFunction()->name].emplace_back(
+ &block->list[0], getModule()->getFunction(curr->target));
}
}
- void doWalkFunction(Function* func) {
- walk(func->body);
- }
+ void doWalkFunction(Function* func) { walk(func->body); }
private:
InliningState* state;
@@ -174,7 +179,8 @@ private:
// Core inlining logic. Modifies the outside function (adding locals as
// needed), and returns the inlined code.
-static Expression* doInlining(Module* module, Function* into, InliningAction& action) {
+static Expression*
+doInlining(Module* module, Function* into, InliningAction& action) {
Function* from = action.contents;
auto* call = (*action.callSite)->cast<Call>();
Builder builder(*module);
@@ -204,11 +210,15 @@ static Expression* doInlining(Module* module, Function* into, InliningAction& ac
}
// assign the operands into the params
for (Index i = 0; i < from->params.size(); i++) {
- block->list.push_back(builder.makeSetLocal(updater.localMapping[i], call->operands[i]));
+ block->list.push_back(
+ builder.makeSetLocal(updater.localMapping[i], call->operands[i]));
}
- // zero out the vars (as we may be in a loop, and may depend on their zero-init value
+ // zero out the vars (as we may be in a loop, and may depend on their
+ // zero-init value
for (Index i = 0; i < from->vars.size(); i++) {
- block->list.push_back(builder.makeSetLocal(updater.localMapping[from->getVarIndexBase() + i], LiteralUtils::makeZero(from->vars[i], *module)));
+ block->list.push_back(
+ builder.makeSetLocal(updater.localMapping[from->getVarIndexBase() + i],
+ LiteralUtils::makeZero(from->vars[i], *module)));
}
// generate and update the inlined contents
auto* contents = ExpressionManipulator::copy(from->body, *module);
@@ -246,7 +256,8 @@ struct Inlining : public Pass {
// can look like it is worth inlining)
while (iterationNumber <= numFunctions) {
#ifdef INLINING_DEBUG
- std::cout << "inlining loop iter " << iterationNumber << " (numFunctions: " << numFunctions << ")\n";
+ std::cout << "inlining loop iter " << iterationNumber
+ << " (numFunctions: " << numFunctions << ")\n";
#endif
calculateInfos(module);
if (!iteration(runner, module)) {
@@ -258,7 +269,8 @@ struct Inlining : public Pass {
void calculateInfos(Module* module) {
infos.clear();
- // fill in info, as we operate on it in parallel (each function to its own entry)
+ // fill in info, as we operate on it in parallel (each function to its own
+ // entry)
for (auto& func : module->functions) {
infos[func->name];
}
@@ -288,8 +300,10 @@ struct Inlining : public Pass {
state.worthInlining.insert(func->name);
}
});
- if (state.worthInlining.size() == 0) return false;
- // fill in actionsForFunction, as we operate on it in parallel (each function to its own entry)
+ if (state.worthInlining.size() == 0)
+ return false;
+ // fill in actionsForFunction, as we operate on it in parallel (each
+ // function to its own entry)
for (auto& func : module->functions) {
state.actionsForFunction[func->name];
}
@@ -302,20 +316,23 @@ struct Inlining : public Pass {
}
// perform inlinings TODO: parallelize
std::unordered_map<Name, Index> inlinedUses; // how many uses we inlined
- std::unordered_set<Function*> inlinedInto; // which functions were inlined into
+ // which functions were inlined into
+ std::unordered_set<Function*> inlinedInto;
for (auto& func : module->functions) {
// if we've inlined a function, don't inline into it in this iteration,
// avoid risk of races
// note that we do not risk stalling progress, as each iteration() will
// inline at least one call before hitting this
- if (inlinedUses.count(func->name)) continue;
+ if (inlinedUses.count(func->name))
+ continue;
for (auto& action : state.actionsForFunction[func->name]) {
auto* inlinedFunction = action.contents;
// if we've inlined into a function, don't inline it in this iteration,
// avoid risk of races
// note that we do not risk stalling progress, as each iteration() will
// inline at least one call before hitting this
- if (inlinedInto.count(inlinedFunction)) continue;
+ if (inlinedInto.count(inlinedFunction))
+ continue;
Name inlinedName = inlinedFunction->name;
#ifdef INLINING_DEBUG
std::cout << "inline " << inlinedName << " into " << func->name << '\n';
@@ -335,23 +352,28 @@ struct Inlining : public Pass {
}
// remove functions that we no longer need after inlining
auto& funcs = module->functions;
- funcs.erase(std::remove_if(funcs.begin(), funcs.end(), [&](const std::unique_ptr<Function>& curr) {
- auto name = curr->name;
- auto& info = infos[name];
- bool canRemove = inlinedUses.count(name) && inlinedUses[name] == info.calls && !info.usedGlobally;
+ funcs.erase(std::remove_if(funcs.begin(),
+ funcs.end(),
+ [&](const std::unique_ptr<Function>& curr) {
+ auto name = curr->name;
+ auto& info = infos[name];
+ bool canRemove =
+ inlinedUses.count(name) &&
+ inlinedUses[name] == info.calls &&
+ !info.usedGlobally;
#ifdef INLINING_DEBUG
- if (canRemove) std::cout << "removing " << name << '\n';
+ if (canRemove)
+ std::cout << "removing " << name << '\n';
#endif
- return canRemove;
- }), funcs.end());
+ return canRemove;
+ }),
+ funcs.end());
// return whether we did any work
return inlinedUses.size() > 0;
}
};
-Pass* createInliningPass() {
- return new Inlining();
-}
+Pass* createInliningPass() { return new Inlining(); }
Pass* createInliningOptimizingPass() {
auto* ret = new Inlining();
@@ -360,4 +382,3 @@ Pass* createInliningOptimizingPass() {
}
} // namespace wasm
-
diff --git a/src/passes/InstrumentLocals.cpp b/src/passes/InstrumentLocals.cpp
index 6b44af0ad..3845b0fee 100644
--- a/src/passes/InstrumentLocals.cpp
+++ b/src/passes/InstrumentLocals.cpp
@@ -43,13 +43,13 @@
// )
// )
-#include <wasm.h>
-#include <wasm-builder.h>
-#include <pass.h>
-#include "shared-constants.h"
-#include "asmjs/shared-constants.h"
#include "asm_v_wasm.h"
+#include "asmjs/shared-constants.h"
#include "ir/function-type-utils.h"
+#include "shared-constants.h"
+#include <pass.h>
+#include <wasm-builder.h>
+#include <wasm.h>
namespace wasm {
@@ -68,59 +68,71 @@ struct InstrumentLocals : public WalkerPass<PostWalker<InstrumentLocals>> {
Builder builder(*getModule());
Name import;
switch (curr->type) {
- case i32: import = get_i32; break;
- case i64: return; // TODO
- case f32: import = get_f32; break;
- case f64: import = get_f64; break;
- case v128: assert(false && "v128 not implemented yet");
- case none: WASM_UNREACHABLE();
- case unreachable: WASM_UNREACHABLE();
+ case i32:
+ import = get_i32;
+ break;
+ case i64:
+ return; // TODO
+ case f32:
+ import = get_f32;
+ break;
+ case f64:
+ import = get_f64;
+ break;
+ case v128:
+ assert(false && "v128 not implemented yet");
+ case none:
+ WASM_UNREACHABLE();
+ case unreachable:
+ WASM_UNREACHABLE();
}
replaceCurrent(
- builder.makeCall(
- import,
- {
- builder.makeConst(Literal(int32_t(id++))),
- builder.makeConst(Literal(int32_t(curr->index))),
- curr
- },
- curr->type
- )
- );
+ builder.makeCall(import,
+ {builder.makeConst(Literal(int32_t(id++))),
+ builder.makeConst(Literal(int32_t(curr->index))),
+ curr},
+ curr->type));
}
void visitSetLocal(SetLocal* curr) {
Builder builder(*getModule());
Name import;
switch (curr->value->type) {
- case i32: import = set_i32; break;
- case i64: return; // TODO
- case f32: import = set_f32; break;
- case f64: import = set_f64; break;
- case v128: assert(false && "v128 not implemented yet");
- case unreachable: return; // nothing to do here
- case none: WASM_UNREACHABLE();
+ case i32:
+ import = set_i32;
+ break;
+ case i64:
+ return; // TODO
+ case f32:
+ import = set_f32;
+ break;
+ case f64:
+ import = set_f64;
+ break;
+ case v128:
+ assert(false && "v128 not implemented yet");
+ case unreachable:
+ return; // nothing to do here
+ case none:
+ WASM_UNREACHABLE();
}
- curr->value = builder.makeCall(
- import,
- {
- builder.makeConst(Literal(int32_t(id++))),
- builder.makeConst(Literal(int32_t(curr->index))),
- curr->value
- },
- curr->value->type
- );
+ curr->value =
+ builder.makeCall(import,
+ {builder.makeConst(Literal(int32_t(id++))),
+ builder.makeConst(Literal(int32_t(curr->index))),
+ curr->value},
+ curr->value->type);
}
void visitModule(Module* curr) {
- addImport(curr, get_i32, "iiii");
- addImport(curr, get_i64, "jiij");
- addImport(curr, get_f32, "fiif");
- addImport(curr, get_f64, "diid");
- addImport(curr, set_i32, "iiii");
- addImport(curr, set_i64, "jiij");
- addImport(curr, set_f32, "fiif");
- addImport(curr, set_f64, "diid");
+ addImport(curr, get_i32, "iiii");
+ addImport(curr, get_i64, "jiij");
+ addImport(curr, get_f32, "fiif");
+ addImport(curr, get_f64, "diid");
+ addImport(curr, set_i32, "iiii");
+ addImport(curr, set_i64, "jiij");
+ addImport(curr, set_f32, "fiif");
+ addImport(curr, set_f64, "diid");
}
private:
@@ -138,8 +150,6 @@ private:
}
};
-Pass* createInstrumentLocalsPass() {
- return new InstrumentLocals();
-}
+Pass* createInstrumentLocalsPass() { return new InstrumentLocals(); }
} // namespace wasm
diff --git a/src/passes/InstrumentMemory.cpp b/src/passes/InstrumentMemory.cpp
index a929478df..4a479db34 100644
--- a/src/passes/InstrumentMemory.cpp
+++ b/src/passes/InstrumentMemory.cpp
@@ -52,26 +52,26 @@
// )
// )
-#include <wasm.h>
-#include <wasm-builder.h>
-#include <pass.h>
-#include "shared-constants.h"
-#include "asmjs/shared-constants.h"
#include "asm_v_wasm.h"
+#include "asmjs/shared-constants.h"
#include "ir/function-type-utils.h"
+#include "shared-constants.h"
+#include <pass.h>
+#include <wasm-builder.h>
+#include <wasm.h>
namespace wasm {
-static Name load_ptr("load_ptr"),
- load_val_i32("load_val_i32"),
- load_val_i64("load_val_i64"),
- load_val_f32("load_val_f32"),
- load_val_f64("load_val_f64"),
- store_ptr("store_ptr"),
- store_val_i32("store_val_i32"),
- store_val_i64("store_val_i64"),
- store_val_f32("store_val_f32"),
- store_val_f64("store_val_f64");
+static Name load_ptr("load_ptr");
+static Name load_val_i32("load_val_i32");
+static Name load_val_i64("load_val_i64");
+static Name load_val_f32("load_val_f32");
+static Name load_val_f64("load_val_f64");
+static Name store_ptr("store_ptr");
+static Name store_val_i32("store_val_i32");
+static Name store_val_i64("store_val_i64");
+static Name store_val_f32("store_val_f32");
+static Name store_val_f64("store_val_f64");
// TODO: Add support for atomicRMW/cmpxchg
@@ -79,66 +79,74 @@ struct InstrumentMemory : public WalkerPass<PostWalker<InstrumentMemory>> {
void visitLoad(Load* curr) {
id++;
Builder builder(*getModule());
- curr->ptr = builder.makeCall(load_ptr,
- {
- builder.makeConst(Literal(int32_t(id))),
- builder.makeConst(Literal(int32_t(curr->bytes))),
- builder.makeConst(Literal(int32_t(curr->offset.addr))),
- curr->ptr
- },
- i32
- );
+ curr->ptr =
+ builder.makeCall(load_ptr,
+ {builder.makeConst(Literal(int32_t(id))),
+ builder.makeConst(Literal(int32_t(curr->bytes))),
+ builder.makeConst(Literal(int32_t(curr->offset.addr))),
+ curr->ptr},
+ i32);
Name target;
switch (curr->type) {
- case i32: target = load_val_i32; break;
- case i64: target = load_val_i64; break;
- case f32: target = load_val_f32; break;
- case f64: target = load_val_f64; break;
- default: return; // TODO: other types, unreachable, etc.
+ case i32:
+ target = load_val_i32;
+ break;
+ case i64:
+ target = load_val_i64;
+ break;
+ case f32:
+ target = load_val_f32;
+ break;
+ case f64:
+ target = load_val_f64;
+ break;
+ default:
+ return; // TODO: other types, unreachable, etc.
}
- replaceCurrent(builder.makeCall(target,
- {
- builder.makeConst(Literal(int32_t(id))),
- curr
- },
- curr->type
- ));
+ replaceCurrent(builder.makeCall(
+ target, {builder.makeConst(Literal(int32_t(id))), curr}, curr->type));
}
void visitStore(Store* curr) {
id++;
Builder builder(*getModule());
- curr->ptr = builder.makeCall(store_ptr,
- { builder.makeConst(Literal(int32_t(id))),
- builder.makeConst(Literal(int32_t(curr->bytes))),
- builder.makeConst(Literal(int32_t(curr->offset.addr))),
- curr->ptr },
- i32
- );
+ curr->ptr =
+ builder.makeCall(store_ptr,
+ {builder.makeConst(Literal(int32_t(id))),
+ builder.makeConst(Literal(int32_t(curr->bytes))),
+ builder.makeConst(Literal(int32_t(curr->offset.addr))),
+ curr->ptr},
+ i32);
Name target;
switch (curr->value->type) {
- case i32: target = store_val_i32; break;
- case i64: target = store_val_i64; break;
- case f32: target = store_val_f32; break;
- case f64: target = store_val_f64; break;
- default: return; // TODO: other types, unreachable, etc.
+ case i32:
+ target = store_val_i32;
+ break;
+ case i64:
+ target = store_val_i64;
+ break;
+ case f32:
+ target = store_val_f32;
+ break;
+ case f64:
+ target = store_val_f64;
+ break;
+ default:
+ return; // TODO: other types, unreachable, etc.
}
- curr->value = builder.makeCall(target,
- {
- builder.makeConst(Literal(int32_t(id))),
- curr->value
- },
- curr->value->type
- );
+ curr->value =
+ builder.makeCall(target,
+ {builder.makeConst(Literal(int32_t(id))), curr->value},
+ curr->value->type);
}
- void visitModule(Module *curr) {
- addImport(curr, load_ptr, "iiiii");
- addImport(curr, load_val_i32, "iii");
- addImport(curr, load_val_i64, "jij");
- addImport(curr, load_val_f32, "fif");
- addImport(curr, load_val_f64, "did");
- addImport(curr, store_ptr, "iiiii");
+ void visitModule(Module* curr) {
+ addImport(curr, load_ptr, "iiiii");
+ addImport(curr, load_val_i32, "iii");
+ addImport(curr, load_val_i64, "jij");
+ addImport(curr, load_val_f32, "fif");
+ addImport(curr, load_val_f64, "did");
+ addImport(curr, store_ptr, "iiiii");
addImport(curr, store_val_i32, "iii");
addImport(curr, store_val_i64, "jij");
addImport(curr, store_val_f32, "fif");
@@ -148,7 +156,7 @@ struct InstrumentMemory : public WalkerPass<PostWalker<InstrumentMemory>> {
private:
Index id;
- void addImport(Module *curr, Name name, std::string sig) {
+ void addImport(Module* curr, Name name, std::string sig) {
auto import = new Function;
import->name = name;
import->module = ENV;
@@ -160,8 +168,6 @@ private:
}
};
-Pass *createInstrumentMemoryPass() {
- return new InstrumentMemory();
-}
+Pass* createInstrumentMemoryPass() { return new InstrumentMemory(); }
} // namespace wasm
diff --git a/src/passes/LegalizeJSInterface.cpp b/src/passes/LegalizeJSInterface.cpp
index cde4373f8..324590427 100644
--- a/src/passes/LegalizeJSInterface.cpp
+++ b/src/passes/LegalizeJSInterface.cpp
@@ -30,17 +30,17 @@
// table even to a signature that is not legal.
//
-#include <utility>
-#include "wasm.h"
-#include "pass.h"
#include "asm_v_wasm.h"
-#include "shared-constants.h"
#include "asmjs/shared-constants.h"
-#include "wasm-builder.h"
#include "ir/function-type-utils.h"
#include "ir/import-utils.h"
#include "ir/literal-utils.h"
#include "ir/utils.h"
+#include "pass.h"
+#include "shared-constants.h"
+#include "wasm-builder.h"
+#include "wasm.h"
+#include <utility>
namespace wasm {
@@ -71,8 +71,9 @@ struct LegalizeJSInterface : public Pass {
if (im->imported() && isIllegal(im) && shouldBeLegalized(im)) {
auto funcName = makeLegalStubForCalledImport(im, module);
illegalImportsToLegal[im->name] = funcName;
- // we need to use the legalized version in the table, as the import from JS
- // is legal for JS. Our stub makes it look like a native wasm function.
+ // we need to use the legalized version in the table, as the import from
+ // JS is legal for JS. Our stub makes it look like a native wasm
+ // function.
for (auto& segment : module->table.segments) {
for (auto& name : segment.data) {
if (name == im->name) {
@@ -87,23 +88,32 @@ struct LegalizeJSInterface : public Pass {
module->removeFunction(pair.first);
}
- // fix up imports: call_import of an illegal must be turned to a call of a legal
+ // fix up imports: call_import of an illegal must be turned to a call of a
+ // legal
struct FixImports : public WalkerPass<PostWalker<FixImports>> {
bool isFunctionParallel() override { return true; }
- Pass* create() override { return new FixImports(illegalImportsToLegal); }
+ Pass* create() override {
+ return new FixImports(illegalImportsToLegal);
+ }
std::map<Name, Name>* illegalImportsToLegal;
- FixImports(std::map<Name, Name>* illegalImportsToLegal) : illegalImportsToLegal(illegalImportsToLegal) {}
+ FixImports(std::map<Name, Name>* illegalImportsToLegal)
+ : illegalImportsToLegal(illegalImportsToLegal) {}
void visitCall(Call* curr) {
auto iter = illegalImportsToLegal->find(curr->target);
- if (iter == illegalImportsToLegal->end()) return;
-
- if (iter->second == getFunction()->name) return; // inside the stub function itself, is the one safe place to do the call
- replaceCurrent(Builder(*getModule()).makeCall(iter->second, curr->operands, curr->type));
+ if (iter == illegalImportsToLegal->end())
+ return;
+
+ if (iter->second == getFunction()->name)
+ // inside the stub function itself, is the one safe place to do the
+ // call
+ return;
+ replaceCurrent(Builder(*getModule())
+ .makeCall(iter->second, curr->operands, curr->type));
}
};
@@ -118,29 +128,32 @@ private:
// map of illegal to legal names for imports
std::map<Name, Name> illegalImportsToLegal;
- template<typename T>
- bool isIllegal(T* t) {
+ template<typename T> bool isIllegal(T* t) {
for (auto param : t->params) {
- if (param == i64) return true;
+ if (param == i64)
+ return true;
}
return t->result == i64;
}
// Check if an export should be legalized.
bool shouldBeLegalized(Export* ex, Function* func) {
- if (full) return true;
+ if (full)
+ return true;
// We are doing minimal legalization - just what JS needs.
return ex->name.startsWith("dynCall_");
}
// Check if an import should be legalized.
bool shouldBeLegalized(Function* im) {
- if (full) return true;
+ if (full)
+ return true;
// We are doing minimal legalization - just what JS needs.
return im->module == ENV && im->base.startsWith("invoke_");
}
- // JS calls the export, so it must call a legal stub that calls the actual wasm function
+ // JS calls the export, so it must call a legal stub that calls the actual
+ // wasm function
Name makeLegalStub(Function* func, Module* module) {
Builder builder(*module);
auto* legal = new Function();
@@ -152,11 +165,13 @@ private:
for (auto param : func->params) {
if (param == i64) {
- call->operands.push_back(I64Utilities::recreateI64(builder, legal->params.size(), legal->params.size() + 1));
+ call->operands.push_back(I64Utilities::recreateI64(
+ builder, legal->params.size(), legal->params.size() + 1));
legal->params.push_back(i32);
legal->params.push_back(i32);
} else {
- call->operands.push_back(builder.makeGetLocal(legal->params.size(), param));
+ call->operands.push_back(
+ builder.makeGetLocal(legal->params.size(), param));
legal->params.push_back(param);
}
}
@@ -167,7 +182,8 @@ private:
auto index = Builder::addVar(legal, Name(), i64);
auto* block = builder.makeBlock();
block->list.push_back(builder.makeSetLocal(index, call));
- block->list.push_back(builder.makeCall(f->name, {I64Utilities::getI64High(builder, index)}, none));
+ block->list.push_back(builder.makeCall(
+ f->name, {I64Utilities::getI64High(builder, index)}, none));
block->list.push_back(I64Utilities::getI64Low(builder, index));
block->finalize();
legal->body = block;
@@ -183,11 +199,12 @@ private:
return legal->name;
}
- // wasm calls the import, so it must call a stub that calls the actual legal JS import
+ // wasm calls the import, so it must call a stub that calls the actual legal
+ // JS import
Name makeLegalStubForCalledImport(Function* im, Module* module) {
Builder builder(*module);
auto type = make_unique<FunctionType>();
- type->name = Name(std::string("legaltype$") + im->name.str);
+ type->name = Name(std::string("legaltype$") + im->name.str);
auto legal = make_unique<Function>();
legal->name = Name(std::string("legalimport$") + im->name.str);
legal->module = im->module;
@@ -203,12 +220,15 @@ private:
for (auto param : imFunctionType->params) {
if (param == i64) {
- call->operands.push_back(I64Utilities::getI64Low(builder, func->params.size()));
- call->operands.push_back(I64Utilities::getI64High(builder, func->params.size()));
+ call->operands.push_back(
+ I64Utilities::getI64Low(builder, func->params.size()));
+ call->operands.push_back(
+ I64Utilities::getI64High(builder, func->params.size()));
type->params.push_back(i32);
type->params.push_back(i32);
} else {
- call->operands.push_back(builder.makeGetLocal(func->params.size(), param));
+ call->operands.push_back(
+ builder.makeGetLocal(func->params.size(), param));
type->params.push_back(param);
}
func->params.push_back(param);
@@ -241,7 +261,8 @@ private:
return funcName;
}
- static Function* getFunctionOrImport(Module* module, Name name, std::string sig) {
+ static Function*
+ getFunctionOrImport(Module* module, Name name, std::string sig) {
// First look for the function by name
if (Function* f = module->getFunctionOrNull(name)) {
return f;
@@ -264,13 +285,10 @@ private:
}
};
-Pass *createLegalizeJSInterfacePass() {
- return new LegalizeJSInterface(true);
-}
+Pass* createLegalizeJSInterfacePass() { return new LegalizeJSInterface(true); }
-Pass *createLegalizeJSInterfaceMinimallyPass() {
+Pass* createLegalizeJSInterfaceMinimallyPass() {
return new LegalizeJSInterface(false);
}
} // namespace wasm
-
diff --git a/src/passes/LimitSegments.cpp b/src/passes/LimitSegments.cpp
index 521969a28..0ea70f53d 100644
--- a/src/passes/LimitSegments.cpp
+++ b/src/passes/LimitSegments.cpp
@@ -26,14 +26,11 @@ struct LimitSegments : public Pass {
void run(PassRunner* runner, Module* module) override {
if (!MemoryUtils::ensureLimitedSegments(*module)) {
std::cerr << "Unable to merge segments. "
- << "wasm VMs may not accept this binary"
- << std::endl;
+ << "wasm VMs may not accept this binary" << std::endl;
}
}
};
-Pass *createLimitSegmentsPass() {
- return new LimitSegments();
-}
+Pass* createLimitSegmentsPass() { return new LimitSegments(); }
} // namespace wasm
diff --git a/src/passes/LocalCSE.cpp b/src/passes/LocalCSE.cpp
index 1338c6571..d582c8275 100644
--- a/src/passes/LocalCSE.cpp
+++ b/src/passes/LocalCSE.cpp
@@ -37,15 +37,15 @@
#include <algorithm>
#include <memory>
-#include <wasm.h>
-#include <wasm-builder.h>
-#include <wasm-traversal.h>
-#include <pass.h>
+#include "ir/flat.h"
#include <ir/cost.h>
#include <ir/effects.h>
#include <ir/equivalent_sets.h>
-#include "ir/flat.h"
#include <ir/hashed.h>
+#include <pass.h>
+#include <wasm-builder.h>
+#include <wasm-traversal.h>
+#include <wasm.h>
namespace wasm {
@@ -60,7 +60,8 @@ struct LocalCSE : public WalkerPass<LinearExecutionWalker<LocalCSE>> {
Index index; // the local we are assigned to, local.get that to reuse us
EffectAnalyzer effects;
- UsableInfo(Expression* value, Index index, PassOptions& passOptions) : value(value), index(index), effects(passOptions, value) {}
+ UsableInfo(Expression* value, Index index, PassOptions& passOptions)
+ : value(value), index(index), effects(passOptions, value) {}
};
// a list of usables in a linear execution trace
@@ -183,11 +184,13 @@ struct LocalCSE : public WalkerPass<LinearExecutionWalker<LocalCSE>> {
if (iter != usables.end()) {
// already exists in the table, this is good to reuse
auto& info = iter->second;
- set->value = Builder(*getModule()).makeGetLocal(info.index, value->type);
+ set->value =
+ Builder(*getModule()).makeGetLocal(info.index, value->type);
anotherPass = true;
} else {
// not in table, add this, maybe we can help others later
- usables.emplace(std::make_pair(hashed, UsableInfo(value, set->index, getPassOptions())));
+ usables.emplace(std::make_pair(
+ hashed, UsableInfo(value, set->index, getPassOptions())));
}
}
} else if (auto* get = curr->dynCast<GetLocal>()) {
@@ -227,8 +230,6 @@ struct LocalCSE : public WalkerPass<LinearExecutionWalker<LocalCSE>> {
}
};
-Pass *createLocalCSEPass() {
- return new LocalCSE();
-}
+Pass* createLocalCSEPass() { return new LocalCSE(); }
} // namespace wasm
diff --git a/src/passes/LogExecution.cpp b/src/passes/LogExecution.cpp
index abdaa8d23..7bfee7c24 100644
--- a/src/passes/LogExecution.cpp
+++ b/src/passes/LogExecution.cpp
@@ -28,26 +28,22 @@
// value.
//
-#include <wasm.h>
-#include <wasm-builder.h>
-#include <pass.h>
-#include "shared-constants.h"
-#include "asmjs/shared-constants.h"
#include "asm_v_wasm.h"
+#include "asmjs/shared-constants.h"
#include "ir/function-type-utils.h"
+#include "shared-constants.h"
+#include <pass.h>
+#include <wasm-builder.h>
+#include <wasm.h>
namespace wasm {
Name LOGGER("log_execution");
struct LogExecution : public WalkerPass<PostWalker<LogExecution>> {
- void visitLoop(Loop* curr) {
- curr->body = makeLogCall(curr->body);
- }
+ void visitLoop(Loop* curr) { curr->body = makeLogCall(curr->body); }
- void visitReturn(Return* curr) {
- replaceCurrent(makeLogCall(curr));
- }
+ void visitReturn(Return* curr) { replaceCurrent(makeLogCall(curr)); }
void visitFunction(Function* curr) {
if (curr->imported()) {
@@ -61,7 +57,7 @@ struct LogExecution : public WalkerPass<PostWalker<LogExecution>> {
curr->body = makeLogCall(curr->body);
}
- void visitModule(Module *curr) {
+ void visitModule(Module* curr) {
// Add the import
auto import = new Function;
import->name = LOGGER;
@@ -79,17 +75,11 @@ private:
Builder builder(*getModule());
return builder.makeSequence(
builder.makeCall(
- LOGGER,
- { builder.makeConst(Literal(int32_t(id++))) },
- none
- ),
- curr
- );
+ LOGGER, {builder.makeConst(Literal(int32_t(id++)))}, none),
+ curr);
}
};
-Pass *createLogExecutionPass() {
- return new LogExecution();
-}
+Pass* createLogExecutionPass() { return new LogExecution(); }
} // namespace wasm
diff --git a/src/passes/LoopInvariantCodeMotion.cpp b/src/passes/LoopInvariantCodeMotion.cpp
index aec2f7ce5..e9f376bd3 100644
--- a/src/passes/LoopInvariantCodeMotion.cpp
+++ b/src/passes/LoopInvariantCodeMotion.cpp
@@ -24,16 +24,17 @@
#include <unordered_map>
-#include "wasm.h"
-#include "pass.h"
-#include "wasm-builder.h"
-#include "ir/local-graph.h"
#include "ir/effects.h"
#include "ir/find_all.h"
+#include "ir/local-graph.h"
+#include "pass.h"
+#include "wasm-builder.h"
+#include "wasm.h"
namespace wasm {
-struct LoopInvariantCodeMotion : public WalkerPass<ExpressionStackWalker<LoopInvariantCodeMotion>> {
+struct LoopInvariantCodeMotion
+ : public WalkerPass<ExpressionStackWalker<LoopInvariantCodeMotion>> {
bool isFunctionParallel() override { return true; }
Pass* create() override { return new LoopInvariantCodeMotion; }
@@ -128,11 +129,12 @@ struct LoopInvariantCodeMotion : public WalkerPass<ExpressionStackWalker<LoopInv
// outside of the loop, in which case everything is good -
// either they are before the loop and constant for us, or
// they are after and don't matter.
- if (effects.localsRead.empty() || !hasGetDependingOnLoopSet(curr, loopSets)) {
- // We have checked if our gets are influenced by sets in the loop, and
- // must also check if our sets interfere with them. To do so, assume
- // temporarily that we are moving curr out; see if any sets remain for
- // its indexes.
+ if (effects.localsRead.empty() ||
+ !hasGetDependingOnLoopSet(curr, loopSets)) {
+ // We have checked if our gets are influenced by sets in the loop,
+ // and must also check if our sets interfere with them. To do so,
+ // assume temporarily that we are moving curr out; see if any sets
+ // remain for its indexes.
FindAll<SetLocal> currSets(curr);
for (auto* set : currSets.list) {
assert(numSetsForIndex[set->index] > 0);
@@ -187,8 +189,8 @@ struct LoopInvariantCodeMotion : public WalkerPass<ExpressionStackWalker<LoopInv
bool interestingToMove(Expression* curr) {
// In theory we could consider blocks, but then heavy nesting of
// switch patterns would be heavy, and almost always pointless.
- if (curr->type != none || curr->is<Nop>() || curr->is<Block>()
- || curr->is<Loop>()) {
+ if (curr->type != none || curr->is<Nop>() || curr->is<Block>() ||
+ curr->is<Loop>()) {
return false;
}
// Don't move copies (set of a get, or set of a tee of a get, etc.),
@@ -206,7 +208,8 @@ struct LoopInvariantCodeMotion : public WalkerPass<ExpressionStackWalker<LoopInv
if (auto* set = curr->dynCast<SetLocal>()) {
while (1) {
auto* next = set->value->dynCast<SetLocal>();
- if (!next) break;
+ if (!next)
+ break;
set = next;
}
if (set->value->is<GetLocal>() || set->value->is<Const>()) {
@@ -223,7 +226,8 @@ struct LoopInvariantCodeMotion : public WalkerPass<ExpressionStackWalker<LoopInv
for (auto* set : sets) {
// nullptr means a parameter or zero-init value;
// no danger to us.
- if (!set) continue;
+ if (!set)
+ continue;
// Check if the set is in the loop. If not, it's either before,
// which is fine, or after, which is also fine - moving curr
// to just outside the loop will preserve those relationships.
@@ -238,9 +242,8 @@ struct LoopInvariantCodeMotion : public WalkerPass<ExpressionStackWalker<LoopInv
}
};
-Pass *createLoopInvariantCodeMotionPass() {
+Pass* createLoopInvariantCodeMotionPass() {
return new LoopInvariantCodeMotion();
}
} // namespace wasm
-
diff --git a/src/passes/MemoryPacking.cpp b/src/passes/MemoryPacking.cpp
index 704f57d89..11dbc1743 100644
--- a/src/passes/MemoryPacking.cpp
+++ b/src/passes/MemoryPacking.cpp
@@ -15,9 +15,9 @@
*/
#include "pass.h"
-#include "wasm.h"
#include "wasm-binary.h"
#include "wasm-builder.h"
+#include "wasm.h"
namespace wasm {
@@ -57,7 +57,8 @@ struct MemoryPacking : public Pass {
};
for (auto& segment : module->memory.segments) {
- if (!isSplittable(segment)) continue;
+ if (!isSplittable(segment))
+ continue;
// skip final zeros
while (segment.data.size() > 0 && segment.data.back() == 0) {
@@ -81,7 +82,7 @@ struct MemoryPacking : public Pass {
start++;
}
Index end = start; // end of data-containing part
- Index next = end; // after zeros we can skip. preserves next >= end
+ Index next = end; // after zeros we can skip. preserves next >= end
if (!shouldSplit()) {
next = end = data.size();
}
@@ -99,7 +100,10 @@ struct MemoryPacking : public Pass {
}
}
if (end != start) {
- packed.emplace_back(Builder(*module).makeConst(Literal(int32_t(base + start))), &data[start], end - start);
+ packed.emplace_back(
+ Builder(*module).makeConst(Literal(int32_t(base + start))),
+ &data[start],
+ end - start);
}
start = next;
}
@@ -109,8 +113,6 @@ struct MemoryPacking : public Pass {
}
};
-Pass *createMemoryPackingPass() {
- return new MemoryPacking();
-}
+Pass* createMemoryPackingPass() { return new MemoryPacking(); }
} // namespace wasm
diff --git a/src/passes/MergeBlocks.cpp b/src/passes/MergeBlocks.cpp
index 38e9fc6a2..8df35abd2 100644
--- a/src/passes/MergeBlocks.cpp
+++ b/src/passes/MergeBlocks.cpp
@@ -72,21 +72,23 @@
// single outside block.
//
-#include <wasm.h>
-#include <pass.h>
-#include <wasm-builder.h>
#include <ir/branch-utils.h>
#include <ir/effects.h>
#include <ir/utils.h>
+#include <pass.h>
+#include <wasm-builder.h>
+#include <wasm.h>
namespace wasm {
// Looks for reasons we can't remove the values from breaks to an origin
-// For example, if there is a switch targeting us, we can't do it - we can't remove the value from other targets
+// For example, if there is a switch targeting us, we can't do it - we can't
+// remove the value from other targets
struct ProblemFinder : public ControlFlowWalker<ProblemFinder> {
Name origin;
bool foundProblem = false;
- // count br_ifs, and dropped br_ifs. if they don't match, then a br_if flow value is used, and we can't drop it
+ // count br_ifs, and dropped br_ifs. if they don't match, then a br_if flow
+ // value is used, and we can't drop it
Index brIfs = 0;
Index droppedBrIfs = 0;
PassOptions& passOptions;
@@ -158,8 +160,10 @@ struct BreakValueDropper : public ControlFlowWalker<BreakValueDropper> {
}
void visitDrop(Drop* curr) {
- // if we dropped a br_if whose value we removed, then we are now dropping a (block (drop value) (br_if)) with type none, which does not need a drop
- // likewise, unreachable does not need to be dropped, so we just leave drops of concrete values
+ // if we dropped a br_if whose value we removed, then we are now dropping a
+ // (block (drop value) (br_if)) with type none, which does not need a drop
+ // likewise, unreachable does not need to be dropped, so we just leave drops
+ // of concrete values
if (!isConcreteType(curr->value->type)) {
replaceCurrent(curr->value);
}
@@ -188,7 +192,8 @@ static bool hasDeadCode(Block* block) {
}
// core block optimizer routine
-static void optimizeBlock(Block* curr, Module* module, PassOptions& passOptions) {
+static void
+optimizeBlock(Block* curr, Module* module, PassOptions& passOptions) {
auto& list = curr->list;
// Main merging loop.
bool more = true;
@@ -205,7 +210,8 @@ static void optimizeBlock(Block* curr, Module* module, PassOptions& passOptions)
Loop* loop = nullptr;
// To to handle a non-block child.
if (!childBlock) {
- // if we have a child that is (drop (block ..)) then we can move the drop into the block, and remove br values. this allows more merging,
+ // if we have a child that is (drop (block ..)) then we can move the
+ // drop into the block, and remove br values. this allows more merging,
if (auto* drop = list[i]->dynCast<Drop>()) {
childBlock = drop->value->dynCast<Block>();
if (childBlock) {
@@ -253,13 +259,16 @@ static void optimizeBlock(Block* curr, Module* module, PassOptions& passOptions)
}
}
// If no block, we can't do anything.
- if (!childBlock) continue;
+ if (!childBlock)
+ continue;
auto& childList = childBlock->list;
auto childSize = childList.size();
- if (childSize == 0) continue;
- // If the child has items after an unreachable, ignore it - dce should have
- // been run, and we prefer to not handle the complexity here.
- if (hasDeadCode(childBlock)) continue;
+ if (childSize == 0)
+ continue;
+ // If the child has items after an unreachable, ignore it - dce should
+ // have been run, and we prefer to not handle the complexity here.
+ if (hasDeadCode(childBlock))
+ continue;
// In some cases we can remove only the head or the tail of the block,
// and must keep some things in the child block.
Index keepStart = childSize;
@@ -295,8 +304,9 @@ static void optimizeBlock(Block* curr, Module* module, PassOptions& passOptions)
break;
}
}
- // If we can only do part of the block, and if the block has a flowing value, we
- // would need special handling for that - not worth it, probably TODO
+ // If we can only do part of the block, and if the block has a flowing
+ // value, we would need special handling for that - not worth it,
+ // probably TODO
// FIXME is this not handled by the drop later down?
if (keepEnd < childSize && isConcreteType(childList.back()->type)) {
continue;
@@ -304,7 +314,8 @@ static void optimizeBlock(Block* curr, Module* module, PassOptions& passOptions)
}
// Maybe there's nothing to do, if we must keep it all in the
// child anyhow.
- if (keepStart == 0 && keepEnd == childSize) continue;
+ if (keepStart == 0 && keepEnd == childSize)
+ continue;
// There is something to do!
bool keepingPart = keepStart < keepEnd;
// Create a new merged list, and fill in the code before the
@@ -393,31 +404,45 @@ struct MergeBlocks : public WalkerPass<PostWalker<MergeBlocks>> {
// (..other..children..)
// )
// )
- // at which point the block is on the outside and potentially mergeable with an outer block
- Block* optimize(Expression* curr, Expression*& child, Block* outer = nullptr, Expression** dependency1 = nullptr, Expression** dependency2 = nullptr) {
- if (!child) return outer;
+ // at which point the block is on the outside and potentially mergeable with
+ // an outer block
+ Block* optimize(Expression* curr,
+ Expression*& child,
+ Block* outer = nullptr,
+ Expression** dependency1 = nullptr,
+ Expression** dependency2 = nullptr) {
+ if (!child)
+ return outer;
if ((dependency1 && *dependency1) || (dependency2 && *dependency2)) {
- // there are dependencies, things we must be reordered through. make sure no problems there
+ // there are dependencies, things we must be reordered through. make sure
+ // no problems there
EffectAnalyzer childEffects(getPassOptions(), child);
- if (dependency1 && *dependency1 && EffectAnalyzer(getPassOptions(), *dependency1).invalidates(childEffects)) return outer;
- if (dependency2 && *dependency2 && EffectAnalyzer(getPassOptions(), *dependency2).invalidates(childEffects)) return outer;
+ if (dependency1 && *dependency1 &&
+ EffectAnalyzer(getPassOptions(), *dependency1)
+ .invalidates(childEffects))
+ return outer;
+ if (dependency2 && *dependency2 &&
+ EffectAnalyzer(getPassOptions(), *dependency2)
+ .invalidates(childEffects))
+ return outer;
}
if (auto* block = child->dynCast<Block>()) {
if (!block->name.is() && block->list.size() >= 2) {
- // if we move around unreachable code, type changes could occur. avoid that, as
- // anyhow it means we should have run dce before getting here
+ // if we move around unreachable code, type changes could occur. avoid
+ // that, as anyhow it means we should have run dce before getting here
if (curr->type == none && hasUnreachableChild(block)) {
- // moving the block to the outside would replace a none with an unreachable
+ // moving the block to the outside would replace a none with an
+ // unreachable
return outer;
}
auto* back = block->list.back();
if (back->type == unreachable) {
- // curr is not reachable, dce could remove it; don't try anything fancy
- // here
+ // curr is not reachable, dce could remove it; don't try anything
+ // fancy here
return outer;
}
- // we are going to replace the block with the final element, so they should
- // be identically typed
+ // we are going to replace the block with the final element, so they
+ // should be identically typed
if (block->type != back->type) {
return outer;
}
@@ -443,18 +468,10 @@ struct MergeBlocks : public WalkerPass<PostWalker<MergeBlocks>> {
return outer;
}
- void visitUnary(Unary* curr) {
- optimize(curr, curr->value);
- }
- void visitSetLocal(SetLocal* curr) {
- optimize(curr, curr->value);
- }
- void visitLoad(Load* curr) {
- optimize(curr, curr->ptr);
- }
- void visitReturn(Return* curr) {
- optimize(curr, curr->value);
- }
+ void visitUnary(Unary* curr) { optimize(curr, curr->value); }
+ void visitSetLocal(SetLocal* curr) { optimize(curr, curr->value); }
+ void visitLoad(Load* curr) { optimize(curr, curr->ptr); }
+ void visitReturn(Return* curr) { optimize(curr, curr->value); }
void visitBinary(Binary* curr) {
optimize(curr, curr->right, optimize(curr, curr->left), &curr->left);
@@ -466,15 +483,20 @@ struct MergeBlocks : public WalkerPass<PostWalker<MergeBlocks>> {
optimize(curr, curr->value, optimize(curr, curr->ptr), &curr->ptr);
}
void optimizeTernary(Expression* curr,
- Expression*& first, Expression*& second, Expression*& third) {
+ Expression*& first,
+ Expression*& second,
+ Expression*& third) {
// TODO: for now, just stop when we see any side effect. instead, we could
// check effects carefully for reordering
Block* outer = nullptr;
- if (EffectAnalyzer(getPassOptions(), first).hasSideEffects()) return;
+ if (EffectAnalyzer(getPassOptions(), first).hasSideEffects())
+ return;
outer = optimize(curr, first, outer);
- if (EffectAnalyzer(getPassOptions(), second).hasSideEffects()) return;
+ if (EffectAnalyzer(getPassOptions(), second).hasSideEffects())
+ return;
outer = optimize(curr, second, outer);
- if (EffectAnalyzer(getPassOptions(), third).hasSideEffects()) return;
+ if (EffectAnalyzer(getPassOptions(), third).hasSideEffects())
+ return;
optimize(curr, third, outer);
}
void visitAtomicCmpxchg(AtomicCmpxchg* curr) {
@@ -485,9 +507,7 @@ struct MergeBlocks : public WalkerPass<PostWalker<MergeBlocks>> {
optimizeTernary(curr, curr->ifTrue, curr->ifFalse, curr->condition);
}
- void visitDrop(Drop* curr) {
- optimize(curr, curr->value);
- }
+ void visitDrop(Drop* curr) { optimize(curr, curr->value); }
void visitBreak(Break* curr) {
optimize(curr, curr->condition, optimize(curr, curr->value), &curr->value);
@@ -496,33 +516,31 @@ struct MergeBlocks : public WalkerPass<PostWalker<MergeBlocks>> {
optimize(curr, curr->condition, optimize(curr, curr->value), &curr->value);
}
- template<typename T>
- void handleCall(T* curr) {
+ template<typename T> void handleCall(T* curr) {
Block* outer = nullptr;
for (Index i = 0; i < curr->operands.size(); i++) {
- if (EffectAnalyzer(getPassOptions(), curr->operands[i]).hasSideEffects()) return;
+ if (EffectAnalyzer(getPassOptions(), curr->operands[i]).hasSideEffects())
+ return;
outer = optimize(curr, curr->operands[i], outer);
}
return;
}
- void visitCall(Call* curr) {
- handleCall(curr);
- }
+ void visitCall(Call* curr) { handleCall(curr); }
void visitCallIndirect(CallIndirect* curr) {
Block* outer = nullptr;
for (Index i = 0; i < curr->operands.size(); i++) {
- if (EffectAnalyzer(getPassOptions(), curr->operands[i]).hasSideEffects()) return;
+ if (EffectAnalyzer(getPassOptions(), curr->operands[i]).hasSideEffects())
+ return;
outer = optimize(curr, curr->operands[i], outer);
}
- if (EffectAnalyzer(getPassOptions(), curr->target).hasSideEffects()) return;
+ if (EffectAnalyzer(getPassOptions(), curr->target).hasSideEffects())
+ return;
optimize(curr, curr->target, outer);
}
};
-Pass *createMergeBlocksPass() {
- return new MergeBlocks();
-}
+Pass* createMergeBlocksPass() { return new MergeBlocks(); }
} // namespace wasm
diff --git a/src/passes/MergeLocals.cpp b/src/passes/MergeLocals.cpp
index 4092e1ea8..fe9f4bb86 100644
--- a/src/passes/MergeLocals.cpp
+++ b/src/passes/MergeLocals.cpp
@@ -28,7 +28,7 @@
// (i32.const 100)
// (local.get $x)
// )
-//
+//
// If that assignment of $y is never used again, everything is fine. But if
// if is, then the live range of $y does not end in that get, and will
// necessarily overlap with that of $x - making them appear to interfere
@@ -46,14 +46,16 @@
// TODO: investigate more
//
-#include <wasm.h>
+#include <ir/local-graph.h>
#include <pass.h>
#include <wasm-builder.h>
-#include <ir/local-graph.h>
+#include <wasm.h>
namespace wasm {
-struct MergeLocals : public WalkerPass<PostWalker<MergeLocals, UnifiedExpressionVisitor<MergeLocals>>> {
+struct MergeLocals
+ : public WalkerPass<
+ PostWalker<MergeLocals, UnifiedExpressionVisitor<MergeLocals>>> {
bool isFunctionParallel() override { return true; }
Pass* create() override { return new MergeLocals(); }
@@ -94,12 +96,14 @@ struct MergeLocals : public WalkerPass<PostWalker<MergeLocals, UnifiedExpression
}
void optimizeCopies() {
- if (copies.empty()) return;
+ if (copies.empty())
+ return;
// compute all dependencies
LocalGraph preGraph(getFunction());
preGraph.computeInfluences();
// optimize each copy
- std::unordered_map<SetLocal*, SetLocal*> optimizedToCopy, optimizedToTrivial;
+ std::unordered_map<SetLocal*, SetLocal*> optimizedToCopy,
+ optimizedToTrivial;
for (auto* copy : copies) {
auto* trivial = copy->value->cast<SetLocal>();
bool canOptimizeToCopy = false;
@@ -108,8 +112,8 @@ struct MergeLocals : public WalkerPass<PostWalker<MergeLocals, UnifiedExpression
canOptimizeToCopy = true;
for (auto* influencedGet : trivialInfluences) {
// this get uses the trivial write, so it uses the value in the copy.
- // however, it may depend on other writes too, if there is a merge/phi,
- // and in that case we can't do anything
+ // however, it may depend on other writes too, if there is a
+ // merge/phi, and in that case we can't do anything
assert(influencedGet->index == trivial->index);
if (preGraph.getSetses[influencedGet].size() == 1) {
// this is ok
@@ -127,14 +131,17 @@ struct MergeLocals : public WalkerPass<PostWalker<MergeLocals, UnifiedExpression
}
optimizedToCopy[copy] = trivial;
} else {
- // alternatively, we can try to remove the conflict in the opposite way: given
+ // alternatively, we can try to remove the conflict in the opposite way:
+ // given
// (local.set $x
// (local.get $y)
// )
- // we can look for uses of $x that could instead be uses of $y. this extends
- // $y's live range, but if it removes the conflict between $x and $y, it may be
- // worth it
- if (!trivialInfluences.empty()) { // if the trivial set we added has influences, it means $y lives on
+ // we can look for uses of $x that could instead be uses of $y. this
+ // extends $y's live range, but if it removes the conflict between $x
+ // and $y, it may be worth it
+
+ // if the trivial set we added has influences, it means $y lives on
+ if (!trivialInfluences.empty()) {
auto& copyInfluences = preGraph.setInfluences[copy];
if (!copyInfluences.empty()) {
bool canOptimizeToTrivial = true;
@@ -212,9 +219,6 @@ struct MergeLocals : public WalkerPass<PostWalker<MergeLocals, UnifiedExpression
}
};
-Pass *createMergeLocalsPass() {
- return new MergeLocals();
-}
+Pass* createMergeLocalsPass() { return new MergeLocals(); }
} // namespace wasm
-
diff --git a/src/passes/Metrics.cpp b/src/passes/Metrics.cpp
index 8717a86b7..0baca3e8b 100644
--- a/src/passes/Metrics.cpp
+++ b/src/passes/Metrics.cpp
@@ -16,22 +16,23 @@
#include <algorithm>
#include <iomanip>
+#include <ir/module-utils.h>
#include <pass.h>
#include <support/colors.h>
-#include <wasm.h>
#include <wasm-binary.h>
-#include <ir/module-utils.h>
+#include <wasm.h>
using namespace std;
namespace wasm {
-typedef map<const char *, int> Counts;
+typedef map<const char*, int> Counts;
static Counts lastCounts;
// Prints metrics between optimization passes.
-struct Metrics : public WalkerPass<PostWalker<Metrics, UnifiedExpressionVisitor<Metrics>>> {
+struct Metrics
+ : public WalkerPass<PostWalker<Metrics, UnifiedExpressionVisitor<Metrics>>> {
bool modifiesBinaryenIR() override { return false; }
bool byFunction;
@@ -56,9 +57,8 @@ struct Metrics : public WalkerPass<PostWalker<Metrics, UnifiedExpressionVisitor<
for (auto& curr : module->exports) {
visitExport(curr.get());
}
- ModuleUtils::iterDefinedGlobals(*module, [&](Global* curr) {
- walkGlobal(curr);
- });
+ ModuleUtils::iterDefinedGlobals(*module,
+ [&](Global* curr) { walkGlobal(curr); });
walkTable(&module->table);
walkMemory(&module->memory);
@@ -70,14 +70,14 @@ struct Metrics : public WalkerPass<PostWalker<Metrics, UnifiedExpressionVisitor<
// add memory and table
if (module->memory.exists) {
Index size = 0;
- for (auto& segment: module->memory.segments) {
+ for (auto& segment : module->memory.segments) {
size += segment.data.size();
}
counts["[memory-data]"] = size;
}
if (module->table.exists) {
Index size = 0;
- for (auto& segment: module->table.segments) {
+ for (auto& segment : module->table.segments) {
size += segment.data.size();
}
counts["[table-data]"] = size;
@@ -96,13 +96,15 @@ struct Metrics : public WalkerPass<PostWalker<Metrics, UnifiedExpressionVisitor<
counts.clear();
walkFunction(func);
counts["[vars]"] = func->getNumVars();
- counts["[binary-bytes]"] = writer.tableOfContents.functionBodies[binaryIndex++].size;
+ counts["[binary-bytes]"] =
+ writer.tableOfContents.functionBodies[binaryIndex++].size;
printCounts(std::string("func: ") + func->name.str);
});
// print for each export how much code size is due to it, i.e.,
// how much the module could shrink without it.
auto sizeAfterGlobalCleanup = [](Module* module) {
- PassRunner runner(module, PassOptions::getWithDefaultOptimizationOptions());
+ PassRunner runner(module,
+ PassOptions::getWithDefaultOptimizationOptions());
runner.setIsNested(true);
runner.addDefaultGlobalOptimizationPostPasses(); // remove stuff
runner.run();
@@ -118,13 +120,16 @@ struct Metrics : public WalkerPass<PostWalker<Metrics, UnifiedExpressionVisitor<
baseline = sizeAfterGlobalCleanup(&test);
}
for (auto& exp : module->exports) {
- // create a test module where we remove the export and then see how much can be removed thanks to that
+ // create a test module where we remove the export and then see how much
+ // can be removed thanks to that
Module test;
ModuleUtils::copyModule(*module, test);
test.removeExport(exp->name);
counts.clear();
- counts["[removable-bytes-without-it]"] = baseline - sizeAfterGlobalCleanup(&test);
- printCounts(std::string("export: ") + exp->name.str + " (" + exp->value.str + ')');
+ counts["[removable-bytes-without-it]"] =
+ baseline - sizeAfterGlobalCleanup(&test);
+ printCounts(std::string("export: ") + exp->name.str + " (" +
+ exp->value.str + ')');
}
// check how much size depends on the start method
if (!module->start.isNull()) {
@@ -132,7 +137,8 @@ struct Metrics : public WalkerPass<PostWalker<Metrics, UnifiedExpressionVisitor<
ModuleUtils::copyModule(*module, test);
test.start = Name();
counts.clear();
- counts["[removable-bytes-without-it]"] = baseline - sizeAfterGlobalCleanup(&test);
+ counts["[removable-bytes-without-it]"] =
+ baseline - sizeAfterGlobalCleanup(&test);
printCounts(std::string("start: ") + module->start.str);
}
// can't compare detailed info between passes yet
@@ -153,7 +159,7 @@ struct Metrics : public WalkerPass<PostWalker<Metrics, UnifiedExpressionVisitor<
}
void printCounts(std::string title) {
- ostream &o = cout;
+ ostream& o = cout;
vector<const char*> keys;
// add total
int total = 0;
@@ -173,9 +179,9 @@ struct Metrics : public WalkerPass<PostWalker<Metrics, UnifiedExpressionVisitor<
o << title << "\n";
for (auto* key : keys) {
auto value = counts[key];
- if (value == 0 && key[0] != '[') continue;
- o << " " << left << setw(15) << key << ": " << setw(8)
- << value;
+ if (value == 0 && key[0] != '[')
+ continue;
+ o << " " << left << setw(15) << key << ": " << setw(8) << value;
if (lastCounts.count(key)) {
int before = lastCounts[key];
int after = value;
@@ -195,12 +201,8 @@ struct Metrics : public WalkerPass<PostWalker<Metrics, UnifiedExpressionVisitor<
}
};
-Pass* createMetricsPass() {
- return new Metrics(false);
-}
+Pass* createMetricsPass() { return new Metrics(false); }
-Pass* createFunctionMetricsPass() {
- return new Metrics(true);
-}
+Pass* createFunctionMetricsPass() { return new Metrics(true); }
} // namespace wasm
diff --git a/src/passes/MinifyImportsAndExports.cpp b/src/passes/MinifyImportsAndExports.cpp
index 007f4b629..23dd2a21a 100644
--- a/src/passes/MinifyImportsAndExports.cpp
+++ b/src/passes/MinifyImportsAndExports.cpp
@@ -32,12 +32,12 @@
#include <string>
#include <unordered_set>
-#include <wasm.h>
-#include <pass.h>
-#include <shared-constants.h>
#include <asmjs/shared-constants.h>
#include <ir/import-utils.h>
#include <ir/module-utils.h>
+#include <pass.h>
+#include <shared-constants.h>
+#include <wasm.h>
namespace wasm {
@@ -45,7 +45,8 @@ struct MinifyImportsAndExports : public Pass {
bool minifyExports;
public:
- explicit MinifyImportsAndExports(bool minifyExports):minifyExports(minifyExports) {}
+ explicit MinifyImportsAndExports(bool minifyExports)
+ : minifyExports(minifyExports) {}
private:
// Generates minified names that are valid in JS.
@@ -53,8 +54,8 @@ private:
class MinifiedNames {
public:
MinifiedNames() {
- // Reserved words in JS up to size 4 - size 5 and above would mean we use an astronomical
- // number of symbols, which is not realistic anyhow.
+ // Reserved words in JS up to size 4 - size 5 and above would mean we use
+ // an astronomical number of symbols, which is not realistic anyhow.
reserved.insert("do");
reserved.insert("if");
reserved.insert("in");
@@ -71,7 +72,8 @@ private:
reserved.insert("this");
reserved.insert("with");
- validInitialChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_$";
+ validInitialChars =
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_$";
validLaterChars = validInitialChars + "0123456789";
minifiedState.push_back(0);
@@ -120,14 +122,16 @@ private:
size_t i = 0;
while (1) {
minifiedState[i]++;
- if (minifiedState[i] < (i == 0 ? validInitialChars : validLaterChars).size()) {
+ if (minifiedState[i] <
+ (i == 0 ? validInitialChars : validLaterChars).size()) {
break;
}
// Overflow.
minifiedState[i] = 0;
i++;
if (i == minifiedState.size()) {
- minifiedState.push_back(-1); // will become 0 after increment in next loop head
+ // will become 0 after increment in next loop head
+ minifiedState.push_back(-1);
}
}
}
@@ -170,11 +174,9 @@ private:
}
};
-Pass *createMinifyImportsPass() {
- return new MinifyImportsAndExports(false);
-}
+Pass* createMinifyImportsPass() { return new MinifyImportsAndExports(false); }
-Pass *createMinifyImportsAndExportsPass() {
+Pass* createMinifyImportsAndExportsPass() {
return new MinifyImportsAndExports(true);
}
diff --git a/src/passes/NameList.cpp b/src/passes/NameList.cpp
index 6b1d528e4..1051a31d1 100644
--- a/src/passes/NameList.cpp
+++ b/src/passes/NameList.cpp
@@ -18,24 +18,22 @@
// Write out the name list of the module, similar to `nm`.
//
-#include "wasm.h"
-#include "pass.h"
#include "ir/module-utils.h"
#include "ir/utils.h"
+#include "pass.h"
+#include "wasm.h"
namespace wasm {
struct NameList : public Pass {
void run(PassRunner* runner, Module* module) override {
ModuleUtils::iterDefinedFunctions(*module, [&](Function* func) {
- std::cout << " " << func->name << " : " << Measurer::measure(func->body) << '\n';
+ std::cout << " " << func->name << " : "
+ << Measurer::measure(func->body) << '\n';
});
}
};
-Pass *createNameListPass() {
- return new NameList();
-}
+Pass* createNameListPass() { return new NameList(); }
} // namespace wasm
-
diff --git a/src/passes/NoExitRuntime.cpp b/src/passes/NoExitRuntime.cpp
index 05dd639c9..680b91b23 100644
--- a/src/passes/NoExitRuntime.cpp
+++ b/src/passes/NoExitRuntime.cpp
@@ -20,10 +20,10 @@
// run.
//
+#include <asmjs/shared-constants.h>
#include <pass.h>
-#include <wasm.h>
#include <wasm-builder.h>
-#include <asmjs/shared-constants.h>
+#include <wasm.h>
using namespace std;
@@ -34,27 +34,23 @@ struct NoExitRuntime : public WalkerPass<PostWalker<NoExitRuntime>> {
Pass* create() override { return new NoExitRuntime; }
- // Remove all possible manifestations of atexit, across asm2wasm and llvm wasm backend.
- std::array<Name, 4> ATEXIT_NAMES = {{ "___cxa_atexit",
- "__cxa_atexit",
- "_atexit",
- "atexit" }};
+ // Remove all possible manifestations of atexit, across asm2wasm and llvm wasm
+ // backend.
+ std::array<Name, 4> ATEXIT_NAMES = {
+ {"___cxa_atexit", "__cxa_atexit", "_atexit", "atexit"}};
void visitCall(Call* curr) {
auto* import = getModule()->getFunctionOrNull(curr->target);
- if (!import || !import->imported() || import->module != ENV) return;
+ if (!import || !import->imported() || import->module != ENV)
+ return;
for (auto name : ATEXIT_NAMES) {
if (name == import->base) {
- replaceCurrent(
- Builder(*getModule()).replaceWithIdenticalType(curr)
- );
+ replaceCurrent(Builder(*getModule()).replaceWithIdenticalType(curr));
}
}
}
};
-Pass* createNoExitRuntimePass() {
- return new NoExitRuntime();
-}
+Pass* createNoExitRuntimePass() { return new NoExitRuntime(); }
} // namespace wasm
diff --git a/src/passes/OptimizeAddedConstants.cpp b/src/passes/OptimizeAddedConstants.cpp
index e2cfb1418..b8d011cfb 100644
--- a/src/passes/OptimizeAddedConstants.cpp
+++ b/src/passes/OptimizeAddedConstants.cpp
@@ -15,9 +15,9 @@
*/
//
-// Optimize added constants into load/store offsets. This requires the assumption
-// that low memory is unused, so that we can replace an add (which might wrap)
-// with a load/store offset (which does not).
+// Optimize added constants into load/store offsets. This requires the
+// assumption that low memory is unused, so that we can replace an add (which
+// might wrap) with a load/store offset (which does not).
//
// The propagate option also propagates offsets across set/get local pairs.
//
@@ -30,20 +30,22 @@
// speed, and may lead to code size reductions elsewhere by using fewer locals.
//
-#include <wasm.h>
-#include <pass.h>
-#include <wasm-builder.h>
#include <ir/local-graph.h>
#include <ir/local-utils.h>
#include <ir/parents.h>
+#include <pass.h>
+#include <wasm-builder.h>
+#include <wasm.h>
namespace wasm {
-template<typename P, typename T>
-class MemoryAccessOptimizer {
+template<typename P, typename T> class MemoryAccessOptimizer {
public:
- MemoryAccessOptimizer(P* parent, T* curr, Module* module, LocalGraph* localGraph) :
- parent(parent), curr(curr), module(module), localGraph(localGraph) {}
+ MemoryAccessOptimizer(P* parent,
+ T* curr,
+ Module* module,
+ LocalGraph* localGraph)
+ : parent(parent), curr(curr), module(module), localGraph(localGraph) {}
// Tries to optimize, and returns whether we propagated a change.
bool optimize() {
@@ -78,18 +80,20 @@ public:
auto& sets = localGraph->getSetses[get];
if (sets.size() == 1) {
auto* set = *sets.begin();
- // May be a zero-init (in which case, we can ignore it). Must also be valid
- // to propagate, as checked earlier in the parent.
+ // May be a zero-init (in which case, we can ignore it). Must also be
+ // valid to propagate, as checked earlier in the parent.
if (set && parent->isPropagatable(set)) {
auto* value = set->value;
if (auto* add = value->template dynCast<Binary>()) {
if (add->op == AddInt32) {
// We can optimize on either side, but only if both we find
// a constant *and* the other side cannot change in the middle.
- // TODO If it could change, we may add a new local to capture the
- // old value.
- if (tryToOptimizePropagatedAdd(add->right, add->left, get, set) ||
- tryToOptimizePropagatedAdd(add->left, add->right, get, set)) {
+ // TODO If it could change, we may add a new local to capture
+ // the old value.
+ if (tryToOptimizePropagatedAdd(
+ add->right, add->left, get, set) ||
+ tryToOptimizePropagatedAdd(
+ add->left, add->right, get, set)) {
return true;
}
}
@@ -153,7 +157,10 @@ private:
return false;
}
- bool tryToOptimizePropagatedAdd(Expression* oneSide, Expression* otherSide, GetLocal* ptr, SetLocal* set) {
+ bool tryToOptimizePropagatedAdd(Expression* oneSide,
+ Expression* otherSide,
+ GetLocal* ptr,
+ SetLocal* set) {
if (auto* c = oneSide->template dynCast<Const>()) {
if (otherSide->template is<Const>()) {
// Both sides are constant - this is not optimized code, ignore.
@@ -171,16 +178,17 @@ private:
//
// load(x, offset=10)
//
- // If the other side is a get, we may be able to prove that we can just use that same
- // local, if both it and the pointer are in SSA form. In that case,
+ // If the other side is a get, we may be able to prove that we can just
+ // use that same local, if both it and the pointer are in SSA form. In
+ // that case,
//
// y = .. // single assignment that dominates all uses
// x = y + 10 // single assignment that dominates all uses
// [..]
// load(x) => load(y, offset=10)
//
- // This is valid since dominance is transitive, so y's definition dominates the load,
- // and it is ok to replace x with y + 10 there.
+ // This is valid since dominance is transitive, so y's definition
+ // dominates the load, and it is ok to replace x with y + 10 there.
Index index = -1;
bool canReuseIndex = false;
if (auto* get = otherSide->template dynCast<GetLocal>()) {
@@ -228,7 +236,10 @@ private:
}
};
-struct OptimizeAddedConstants : public WalkerPass<PostWalker<OptimizeAddedConstants, UnifiedExpressionVisitor<OptimizeAddedConstants>>> {
+struct OptimizeAddedConstants
+ : public WalkerPass<
+ PostWalker<OptimizeAddedConstants,
+ UnifiedExpressionVisitor<OptimizeAddedConstants>>> {
bool isFunctionParallel() override { return true; }
bool propagate;
@@ -238,14 +249,16 @@ struct OptimizeAddedConstants : public WalkerPass<PostWalker<OptimizeAddedConsta
Pass* create() override { return new OptimizeAddedConstants(propagate); }
void visitLoad(Load* curr) {
- MemoryAccessOptimizer<OptimizeAddedConstants, Load> optimizer(this, curr, getModule(), localGraph.get());
+ MemoryAccessOptimizer<OptimizeAddedConstants, Load> optimizer(
+ this, curr, getModule(), localGraph.get());
if (optimizer.optimize()) {
propagated = true;
}
}
void visitStore(Store* curr) {
- MemoryAccessOptimizer<OptimizeAddedConstants, Store> optimizer(this, curr, getModule(), localGraph.get());
+ MemoryAccessOptimizer<OptimizeAddedConstants, Store> optimizer(
+ this, curr, getModule(), localGraph.get());
if (optimizer.optimize()) {
propagated = true;
}
@@ -254,9 +267,10 @@ struct OptimizeAddedConstants : public WalkerPass<PostWalker<OptimizeAddedConsta
void doWalkFunction(Function* func) {
// This pass is only valid under the assumption of unused low memory.
assert(getPassOptions().lowMemoryUnused);
- // Multiple passes may be needed if we have x + 4 + 8 etc. (nested structs in C
- // can cause this, but it's rare). Note that we only need that for the propagation
- // case (as 4 + 8 would be optimized directly if it were adjacent).
+ // Multiple passes may be needed if we have x + 4 + 8 etc. (nested structs
+ // in C can cause this, but it's rare). Note that we only need that for the
+ // propagation case (as 4 + 8 would be optimized directly if it were
+ // adjacent).
while (1) {
propagated = false;
helperIndexes.clear();
@@ -279,22 +293,21 @@ struct OptimizeAddedConstants : public WalkerPass<PostWalker<OptimizeAddedConsta
}
}
- // For a given expression, store it to a local and return us the local index we can use,
- // in order to get that value someplace else. We are provided not the expression,
- // but the set in which it is in, as the arm of an add that is the set's value (the other
- // arm is a constant, and we are not a constant).
+ // For a given expression, store it to a local and return us the local index
+ // we can use, in order to get that value someplace else. We are provided not
+ // the expression, but the set in which it is in, as the arm of an add that is
+ // the set's value (the other arm is a constant, and we are not a constant).
// We cache these, that is, use a single one for all requests.
Index getHelperIndex(SetLocal* set) {
auto iter = helperIndexes.find(set);
if (iter != helperIndexes.end()) {
return iter->second;
}
- return helperIndexes[set] = Builder(*getModule()).addVar(getFunction(), i32);
+ return helperIndexes[set] =
+ Builder(*getModule()).addVar(getFunction(), i32);
}
- bool isPropagatable(SetLocal* set) {
- return propagatable.count(set);
- }
+ bool isPropagatable(SetLocal* set) { return propagatable.count(set); }
private:
bool propagated;
@@ -305,15 +318,16 @@ private:
std::set<SetLocal*> propagatable;
void findPropagatable() {
- // Conservatively, only propagate if all uses can be removed of the original. That is,
+ // Conservatively, only propagate if all uses can be removed of the
+ // original. That is,
// x = a + 10
// f(x)
// g(x)
// should be optimized to
// f(a, offset=10)
// g(a, offset=10)
- // but if x has other uses, then avoid doing so - we'll be doing that add anyhow, so
- // the load/store offset trick won't actually help.
+ // but if x has other uses, then avoid doing so - we'll be doing that add
+ // anyhow, so the load/store offset trick won't actually help.
Parents parents(getFunction()->body);
for (auto& pair : localGraph->locations) {
auto* location = pair.first;
@@ -323,9 +337,11 @@ private:
if (add->left->is<Const>() || add->right->is<Const>()) {
// Looks like this might be relevant, check all uses.
bool canPropagate = true;
- for (auto* get :localGraph->setInfluences[set]) {
+ for (auto* get : localGraph->setInfluences[set]) {
auto* parent = parents.getParent(get);
- assert(parent); // if this is at the top level, it's the whole body - no set can exist!
+ // if this is at the top level, it's the whole body - no set can
+ // exist!
+ assert(parent);
if (!(parent->is<Load>() || parent->is<Store>())) {
canPropagate = false;
break;
@@ -342,8 +358,8 @@ private:
}
void cleanUpAfterPropagation() {
- // Remove sets that no longer have uses. This allows further propagation by letting
- // us see the accurate amount of uses of each set.
+ // Remove sets that no longer have uses. This allows further propagation by
+ // letting us see the accurate amount of uses of each set.
UnneededSetRemover remover(getFunction(), getPassOptions());
}
@@ -354,7 +370,8 @@ private:
std::map<SetLocal*, Index>& helperIndexes;
Module* module;
- Creator(std::map<SetLocal*, Index>& helperIndexes) : helperIndexes(helperIndexes) {}
+ Creator(std::map<SetLocal*, Index>& helperIndexes)
+ : helperIndexes(helperIndexes) {}
void visitSetLocal(SetLocal* curr) {
auto iter = helperIndexes.find(curr);
@@ -372,11 +389,7 @@ private:
Builder builder(*module);
*target = builder.makeGetLocal(index, i32);
replaceCurrent(
- builder.makeSequence(
- builder.makeSetLocal(index, value),
- curr
- )
- );
+ builder.makeSequence(builder.makeSetLocal(index, value), curr));
}
}
} creator(helperIndexes);
@@ -385,13 +398,12 @@ private:
}
};
-Pass *createOptimizeAddedConstantsPass() {
+Pass* createOptimizeAddedConstantsPass() {
return new OptimizeAddedConstants(false);
}
-Pass *createOptimizeAddedConstantsPropagatePass() {
+Pass* createOptimizeAddedConstantsPropagatePass() {
return new OptimizeAddedConstants(true);
}
} // namespace wasm
-
diff --git a/src/passes/OptimizeInstructions.cpp b/src/passes/OptimizeInstructions.cpp
index c098d0ed7..8a9309554 100644
--- a/src/passes/OptimizeInstructions.cpp
+++ b/src/passes/OptimizeInstructions.cpp
@@ -20,28 +20,29 @@
#include <algorithm>
-#include <wasm.h>
-#include <pass.h>
-#include <wasm-s-parser.h>
-#include <support/threads.h>
#include <ir/abstract.h>
-#include <ir/utils.h>
#include <ir/cost.h>
#include <ir/effects.h>
-#include <ir/manipulation.h>
-#include <ir/properties.h>
#include <ir/literal-utils.h>
#include <ir/load-utils.h>
+#include <ir/manipulation.h>
+#include <ir/properties.h>
+#include <ir/utils.h>
+#include <pass.h>
+#include <support/threads.h>
+#include <wasm-s-parser.h>
+#include <wasm.h>
-// TODO: Use the new sign-extension opcodes where appropriate. This needs to be conditionalized on the availability of atomics.
+// TODO: Use the new sign-extension opcodes where appropriate. This needs to be
+// conditionalized on the availability of atomics.
namespace wasm {
-Name I32_EXPR = "i32.expr",
- I64_EXPR = "i64.expr",
- F32_EXPR = "f32.expr",
- F64_EXPR = "f64.expr",
- ANY_EXPR = "any.expr";
+Name I32_EXPR = "i32.expr";
+Name I64_EXPR = "i64.expr";
+Name F32_EXPR = "f32.expr";
+Name F64_EXPR = "f64.expr";
+Name ANY_EXPR = "any.expr";
// Utilities
@@ -53,28 +54,47 @@ template<typename LocalInfoProvider>
Index getMaxBits(Expression* curr, LocalInfoProvider* localInfoProvider) {
if (auto* const_ = curr->dynCast<Const>()) {
switch (curr->type) {
- case i32: return 32 - const_->value.countLeadingZeroes().geti32();
- case i64: return 64 - const_->value.countLeadingZeroes().geti64();
- default: WASM_UNREACHABLE();
+ case i32:
+ return 32 - const_->value.countLeadingZeroes().geti32();
+ case i64:
+ return 64 - const_->value.countLeadingZeroes().geti64();
+ default:
+ WASM_UNREACHABLE();
}
} else if (auto* binary = curr->dynCast<Binary>()) {
switch (binary->op) {
// 32-bit
- case AddInt32: case SubInt32: case MulInt32:
- case DivSInt32: case DivUInt32: case RemSInt32:
- case RemUInt32: case RotLInt32: case RotRInt32: return 32;
- case AndInt32: return std::min(getMaxBits(binary->left, localInfoProvider), getMaxBits(binary->right, localInfoProvider));
- case OrInt32: case XorInt32: return std::max(getMaxBits(binary->left, localInfoProvider), getMaxBits(binary->right, localInfoProvider));
+ case AddInt32:
+ case SubInt32:
+ case MulInt32:
+ case DivSInt32:
+ case DivUInt32:
+ case RemSInt32:
+ case RemUInt32:
+ case RotLInt32:
+ case RotRInt32:
+ return 32;
+ case AndInt32:
+ return std::min(getMaxBits(binary->left, localInfoProvider),
+ getMaxBits(binary->right, localInfoProvider));
+ case OrInt32:
+ case XorInt32:
+ return std::max(getMaxBits(binary->left, localInfoProvider),
+ getMaxBits(binary->right, localInfoProvider));
case ShlInt32: {
if (auto* shifts = binary->right->dynCast<Const>()) {
- return std::min(Index(32), getMaxBits(binary->left, localInfoProvider) + Bits::getEffectiveShifts(shifts));
+ return std::min(Index(32),
+ getMaxBits(binary->left, localInfoProvider) +
+ Bits::getEffectiveShifts(shifts));
}
return 32;
}
case ShrUInt32: {
if (auto* shift = binary->right->dynCast<Const>()) {
auto maxBits = getMaxBits(binary->left, localInfoProvider);
- auto shifts = std::min(Index(Bits::getEffectiveShifts(shift)), maxBits); // can ignore more shifts than zero us out
+ auto shifts =
+ std::min(Index(Bits::getEffectiveShifts(shift)),
+ maxBits); // can ignore more shifts than zero us out
return std::max(Index(0), maxBits - shifts);
}
return 32;
@@ -82,34 +102,67 @@ Index getMaxBits(Expression* curr, LocalInfoProvider* localInfoProvider) {
case ShrSInt32: {
if (auto* shift = binary->right->dynCast<Const>()) {
auto maxBits = getMaxBits(binary->left, localInfoProvider);
- if (maxBits == 32) return 32;
- auto shifts = std::min(Index(Bits::getEffectiveShifts(shift)), maxBits); // can ignore more shifts than zero us out
+ if (maxBits == 32)
+ return 32;
+ auto shifts =
+ std::min(Index(Bits::getEffectiveShifts(shift)),
+ maxBits); // can ignore more shifts than zero us out
return std::max(Index(0), maxBits - shifts);
}
return 32;
}
// 64-bit TODO
// comparisons
- case EqInt32: case NeInt32: case LtSInt32:
- case LtUInt32: case LeSInt32: case LeUInt32:
- case GtSInt32: case GtUInt32: case GeSInt32:
+ case EqInt32:
+ case NeInt32:
+ case LtSInt32:
+ case LtUInt32:
+ case LeSInt32:
+ case LeUInt32:
+ case GtSInt32:
+ case GtUInt32:
+ case GeSInt32:
case GeUInt32:
- case EqInt64: case NeInt64: case LtSInt64:
- case LtUInt64: case LeSInt64: case LeUInt64:
- case GtSInt64: case GtUInt64: case GeSInt64:
+ case EqInt64:
+ case NeInt64:
+ case LtSInt64:
+ case LtUInt64:
+ case LeSInt64:
+ case LeUInt64:
+ case GtSInt64:
+ case GtUInt64:
+ case GeSInt64:
case GeUInt64:
- case EqFloat32: case NeFloat32:
- case LtFloat32: case LeFloat32: case GtFloat32: case GeFloat32:
- case EqFloat64: case NeFloat64:
- case LtFloat64: case LeFloat64: case GtFloat64: case GeFloat64: return 1;
+ case EqFloat32:
+ case NeFloat32:
+ case LtFloat32:
+ case LeFloat32:
+ case GtFloat32:
+ case GeFloat32:
+ case EqFloat64:
+ case NeFloat64:
+ case LtFloat64:
+ case LeFloat64:
+ case GtFloat64:
+ case GeFloat64:
+ return 1;
default: {}
}
} else if (auto* unary = curr->dynCast<Unary>()) {
switch (unary->op) {
- case ClzInt32: case CtzInt32: case PopcntInt32: return 6;
- case ClzInt64: case CtzInt64: case PopcntInt64: return 7;
- case EqZInt32: case EqZInt64: return 1;
- case WrapInt64: return std::min(Index(32), getMaxBits(unary->value, localInfoProvider));
+ case ClzInt32:
+ case CtzInt32:
+ case PopcntInt32:
+ return 6;
+ case ClzInt64:
+ case CtzInt64:
+ case PopcntInt64:
+ return 7;
+ case EqZInt32:
+ case EqZInt64:
+ return 1;
+ case WrapInt64:
+ return std::min(Index(32), getMaxBits(unary->value, localInfoProvider));
default: {}
}
} else if (auto* set = curr->dynCast<SetLocal>()) {
@@ -125,10 +178,14 @@ Index getMaxBits(Expression* curr, LocalInfoProvider* localInfoProvider) {
}
}
switch (curr->type) {
- case i32: return 32;
- case i64: return 64;
- case unreachable: return 64; // not interesting, but don't crash
- default: WASM_UNREACHABLE();
+ case i32:
+ return 32;
+ case i64:
+ return 64;
+ case unreachable:
+ return 64; // not interesting, but don't crash
+ default:
+ WASM_UNREACHABLE();
}
}
@@ -170,9 +227,11 @@ struct LocalScanner : PostWalker<LocalScanner> {
void visitSetLocal(SetLocal* curr) {
auto* func = getFunction();
- if (func->isParam(curr->index)) return;
+ if (func->isParam(curr->index))
+ return;
auto type = getFunction()->getLocalType(curr->index);
- if (type != i32 && type != i64) return;
+ if (type != i32 && type != i64)
+ return;
// an integer var, worth processing
auto* value = Properties::getFallthrough(curr->value);
auto& info = localInfo[curr->index];
@@ -188,26 +247,32 @@ struct LocalScanner : PostWalker<LocalScanner> {
if (info.signExtedBits == 0) {
info.signExtedBits = signExtBits; // first info we see
} else if (info.signExtedBits != signExtBits) {
- info.signExtedBits = LocalInfo::kUnknown; // contradictory information, give up
+ // contradictory information, give up
+ info.signExtedBits = LocalInfo::kUnknown;
}
}
- // define this for the templated getMaxBits method. we know nothing here yet about locals, so return the maxes
- Index getMaxBitsForLocal(GetLocal* get) {
- return getBitsForType(get->type);
- }
+ // define this for the templated getMaxBits method. we know nothing here yet
+ // about locals, so return the maxes
+ Index getMaxBitsForLocal(GetLocal* get) { return getBitsForType(get->type); }
Index getBitsForType(Type type) {
switch (type) {
- case i32: return 32;
- case i64: return 64;
- default: return -1;
+ case i32:
+ return 32;
+ case i64:
+ return 64;
+ default:
+ return -1;
}
}
};
// Main pass class
-struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions, UnifiedExpressionVisitor<OptimizeInstructions>>> {
+struct OptimizeInstructions
+ : public WalkerPass<
+ PostWalker<OptimizeInstructions,
+ UnifiedExpressionVisitor<OptimizeInstructions>>> {
bool isFunctionParallel() override { return true; }
Pass* create() override { return new OptimizeInstructions; }
@@ -229,7 +294,8 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
}
void visitExpression(Expression* curr) {
- // we may be able to apply multiple patterns, one may open opportunities that look deeper NB: patterns must not have cycles
+ // we may be able to apply multiple patterns, one may open opportunities
+ // that look deeper NB: patterns must not have cycles
while (1) {
auto* handOptimized = handOptimize(curr);
if (handOptimized) {
@@ -258,14 +324,15 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
}
}
- // Optimizations that don't yet fit in the pattern DSL, but could be eventually maybe
+ // Optimizations that don't yet fit in the pattern DSL, but could be
+ // eventually maybe
Expression* handOptimize(Expression* curr) {
// if this contains dead code, don't bother trying to optimize it, the type
- // might change (if might not be unreachable if just one arm is, for example).
- // this optimization pass focuses on actually executing code. the only
- // exceptions are control flow changes
- if (curr->type == unreachable &&
- !curr->is<Break>() && !curr->is<Switch>() && !curr->is<If>()) {
+ // might change (if might not be unreachable if just one arm is, for
+ // example). this optimization pass focuses on actually executing code. the
+ // only exceptions are control flow changes
+ if (curr->type == unreachable && !curr->is<Break>() &&
+ !curr->is<Switch>() && !curr->is<If>()) {
return nullptr;
}
if (auto* binary = curr->dynCast<Binary>()) {
@@ -277,10 +344,13 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
auto bits = Properties::getAlmostSignExtBits(binary, extraShifts);
if (extraShifts == 0) {
if (auto* load = Properties::getFallthrough(ext)->dynCast<Load>()) {
- // pattern match a load of 8 bits and a sign extend using a shl of 24 then shr_s of 24 as well, etc.
+ // pattern match a load of 8 bits and a sign extend using a shl of
+ // 24 then shr_s of 24 as well, etc.
if (LoadUtils::canBeSigned(load) &&
- ((load->bytes == 1 && bits == 8) || (load->bytes == 2 && bits == 16))) {
- // if the value falls through, we can't alter the load, as it might be captured in a tee
+ ((load->bytes == 1 && bits == 8) ||
+ (load->bytes == 2 && bits == 16))) {
+ // if the value falls through, we can't alter the load, as it
+ // might be captured in a tee
if (load->signed_ == true || load == ext) {
load->signed_ = true;
return ext;
@@ -289,8 +359,10 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
}
}
// if the sign-extend input cannot have a sign bit, we don't need it
- // we also don't need it if it already has an identical-sized sign extend
- if (getMaxBits(ext, this) + extraShifts < bits || isSignExted(ext, bits)) {
+ // we also don't need it if it already has an identical-sized sign
+ // extend
+ if (getMaxBits(ext, this) + extraShifts < bits ||
+ isSignExted(ext, bits)) {
return removeAlmostSignExt(binary);
}
} else if (binary->op == EqInt32 || binary->op == NeInt32) {
@@ -300,34 +372,44 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
return Builder(*getModule()).makeUnary(EqZInt32, binary->left);
}
if (auto* ext = Properties::getSignExtValue(binary->left)) {
- // we are comparing a sign extend to a constant, which means we can use a cheaper zext
+ // we are comparing a sign extend to a constant, which means we can
+ // use a cheaper zext
auto bits = Properties::getSignExtBits(binary->left);
binary->left = makeZeroExt(ext, bits);
- // when we replace the sign-ext of the non-constant with a zero-ext, we are forcing
- // the high bits to be all zero, instead of all zero or all one depending on the
- // sign bit. so we may be changing the high bits from all one to all zero:
- // * if the constant value's higher bits are mixed, then it can't be equal anyhow
- // * if they are all zero, we may get a false true if the non-constant's upper bits
- // were one. this can only happen if the non-constant's sign bit is set, so this
- // false true is a risk only if the constant's sign bit is set (otherwise, false).
- // But a constant with a sign bit but with upper bits zero is impossible to be
- // equal to a sign-extended value anyhow, so the entire thing is false.
- // * if they were all one, we may get a false false, if the only difference is in
- // those upper bits. that means we are equal on the other bits, including the sign
- // bit. so we can just mask off the upper bits in the constant value, in this
- // case, forcing them to zero like we do in the zero-extend.
+ // when we replace the sign-ext of the non-constant with a zero-ext,
+ // we are forcing the high bits to be all zero, instead of all zero
+ // or all one depending on the sign bit. so we may be changing the
+ // high bits from all one to all zero:
+ // * if the constant value's higher bits are mixed, then it can't
+ // be equal anyhow
+ // * if they are all zero, we may get a false true if the
+ // non-constant's upper bits were one. this can only happen if
+ // the non-constant's sign bit is set, so this false true is a
+ // risk only if the constant's sign bit is set (otherwise,
+ // false). But a constant with a sign bit but with upper bits
+ // zero is impossible to be equal to a sign-extended value
+ // anyhow, so the entire thing is false.
+ // * if they were all one, we may get a false false, if the only
+ // difference is in those upper bits. that means we are equal on
+ // the other bits, including the sign bit. so we can just mask
+ // off the upper bits in the constant value, in this case,
+ // forcing them to zero like we do in the zero-extend.
int32_t constValue = c->value.geti32();
auto upperConstValue = constValue & ~Bits::lowBitMask(bits);
uint32_t count = PopCount(upperConstValue);
auto constSignBit = constValue & (1 << (bits - 1));
- if ((count > 0 && count < 32 - bits) || (constSignBit && count == 0)) {
- // mixed or [zero upper const bits with sign bit set]; the compared values can never be identical, so
- // force something definitely impossible even after zext
+ if ((count > 0 && count < 32 - bits) ||
+ (constSignBit && count == 0)) {
+ // mixed or [zero upper const bits with sign bit set]; the
+ // compared values can never be identical, so force something
+ // definitely impossible even after zext
assert(bits < 32);
c->value = Literal(int32_t(0x80000000));
- // TODO: if no side effects, we can just replace it all with 1 or 0
+ // TODO: if no side effects, we can just replace it all with 1 or
+ // 0
} else {
- // otherwise, they are all ones, so we can mask them off as mentioned before
+ // otherwise, they are all ones, so we can mask them off as
+ // mentioned before
c->value = c->value.and_(Literal(Bits::lowBitMask(bits)));
}
return binary;
@@ -336,13 +418,15 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
if (auto* right = Properties::getSignExtValue(binary->right)) {
auto bits = Properties::getSignExtBits(binary->left);
if (Properties::getSignExtBits(binary->right) == bits) {
- // we are comparing two sign-exts with the same bits, so we may as well replace both with cheaper zexts
+ // we are comparing two sign-exts with the same bits, so we may as
+ // well replace both with cheaper zexts
binary->left = makeZeroExt(left, bits);
binary->right = makeZeroExt(right, bits);
return binary;
}
} else if (auto* load = binary->right->dynCast<Load>()) {
- // we are comparing a load to a sign-ext, we may be able to switch to zext
+ // we are comparing a load to a sign-ext, we may be able to switch
+ // to zext
auto leftBits = Properties::getSignExtBits(binary->left);
if (load->signed_ && leftBits == load->bytes * 8) {
load->signed_ = false;
@@ -352,7 +436,8 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
}
} else if (auto* load = binary->left->dynCast<Load>()) {
if (auto* right = Properties::getSignExtValue(binary->right)) {
- // we are comparing a load to a sign-ext, we may be able to switch to zext
+ // we are comparing a load to a sign-ext, we may be able to switch
+ // to zext
auto rightBits = Properties::getSignExtBits(binary->right);
if (load->signed_ && rightBits == load->bytes * 8) {
load->signed_ = false;
@@ -361,7 +446,8 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
}
}
}
- // note that both left and right may be consts, but then we let precompute compute the constant result
+ // note that both left and right may be consts, but then we let
+ // precompute compute the constant result
} else if (binary->op == AddInt32) {
// try to get rid of (0 - ..), that is, a zero only used to negate an
// int. an add of a subtract can be flipped in order to remove it:
@@ -382,7 +468,8 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
if (sub->op == SubInt32) {
if (auto* subZero = sub->left->dynCast<Const>()) {
if (subZero->value.geti32() == 0) {
- if (EffectAnalyzer::canReorder(getPassOptions(), sub->right, binary->right)) {
+ if (EffectAnalyzer::canReorder(
+ getPassOptions(), sub->right, binary->right)) {
sub->left = binary->right;
return sub;
}
@@ -414,10 +501,12 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
}
}
auto* ret = optimizeAddedConstants(binary);
- if (ret) return ret;
+ if (ret)
+ return ret;
} else if (binary->op == SubInt32) {
auto* ret = optimizeAddedConstants(binary);
- if (ret) return ret;
+ if (ret)
+ return ret;
}
// a bunch of operations on a constant right side can be simplified
if (auto* right = binary->right->dynCast<Const>()) {
@@ -443,7 +532,8 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
}
// some math operations have trivial results
Expression* ret = optimizeWithConstantOnRight(binary);
- if (ret) return ret;
+ if (ret)
+ return ret;
// the square of some operations can be merged
if (auto* left = binary->left->dynCast<Binary>()) {
if (left->op == binary->op) {
@@ -454,11 +544,13 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
} else if (left->op == OrInt32) {
leftRight->value = leftRight->value.or_(right->value);
return left;
- } else if (left->op == ShlInt32 || left->op == ShrUInt32 || left->op == ShrSInt32 ||
- left->op == ShlInt64 || left->op == ShrUInt64 || left->op == ShrSInt64) {
- // shifts only use an effective amount from the constant, so adding must
- // be done carefully
- auto total = Bits::getEffectiveShifts(leftRight) + Bits::getEffectiveShifts(right);
+ } else if (left->op == ShlInt32 || left->op == ShrUInt32 ||
+ left->op == ShrSInt32 || left->op == ShlInt64 ||
+ left->op == ShrUInt64 || left->op == ShrSInt64) {
+ // shifts only use an effective amount from the constant, so
+ // adding must be done carefully
+ auto total = Bits::getEffectiveShifts(leftRight) +
+ Bits::getEffectiveShifts(right);
if (total == Bits::getEffectiveShifts(total, right->type)) {
// no overflow, we can do this
leftRight->value = Literal::makeFromInt32(total, right->type);
@@ -483,7 +575,8 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
// a bunch of operations on a constant left side can be simplified
if (binary->left->is<Const>()) {
Expression* ret = optimizeWithConstantOnLeft(binary);
- if (ret) return ret;
+ if (ret)
+ return ret;
}
// bitwise operations
if (binary->op == AndInt32) {
@@ -540,40 +633,89 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
if (unary->op == EqZInt32) {
if (auto* inner = unary->value->dynCast<Binary>()) {
switch (inner->op) {
- case EqInt32: inner->op = NeInt32; return inner;
- case NeInt32: inner->op = EqInt32; return inner;
- case LtSInt32: inner->op = GeSInt32; return inner;
- case LtUInt32: inner->op = GeUInt32; return inner;
- case LeSInt32: inner->op = GtSInt32; return inner;
- case LeUInt32: inner->op = GtUInt32; return inner;
- case GtSInt32: inner->op = LeSInt32; return inner;
- case GtUInt32: inner->op = LeUInt32; return inner;
- case GeSInt32: inner->op = LtSInt32; return inner;
- case GeUInt32: inner->op = LtUInt32; return inner;
+ case EqInt32:
+ inner->op = NeInt32;
+ return inner;
+ case NeInt32:
+ inner->op = EqInt32;
+ return inner;
+ case LtSInt32:
+ inner->op = GeSInt32;
+ return inner;
+ case LtUInt32:
+ inner->op = GeUInt32;
+ return inner;
+ case LeSInt32:
+ inner->op = GtSInt32;
+ return inner;
+ case LeUInt32:
+ inner->op = GtUInt32;
+ return inner;
+ case GtSInt32:
+ inner->op = LeSInt32;
+ return inner;
+ case GtUInt32:
+ inner->op = LeUInt32;
+ return inner;
+ case GeSInt32:
+ inner->op = LtSInt32;
+ return inner;
+ case GeUInt32:
+ inner->op = LtUInt32;
+ return inner;
- case EqInt64: inner->op = NeInt64; return inner;
- case NeInt64: inner->op = EqInt64; return inner;
- case LtSInt64: inner->op = GeSInt64; return inner;
- case LtUInt64: inner->op = GeUInt64; return inner;
- case LeSInt64: inner->op = GtSInt64; return inner;
- case LeUInt64: inner->op = GtUInt64; return inner;
- case GtSInt64: inner->op = LeSInt64; return inner;
- case GtUInt64: inner->op = LeUInt64; return inner;
- case GeSInt64: inner->op = LtSInt64; return inner;
- case GeUInt64: inner->op = LtUInt64; return inner;
+ case EqInt64:
+ inner->op = NeInt64;
+ return inner;
+ case NeInt64:
+ inner->op = EqInt64;
+ return inner;
+ case LtSInt64:
+ inner->op = GeSInt64;
+ return inner;
+ case LtUInt64:
+ inner->op = GeUInt64;
+ return inner;
+ case LeSInt64:
+ inner->op = GtSInt64;
+ return inner;
+ case LeUInt64:
+ inner->op = GtUInt64;
+ return inner;
+ case GtSInt64:
+ inner->op = LeSInt64;
+ return inner;
+ case GtUInt64:
+ inner->op = LeUInt64;
+ return inner;
+ case GeSInt64:
+ inner->op = LtSInt64;
+ return inner;
+ case GeUInt64:
+ inner->op = LtUInt64;
+ return inner;
- case EqFloat32: inner->op = NeFloat32; return inner;
- case NeFloat32: inner->op = EqFloat32; return inner;
+ case EqFloat32:
+ inner->op = NeFloat32;
+ return inner;
+ case NeFloat32:
+ inner->op = EqFloat32;
+ return inner;
- case EqFloat64: inner->op = NeFloat64; return inner;
- case NeFloat64: inner->op = EqFloat64; return inner;
+ case EqFloat64:
+ inner->op = NeFloat64;
+ return inner;
+ case NeFloat64:
+ inner->op = EqFloat64;
+ return inner;
default: {}
}
}
// eqz of a sign extension can be of zero-extension
if (auto* ext = Properties::getSignExtValue(unary->value)) {
- // we are comparing a sign extend to a constant, which means we can use a cheaper zext
+ // we are comparing a sign extend to a constant, which means we can
+ // use a cheaper zext
auto bits = Properties::getSignExtBits(unary->value);
unary->value = makeZeroExt(ext, bits);
return unary;
@@ -595,24 +737,26 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
std::swap(iff->ifTrue, iff->ifFalse);
}
}
- if (iff->condition->type != unreachable && ExpressionAnalyzer::equal(iff->ifTrue, iff->ifFalse)) {
+ if (iff->condition->type != unreachable &&
+ ExpressionAnalyzer::equal(iff->ifTrue, iff->ifFalse)) {
// sides are identical, fold
- // if we can replace the if with one arm, and no side effects in the condition, do that
- auto needCondition = EffectAnalyzer(getPassOptions(), iff->condition).hasSideEffects();
+ // if we can replace the if with one arm, and no side effects in the
+ // condition, do that
+ auto needCondition =
+ EffectAnalyzer(getPassOptions(), iff->condition).hasSideEffects();
auto typeIsIdentical = iff->ifTrue->type == iff->type;
if (typeIsIdentical && !needCondition) {
return iff->ifTrue;
} else {
Builder builder(*getModule());
if (typeIsIdentical) {
- return builder.makeSequence(
- builder.makeDrop(iff->condition),
- iff->ifTrue
- );
+ return builder.makeSequence(builder.makeDrop(iff->condition),
+ iff->ifTrue);
} else {
- // the types diff. as the condition is reachable, that means the if must be
- // concrete while the arm is not
- assert(isConcreteType(iff->type) && iff->ifTrue->type == unreachable);
+ // the types diff. as the condition is reachable, that means the
+ // if must be concrete while the arm is not
+ assert(isConcreteType(iff->type) &&
+ iff->ifTrue->type == unreachable);
// emit a block with a forced type
auto* ret = builder.makeBlock();
if (needCondition) {
@@ -638,22 +782,24 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
}
}
if (auto* c = select->condition->dynCast<Const>()) {
- // constant condition, we can just pick the right side (barring side effects)
+ // constant condition, we can just pick the right side (barring side
+ // effects)
if (c->value.getInteger()) {
- if (!EffectAnalyzer(getPassOptions(), select->ifFalse).hasSideEffects()) {
+ if (!EffectAnalyzer(getPassOptions(), select->ifFalse)
+ .hasSideEffects()) {
return select->ifTrue;
} else {
- // don't bother - we would need to reverse the order using a temp local, which is bad
+ // don't bother - we would need to reverse the order using a temp
+ // local, which is bad
}
} else {
- if (!EffectAnalyzer(getPassOptions(), select->ifTrue).hasSideEffects()) {
+ if (!EffectAnalyzer(getPassOptions(), select->ifTrue)
+ .hasSideEffects()) {
return select->ifFalse;
} else {
Builder builder(*getModule());
- return builder.makeSequence(
- builder.makeDrop(select->ifTrue),
- select->ifFalse
- );
+ return builder.makeSequence(builder.makeDrop(select->ifTrue),
+ select->ifFalse);
}
}
}
@@ -676,10 +822,8 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
// can reorder
if (!condition.invalidates(value)) {
Builder builder(*getModule());
- return builder.makeSequence(
- builder.makeDrop(select->condition),
- select->ifTrue
- );
+ return builder.makeSequence(builder.makeDrop(select->condition),
+ select->ifTrue);
}
}
}
@@ -705,8 +849,9 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
}
}
} else if (auto* ext = Properties::getSignExtValue(binary)) {
- // if sign extending the exact bit size we store, we can skip the extension
- // if extending something bigger, then we just alter bits we don't save anyhow
+ // if sign extending the exact bit size we store, we can skip the
+ // extension if extending something bigger, then we just alter bits we
+ // don't save anyhow
if (Properties::getSignExtBits(binary) >= Index(store->bytes) * 8) {
store->value = ext;
}
@@ -736,11 +881,13 @@ private:
void canonicalize(Binary* binary) {
assert(Properties::isSymmetric(binary));
auto swap = [&]() {
- assert(EffectAnalyzer::canReorder(getPassOptions(), binary->left, binary->right));
+ assert(EffectAnalyzer::canReorder(
+ getPassOptions(), binary->left, binary->right));
std::swap(binary->left, binary->right);
};
auto maybeSwap = [&]() {
- if (EffectAnalyzer::canReorder(getPassOptions(), binary->left, binary->right)) {
+ if (EffectAnalyzer::canReorder(
+ getPassOptions(), binary->left, binary->right)) {
swap();
}
};
@@ -748,7 +895,8 @@ private:
if (binary->left->is<Const>() && !binary->right->is<Const>()) {
return swap();
}
- if (binary->right->is<Const>()) return;
+ if (binary->right->is<Const>())
+ return;
// Prefer a get on the right.
if (binary->left->is<GetLocal>() && !binary->right->is<GetLocal>()) {
return maybeSwap();
@@ -793,7 +941,8 @@ private:
}
} else if (auto* binary = boolean->dynCast<Binary>()) {
if (binary->op == OrInt32) {
- // an or flowing into a boolean context can consider each input as boolean
+ // an or flowing into a boolean context can consider each input as
+ // boolean
binary->left = optimizeBoolean(binary->left);
binary->right = optimizeBoolean(binary->right);
} else if (binary->op == NeInt32) {
@@ -805,7 +954,8 @@ private:
}
}
if (auto* ext = Properties::getSignExtValue(binary)) {
- // use a cheaper zero-extent, we just care about the boolean value anyhow
+ // use a cheaper zero-extent, we just care about the boolean value
+ // anyhow
return makeZeroExt(ext, Properties::getSignExtBits(binary));
}
} else if (auto* block = boolean->dynCast<Block>()) {
@@ -822,12 +972,14 @@ private:
return boolean;
}
- // find added constants in an expression tree, including multiplied/shifted, and combine them
- // note that we ignore division/shift-right, as rounding makes this nonlinear, so not a valid opt
+ // find added constants in an expression tree, including multiplied/shifted,
+ // and combine them note that we ignore division/shift-right, as rounding
+ // makes this nonlinear, so not a valid opt
Expression* optimizeAddedConstants(Binary* binary) {
uint32_t constant = 0;
std::vector<Const*> constants;
- std::function<void (Expression*, int)> seek = [&](Expression* curr, int mul) {
+ std::function<void(Expression*, int)> seek = [&](Expression* curr,
+ int mul) {
if (auto* c = curr->dynCast<Const>()) {
uint32_t value = c->value.geti32();
if (value != 0) {
@@ -867,7 +1019,8 @@ private:
// find all factors
seek(binary, 1);
if (constants.size() <= 1) {
- // nothing much to do, except for the trivial case of adding/subbing a zero
+ // nothing much to do, except for the trivial case of adding/subbing a
+ // zero
if (auto* c = binary->right->dynCast<Const>()) {
if (c->value.geti32() == 0) {
return binary->left;
@@ -906,19 +1059,24 @@ private:
return;
}
} else if (curr->op == ShlInt32) {
- // shifting a 0 is a 0, or anything by 0 has no effect, all unless the shift has side effects
- if (((left && left->value.geti32() == 0) || (right && Bits::getEffectiveShifts(right) == 0)) &&
+ // shifting a 0 is a 0, or anything by 0 has no effect, all unless the
+ // shift has side effects
+ if (((left && left->value.geti32() == 0) ||
+ (right && Bits::getEffectiveShifts(right) == 0)) &&
!EffectAnalyzer(passOptions, curr->right).hasSideEffects()) {
replaceCurrent(curr->left);
return;
}
} else if (curr->op == MulInt32) {
- // multiplying by zero is a zero, unless the other side has side effects
- if (left && left->value.geti32() == 0 && !EffectAnalyzer(passOptions, curr->right).hasSideEffects()) {
+ // multiplying by zero is a zero, unless the other side has side
+ // effects
+ if (left && left->value.geti32() == 0 &&
+ !EffectAnalyzer(passOptions, curr->right).hasSideEffects()) {
replaceCurrent(left);
return;
}
- if (right && right->value.geti32() == 0 && !EffectAnalyzer(passOptions, curr->left).hasSideEffects()) {
+ if (right && right->value.geti32() == 0 &&
+ !EffectAnalyzer(passOptions, curr->left).hasSideEffects()) {
replaceCurrent(right);
return;
}
@@ -927,50 +1085,58 @@ private:
};
Expression* walked = binary;
ZeroRemover(getPassOptions()).walk(walked);
- if (constant == 0) return walked; // nothing more to do
+ if (constant == 0)
+ return walked; // nothing more to do
if (auto* c = walked->dynCast<Const>()) {
assert(c->value.geti32() == 0);
c->value = Literal(constant);
return c;
}
Builder builder(*getModule());
- return builder.makeBinary(AddInt32,
- walked,
- builder.makeConst(Literal(constant))
- );
+ return builder.makeBinary(
+ AddInt32, walked, builder.makeConst(Literal(constant)));
}
- // expensive1 | expensive2 can be turned into expensive1 ? 1 : expensive2, and
- // expensive | cheap can be turned into cheap ? 1 : expensive,
+ // expensive1 | expensive2 can be turned into expensive1 ? 1 : expensive2,
+ // and expensive | cheap can be turned into cheap ? 1 : expensive,
// so that we can avoid one expensive computation, if it has no side effects.
Expression* conditionalizeExpensiveOnBitwise(Binary* binary) {
// this operation can increase code size, so don't always do it
auto& options = getPassRunner()->options;
- if (options.optimizeLevel < 2 || options.shrinkLevel > 0) return nullptr;
+ if (options.optimizeLevel < 2 || options.shrinkLevel > 0)
+ return nullptr;
const auto MIN_COST = 7;
assert(binary->op == AndInt32 || binary->op == OrInt32);
- if (binary->right->is<Const>()) return nullptr; // trivial
- // bitwise logical operator on two non-numerical values, check if they are boolean
+ if (binary->right->is<Const>())
+ return nullptr; // trivial
+ // bitwise logical operator on two non-numerical values, check if they are
+ // boolean
auto* left = binary->left;
auto* right = binary->right;
- if (!Properties::emitsBoolean(left) || !Properties::emitsBoolean(right)) return nullptr;
+ if (!Properties::emitsBoolean(left) || !Properties::emitsBoolean(right))
+ return nullptr;
auto leftEffects = EffectAnalyzer(getPassOptions(), left);
auto rightEffects = EffectAnalyzer(getPassOptions(), right);
auto leftHasSideEffects = leftEffects.hasSideEffects();
auto rightHasSideEffects = rightEffects.hasSideEffects();
- if (leftHasSideEffects && rightHasSideEffects) return nullptr; // both must execute
+ if (leftHasSideEffects && rightHasSideEffects)
+ return nullptr; // both must execute
// canonicalize with side effects, if any, happening on the left
if (rightHasSideEffects) {
- if (CostAnalyzer(left).cost < MIN_COST) return nullptr; // avoidable code is too cheap
- if (leftEffects.invalidates(rightEffects)) return nullptr; // cannot reorder
+ if (CostAnalyzer(left).cost < MIN_COST)
+ return nullptr; // avoidable code is too cheap
+ if (leftEffects.invalidates(rightEffects))
+ return nullptr; // cannot reorder
std::swap(left, right);
} else if (leftHasSideEffects) {
- if (CostAnalyzer(right).cost < MIN_COST) return nullptr; // avoidable code is too cheap
+ if (CostAnalyzer(right).cost < MIN_COST)
+ return nullptr; // avoidable code is too cheap
} else {
// no side effects, reorder based on cost estimation
auto leftCost = CostAnalyzer(left).cost;
auto rightCost = CostAnalyzer(right).cost;
- if (std::max(leftCost, rightCost) < MIN_COST) return nullptr; // avoidable code is too cheap
+ if (std::max(leftCost, rightCost) < MIN_COST)
+ return nullptr; // avoidable code is too cheap
// canonicalize with expensive code on the right
if (leftCost > rightCost) {
std::swap(left, right);
@@ -979,9 +1145,11 @@ private:
// worth it! perform conditionalization
Builder builder(*getModule());
if (binary->op == OrInt32) {
- return builder.makeIf(left, builder.makeConst(Literal(int32_t(1))), right);
+ return builder.makeIf(
+ left, builder.makeConst(Literal(int32_t(1))), right);
} else { // &
- return builder.makeIf(left, right, builder.makeConst(Literal(int32_t(0))));
+ return builder.makeIf(
+ left, right, builder.makeConst(Literal(int32_t(0))));
}
}
@@ -1015,8 +1183,9 @@ private:
// fold constant factors into the offset
void optimizeMemoryAccess(Expression*& ptr, Address& offset) {
- // ptr may be a const, but it isn't worth folding that in (we still have a const); in fact,
- // it's better to do the opposite for gzip purposes as well as for readability.
+ // ptr may be a const, but it isn't worth folding that in (we still have a
+ // const); in fact, it's better to do the opposite for gzip purposes as well
+ // as for readability.
auto* last = ptr->dynCast<Const>();
if (last) {
// don't do this if it would wrap the pointer
@@ -1058,7 +1227,8 @@ private:
Expression* makeZeroExt(Expression* curr, int32_t bits) {
Builder builder(*getModule());
- return builder.makeBinary(AndInt32, curr, builder.makeConst(Literal(Bits::lowBitMask(bits))));
+ return builder.makeBinary(
+ AndInt32, curr, builder.makeConst(Literal(Bits::lowBitMask(bits))));
}
// given an "almost" sign extend - either a proper one, or it
@@ -1070,7 +1240,8 @@ private:
auto* outerConst = outer->right->cast<Const>();
auto* innerConst = inner->right->cast<Const>();
auto* value = inner->left;
- if (outerConst->value == innerConst->value) return value;
+ if (outerConst->value == innerConst->value)
+ return value;
// add a shift, by reusing the existing node
innerConst->value = innerConst->value.sub(outerConst->value);
return inner;
@@ -1105,7 +1276,8 @@ private:
return binary->left;
} else if ((binary->op == Abstract::getBinary(type, Abstract::Mul) ||
binary->op == Abstract::getBinary(type, Abstract::And)) &&
- !EffectAnalyzer(getPassOptions(), binary->left).hasSideEffects()) {
+ !EffectAnalyzer(getPassOptions(), binary->left)
+ .hasSideEffects()) {
return binary->right;
}
}
@@ -1116,7 +1288,8 @@ private:
if (binary->op == Abstract::getBinary(type, Abstract::And)) {
return binary->left;
} else if (binary->op == Abstract::getBinary(type, Abstract::Or) &&
- !EffectAnalyzer(getPassOptions(), binary->left).hasSideEffects()) {
+ !EffectAnalyzer(getPassOptions(), binary->left)
+ .hasSideEffects()) {
return binary->right;
}
}
@@ -1129,15 +1302,10 @@ private:
if (binary->op == Abstract::getBinary(type, Abstract::Add) ||
binary->op == Abstract::getBinary(type, Abstract::Sub)) {
auto value = right->value.getInteger();
- if (value == 0x40 ||
- value == 0x2000 ||
- value == 0x100000 ||
- value == 0x8000000 ||
- value == 0x400000000LL ||
- value == 0x20000000000LL ||
- value == 0x1000000000000LL ||
- value == 0x80000000000000LL ||
- value == 0x4000000000000000LL) {
+ if (value == 0x40 || value == 0x2000 || value == 0x100000 ||
+ value == 0x8000000 || value == 0x400000000LL ||
+ value == 0x20000000000LL || value == 0x1000000000000LL ||
+ value == 0x80000000000000LL || value == 0x4000000000000000LL) {
right->value = right->value.neg();
if (binary->op == Abstract::getBinary(type, Abstract::Add)) {
binary->op = Abstract::getBinary(type, Abstract::Sub);
@@ -1202,12 +1370,16 @@ private:
left->op == Abstract::getBinary(type, Abstract::Sub)) {
if (auto* leftConst = left->right->dynCast<Const>()) {
if (auto* rightConst = binary->right->dynCast<Const>()) {
- return combineRelationalConstants(binary, left, leftConst, nullptr, rightConst);
+ return combineRelationalConstants(
+ binary, left, leftConst, nullptr, rightConst);
} else if (auto* rightBinary = binary->right->dynCast<Binary>()) {
- if (rightBinary->op == Abstract::getBinary(type, Abstract::Add) ||
- rightBinary->op == Abstract::getBinary(type, Abstract::Sub)) {
+ if (rightBinary->op ==
+ Abstract::getBinary(type, Abstract::Add) ||
+ rightBinary->op ==
+ Abstract::getBinary(type, Abstract::Sub)) {
if (auto* rightConst = rightBinary->right->dynCast<Const>()) {
- return combineRelationalConstants(binary, left, leftConst, rightBinary, rightConst);
+ return combineRelationalConstants(
+ binary, left, leftConst, rightBinary, rightConst);
}
}
}
@@ -1220,9 +1392,13 @@ private:
}
// given a relational binary with a const on both sides, combine the constants
- // left is also a binary, and has a constant; right may be just a constant, in which
- // case right is nullptr
- Expression* combineRelationalConstants(Binary* binary, Binary* left, Const* leftConst, Binary* right, Const* rightConst) {
+ // left is also a binary, and has a constant; right may be just a constant, in
+ // which case right is nullptr
+ Expression* combineRelationalConstants(Binary* binary,
+ Binary* left,
+ Const* leftConst,
+ Binary* right,
+ Const* rightConst) {
auto type = binary->right->type;
// we fold constants to the right
Literal extra = leftConst->value;
@@ -1237,8 +1413,8 @@ private:
return binary;
}
- // given a binary expression with equal children and no side effects in either,
- // we can fold various things
+ // given a binary expression with equal children and no side effects in
+ // either, we can fold various things
// TODO: trinaries, things like (x & (y & x)) ?
Expression* optimizeBinaryWithEqualEffectlessChildren(Binary* binary) {
// TODO add: perhaps worth doing 2*x if x is quite large?
@@ -1246,7 +1422,8 @@ private:
case SubInt32:
case XorInt32:
case SubInt64:
- case XorInt64: return LiteralUtils::makeZero(binary->left->type, *getModule());
+ case XorInt64:
+ return LiteralUtils::makeZero(binary->left->type, *getModule());
case NeInt64:
case LtSInt64:
case LtUInt64:
@@ -1256,11 +1433,13 @@ private:
case LtSInt32:
case LtUInt32:
case GtSInt32:
- case GtUInt32: return LiteralUtils::makeZero(i32, *getModule());
+ case GtUInt32:
+ return LiteralUtils::makeZero(i32, *getModule());
case AndInt32:
case OrInt32:
case AndInt64:
- case OrInt64: return binary->left;
+ case OrInt64:
+ return binary->left;
case EqInt32:
case LeSInt32:
case LeUInt32:
@@ -1270,14 +1449,14 @@ private:
case LeSInt64:
case LeUInt64:
case GeSInt64:
- case GeUInt64: return LiteralUtils::makeFromInt32(1, i32, *getModule());
- default: return nullptr;
+ case GeUInt64:
+ return LiteralUtils::makeFromInt32(1, i32, *getModule());
+ default:
+ return nullptr;
}
}
};
-Pass *createOptimizeInstructionsPass() {
- return new OptimizeInstructions();
-}
+Pass* createOptimizeInstructionsPass() { return new OptimizeInstructions(); }
} // namespace wasm
diff --git a/src/passes/PickLoadSigns.cpp b/src/passes/PickLoadSigns.cpp
index fce50b4bb..f494159a1 100644
--- a/src/passes/PickLoadSigns.cpp
+++ b/src/passes/PickLoadSigns.cpp
@@ -14,9 +14,9 @@
* limitations under the License.
*/
-#include <wasm.h>
-#include <pass.h>
#include <ir/properties.h>
+#include <pass.h>
+#include <wasm.h>
namespace wasm {
@@ -39,7 +39,8 @@ struct PickLoadSigns : public WalkerPass<ExpressionStackWalker<PickLoadSigns>> {
};
std::vector<Usage> usages; // local index => usage
- std::unordered_map<Load*, Index> loads; // loads that write to a local => the local
+ // loads that write to a local => the local
+ std::unordered_map<Load*, Index> loads;
void doWalkFunction(Function* func) {
// prepare
@@ -51,7 +52,8 @@ struct PickLoadSigns : public WalkerPass<ExpressionStackWalker<PickLoadSigns>> {
}
void visitGetLocal(GetLocal* curr) {
- // this is a use. check from the context what it is, signed or unsigned, etc.
+ // this is a use. check from the context what it is, signed or unsigned,
+ // etc.
auto& usage = usages[curr->index];
usage.totalUsages++;
if (expressionStack.size() >= 2) {
@@ -97,9 +99,14 @@ struct PickLoadSigns : public WalkerPass<ExpressionStackWalker<PickLoadSigns>> {
auto& usage = usages[index];
// if we can't optimize, give up
if (usage.totalUsages == 0 || // no usages, so no idea
- usage.signedUsages + usage.unsignedUsages != usage.totalUsages || // non-sign/unsigned usages, so cannot change
- (usage.signedUsages != 0 && usage.signedBits != load->bytes * 8) || // sign usages exist but the wrong size
- (usage.unsignedUsages != 0 && usage.unsignedBits != load->bytes * 8)) { // unsigned usages exist but the wrong size
+ usage.signedUsages + usage.unsignedUsages !=
+ usage.totalUsages || // non-sign/unsigned usages, so cannot change
+ (usage.signedUsages != 0 &&
+ usage.signedBits !=
+ load->bytes * 8) || // sign usages exist but the wrong size
+ (usage.unsignedUsages != 0 &&
+ usage.unsignedBits !=
+ load->bytes * 8)) { // unsigned usages exist but the wrong size
continue;
}
// we can pick the optimal one. our hope is to remove 2 items per
@@ -107,11 +114,8 @@ struct PickLoadSigns : public WalkerPass<ExpressionStackWalker<PickLoadSigns>> {
load->signed_ = usage.signedUsages * 2 >= usage.unsignedUsages;
}
}
-
};
-Pass *createPickLoadSignsPass() {
- return new PickLoadSigns();
-}
+Pass* createPickLoadSignsPass() { return new PickLoadSigns(); }
} // namespace wasm
diff --git a/src/passes/PostEmscripten.cpp b/src/passes/PostEmscripten.cpp
index 7e2bacf25..6c9b84d7c 100644
--- a/src/passes/PostEmscripten.cpp
+++ b/src/passes/PostEmscripten.cpp
@@ -19,11 +19,11 @@
// emscripten output.
//
-#include <wasm.h>
+#include <asmjs/shared-constants.h>
+#include <ir/localize.h>
#include <pass.h>
#include <wasm-builder.h>
-#include <ir/localize.h>
-#include <asmjs/shared-constants.h>
+#include <wasm.h>
namespace wasm {
@@ -35,7 +35,8 @@ struct PostEmscripten : public WalkerPass<PostWalker<PostEmscripten>> {
void visitCall(Call* curr) {
// special asm.js imports can be optimized
auto* func = getModule()->getFunction(curr->target);
- if (!func->imported()) return;
+ if (!func->imported())
+ return;
if (func->module == GLOBAL_MATH) {
if (func->base == POW) {
if (auto* exponent = curr->operands[1]->dynCast<Const>()) {
@@ -43,10 +44,14 @@ struct PostEmscripten : public WalkerPass<PostWalker<PostEmscripten>> {
// This is just a square operation, do a multiply
Localizer localizer(curr->operands[0], getFunction(), getModule());
Builder builder(*getModule());
- replaceCurrent(builder.makeBinary(MulFloat64, localizer.expr, builder.makeGetLocal(localizer.index, localizer.expr->type)));
+ replaceCurrent(builder.makeBinary(
+ MulFloat64,
+ localizer.expr,
+ builder.makeGetLocal(localizer.index, localizer.expr->type)));
} else if (exponent->value == Literal(double(0.5))) {
// This is just a square root operation
- replaceCurrent(Builder(*getModule()).makeUnary(SqrtFloat64, curr->operands[0]));
+ replaceCurrent(
+ Builder(*getModule()).makeUnary(SqrtFloat64, curr->operands[0]));
}
}
}
@@ -54,8 +59,6 @@ struct PostEmscripten : public WalkerPass<PostWalker<PostEmscripten>> {
}
};
-Pass *createPostEmscriptenPass() {
- return new PostEmscripten();
-}
+Pass* createPostEmscriptenPass() { return new PostEmscripten(); }
} // namespace wasm
diff --git a/src/passes/Precompute.cpp b/src/passes/Precompute.cpp
index 565809ddb..074dd832c 100644
--- a/src/passes/Precompute.cpp
+++ b/src/passes/Precompute.cpp
@@ -27,14 +27,14 @@
// looked at.
//
-#include <wasm.h>
-#include <pass.h>
-#include <wasm-builder.h>
-#include <wasm-interpreter.h>
-#include <ir/utils.h>
#include <ir/literal-utils.h>
#include <ir/local-graph.h>
#include <ir/manipulation.h>
+#include <ir/utils.h>
+#include <pass.h>
+#include <wasm-builder.h>
+#include <wasm-interpreter.h>
+#include <wasm.h>
namespace wasm {
@@ -42,38 +42,44 @@ static const Name NOTPRECOMPUTABLE_FLOW("Binaryen|notprecomputable");
typedef std::unordered_map<GetLocal*, Literal> GetValues;
-// Precomputes an expression. Errors if we hit anything that can't be precomputed.
-class PrecomputingExpressionRunner : public ExpressionRunner<PrecomputingExpressionRunner> {
+// Precomputes an expression. Errors if we hit anything that can't be
+// precomputed.
+class PrecomputingExpressionRunner
+ : public ExpressionRunner<PrecomputingExpressionRunner> {
Module* module;
// map gets to constant values, if they are known to be constant
GetValues& getValues;
- // Whether we are trying to precompute down to an expression (which we can do on
- // say 5 + 6) or to a value (which we can't do on a local.tee that flows a 7
- // through it). When we want to replace the expression, we can only do so
- // when it has no side effects. When we don't care about replacing the expression,
- // we just want to know if it will contain a known constant.
+ // Whether we are trying to precompute down to an expression (which we can do
+ // on say 5 + 6) or to a value (which we can't do on a local.tee that flows a
+ // 7 through it). When we want to replace the expression, we can only do so
+ // when it has no side effects. When we don't care about replacing the
+ // expression, we just want to know if it will contain a known constant.
bool replaceExpression;
public:
- PrecomputingExpressionRunner(Module* module, GetValues& getValues, bool replaceExpression) : module(module), getValues(getValues), replaceExpression(replaceExpression) {}
+ PrecomputingExpressionRunner(Module* module,
+ GetValues& getValues,
+ bool replaceExpression)
+ : module(module), getValues(getValues),
+ replaceExpression(replaceExpression) {}
- struct NonstandaloneException {}; // TODO: use a flow with a special name, as this is likely very slow
+ struct NonstandaloneException {
+ }; // TODO: use a flow with a special name, as this is likely very slow
Flow visitLoop(Loop* curr) {
// loops might be infinite, so must be careful
- // but we can't tell if non-infinite, since we don't have state, so loops are just impossible to optimize for now
+ // but we can't tell if non-infinite, since we don't have state, so loops
+ // are just impossible to optimize for now
return Flow(NOTPRECOMPUTABLE_FLOW);
}
- Flow visitCall(Call* curr) {
- return Flow(NOTPRECOMPUTABLE_FLOW);
- }
+ Flow visitCall(Call* curr) { return Flow(NOTPRECOMPUTABLE_FLOW); }
Flow visitCallIndirect(CallIndirect* curr) {
return Flow(NOTPRECOMPUTABLE_FLOW);
}
- Flow visitGetLocal(GetLocal *curr) {
+ Flow visitGetLocal(GetLocal* curr) {
auto iter = getValues.find(curr);
if (iter != getValues.end()) {
auto value = iter->second;
@@ -83,7 +89,7 @@ public:
}
return Flow(NOTPRECOMPUTABLE_FLOW);
}
- Flow visitSetLocal(SetLocal *curr) {
+ Flow visitSetLocal(SetLocal* curr) {
// If we don't need to replace the whole expression, see if there
// is a value flowing through a tee.
if (!replaceExpression) {
@@ -94,56 +100,36 @@ public:
}
return Flow(NOTPRECOMPUTABLE_FLOW);
}
- Flow visitGetGlobal(GetGlobal *curr) {
+ Flow visitGetGlobal(GetGlobal* curr) {
auto* global = module->getGlobal(curr->name);
if (!global->imported() && !global->mutable_) {
return visit(global->init);
}
return Flow(NOTPRECOMPUTABLE_FLOW);
}
- Flow visitSetGlobal(SetGlobal *curr) {
+ Flow visitSetGlobal(SetGlobal* curr) { return Flow(NOTPRECOMPUTABLE_FLOW); }
+ Flow visitLoad(Load* curr) { return Flow(NOTPRECOMPUTABLE_FLOW); }
+ Flow visitStore(Store* curr) { return Flow(NOTPRECOMPUTABLE_FLOW); }
+ Flow visitAtomicRMW(AtomicRMW* curr) { return Flow(NOTPRECOMPUTABLE_FLOW); }
+ Flow visitAtomicCmpxchg(AtomicCmpxchg* curr) {
return Flow(NOTPRECOMPUTABLE_FLOW);
}
- Flow visitLoad(Load *curr) {
- return Flow(NOTPRECOMPUTABLE_FLOW);
- }
- Flow visitStore(Store *curr) {
- return Flow(NOTPRECOMPUTABLE_FLOW);
- }
- Flow visitAtomicRMW(AtomicRMW *curr) {
- return Flow(NOTPRECOMPUTABLE_FLOW);
- }
- Flow visitAtomicCmpxchg(AtomicCmpxchg *curr) {
- return Flow(NOTPRECOMPUTABLE_FLOW);
- }
- Flow visitAtomicWait(AtomicWait *curr) {
- return Flow(NOTPRECOMPUTABLE_FLOW);
- }
- Flow visitAtomicNotify(AtomicNotify *curr) {
- return Flow(NOTPRECOMPUTABLE_FLOW);
- }
- Flow visitMemoryInit(MemoryInit *curr) {
- return Flow(NOTPRECOMPUTABLE_FLOW);
- }
- Flow visitDataDrop(DataDrop *curr) {
- return Flow(NOTPRECOMPUTABLE_FLOW);
- }
- Flow visitMemoryCopy(MemoryCopy *curr) {
- return Flow(NOTPRECOMPUTABLE_FLOW);
- }
- Flow visitMemoryFill(MemoryFill *curr) {
- return Flow(NOTPRECOMPUTABLE_FLOW);
- }
- Flow visitHost(Host *curr) {
+ Flow visitAtomicWait(AtomicWait* curr) { return Flow(NOTPRECOMPUTABLE_FLOW); }
+ Flow visitAtomicNotify(AtomicNotify* curr) {
return Flow(NOTPRECOMPUTABLE_FLOW);
}
+ Flow visitMemoryInit(MemoryInit* curr) { return Flow(NOTPRECOMPUTABLE_FLOW); }
+ Flow visitDataDrop(DataDrop* curr) { return Flow(NOTPRECOMPUTABLE_FLOW); }
+ Flow visitMemoryCopy(MemoryCopy* curr) { return Flow(NOTPRECOMPUTABLE_FLOW); }
+ Flow visitMemoryFill(MemoryFill* curr) { return Flow(NOTPRECOMPUTABLE_FLOW); }
+ Flow visitHost(Host* curr) { return Flow(NOTPRECOMPUTABLE_FLOW); }
- void trap(const char* why) override {
- throw NonstandaloneException();
- }
+ void trap(const char* why) override { throw NonstandaloneException(); }
};
-struct Precompute : public WalkerPass<PostWalker<Precompute, UnifiedExpressionVisitor<Precompute>>> {
+struct Precompute
+ : public WalkerPass<
+ PostWalker<Precompute, UnifiedExpressionVisitor<Precompute>>> {
bool isFunctionParallel() override { return true; }
Pass* create() override { return new Precompute(propagate); }
@@ -175,19 +161,25 @@ struct Precompute : public WalkerPass<PostWalker<Precompute, UnifiedExpressionVi
}
void visitExpression(Expression* curr) {
- // TODO: if local.get, only replace with a constant if we don't care about size...?
- if (curr->is<Const>() || curr->is<Nop>()) return;
+ // TODO: if local.get, only replace with a constant if we don't care about
+ // size...?
+ if (curr->is<Const>() || curr->is<Nop>())
+ return;
// Until engines implement v128.const and we have SIMD-aware optimizations
// that can break large v128.const instructions into smaller consts and
// splats, do not try to precompute v128 expressions.
- if (isVectorType(curr->type)) return;
+ if (isVectorType(curr->type))
+ return;
// try to evaluate this into a const
Flow flow = precomputeExpression(curr);
- if (isVectorType(flow.value.type)) return;
+ if (isVectorType(flow.value.type))
+ return;
if (flow.breaking()) {
- if (flow.breakTo == NOTPRECOMPUTABLE_FLOW) return;
+ if (flow.breakTo == NOTPRECOMPUTABLE_FLOW)
+ return;
if (flow.breakTo == RETURN_FLOW) {
- // this expression causes a return. if it's already a return, reuse the node
+ // this expression causes a return. if it's already a return, reuse the
+ // node
if (auto* ret = curr->dynCast<Return>()) {
if (flow.value.type != none) {
// reuse a const value if there is one
@@ -204,11 +196,13 @@ struct Precompute : public WalkerPass<PostWalker<Precompute, UnifiedExpressionVi
}
} else {
Builder builder(*getModule());
- replaceCurrent(builder.makeReturn(flow.value.type != none ? builder.makeConst(flow.value) : nullptr));
+ replaceCurrent(builder.makeReturn(
+ flow.value.type != none ? builder.makeConst(flow.value) : nullptr));
}
return;
}
- // this expression causes a break, emit it directly. if it's already a br, reuse the node.
+ // this expression causes a break, emit it directly. if it's already a br,
+ // reuse the node.
if (auto* br = curr->dynCast<Break>()) {
br->name = flow.breakTo;
br->condition = nullptr;
@@ -229,7 +223,9 @@ struct Precompute : public WalkerPass<PostWalker<Precompute, UnifiedExpressionVi
br->finalize();
} else {
Builder builder(*getModule());
- replaceCurrent(builder.makeBreak(flow.breakTo, flow.value.type != none ? builder.makeConst(flow.value) : nullptr));
+ replaceCurrent(builder.makeBreak(
+ flow.breakTo,
+ flow.value.type != none ? builder.makeConst(flow.value) : nullptr));
}
return;
}
@@ -252,7 +248,9 @@ private:
// (that we can replace the expression with if replaceExpression is set).
Flow precomputeExpression(Expression* curr, bool replaceExpression = true) {
try {
- return PrecomputingExpressionRunner(getModule(), getValues, replaceExpression).visit(curr);
+ return PrecomputingExpressionRunner(
+ getModule(), getValues, replaceExpression)
+ .visit(curr);
} catch (PrecomputingExpressionRunner::NonstandaloneException&) {
return Flow(NOTPRECOMPUTABLE_FLOW);
}
@@ -292,7 +290,8 @@ private:
auto* curr = pair.first;
work.insert(curr);
}
- std::unordered_map<SetLocal*, Literal> setValues; // the constant value, or none if not a constant
+ // the constant value, or none if not a constant
+ std::unordered_map<SetLocal*, Literal> setValues;
// propagate constant values
while (!work.empty()) {
auto iter = work.begin();
@@ -302,7 +301,8 @@ private:
// mark it as such and add everything it influences to the work list,
// as they may be constant too.
if (auto* set = curr->dynCast<SetLocal>()) {
- if (setValues[set].isConcrete()) continue; // already known constant
+ if (setValues[set].isConcrete())
+ continue; // already known constant
auto value = setValues[set] = precomputeValue(set->value);
if (value.isConcrete()) {
for (auto* get : localGraph.setInfluences[set]) {
@@ -311,7 +311,8 @@ private:
}
} else {
auto* get = curr->cast<GetLocal>();
- if (getValues[get].isConcrete()) continue; // already known constant
+ if (getValues[get].isConcrete())
+ continue; // already known constant
// for this get to have constant value, all sets must agree
Literal value;
bool first = true;
@@ -358,12 +359,8 @@ private:
}
};
-Pass *createPrecomputePass() {
- return new Precompute(false);
-}
+Pass* createPrecomputePass() { return new Precompute(false); }
-Pass *createPrecomputePropagatePass() {
- return new Precompute(true);
-}
+Pass* createPrecomputePropagatePass() { return new Precompute(true); }
} // namespace wasm
diff --git a/src/passes/Print.cpp b/src/passes/Print.cpp
index 29d867d76..405097455 100644
--- a/src/passes/Print.cpp
+++ b/src/passes/Print.cpp
@@ -18,12 +18,12 @@
// Print out text in s-expression format
//
-#include <wasm.h>
-#include <wasm-printing.h>
-#include <wasm-stack.h>
+#include <ir/module-utils.h>
#include <pass.h>
#include <pretty_printing.h>
-#include <ir/module-utils.h>
+#include <wasm-printing.h>
+#include <wasm-stack.h>
+#include <wasm.h>
namespace wasm {
@@ -55,15 +55,14 @@ static Name printableLocal(Index index, Function* func) {
return name;
}
-
// Prints the internal contents of an expression: everything but
// the children.
struct PrintExpressionContents : public Visitor<PrintExpressionContents> {
Function* currFunction = nullptr;
std::ostream& o;
- PrintExpressionContents(Function* currFunction, std::ostream& o) :
- currFunction(currFunction), o(o) {}
+ PrintExpressionContents(Function* currFunction, std::ostream& o)
+ : currFunction(currFunction), o(o) {}
void visitBlock(Block* curr) {
printMedium(o, "block");
@@ -133,7 +132,8 @@ struct PrintExpressionContents : public Visitor<PrintExpressionContents> {
}
void visitLoad(Load* curr) {
prepareColor(o) << printType(curr->type);
- if (curr->isAtomic) o << ".atomic";
+ if (curr->isAtomic)
+ o << ".atomic";
o << ".load";
if (curr->type != unreachable && curr->bytes < getTypeSize(curr->type)) {
if (curr->bytes == 1) {
@@ -157,7 +157,8 @@ struct PrintExpressionContents : public Visitor<PrintExpressionContents> {
}
void visitStore(Store* curr) {
prepareColor(o) << printType(curr->valueType);
- if (curr->isAtomic) o << ".atomic";
+ if (curr->isAtomic)
+ o << ".atomic";
o << ".store";
if (curr->bytes < 4 || (curr->valueType == i64 && curr->bytes < 8)) {
if (curr->bytes == 1) {
@@ -199,12 +200,24 @@ struct PrintExpressionContents : public Visitor<PrintExpressionContents> {
prepareColor(o);
printRMWSize(o, curr->type, curr->bytes);
switch (curr->op) {
- case Add: o << "add"; break;
- case Sub: o << "sub"; break;
- case And: o << "and"; break;
- case Or: o << "or"; break;
- case Xor: o << "xor"; break;
- case Xchg: o << "xchg"; break;
+ case Add:
+ o << "add";
+ break;
+ case Sub:
+ o << "sub";
+ break;
+ case And:
+ o << "and";
+ break;
+ case Or:
+ o << "or";
+ break;
+ case Xor:
+ o << "xor";
+ break;
+ case Xchg:
+ o << "xchg";
+ break;
}
if (curr->type != unreachable && curr->bytes != getTypeSize(curr->type)) {
o << "_u";
@@ -217,7 +230,7 @@ struct PrintExpressionContents : public Visitor<PrintExpressionContents> {
void visitAtomicCmpxchg(AtomicCmpxchg* curr) {
prepareColor(o);
printRMWSize(o, curr->type, curr->bytes);
- o << "cmpxchg";
+ o << "cmpxchg";
if (curr->type != unreachable && curr->bytes != getTypeSize(curr->type)) {
o << "_u";
}
@@ -242,26 +255,54 @@ struct PrintExpressionContents : public Visitor<PrintExpressionContents> {
void visitSIMDExtract(SIMDExtract* curr) {
prepareColor(o);
switch (curr->op) {
- case ExtractLaneSVecI8x16: o << "i8x16.extract_lane_s"; break;
- case ExtractLaneUVecI8x16: o << "i8x16.extract_lane_u"; break;
- case ExtractLaneSVecI16x8: o << "i16x8.extract_lane_s"; break;
- case ExtractLaneUVecI16x8: o << "i16x8.extract_lane_u"; break;
- case ExtractLaneVecI32x4: o << "i32x4.extract_lane"; break;
- case ExtractLaneVecI64x2: o << "i64x2.extract_lane"; break;
- case ExtractLaneVecF32x4: o << "f32x4.extract_lane"; break;
- case ExtractLaneVecF64x2: o << "f64x2.extract_lane"; break;
+ case ExtractLaneSVecI8x16:
+ o << "i8x16.extract_lane_s";
+ break;
+ case ExtractLaneUVecI8x16:
+ o << "i8x16.extract_lane_u";
+ break;
+ case ExtractLaneSVecI16x8:
+ o << "i16x8.extract_lane_s";
+ break;
+ case ExtractLaneUVecI16x8:
+ o << "i16x8.extract_lane_u";
+ break;
+ case ExtractLaneVecI32x4:
+ o << "i32x4.extract_lane";
+ break;
+ case ExtractLaneVecI64x2:
+ o << "i64x2.extract_lane";
+ break;
+ case ExtractLaneVecF32x4:
+ o << "f32x4.extract_lane";
+ break;
+ case ExtractLaneVecF64x2:
+ o << "f64x2.extract_lane";
+ break;
}
o << " " << int(curr->index);
}
void visitSIMDReplace(SIMDReplace* curr) {
prepareColor(o);
switch (curr->op) {
- case ReplaceLaneVecI8x16: o << "i8x16.replace_lane"; break;
- case ReplaceLaneVecI16x8: o << "i16x8.replace_lane"; break;
- case ReplaceLaneVecI32x4: o << "i32x4.replace_lane"; break;
- case ReplaceLaneVecI64x2: o << "i64x2.replace_lane"; break;
- case ReplaceLaneVecF32x4: o << "f32x4.replace_lane"; break;
- case ReplaceLaneVecF64x2: o << "f64x2.replace_lane"; break;
+ case ReplaceLaneVecI8x16:
+ o << "i8x16.replace_lane";
+ break;
+ case ReplaceLaneVecI16x8:
+ o << "i16x8.replace_lane";
+ break;
+ case ReplaceLaneVecI32x4:
+ o << "i32x4.replace_lane";
+ break;
+ case ReplaceLaneVecI64x2:
+ o << "i64x2.replace_lane";
+ break;
+ case ReplaceLaneVecF32x4:
+ o << "f32x4.replace_lane";
+ break;
+ case ReplaceLaneVecF64x2:
+ o << "f64x2.replace_lane";
+ break;
}
o << " " << int(curr->index);
}
@@ -279,18 +320,42 @@ struct PrintExpressionContents : public Visitor<PrintExpressionContents> {
void visitSIMDShift(SIMDShift* curr) {
prepareColor(o);
switch (curr->op) {
- case ShlVecI8x16: o << "i8x16.shl"; break;
- case ShrSVecI8x16: o << "i8x16.shr_s"; break;
- case ShrUVecI8x16: o << "i8x16.shr_u"; break;
- case ShlVecI16x8: o << "i16x8.shl"; break;
- case ShrSVecI16x8: o << "i16x8.shr_s"; break;
- case ShrUVecI16x8: o << "i16x8.shr_u"; break;
- case ShlVecI32x4: o << "i32x4.shl"; break;
- case ShrSVecI32x4: o << "i32x4.shr_s"; break;
- case ShrUVecI32x4: o << "i32x4.shr_u"; break;
- case ShlVecI64x2: o << "i64x2.shl"; break;
- case ShrSVecI64x2: o << "i64x2.shr_s"; break;
- case ShrUVecI64x2: o << "i64x2.shr_u"; break;
+ case ShlVecI8x16:
+ o << "i8x16.shl";
+ break;
+ case ShrSVecI8x16:
+ o << "i8x16.shr_s";
+ break;
+ case ShrUVecI8x16:
+ o << "i8x16.shr_u";
+ break;
+ case ShlVecI16x8:
+ o << "i16x8.shl";
+ break;
+ case ShrSVecI16x8:
+ o << "i16x8.shr_s";
+ break;
+ case ShrUVecI16x8:
+ o << "i16x8.shr_u";
+ break;
+ case ShlVecI32x4:
+ o << "i32x4.shl";
+ break;
+ case ShrSVecI32x4:
+ o << "i32x4.shr_s";
+ break;
+ case ShrUVecI32x4:
+ o << "i32x4.shr_u";
+ break;
+ case ShlVecI64x2:
+ o << "i64x2.shl";
+ break;
+ case ShrSVecI64x2:
+ o << "i64x2.shr_s";
+ break;
+ case ShrUVecI64x2:
+ o << "i64x2.shr_u";
+ break;
}
}
void visitMemoryInit(MemoryInit* curr) {
@@ -309,296 +374,780 @@ struct PrintExpressionContents : public Visitor<PrintExpressionContents> {
prepareColor(o);
o << "memory.fill";
}
- void visitConst(Const* curr) {
- o << curr->value;
- }
+ void visitConst(Const* curr) { o << curr->value; }
void visitUnary(Unary* curr) {
prepareColor(o);
switch (curr->op) {
- case ClzInt32: o << "i32.clz"; break;
- case CtzInt32: o << "i32.ctz"; break;
- case PopcntInt32: o << "i32.popcnt"; break;
- case EqZInt32: o << "i32.eqz"; break;
- case ClzInt64: o << "i64.clz"; break;
- case CtzInt64: o << "i64.ctz"; break;
- case PopcntInt64: o << "i64.popcnt"; break;
- case EqZInt64: o << "i64.eqz"; break;
- case NegFloat32: o << "f32.neg"; break;
- case AbsFloat32: o << "f32.abs"; break;
- case CeilFloat32: o << "f32.ceil"; break;
- case FloorFloat32: o << "f32.floor"; break;
- case TruncFloat32: o << "f32.trunc"; break;
- case NearestFloat32: o << "f32.nearest"; break;
- case SqrtFloat32: o << "f32.sqrt"; break;
- case NegFloat64: o << "f64.neg"; break;
- case AbsFloat64: o << "f64.abs"; break;
- case CeilFloat64: o << "f64.ceil"; break;
- case FloorFloat64: o << "f64.floor"; break;
- case TruncFloat64: o << "f64.trunc"; break;
- case NearestFloat64: o << "f64.nearest"; break;
- case SqrtFloat64: o << "f64.sqrt"; break;
- case ExtendSInt32: o << "i64.extend_i32_s"; break;
- case ExtendUInt32: o << "i64.extend_i32_u"; break;
- case WrapInt64: o << "i32.wrap_i64"; break;
- case TruncSFloat32ToInt32: o << "i32.trunc_f32_s"; break;
- case TruncSFloat32ToInt64: o << "i64.trunc_f32_s"; break;
- case TruncUFloat32ToInt32: o << "i32.trunc_f32_u"; break;
- case TruncUFloat32ToInt64: o << "i64.trunc_f32_u"; break;
- case TruncSFloat64ToInt32: o << "i32.trunc_f64_s"; break;
- case TruncSFloat64ToInt64: o << "i64.trunc_f64_s"; break;
- case TruncUFloat64ToInt32: o << "i32.trunc_f64_u"; break;
- case TruncUFloat64ToInt64: o << "i64.trunc_f64_u"; break;
- case ReinterpretFloat32: o << "i32.reinterpret_f32"; break;
- case ReinterpretFloat64: o << "i64.reinterpret_f64"; break;
- case ConvertUInt32ToFloat32: o << "f32.convert_i32_u"; break;
- case ConvertUInt32ToFloat64: o << "f64.convert_i32_u"; break;
- case ConvertSInt32ToFloat32: o << "f32.convert_i32_s"; break;
- case ConvertSInt32ToFloat64: o << "f64.convert_i32_s"; break;
- case ConvertUInt64ToFloat32: o << "f32.convert_i64_u"; break;
- case ConvertUInt64ToFloat64: o << "f64.convert_i64_u"; break;
- case ConvertSInt64ToFloat32: o << "f32.convert_i64_s"; break;
- case ConvertSInt64ToFloat64: o << "f64.convert_i64_s"; break;
- case PromoteFloat32: o << "f64.promote_f32"; break;
- case DemoteFloat64: o << "f32.demote_f64"; break;
- case ReinterpretInt32: o << "f32.reinterpret_i32"; break;
- case ReinterpretInt64: o << "f64.reinterpret_i64"; break;
- case ExtendS8Int32: o << "i32.extend8_s"; break;
- case ExtendS16Int32: o << "i32.extend16_s"; break;
- case ExtendS8Int64: o << "i64.extend8_s"; break;
- case ExtendS16Int64: o << "i64.extend16_s"; break;
- case ExtendS32Int64: o << "i64.extend32_s"; break;
- case TruncSatSFloat32ToInt32: o << "i32.trunc_sat_f32_s"; break;
- case TruncSatUFloat32ToInt32: o << "i32.trunc_sat_f32_u"; break;
- case TruncSatSFloat64ToInt32: o << "i32.trunc_sat_f64_s"; break;
- case TruncSatUFloat64ToInt32: o << "i32.trunc_sat_f64_u"; break;
- case TruncSatSFloat32ToInt64: o << "i64.trunc_sat_f32_s"; break;
- case TruncSatUFloat32ToInt64: o << "i64.trunc_sat_f32_u"; break;
- case TruncSatSFloat64ToInt64: o << "i64.trunc_sat_f64_s"; break;
- case TruncSatUFloat64ToInt64: o << "i64.trunc_sat_f64_u"; break;
- case SplatVecI8x16: o << "i8x16.splat"; break;
- case SplatVecI16x8: o << "i16x8.splat"; break;
- case SplatVecI32x4: o << "i32x4.splat"; break;
- case SplatVecI64x2: o << "i64x2.splat"; break;
- case SplatVecF32x4: o << "f32x4.splat"; break;
- case SplatVecF64x2: o << "f64x2.splat"; break;
- case NotVec128: o << "v128.not"; break;
- case NegVecI8x16: o << "i8x16.neg"; break;
- case AnyTrueVecI8x16: o << "i8x16.any_true"; break;
- case AllTrueVecI8x16: o << "i8x16.all_true"; break;
- case NegVecI16x8: o << "i16x8.neg"; break;
- case AnyTrueVecI16x8: o << "i16x8.any_true"; break;
- case AllTrueVecI16x8: o << "i16x8.all_true"; break;
- case NegVecI32x4: o << "i32x4.neg"; break;
- case AnyTrueVecI32x4: o << "i32x4.any_true"; break;
- case AllTrueVecI32x4: o << "i32x4.all_true"; break;
- case NegVecI64x2: o << "i64x2.neg"; break;
- case AnyTrueVecI64x2: o << "i64x2.any_true"; break;
- case AllTrueVecI64x2: o << "i64x2.all_true"; break;
- case AbsVecF32x4: o << "f32x4.abs"; break;
- case NegVecF32x4: o << "f32x4.neg"; break;
- case SqrtVecF32x4: o << "f32x4.sqrt"; break;
- case AbsVecF64x2: o << "f64x2.abs"; break;
- case NegVecF64x2: o << "f64x2.neg"; break;
- case SqrtVecF64x2: o << "f64x2.sqrt"; break;
- case TruncSatSVecF32x4ToVecI32x4: o << "i32x4.trunc_sat_f32x4_s"; break;
- case TruncSatUVecF32x4ToVecI32x4: o << "i32x4.trunc_sat_f32x4_u"; break;
- case TruncSatSVecF64x2ToVecI64x2: o << "i64x2.trunc_sat_f64x2_s"; break;
- case TruncSatUVecF64x2ToVecI64x2: o << "i64x2.trunc_sat_f64x2_u"; break;
- case ConvertSVecI32x4ToVecF32x4: o << "f32x4.convert_i32x4_s"; break;
- case ConvertUVecI32x4ToVecF32x4: o << "f32x4.convert_i32x4_u"; break;
- case ConvertSVecI64x2ToVecF64x2: o << "f64x2.convert_i64x2_s"; break;
- case ConvertUVecI64x2ToVecF64x2: o << "f64x2.convert_i64x2_u"; break;
- case InvalidUnary: WASM_UNREACHABLE();
+ case ClzInt32:
+ o << "i32.clz";
+ break;
+ case CtzInt32:
+ o << "i32.ctz";
+ break;
+ case PopcntInt32:
+ o << "i32.popcnt";
+ break;
+ case EqZInt32:
+ o << "i32.eqz";
+ break;
+ case ClzInt64:
+ o << "i64.clz";
+ break;
+ case CtzInt64:
+ o << "i64.ctz";
+ break;
+ case PopcntInt64:
+ o << "i64.popcnt";
+ break;
+ case EqZInt64:
+ o << "i64.eqz";
+ break;
+ case NegFloat32:
+ o << "f32.neg";
+ break;
+ case AbsFloat32:
+ o << "f32.abs";
+ break;
+ case CeilFloat32:
+ o << "f32.ceil";
+ break;
+ case FloorFloat32:
+ o << "f32.floor";
+ break;
+ case TruncFloat32:
+ o << "f32.trunc";
+ break;
+ case NearestFloat32:
+ o << "f32.nearest";
+ break;
+ case SqrtFloat32:
+ o << "f32.sqrt";
+ break;
+ case NegFloat64:
+ o << "f64.neg";
+ break;
+ case AbsFloat64:
+ o << "f64.abs";
+ break;
+ case CeilFloat64:
+ o << "f64.ceil";
+ break;
+ case FloorFloat64:
+ o << "f64.floor";
+ break;
+ case TruncFloat64:
+ o << "f64.trunc";
+ break;
+ case NearestFloat64:
+ o << "f64.nearest";
+ break;
+ case SqrtFloat64:
+ o << "f64.sqrt";
+ break;
+ case ExtendSInt32:
+ o << "i64.extend_i32_s";
+ break;
+ case ExtendUInt32:
+ o << "i64.extend_i32_u";
+ break;
+ case WrapInt64:
+ o << "i32.wrap_i64";
+ break;
+ case TruncSFloat32ToInt32:
+ o << "i32.trunc_f32_s";
+ break;
+ case TruncSFloat32ToInt64:
+ o << "i64.trunc_f32_s";
+ break;
+ case TruncUFloat32ToInt32:
+ o << "i32.trunc_f32_u";
+ break;
+ case TruncUFloat32ToInt64:
+ o << "i64.trunc_f32_u";
+ break;
+ case TruncSFloat64ToInt32:
+ o << "i32.trunc_f64_s";
+ break;
+ case TruncSFloat64ToInt64:
+ o << "i64.trunc_f64_s";
+ break;
+ case TruncUFloat64ToInt32:
+ o << "i32.trunc_f64_u";
+ break;
+ case TruncUFloat64ToInt64:
+ o << "i64.trunc_f64_u";
+ break;
+ case ReinterpretFloat32:
+ o << "i32.reinterpret_f32";
+ break;
+ case ReinterpretFloat64:
+ o << "i64.reinterpret_f64";
+ break;
+ case ConvertUInt32ToFloat32:
+ o << "f32.convert_i32_u";
+ break;
+ case ConvertUInt32ToFloat64:
+ o << "f64.convert_i32_u";
+ break;
+ case ConvertSInt32ToFloat32:
+ o << "f32.convert_i32_s";
+ break;
+ case ConvertSInt32ToFloat64:
+ o << "f64.convert_i32_s";
+ break;
+ case ConvertUInt64ToFloat32:
+ o << "f32.convert_i64_u";
+ break;
+ case ConvertUInt64ToFloat64:
+ o << "f64.convert_i64_u";
+ break;
+ case ConvertSInt64ToFloat32:
+ o << "f32.convert_i64_s";
+ break;
+ case ConvertSInt64ToFloat64:
+ o << "f64.convert_i64_s";
+ break;
+ case PromoteFloat32:
+ o << "f64.promote_f32";
+ break;
+ case DemoteFloat64:
+ o << "f32.demote_f64";
+ break;
+ case ReinterpretInt32:
+ o << "f32.reinterpret_i32";
+ break;
+ case ReinterpretInt64:
+ o << "f64.reinterpret_i64";
+ break;
+ case ExtendS8Int32:
+ o << "i32.extend8_s";
+ break;
+ case ExtendS16Int32:
+ o << "i32.extend16_s";
+ break;
+ case ExtendS8Int64:
+ o << "i64.extend8_s";
+ break;
+ case ExtendS16Int64:
+ o << "i64.extend16_s";
+ break;
+ case ExtendS32Int64:
+ o << "i64.extend32_s";
+ break;
+ case TruncSatSFloat32ToInt32:
+ o << "i32.trunc_sat_f32_s";
+ break;
+ case TruncSatUFloat32ToInt32:
+ o << "i32.trunc_sat_f32_u";
+ break;
+ case TruncSatSFloat64ToInt32:
+ o << "i32.trunc_sat_f64_s";
+ break;
+ case TruncSatUFloat64ToInt32:
+ o << "i32.trunc_sat_f64_u";
+ break;
+ case TruncSatSFloat32ToInt64:
+ o << "i64.trunc_sat_f32_s";
+ break;
+ case TruncSatUFloat32ToInt64:
+ o << "i64.trunc_sat_f32_u";
+ break;
+ case TruncSatSFloat64ToInt64:
+ o << "i64.trunc_sat_f64_s";
+ break;
+ case TruncSatUFloat64ToInt64:
+ o << "i64.trunc_sat_f64_u";
+ break;
+ case SplatVecI8x16:
+ o << "i8x16.splat";
+ break;
+ case SplatVecI16x8:
+ o << "i16x8.splat";
+ break;
+ case SplatVecI32x4:
+ o << "i32x4.splat";
+ break;
+ case SplatVecI64x2:
+ o << "i64x2.splat";
+ break;
+ case SplatVecF32x4:
+ o << "f32x4.splat";
+ break;
+ case SplatVecF64x2:
+ o << "f64x2.splat";
+ break;
+ case NotVec128:
+ o << "v128.not";
+ break;
+ case NegVecI8x16:
+ o << "i8x16.neg";
+ break;
+ case AnyTrueVecI8x16:
+ o << "i8x16.any_true";
+ break;
+ case AllTrueVecI8x16:
+ o << "i8x16.all_true";
+ break;
+ case NegVecI16x8:
+ o << "i16x8.neg";
+ break;
+ case AnyTrueVecI16x8:
+ o << "i16x8.any_true";
+ break;
+ case AllTrueVecI16x8:
+ o << "i16x8.all_true";
+ break;
+ case NegVecI32x4:
+ o << "i32x4.neg";
+ break;
+ case AnyTrueVecI32x4:
+ o << "i32x4.any_true";
+ break;
+ case AllTrueVecI32x4:
+ o << "i32x4.all_true";
+ break;
+ case NegVecI64x2:
+ o << "i64x2.neg";
+ break;
+ case AnyTrueVecI64x2:
+ o << "i64x2.any_true";
+ break;
+ case AllTrueVecI64x2:
+ o << "i64x2.all_true";
+ break;
+ case AbsVecF32x4:
+ o << "f32x4.abs";
+ break;
+ case NegVecF32x4:
+ o << "f32x4.neg";
+ break;
+ case SqrtVecF32x4:
+ o << "f32x4.sqrt";
+ break;
+ case AbsVecF64x2:
+ o << "f64x2.abs";
+ break;
+ case NegVecF64x2:
+ o << "f64x2.neg";
+ break;
+ case SqrtVecF64x2:
+ o << "f64x2.sqrt";
+ break;
+ case TruncSatSVecF32x4ToVecI32x4:
+ o << "i32x4.trunc_sat_f32x4_s";
+ break;
+ case TruncSatUVecF32x4ToVecI32x4:
+ o << "i32x4.trunc_sat_f32x4_u";
+ break;
+ case TruncSatSVecF64x2ToVecI64x2:
+ o << "i64x2.trunc_sat_f64x2_s";
+ break;
+ case TruncSatUVecF64x2ToVecI64x2:
+ o << "i64x2.trunc_sat_f64x2_u";
+ break;
+ case ConvertSVecI32x4ToVecF32x4:
+ o << "f32x4.convert_i32x4_s";
+ break;
+ case ConvertUVecI32x4ToVecF32x4:
+ o << "f32x4.convert_i32x4_u";
+ break;
+ case ConvertSVecI64x2ToVecF64x2:
+ o << "f64x2.convert_i64x2_s";
+ break;
+ case ConvertUVecI64x2ToVecF64x2:
+ o << "f64x2.convert_i64x2_u";
+ break;
+ case InvalidUnary:
+ WASM_UNREACHABLE();
}
}
void visitBinary(Binary* curr) {
prepareColor(o);
switch (curr->op) {
- case AddInt32: o << "i32.add"; break;
- case SubInt32: o << "i32.sub"; break;
- case MulInt32: o << "i32.mul"; break;
- case DivSInt32: o << "i32.div_s"; break;
- case DivUInt32: o << "i32.div_u"; break;
- case RemSInt32: o << "i32.rem_s"; break;
- case RemUInt32: o << "i32.rem_u"; break;
- case AndInt32: o << "i32.and"; break;
- case OrInt32: o << "i32.or"; break;
- case XorInt32: o << "i32.xor"; break;
- case ShlInt32: o << "i32.shl"; break;
- case ShrUInt32: o << "i32.shr_u"; break;
- case ShrSInt32: o << "i32.shr_s"; break;
- case RotLInt32: o << "i32.rotl"; break;
- case RotRInt32: o << "i32.rotr"; break;
- case EqInt32: o << "i32.eq"; break;
- case NeInt32: o << "i32.ne"; break;
- case LtSInt32: o << "i32.lt_s"; break;
- case LtUInt32: o << "i32.lt_u"; break;
- case LeSInt32: o << "i32.le_s"; break;
- case LeUInt32: o << "i32.le_u"; break;
- case GtSInt32: o << "i32.gt_s"; break;
- case GtUInt32: o << "i32.gt_u"; break;
- case GeSInt32: o << "i32.ge_s"; break;
- case GeUInt32: o << "i32.ge_u"; break;
+ case AddInt32:
+ o << "i32.add";
+ break;
+ case SubInt32:
+ o << "i32.sub";
+ break;
+ case MulInt32:
+ o << "i32.mul";
+ break;
+ case DivSInt32:
+ o << "i32.div_s";
+ break;
+ case DivUInt32:
+ o << "i32.div_u";
+ break;
+ case RemSInt32:
+ o << "i32.rem_s";
+ break;
+ case RemUInt32:
+ o << "i32.rem_u";
+ break;
+ case AndInt32:
+ o << "i32.and";
+ break;
+ case OrInt32:
+ o << "i32.or";
+ break;
+ case XorInt32:
+ o << "i32.xor";
+ break;
+ case ShlInt32:
+ o << "i32.shl";
+ break;
+ case ShrUInt32:
+ o << "i32.shr_u";
+ break;
+ case ShrSInt32:
+ o << "i32.shr_s";
+ break;
+ case RotLInt32:
+ o << "i32.rotl";
+ break;
+ case RotRInt32:
+ o << "i32.rotr";
+ break;
+ case EqInt32:
+ o << "i32.eq";
+ break;
+ case NeInt32:
+ o << "i32.ne";
+ break;
+ case LtSInt32:
+ o << "i32.lt_s";
+ break;
+ case LtUInt32:
+ o << "i32.lt_u";
+ break;
+ case LeSInt32:
+ o << "i32.le_s";
+ break;
+ case LeUInt32:
+ o << "i32.le_u";
+ break;
+ case GtSInt32:
+ o << "i32.gt_s";
+ break;
+ case GtUInt32:
+ o << "i32.gt_u";
+ break;
+ case GeSInt32:
+ o << "i32.ge_s";
+ break;
+ case GeUInt32:
+ o << "i32.ge_u";
+ break;
- case AddInt64: o << "i64.add"; break;
- case SubInt64: o << "i64.sub"; break;
- case MulInt64: o << "i64.mul"; break;
- case DivSInt64: o << "i64.div_s"; break;
- case DivUInt64: o << "i64.div_u"; break;
- case RemSInt64: o << "i64.rem_s"; break;
- case RemUInt64: o << "i64.rem_u"; break;
- case AndInt64: o << "i64.and"; break;
- case OrInt64: o << "i64.or"; break;
- case XorInt64: o << "i64.xor"; break;
- case ShlInt64: o << "i64.shl"; break;
- case ShrUInt64: o << "i64.shr_u"; break;
- case ShrSInt64: o << "i64.shr_s"; break;
- case RotLInt64: o << "i64.rotl"; break;
- case RotRInt64: o << "i64.rotr"; break;
- case EqInt64: o << "i64.eq"; break;
- case NeInt64: o << "i64.ne"; break;
- case LtSInt64: o << "i64.lt_s"; break;
- case LtUInt64: o << "i64.lt_u"; break;
- case LeSInt64: o << "i64.le_s"; break;
- case LeUInt64: o << "i64.le_u"; break;
- case GtSInt64: o << "i64.gt_s"; break;
- case GtUInt64: o << "i64.gt_u"; break;
- case GeSInt64: o << "i64.ge_s"; break;
- case GeUInt64: o << "i64.ge_u"; break;
+ case AddInt64:
+ o << "i64.add";
+ break;
+ case SubInt64:
+ o << "i64.sub";
+ break;
+ case MulInt64:
+ o << "i64.mul";
+ break;
+ case DivSInt64:
+ o << "i64.div_s";
+ break;
+ case DivUInt64:
+ o << "i64.div_u";
+ break;
+ case RemSInt64:
+ o << "i64.rem_s";
+ break;
+ case RemUInt64:
+ o << "i64.rem_u";
+ break;
+ case AndInt64:
+ o << "i64.and";
+ break;
+ case OrInt64:
+ o << "i64.or";
+ break;
+ case XorInt64:
+ o << "i64.xor";
+ break;
+ case ShlInt64:
+ o << "i64.shl";
+ break;
+ case ShrUInt64:
+ o << "i64.shr_u";
+ break;
+ case ShrSInt64:
+ o << "i64.shr_s";
+ break;
+ case RotLInt64:
+ o << "i64.rotl";
+ break;
+ case RotRInt64:
+ o << "i64.rotr";
+ break;
+ case EqInt64:
+ o << "i64.eq";
+ break;
+ case NeInt64:
+ o << "i64.ne";
+ break;
+ case LtSInt64:
+ o << "i64.lt_s";
+ break;
+ case LtUInt64:
+ o << "i64.lt_u";
+ break;
+ case LeSInt64:
+ o << "i64.le_s";
+ break;
+ case LeUInt64:
+ o << "i64.le_u";
+ break;
+ case GtSInt64:
+ o << "i64.gt_s";
+ break;
+ case GtUInt64:
+ o << "i64.gt_u";
+ break;
+ case GeSInt64:
+ o << "i64.ge_s";
+ break;
+ case GeUInt64:
+ o << "i64.ge_u";
+ break;
- case AddFloat32: o << "f32.add"; break;
- case SubFloat32: o << "f32.sub"; break;
- case MulFloat32: o << "f32.mul"; break;
- case DivFloat32: o << "f32.div"; break;
- case CopySignFloat32: o << "f32.copysign"; break;
- case MinFloat32: o << "f32.min"; break;
- case MaxFloat32: o << "f32.max"; break;
- case EqFloat32: o << "f32.eq"; break;
- case NeFloat32: o << "f32.ne"; break;
- case LtFloat32: o << "f32.lt"; break;
- case LeFloat32: o << "f32.le"; break;
- case GtFloat32: o << "f32.gt"; break;
- case GeFloat32: o << "f32.ge"; break;
+ case AddFloat32:
+ o << "f32.add";
+ break;
+ case SubFloat32:
+ o << "f32.sub";
+ break;
+ case MulFloat32:
+ o << "f32.mul";
+ break;
+ case DivFloat32:
+ o << "f32.div";
+ break;
+ case CopySignFloat32:
+ o << "f32.copysign";
+ break;
+ case MinFloat32:
+ o << "f32.min";
+ break;
+ case MaxFloat32:
+ o << "f32.max";
+ break;
+ case EqFloat32:
+ o << "f32.eq";
+ break;
+ case NeFloat32:
+ o << "f32.ne";
+ break;
+ case LtFloat32:
+ o << "f32.lt";
+ break;
+ case LeFloat32:
+ o << "f32.le";
+ break;
+ case GtFloat32:
+ o << "f32.gt";
+ break;
+ case GeFloat32:
+ o << "f32.ge";
+ break;
- case AddFloat64: o << "f64.add"; break;
- case SubFloat64: o << "f64.sub"; break;
- case MulFloat64: o << "f64.mul"; break;
- case DivFloat64: o << "f64.div"; break;
- case CopySignFloat64: o << "f64.copysign"; break;
- case MinFloat64: o << "f64.min"; break;
- case MaxFloat64: o << "f64.max"; break;
- case EqFloat64: o << "f64.eq"; break;
- case NeFloat64: o << "f64.ne"; break;
- case LtFloat64: o << "f64.lt"; break;
- case LeFloat64: o << "f64.le"; break;
- case GtFloat64: o << "f64.gt"; break;
- case GeFloat64: o << "f64.ge"; break;
+ case AddFloat64:
+ o << "f64.add";
+ break;
+ case SubFloat64:
+ o << "f64.sub";
+ break;
+ case MulFloat64:
+ o << "f64.mul";
+ break;
+ case DivFloat64:
+ o << "f64.div";
+ break;
+ case CopySignFloat64:
+ o << "f64.copysign";
+ break;
+ case MinFloat64:
+ o << "f64.min";
+ break;
+ case MaxFloat64:
+ o << "f64.max";
+ break;
+ case EqFloat64:
+ o << "f64.eq";
+ break;
+ case NeFloat64:
+ o << "f64.ne";
+ break;
+ case LtFloat64:
+ o << "f64.lt";
+ break;
+ case LeFloat64:
+ o << "f64.le";
+ break;
+ case GtFloat64:
+ o << "f64.gt";
+ break;
+ case GeFloat64:
+ o << "f64.ge";
+ break;
- case EqVecI8x16: o << "i8x16.eq"; break;
- case NeVecI8x16: o << "i8x16.ne"; break;
- case LtSVecI8x16: o << "i8x16.lt_s"; break;
- case LtUVecI8x16: o << "i8x16.lt_u"; break;
- case GtSVecI8x16: o << "i8x16.gt_s"; break;
- case GtUVecI8x16: o << "i8x16.gt_u"; break;
- case LeSVecI8x16: o << "i8x16.le_s"; break;
- case LeUVecI8x16: o << "i8x16.le_u"; break;
- case GeSVecI8x16: o << "i8x16.ge_s"; break;
- case GeUVecI8x16: o << "i8x16.ge_u"; break;
- case EqVecI16x8: o << "i16x8.eq"; break;
- case NeVecI16x8: o << "i16x8.ne"; break;
- case LtSVecI16x8: o << "i16x8.lt_s"; break;
- case LtUVecI16x8: o << "i16x8.lt_u"; break;
- case GtSVecI16x8: o << "i16x8.gt_s"; break;
- case GtUVecI16x8: o << "i16x8.gt_u"; break;
- case LeSVecI16x8: o << "i16x8.le_s"; break;
- case LeUVecI16x8: o << "i16x8.le_u"; break;
- case GeSVecI16x8: o << "i16x8.ge_s"; break;
- case GeUVecI16x8: o << "i16x8.ge_u"; break;
- case EqVecI32x4: o << "i32x4.eq"; break;
- case NeVecI32x4: o << "i32x4.ne"; break;
- case LtSVecI32x4: o << "i32x4.lt_s"; break;
- case LtUVecI32x4: o << "i32x4.lt_u"; break;
- case GtSVecI32x4: o << "i32x4.gt_s"; break;
- case GtUVecI32x4: o << "i32x4.gt_u"; break;
- case LeSVecI32x4: o << "i32x4.le_s"; break;
- case LeUVecI32x4: o << "i32x4.le_u"; break;
- case GeSVecI32x4: o << "i32x4.ge_s"; break;
- case GeUVecI32x4: o << "i32x4.ge_u"; break;
- case EqVecF32x4: o << "f32x4.eq"; break;
- case NeVecF32x4: o << "f32x4.ne"; break;
- case LtVecF32x4: o << "f32x4.lt"; break;
- case GtVecF32x4: o << "f32x4.gt"; break;
- case LeVecF32x4: o << "f32x4.le"; break;
- case GeVecF32x4: o << "f32x4.ge"; break;
- case EqVecF64x2: o << "f64x2.eq"; break;
- case NeVecF64x2: o << "f64x2.ne"; break;
- case LtVecF64x2: o << "f64x2.lt"; break;
- case GtVecF64x2: o << "f64x2.gt"; break;
- case LeVecF64x2: o << "f64x2.le"; break;
- case GeVecF64x2: o << "f64x2.ge"; break;
+ case EqVecI8x16:
+ o << "i8x16.eq";
+ break;
+ case NeVecI8x16:
+ o << "i8x16.ne";
+ break;
+ case LtSVecI8x16:
+ o << "i8x16.lt_s";
+ break;
+ case LtUVecI8x16:
+ o << "i8x16.lt_u";
+ break;
+ case GtSVecI8x16:
+ o << "i8x16.gt_s";
+ break;
+ case GtUVecI8x16:
+ o << "i8x16.gt_u";
+ break;
+ case LeSVecI8x16:
+ o << "i8x16.le_s";
+ break;
+ case LeUVecI8x16:
+ o << "i8x16.le_u";
+ break;
+ case GeSVecI8x16:
+ o << "i8x16.ge_s";
+ break;
+ case GeUVecI8x16:
+ o << "i8x16.ge_u";
+ break;
+ case EqVecI16x8:
+ o << "i16x8.eq";
+ break;
+ case NeVecI16x8:
+ o << "i16x8.ne";
+ break;
+ case LtSVecI16x8:
+ o << "i16x8.lt_s";
+ break;
+ case LtUVecI16x8:
+ o << "i16x8.lt_u";
+ break;
+ case GtSVecI16x8:
+ o << "i16x8.gt_s";
+ break;
+ case GtUVecI16x8:
+ o << "i16x8.gt_u";
+ break;
+ case LeSVecI16x8:
+ o << "i16x8.le_s";
+ break;
+ case LeUVecI16x8:
+ o << "i16x8.le_u";
+ break;
+ case GeSVecI16x8:
+ o << "i16x8.ge_s";
+ break;
+ case GeUVecI16x8:
+ o << "i16x8.ge_u";
+ break;
+ case EqVecI32x4:
+ o << "i32x4.eq";
+ break;
+ case NeVecI32x4:
+ o << "i32x4.ne";
+ break;
+ case LtSVecI32x4:
+ o << "i32x4.lt_s";
+ break;
+ case LtUVecI32x4:
+ o << "i32x4.lt_u";
+ break;
+ case GtSVecI32x4:
+ o << "i32x4.gt_s";
+ break;
+ case GtUVecI32x4:
+ o << "i32x4.gt_u";
+ break;
+ case LeSVecI32x4:
+ o << "i32x4.le_s";
+ break;
+ case LeUVecI32x4:
+ o << "i32x4.le_u";
+ break;
+ case GeSVecI32x4:
+ o << "i32x4.ge_s";
+ break;
+ case GeUVecI32x4:
+ o << "i32x4.ge_u";
+ break;
+ case EqVecF32x4:
+ o << "f32x4.eq";
+ break;
+ case NeVecF32x4:
+ o << "f32x4.ne";
+ break;
+ case LtVecF32x4:
+ o << "f32x4.lt";
+ break;
+ case GtVecF32x4:
+ o << "f32x4.gt";
+ break;
+ case LeVecF32x4:
+ o << "f32x4.le";
+ break;
+ case GeVecF32x4:
+ o << "f32x4.ge";
+ break;
+ case EqVecF64x2:
+ o << "f64x2.eq";
+ break;
+ case NeVecF64x2:
+ o << "f64x2.ne";
+ break;
+ case LtVecF64x2:
+ o << "f64x2.lt";
+ break;
+ case GtVecF64x2:
+ o << "f64x2.gt";
+ break;
+ case LeVecF64x2:
+ o << "f64x2.le";
+ break;
+ case GeVecF64x2:
+ o << "f64x2.ge";
+ break;
- case AndVec128: o << "v128.and"; break;
- case OrVec128: o << "v128.or"; break;
- case XorVec128: o << "v128.xor"; break;
+ case AndVec128:
+ o << "v128.and";
+ break;
+ case OrVec128:
+ o << "v128.or";
+ break;
+ case XorVec128:
+ o << "v128.xor";
+ break;
- case AddVecI8x16: o << "i8x16.add"; break;
- case AddSatSVecI8x16: o << "i8x16.add_saturate_s"; break;
- case AddSatUVecI8x16: o << "i8x16.add_saturate_u"; break;
- case SubVecI8x16: o << "i8x16.sub"; break;
- case SubSatSVecI8x16: o << "i8x16.sub_saturate_s"; break;
- case SubSatUVecI8x16: o << "i8x16.sub_saturate_u"; break;
- case MulVecI8x16: o << "i8x16.mul"; break;
- case AddVecI16x8: o << "i16x8.add"; break;
- case AddSatSVecI16x8: o << "i16x8.add_saturate_s"; break;
- case AddSatUVecI16x8: o << "i16x8.add_saturate_u"; break;
- case SubVecI16x8: o << "i16x8.sub"; break;
- case SubSatSVecI16x8: o << "i16x8.sub_saturate_s"; break;
- case SubSatUVecI16x8: o << "i16x8.sub_saturate_u"; break;
- case MulVecI16x8: o << "i16x8.mul"; break;
- case AddVecI32x4: o << "i32x4.add"; break;
- case SubVecI32x4: o << "i32x4.sub"; break;
- case MulVecI32x4: o << "i32x4.mul"; break;
- case AddVecI64x2: o << "i64x2.add"; break;
- case SubVecI64x2: o << "i64x2.sub"; break;
+ case AddVecI8x16:
+ o << "i8x16.add";
+ break;
+ case AddSatSVecI8x16:
+ o << "i8x16.add_saturate_s";
+ break;
+ case AddSatUVecI8x16:
+ o << "i8x16.add_saturate_u";
+ break;
+ case SubVecI8x16:
+ o << "i8x16.sub";
+ break;
+ case SubSatSVecI8x16:
+ o << "i8x16.sub_saturate_s";
+ break;
+ case SubSatUVecI8x16:
+ o << "i8x16.sub_saturate_u";
+ break;
+ case MulVecI8x16:
+ o << "i8x16.mul";
+ break;
+ case AddVecI16x8:
+ o << "i16x8.add";
+ break;
+ case AddSatSVecI16x8:
+ o << "i16x8.add_saturate_s";
+ break;
+ case AddSatUVecI16x8:
+ o << "i16x8.add_saturate_u";
+ break;
+ case SubVecI16x8:
+ o << "i16x8.sub";
+ break;
+ case SubSatSVecI16x8:
+ o << "i16x8.sub_saturate_s";
+ break;
+ case SubSatUVecI16x8:
+ o << "i16x8.sub_saturate_u";
+ break;
+ case MulVecI16x8:
+ o << "i16x8.mul";
+ break;
+ case AddVecI32x4:
+ o << "i32x4.add";
+ break;
+ case SubVecI32x4:
+ o << "i32x4.sub";
+ break;
+ case MulVecI32x4:
+ o << "i32x4.mul";
+ break;
+ case AddVecI64x2:
+ o << "i64x2.add";
+ break;
+ case SubVecI64x2:
+ o << "i64x2.sub";
+ break;
- case AddVecF32x4: o << "f32x4.add"; break;
- case SubVecF32x4: o << "f32x4.sub"; break;
- case MulVecF32x4: o << "f32x4.mul"; break;
- case DivVecF32x4: o << "f32x4.div"; break;
- case MinVecF32x4: o << "f32x4.min"; break;
- case MaxVecF32x4: o << "f32x4.max"; break;
- case AddVecF64x2: o << "f64x2.add"; break;
- case SubVecF64x2: o << "f64x2.sub"; break;
- case MulVecF64x2: o << "f64x2.mul"; break;
- case DivVecF64x2: o << "f64x2.div"; break;
- case MinVecF64x2: o << "f64x2.min"; break;
- case MaxVecF64x2: o << "f64x2.max"; break;
+ case AddVecF32x4:
+ o << "f32x4.add";
+ break;
+ case SubVecF32x4:
+ o << "f32x4.sub";
+ break;
+ case MulVecF32x4:
+ o << "f32x4.mul";
+ break;
+ case DivVecF32x4:
+ o << "f32x4.div";
+ break;
+ case MinVecF32x4:
+ o << "f32x4.min";
+ break;
+ case MaxVecF32x4:
+ o << "f32x4.max";
+ break;
+ case AddVecF64x2:
+ o << "f64x2.add";
+ break;
+ case SubVecF64x2:
+ o << "f64x2.sub";
+ break;
+ case MulVecF64x2:
+ o << "f64x2.mul";
+ break;
+ case DivVecF64x2:
+ o << "f64x2.div";
+ break;
+ case MinVecF64x2:
+ o << "f64x2.min";
+ break;
+ case MaxVecF64x2:
+ o << "f64x2.max";
+ break;
- case InvalidBinary: WASM_UNREACHABLE();
+ case InvalidBinary:
+ WASM_UNREACHABLE();
}
restoreNormalColor(o);
}
- void visitSelect(Select* curr) {
- prepareColor(o) << "select";
- }
- void visitDrop(Drop* curr) {
- printMedium(o, "drop");
- }
- void visitReturn(Return* curr) {
- printMedium(o, "return");
- }
+ void visitSelect(Select* curr) { prepareColor(o) << "select"; }
+ void visitDrop(Drop* curr) { printMedium(o, "drop"); }
+ void visitReturn(Return* curr) { printMedium(o, "return"); }
void visitHost(Host* curr) {
switch (curr->op) {
- case CurrentMemory: printMedium(o, "current_memory"); break;
- case GrowMemory: printMedium(o, "grow_memory"); break;
+ case CurrentMemory:
+ printMedium(o, "current_memory");
+ break;
+ case GrowMemory:
+ printMedium(o, "grow_memory");
+ break;
}
}
- void visitNop(Nop* curr) {
- printMinor(o, "nop");
- }
- void visitUnreachable(Unreachable* curr) {
- printMinor(o, "unreachable");
- }
+ void visitNop(Nop* curr) { printMinor(o, "nop"); }
+ void visitUnreachable(Unreachable* curr) { printMinor(o, "unreachable"); }
};
// Prints an expression in s-expr format, including both the
@@ -608,8 +1157,8 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
unsigned indent = 0;
bool minify;
- const char *maybeSpace;
- const char *maybeNewLine;
+ const char* maybeSpace;
+ const char* maybeNewLine;
bool full = false; // whether to not elide nodes in output when possible
// (like implicit blocks) and to emit types
@@ -625,16 +1174,18 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
PrintSExpression(std::ostream& o) : o(o) {
setMinify(false);
- if (!full) full = isFullForced();
+ if (!full)
+ full = isFullForced();
}
- void printDebugLocation(const Function::DebugLocation &location) {
+ void printDebugLocation(const Function::DebugLocation& location) {
if (lastPrintedLocation == location) {
return;
}
lastPrintedLocation = location;
auto fileName = currModule->debugInfoFileNames[location.fileIndex];
- o << ";;@ " << fileName << ":" << location.lineNumber << ":" << location.columnNumber << '\n';
+ o << ";;@ " << fileName << ":" << location.lineNumber << ":"
+ << location.columnNumber << '\n';
doIndent(o, indent);
}
@@ -663,7 +1214,8 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
void setFull(bool full_) { full = full_; }
void incIndent() {
- if (minify) return;
+ if (minify)
+ return;
o << '\n';
indent++;
}
@@ -675,7 +1227,7 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
}
o << ')';
}
- void printFullLine(Expression *expression) {
+ void printFullLine(Expression* expression) {
!minify && doIndent(o, indent);
if (full) {
o << "[" << printType(expression->type) << "] ";
@@ -685,7 +1237,8 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
}
void visitBlock(Block* curr) {
- // special-case Block, because Block nesting (in their first element) can be incredibly deep
+ // special-case Block, because Block nesting (in their first element) can be
+ // incredibly deep
std::vector<Block*> stack;
while (1) {
if (stack.size() > 0) {
@@ -743,13 +1296,17 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
incIndent();
printFullLine(curr->condition);
// ifTrue and False have implict blocks, avoid printing them if possible
- if (!full && curr->ifTrue->is<Block>() && curr->ifTrue->dynCast<Block>()->name.isNull() && curr->ifTrue->dynCast<Block>()->list.size() == 1) {
+ if (!full && curr->ifTrue->is<Block>() &&
+ curr->ifTrue->dynCast<Block>()->name.isNull() &&
+ curr->ifTrue->dynCast<Block>()->list.size() == 1) {
printFullLine(curr->ifTrue->dynCast<Block>()->list.back());
} else {
printFullLine(curr->ifTrue);
}
if (curr->ifFalse) {
- if (!full && curr->ifFalse->is<Block>() && curr->ifFalse->dynCast<Block>()->name.isNull() && curr->ifFalse->dynCast<Block>()->list.size() == 1) {
+ if (!full && curr->ifFalse->is<Block>() &&
+ curr->ifFalse->dynCast<Block>()->name.isNull() &&
+ curr->ifFalse->dynCast<Block>()->list.size() == 1) {
printFullLine(curr->ifFalse->dynCast<Block>()->list.back());
} else {
printFullLine(curr->ifFalse);
@@ -795,7 +1352,8 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
}
incIndent();
}
- if (curr->value && !curr->value->is<Nop>()) printFullLine(curr->value);
+ if (curr->value && !curr->value->is<Nop>())
+ printFullLine(curr->value);
if (curr->condition) {
printFullLine(curr->condition);
}
@@ -805,13 +1363,13 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
o << '(';
PrintExpressionContents(currFunction, o).visit(curr);
incIndent();
- if (curr->value && !curr->value->is<Nop>()) printFullLine(curr->value);
+ if (curr->value && !curr->value->is<Nop>())
+ printFullLine(curr->value);
printFullLine(curr->condition);
decIndent();
}
- template<typename CallBase>
- void printCallOperands(CallBase* curr) {
+ template<typename CallBase> void printCallOperands(CallBase* curr) {
if (curr->operands.size() > 0) {
incIndent();
for (auto operand : curr->operands) {
@@ -895,7 +1453,7 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
decIndent();
}
void visitAtomicWait(AtomicWait* curr) {
- o << '(' ;
+ o << '(';
PrintExpressionContents(currFunction, o).visit(curr);
restoreNormalColor(o);
incIndent();
@@ -1060,7 +1618,8 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
// Module-level visitors
void visitFunctionType(FunctionType* curr, Name* internalName = nullptr) {
o << "(func";
- if (internalName) o << ' ' << *internalName;
+ if (internalName)
+ o << ' ' << *internalName;
if (curr->params.size() > 0) {
o << maybeSpace;
o << '(';
@@ -1082,11 +1641,20 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
printMedium(o, "export ");
printText(o, curr->name.str) << " (";
switch (curr->kind) {
- case ExternalKind::Function: o << "func"; break;
- case ExternalKind::Table: o << "table"; break;
- case ExternalKind::Memory: o << "memory"; break;
- case ExternalKind::Global: o << "global"; break;
- case ExternalKind::Invalid: WASM_UNREACHABLE();
+ case ExternalKind::Function:
+ o << "func";
+ break;
+ case ExternalKind::Table:
+ o << "table";
+ break;
+ case ExternalKind::Memory:
+ o << "memory";
+ break;
+ case ExternalKind::Global:
+ o << "global";
+ break;
+ case ExternalKind::Invalid:
+ WASM_UNREACHABLE();
}
o << ' ';
printName(curr->value, o) << "))";
@@ -1140,7 +1708,7 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
void visitImportedFunction(Function* curr) {
doIndent(o, indent);
currFunction = curr;
- lastPrintedLocation = { 0, 0, 0 };
+ lastPrintedLocation = {0, 0, 0};
o << '(';
emitImportHeader(curr);
if (curr->type.is()) {
@@ -1155,7 +1723,7 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
void visitDefinedFunction(Function* curr) {
doIndent(o, indent);
currFunction = curr;
- lastPrintedLocation = { 0, 0, 0 };
+ lastPrintedLocation = {0, 0, 0};
if (currFunction->prologLocation.size()) {
printDebugLocation(*currFunction->prologLocation.begin());
}
@@ -1180,7 +1748,8 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
for (size_t i = 0; i < curr->params.size(); i++) {
o << maybeSpace;
o << '(';
- printMinor(o, "param ") << printableLocal(i, currFunction) << ' ' << printType(curr->getLocalType(i)) << ')';
+ printMinor(o, "param ") << printableLocal(i, currFunction) << ' '
+ << printType(curr->getLocalType(i)) << ')';
}
}
if (curr->result != none) {
@@ -1192,14 +1761,17 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
for (size_t i = curr->getVarIndexBase(); i < curr->getNumLocals(); i++) {
doIndent(o, indent);
o << '(';
- printMinor(o, "local ") << printableLocal(i, currFunction) << ' ' << printType(curr->getLocalType(i)) << ')';
+ printMinor(o, "local ") << printableLocal(i, currFunction) << ' '
+ << printType(curr->getLocalType(i)) << ')';
o << maybeNewLine;
}
// Print the body.
if (!printStackIR || !curr->stackIR) {
- // It is ok to emit a block here, as a function can directly contain a list, even if our
- // ast avoids that for simplicity. We can just do that optimization here..
- if (!full && curr->body->is<Block>() && curr->body->cast<Block>()->name.isNull()) {
+ // It is ok to emit a block here, as a function can directly contain a
+ // list, even if our ast avoids that for simplicity. We can just do that
+ // optimization here..
+ if (!full && curr->body->is<Block>() &&
+ curr->body->cast<Block>()->name.isNull()) {
Block* block = curr->body->cast<Block>();
for (auto item : block->list) {
printFullLine(item);
@@ -1211,8 +1783,10 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
// Print the stack IR.
WasmPrinter::printStackIR(curr->stackIR.get(), o, curr);
}
- if (currFunction->epilogLocation.size() && lastPrintedLocation != *currFunction->epilogLocation.begin()) {
- // Print last debug location: mix of decIndent and printDebugLocation logic.
+ if (currFunction->epilogLocation.size() &&
+ lastPrintedLocation != *currFunction->epilogLocation.begin()) {
+ // Print last debug location: mix of decIndent and printDebugLocation
+ // logic.
doIndent(o, indent);
if (!minify) {
indent--;
@@ -1229,11 +1803,13 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
printMedium(o, "table") << ' ';
printName(curr->name, o) << ' ';
o << curr->initial;
- if (curr->hasMax()) o << ' ' << curr->max;
+ if (curr->hasMax())
+ o << ' ' << curr->max;
o << " funcref)";
}
void visitTable(Table* curr) {
- if (!curr->exists) return;
+ if (!curr->exists)
+ return;
if (curr->imported()) {
doIndent(o, indent);
o << '(';
@@ -1247,7 +1823,8 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
}
for (auto& segment : curr->segments) {
// Don't print empty segments
- if (segment.data.empty()) continue;
+ if (segment.data.empty())
+ continue;
doIndent(o, indent);
o << '(';
printMajor(o, "elem ");
@@ -1268,12 +1845,15 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
printMedium(o, "shared ");
}
o << curr->initial;
- if (curr->hasMax()) o << ' ' << curr->max;
- if (curr->shared) o << ")";
+ if (curr->hasMax())
+ o << ' ' << curr->max;
+ if (curr->shared)
+ o << ")";
o << ")";
}
void visitMemory(Memory* curr) {
- if (!curr->exists) return;
+ if (!curr->exists)
+ return;
if (curr->imported()) {
doIndent(o, indent);
o << '(';
@@ -1298,19 +1878,35 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
for (size_t i = 0; i < segment.data.size(); i++) {
unsigned char c = segment.data[i];
switch (c) {
- case '\n': o << "\\n"; break;
- case '\r': o << "\\0d"; break;
- case '\t': o << "\\t"; break;
- case '\f': o << "\\0c"; break;
- case '\b': o << "\\08"; break;
- case '\\': o << "\\\\"; break;
- case '"' : o << "\\\""; break;
- case '\'' : o << "\\'"; break;
+ case '\n':
+ o << "\\n";
+ break;
+ case '\r':
+ o << "\\0d";
+ break;
+ case '\t':
+ o << "\\t";
+ break;
+ case '\f':
+ o << "\\0c";
+ break;
+ case '\b':
+ o << "\\08";
+ break;
+ case '\\':
+ o << "\\\\";
+ break;
+ case '"':
+ o << "\\\"";
+ break;
+ case '\'':
+ o << "\\'";
+ break;
default: {
if (c >= 32 && c < 127) {
o << c;
} else {
- o << std::hex << '\\' << (c/16) << (c%16) << std::dec;
+ o << std::hex << '\\' << (c / 16) << (c % 16) << std::dec;
}
}
}
@@ -1331,27 +1927,20 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
visitFunctionType(child.get());
o << ")" << maybeNewLine;
}
- ModuleUtils::iterImportedMemories(*curr, [&](Memory* memory) {
- visitMemory(memory);
- });
- ModuleUtils::iterImportedTables(*curr, [&](Table* table) {
- visitTable(table);
- });
- ModuleUtils::iterImportedGlobals(*curr, [&](Global* global) {
- visitGlobal(global);
- });
- ModuleUtils::iterImportedFunctions(*curr, [&](Function* func) {
- visitFunction(func);
- });
- ModuleUtils::iterDefinedMemories(*curr, [&](Memory* memory) {
- visitMemory(memory);
- });
- ModuleUtils::iterDefinedTables(*curr, [&](Table* table) {
- visitTable(table);
- });
- ModuleUtils::iterDefinedGlobals(*curr, [&](Global* global) {
- visitGlobal(global);
- });
+ ModuleUtils::iterImportedMemories(
+ *curr, [&](Memory* memory) { visitMemory(memory); });
+ ModuleUtils::iterImportedTables(*curr,
+ [&](Table* table) { visitTable(table); });
+ ModuleUtils::iterImportedGlobals(
+ *curr, [&](Global* global) { visitGlobal(global); });
+ ModuleUtils::iterImportedFunctions(
+ *curr, [&](Function* func) { visitFunction(func); });
+ ModuleUtils::iterDefinedMemories(
+ *curr, [&](Memory* memory) { visitMemory(memory); });
+ ModuleUtils::iterDefinedTables(*curr,
+ [&](Table* table) { visitTable(table); });
+ ModuleUtils::iterDefinedGlobals(
+ *curr, [&](Global* global) { visitGlobal(global); });
for (auto& child : curr->exports) {
doIndent(o, indent);
visitExport(child.get());
@@ -1363,12 +1952,12 @@ struct PrintSExpression : public Visitor<PrintSExpression> {
printMedium(o, "start") << ' ' << curr->start << ')';
o << maybeNewLine;
}
- ModuleUtils::iterDefinedFunctions(*curr, [&](Function* func) {
- visitFunction(func);
- });
+ ModuleUtils::iterDefinedFunctions(
+ *curr, [&](Function* func) { visitFunction(func); });
for (auto& section : curr->userSections) {
doIndent(o, indent);
- o << ";; custom section \"" << section.name << "\", size " << section.data.size();
+ o << ";; custom section \"" << section.name << "\", size "
+ << section.data.size();
o << maybeNewLine;
}
decIndent();
@@ -1394,9 +1983,7 @@ public:
}
};
-Pass *createPrinterPass() {
- return new Printer();
-}
+Pass* createPrinterPass() { return new Printer(); }
// Prints out a minified module
@@ -1412,9 +1999,7 @@ public:
}
};
-Pass *createMinifiedPrinterPass() {
- return new MinifiedPrinter();
-}
+Pass* createMinifiedPrinterPass() { return new MinifiedPrinter(); }
// Prints out a module withough elision, i.e., the full ast
@@ -1430,9 +2015,7 @@ public:
}
};
-Pass *createFullPrinterPass() {
- return new FullPrinter();
-}
+Pass* createFullPrinterPass() { return new FullPrinter(); }
// Print Stack IR (if present)
@@ -1448,9 +2031,7 @@ public:
}
};
-Pass* createPrintStackIRPass() {
- return new PrintStackIR();
-}
+Pass* createPrintStackIRPass() { return new PrintStackIR(); }
// Print individual expressions
@@ -1466,7 +2047,10 @@ std::ostream& WasmPrinter::printModule(Module* module) {
return printModule(module, std::cout);
}
-std::ostream& WasmPrinter::printExpression(Expression* expression, std::ostream& o, bool minify, bool full) {
+std::ostream& WasmPrinter::printExpression(Expression* expression,
+ std::ostream& o,
+ bool minify,
+ bool full) {
if (!expression) {
o << "(null expression)";
return o;
@@ -1481,7 +2065,8 @@ std::ostream& WasmPrinter::printExpression(Expression* expression, std::ostream&
return o;
}
-std::ostream& WasmPrinter::printStackInst(StackInst* inst, std::ostream& o, Function* func) {
+std::ostream&
+WasmPrinter::printStackInst(StackInst* inst, std::ostream& o, Function* func) {
switch (inst->op) {
case StackInst::Basic: {
PrintExpressionContents(func, o).visit(inst->origin);
@@ -1503,12 +2088,14 @@ std::ostream& WasmPrinter::printStackInst(StackInst* inst, std::ostream& o, Func
o << "else";
break;
}
- default: WASM_UNREACHABLE();
+ default:
+ WASM_UNREACHABLE();
}
return o;
}
-std::ostream& WasmPrinter::printStackIR(StackIR* ir, std::ostream& o, Function* func) {
+std::ostream&
+WasmPrinter::printStackIR(StackIR* ir, std::ostream& o, Function* func) {
size_t indent = func ? 2 : 0;
auto doIndent = [&indent, &o]() {
for (size_t j = 0; j < indent; j++) {
@@ -1517,7 +2104,8 @@ std::ostream& WasmPrinter::printStackIR(StackIR* ir, std::ostream& o, Function*
};
for (Index i = 0; i < (*ir).size(); i++) {
auto* inst = (*ir)[i];
- if (!inst) continue;
+ if (!inst)
+ continue;
switch (inst->op) {
case StackInst::Basic: {
doIndent();
@@ -1548,7 +2136,8 @@ std::ostream& WasmPrinter::printStackIR(StackIR* ir, std::ostream& o, Function*
doIndent();
break;
}
- default: WASM_UNREACHABLE();
+ default:
+ WASM_UNREACHABLE();
}
std::cout << '\n';
}
diff --git a/src/passes/PrintCallGraph.cpp b/src/passes/PrintCallGraph.cpp
index 2a82b7aa1..7df5a8875 100644
--- a/src/passes/PrintCallGraph.cpp
+++ b/src/passes/PrintCallGraph.cpp
@@ -15,17 +15,17 @@
*/
//
-// Prints the call graph in .dot format. You can use http://www.graphviz.org/ to view .dot files.
+// Prints the call graph in .dot format. You can use http://www.graphviz.org/ to
+// view .dot files.
//
-
-#include <memory>
#include <iomanip>
+#include <memory>
-#include "wasm.h"
-#include "pass.h"
#include "ir/module-utils.h"
#include "ir/utils.h"
+#include "pass.h"
+#include "wasm.h"
namespace wasm {
@@ -33,7 +33,7 @@ struct PrintCallGraph : public Pass {
bool modifiesBinaryenIR() override { return false; }
void run(PassRunner* runner, Module* module) override {
- std::ostream &o = std::cout;
+ std::ostream& o = std::cout;
o << "digraph call {\n"
" rankdir = LR;\n"
" subgraph cluster_key {\n"
@@ -42,35 +42,40 @@ struct PrintCallGraph : public Pass {
" label = \"Key\";\n"
" \"Import\" [style=\"filled\", fillcolor=\"turquoise\"];\n"
" \"Export\" [style=\"filled\", fillcolor=\"gray\"];\n"
- " \"Indirect Target\" [style=\"filled, rounded\", fillcolor=\"white\"];\n"
- " \"A\" -> \"B\" [style=\"filled, rounded\", label = \"Direct Call\"];\n"
+ " \"Indirect Target\" [style=\"filled, rounded\", "
+ "fillcolor=\"white\"];\n"
+ " \"A\" -> \"B\" [style=\"filled, rounded\", label = \"Direct "
+ "Call\"];\n"
" }\n\n"
" node [shape=box, fontname=courier, fontsize=10];\n";
// Defined functions
ModuleUtils::iterDefinedFunctions(*module, [&](Function* curr) {
- std::cout << " \"" << curr->name << "\" [style=\"filled\", fillcolor=\"white\"];\n";
+ std::cout << " \"" << curr->name
+ << "\" [style=\"filled\", fillcolor=\"white\"];\n";
});
// Imported functions
ModuleUtils::iterImportedFunctions(*module, [&](Function* curr) {
- o << " \"" << curr->name << "\" [style=\"filled\", fillcolor=\"turquoise\"];\n";
+ o << " \"" << curr->name
+ << "\" [style=\"filled\", fillcolor=\"turquoise\"];\n";
});
// Exports
for (auto& curr : module->exports) {
if (curr->kind == ExternalKind::Function) {
Function* func = module->getFunction(curr->value);
- o << " \"" << func->name << "\" [style=\"filled\", fillcolor=\"gray\"];\n";
+ o << " \"" << func->name
+ << "\" [style=\"filled\", fillcolor=\"gray\"];\n";
}
}
struct CallPrinter : public PostWalker<CallPrinter> {
- Module *module;
- Function *currFunction;
+ Module* module;
+ Function* currFunction;
std::set<Name> visitedTargets; // Used to avoid printing duplicate edges.
std::vector<Function*> allIndirectTargets;
- CallPrinter(Module *module) : module(module) {
+ CallPrinter(Module* module) : module(module) {
// Walk function bodies.
ModuleUtils::iterDefinedFunctions(*module, [&](Function* curr) {
currFunction = curr;
@@ -78,11 +83,13 @@ struct PrintCallGraph : public Pass {
walk(curr->body);
});
}
- void visitCall(Call *curr) {
+ void visitCall(Call* curr) {
auto* target = module->getFunction(curr->target);
- if (visitedTargets.count(target->name) > 0) return;
+ if (visitedTargets.count(target->name) > 0)
+ return;
visitedTargets.insert(target->name);
- std::cout << " \"" << currFunction->name << "\" -> \"" << target->name << "\"; // call\n";
+ std::cout << " \"" << currFunction->name << "\" -> \"" << target->name
+ << "\"; // call\n";
}
};
CallPrinter printer(module);
@@ -99,8 +106,6 @@ struct PrintCallGraph : public Pass {
}
};
-Pass *createPrintCallGraphPass() {
- return new PrintCallGraph();
-}
+Pass* createPrintCallGraphPass() { return new PrintCallGraph(); }
} // namespace wasm
diff --git a/src/passes/PrintFeatures.cpp b/src/passes/PrintFeatures.cpp
index 9c7172eee..53ef6676f 100644
--- a/src/passes/PrintFeatures.cpp
+++ b/src/passes/PrintFeatures.cpp
@@ -18,9 +18,9 @@
// Print out the feature options corresponding to enabled features
//
-#include "wasm.h"
-#include "wasm-features.h"
#include "pass.h"
+#include "wasm-features.h"
+#include "wasm.h"
namespace wasm {
@@ -32,8 +32,6 @@ struct PrintFeatures : public Pass {
}
};
-Pass* createPrintFeaturesPass() {
- return new PrintFeatures();
-}
+Pass* createPrintFeaturesPass() { return new PrintFeatures(); }
} // namespace wasm
diff --git a/src/passes/ReReloop.cpp b/src/passes/ReReloop.cpp
index 760bab2b5..3a7c2ad87 100644
--- a/src/passes/ReReloop.cpp
+++ b/src/passes/ReReloop.cpp
@@ -23,13 +23,13 @@
#include <memory>
-#include "wasm.h"
-#include "wasm-builder.h"
-#include "wasm-traversal.h"
-#include "pass.h"
#include "cfg/Relooper.h"
#include "ir/flat.h"
#include "ir/utils.h"
+#include "pass.h"
+#include "wasm-builder.h"
+#include "wasm-traversal.h"
+#include "wasm.h"
#ifdef RERELOOP_DEBUG
#include <wasm-printing.h>
@@ -62,21 +62,13 @@ struct ReReloop final : public Pass {
return currCFGBlock = curr;
}
- CFG::Block* startCFGBlock() {
- return setCurrCFGBlock(makeCFGBlock());
- }
+ CFG::Block* startCFGBlock() { return setCurrCFGBlock(makeCFGBlock()); }
- CFG::Block* getCurrCFGBlock() {
- return currCFGBlock;
- }
+ CFG::Block* getCurrCFGBlock() { return currCFGBlock; }
- Block* getCurrBlock() {
- return currCFGBlock->Code->cast<Block>();
- }
+ Block* getCurrBlock() { return currCFGBlock->Code->cast<Block>(); }
- void finishBlock() {
- getCurrBlock()->finalize();
- }
+ void finishBlock() { getCurrBlock()->finalize(); }
// break handling
@@ -86,19 +78,21 @@ struct ReReloop final : public Pass {
breakTargets[name] = target;
}
- CFG::Block* getBreakTarget(Name name) {
- return breakTargets[name];
- }
+ CFG::Block* getBreakTarget(Name name) { return breakTargets[name]; }
// branch handling
- void addBranch(CFG::Block* from, CFG::Block* to, Expression* condition = nullptr) {
+ void
+ addBranch(CFG::Block* from, CFG::Block* to, Expression* condition = nullptr) {
from->AddBranchTo(to, condition);
}
- void addSwitchBranch(CFG::Block* from, CFG::Block* to, const std::set<Index>& values) {
+ void addSwitchBranch(CFG::Block* from,
+ CFG::Block* to,
+ const std::set<Index>& values) {
std::vector<Index> list;
- for (auto i : values) list.push_back(i);
+ for (auto i : values)
+ list.push_back(i);
from->AddSwitchBranchTo(to, std::move(list));
}
@@ -107,9 +101,7 @@ struct ReReloop final : public Pass {
struct Task {
ReReloop& parent;
Task(ReReloop& parent) : parent(parent) {}
- virtual void run() {
- WASM_UNREACHABLE();
- }
+ virtual void run() { WASM_UNREACHABLE(); }
};
typedef std::shared_ptr<Task> TaskPtr;
@@ -120,9 +112,7 @@ struct ReReloop final : public Pass {
TriageTask(ReReloop& parent, Expression* curr) : Task(parent), curr(curr) {}
- void run() override {
- parent.triage(curr);
- }
+ void run() override { parent.triage(curr); }
};
struct BlockTask final : public Task {
@@ -183,10 +173,12 @@ struct ReReloop final : public Pass {
parent.addBranch(task->condition, ifTrueBegin, curr->condition);
if (curr->ifFalse) {
parent.stack.push_back(task);
- parent.stack.push_back(std::make_shared<TriageTask>(parent, curr->ifFalse));
+ parent.stack.push_back(
+ std::make_shared<TriageTask>(parent, curr->ifFalse));
}
parent.stack.push_back(task);
- parent.stack.push_back(std::make_shared<TriageTask>(parent, curr->ifTrue));
+ parent.stack.push_back(
+ std::make_shared<TriageTask>(parent, curr->ifTrue));
}
void run() override {
@@ -194,7 +186,8 @@ struct ReReloop final : public Pass {
// end of ifTrue
ifTrueEnd = parent.getCurrCFGBlock();
auto* after = parent.startCFGBlock();
- parent.addBranch(condition, after); // if condition was false, go after the ifTrue, to ifFalse or outside
+ // if condition was false, go after the ifTrue, to ifFalse or outside
+ parent.addBranch(condition, after);
if (!curr->ifFalse) {
parent.addBranch(ifTrueEnd, after);
}
@@ -213,9 +206,11 @@ struct ReReloop final : public Pass {
struct BreakTask : public Task {
static void handle(ReReloop& parent, Break* curr) {
- // add the branch. note how if the condition is false, it is the right value there as well
+ // add the branch. note how if the condition is false, it is the right
+ // value there as well
auto* before = parent.getCurrCFGBlock();
- parent.addBranch(before, parent.getBreakTarget(curr->name), curr->condition);
+ parent.addBranch(
+ before, parent.getBreakTarget(curr->name), curr->condition);
if (curr->condition) {
auto* after = parent.startCFGBlock();
parent.addBranch(before, after);
@@ -238,12 +233,14 @@ struct ReReloop final : public Pass {
targetValues[targets[i]].insert(i);
}
for (auto& iter : targetValues) {
- parent.addSwitchBranch(before, parent.getBreakTarget(iter.first), iter.second);
+ parent.addSwitchBranch(
+ before, parent.getBreakTarget(iter.first), iter.second);
}
- // the default may be among the targets, in which case, we can't add it simply as
- // it would be a duplicate, so create a temp block
+ // the default may be among the targets, in which case, we can't add it
+ // simply as it would be a duplicate, so create a temp block
if (targetValues.count(curr->default_) == 0) {
- parent.addSwitchBranch(before, parent.getBreakTarget(curr->default_), std::set<Index>());
+ parent.addSwitchBranch(
+ before, parent.getBreakTarget(curr->default_), std::set<Index>());
} else {
auto* temp = parent.startCFGBlock();
parent.addSwitchBranch(before, temp, std::set<Index>());
@@ -297,7 +294,9 @@ struct ReReloop final : public Pass {
// TODO: optimize with this?
}
- void runOnFunction(PassRunner* runner, Module* module, Function* function) override {
+ void runOnFunction(PassRunner* runner,
+ Module* module,
+ Function* function) override {
Flat::verifyFlatness(function);
// since control flow is flattened, this is pretty simple
@@ -316,15 +315,14 @@ struct ReReloop final : public Pass {
// finish the current block
finishBlock();
// blocks that do not have any exits are dead ends in the relooper. we need
- // to make sure that are in fact dead ends, and do not flow control anywhere.
- // add a return as needed
+ // to make sure that are in fact dead ends, and do not flow control
+ // anywhere. add a return as needed
for (auto* cfgBlock : relooper->Blocks) {
auto* block = cfgBlock->Code->cast<Block>();
if (cfgBlock->BranchesOut.empty() && block->type != unreachable) {
- block->list.push_back(
- function->result == none ? (Expression*)builder->makeReturn()
- : (Expression*)builder->makeUnreachable()
- );
+ block->list.push_back(function->result == none
+ ? (Expression*)builder->makeReturn()
+ : (Expression*)builder->makeUnreachable());
block->finalize();
}
}
@@ -356,10 +354,8 @@ struct ReReloop final : public Pass {
// code, for example, which could be optimized out later
// but isn't yet), then make sure it has a proper type
if (function->result != none && function->body->type == none) {
- function->body = builder.makeSequence(
- function->body,
- builder.makeUnreachable()
- );
+ function->body =
+ builder.makeSequence(function->body, builder.makeUnreachable());
}
}
// TODO: should this be in the relooper itself?
@@ -367,8 +363,6 @@ struct ReReloop final : public Pass {
}
};
-Pass *createReReloopPass() {
- return new ReReloop();
-}
+Pass* createReReloopPass() { return new ReReloop(); }
} // namespace wasm
diff --git a/src/passes/RedundantSetElimination.cpp b/src/passes/RedundantSetElimination.cpp
index 6f39fce9f..e03020da0 100644
--- a/src/passes/RedundantSetElimination.cpp
+++ b/src/passes/RedundantSetElimination.cpp
@@ -33,13 +33,13 @@
// here).
//
-#include <wasm.h>
-#include <pass.h>
-#include <wasm-builder.h>
#include <cfg/cfg-traversal.h>
#include <ir/literal-utils.h>
#include <ir/utils.h>
+#include <pass.h>
#include <support/unique_deferring_queue.h>
+#include <wasm-builder.h>
+#include <wasm.h>
namespace wasm {
@@ -57,7 +57,10 @@ struct Info {
std::vector<Expression**> setps;
};
-struct RedundantSetElimination : public WalkerPass<CFGWalker<RedundantSetElimination, Visitor<RedundantSetElimination>, Info>> {
+struct RedundantSetElimination
+ : public WalkerPass<CFGWalker<RedundantSetElimination,
+ Visitor<RedundantSetElimination>,
+ Info>> {
bool isFunctionParallel() override { return true; }
Pass* create() override { return new RedundantSetElimination(); }
@@ -66,7 +69,8 @@ struct RedundantSetElimination : public WalkerPass<CFGWalker<RedundantSetElimina
// cfg traversal work
- static void doVisitSetLocal(RedundantSetElimination* self, Expression** currp) {
+ static void doVisitSetLocal(RedundantSetElimination* self,
+ Expression** currp) {
if (self->currBasicBlock) {
self->currBasicBlock->contents.setps.push_back(currp);
}
@@ -77,7 +81,8 @@ struct RedundantSetElimination : public WalkerPass<CFGWalker<RedundantSetElimina
void doWalkFunction(Function* func) {
numLocals = func->getNumLocals();
// create the CFG by walking the IR
- CFGWalker<RedundantSetElimination, Visitor<RedundantSetElimination>, Info>::doWalkFunction(func);
+ CFGWalker<RedundantSetElimination, Visitor<RedundantSetElimination>, Info>::
+ doWalkFunction(func);
// flow values across blocks
flowValues(func);
// remove redundant sets
@@ -88,8 +93,10 @@ struct RedundantSetElimination : public WalkerPass<CFGWalker<RedundantSetElimina
Index nextValue = 1; // 0 is reserved for the "unseen value"
std::unordered_map<Literal, Index> literalValues; // each constant has a value
- std::unordered_map<Expression*, Index> expressionValues; // each value can have a value
- std::unordered_map<BasicBlock*, std::unordered_map<Index, Index>> blockMergeValues; // each block has values for each merge
+ std::unordered_map<Expression*, Index>
+ expressionValues; // each value can have a value
+ std::unordered_map<BasicBlock*, std::unordered_map<Index, Index>>
+ blockMergeValues; // each block has values for each merge
Index getUnseenValue() { // we haven't seen this location yet
return 0;
@@ -130,17 +137,20 @@ struct RedundantSetElimination : public WalkerPass<CFGWalker<RedundantSetElimina
return iter->second;
}
#ifdef RSE_DEBUG
- std::cout << "new block-merge value for " << block << " : " << index << '\n';
+ std::cout << "new block-merge value for " << block << " : " << index
+ << '\n';
#endif
return mergeValues[index] = getUniqueValue();
}
bool isBlockMergeValue(BasicBlock* block, Index index, Index value) {
auto iter = blockMergeValues.find(block);
- if (iter == blockMergeValues.end()) return false;
+ if (iter == blockMergeValues.end())
+ return false;
auto& mergeValues = iter->second;
auto iter2 = mergeValues.find(index);
- if (iter2 == mergeValues.end()) return false;
+ if (iter2 == mergeValues.end())
+ return false;
return value == iter2->second;
}
@@ -172,7 +182,8 @@ struct RedundantSetElimination : public WalkerPass<CFGWalker<RedundantSetElimina
#endif
start[i] = getUniqueValue();
} else {
- start[i] = getLiteralValue(Literal::makeZero(func->getLocalType(i)));
+ start[i] =
+ getLiteralValue(Literal::makeZero(func->getLocalType(i)));
}
}
} else {
@@ -274,7 +285,8 @@ struct RedundantSetElimination : public WalkerPass<CFGWalker<RedundantSetElimina
#ifdef RSE_DEBUG
dump("start", curr->contents.start);
#endif
- // flow values through it, then add those we can reach if they need an update.
+ // flow values through it, then add those we can reach if they need an
+ // update.
auto currValues = curr->contents.start; // we'll modify this as we go
auto& setps = curr->contents.setps;
for (auto** setp : setps) {
@@ -351,7 +363,8 @@ struct RedundantSetElimination : public WalkerPass<CFGWalker<RedundantSetElimina
}
}
for (Index i = 0; i < block->contents.start.size(); i++) {
- std::cout << " start[" << i << "] = " << block->contents.start[i] << '\n';
+ std::cout << " start[" << i << "] = " << block->contents.start[i]
+ << '\n';
}
for (auto** setp : block->contents.setps) {
std::cout << " " << *setp << '\n';
@@ -370,7 +383,7 @@ struct RedundantSetElimination : public WalkerPass<CFGWalker<RedundantSetElimina
} // namespace
-Pass *createRedundantSetEliminationPass() {
+Pass* createRedundantSetEliminationPass() {
return new RedundantSetElimination();
}
diff --git a/src/passes/RelooperJumpThreading.cpp b/src/passes/RelooperJumpThreading.cpp
index db865d1bb..9fcf7a4a8 100644
--- a/src/passes/RelooperJumpThreading.cpp
+++ b/src/passes/RelooperJumpThreading.cpp
@@ -19,14 +19,13 @@
// This assumes the very specific output the fastcomp relooper emits,
// including the name of the 'label' variable.
-#include "wasm.h"
-#include "pass.h"
-#include "ir/utils.h"
#include "ir/manipulation.h"
+#include "ir/utils.h"
+#include "pass.h"
+#include "wasm.h"
namespace wasm {
-
static Name LABEL("label");
static Name getInnerName(int i) {
@@ -38,13 +37,17 @@ static Name getOuterName(int i) {
}
static If* isLabelCheckingIf(Expression* curr, Index labelIndex) {
- if (!curr) return nullptr;
+ if (!curr)
+ return nullptr;
auto* iff = curr->dynCast<If>();
- if (!iff) return nullptr;
+ if (!iff)
+ return nullptr;
auto* condition = iff->condition->dynCast<Binary>();
- if (!(condition && condition->op == EqInt32)) return nullptr;
+ if (!(condition && condition->op == EqInt32))
+ return nullptr;
auto* left = condition->left->dynCast<GetLocal>();
- if (!(left && left->index == labelIndex)) return nullptr;
+ if (!(left && left->index == labelIndex))
+ return nullptr;
return iff;
}
@@ -53,10 +56,13 @@ static Index getCheckedLabelValue(If* iff) {
}
static SetLocal* isLabelSettingSetLocal(Expression* curr, Index labelIndex) {
- if (!curr) return nullptr;
+ if (!curr)
+ return nullptr;
auto* set = curr->dynCast<SetLocal>();
- if (!set) return nullptr;
- if (set->index != labelIndex) return nullptr;
+ if (!set)
+ return nullptr;
+ if (set->index != labelIndex)
+ return nullptr;
return set;
}
@@ -69,7 +75,10 @@ struct LabelUseFinder : public PostWalker<LabelUseFinder> {
std::map<Index, Index>& checks; // label value => number of checks on it
std::map<Index, Index>& sets; // label value => number of sets to it
- LabelUseFinder(Index labelIndex, std::map<Index, Index>& checks, std::map<Index, Index>& sets) : labelIndex(labelIndex), checks(checks), sets(sets) {}
+ LabelUseFinder(Index labelIndex,
+ std::map<Index, Index>& checks,
+ std::map<Index, Index>& sets)
+ : labelIndex(labelIndex), checks(checks), sets(sets) {}
void visitIf(If* curr) {
if (isLabelCheckingIf(curr, labelIndex)) {
@@ -84,7 +93,8 @@ struct LabelUseFinder : public PostWalker<LabelUseFinder> {
}
};
-struct RelooperJumpThreading : public WalkerPass<ExpressionStackWalker<RelooperJumpThreading>> {
+struct RelooperJumpThreading
+ : public WalkerPass<ExpressionStackWalker<RelooperJumpThreading>> {
bool isFunctionParallel() override { return true; }
Pass* create() override { return new RelooperJumpThreading; }
@@ -98,9 +108,11 @@ struct RelooperJumpThreading : public WalkerPass<ExpressionStackWalker<RelooperJ
void visitBlock(Block* curr) {
// look for the if label == X pattern
auto& list = curr->list;
- if (list.size() == 0) return;
+ if (list.size() == 0)
+ return;
for (Index i = 0; i < list.size() - 1; i++) {
- // once we see something that might be irreducible, we must skip that if and the rest of the dependents
+ // once we see something that might be irreducible, we must skip that if
+ // and the rest of the dependents
bool irreducible = false;
Index origin = i;
for (Index j = i + 1; j < list.size(); j++) {
@@ -113,15 +125,20 @@ struct RelooperJumpThreading : public WalkerPass<ExpressionStackWalker<RelooperJ
i++;
continue;
}
- // if the next element is a block, it may be the holding block of label-checking ifs
+ // if the next element is a block, it may be the holding block of
+ // label-checking ifs
if (auto* holder = list[j]->dynCast<Block>()) {
if (holder->list.size() > 0) {
if (If* iff = isLabelCheckingIf(holder->list[0], labelIndex)) {
irreducible |= hasIrreducibleControlFlow(iff, list[origin]);
if (!irreducible) {
- // this is indeed a holder. we can process the ifs, and must also move
- // the block to enclose the origin, so it is properly reachable
- assert(holder->list.size() == 1); // must be size 1, a relooper multiple will have its own label, and is an if-else sequence and nothing more
+ // this is indeed a holder. we can process the ifs, and must
+ // also move the block to enclose the origin, so it is properly
+ // reachable
+
+ // must be size 1, a relooper multiple will have its own label,
+ // and is an if-else sequence and nothing more
+ assert(holder->list.size() == 1);
optimizeJumpsToLabelCheck(list[origin], iff);
holder->list[0] = list[origin];
list[origin] = holder;
@@ -155,13 +172,13 @@ struct RelooperJumpThreading : public WalkerPass<ExpressionStackWalker<RelooperJ
}
private:
-
bool hasIrreducibleControlFlow(If* iff, Expression* origin) {
- // Gather the checks in this if chain. If all the label values checked are only set in origin,
- // then since origin is right before us, this is not irreducible - we can replace all sets
- // in origin with jumps forward to us, and since there is nothing else, this is safe and complete.
- // We must also have the property that there is just one check for the label value, as otherwise
- // node splitting has complicated things.
+ // Gather the checks in this if chain. If all the label values checked are
+ // only set in origin, then since origin is right before us, this is not
+ // irreducible - we can replace all sets in origin with jumps forward to us,
+ // and since there is nothing else, this is safe and complete. We must also
+ // have the property that there is just one check for the label value, as
+ // otherwise node splitting has complicated things.
std::map<Index, Index> labelChecksInOrigin;
std::map<Index, Index> labelSetsInOrigin;
LabelUseFinder finder(labelIndex, labelChecksInOrigin, labelSetsInOrigin);
@@ -169,23 +186,27 @@ private:
while (iff) {
auto num = getCheckedLabelValue(iff);
assert(labelChecks[num] > 0);
- if (labelChecks[num] > 1) return true; // checked more than once, somewhere in function
+ if (labelChecks[num] > 1)
+ return true; // checked more than once, somewhere in function
assert(labelChecksInOrigin[num] == 0);
if (labelSetsInOrigin[num] != labelSets[num]) {
assert(labelSetsInOrigin[num] < labelSets[num]);
// the label is set outside of the origin
- // if the only other location is inside the if body, then it is ok - it must be in a loop
- // and returning to the top of the loop body, so we don't need to do anything for that
- // label setting anyhow
+ // if the only other location is inside the if body, then it is ok - it
+ // must be in a loop and returning to the top of the loop body, so we
+ // don't need to do anything for that label setting anyhow
std::map<Index, Index> labelChecksInIfTrue;
std::map<Index, Index> labelSetsInIfTrue;
- LabelUseFinder finder(labelIndex, labelChecksInIfTrue, labelSetsInIfTrue);
+ LabelUseFinder finder(
+ labelIndex, labelChecksInIfTrue, labelSetsInIfTrue);
finder.walk(iff->ifTrue);
if (labelSetsInOrigin[num] + labelSetsInIfTrue[num] < labelSets[num]) {
- // label set somewhere we can't see now, could be irreducible control flow
- // TODO: one case where this happens is instead of an if-chain, we have
- // ifs and a switch on label|0, in separate elements. perhaps not
- // emitting switches on label|0 in the relooper would avoid that.
+ // label set somewhere we can't see now, could be irreducible control
+ // flow
+ // TODO: one case where this happens is instead of an if-chain, we
+ // have ifs and a switch on label|0, in separate elements.
+ // perhaps not emitting switches on label|0 in the relooper
+ // would avoid that.
return true;
}
}
@@ -195,19 +216,23 @@ private:
}
// optimizes jumps to a label check
- // * origin is where the jumps originate, and also where we should write our output
+ // * origin is where the jumps originate, and also where we should write our
+ // output
// * iff is the if
void optimizeJumpsToLabelCheck(Expression*& origin, If* iff) {
Index nameCounter = newNameCounter++;
Index num = getCheckedLabelValue(iff);
// create a new block for this jump target
Builder builder(*getModule());
- // origin is where all jumps to this target must come from - the element right before this if
- // we break out of inner to reach the target. instead of flowing out of normally, we break out of the outer, so we skip the target.
+ // origin is where all jumps to this target must come from - the element
+ // right before this if we break out of inner to reach the target. instead
+ // of flowing out of normally, we break out of the outer, so we skip the
+ // target.
auto innerName = getInnerName(nameCounter);
auto outerName = getOuterName(nameCounter);
auto* ifFalse = iff->ifFalse;
- // all assignments of label to the target can be replaced with breaks to the target, via innerName
+ // all assignments of label to the target can be replaced with breaks to the
+ // target, via innerName
struct JumpUpdater : public PostWalker<JumpUpdater> {
Index labelIndex;
Index targetNum;
@@ -228,7 +253,8 @@ private:
updater.setModule(getModule());
updater.walk(origin);
// restructure code
- auto* inner = builder.blockifyWithName(origin, innerName, builder.makeBreak(outerName));
+ auto* inner =
+ builder.blockifyWithName(origin, innerName, builder.makeBreak(outerName));
auto* outer = builder.makeSequence(inner, iff->ifTrue);
outer->name = outerName;
origin = outer;
@@ -241,9 +267,6 @@ private:
// declare pass
-Pass *createRelooperJumpThreadingPass() {
- return new RelooperJumpThreading();
-}
+Pass* createRelooperJumpThreadingPass() { return new RelooperJumpThreading(); }
} // namespace wasm
-
diff --git a/src/passes/RemoveImports.cpp b/src/passes/RemoveImports.cpp
index e70cbb3ac..a3ca45b16 100644
--- a/src/passes/RemoveImports.cpp
+++ b/src/passes/RemoveImports.cpp
@@ -22,14 +22,14 @@
// look at all the rest of the code).
//
-#include "wasm.h"
-#include "pass.h"
#include "ir/module-utils.h"
+#include "pass.h"
+#include "wasm.h"
namespace wasm {
struct RemoveImports : public WalkerPass<PostWalker<RemoveImports>> {
- void visitCall(Call *curr) {
+ void visitCall(Call* curr) {
auto* func = getModule()->getFunction(curr->target);
if (!func->imported()) {
return;
@@ -44,19 +44,16 @@ struct RemoveImports : public WalkerPass<PostWalker<RemoveImports>> {
}
}
- void visitModule(Module *curr) {
+ void visitModule(Module* curr) {
std::vector<Name> names;
- ModuleUtils::iterImportedFunctions(*curr, [&](Function* func) {
- names.push_back(func->name);
- });
+ ModuleUtils::iterImportedFunctions(
+ *curr, [&](Function* func) { names.push_back(func->name); });
for (auto& name : names) {
curr->removeFunction(name);
}
}
};
-Pass *createRemoveImportsPass() {
- return new RemoveImports();
-}
+Pass* createRemoveImportsPass() { return new RemoveImports(); }
} // namespace wasm
diff --git a/src/passes/RemoveMemory.cpp b/src/passes/RemoveMemory.cpp
index 33d9e6da5..399a32933 100644
--- a/src/passes/RemoveMemory.cpp
+++ b/src/passes/RemoveMemory.cpp
@@ -18,8 +18,8 @@
// Removeds memory segments, leaving only code in the module.
//
-#include <wasm.h>
#include <pass.h>
+#include <wasm.h>
namespace wasm {
@@ -29,8 +29,6 @@ struct RemoveMemory : public Pass {
}
};
-Pass *createRemoveMemoryPass() {
- return new RemoveMemory();
-}
+Pass* createRemoveMemoryPass() { return new RemoveMemory(); }
} // namespace wasm
diff --git a/src/passes/RemoveNonJSOps.cpp b/src/passes/RemoveNonJSOps.cpp
index fc3e42185..26ede65c5 100644
--- a/src/passes/RemoveNonJSOps.cpp
+++ b/src/passes/RemoveNonJSOps.cpp
@@ -27,17 +27,17 @@
// after walking the current module.
//
-#include <wasm.h>
#include <pass.h>
+#include <wasm.h>
-#include "asmjs/shared-constants.h"
-#include "wasm-builder.h"
-#include "wasm-s-parser.h"
#include "abi/js.h"
+#include "asmjs/shared-constants.h"
+#include "ir/find_all.h"
#include "ir/memory-utils.h"
#include "ir/module-utils.h"
-#include "ir/find_all.h"
#include "passes/intrinsics-module.h"
+#include "wasm-builder.h"
+#include "wasm-s-parser.h"
namespace wasm {
@@ -56,7 +56,8 @@ struct RemoveNonJSOpsPass : public WalkerPass<PostWalker<RemoveNonJSOpsPass>> {
// Discover all of the intrinsics that we need to inject, lowering all
// operations to intrinsic calls while we're at it.
- if (!builder) builder = make_unique<Builder>(*module);
+ if (!builder)
+ builder = make_unique<Builder>(*module);
PostWalker<RemoveNonJSOpsPass>::doWalkModule(module);
if (neededIntrinsics.size() == 0) {
@@ -86,7 +87,7 @@ struct RemoveNonJSOpsPass : public WalkerPass<PostWalker<RemoveNonJSOpsPass>> {
// Recursively probe all needed intrinsics for transitively used
// functions. This is building up a set of functions we'll link into our
// module.
- for (auto &name : neededIntrinsics) {
+ for (auto& name : neededIntrinsics) {
addNeededFunctions(intrinsicsModule, name, neededFunctions);
}
neededIntrinsics.clear();
@@ -94,10 +95,11 @@ struct RemoveNonJSOpsPass : public WalkerPass<PostWalker<RemoveNonJSOpsPass>> {
// Link in everything that wasn't already linked in. After we've done the
// copy we then walk the function to rewrite any non-js operations it has
// as well.
- for (auto &name : neededFunctions) {
+ for (auto& name : neededFunctions) {
auto* func = module->getFunctionOrNull(name);
if (!func) {
- func = ModuleUtils::copyFunction(intrinsicsModule.getFunction(name), *module);
+ func = ModuleUtils::copyFunction(intrinsicsModule.getFunction(name),
+ *module);
}
doWalkFunction(func);
}
@@ -123,7 +125,7 @@ struct RemoveNonJSOpsPass : public WalkerPass<PostWalker<RemoveNonJSOpsPass>> {
}
}
- void addNeededFunctions(Module &m, Name name, std::set<Name> &needed) {
+ void addNeededFunctions(Module& m, Name name, std::set<Name>& needed) {
if (needed.count(name)) {
return;
}
@@ -140,7 +142,8 @@ struct RemoveNonJSOpsPass : public WalkerPass<PostWalker<RemoveNonJSOpsPass>> {
}
void doWalkFunction(Function* func) {
- if (!builder) builder = make_unique<Builder>(*getModule());
+ if (!builder)
+ builder = make_unique<Builder>(*getModule());
PostWalker<RemoveNonJSOpsPass>::doWalkFunction(func);
}
@@ -224,10 +227,12 @@ struct RemoveNonJSOpsPass : public WalkerPass<PostWalker<RemoveNonJSOpsPass>> {
name = WASM_I64_UREM;
break;
- default: return;
+ default:
+ return;
}
neededIntrinsics.insert(name);
- replaceCurrent(builder->makeCall(name, {curr->left, curr->right}, curr->type));
+ replaceCurrent(
+ builder->makeCall(name, {curr->left, curr->right}, curr->type));
}
void rewriteCopysign(Binary* curr) {
@@ -253,33 +258,20 @@ struct RemoveNonJSOpsPass : public WalkerPass<PostWalker<RemoveNonJSOpsPass>> {
otherBits = Literal((uint64_t(1) << 63) - 1);
break;
- default: return;
+ default:
+ return;
}
- replaceCurrent(
- builder->makeUnary(
- int2float,
- builder->makeBinary(
- bitOr,
- builder->makeBinary(
- bitAnd,
- builder->makeUnary(
- float2int,
- curr->left
- ),
- builder->makeConst(otherBits)
- ),
- builder->makeBinary(
- bitAnd,
- builder->makeUnary(
- float2int,
- curr->right
- ),
- builder->makeConst(signBit)
- )
- )
- )
- );
+ replaceCurrent(builder->makeUnary(
+ int2float,
+ builder->makeBinary(
+ bitOr,
+ builder->makeBinary(bitAnd,
+ builder->makeUnary(float2int, curr->left),
+ builder->makeConst(otherBits)),
+ builder->makeBinary(bitAnd,
+ builder->makeUnary(float2int, curr->right),
+ builder->makeConst(signBit)))));
}
void visitUnary(Unary* curr) {
@@ -313,7 +305,8 @@ struct RemoveNonJSOpsPass : public WalkerPass<PostWalker<RemoveNonJSOpsPass>> {
functionCall = WASM_CTZ32;
break;
- default: return;
+ default:
+ return;
}
neededIntrinsics.insert(functionCall);
replaceCurrent(builder->makeCall(functionCall, {curr->value}, curr->type));
@@ -324,9 +317,6 @@ struct RemoveNonJSOpsPass : public WalkerPass<PostWalker<RemoveNonJSOpsPass>> {
}
};
-Pass* createRemoveNonJSOpsPass() {
- return new RemoveNonJSOpsPass();
-}
+Pass* createRemoveNonJSOpsPass() { return new RemoveNonJSOpsPass(); }
} // namespace wasm
-
diff --git a/src/passes/RemoveUnusedBrs.cpp b/src/passes/RemoveUnusedBrs.cpp
index 614503581..4b5c9613e 100644
--- a/src/passes/RemoveUnusedBrs.cpp
+++ b/src/passes/RemoveUnusedBrs.cpp
@@ -18,26 +18,31 @@
// Removes branches for which we go to where they go anyhow
//
-#include <wasm.h>
-#include <pass.h>
-#include <parsing.h>
#include <ir/branch-utils.h>
#include <ir/cost.h>
#include <ir/effects.h>
#include <ir/utils.h>
+#include <parsing.h>
+#include <pass.h>
#include <wasm-builder.h>
+#include <wasm.h>
namespace wasm {
// to turn an if into a br-if, we must be able to reorder the
// condition and possible value, and the possible value must
// not have side effects (as they would run unconditionally)
-static bool canTurnIfIntoBrIf(Expression* ifCondition, Expression* brValue, PassOptions& options) {
+static bool canTurnIfIntoBrIf(Expression* ifCondition,
+ Expression* brValue,
+ PassOptions& options) {
// if the if isn't even reached, this is all dead code anyhow
- if (ifCondition->type == unreachable) return false;
- if (!brValue) return true;
+ if (ifCondition->type == unreachable)
+ return false;
+ if (!brValue)
+ return true;
EffectAnalyzer value(options, brValue);
- if (value.hasSideEffects()) return false;
+ if (value.hasSideEffects())
+ return false;
return !EffectAnalyzer(options, ifCondition).invalidates(value);
}
@@ -50,8 +55,8 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
typedef std::vector<Expression**> Flows;
- // list of breaks that are currently flowing. if they reach their target without
- // interference, they can be removed (or their value forwarded TODO)
+ // list of breaks that are currently flowing. if they reach their target
+ // without interference, they can be removed (or their value forwarded TODO)
Flows flows;
// a stack for if-else contents, we merge their outputs
@@ -160,23 +165,22 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
}
}
- void stopFlow() {
- flows.clear();
- }
+ void stopFlow() { flows.clear(); }
void removeValueFlow(Flows& currFlows) {
- currFlows.erase(std::remove_if(currFlows.begin(), currFlows.end(), [&](Expression** currp) {
- auto* curr = *currp;
- if (auto* ret = curr->dynCast<Return>()) {
- return ret->value;
- }
- return curr->cast<Break>()->value;
- }), currFlows.end());
+ currFlows.erase(std::remove_if(currFlows.begin(),
+ currFlows.end(),
+ [&](Expression** currp) {
+ auto* curr = *currp;
+ if (auto* ret = curr->dynCast<Return>()) {
+ return ret->value;
+ }
+ return curr->cast<Break>()->value;
+ }),
+ currFlows.end());
}
- void stopValueFlow() {
- removeValueFlow(flows);
- }
+ void stopValueFlow() { removeValueFlow(flows); }
static void clear(RemoveUnusedBrs* self, Expression** currp) {
self->flows.clear();
@@ -186,20 +190,20 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
self->ifStack.push_back(std::move(self->flows));
}
- void visitLoop(Loop* curr) {
- loops.push_back(curr);
- }
+ void visitLoop(Loop* curr) { loops.push_back(curr); }
void optimizeSwitch(Switch* curr) {
// if the final element is the default, we don't need it
while (!curr->targets.empty() && curr->targets.back() == curr->default_) {
curr->targets.pop_back();
}
- // if the first element is the default, we can remove it by shifting everything (which
- // does add a subtraction of a constant, but often that is worth it as the constant can
- // be folded away and/or we remove multiple elements here)
+ // if the first element is the default, we can remove it by shifting
+ // everything (which does add a subtraction of a constant, but often that is
+ // worth it as the constant can be folded away and/or we remove multiple
+ // elements here)
Index removable = 0;
- while (removable < curr->targets.size() && curr->targets[removable] == curr->default_) {
+ while (removable < curr->targets.size() &&
+ curr->targets[removable] == curr->default_) {
removable++;
}
if (removable > 0) {
@@ -208,50 +212,47 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
}
curr->targets.resize(curr->targets.size() - removable);
Builder builder(*getModule());
- curr->condition = builder.makeBinary(SubInt32,
- curr->condition,
- builder.makeConst(Literal(int32_t(removable)))
- );
+ curr->condition =
+ builder.makeBinary(SubInt32,
+ curr->condition,
+ builder.makeConst(Literal(int32_t(removable))));
}
- // when there isn't a value, we can do some trivial optimizations without worrying about
- // the value being executed before the condition
- if (curr->value) return;
+ // when there isn't a value, we can do some trivial optimizations without
+ // worrying about the value being executed before the condition
+ if (curr->value)
+ return;
if (curr->targets.size() == 0) {
// a switch with just a default always goes there
Builder builder(*getModule());
- replaceCurrent(builder.makeSequence(
- builder.makeDrop(curr->condition),
- builder.makeBreak(curr->default_)
- ));
+ replaceCurrent(builder.makeSequence(builder.makeDrop(curr->condition),
+ builder.makeBreak(curr->default_)));
} else if (curr->targets.size() == 1) {
// a switch with two options is basically an if
Builder builder(*getModule());
- replaceCurrent(builder.makeIf(
- curr->condition,
- builder.makeBreak(curr->default_),
- builder.makeBreak(curr->targets.front())
- ));
+ replaceCurrent(builder.makeIf(curr->condition,
+ builder.makeBreak(curr->default_),
+ builder.makeBreak(curr->targets.front())));
} else {
- // there are also some other cases where we want to convert a switch into ifs,
- // especially if the switch is large and we are focusing on size.
- // an especially egregious case is a switch like this:
- // [a b b [..] b b c] with default b
- // (which may be arrived at after we trim away excess default values on both
- // sides). in this case, we really have 3 values in a simple form, so it is the
- // next logical case after handling 1 and 2 values right above here.
- // to optimize this, we must add a local + a bunch of nodes (if*2, tee, eq,
- // get, const, break*3), so the table must be big enough for it to make sense
+ // there are also some other cases where we want to convert a switch into
+ // ifs, especially if the switch is large and we are focusing on size. an
+ // especially egregious case is a switch like this: [a b b [..] b b c]
+ // with default b (which may be arrived at after we trim away excess
+ // default values on both sides). in this case, we really have 3 values in
+ // a simple form, so it is the next logical case after handling 1 and 2
+ // values right above here. to optimize this, we must add a local + a
+ // bunch of nodes (if*2, tee, eq, get, const, break*3), so the table must
+ // be big enough for it to make sense
- // How many targets we need when shrinking. This is literally the size at which
- // the transformation begins to be smaller.
+ // How many targets we need when shrinking. This is literally the size at
+ // which the transformation begins to be smaller.
const uint32_t MIN_SHRINK = 13;
- // How many targets we need when not shrinking, in which case, 2 ifs may be slower,
- // so we do this when the table is ridiculously large for one with just 3 values
- // in it.
+ // How many targets we need when not shrinking, in which case, 2 ifs may
+ // be slower, so we do this when the table is ridiculously large for one
+ // with just 3 values in it.
const uint32_t MIN_GENERAL = 128;
auto shrink = getPassRunner()->options.shrinkLevel > 0;
- if ((curr->targets.size() >= MIN_SHRINK && shrink) ||
+ if ((curr->targets.size() >= MIN_SHRINK && shrink) ||
(curr->targets.size() >= MIN_GENERAL && !shrink)) {
for (Index i = 1; i < curr->targets.size() - 1; i++) {
if (curr->targets[i] != curr->default_) {
@@ -262,25 +263,24 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
Builder builder(*getModule());
auto temp = builder.addVar(getFunction(), i32);
Expression* z;
- replaceCurrent(z = builder.makeIf(
- builder.makeTeeLocal(temp, curr->condition),
- builder.makeIf(
- builder.makeBinary(EqInt32,
- builder.makeGetLocal(temp, i32),
- builder.makeConst(Literal(int32_t(curr->targets.size() - 1)))
- ),
- builder.makeBreak(curr->targets.back()),
- builder.makeBreak(curr->default_)
- ),
- builder.makeBreak(curr->targets.front())
- ));
+ replaceCurrent(
+ z = builder.makeIf(
+ builder.makeTeeLocal(temp, curr->condition),
+ builder.makeIf(builder.makeBinary(EqInt32,
+ builder.makeGetLocal(temp, i32),
+ builder.makeConst(Literal(int32_t(
+ curr->targets.size() - 1)))),
+ builder.makeBreak(curr->targets.back()),
+ builder.makeBreak(curr->default_)),
+ builder.makeBreak(curr->targets.front())));
}
}
}
void visitIf(If* curr) {
if (!curr->ifFalse) {
- // if without an else. try to reduce if (condition) br => br_if (condition)
+ // if without an else. try to reduce if (condition) br => br_if
+ // (condition)
Break* br = curr->ifTrue->dynCast<Break>();
if (br && !br->condition) { // TODO: if there is a condition, join them
if (canTurnIfIntoBrIf(curr->condition, br->value, getPassOptions())) {
@@ -291,9 +291,9 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
}
}
}
- // TODO: if-else can be turned into a br_if as well, if one of the sides is a dead end
- // we handle the case of a returned value to a local.set later down, see
- // visitSetLocal.
+ // TODO: if-else can be turned into a br_if as well, if one of the sides is
+ // a dead end we handle the case of a returned value to a local.set
+ // later down, see visitSetLocal.
}
// override scan to add a pre and a post check task to all nodes
@@ -309,9 +309,11 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
}
self->pushTask(doVisitIf, currp);
if (iff->ifFalse) {
- // we need to join up if-else control flow, and clear after the condition
+ // we need to join up if-else control flow, and clear after the
+ // condition
self->pushTask(scan, &iff->ifFalse);
- self->pushTask(saveIfTrue, currp); // safe the ifTrue flow, we'll join it later
+      // save the ifTrue flow, we'll join it later
+      self->pushTask(saveIfTrue, currp);
}
self->pushTask(scan, &iff->ifTrue);
self->pushTask(clear, currp); // clear all flow after the condition
@@ -342,24 +344,32 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
// helpful, as it shortens the logical loop. it is also good to generate
// an if-else instead of an if, as it might allow an eqz to be removed
// by flipping arms)
- if (!loop->name.is()) return false;
+ if (!loop->name.is())
+ return false;
auto* block = loop->body->dynCast<Block>();
- if (!block) return false;
+ if (!block)
+ return false;
// does the last element break to the top of the loop?
auto& list = block->list;
- if (list.size() <= 1) return false;
+ if (list.size() <= 1)
+ return false;
auto* last = list.back()->dynCast<Break>();
- if (!last || !ExpressionAnalyzer::isSimple(last) || last->name != loop->name) return false;
- // last is a simple break to the top of the loop. if we can conditionalize it,
- // it won't block things from flowing out and not needing breaks to do so.
+ if (!last || !ExpressionAnalyzer::isSimple(last) ||
+ last->name != loop->name)
+ return false;
+ // last is a simple break to the top of the loop. if we can conditionalize
+ // it, it won't block things from flowing out and not needing breaks to do
+ // so.
Index i = list.size() - 2;
Builder builder(*getModule());
while (1) {
auto* curr = list[i];
if (auto* iff = curr->dynCast<If>()) {
- // let's try to move the code going to the top of the loop into the if-else
+ // let's try to move the code going to the top of the loop into the
+ // if-else
if (!iff->ifFalse) {
- // we need the ifTrue to break, so it cannot reach the code we want to move
+ // we need the ifTrue to break, so it cannot reach the code we want to
+ // move
if (iff->ifTrue->type == unreachable) {
iff->ifFalse = builder.stealSlice(block, i + 1, list.size());
iff->finalize();
@@ -367,20 +377,25 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
return true;
}
} else {
- // this is already an if-else. if one side is a dead end, we can append to the other, if
- // there is no returned value to concern us
- assert(!isConcreteType(iff->type)); // can't be, since in the middle of a block
+ // this is already an if-else. if one side is a dead end, we can
+ // append to the other, if there is no returned value to concern us
- // ensures the first node is a block, if it isn't already, and merges in the second,
- // either as a single element or, if a block, by appending to the first block. this
- // keeps the order of operations in place, that is, the appended element will be
- // executed after the first node's elements
- auto blockifyMerge = [&](Expression* any, Expression* append) -> Block* {
+ // can't be, since in the middle of a block
+ assert(!isConcreteType(iff->type));
+
+ // ensures the first node is a block, if it isn't already, and merges
+ // in the second, either as a single element or, if a block, by
+ // appending to the first block. this keeps the order of operations in
+ // place, that is, the appended element will be executed after the
+ // first node's elements
+ auto blockifyMerge = [&](Expression* any,
+ Expression* append) -> Block* {
Block* block = nullptr;
- if (any) block = any->dynCast<Block>();
- // if the first isn't a block, or it's a block with a name (so we might
- // branch to the end, and so can't append to it, we might skip that code!)
- // then make a new block
+ if (any)
+ block = any->dynCast<Block>();
+ // if the first isn't a block, or it's a block with a name (so we
+ // might branch to the end, and so can't append to it, we might skip
+ // that code!) then make a new block
if (!block || block->name.is()) {
block = builder.makeBlock(any);
} else {
@@ -399,12 +414,14 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
};
if (iff->ifTrue->type == unreachable) {
- iff->ifFalse = blockifyMerge(iff->ifFalse, builder.stealSlice(block, i + 1, list.size()));
+ iff->ifFalse = blockifyMerge(
+ iff->ifFalse, builder.stealSlice(block, i + 1, list.size()));
iff->finalize();
block->finalize();
return true;
} else if (iff->ifFalse->type == unreachable) {
- iff->ifTrue = blockifyMerge(iff->ifTrue, builder.stealSlice(block, i + 1, list.size()));
+ iff->ifTrue = blockifyMerge(
+ iff->ifTrue, builder.stealSlice(block, i + 1, list.size()));
iff->finalize();
block->finalize();
return true;
@@ -415,7 +432,8 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
// br_if is similar to if.
if (brIf->condition && !brIf->value && brIf->name != loop->name) {
if (i == list.size() - 2) {
- // there is the br_if, and then the br to the top, so just flip them and the condition
+ // there is the br_if, and then the br to the top, so just flip them
+ // and the condition
brIf->condition = builder.makeUnary(EqZInt32, brIf->condition);
last->name = brIf->name;
brIf->name = loop->name;
@@ -428,11 +446,18 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
// we can convert the br_if to an if. this has a cost, though,
// so only do it if it looks useful, which it definitely is if
// (a) $somewhere is straight out (so the br out vanishes), and
- // (b) this br_if is the only branch to that block (so the block will vanish)
- if (brIf->name == block->name && BranchUtils::BranchSeeker::countNamed(block, block->name) == 1) {
- // note that we could drop the last element here, it is a br we know for sure is removable,
- // but telling stealSlice to steal all to the end is more efficient, it can just truncate.
- list[i] = builder.makeIf(brIf->condition, builder.makeBreak(brIf->name), builder.stealSlice(block, i + 1, list.size()));
+ // (b) this br_if is the only branch to that block (so the block
+ // will vanish)
+ if (brIf->name == block->name &&
+ BranchUtils::BranchSeeker::countNamed(block, block->name) ==
+ 1) {
+ // note that we could drop the last element here, it is a br we
+ // know for sure is removable, but telling stealSlice to steal all
+ // to the end is more efficient, it can just truncate.
+ list[i] =
+ builder.makeIf(brIf->condition,
+ builder.makeBreak(brIf->name),
+ builder.stealSlice(block, i + 1, list.size()));
return true;
}
}
@@ -443,7 +468,8 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
if (EffectAnalyzer(getPassOptions(), curr).branches) {
return false;
}
- if (i == 0) return false;
+ if (i == 0)
+ return false;
i--;
}
}
@@ -453,11 +479,11 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
bool worked = false;
void visitBlock(Block* curr) {
- // If the block has a single child which is a loop, and the block is named,
- // then it is the exit for the loop. It's better to move it into the loop,
- // where it can be better optimized by other passes.
- // Similar logic for ifs: if the block is an exit for the if, we can
- // move the block in, consider for example:
+ // If the block has a single child which is a loop, and the block is
+ // named, then it is the exit for the loop. It's better to move it into
+ // the loop, where it can be better optimized by other passes. Similar
+ // logic for ifs: if the block is an exit for the if, we can move the
+ // block in, consider for example:
// (block $label
// (if (..condition1..)
// (block
@@ -484,27 +510,31 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
worked = true;
} else if (auto* iff = curr->list[0]->dynCast<If>()) {
// The label can't be used in the condition.
- if (BranchUtils::BranchSeeker::countNamed(iff->condition, curr->name) == 0) {
- // We can move the block into either arm, if there are no uses in the other.
+ if (BranchUtils::BranchSeeker::countNamed(iff->condition,
+ curr->name) == 0) {
+ // We can move the block into either arm, if there are no uses in
+ // the other.
Expression** target = nullptr;
- if (!iff->ifFalse ||
- BranchUtils::BranchSeeker::countNamed(iff->ifFalse, curr->name) == 0) {
+ if (!iff->ifFalse || BranchUtils::BranchSeeker::countNamed(
+ iff->ifFalse, curr->name) == 0) {
target = &iff->ifTrue;
- } else if (BranchUtils::BranchSeeker::countNamed(iff->ifTrue, curr->name) == 0) {
+ } else if (BranchUtils::BranchSeeker::countNamed(
+ iff->ifTrue, curr->name) == 0) {
target = &iff->ifFalse;
}
if (target) {
curr->list[0] = *target;
*target = curr;
- // The block used to contain the if, and may have changed type from unreachable
- // to none, for example, if the if has an unreachable condition but the arm
- // is not unreachable.
+ // The block used to contain the if, and may have changed type
+ // from unreachable to none, for example, if the if has an
+ // unreachable condition but the arm is not unreachable.
curr->finalize();
iff->finalize();
replaceCurrent(iff);
worked = true;
- // Note that the type might change, e.g. if the if condition is unreachable
- // but the block that was on the outside had a break.
+ // Note that the type might change, e.g. if the if condition is
+ // unreachable but the block that was on the outside had a
+ // break.
}
}
}
@@ -526,10 +556,12 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
anotherCycle = false;
super::doWalkFunction(func);
assert(ifStack.empty());
- // flows may contain returns, which are flowing out and so can be optimized
+ // flows may contain returns, which are flowing out and so can be
+ // optimized
for (Index i = 0; i < flows.size(); i++) {
auto* flow = (*flows[i])->dynCast<Return>();
- if (!flow) continue;
+ if (!flow)
+ continue;
if (!flow->value) {
// return => nop
ExpressionManipulator::nop(flow);
@@ -541,7 +573,8 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
}
}
flows.clear();
- // optimize loops (we don't do it while tracking flows, as they can interfere)
+ // optimize loops (we don't do it while tracking flows, as they can
+ // interfere)
for (auto* loop : loops) {
anotherCycle |= optimizeLoop(loop);
}
@@ -557,7 +590,8 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
// thread trivial jumps
struct JumpThreader : public ControlFlowWalker<JumpThreader> {
- // map of all value-less breaks and switches going to a block (and not a loop)
+ // map of all value-less breaks and switches going to a block (and not a
+ // loop)
std::map<Block*, std::vector<Expression*>> branchesToBlock;
bool worked = false;
@@ -582,19 +616,25 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
void visitBlock(Block* curr) {
auto& list = curr->list;
if (list.size() == 1 && curr->name.is()) {
- // if this block has just one child, a sub-block, then jumps to the former are jumps to us, really
+ // if this block has just one child, a sub-block, then jumps to the
+ // former are jumps to us, really
if (auto* child = list[0]->dynCast<Block>()) {
- // the two blocks must have the same type for us to update the branch, as otherwise
- // one block may be unreachable and the other concrete, so one might lack a value
- if (child->name.is() && child->name != curr->name && child->type == curr->type) {
+ // the two blocks must have the same type for us to update the
+ // branch, as otherwise one block may be unreachable and the other
+ // concrete, so one might lack a value
+ if (child->name.is() && child->name != curr->name &&
+ child->type == curr->type) {
redirectBranches(child, curr->name);
}
}
} else if (list.size() == 2) {
- // if this block has two children, a child-block and a simple jump, then jumps to child-block can be replaced with jumps to the new target
+ // if this block has two children, a child-block and a simple jump,
+ // then jumps to child-block can be replaced with jumps to the new
+ // target
auto* child = list[0]->dynCast<Block>();
auto* jump = list[1]->dynCast<Break>();
- if (child && child->name.is() && jump && ExpressionAnalyzer::isSimple(jump)) {
+ if (child && child->name.is() && jump &&
+ ExpressionAnalyzer::isSimple(jump)) {
redirectBranches(child, jump->name);
}
}
@@ -607,7 +647,8 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
worked = true;
}
}
- // if the jump is to another block then we can update the list, and maybe push it even more later
+ // if the jump is to another block then we can update the list, and
+ // maybe push it even more later
if (auto* newTarget = findBreakTarget(to)->dynCast<Block>()) {
for (auto* branch : branches) {
branchesToBlock[newTarget].push_back(branch);
@@ -637,18 +678,27 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
FinalOptimizer(PassOptions& passOptions) : passOptions(passOptions) {}
void visitBlock(Block* curr) {
- // if a block has an if br else br, we can un-conditionalize the latter, allowing
- // the if to become a br_if.
- // * note that if not in a block already, then we need to create a block for this, so not useful otherwise
- // * note that this only happens at the end of a block, as code after the if is dead
- // * note that we do this at the end, because un-conditionalizing can interfere with optimizeLoop()ing.
+ // if a block has an if br else br, we can un-conditionalize the latter,
+ // allowing the if to become a br_if.
+ // * note that if not in a block already, then we need to create a block
+ // for this, so not useful otherwise
+ // * note that this only happens at the end of a block, as code after
+ // the if is dead
+ // * note that we do this at the end, because un-conditionalizing can
+ // interfere with optimizeLoop()ing.
auto& list = curr->list;
for (Index i = 0; i < list.size(); i++) {
auto* iff = list[i]->dynCast<If>();
- if (!iff || !iff->ifFalse) continue; // if it lacked an if-false, it would already be a br_if, as that's the easy case
+ if (!iff || !iff->ifFalse)
+ // if it lacked an if-false, it would already be a br_if, as that's
+ // the easy case
+ continue;
auto* ifTrueBreak = iff->ifTrue->dynCast<Break>();
- if (ifTrueBreak && !ifTrueBreak->condition && canTurnIfIntoBrIf(iff->condition, ifTrueBreak->value, passOptions)) {
- // we are an if-else where the ifTrue is a break without a condition, so we can do this
+ if (ifTrueBreak && !ifTrueBreak->condition &&
+ canTurnIfIntoBrIf(
+ iff->condition, ifTrueBreak->value, passOptions)) {
+ // we are an if-else where the ifTrue is a break without a
+ // condition, so we can do this
ifTrueBreak->condition = iff->condition;
ifTrueBreak->finalize();
list[i] = Builder(*getModule()).dropIfConcretelyTyped(ifTrueBreak);
@@ -657,8 +707,11 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
}
// otherwise, perhaps we can flip the if
auto* ifFalseBreak = iff->ifFalse->dynCast<Break>();
- if (ifFalseBreak && !ifFalseBreak->condition && canTurnIfIntoBrIf(iff->condition, ifFalseBreak->value, passOptions)) {
- ifFalseBreak->condition = Builder(*getModule()).makeUnary(EqZInt32, iff->condition);
+ if (ifFalseBreak && !ifFalseBreak->condition &&
+ canTurnIfIntoBrIf(
+ iff->condition, ifFalseBreak->value, passOptions)) {
+ ifFalseBreak->condition =
+ Builder(*getModule()).makeUnary(EqZInt32, iff->condition);
ifFalseBreak->finalize();
list[i] = Builder(*getModule()).dropIfConcretelyTyped(ifFalseBreak);
ExpressionManipulator::spliceIntoBlock(curr, i + 1, iff->ifTrue);
@@ -669,22 +722,26 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
// combine/optimize adjacent br_ifs + a br (maybe _if) right after it
for (Index i = 0; i < list.size() - 1; i++) {
auto* br1 = list[i]->dynCast<Break>();
- // avoid unreachable brs, as they are dead code anyhow, and after merging
- // them the outer scope could need type changes
- if (!br1 || !br1->condition || br1->type == unreachable) continue;
+ // avoid unreachable brs, as they are dead code anyhow, and after
+ // merging them the outer scope could need type changes
+ if (!br1 || !br1->condition || br1->type == unreachable)
+ continue;
assert(!br1->value);
auto* br2 = list[i + 1]->dynCast<Break>();
- if (!br2 || br1->name != br2->name) continue;
+ if (!br2 || br1->name != br2->name)
+ continue;
assert(!br2->value); // same target as previous, which has no value
// a br_if and then a br[_if] with the same target right after it
if (br2->condition) {
if (shrink && br2->type != unreachable) {
- // Join adjacent br_ifs to the same target, making one br_if with
- // a "selectified" condition that executes both.
- if (!EffectAnalyzer(passOptions, br2->condition).hasSideEffects()) {
+ // Join adjacent br_ifs to the same target, making one br_if
+ // with a "selectified" condition that executes both.
+ if (!EffectAnalyzer(passOptions, br2->condition)
+ .hasSideEffects()) {
// it's ok to execute them both, do it
Builder builder(*getModule());
- br1->condition = builder.makeBinary(OrInt32, br1->condition, br2->condition);
+ br1->condition =
+ builder.makeBinary(OrInt32, br1->condition, br2->condition);
ExpressionManipulator::nop(br2);
}
}
@@ -706,12 +763,9 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
if (BranchUtils::getUniqueTargets(curr).size() == 1) {
// This switch has just one target no matter what; replace with a br.
Builder builder(*getModule());
- replaceCurrent(
- builder.makeSequence(
- builder.makeDrop(curr->condition), // might have side effects
- builder.makeBreak(curr->default_, curr->value)
- )
- );
+ replaceCurrent(builder.makeSequence(
+ builder.makeDrop(curr->condition), // might have side effects
+ builder.makeBreak(curr->default_, curr->value)));
}
}
@@ -754,36 +808,32 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
} else {
br = list[0]->dynCast<Break>();
}
- // Check if the br is conditional and goes to the block. It may or may not have
- // a value, depending on if it was dropped or not.
- // If the type is unreachable that means it is not actually reached,
- // which we can ignore.
- if (br && br->condition && br->name == curr->name && br->type != unreachable) {
+ // Check if the br is conditional and goes to the block. It may or may
+ // not have a value, depending on if it was dropped or not. If the
+ // type is unreachable that means it is not actually reached, which we
+ // can ignore.
+ if (br && br->condition && br->name == curr->name &&
+ br->type != unreachable) {
if (BranchUtils::BranchSeeker::countNamed(curr, curr->name) == 1) {
// no other breaks to that name, so we can do this
if (!drop) {
assert(!br->value);
Builder builder(*getModule());
replaceCurrent(builder.makeIf(
- builder.makeUnary(EqZInt32, br->condition),
- curr
- ));
+ builder.makeUnary(EqZInt32, br->condition), curr));
ExpressionManipulator::nop(br);
curr->finalize(curr->type);
} else {
- // If the items we move around have side effects, we can't do this.
+ // If the items we move around have side effects, we can't do
+ // this.
// TODO: we could use a select, in some cases..?
if (!EffectAnalyzer(passOptions, br->value).hasSideEffects() &&
- !EffectAnalyzer(passOptions, br->condition).hasSideEffects()) {
+ !EffectAnalyzer(passOptions, br->condition)
+ .hasSideEffects()) {
ExpressionManipulator::nop(list[0]);
Builder builder(*getModule());
replaceCurrent(
- builder.makeIf(
- br->condition,
- br->value,
- curr
- )
- );
+ builder.makeIf(br->condition, br->value, curr));
}
}
}
@@ -800,20 +850,20 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
// Convert an if into a select, if possible and beneficial to do so.
Select* selectify(If* iff) {
- if (!iff->ifFalse ||
- !isConcreteType(iff->ifTrue->type) ||
+ if (!iff->ifFalse || !isConcreteType(iff->ifTrue->type) ||
!isConcreteType(iff->ifFalse->type)) {
return nullptr;
}
- // This is always helpful for code size, but can be a tradeoff with performance
- // as we run both code paths. So when shrinking we always try to do this, but
- // otherwise must consider more carefully.
+ // This is always helpful for code size, but can be a tradeoff with
+ // performance as we run both code paths. So when shrinking we always
+ // try to do this, but otherwise must consider more carefully.
if (!passOptions.shrinkLevel) {
// Consider the cost of executing all the code unconditionally
const auto MAX_COST = 7;
- auto total = CostAnalyzer(iff->ifTrue).cost +
- CostAnalyzer(iff->ifFalse).cost;
- if (total >= MAX_COST) return nullptr;
+ auto total =
+ CostAnalyzer(iff->ifTrue).cost + CostAnalyzer(iff->ifFalse).cost;
+ if (total >= MAX_COST)
+ return nullptr;
}
// Check if side effects allow this.
EffectAnalyzer condition(passOptions, iff->condition);
@@ -822,11 +872,8 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
if (!ifTrue.hasSideEffects()) {
EffectAnalyzer ifFalse(passOptions, iff->ifFalse);
if (!ifFalse.hasSideEffects()) {
- return Builder(*getModule()).makeSelect(
- iff->condition,
- iff->ifTrue,
- iff->ifFalse
- );
+ return Builder(*getModule())
+ .makeSelect(iff->condition, iff->ifTrue, iff->ifFalse);
}
}
}
@@ -842,8 +889,10 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
}
void optimizeSetIf(Expression** currp) {
- if (optimizeSetIfWithBrArm(currp)) return;
- if (optimizeSetIfWithCopyArm(currp)) return;
+ if (optimizeSetIfWithBrArm(currp))
+ return;
+ if (optimizeSetIfWithCopyArm(currp))
+ return;
}
// If one arm is a br, we prefer a br_if and the set later:
@@ -865,33 +914,33 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
bool optimizeSetIfWithBrArm(Expression** currp) {
auto* set = (*currp)->cast<SetLocal>();
auto* iff = set->value->dynCast<If>();
- if (!iff ||
- !isConcreteType(iff->type) ||
+ if (!iff || !isConcreteType(iff->type) ||
!isConcreteType(iff->condition->type)) {
return false;
}
- auto tryToOptimize = [&](Expression* one, Expression* two, bool flipCondition) {
- if (one->type == unreachable && two->type != unreachable) {
- if (auto* br = one->dynCast<Break>()) {
- if (ExpressionAnalyzer::isSimple(br)) {
- // Wonderful, do it!
- Builder builder(*getModule());
- if (flipCondition) {
- builder.flip(iff);
+ auto tryToOptimize =
+ [&](Expression* one, Expression* two, bool flipCondition) {
+ if (one->type == unreachable && two->type != unreachable) {
+ if (auto* br = one->dynCast<Break>()) {
+ if (ExpressionAnalyzer::isSimple(br)) {
+ // Wonderful, do it!
+ Builder builder(*getModule());
+ if (flipCondition) {
+ builder.flip(iff);
+ }
+ br->condition = iff->condition;
+ br->finalize();
+ set->value = two;
+ auto* block = builder.makeSequence(br, set);
+ *currp = block;
+ // Recurse on the set, which now has a new value.
+ optimizeSetIf(&block->list[1]);
+ return true;
}
- br->condition = iff->condition;
- br->finalize();
- set->value = two;
- auto* block = builder.makeSequence(br, set);
- *currp = block;
- // Recurse on the set, which now has a new value.
- optimizeSetIf(&block->list[1]);
- return true;
}
}
- }
- return false;
- };
+ return false;
+ };
return tryToOptimize(iff->ifTrue, iff->ifFalse, false) ||
tryToOptimize(iff->ifFalse, iff->ifTrue, true);
}
@@ -940,8 +989,7 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
bool optimizeSetIfWithCopyArm(Expression** currp) {
auto* set = (*currp)->cast<SetLocal>();
auto* iff = set->value->dynCast<If>();
- if (!iff ||
- !isConcreteType(iff->type) ||
+ if (!iff || !isConcreteType(iff->type) ||
!isConcreteType(iff->condition->type)) {
return false;
}
@@ -955,7 +1003,8 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
get = nullptr;
}
}
- if (!get) return false;
+ if (!get)
+ return false;
// We can do it!
bool tee = set->isTee();
assert(set->index == get->index);
@@ -969,9 +1018,8 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
if (tee) {
set->setTee(false);
// We need a block too.
- replacement = builder.makeSequence(
- iff,
- get // reuse the get
+ replacement = builder.makeSequence(iff,
+ get // reuse the get
);
}
*currp = replacement;
@@ -996,10 +1044,12 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
// )
// TODO: consider also looking at <= etc. and not just eq
void tablify(Block* block) {
- auto &list = block->list;
- if (list.size() <= 1) return;
+ auto& list = block->list;
+ if (list.size() <= 1)
+ return;
- // Heuristics. These are slightly inspired by the constants from the asm.js backend.
+ // Heuristics. These are slightly inspired by the constants from the
+ // asm.js backend.
// How many br_ifs we need to see to consider doing this
const uint32_t MIN_NUM = 3;
@@ -1009,35 +1059,51 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
// this is high, we allow larger ranges.
const uint32_t NUM_TO_RANGE_FACTOR = 3;
- // check if the input is a proper br_if on an i32.eq of a condition value to a const,
- // and the const is in the proper range, [0-int32_max), to avoid overflow concerns.
- // returns the br_if if so, or nullptr otherwise
- auto getProperBrIf = [](Expression* curr) -> Break*{
+ // check if the input is a proper br_if on an i32.eq of a condition
+ // value to a const, and the const is in the proper range,
+ // [0-int32_max), to avoid overflow concerns. returns the br_if if so,
+ // or nullptr otherwise
+ auto getProperBrIf = [](Expression* curr) -> Break* {
auto* br = curr->dynCast<Break>();
- if (!br) return nullptr;
- if (!br->condition || br->value) return nullptr;
- if (br->type != none) return nullptr; // no value, so can be unreachable or none. ignore unreachable ones, dce will clean it up
+ if (!br)
+ return nullptr;
+ if (!br->condition || br->value)
+ return nullptr;
+ if (br->type != none)
+ // no value, so can be unreachable or none. ignore unreachable ones,
+ // dce will clean it up
+ return nullptr;
auto* binary = br->condition->dynCast<Binary>();
- if (!binary) return nullptr;
- if (binary->op != EqInt32) return nullptr;
+ if (!binary)
+ return nullptr;
+ if (binary->op != EqInt32)
+ return nullptr;
auto* c = binary->right->dynCast<Const>();
- if (!c) return nullptr;
+ if (!c)
+ return nullptr;
uint32_t value = c->value.geti32();
- if (value >= uint32_t(std::numeric_limits<int32_t>::max())) return nullptr;
+ if (value >= uint32_t(std::numeric_limits<int32_t>::max()))
+ return nullptr;
return br;
};
// check if the input is a proper br_if
// and returns the condition if so, or nullptr otherwise
- auto getProperBrIfConditionValue = [&getProperBrIf](Expression* curr) -> Expression* {
+ auto getProperBrIfConditionValue =
+ [&getProperBrIf](Expression* curr) -> Expression* {
auto* br = getProperBrIf(curr);
- if (!br) return nullptr;
+ if (!br)
+ return nullptr;
return br->condition->cast<Binary>()->left;
};
// returns the constant value, as a uint32_t
- auto getProperBrIfConstant = [&getProperBrIf](Expression* curr) -> uint32_t {
- return getProperBrIf(curr)->condition->cast<Binary>()->right->cast<Const>()->value.geti32();
+ auto getProperBrIfConstant =
+ [&getProperBrIf](Expression* curr) -> uint32_t {
+ return getProperBrIf(curr)
+ ->condition->cast<Binary>()
+ ->right->cast<Const>()
+ ->value.geti32();
};
Index start = 0;
while (start < list.size() - 1) {
@@ -1046,23 +1112,25 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
start++;
continue;
}
- // if the condition has side effects, we can't replace many appearances of it
- // with a single one
+ // if the condition has side effects, we can't replace many
+ // appearances of it with a single one
if (EffectAnalyzer(passOptions, conditionValue).hasSideEffects()) {
start++;
continue;
}
- // look for a "run" of br_ifs with all the same conditionValue, and having
- // unique constants (an overlapping constant could be handled, just the first
- // branch is taken, but we can't remove the other br_if (it may be the only
- // branch keeping a block reachable), which may make this bad for code size.
+ // look for a "run" of br_ifs with all the same conditionValue, and
+ // having unique constants (an overlapping constant could be handled,
+ // just the first branch is taken, but we can't remove the other br_if
+ // (it may be the only branch keeping a block reachable), which may
+ // make this bad for code size.
Index end = start + 1;
std::unordered_set<uint32_t> usedConstants;
usedConstants.insert(getProperBrIfConstant(list[start]));
while (end < list.size() &&
- ExpressionAnalyzer::equal(getProperBrIfConditionValue(list[end]),
- conditionValue)) {
- if (!usedConstants.insert(getProperBrIfConstant(list[end])).second) {
+ ExpressionAnalyzer::equal(
+ getProperBrIfConditionValue(list[end]), conditionValue)) {
+ if (!usedConstants.insert(getProperBrIfConstant(list[end]))
+ .second) {
// this constant already appeared
break;
}
@@ -1081,8 +1149,7 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
}
uint32_t range = max - min;
// decision time
- if (range <= MAX_RANGE &&
- range <= num * NUM_TO_RANGE_FACTOR) {
+ if (range <= MAX_RANGE && range <= num * NUM_TO_RANGE_FACTOR) {
// great! let's do this
std::unordered_set<Name> usedNames;
for (Index i = start; i < end; i++) {
@@ -1093,7 +1160,8 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
Index i = 0;
while (1) {
defaultName = "tablify|" + std::to_string(i++);
- if (usedNames.count(defaultName) == 0) break;
+ if (usedNames.count(defaultName) == 0)
+ break;
}
std::vector<Name> table;
for (Index i = start; i < end; i++) {
@@ -1103,26 +1171,21 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
while (table.size() <= index) {
table.push_back(defaultName);
}
- assert(table[index] == defaultName); // we should have made sure there are no overlaps
+ // we should have made sure there are no overlaps
+ assert(table[index] == defaultName);
table[index] = name;
}
Builder builder(*getModule());
// the table and condition are offset by the min
if (min != 0) {
- conditionValue = builder.makeBinary(
- SubInt32,
- conditionValue,
- builder.makeConst(Literal(int32_t(min)))
- );
+ conditionValue =
+ builder.makeBinary(SubInt32,
+ conditionValue,
+ builder.makeConst(Literal(int32_t(min))));
}
list[end - 1] = builder.makeBlock(
defaultName,
- builder.makeSwitch(
- table,
- defaultName,
- conditionValue
- )
- );
+ builder.makeSwitch(table, defaultName, conditionValue));
for (Index i = start; i < end - 1; i++) {
ExpressionManipulator::nop(list[i]);
}
@@ -1145,8 +1208,6 @@ struct RemoveUnusedBrs : public WalkerPass<PostWalker<RemoveUnusedBrs>> {
}
};
-Pass *createRemoveUnusedBrsPass() {
- return new RemoveUnusedBrs();
-}
+Pass* createRemoveUnusedBrsPass() { return new RemoveUnusedBrs(); }
} // namespace wasm
diff --git a/src/passes/RemoveUnusedModuleElements.cpp b/src/passes/RemoveUnusedModuleElements.cpp
index e91fafcba..3338bb756 100644
--- a/src/passes/RemoveUnusedModuleElements.cpp
+++ b/src/passes/RemoveUnusedModuleElements.cpp
@@ -20,21 +20,17 @@
// and remove if unneeded)
//
-
#include <memory>
-#include "wasm.h"
-#include "pass.h"
+#include "asm_v_wasm.h"
#include "ir/module-utils.h"
#include "ir/utils.h"
-#include "asm_v_wasm.h"
+#include "pass.h"
+#include "wasm.h"
namespace wasm {
-enum class ModuleElementKind {
- Function,
- Global
-};
+enum class ModuleElementKind { Function, Global };
typedef std::pair<ModuleElementKind, Name> ModuleElement;
@@ -48,7 +44,8 @@ struct ReachabilityAnalyzer : public PostWalker<ReachabilityAnalyzer> {
bool usesMemory = false;
bool usesTable = false;
- ReachabilityAnalyzer(Module* module, const std::vector<ModuleElement>& roots) : module(module) {
+ ReachabilityAnalyzer(Module* module, const std::vector<ModuleElement>& roots)
+ : module(module) {
queue = roots;
// Globals used in memory/table init expressions are also roots
for (auto& segment : module->memory.segments) {
@@ -83,55 +80,36 @@ struct ReachabilityAnalyzer : public PostWalker<ReachabilityAnalyzer> {
}
void visitCall(Call* curr) {
- if (reachable.count(ModuleElement(ModuleElementKind::Function, curr->target)) == 0) {
+ if (reachable.count(
+ ModuleElement(ModuleElementKind::Function, curr->target)) == 0) {
queue.emplace_back(ModuleElementKind::Function, curr->target);
}
}
- void visitCallIndirect(CallIndirect* curr) {
- usesTable = true;
- }
+ void visitCallIndirect(CallIndirect* curr) { usesTable = true; }
void visitGetGlobal(GetGlobal* curr) {
- if (reachable.count(ModuleElement(ModuleElementKind::Global, curr->name)) == 0) {
+ if (reachable.count(ModuleElement(ModuleElementKind::Global, curr->name)) ==
+ 0) {
queue.emplace_back(ModuleElementKind::Global, curr->name);
}
}
void visitSetGlobal(SetGlobal* curr) {
- if (reachable.count(ModuleElement(ModuleElementKind::Global, curr->name)) == 0) {
+ if (reachable.count(ModuleElement(ModuleElementKind::Global, curr->name)) ==
+ 0) {
queue.emplace_back(ModuleElementKind::Global, curr->name);
}
}
- void visitLoad(Load* curr) {
- usesMemory = true;
- }
- void visitStore(Store* curr) {
- usesMemory = true;
- }
- void visitAtomicCmpxchg(AtomicCmpxchg* curr) {
- usesMemory = true;
- }
- void visitAtomicRMW(AtomicRMW* curr) {
- usesMemory = true;
- }
- void visitAtomicWait(AtomicWait* curr) {
- usesMemory = true;
- }
- void visitAtomicNotify(AtomicNotify* curr) {
- usesMemory = true;
- }
- void visitMemoryInit(MemoryInit* curr) {
- usesMemory = true;
- }
- void visitDataDrop(DataDrop* curr) {
- usesMemory = true;
- }
- void visitMemoryCopy(MemoryCopy* curr) {
- usesMemory = true;
- }
- void visitMemoryFill(MemoryFill* curr) {
- usesMemory = true;
- }
+ void visitLoad(Load* curr) { usesMemory = true; }
+ void visitStore(Store* curr) { usesMemory = true; }
+ void visitAtomicCmpxchg(AtomicCmpxchg* curr) { usesMemory = true; }
+ void visitAtomicRMW(AtomicRMW* curr) { usesMemory = true; }
+ void visitAtomicWait(AtomicWait* curr) { usesMemory = true; }
+ void visitAtomicNotify(AtomicNotify* curr) { usesMemory = true; }
+ void visitMemoryInit(MemoryInit* curr) { usesMemory = true; }
+ void visitDataDrop(DataDrop* curr) { usesMemory = true; }
+ void visitMemoryCopy(MemoryCopy* curr) { usesMemory = true; }
+ void visitMemoryFill(MemoryFill* curr) { usesMemory = true; }
void visitHost(Host* curr) {
if (curr->op == CurrentMemory || curr->op == GrowMemory) {
usesMemory = true;
@@ -156,15 +134,14 @@ struct FunctionTypeAnalyzer : public PostWalker<FunctionTypeAnalyzer> {
}
}
- void visitCallIndirect(CallIndirect* curr) {
- indirectCalls.push_back(curr);
- }
+ void visitCallIndirect(CallIndirect* curr) { indirectCalls.push_back(curr); }
};
struct RemoveUnusedModuleElements : public Pass {
bool rootAllFunctions;
- RemoveUnusedModuleElements(bool rootAllFunctions) : rootAllFunctions(rootAllFunctions) {}
+ RemoveUnusedModuleElements(bool rootAllFunctions)
+ : rootAllFunctions(rootAllFunctions) {}
void run(PassRunner* runner, Module* module) override {
optimizeGlobalsAndFunctions(module);
@@ -223,21 +200,32 @@ struct RemoveUnusedModuleElements : public Pass {
// Remove unreachable elements.
{
auto& v = module->functions;
- v.erase(std::remove_if(v.begin(), v.end(), [&](const std::unique_ptr<Function>& curr) {
- return analyzer.reachable.count(ModuleElement(ModuleElementKind::Function, curr->name)) == 0;
- }), v.end());
+ v.erase(std::remove_if(v.begin(),
+ v.end(),
+ [&](const std::unique_ptr<Function>& curr) {
+ return analyzer.reachable.count(ModuleElement(
+ ModuleElementKind::Function,
+ curr->name)) == 0;
+ }),
+ v.end());
}
{
auto& v = module->globals;
- v.erase(std::remove_if(v.begin(), v.end(), [&](const std::unique_ptr<Global>& curr) {
- return analyzer.reachable.count(ModuleElement(ModuleElementKind::Global, curr->name)) == 0;
- }), v.end());
+ v.erase(std::remove_if(v.begin(),
+ v.end(),
+ [&](const std::unique_ptr<Global>& curr) {
+ return analyzer.reachable.count(
+ ModuleElement(ModuleElementKind::Global,
+ curr->name)) == 0;
+ }),
+ v.end());
}
module->updateMaps();
// Handle the memory and table
if (!exportsMemory && !analyzer.usesMemory) {
if (!importsMemory) {
- // The memory is unobservable to the outside, we can remove the contents.
+ // The memory is unobservable to the outside, we can remove the
+ // contents.
module->memory.segments.clear();
}
if (module->memory.segments.empty()) {
@@ -268,7 +256,8 @@ struct RemoveUnusedModuleElements : public Pass {
std::unordered_map<std::string, FunctionType*> canonicals;
std::unordered_set<FunctionType*> needed;
auto canonicalize = [&](Name name) {
- if (!name.is()) return name;
+ if (!name.is())
+ return name;
FunctionType* type = module->getFunctionType(name);
auto sig = getSig(type);
auto iter = canonicals.find(sig);
@@ -291,9 +280,13 @@ struct RemoveUnusedModuleElements : public Pass {
call->fullType = canonicalize(call->fullType);
}
// remove no-longer used types
- module->functionTypes.erase(std::remove_if(module->functionTypes.begin(), module->functionTypes.end(), [&needed](std::unique_ptr<FunctionType>& type) {
- return needed.count(type.get()) == 0;
- }), module->functionTypes.end());
+ module->functionTypes.erase(
+ std::remove_if(module->functionTypes.begin(),
+ module->functionTypes.end(),
+ [&needed](std::unique_ptr<FunctionType>& type) {
+ return needed.count(type.get()) == 0;
+ }),
+ module->functionTypes.end());
module->updateMaps();
}
};
diff --git a/src/passes/RemoveUnusedNames.cpp b/src/passes/RemoveUnusedNames.cpp
index e5aaf5509..86db53b0c 100644
--- a/src/passes/RemoveUnusedNames.cpp
+++ b/src/passes/RemoveUnusedNames.cpp
@@ -19,8 +19,8 @@
// merge names when possible (by merging their blocks)
//
-#include <wasm.h>
#include <pass.h>
+#include <wasm.h>
namespace wasm {
@@ -33,11 +33,9 @@ struct RemoveUnusedNames : public WalkerPass<PostWalker<RemoveUnusedNames>> {
// a parent block, we know if it was branched to
std::map<Name, std::set<Expression*>> branchesSeen;
- void visitBreak(Break *curr) {
- branchesSeen[curr->name].insert(curr);
- }
+ void visitBreak(Break* curr) { branchesSeen[curr->name].insert(curr); }
- void visitSwitch(Switch *curr) {
+ void visitSwitch(Switch* curr) {
for (auto name : curr->targets) {
branchesSeen[name].insert(curr);
}
@@ -54,20 +52,24 @@ struct RemoveUnusedNames : public WalkerPass<PostWalker<RemoveUnusedNames>> {
}
}
- void visitBlock(Block *curr) {
+ void visitBlock(Block* curr) {
if (curr->name.is() && curr->list.size() == 1) {
auto* child = curr->list[0]->dynCast<Block>();
if (child && child->name.is() && child->type == curr->type) {
- // we have just one child, this block, so breaking out of it goes to the same place as breaking out of us, we just need one name (and block)
+ // we have just one child, this block, so breaking out of it goes to the
+ // same place as breaking out of us, we just need one name (and block)
auto& branches = branchesSeen[curr->name];
for (auto* branch : branches) {
if (Break* br = branch->dynCast<Break>()) {
- if (br->name == curr->name) br->name = child->name;
+ if (br->name == curr->name)
+ br->name = child->name;
} else if (Switch* sw = branch->dynCast<Switch>()) {
for (auto& target : sw->targets) {
- if (target == curr->name) target = child->name;
+ if (target == curr->name)
+ target = child->name;
}
- if (sw->default_ == curr->name) sw->default_ = child->name;
+ if (sw->default_ == curr->name)
+ sw->default_ = child->name;
} else {
WASM_UNREACHABLE();
}
@@ -79,20 +81,16 @@ struct RemoveUnusedNames : public WalkerPass<PostWalker<RemoveUnusedNames>> {
handleBreakTarget(curr->name);
}
- void visitLoop(Loop *curr) {
+ void visitLoop(Loop* curr) {
handleBreakTarget(curr->name);
if (!curr->name.is()) {
replaceCurrent(curr->body);
}
}
- void visitFunction(Function *curr) {
- assert(branchesSeen.empty());
- }
+ void visitFunction(Function* curr) { assert(branchesSeen.empty()); }
};
-Pass *createRemoveUnusedNamesPass() {
- return new RemoveUnusedNames();
-}
+Pass* createRemoveUnusedNamesPass() { return new RemoveUnusedNames(); }
} // namespace wasm
diff --git a/src/passes/ReorderFunctions.cpp b/src/passes/ReorderFunctions.cpp
index 5312ee90a..5cd70c20f 100644
--- a/src/passes/ReorderFunctions.cpp
+++ b/src/passes/ReorderFunctions.cpp
@@ -22,16 +22,15 @@
// This may incur a tradeoff, though, as while it reduces binary size, it may
// increase gzip size. This might be because the new order has the functions in
// a less beneficial position for compression, that is, mutually-compressible
-// functions are no longer together (when they were before, in the original order,
-// the has some natural tendency one way or the other). TODO: investigate
+// functions are no longer together (when they were before, in the original
+// order, the has some natural tendency one way or the other). TODO: investigate
// similarity ordering here.
//
-
#include <memory>
-#include <wasm.h>
#include <pass.h>
+#include <wasm.h>
namespace wasm {
@@ -42,12 +41,11 @@ struct CallCountScanner : public WalkerPass<PostWalker<CallCountScanner>> {
CallCountScanner(NameCountMap* counts) : counts(counts) {}
- CallCountScanner* create() override {
- return new CallCountScanner(counts);
- }
+ CallCountScanner* create() override { return new CallCountScanner(counts); }
void visitCall(Call* curr) {
- assert(counts->count(curr->target) > 0); // can't add a new element in parallel
+ // can't add a new element in parallel
+ assert(counts->count(curr->target) > 0);
(*counts)[curr->target]++;
}
@@ -58,7 +56,8 @@ private:
struct ReorderFunctions : public Pass {
void run(PassRunner* runner, Module* module) override {
NameCountMap counts;
- // fill in info, as we operate on it in parallel (each function to its own entry)
+ // fill in info, as we operate on it in parallel (each function to its own
+ // entry)
for (auto& func : module->functions) {
counts[func->name];
}
@@ -82,19 +81,18 @@ struct ReorderFunctions : public Pass {
}
}
// sort
- std::sort(module->functions.begin(), module->functions.end(), [&counts](
- const std::unique_ptr<Function>& a,
- const std::unique_ptr<Function>& b) -> bool {
- if (counts[a->name] == counts[b->name]) {
- return strcmp(a->name.str, b->name.str) > 0;
- }
- return counts[a->name] > counts[b->name];
- });
+ std::sort(module->functions.begin(),
+ module->functions.end(),
+ [&counts](const std::unique_ptr<Function>& a,
+ const std::unique_ptr<Function>& b) -> bool {
+ if (counts[a->name] == counts[b->name]) {
+ return strcmp(a->name.str, b->name.str) > 0;
+ }
+ return counts[a->name] > counts[b->name];
+ });
}
};
-Pass *createReorderFunctionsPass() {
- return new ReorderFunctions();
-}
+Pass* createReorderFunctionsPass() { return new ReorderFunctions(); }
} // namespace wasm
diff --git a/src/passes/ReorderLocals.cpp b/src/passes/ReorderLocals.cpp
index fe4f775e9..45796c0aa 100644
--- a/src/passes/ReorderLocals.cpp
+++ b/src/passes/ReorderLocals.cpp
@@ -24,8 +24,8 @@
#include <memory>
-#include <wasm.h>
#include <pass.h>
+#include <wasm.h>
namespace wasm {
@@ -35,27 +35,32 @@ struct ReorderLocals : public WalkerPass<PostWalker<ReorderLocals>> {
Pass* create() override { return new ReorderLocals; }
std::map<Index, Index> counts; // local => times it is used
- std::map<Index, Index> firstUses; // local => index in the list of which local is first seen
+ // local => index in the list of which local is first seen
+ std::map<Index, Index> firstUses;
- void visitFunction(Function *curr) {
+ void visitFunction(Function* curr) {
Index num = curr->getNumLocals();
std::vector<Index> newToOld;
for (size_t i = 0; i < num; i++) {
newToOld.push_back(i);
}
// sort, keeping params in front (where they will not be moved)
- sort(newToOld.begin(), newToOld.end(), [this, curr](Index a, Index b) -> bool {
- if (curr->isParam(a) && !curr->isParam(b)) return true;
- if (curr->isParam(b) && !curr->isParam(a)) return false;
- if (curr->isParam(b) && curr->isParam(a)) {
- return a < b;
- }
- if (counts[a] == counts[b]) {
- if (counts[a] == 0) return a < b;
- return firstUses[a] < firstUses[b];
- }
- return counts[a] > counts[b];
- });
+ sort(
+ newToOld.begin(), newToOld.end(), [this, curr](Index a, Index b) -> bool {
+ if (curr->isParam(a) && !curr->isParam(b))
+ return true;
+ if (curr->isParam(b) && !curr->isParam(a))
+ return false;
+ if (curr->isParam(b) && curr->isParam(a)) {
+ return a < b;
+ }
+ if (counts[a] == counts[b]) {
+ if (counts[a] == 0)
+ return a < b;
+ return firstUses[a] < firstUses[b];
+ }
+ return counts[a] > counts[b];
+ });
// sorting left params in front, perhaps slightly reordered. verify and fix.
for (size_t i = 0; i < curr->params.size(); i++) {
assert(newToOld[i] < curr->params.size());
@@ -90,15 +95,16 @@ struct ReorderLocals : public WalkerPass<PostWalker<ReorderLocals>> {
Function* func;
std::vector<Index>& oldToNew;
- ReIndexer(Function* func, std::vector<Index>& oldToNew) : func(func), oldToNew(oldToNew) {}
+ ReIndexer(Function* func, std::vector<Index>& oldToNew)
+ : func(func), oldToNew(oldToNew) {}
- void visitGetLocal(GetLocal *curr) {
+ void visitGetLocal(GetLocal* curr) {
if (func->isVar(curr->index)) {
curr->index = oldToNew[curr->index];
}
}
- void visitSetLocal(SetLocal *curr) {
+ void visitSetLocal(SetLocal* curr) {
if (func->isVar(curr->index)) {
curr->index = oldToNew[curr->index];
}
@@ -120,14 +126,14 @@ struct ReorderLocals : public WalkerPass<PostWalker<ReorderLocals>> {
}
}
- void visitGetLocal(GetLocal *curr) {
+ void visitGetLocal(GetLocal* curr) {
counts[curr->index]++;
if (firstUses.count(curr->index) == 0) {
firstUses[curr->index] = firstUses.size();
}
}
- void visitSetLocal(SetLocal *curr) {
+ void visitSetLocal(SetLocal* curr) {
counts[curr->index]++;
if (firstUses.count(curr->index) == 0) {
firstUses[curr->index] = firstUses.size();
@@ -135,8 +141,6 @@ struct ReorderLocals : public WalkerPass<PostWalker<ReorderLocals>> {
}
};
-Pass *createReorderLocalsPass() {
- return new ReorderLocals();
-}
+Pass* createReorderLocalsPass() { return new ReorderLocals(); }
} // namespace wasm
diff --git a/src/passes/SSAify.cpp b/src/passes/SSAify.cpp
index 1ed3b976f..2f0f9439c 100644
--- a/src/passes/SSAify.cpp
+++ b/src/passes/SSAify.cpp
@@ -50,13 +50,13 @@
#include <iterator>
-#include "wasm.h"
-#include "pass.h"
-#include "wasm-builder.h"
-#include "support/permutations.h"
#include "ir/find_all.h"
#include "ir/literal-utils.h"
#include "ir/local-graph.h"
+#include "pass.h"
+#include "support/permutations.h"
+#include "wasm-builder.h"
+#include "wasm.h"
namespace wasm {
@@ -77,9 +77,11 @@ struct SSAify : public Pass {
Module* module;
Function* func;
- std::vector<Expression*> functionPrepends; // things we add to the function prologue
+ // things we add to the function prologue
+ std::vector<Expression*> functionPrepends;
- void runOnFunction(PassRunner* runner, Module* module_, Function* func_) override {
+ void
+ runOnFunction(PassRunner* runner, Module* module_, Function* func_) override {
module = module_;
func = func_;
LocalGraph graph(func);
@@ -87,7 +89,8 @@ struct SSAify : public Pass {
graph.computeSSAIndexes();
// create new local indexes, one for each set
createNewIndexes(graph);
- // we now know the sets for each get, and can compute get indexes and handle phis
+ // we now know the sets for each get, and can compute get indexes and handle
+ // phis
computeGetsAndPhis(graph);
// add prepends to function
addPrepends();
@@ -96,9 +99,9 @@ struct SSAify : public Pass {
void createNewIndexes(LocalGraph& graph) {
FindAll<SetLocal> sets(func->body);
for (auto* set : sets.list) {
- // Indexes already in SSA form do not need to be modified - there is already
- // just one set for that index. Otherwise, use a new index, unless merges
- // are disallowed.
+ // Indexes already in SSA form do not need to be modified - there is
+ // already just one set for that index. Otherwise, use a new index, unless
+ // merges are disallowed.
if (!graph.isSSA(set->index) && (allowMerges || !hasMerges(set, graph))) {
set->index = addLocal(func->getLocalType(set->index));
}
@@ -132,12 +135,14 @@ struct SSAify : public Pass {
// leave it, it's fine
} else {
// zero it out
- (*graph.locations[get]) = LiteralUtils::makeZero(get->type, *module);
+ (*graph.locations[get]) =
+ LiteralUtils::makeZero(get->type, *module);
}
}
continue;
}
- if (!allowMerges) continue;
+ if (!allowMerges)
+ continue;
// more than 1 set, need a phi: a new local written to at each of the sets
auto new_ = addLocal(get->type);
auto old = get->index;
@@ -148,10 +153,7 @@ struct SSAify : public Pass {
if (set) {
// a set exists, just add a tee of its value
auto* value = set->value;
- auto* tee = builder.makeTeeLocal(
- new_,
- value
- );
+ auto* tee = builder.makeTeeLocal(new_, value);
set->value = tee;
// the value may have been something we tracked the location
// of. if so, update that, since we moved it into the tee
@@ -165,9 +167,7 @@ struct SSAify : public Pass {
// we add a set with the proper
// param value at the beginning of the function
auto* set = builder.makeSetLocal(
- new_,
- builder.makeGetLocal(old, func->getLocalType(old))
- );
+ new_, builder.makeGetLocal(old, func->getLocalType(old)));
functionPrepends.push_back(set);
} else {
// this is a zero init, so we don't need to do anything actually
@@ -177,9 +177,7 @@ struct SSAify : public Pass {
}
}
- Index addLocal(Type type) {
- return Builder::addVar(func, type);
- }
+ Index addLocal(Type type) { return Builder::addVar(func, type); }
void addPrepends() {
if (functionPrepends.size() > 0) {
@@ -195,13 +193,8 @@ struct SSAify : public Pass {
}
};
-Pass* createSSAifyPass() {
- return new SSAify(true);
-}
+Pass* createSSAifyPass() { return new SSAify(true); }
-Pass* createSSAifyNoMergePass() {
- return new SSAify(false);
-}
+Pass* createSSAifyNoMergePass() { return new SSAify(false); }
} // namespace wasm
-
diff --git a/src/passes/SafeHeap.cpp b/src/passes/SafeHeap.cpp
index 738a201ee..a293ee5e9 100644
--- a/src/passes/SafeHeap.cpp
+++ b/src/passes/SafeHeap.cpp
@@ -20,21 +20,21 @@
// top of sbrk()-addressible memory, and incorrect alignment notation.
//
-#include "wasm.h"
-#include "pass.h"
#include "asm_v_wasm.h"
#include "asmjs/shared-constants.h"
-#include "wasm-builder.h"
#include "ir/bits.h"
#include "ir/function-type-utils.h"
#include "ir/import-utils.h"
#include "ir/load-utils.h"
+#include "pass.h"
+#include "wasm-builder.h"
+#include "wasm.h"
namespace wasm {
-const Name DYNAMICTOP_PTR_IMPORT("DYNAMICTOP_PTR"),
- SEGFAULT_IMPORT("segfault"),
- ALIGNFAULT_IMPORT("alignfault");
+const Name DYNAMICTOP_PTR_IMPORT("DYNAMICTOP_PTR");
+const Name SEGFAULT_IMPORT("segfault");
+const Name ALIGNFAULT_IMPORT("alignfault");
static Name getLoadName(Load* curr) {
std::string ret = "SAFE_HEAP_LOAD_";
@@ -69,34 +69,30 @@ struct AccessInstrumenter : public WalkerPass<PostWalker<AccessInstrumenter>> {
AccessInstrumenter* create() override { return new AccessInstrumenter; }
void visitLoad(Load* curr) {
- if (curr->type == unreachable) return;
+ if (curr->type == unreachable)
+ return;
Builder builder(*getModule());
replaceCurrent(
- builder.makeCall(
- getLoadName(curr),
- {
- curr->ptr,
- builder.makeConst(Literal(int32_t(curr->offset))),
- },
- curr->type
- )
- );
+ builder.makeCall(getLoadName(curr),
+ {
+ curr->ptr,
+ builder.makeConst(Literal(int32_t(curr->offset))),
+ },
+ curr->type));
}
void visitStore(Store* curr) {
- if (curr->type == unreachable) return;
+ if (curr->type == unreachable)
+ return;
Builder builder(*getModule());
replaceCurrent(
- builder.makeCall(
- getStoreName(curr),
- {
- curr->ptr,
- builder.makeConst(Literal(int32_t(curr->offset))),
- curr->value,
- },
- none
- )
- );
+ builder.makeCall(getStoreName(curr),
+ {
+ curr->ptr,
+ builder.makeConst(Literal(int32_t(curr->offset))),
+ curr->value,
+ },
+ none));
}
};
@@ -156,32 +152,35 @@ struct SafeHeap : public Pass {
}
}
- bool isPossibleAtomicOperation(Index align, Index bytes, bool shared, Type type) {
+ bool
+ isPossibleAtomicOperation(Index align, Index bytes, bool shared, Type type) {
return align == bytes && shared && isIntegerType(type);
}
void addGlobals(Module* module, FeatureSet features) {
// load funcs
Load load;
- for (auto type : { i32, i64, f32, f64, v128 }) {
- if (type == v128 && !features.hasSIMD()) continue;
+ for (auto type : {i32, i64, f32, f64, v128}) {
+ if (type == v128 && !features.hasSIMD())
+ continue;
load.type = type;
- for (Index bytes : { 1, 2, 4, 8, 16 }) {
+ for (Index bytes : {1, 2, 4, 8, 16}) {
load.bytes = bytes;
- if (bytes > getTypeSize(type) ||
- (type == f32 && bytes != 4) ||
- (type == f64 && bytes != 8) ||
- (type == v128 && bytes != 16)) continue;
- for (auto signed_ : { true, false }) {
+ if (bytes > getTypeSize(type) || (type == f32 && bytes != 4) ||
+ (type == f64 && bytes != 8) || (type == v128 && bytes != 16))
+ continue;
+ for (auto signed_ : {true, false}) {
load.signed_ = signed_;
- if (isFloatType(type) && signed_) continue;
- for (Index align : { 1, 2, 4, 8, 16 }) {
+ if (isFloatType(type) && signed_)
+ continue;
+ for (Index align : {1, 2, 4, 8, 16}) {
load.align = align;
- if (align > bytes) continue;
- for (auto isAtomic : { true, false }) {
+ if (align > bytes)
+ continue;
+ for (auto isAtomic : {true, false}) {
load.isAtomic = isAtomic;
- if (isAtomic &&
- !isPossibleAtomicOperation(align, bytes, module->memory.shared, type)) {
+ if (isAtomic && !isPossibleAtomicOperation(
+ align, bytes, module->memory.shared, type)) {
continue;
}
addLoadFunc(load, module);
@@ -192,23 +191,26 @@ struct SafeHeap : public Pass {
}
// store funcs
Store store;
- for (auto valueType : { i32, i64, f32, f64, v128 }) {
- if (valueType == v128 && !features.hasSIMD()) continue;
+ for (auto valueType : {i32, i64, f32, f64, v128}) {
+ if (valueType == v128 && !features.hasSIMD())
+ continue;
store.valueType = valueType;
store.type = none;
- for (Index bytes : { 1, 2, 4, 8, 16 }) {
+ for (Index bytes : {1, 2, 4, 8, 16}) {
store.bytes = bytes;
if (bytes > getTypeSize(valueType) ||
(valueType == f32 && bytes != 4) ||
(valueType == f64 && bytes != 8) ||
- (valueType == v128 && bytes != 16)) continue;
- for (Index align : { 1, 2, 4, 8, 16 }) {
+ (valueType == v128 && bytes != 16))
+ continue;
+ for (Index align : {1, 2, 4, 8, 16}) {
store.align = align;
- if (align > bytes) continue;
- for (auto isAtomic : { true, false }) {
+ if (align > bytes)
+ continue;
+ for (auto isAtomic : {true, false}) {
store.isAtomic = isAtomic;
- if (isAtomic &&
- !isPossibleAtomicOperation(align, bytes, module->memory.shared, valueType)) {
+ if (isAtomic && !isPossibleAtomicOperation(
+ align, bytes, module->memory.shared, valueType)) {
continue;
}
addStoreFunc(store, module);
@@ -221,34 +223,25 @@ struct SafeHeap : public Pass {
// creates a function for a particular style of load
void addLoadFunc(Load style, Module* module) {
auto name = getLoadName(&style);
- if (module->getFunctionOrNull(name)) return;
+ if (module->getFunctionOrNull(name))
+ return;
auto* func = new Function;
func->name = name;
func->params.push_back(i32); // pointer
func->params.push_back(i32); // offset
- func->vars.push_back(i32); // pointer + offset
+ func->vars.push_back(i32); // pointer + offset
func->result = style.type;
Builder builder(*module);
auto* block = builder.makeBlock();
- block->list.push_back(
- builder.makeSetLocal(
- 2,
- builder.makeBinary(
- AddInt32,
- builder.makeGetLocal(0, i32),
- builder.makeGetLocal(1, i32)
- )
- )
- );
+ block->list.push_back(builder.makeSetLocal(
+ 2,
+ builder.makeBinary(
+ AddInt32, builder.makeGetLocal(0, i32), builder.makeGetLocal(1, i32))));
// check for reading past valid memory: if pointer + offset + bytes
- block->list.push_back(
- makeBoundsCheck(style.type, builder, 2, style.bytes)
- );
+ block->list.push_back(makeBoundsCheck(style.type, builder, 2, style.bytes));
// check proper alignment
if (style.align > 1) {
- block->list.push_back(
- makeAlignCheck(style.align, builder, 2)
- );
+ block->list.push_back(makeAlignCheck(style.align, builder, 2));
}
// do the load
auto* load = module->allocator.alloc<Load>();
@@ -269,35 +262,27 @@ struct SafeHeap : public Pass {
// creates a function for a particular type of store
void addStoreFunc(Store style, Module* module) {
auto name = getStoreName(&style);
- if (module->getFunctionOrNull(name)) return;
+ if (module->getFunctionOrNull(name))
+ return;
auto* func = new Function;
func->name = name;
- func->params.push_back(i32); // pointer
- func->params.push_back(i32); // offset
+ func->params.push_back(i32); // pointer
+ func->params.push_back(i32); // offset
func->params.push_back(style.valueType); // value
- func->vars.push_back(i32); // pointer + offset
+ func->vars.push_back(i32); // pointer + offset
func->result = none;
Builder builder(*module);
auto* block = builder.makeBlock();
- block->list.push_back(
- builder.makeSetLocal(
- 3,
- builder.makeBinary(
- AddInt32,
- builder.makeGetLocal(0, i32),
- builder.makeGetLocal(1, i32)
- )
- )
- );
+ block->list.push_back(builder.makeSetLocal(
+ 3,
+ builder.makeBinary(
+ AddInt32, builder.makeGetLocal(0, i32), builder.makeGetLocal(1, i32))));
// check for reading past valid memory: if pointer + offset + bytes
block->list.push_back(
- makeBoundsCheck(style.valueType, builder, 3, style.bytes)
- );
+ makeBoundsCheck(style.valueType, builder, 3, style.bytes));
// check proper alignment
if (style.align > 1) {
- block->list.push_back(
- makeAlignCheck(style.align, builder, 3)
- );
+ block->list.push_back(makeAlignCheck(style.align, builder, 3));
}
// do the store
auto* store = module->allocator.alloc<Store>();
@@ -312,45 +297,33 @@ struct SafeHeap : public Pass {
Expression* makeAlignCheck(Address align, Builder& builder, Index local) {
return builder.makeIf(
- builder.makeBinary(
- AndInt32,
- builder.makeGetLocal(local, i32),
- builder.makeConst(Literal(int32_t(align - 1)))
- ),
- builder.makeCall(alignfault, {}, none)
- );
+ builder.makeBinary(AndInt32,
+ builder.makeGetLocal(local, i32),
+ builder.makeConst(Literal(int32_t(align - 1)))),
+ builder.makeCall(alignfault, {}, none));
}
- Expression* makeBoundsCheck(Type type, Builder& builder, Index local, Index bytes) {
+ Expression*
+ makeBoundsCheck(Type type, Builder& builder, Index local, Index bytes) {
auto upperOp = options.lowMemoryUnused ? LtUInt32 : EqInt32;
auto upperBound = options.lowMemoryUnused ? PassOptions::LowMemoryBound : 0;
return builder.makeIf(
builder.makeBinary(
OrInt32,
- builder.makeBinary(
- upperOp,
- builder.makeGetLocal(local, i32),
- builder.makeConst(Literal(int32_t(upperBound)))
- ),
+ builder.makeBinary(upperOp,
+ builder.makeGetLocal(local, i32),
+ builder.makeConst(Literal(int32_t(upperBound)))),
builder.makeBinary(
GtUInt32,
- builder.makeBinary(
- AddInt32,
- builder.makeGetLocal(local, i32),
- builder.makeConst(Literal(int32_t(bytes)))
- ),
- builder.makeLoad(4, false, 0, 4,
- builder.makeGetGlobal(dynamicTopPtr, i32), i32
- )
- )
- ),
- builder.makeCall(segfault, {}, none)
- );
+ builder.makeBinary(AddInt32,
+ builder.makeGetLocal(local, i32),
+ builder.makeConst(Literal(int32_t(bytes)))),
+ builder.makeLoad(
+ 4, false, 0, 4, builder.makeGetGlobal(dynamicTopPtr, i32), i32))),
+ builder.makeCall(segfault, {}, none));
}
};
-Pass *createSafeHeapPass() {
- return new SafeHeap();
-}
+Pass* createSafeHeapPass() { return new SafeHeap(); }
} // namespace wasm
diff --git a/src/passes/SimplifyLocals.cpp b/src/passes/SimplifyLocals.cpp
index 8007d92ff..f366c7085 100644
--- a/src/passes/SimplifyLocals.cpp
+++ b/src/passes/SimplifyLocals.cpp
@@ -46,33 +46,40 @@
// removing redundant locals.
//
-#include <wasm.h>
-#include <wasm-builder.h>
-#include <wasm-traversal.h>
-#include <pass.h>
+#include "ir/equivalent_sets.h"
#include <ir/branch-utils.h>
-#include <ir/local-utils.h>
#include <ir/effects.h>
-#include "ir/equivalent_sets.h"
#include <ir/find_all.h>
+#include <ir/local-utils.h>
#include <ir/manipulation.h>
+#include <pass.h>
+#include <wasm-builder.h>
+#include <wasm-traversal.h>
+#include <wasm.h>
namespace wasm {
// Main class
-template<bool allowTee = true, bool allowStructure = true, bool allowNesting = true>
-struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<allowTee, allowStructure, allowNesting>>> {
+template<bool allowTee = true,
+ bool allowStructure = true,
+ bool allowNesting = true>
+struct SimplifyLocals
+ : public WalkerPass<LinearExecutionWalker<
+ SimplifyLocals<allowTee, allowStructure, allowNesting>>> {
bool isFunctionParallel() override { return true; }
- Pass* create() override { return new SimplifyLocals<allowTee, allowStructure, allowNesting>(); }
+ Pass* create() override {
+ return new SimplifyLocals<allowTee, allowStructure, allowNesting>();
+ }
// information for a local.set we can sink
struct SinkableInfo {
Expression** item;
EffectAnalyzer effects;
- SinkableInfo(Expression** item, PassOptions& passOptions) : item(item), effects(passOptions, *item) {}
+ SinkableInfo(Expression** item, PassOptions& passOptions)
+ : item(item), effects(passOptions, *item) {}
};
// a list of sinkables in a linear execution trace
@@ -112,7 +119,9 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
// local => # of local.gets for it
GetLocalCounter getCounter;
- static void doNoteNonLinear(SimplifyLocals<allowTee, allowStructure, allowNesting>* self, Expression** currp) {
+ static void
+ doNoteNonLinear(SimplifyLocals<allowTee, allowStructure, allowNesting>* self,
+ Expression** currp) {
// Main processing.
auto* curr = *currp;
if (curr->is<Break>()) {
@@ -121,12 +130,14 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
// value means the block already has a return value
self->unoptimizableBlocks.insert(br->name);
} else {
- self->blockBreaks[br->name].push_back({ currp, std::move(self->sinkables) });
+ self->blockBreaks[br->name].push_back(
+ {currp, std::move(self->sinkables)});
}
} else if (curr->is<Block>()) {
return; // handled in visitBlock
} else if (curr->is<If>()) {
- assert(!curr->cast<If>()->ifFalse); // if-elses are handled by doNoteIf* methods
+ assert(!curr->cast<If>()
+ ->ifFalse); // if-elses are handled by doNoteIf* methods
} else if (curr->is<Switch>()) {
auto* sw = curr->cast<Switch>();
auto targets = BranchUtils::getUniqueTargets(sw);
@@ -138,13 +149,17 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
self->sinkables.clear();
}
- static void doNoteIfCondition(SimplifyLocals<allowTee, allowStructure, allowNesting>* self, Expression** currp) {
+ static void doNoteIfCondition(
+ SimplifyLocals<allowTee, allowStructure, allowNesting>* self,
+ Expression** currp) {
// we processed the condition of this if-else, and now control flow branches
// into either the true or the false sides
self->sinkables.clear();
}
- static void doNoteIfTrue(SimplifyLocals<allowTee, allowStructure, allowNesting>* self, Expression** currp) {
+ static void
+ doNoteIfTrue(SimplifyLocals<allowTee, allowStructure, allowNesting>* self,
+ Expression** currp) {
auto* iff = (*currp)->dynCast<If>();
if (iff->ifFalse) {
// We processed the ifTrue side of this if-else, save it on the stack.
@@ -159,7 +174,9 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
}
}
- static void doNoteIfFalse(SimplifyLocals<allowTee, allowStructure, allowNesting>* self, Expression** currp) {
+ static void
+ doNoteIfFalse(SimplifyLocals<allowTee, allowStructure, allowNesting>* self,
+ Expression** currp) {
// we processed the ifFalse side of this if-else, we can now try to
// mere with the ifTrue side and optimize a return value, if possible
auto* iff = (*currp)->cast<If>();
@@ -199,13 +216,16 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
}
}
- void visitGetLocal(GetLocal *curr) {
+ void visitGetLocal(GetLocal* curr) {
auto found = sinkables.find(curr->index);
if (found != sinkables.end()) {
- auto* set = (*found->second.item)->template cast<SetLocal>(); // the set we may be sinking
+ auto* set = (*found->second.item)
+ ->template cast<SetLocal>(); // the set we may be sinking
bool oneUse = firstCycle || getCounter.num[curr->index] == 1;
- auto* get = set->value->template dynCast<GetLocal>(); // the set's value may be a get (i.e., the set is a copy)
- // if nesting is not allowed, and this might cause nesting, check if the sink would cause such a thing
+ // the set's value may be a get (i.e., the set is a copy)
+ auto* get = set->value->template dynCast<GetLocal>();
+ // if nesting is not allowed, and this might cause nesting, check if the
+ // sink would cause such a thing
if (!allowNesting) {
// a get is always ok to sink
if (!get) {
@@ -213,8 +233,8 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
assert(expressionStack[expressionStack.size() - 1] == curr);
auto* parent = expressionStack[expressionStack.size() - 2];
bool parentIsSet = parent->template is<SetLocal>();
- // if the parent of this get is a set, we can sink into the set's value,
- // it would not be nested.
+ // if the parent of this get is a set, we can sink into the set's
+ // value, it would not be nested.
if (!parentIsSet) {
return;
}
@@ -273,7 +293,9 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
// a sink would cause nesting
ExpressionStack expressionStack;
- static void visitPre(SimplifyLocals<allowTee, allowStructure, allowNesting>* self, Expression** currp) {
+ static void
+ visitPre(SimplifyLocals<allowTee, allowStructure, allowNesting>* self,
+ Expression** currp) {
Expression* curr = *currp;
EffectAnalyzer effects(self->getPassOptions());
@@ -286,14 +308,16 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
}
}
- static void visitPost(SimplifyLocals<allowTee, allowStructure, allowNesting>* self, Expression** currp) {
+ static void
+ visitPost(SimplifyLocals<allowTee, allowStructure, allowNesting>* self,
+ Expression** currp) {
// perform main SetLocal processing here, since we may be the result of
// replaceCurrent, i.e., the visitor was not called.
auto* set = (*currp)->dynCast<SetLocal>();
if (set) {
- // if we see a set that was already potentially-sinkable, then the previous
- // store is dead, leave just the value
+ // if we see a set that was already potentially-sinkable, then the
+ // previous store is dead, leave just the value
auto found = self->sinkables.find(set->index);
if (found != self->sinkables.end()) {
auto* previous = (*found->second.item)->template cast<SetLocal>();
@@ -315,7 +339,8 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
if (set && self->canSink(set)) {
Index index = set->index;
assert(self->sinkables.count(index) == 0);
- self->sinkables.emplace(std::make_pair(index, SinkableInfo(currp, self->getPassOptions())));
+ self->sinkables.emplace(
+ std::make_pair(index, SinkableInfo(currp, self->getPassOptions())));
}
if (!allowNesting) {
@@ -325,9 +350,12 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
bool canSink(SetLocal* set) {
// we can never move a tee
- if (set->isTee()) return false;
- // if in the first cycle, or not allowing tees, then we cannot sink if >1 use as that would make a tee
- if ((firstCycle || !allowTee) && getCounter.num[set->index] > 1) return false;
+ if (set->isTee())
+ return false;
+ // if in the first cycle, or not allowing tees, then we cannot sink if >1
+ // use as that would make a tee
+ if ((firstCycle || !allowTee) && getCounter.num[set->index] > 1)
+ return false;
return true;
}
@@ -338,13 +366,16 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
void optimizeLoopReturn(Loop* loop) {
// If there is a sinkable thing in an eligible loop, we can optimize
// it in a trivial way to the outside of the loop.
- if (loop->type != none) return;
- if (sinkables.empty()) return;
+ if (loop->type != none)
+ return;
+ if (sinkables.empty())
+ return;
Index goodIndex = sinkables.begin()->first;
// Ensure we have a place to write the return values for, if not, we
// need another cycle.
auto* block = loop->body->dynCast<Block>();
- if (!block || block->name.is() || block->list.size() == 0 || !block->list.back()->is<Nop>()) {
+ if (!block || block->name.is() || block->list.size() == 0 ||
+ !block->list.back()->is<Nop>()) {
loopsToEnlarge.push_back(loop);
return;
}
@@ -371,8 +402,12 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
}
auto breaks = std::move(blockBreaks[block->name]);
blockBreaks.erase(block->name);
- if (breaks.size() == 0) return; // block has no branches TODO we might optimize trivial stuff here too
- assert(!(*breaks[0].brp)->template cast<Break>()->value); // block does not already have a return value (if one break has one, they all do)
+ if (breaks.size() == 0)
+ // block has no branches TODO we might optimize trivial stuff here too
+ return;
+ // block does not already have a return value (if one break has one, they
+ // all do)
+ assert(!(*breaks[0].brp)->template cast<Break>()->value);
// look for a local.set that is present in them all
bool found = false;
Index sharedIndex = -1;
@@ -391,7 +426,8 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
break;
}
}
- if (!found) return;
+ if (!found)
+ return;
// If one of our brs is a br_if, then we will give it a value. since
// the value executes before the condition, it is dangerous if we are
// moving code out of the condition,
@@ -446,7 +482,8 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
blocksToEnlarge.push_back(block);
return;
}
- // move block local.set's value to the end, in return position, and nop the set
+ // move block local.set's value to the end, in return position, and nop the
+ // set
auto* blockSetLocalPointer = sinkables.at(sharedIndex).item;
auto* value = (*blockSetLocalPointer)->template cast<SetLocal>()->value;
block->list[block->list.size() - 1] = value;
@@ -458,13 +495,16 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
auto* brp = breaks[j].brp;
auto* br = (*brp)->template cast<Break>();
assert(!br->value);
- // if the break is conditional, then we must set the value here - if the break is not reached, we must still have the new value in the local
+ // if the break is conditional, then we must set the value here - if the
+ // break is not reached, we must still have the new value in the local
auto* set = (*breakSetLocalPointer)->template cast<SetLocal>();
if (br->condition) {
br->value = set;
set->setTee(true);
- *breakSetLocalPointer = this->getModule()->allocator.template alloc<Nop>();
- // in addition, as this is a conditional br that now has a value, it now returns a value, so it must be dropped
+ *breakSetLocalPointer =
+ this->getModule()->allocator.template alloc<Nop>();
+ // in addition, as this is a conditional br that now has a value, it now
+ // returns a value, so it must be dropped
br->finalize();
*brp = Builder(*this->getModule()).makeDrop(br);
} else {
@@ -473,7 +513,8 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
}
}
// finally, create a local.set on the block itself
- auto* newSetLocal = Builder(*this->getModule()).makeSetLocal(sharedIndex, block);
+ auto* newSetLocal =
+ Builder(*this->getModule()).makeSetLocal(sharedIndex, block);
this->replaceCurrent(newSetLocal);
sinkables.clear();
anotherCycle = true;
@@ -484,7 +525,8 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
assert(iff->ifFalse);
// if this if already has a result, or is unreachable code, we have
// nothing to do
- if (iff->type != none) return;
+ if (iff->type != none)
+ return;
// We now have the sinkables from both sides of the if, and can look
// for something to sink. That is either a shared index on both sides,
// *or* if one side is unreachable, we can sink anything from the other,
@@ -527,35 +569,42 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
}
}
}
- if (!found) return;
+ if (!found)
+ return;
// great, we can optimize!
// ensure we have a place to write the return values for, if not, we
// need another cycle
- auto* ifTrueBlock = iff->ifTrue->dynCast<Block>();
+ auto* ifTrueBlock = iff->ifTrue->dynCast<Block>();
if (iff->ifTrue->type != unreachable) {
- if (!ifTrueBlock || ifTrueBlock->name.is() || ifTrueBlock->list.size() == 0 || !ifTrueBlock->list.back()->is<Nop>()) {
+ if (!ifTrueBlock || ifTrueBlock->name.is() ||
+ ifTrueBlock->list.size() == 0 ||
+ !ifTrueBlock->list.back()->is<Nop>()) {
ifsToEnlarge.push_back(iff);
return;
}
}
auto* ifFalseBlock = iff->ifFalse->dynCast<Block>();
if (iff->ifFalse->type != unreachable) {
- if (!ifFalseBlock || ifFalseBlock->name.is() || ifFalseBlock->list.size() == 0 || !ifFalseBlock->list.back()->is<Nop>()) {
+ if (!ifFalseBlock || ifFalseBlock->name.is() ||
+ ifFalseBlock->list.size() == 0 ||
+ !ifFalseBlock->list.back()->is<Nop>()) {
ifsToEnlarge.push_back(iff);
return;
}
}
// all set, go
if (iff->ifTrue->type != unreachable) {
- auto *ifTrueItem = ifTrue.at(goodIndex).item;
- ifTrueBlock->list[ifTrueBlock->list.size() - 1] = (*ifTrueItem)->template cast<SetLocal>()->value;
+ auto* ifTrueItem = ifTrue.at(goodIndex).item;
+ ifTrueBlock->list[ifTrueBlock->list.size() - 1] =
+ (*ifTrueItem)->template cast<SetLocal>()->value;
ExpressionManipulator::nop(*ifTrueItem);
ifTrueBlock->finalize();
assert(ifTrueBlock->type != none);
}
if (iff->ifFalse->type != unreachable) {
- auto *ifFalseItem = ifFalse.at(goodIndex).item;
- ifFalseBlock->list[ifFalseBlock->list.size() - 1] = (*ifFalseItem)->template cast<SetLocal>()->value;
+ auto* ifFalseItem = ifFalse.at(goodIndex).item;
+ ifFalseBlock->list[ifFalseBlock->list.size() - 1] =
+ (*ifFalseItem)->template cast<SetLocal>()->value;
ExpressionManipulator::nop(*ifFalseItem);
ifFalseBlock->finalize();
assert(ifFalseBlock->type != none);
@@ -563,7 +612,8 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
iff->finalize(); // update type
assert(iff->type != none);
// finally, create a local.set on the iff itself
- auto* newSetLocal = Builder(*this->getModule()).makeSetLocal(goodIndex, iff);
+ auto* newSetLocal =
+ Builder(*this->getModule()).makeSetLocal(goodIndex, iff);
*currp = newSetLocal;
anotherCycle = true;
}
@@ -592,14 +642,17 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
// arm into a one-sided if.
void optimizeIfReturn(If* iff, Expression** currp) {
// If this if is unreachable code, we have nothing to do.
- if (iff->type != none || iff->ifTrue->type != none) return;
+ if (iff->type != none || iff->ifTrue->type != none)
+ return;
// Anything sinkable is good for us.
- if (sinkables.empty()) return;
+ if (sinkables.empty())
+ return;
Index goodIndex = sinkables.begin()->first;
// Ensure we have a place to write the return values for, if not, we
// need another cycle.
auto* ifTrueBlock = iff->ifTrue->dynCast<Block>();
- if (!ifTrueBlock || ifTrueBlock->name.is() || ifTrueBlock->list.size() == 0 || !ifTrueBlock->list.back()->is<Nop>()) {
+ if (!ifTrueBlock || ifTrueBlock->name.is() ||
+ ifTrueBlock->list.size() == 0 || !ifTrueBlock->list.back()->is<Nop>()) {
ifsToEnlarge.push_back(iff);
return;
}
@@ -625,7 +678,8 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
}
// override scan to add a pre and a post check task to all nodes
- static void scan(SimplifyLocals<allowTee, allowStructure, allowNesting>* self, Expression** currp) {
+ static void scan(SimplifyLocals<allowTee, allowStructure, allowNesting>* self,
+ Expression** currp) {
self->pushTask(visitPost, currp);
auto* curr = *currp;
@@ -633,15 +687,29 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
if (auto* iff = curr->dynCast<If>()) {
// handle if in a special manner, using the ifStack for if-elses etc.
if (iff->ifFalse) {
- self->pushTask(SimplifyLocals<allowTee, allowStructure, allowNesting>::doNoteIfFalse, currp);
- self->pushTask(SimplifyLocals<allowTee, allowStructure, allowNesting>::scan, &iff->ifFalse);
+ self->pushTask(
+ SimplifyLocals<allowTee, allowStructure, allowNesting>::doNoteIfFalse,
+ currp);
+ self->pushTask(
+ SimplifyLocals<allowTee, allowStructure, allowNesting>::scan,
+ &iff->ifFalse);
}
- self->pushTask(SimplifyLocals<allowTee, allowStructure, allowNesting>::doNoteIfTrue, currp);
- self->pushTask(SimplifyLocals<allowTee, allowStructure, allowNesting>::scan, &iff->ifTrue);
- self->pushTask(SimplifyLocals<allowTee, allowStructure, allowNesting>::doNoteIfCondition, currp);
- self->pushTask(SimplifyLocals<allowTee, allowStructure, allowNesting>::scan, &iff->condition);
+ self->pushTask(
+ SimplifyLocals<allowTee, allowStructure, allowNesting>::doNoteIfTrue,
+ currp);
+ self->pushTask(
+ SimplifyLocals<allowTee, allowStructure, allowNesting>::scan,
+ &iff->ifTrue);
+ self->pushTask(SimplifyLocals<allowTee, allowStructure, allowNesting>::
+ doNoteIfCondition,
+ currp);
+ self->pushTask(
+ SimplifyLocals<allowTee, allowStructure, allowNesting>::scan,
+ &iff->condition);
} else {
- WalkerPass<LinearExecutionWalker<SimplifyLocals<allowTee, allowStructure, allowNesting>>>::scan(self, currp);
+ WalkerPass<LinearExecutionWalker<
+ SimplifyLocals<allowTee, allowStructure, allowNesting>>>::scan(self,
+ currp);
}
self->pushTask(visitPre, currp);
@@ -685,11 +753,14 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
bool runMainOptimizations(Function* func) {
anotherCycle = false;
- WalkerPass<LinearExecutionWalker<SimplifyLocals<allowTee, allowStructure, allowNesting>>>::doWalkFunction(func);
+ WalkerPass<LinearExecutionWalker<
+ SimplifyLocals<allowTee, allowStructure, allowNesting>>>::
+ doWalkFunction(func);
// enlarge blocks that were marked, for the next round
if (blocksToEnlarge.size() > 0) {
for (auto* block : blocksToEnlarge) {
- block->list.push_back(this->getModule()->allocator.template alloc<Nop>());
+ block->list.push_back(
+ this->getModule()->allocator.template alloc<Nop>());
}
blocksToEnlarge.clear();
anotherCycle = true;
@@ -697,16 +768,22 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
// enlarge ifs that were marked, for the next round
if (ifsToEnlarge.size() > 0) {
for (auto* iff : ifsToEnlarge) {
- auto ifTrue = Builder(*this->getModule()).blockifyWithName(iff->ifTrue, Name());
+ auto ifTrue =
+ Builder(*this->getModule()).blockifyWithName(iff->ifTrue, Name());
iff->ifTrue = ifTrue;
- if (ifTrue->list.size() == 0 || !ifTrue->list.back()->template is<Nop>()) {
- ifTrue->list.push_back(this->getModule()->allocator.template alloc<Nop>());
+ if (ifTrue->list.size() == 0 ||
+ !ifTrue->list.back()->template is<Nop>()) {
+ ifTrue->list.push_back(
+ this->getModule()->allocator.template alloc<Nop>());
}
if (iff->ifFalse) {
- auto ifFalse = Builder(*this->getModule()).blockifyWithName(iff->ifFalse, Name());
+ auto ifFalse =
+ Builder(*this->getModule()).blockifyWithName(iff->ifFalse, Name());
iff->ifFalse = ifFalse;
- if (ifFalse->list.size() == 0 || !ifFalse->list.back()->template is<Nop>()) {
- ifFalse->list.push_back(this->getModule()->allocator.template alloc<Nop>());
+ if (ifFalse->list.size() == 0 ||
+ !ifFalse->list.back()->template is<Nop>()) {
+ ifFalse->list.push_back(
+ this->getModule()->allocator.template alloc<Nop>());
}
}
}
@@ -716,10 +793,13 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
// enlarge loops that were marked, for the next round
if (loopsToEnlarge.size() > 0) {
for (auto* loop : loopsToEnlarge) {
- auto block = Builder(*this->getModule()).blockifyWithName(loop->body, Name());
+ auto block =
+ Builder(*this->getModule()).blockifyWithName(loop->body, Name());
loop->body = block;
- if (block->list.size() == 0 || !block->list.back()->template is<Nop>()) {
- block->list.push_back(this->getModule()->allocator.template alloc<Nop>());
+ if (block->list.size() == 0 ||
+ !block->list.back()->template is<Nop>()) {
+ block->list.push_back(
+ this->getModule()->allocator.template alloc<Nop>());
}
}
loopsToEnlarge.clear();
@@ -750,7 +830,8 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
// )
// )
// will inhibit us creating an if return value.
- struct EquivalentOptimizer : public LinearExecutionWalker<EquivalentOptimizer> {
+ struct EquivalentOptimizer
+ : public LinearExecutionWalker<EquivalentOptimizer> {
std::vector<Index>* numGetLocals;
bool removeEquivalentSets;
Module* module;
@@ -760,13 +841,14 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
// We track locals containing the same value.
EquivalentSets equivalences;
- static void doNoteNonLinear(EquivalentOptimizer* self, Expression** currp) {
- // TODO do this across non-linear paths too, in coalesce-locals perhaps? (would inhibit structure
- // opts here, though.
+ static void doNoteNonLinear(EquivalentOptimizer* self,
+ Expression** currp) {
+ // TODO do this across non-linear paths too, in coalesce-locals perhaps?
+ // (would inhibit structure opts here, though.
self->equivalences.clear();
}
- void visitSetLocal(SetLocal *curr) {
+ void visitSetLocal(SetLocal* curr) {
// Remove trivial copies, even through a tee
auto* value = curr->value;
while (auto* subSet = value->dynCast<SetLocal>()) {
@@ -794,14 +876,14 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
}
}
- void visitGetLocal(GetLocal *curr) {
+ void visitGetLocal(GetLocal* curr) {
// Canonicalize gets: if some are equivalent, then we can pick more
// then one, and other passes may benefit from having more uniformity.
if (auto* set = equivalences.getEquivalents(curr->index)) {
// Pick the index with the most uses - maximizing the chance to
// lower one's uses to zero.
- // Helper method that returns the # of gets *ignoring the current get*,
- // as we want to see what is best overall, treating this one as
+ // Helper method that returns the # of gets *ignoring the current
+ // get*, as we want to see what is best overall, treating this one as
// to be decided upon.
auto getNumGetsIgnoringCurr = [&](Index index) {
auto ret = (*numGetLocals)[index];
@@ -821,8 +903,8 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
assert(best != Index(-1));
// Due to ordering, the best index may be different from us but have
// the same # of locals - make sure we actually improve.
- if (best != curr->index &&
- getNumGetsIgnoringCurr(best) > getNumGetsIgnoringCurr(curr->index)) {
+ if (best != curr->index && getNumGetsIgnoringCurr(best) >
+ getNumGetsIgnoringCurr(curr->index)) {
// Update the get counts.
(*numGetLocals)[best]++;
assert((*numGetLocals)[curr->index] >= 1);
@@ -850,23 +932,21 @@ struct SimplifyLocals : public WalkerPass<LinearExecutionWalker<SimplifyLocals<a
}
};
-Pass *createSimplifyLocalsPass() {
- return new SimplifyLocals<true, true>();
-}
+Pass* createSimplifyLocalsPass() { return new SimplifyLocals<true, true>(); }
-Pass *createSimplifyLocalsNoTeePass() {
+Pass* createSimplifyLocalsNoTeePass() {
return new SimplifyLocals<false, true>();
}
-Pass *createSimplifyLocalsNoStructurePass() {
+Pass* createSimplifyLocalsNoStructurePass() {
return new SimplifyLocals<true, false>();
}
-Pass *createSimplifyLocalsNoTeeNoStructurePass() {
+Pass* createSimplifyLocalsNoTeeNoStructurePass() {
return new SimplifyLocals<false, false>();
}
-Pass *createSimplifyLocalsNoNestingPass() {
+Pass* createSimplifyLocalsNoNestingPass() {
return new SimplifyLocals<false, false, false>();
}
diff --git a/src/passes/Souperify.cpp b/src/passes/Souperify.cpp
index f6700a698..1cc3037fe 100644
--- a/src/passes/Souperify.cpp
+++ b/src/passes/Souperify.cpp
@@ -35,15 +35,15 @@
// directly, without the need for *-propagate techniques.
//
-#include "wasm.h"
-#include "pass.h"
-#include "wasm-builder.h"
+#include "dataflow/graph.h"
+#include "dataflow/node.h"
+#include "dataflow/utils.h"
#include "ir/flat.h"
#include "ir/local-graph.h"
#include "ir/utils.h"
-#include "dataflow/node.h"
-#include "dataflow/graph.h"
-#include "dataflow/utils.h"
+#include "pass.h"
+#include "wasm-builder.h"
+#include "wasm.h"
namespace wasm {
@@ -62,7 +62,8 @@ struct UseFinder {
// (or rather, their values) that contain a get that uses that value.
// There may also be non-set uses of the value, for example in a drop
// or a return. We represent those with a nullptr, meaning "other".
- std::vector<Expression*> getUses(Expression* origin, Graph& graph, LocalGraph& localGraph) {
+ std::vector<Expression*>
+ getUses(Expression* origin, Graph& graph, LocalGraph& localGraph) {
if (debug() >= 2) {
std::cout << "getUses\n" << origin << '\n';
}
@@ -80,9 +81,13 @@ struct UseFinder {
// There may be loops of sets with copies between them.
std::unordered_set<SetLocal*> seenSets;
- void addSetUses(SetLocal* set, Graph& graph, LocalGraph& localGraph, std::vector<Expression*>& ret) {
+ void addSetUses(SetLocal* set,
+ Graph& graph,
+ LocalGraph& localGraph,
+ std::vector<Expression*>& ret) {
// If already handled, nothing to do here.
- if (seenSets.count(set)) return;
+ if (seenSets.count(set))
+ return;
seenSets.insert(set);
// Find all the uses of that set.
auto& gets = localGraph.setInfluences[set];
@@ -165,7 +170,12 @@ struct Trace {
// The local information graph. Used to check if a node has external uses.
LocalGraph& localGraph;
- Trace(Graph& graph, Node* toInfer, std::unordered_set<Node*>& excludeAsChildren, LocalGraph& localGraph) : graph(graph), toInfer(toInfer), excludeAsChildren(excludeAsChildren), localGraph(localGraph) {
+ Trace(Graph& graph,
+ Node* toInfer,
+ std::unordered_set<Node*>& excludeAsChildren,
+ LocalGraph& localGraph)
+ : graph(graph), toInfer(toInfer), excludeAsChildren(excludeAsChildren),
+ localGraph(localGraph) {
if (debug() >= 2) {
std::cout << "\nstart a trace (in " << graph.func->name << ")\n";
}
@@ -180,7 +190,8 @@ struct Trace {
}
// Pull in all the dependencies, starting from the value itself.
add(toInfer, 0);
- if (bad) return;
+ if (bad)
+ return;
// If we are trivial before adding pcs, we are still trivial, and
// can ignore this.
auto sizeBeforePathConditions = nodes.size();
@@ -238,7 +249,8 @@ struct Trace {
// If we've gone too deep, emit a var instead.
// Do the same if this is a node we should exclude from traces.
if (depth >= depthLimit || nodes.size() >= totalLimit ||
- (node != toInfer && excludeAsChildren.find(node) != excludeAsChildren.end())) {
+ (node != toInfer &&
+ excludeAsChildren.find(node) != excludeAsChildren.end())) {
auto type = node->getWasmType();
assert(isConcreteType(type));
auto* var = Node::makeVar(type);
@@ -293,7 +305,8 @@ struct Trace {
bad = true;
return nullptr;
}
- default: WASM_UNREACHABLE();
+ default:
+ WASM_UNREACHABLE();
}
// Assert on no cycles
assert(addedNodes.find(node) == addedNodes.end());
@@ -319,7 +332,9 @@ struct Trace {
// curr is a child of parent, and parent has a Block which we are
// give as 'node'. Add a path condition for reaching the child.
- void addPathTo(Expression* parent, Expression* curr, std::vector<Node*> conditions) {
+ void addPathTo(Expression* parent,
+ Expression* curr,
+ std::vector<Node*> conditions) {
if (auto* iff = parent->dynCast<If>()) {
Index index;
if (curr == iff->ifTrue) {
@@ -340,9 +355,7 @@ struct Trace {
}
}
- bool isBad() {
- return bad;
- }
+ bool isBad() { return bad; }
static bool isTraceable(Node* node) {
if (!node->origin) {
@@ -372,7 +385,8 @@ struct Trace {
}
}
for (auto& node : nodes) {
- if (node == toInfer) continue;
+ if (node == toInfer)
+ continue;
if (auto* origin = node->origin) {
auto uses = UseFinder().getUses(origin, graph, localGraph);
for (auto* use : uses) {
@@ -407,7 +421,8 @@ struct Printer {
std::cout << "\n; start LHS (in " << graph.func->name << ")\n";
// Index the nodes.
for (auto* node : trace.nodes) {
- if (!node->isCond()) { // pcs and blockpcs are not instructions and do not need to be indexed
+ // pcs and blockpcs are not instructions and do not need to be indexed
+ if (!node->isCond()) {
auto index = indexing.size();
indexing[node] = index;
}
@@ -440,7 +455,8 @@ struct Printer {
assert(node);
switch (node->type) {
case Node::Type::Var: {
- std::cout << "%" << indexing[node] << ":" << printType(node->wasmType) << " = var";
+ std::cout << "%" << indexing[node] << ":" << printType(node->wasmType)
+ << " = var";
break; // nothing more to add
}
case Node::Type::Expr: {
@@ -464,18 +480,21 @@ struct Printer {
break;
}
case Node::Type::Cond: {
- std::cout << "blockpc %" << indexing[node->getValue(0)] << ' ' << node->index << ' ';
+ std::cout << "blockpc %" << indexing[node->getValue(0)] << ' '
+ << node->index << ' ';
printInternal(node->getValue(1));
std::cout << " 1:i1";
break;
}
case Node::Type::Block: {
- std::cout << "%" << indexing[node] << " = block " << node->values.size();
+ std::cout << "%" << indexing[node] << " = block "
+ << node->values.size();
break;
}
case Node::Type::Zext: {
auto* child = node->getValue(0);
- std::cout << "%" << indexing[node] << ':' << printType(child->getWasmType());
+ std::cout << "%" << indexing[node] << ':'
+ << printType(child->getWasmType());
std::cout << " = zext ";
printInternal(child);
break;
@@ -484,10 +503,12 @@ struct Printer {
std::cout << "!!!BAD!!!";
WASM_UNREACHABLE();
}
- default: WASM_UNREACHABLE();
+ default:
+ WASM_UNREACHABLE();
}
if (node->isExpr() || node->isPhi()) {
- if (node->origin != trace.toInfer->origin && trace.hasExternalUses.count(node) > 0) {
+ if (node->origin != trace.toInfer->origin &&
+ trace.hasExternalUses.count(node) > 0) {
std::cout << " (hasExternalUses)";
printedHasExternalUses = true;
}
@@ -523,12 +544,19 @@ struct Printer {
} else if (auto* unary = curr->dynCast<Unary>()) {
switch (unary->op) {
case ClzInt32:
- case ClzInt64: std::cout << "ctlz"; break;
+ case ClzInt64:
+ std::cout << "ctlz";
+ break;
case CtzInt32:
- case CtzInt64: std::cout << "cttz"; break;
+ case CtzInt64:
+ std::cout << "cttz";
+ break;
case PopcntInt32:
- case PopcntInt64: std::cout << "ctpop"; break;
- default: WASM_UNREACHABLE();
+ case PopcntInt64:
+ std::cout << "ctpop";
+ break;
+ default:
+ WASM_UNREACHABLE();
}
std::cout << ' ';
auto* value = node->getValue(0);
@@ -536,48 +564,91 @@ struct Printer {
} else if (auto* binary = curr->dynCast<Binary>()) {
switch (binary->op) {
case AddInt32:
- case AddInt64: std::cout << "add"; break;
+ case AddInt64:
+ std::cout << "add";
+ break;
case SubInt32:
- case SubInt64: std::cout << "sub"; break;
+ case SubInt64:
+ std::cout << "sub";
+ break;
case MulInt32:
- case MulInt64: std::cout << "mul"; break;
+ case MulInt64:
+ std::cout << "mul";
+ break;
case DivSInt32:
- case DivSInt64: std::cout << "sdiv"; break;
+ case DivSInt64:
+ std::cout << "sdiv";
+ break;
case DivUInt32:
- case DivUInt64: std::cout << "udiv"; break;
+ case DivUInt64:
+ std::cout << "udiv";
+ break;
case RemSInt32:
- case RemSInt64: std::cout << "srem"; break;
+ case RemSInt64:
+ std::cout << "srem";
+ break;
case RemUInt32:
- case RemUInt64: std::cout << "urem"; break;
+ case RemUInt64:
+ std::cout << "urem";
+ break;
case AndInt32:
- case AndInt64: std::cout << "and"; break;
+ case AndInt64:
+ std::cout << "and";
+ break;
case OrInt32:
- case OrInt64: std::cout << "or"; break;
+ case OrInt64:
+ std::cout << "or";
+ break;
case XorInt32:
- case XorInt64: std::cout << "xor"; break;
+ case XorInt64:
+ std::cout << "xor";
+ break;
case ShlInt32:
- case ShlInt64: std::cout << "shl"; break;
+ case ShlInt64:
+ std::cout << "shl";
+ break;
case ShrUInt32:
- case ShrUInt64: std::cout << "lshr"; break;
+ case ShrUInt64:
+ std::cout << "lshr";
+ break;
case ShrSInt32:
- case ShrSInt64: std::cout << "ashr"; break;
+ case ShrSInt64:
+ std::cout << "ashr";
+ break;
case RotLInt32:
- case RotLInt64: std::cout << "rotl"; break;
+ case RotLInt64:
+ std::cout << "rotl";
+ break;
case RotRInt32:
- case RotRInt64: std::cout << "rotr"; break;
+ case RotRInt64:
+ std::cout << "rotr";
+ break;
case EqInt32:
- case EqInt64: std::cout << "eq"; break;
+ case EqInt64:
+ std::cout << "eq";
+ break;
case NeInt32:
- case NeInt64: std::cout << "ne"; break;
+ case NeInt64:
+ std::cout << "ne";
+ break;
case LtSInt32:
- case LtSInt64: std::cout << "slt"; break;
+ case LtSInt64:
+ std::cout << "slt";
+ break;
case LtUInt32:
- case LtUInt64: std::cout << "ult"; break;
+ case LtUInt64:
+ std::cout << "ult";
+ break;
case LeSInt32:
- case LeSInt64: std::cout << "sle"; break;
+ case LeSInt64:
+ std::cout << "sle";
+ break;
case LeUInt32:
- case LeUInt64: std::cout << "ule"; break;
- default: WASM_UNREACHABLE();
+ case LeUInt64:
+ std::cout << "ule";
+ break;
+ default:
+ WASM_UNREACHABLE();
}
std::cout << ' ';
auto* left = node->getValue(0);
@@ -616,11 +687,13 @@ struct Printer {
}
}
if (allInputsIdentical(node)) {
- std::cout << "^^ suspicious identical inputs! missing optimization in " << graph.func->name << "? ^^\n";
+ std::cout << "^^ suspicious identical inputs! missing optimization in "
+ << graph.func->name << "? ^^\n";
return;
}
if (!node->isPhi() && allInputsConstant(node)) {
- std::cout << "^^ suspicious constant inputs! missing optimization in " << graph.func->name << "? ^^\n";
+ std::cout << "^^ suspicious constant inputs! missing optimization in "
+ << graph.func->name << "? ^^\n";
return;
}
}
@@ -642,7 +715,8 @@ struct Souperify : public WalkerPass<PostWalker<Souperify>> {
// Build the data-flow IR.
DataFlow::Graph graph;
graph.build(func, getModule());
- if (debug() >= 2) dump(graph, std::cout);
+ if (debug() >= 2)
+ dump(graph, std::cout);
// Build the local graph data structure.
LocalGraph localGraph(func);
localGraph.computeInfluences();
@@ -653,7 +727,8 @@ struct Souperify : public WalkerPass<PostWalker<Souperify>> {
auto* node = nodePtr.get();
if (node->origin) {
// TODO: work for identical origins could be saved
- auto uses = DataFlow::UseFinder().getUses(node->origin, graph, localGraph);
+ auto uses =
+ DataFlow::UseFinder().getUses(node->origin, graph, localGraph);
if (debug() >= 2) {
std::cout << "following node has " << uses.size() << " uses\n";
dump(node, std::cout);
@@ -681,12 +756,8 @@ struct Souperify : public WalkerPass<PostWalker<Souperify>> {
}
};
-Pass *createSouperifyPass() {
- return new Souperify(false);
-}
+Pass* createSouperifyPass() { return new Souperify(false); }
-Pass *createSouperifySingleUsePass() {
- return new Souperify(true);
-}
+Pass* createSouperifySingleUsePass() { return new Souperify(true); }
} // namespace wasm
diff --git a/src/passes/SpillPointers.cpp b/src/passes/SpillPointers.cpp
index 36c2ae948..75fa72652 100644
--- a/src/passes/SpillPointers.cpp
+++ b/src/passes/SpillPointers.cpp
@@ -14,7 +14,6 @@
* limitations under the License.
*/
-
//
// Spills values that might be pointers to the C stack. This allows
// Boehm-style GC to see them properly.
@@ -26,16 +25,17 @@
// * There is currently no check that there is enough stack space.
//
-#include "wasm.h"
-#include "pass.h"
-#include "cfg/liveness-traversal.h"
-#include "wasm-builder.h"
#include "abi/abi.h"
#include "abi/stack.h"
+#include "cfg/liveness-traversal.h"
+#include "pass.h"
+#include "wasm-builder.h"
+#include "wasm.h"
namespace wasm {
-struct SpillPointers : public WalkerPass<LivenessWalker<SpillPointers, Visitor<SpillPointers>>> {
+struct SpillPointers
+ : public WalkerPass<LivenessWalker<SpillPointers, Visitor<SpillPointers>>> {
bool isFunctionParallel() override { return true; }
Pass* create() override { return new SpillPointers; }
@@ -48,21 +48,18 @@ struct SpillPointers : public WalkerPass<LivenessWalker<SpillPointers, Visitor<S
std::unordered_map<Expression**, Expression**> actualPointers;
// note calls in basic blocks
- template<typename T>
- void visitSpillable(T* curr) {
- // if in unreachable code, ignore
- if (!currBasicBlock) return;
+ template<typename T> void visitSpillable(T* curr) {
+ // if in unreachable code, ignore
+ if (!currBasicBlock)
+ return;
auto* pointer = getCurrentPointer();
currBasicBlock->contents.actions.emplace_back(pointer);
- actualPointers[pointer] = pointer; // starts out as correct, may change later
+ // starts out as correct, may change later
+ actualPointers[pointer] = pointer;
}
- void visitCall(Call* curr) {
- visitSpillable(curr);
- }
- void visitCallIndirect(CallIndirect* curr) {
- visitSpillable(curr);
- }
+ void visitCall(Call* curr) { visitSpillable(curr); }
+ void visitCallIndirect(CallIndirect* curr) { visitSpillable(curr); }
// main entry point
@@ -73,7 +70,7 @@ struct SpillPointers : public WalkerPass<LivenessWalker<SpillPointers, Visitor<S
// map pointers to their offset in the spill area
typedef std::unordered_map<Index, Index> PointerMap;
-
+
void spillPointers() {
// we only care about possible pointers
auto* func = getFunction();
@@ -88,7 +85,8 @@ struct SpillPointers : public WalkerPass<LivenessWalker<SpillPointers, Visitor<S
bool spilled = false;
Index spillLocal = -1;
for (auto& curr : basicBlocks) {
- if (liveBlocks.count(curr.get()) == 0) continue; // ignore dead blocks
+ if (liveBlocks.count(curr.get()) == 0)
+ continue; // ignore dead blocks
auto& liveness = curr->contents;
auto& actions = liveness.actions;
Index lastCall = -1;
@@ -98,7 +96,8 @@ struct SpillPointers : public WalkerPass<LivenessWalker<SpillPointers, Visitor<S
lastCall = i;
}
}
- if (lastCall == Index(-1)) continue; // nothing to see here
+ if (lastCall == Index(-1))
+ continue; // nothing to see here
// scan through the block, spilling around the calls
// TODO: we can filter on pointerMap everywhere
LocalSet live = liveness.end;
@@ -119,12 +118,15 @@ struct SpillPointers : public WalkerPass<LivenessWalker<SpillPointers, Visitor<S
// we now have a call + the information about which locals
// should be spilled
if (!spilled) {
- // prepare stack support: get a pointer to stack space big enough for all our data
+ // prepare stack support: get a pointer to stack space big enough
+ // for all our data
spillLocal = Builder::addVar(func, ABI::PointerType);
spilled = true;
}
- auto* pointer = actualPointers[action.origin]; // the origin was seen at walk, but the thing may have moved
- spillPointersAroundCall(pointer, toSpill, spillLocal, pointerMap, func, getModule());
+ // the origin was seen at walk, but the thing may have moved
+ auto* pointer = actualPointers[action.origin];
+ spillPointersAroundCall(
+ pointer, toSpill, spillLocal, pointerMap, func, getModule());
}
} else {
WASM_UNREACHABLE();
@@ -133,13 +135,22 @@ struct SpillPointers : public WalkerPass<LivenessWalker<SpillPointers, Visitor<S
}
if (spilled) {
// get the stack space, and set the local to it
- ABI::getStackSpace(spillLocal, func, getTypeSize(ABI::PointerType) * pointerMap.size(), *getModule());
+ ABI::getStackSpace(spillLocal,
+ func,
+ getTypeSize(ABI::PointerType) * pointerMap.size(),
+ *getModule());
}
}
- void spillPointersAroundCall(Expression** origin, std::vector<Index>& toSpill, Index spillLocal, PointerMap& pointerMap, Function* func, Module* module) {
+ void spillPointersAroundCall(Expression** origin,
+ std::vector<Index>& toSpill,
+ Index spillLocal,
+ PointerMap& pointerMap,
+ Function* func,
+ Module* module) {
auto* call = *origin;
- if (call->type == unreachable) return; // the call is never reached anyhow, ignore
+ if (call->type == unreachable)
+ return; // the call is never reached anyhow, ignore
Builder builder(*module);
auto* block = builder.makeBlock();
// move the operands into locals, as we must spill after they are executed
@@ -168,14 +179,13 @@ struct SpillPointers : public WalkerPass<LivenessWalker<SpillPointers, Visitor<S
}
// add the spills
for (auto index : toSpill) {
- block->list.push_back(builder.makeStore(
- getTypeSize(ABI::PointerType),
- pointerMap[index],
- getTypeSize(ABI::PointerType),
- builder.makeGetLocal(spillLocal, ABI::PointerType),
- builder.makeGetLocal(index, ABI::PointerType),
- ABI::PointerType
- ));
+ block->list.push_back(
+ builder.makeStore(getTypeSize(ABI::PointerType),
+ pointerMap[index],
+ getTypeSize(ABI::PointerType),
+ builder.makeGetLocal(spillLocal, ABI::PointerType),
+ builder.makeGetLocal(index, ABI::PointerType),
+ ABI::PointerType));
}
// add the (modified) call
block->list.push_back(call);
@@ -184,8 +194,6 @@ struct SpillPointers : public WalkerPass<LivenessWalker<SpillPointers, Visitor<S
}
};
-Pass *createSpillPointersPass() {
- return new SpillPointers();
-}
+Pass* createSpillPointersPass() { return new SpillPointers(); }
} // namespace wasm
diff --git a/src/passes/StackIR.cpp b/src/passes/StackIR.cpp
index a8d66ae42..2506eca27 100644
--- a/src/passes/StackIR.cpp
+++ b/src/passes/StackIR.cpp
@@ -18,11 +18,11 @@
// Operations on Stack IR.
//
-#include "wasm.h"
-#include "pass.h"
-#include "wasm-stack.h"
#include "ir/iteration.h"
#include "ir/local-graph.h"
+#include "pass.h"
+#include "wasm-stack.h"
+#include "wasm.h"
namespace wasm {
@@ -43,23 +43,16 @@ struct GenerateStackIR : public WalkerPass<PostWalker<GenerateStackIR>> {
Module* module;
Parent(Module* module) : module(module) {}
- Module* getModule() {
- return module;
- }
+ Module* getModule() { return module; }
void writeDebugLocation(Expression* curr, Function* func) {
WASM_UNREACHABLE();
}
- Index getFunctionIndex(Name name) {
- WASM_UNREACHABLE();
- }
- Index getFunctionTypeIndex(Name name) {
- WASM_UNREACHABLE();
- }
- Index getGlobalIndex(Name name) {
- WASM_UNREACHABLE();
- }
+ Index getFunctionIndex(Name name) { WASM_UNREACHABLE(); }
+ Index getFunctionTypeIndex(Name name) { WASM_UNREACHABLE(); }
+ Index getGlobalIndex(Name name) { WASM_UNREACHABLE(); }
} parent(getModule());
- StackWriter<StackWriterMode::Binaryen2Stack, Parent> stackWriter(parent, buffer, false);
+ StackWriter<StackWriterMode::Binaryen2Stack, Parent> stackWriter(
+ parent, buffer, false);
stackWriter.setFunction(func);
stackWriter.visitPossibleBlockContents(func->body);
func->stackIR = make_unique<StackIR>();
@@ -67,9 +60,7 @@ struct GenerateStackIR : public WalkerPass<PostWalker<GenerateStackIR>> {
}
};
-Pass* createGenerateStackIRPass() {
- return new GenerateStackIR();
-}
+Pass* createGenerateStackIRPass() { return new GenerateStackIR(); }
// Optimize
@@ -79,8 +70,8 @@ class StackIROptimizer {
StackIR& insts;
public:
- StackIROptimizer(Function* func, PassOptions& passOptions) :
- func(func), passOptions(passOptions), insts(*func->stackIR.get()) {
+ StackIROptimizer(Function* func, PassOptions& passOptions)
+ : func(func), passOptions(passOptions), insts(*func->stackIR.get()) {
assert(func->stackIR);
}
@@ -103,7 +94,8 @@ private:
bool inUnreachableCode = false;
for (Index i = 0; i < insts.size(); i++) {
auto* inst = insts[i];
- if (!inst) continue;
+ if (!inst)
+ continue;
if (inUnreachableCode) {
// Does the unreachable code end here?
if (isControlFlowBarrier(inst)) {
@@ -151,12 +143,15 @@ private:
#endif
for (Index i = 0; i < insts.size(); i++) {
auto* inst = insts[i];
- if (!inst) continue;
+ if (!inst)
+ continue;
// First, consume values from the stack as required.
auto consumed = getNumConsumedValues(inst);
#ifdef STACK_OPT_DEBUG
- std::cout << " " << i << " : " << *inst << ", " << values.size() << " on stack, will consume " << consumed << "\n ";
- for (auto s : values) std::cout << s << ' ';
+ std::cout << " " << i << " : " << *inst << ", " << values.size()
+ << " on stack, will consume " << consumed << "\n ";
+ for (auto s : values)
+ std::cout << s << ' ';
std::cout << '\n';
#endif
// TODO: currently we run dce before this, but if we didn't, we'd need
@@ -199,7 +194,8 @@ private:
while (1) {
// If there's an actual value in the way, we've failed.
auto index = values[j];
- if (index == null) break;
+ if (index == null)
+ break;
auto* set = insts[index]->origin->cast<SetLocal>();
if (set->index == get->index) {
// This might be a proper set-get pair, where the set is
@@ -228,7 +224,8 @@ private:
}
}
// We failed here. Can we look some more?
- if (j == 0) break;
+ if (j == 0)
+ break;
j--;
}
}
@@ -250,7 +247,8 @@ private:
// a branch to that if body
void removeUnneededBlocks() {
for (auto*& inst : insts) {
- if (!inst) continue;
+ if (!inst)
+ continue;
if (auto* block = inst->origin->dynCast<Block>()) {
if (!BranchUtils::BranchSeeker::hasNamed(block, block->name)) {
// TODO optimize, maybe run remove-unused-names
@@ -272,9 +270,7 @@ private:
case StackInst::LoopEnd: {
return true;
}
- default: {
- return false;
- }
+ default: { return false; }
}
}
@@ -286,9 +282,7 @@ private:
case StackInst::LoopBegin: {
return true;
}
- default: {
- return false;
- }
+ default: { return false; }
}
}
@@ -300,15 +294,11 @@ private:
case StackInst::LoopEnd: {
return true;
}
- default: {
- return false;
- }
+ default: { return false; }
}
}
- bool isControlFlow(StackInst* inst) {
- return inst->op != StackInst::Basic;
- }
+ bool isControlFlow(StackInst* inst) { return inst->op != StackInst::Basic; }
// Remove the instruction at index i. If the instruction
// is control flow, and so has been expanded to multiple
@@ -359,9 +349,6 @@ struct OptimizeStackIR : public WalkerPass<PostWalker<OptimizeStackIR>> {
}
};
-Pass* createOptimizeStackIRPass() {
- return new OptimizeStackIR();
-}
+Pass* createOptimizeStackIRPass() { return new OptimizeStackIR(); }
} // namespace wasm
-
diff --git a/src/passes/Strip.cpp b/src/passes/Strip.cpp
index edc171ab3..e85379003 100644
--- a/src/passes/Strip.cpp
+++ b/src/passes/Strip.cpp
@@ -21,9 +21,9 @@
#include <functional>
-#include "wasm.h"
-#include "wasm-binary.h"
#include "pass.h"
+#include "wasm-binary.h"
+#include "wasm.h"
using namespace std;
@@ -31,7 +31,7 @@ namespace wasm {
struct Strip : public Pass {
// A function that returns true if the method should be removed.
- typedef std::function<bool (UserSection&)> Decider;
+ typedef std::function<bool(UserSection&)> Decider;
Decider decider;
Strip(Decider decider) : decider(decider) {}
@@ -39,14 +39,8 @@ struct Strip : public Pass {
void run(PassRunner* runner, Module* module) override {
// Remove name and debug sections.
auto& sections = module->userSections;
- sections.erase(
- std::remove_if(
- sections.begin(),
- sections.end(),
- decider
- ),
- sections.end()
- );
+ sections.erase(std::remove_if(sections.begin(), sections.end(), decider),
+ sections.end());
// If we're cleaning up debug info, clear on the function and module too.
UserSection temp;
temp.name = BinaryConsts::UserSections::Name;
@@ -60,16 +54,15 @@ struct Strip : public Pass {
}
};
-Pass *createStripDebugPass() {
+Pass* createStripDebugPass() {
return new Strip([&](const UserSection& curr) {
return curr.name == BinaryConsts::UserSections::Name ||
curr.name == BinaryConsts::UserSections::SourceMapUrl ||
- curr.name.find(".debug") == 0 ||
- curr.name.find("reloc..debug") == 0;
+ curr.name.find(".debug") == 0 || curr.name.find("reloc..debug") == 0;
});
}
-Pass *createStripProducersPass() {
+Pass* createStripProducersPass() {
return new Strip([&](const UserSection& curr) {
return curr.name == BinaryConsts::UserSections::Producers;
});
diff --git a/src/passes/StripTargetFeatures.cpp b/src/passes/StripTargetFeatures.cpp
index 8eb7b0b75..542b4a6c1 100644
--- a/src/passes/StripTargetFeatures.cpp
+++ b/src/passes/StripTargetFeatures.cpp
@@ -24,8 +24,6 @@ struct StripTargetFeatures : public Pass {
}
};
-Pass *createStripTargetFeaturesPass() {
- return new StripTargetFeatures();
-}
+Pass* createStripTargetFeaturesPass() { return new StripTargetFeatures(); }
} // namespace wasm
diff --git a/src/passes/TrapMode.cpp b/src/passes/TrapMode.cpp
index e6327479c..b36427138 100644
--- a/src/passes/TrapMode.cpp
+++ b/src/passes/TrapMode.cpp
@@ -26,66 +26,84 @@
#include "ir/trapping.h"
#include "mixed_arena.h"
#include "pass.h"
-#include "wasm.h"
+#include "support/name.h"
#include "wasm-builder.h"
#include "wasm-printing.h"
#include "wasm-type.h"
-#include "support/name.h"
+#include "wasm.h"
namespace wasm {
-Name I64S_REM("i64s-rem"),
- I64U_REM("i64u-rem"),
- I64S_DIV("i64s-div"),
- I64U_DIV("i64u-div");
+Name I64S_REM("i64s-rem");
+Name I64U_REM("i64u-rem");
+Name I64S_DIV("i64s-div");
+Name I64U_DIV("i64u-div");
Name getBinaryFuncName(Binary* curr) {
switch (curr->op) {
- case RemSInt32: return I32S_REM;
- case RemUInt32: return I32U_REM;
- case DivSInt32: return I32S_DIV;
- case DivUInt32: return I32U_DIV;
- case RemSInt64: return I64S_REM;
- case RemUInt64: return I64U_REM;
- case DivSInt64: return I64S_DIV;
- case DivUInt64: return I64U_DIV;
- default: return Name();
+ case RemSInt32:
+ return I32S_REM;
+ case RemUInt32:
+ return I32U_REM;
+ case DivSInt32:
+ return I32S_DIV;
+ case DivUInt32:
+ return I32U_DIV;
+ case RemSInt64:
+ return I64S_REM;
+ case RemUInt64:
+ return I64U_REM;
+ case DivSInt64:
+ return I64S_DIV;
+ case DivUInt64:
+ return I64U_DIV;
+ default:
+ return Name();
}
}
Name getUnaryFuncName(Unary* curr) {
switch (curr->op) {
- case TruncSFloat32ToInt32: return F32_TO_INT;
- case TruncUFloat32ToInt32: return F32_TO_UINT;
- case TruncSFloat32ToInt64: return F32_TO_INT64;
- case TruncUFloat32ToInt64: return F32_TO_UINT64;
- case TruncSFloat64ToInt32: return F64_TO_INT;
- case TruncUFloat64ToInt32: return F64_TO_UINT;
- case TruncSFloat64ToInt64: return F64_TO_INT64;
- case TruncUFloat64ToInt64: return F64_TO_UINT64;
- default: return Name();
+ case TruncSFloat32ToInt32:
+ return F32_TO_INT;
+ case TruncUFloat32ToInt32:
+ return F32_TO_UINT;
+ case TruncSFloat32ToInt64:
+ return F32_TO_INT64;
+ case TruncUFloat32ToInt64:
+ return F32_TO_UINT64;
+ case TruncSFloat64ToInt32:
+ return F64_TO_INT;
+ case TruncUFloat64ToInt32:
+ return F64_TO_UINT;
+ case TruncSFloat64ToInt64:
+ return F64_TO_INT64;
+ case TruncUFloat64ToInt64:
+ return F64_TO_UINT64;
+ default:
+ return Name();
}
}
bool isTruncOpSigned(UnaryOp op) {
switch (op) {
- case TruncUFloat32ToInt32:
- case TruncUFloat32ToInt64:
- case TruncUFloat64ToInt32:
- case TruncUFloat64ToInt64: return false;
- default: return true;
+ case TruncUFloat32ToInt32:
+ case TruncUFloat32ToInt64:
+ case TruncUFloat64ToInt32:
+ case TruncUFloat64ToInt64:
+ return false;
+ default:
+ return true;
}
}
-Function* generateBinaryFunc(Module& wasm, Binary *curr) {
+Function* generateBinaryFunc(Module& wasm, Binary* curr) {
BinaryOp op = curr->op;
Type type = curr->type;
bool isI64 = type == i64;
Builder builder(wasm);
- Expression* result = builder.makeBinary(op,
- builder.makeGetLocal(0, type),
- builder.makeGetLocal(1, type)
- );
+ Expression* result = builder.makeBinary(
+ op, builder.makeGetLocal(0, type), builder.makeGetLocal(1, type));
BinaryOp divSIntOp = isI64 ? DivSInt64 : DivSInt32;
UnaryOp eqZOp = isI64 ? EqZInt64 : EqZInt32;
Literal minLit = isI64 ? Literal(std::numeric_limits<int64_t>::min())
@@ -96,32 +114,24 @@ Function* generateBinaryFunc(Module& wasm, Binary *curr) {
BinaryOp eqOp = isI64 ? EqInt64 : EqInt32;
Literal negLit = isI64 ? Literal(int64_t(-1)) : Literal(int32_t(-1));
result = builder.makeIf(
- builder.makeBinary(AndInt32,
- builder.makeBinary(eqOp,
- builder.makeGetLocal(0, type),
- builder.makeConst(minLit)
- ),
- builder.makeBinary(eqOp,
- builder.makeGetLocal(1, type),
- builder.makeConst(negLit)
- )
- ),
+ builder.makeBinary(
+ AndInt32,
+ builder.makeBinary(
+ eqOp, builder.makeGetLocal(0, type), builder.makeConst(minLit)),
+ builder.makeBinary(
+ eqOp, builder.makeGetLocal(1, type), builder.makeConst(negLit))),
builder.makeConst(zeroLit),
- result
- );
+ result);
}
auto func = new Function;
func->name = getBinaryFuncName(curr);
func->params.push_back(type);
func->params.push_back(type);
func->result = type;
- func->body = builder.makeIf(
- builder.makeUnary(eqZOp,
- builder.makeGetLocal(1, type)
- ),
- builder.makeConst(zeroLit),
- result
- );
+ func->body =
+ builder.makeIf(builder.makeUnary(eqZOp, builder.makeGetLocal(1, type)),
+ builder.makeConst(zeroLit),
+ result);
return func;
}
@@ -134,7 +144,7 @@ void makeClampLimitLiterals(Literal& iMin, Literal& fMin, Literal& fMax) {
fMax = Literal(FloatType(maxVal) + 1);
}
-Function* generateUnaryFunc(Module& wasm, Unary *curr) {
+Function* generateUnaryFunc(Module& wasm, Unary* curr) {
Type type = curr->value->type;
Type retType = curr->type;
UnaryOp truncOp = curr->op;
@@ -148,59 +158,66 @@ Function* generateUnaryFunc(Module& wasm, Unary *curr) {
Literal iMin, fMin, fMax;
switch (truncOp) {
- case TruncSFloat32ToInt32: makeClampLimitLiterals< int32_t, float>(iMin, fMin, fMax); break;
- case TruncUFloat32ToInt32: makeClampLimitLiterals<uint32_t, float>(iMin, fMin, fMax); break;
- case TruncSFloat32ToInt64: makeClampLimitLiterals< int64_t, float>(iMin, fMin, fMax); break;
- case TruncUFloat32ToInt64: makeClampLimitLiterals<uint64_t, float>(iMin, fMin, fMax); break;
- case TruncSFloat64ToInt32: makeClampLimitLiterals< int32_t, double>(iMin, fMin, fMax); break;
- case TruncUFloat64ToInt32: makeClampLimitLiterals<uint32_t, double>(iMin, fMin, fMax); break;
- case TruncSFloat64ToInt64: makeClampLimitLiterals< int64_t, double>(iMin, fMin, fMax); break;
- case TruncUFloat64ToInt64: makeClampLimitLiterals<uint64_t, double>(iMin, fMin, fMax); break;
- default: WASM_UNREACHABLE();
+ case TruncSFloat32ToInt32:
+ makeClampLimitLiterals<int32_t, float>(iMin, fMin, fMax);
+ break;
+ case TruncUFloat32ToInt32:
+ makeClampLimitLiterals<uint32_t, float>(iMin, fMin, fMax);
+ break;
+ case TruncSFloat32ToInt64:
+ makeClampLimitLiterals<int64_t, float>(iMin, fMin, fMax);
+ break;
+ case TruncUFloat32ToInt64:
+ makeClampLimitLiterals<uint64_t, float>(iMin, fMin, fMax);
+ break;
+ case TruncSFloat64ToInt32:
+ makeClampLimitLiterals<int32_t, double>(iMin, fMin, fMax);
+ break;
+ case TruncUFloat64ToInt32:
+ makeClampLimitLiterals<uint32_t, double>(iMin, fMin, fMax);
+ break;
+ case TruncSFloat64ToInt64:
+ makeClampLimitLiterals<int64_t, double>(iMin, fMin, fMax);
+ break;
+ case TruncUFloat64ToInt64:
+ makeClampLimitLiterals<uint64_t, double>(iMin, fMin, fMax);
+ break;
+ default:
+ WASM_UNREACHABLE();
}
auto func = new Function;
func->name = getUnaryFuncName(curr);
func->params.push_back(type);
func->result = retType;
- func->body = builder.makeUnary(truncOp,
- builder.makeGetLocal(0, type)
- );
+ func->body = builder.makeUnary(truncOp, builder.makeGetLocal(0, type));
// too small XXX this is different than asm.js, which does frem. here we
// clamp, which is much simpler/faster, and similar to native builds
- func->body = builder.makeIf(
- builder.makeBinary(leOp,
- builder.makeGetLocal(0, type),
- builder.makeConst(fMin)
- ),
- builder.makeConst(iMin),
- func->body
- );
+ func->body = builder.makeIf(builder.makeBinary(leOp,
+ builder.makeGetLocal(0, type),
+ builder.makeConst(fMin)),
+ builder.makeConst(iMin),
+ func->body);
// too big XXX see above
func->body = builder.makeIf(
- builder.makeBinary(geOp,
- builder.makeGetLocal(0, type),
- builder.makeConst(fMax)
- ),
+ builder.makeBinary(
+ geOp, builder.makeGetLocal(0, type), builder.makeConst(fMax)),
// NB: min here as well. anything out of range => to the min
builder.makeConst(iMin),
- func->body
- );
+ func->body);
// nan
func->body = builder.makeIf(
- builder.makeBinary(neOp,
- builder.makeGetLocal(0, type),
- builder.makeGetLocal(0, type)
- ),
+ builder.makeBinary(
+ neOp, builder.makeGetLocal(0, type), builder.makeGetLocal(0, type)),
// NB: min here as well. anything invalid => to the min
builder.makeConst(iMin),
- func->body
- );
+ func->body);
return func;
}
-void ensureBinaryFunc(Binary* curr, Module& wasm,
- TrappingFunctionContainer &trappingFunctions) {
+void ensureBinaryFunc(Binary* curr,
+ Module& wasm,
+ TrappingFunctionContainer& trappingFunctions) {
Name name = getBinaryFuncName(curr);
if (trappingFunctions.hasFunction(name)) {
return;
@@ -208,8 +225,9 @@ void ensureBinaryFunc(Binary* curr, Module& wasm,
trappingFunctions.addFunction(generateBinaryFunc(wasm, curr));
}
-void ensureUnaryFunc(Unary *curr, Module& wasm,
- TrappingFunctionContainer &trappingFunctions) {
+void ensureUnaryFunc(Unary* curr,
+ Module& wasm,
+ TrappingFunctionContainer& trappingFunctions) {
Name name = getUnaryFuncName(curr);
if (trappingFunctions.hasFunction(name)) {
return;
@@ -217,7 +235,7 @@ void ensureUnaryFunc(Unary *curr, Module& wasm,
trappingFunctions.addFunction(generateUnaryFunc(wasm, curr));
}
-void ensureF64ToI64JSImport(TrappingFunctionContainer &trappingFunctions) {
+void ensureF64ToI64JSImport(TrappingFunctionContainer& trappingFunctions) {
if (trappingFunctions.hasImport(F64_TO_INT)) {
return;
}
@@ -233,7 +251,8 @@ void ensureF64ToI64JSImport(TrappingFunctionContainer &trappingFunctions) {
trappingFunctions.addImport(import);
}
-Expression* makeTrappingBinary(Binary* curr, TrappingFunctionContainer &trappingFunctions) {
+Expression* makeTrappingBinary(Binary* curr,
+ TrappingFunctionContainer& trappingFunctions) {
Name name = getBinaryFuncName(curr);
if (!name.is() || trappingFunctions.getMode() == TrapMode::Allow) {
return curr;
@@ -247,7 +266,8 @@ Expression* makeTrappingBinary(Binary* curr, TrappingFunctionContainer &trapping
return builder.makeCall(name, {curr->left, curr->right}, type);
}
-Expression* makeTrappingUnary(Unary* curr, TrappingFunctionContainer &trappingFunctions) {
+Expression* makeTrappingUnary(Unary* curr,
+ TrappingFunctionContainer& trappingFunctions) {
Name name = getUnaryFuncName(curr);
TrapMode mode = trappingFunctions.getMode();
if (!name.is() || mode == TrapMode::Allow) {
@@ -256,13 +276,15 @@ Expression* makeTrappingUnary(Unary* curr, TrappingFunctionContainer &trappingFu
Module& wasm = trappingFunctions.getModule();
Builder builder(wasm);
- // WebAssembly traps on float-to-int overflows, but asm.js wouldn't, so we must do something
- // We can handle this in one of two ways: clamping, which is fast, or JS, which
- // is precisely like JS but in order to do that we do a slow ffi
- // If i64, there is no "JS" way to handle this, as no i64s in JS, so always clamp if we don't allow traps
- // asm.js doesn't have unsigned f64-to-int, so just use the signed one.
+ // WebAssembly traps on float-to-int overflows, but asm.js wouldn't, so we
+ // must do something We can handle this in one of two ways: clamping, which is
+ // fast, or JS, which is precisely like JS but in order to do that we do a
+ // slow ffi If i64, there is no "JS" way to handle this, as no i64s in JS, so
+ // always clamp if we don't allow traps asm.js doesn't have unsigned
+ // f64-to-int, so just use the signed one.
if (curr->type != i64 && mode == TrapMode::JS) {
- // WebAssembly traps on float-to-int overflows, but asm.js wouldn't, so we must emulate that
+ // WebAssembly traps on float-to-int overflows, but asm.js wouldn't, so we
+ // must emulate that
ensureF64ToI64JSImport(trappingFunctions);
Expression* f64Value = ensureDouble(curr->value, wasm.allocator);
return builder.makeCall(F64_TO_INT, {f64Value}, i32);
@@ -274,14 +296,11 @@ Expression* makeTrappingUnary(Unary* curr, TrappingFunctionContainer &trappingFu
struct TrapModePass : public WalkerPass<PostWalker<TrapModePass>> {
public:
-
// Needs to be non-parallel so that visitModule gets called after visiting
// each node in the module, so we can add the functions that we created.
bool isFunctionParallel() override { return false; }
- TrapModePass(TrapMode mode) : mode(mode) {
- assert(mode != TrapMode::Allow);
- }
+ TrapModePass(TrapMode mode) : mode(mode) { assert(mode != TrapMode::Allow); }
Pass* create() override { return new TrapModePass(mode); }
@@ -293,9 +312,7 @@ public:
replaceCurrent(makeTrappingBinary(curr, *trappingFunctions));
}
- void visitModule(Module* curr) {
- trappingFunctions->addToModule();
- }
+ void visitModule(Module* curr) { trappingFunctions->addToModule(); }
void doWalkModule(Module* module) {
trappingFunctions = make_unique<TrappingFunctionContainer>(mode, *module);
@@ -309,12 +326,8 @@ private:
std::unique_ptr<TrappingFunctionContainer> trappingFunctions;
};
-Pass *createTrapModeClamp() {
- return new TrapModePass(TrapMode::Clamp);
-}
+Pass* createTrapModeClamp() { return new TrapModePass(TrapMode::Clamp); }
-Pass *createTrapModeJS() {
- return new TrapModePass(TrapMode::JS);
-}
+Pass* createTrapModeJS() { return new TrapModePass(TrapMode::JS); }
} // namespace wasm
diff --git a/src/passes/Untee.cpp b/src/passes/Untee.cpp
index 00f2ffe5d..713962aeb 100644
--- a/src/passes/Untee.cpp
+++ b/src/passes/Untee.cpp
@@ -22,9 +22,9 @@
// more effective.
//
-#include <wasm.h>
#include <pass.h>
#include <wasm-builder.h>
+#include <wasm.h>
namespace wasm {
@@ -33,7 +33,7 @@ struct Untee : public WalkerPass<PostWalker<Untee>> {
Pass* create() override { return new Untee; }
- void visitSetLocal(SetLocal *curr) {
+ void visitSetLocal(SetLocal* curr) {
if (curr->isTee()) {
if (curr->value->type == unreachable) {
// we don't reach the tee, just remove it
@@ -41,21 +41,14 @@ struct Untee : public WalkerPass<PostWalker<Untee>> {
} else {
// a normal tee. replace with set and get
Builder builder(*getModule());
- replaceCurrent(
- builder.makeSequence(
- curr,
- builder.makeGetLocal(curr->index, curr->value->type)
- )
- );
+ replaceCurrent(builder.makeSequence(
+ curr, builder.makeGetLocal(curr->index, curr->value->type)));
curr->setTee(false);
}
}
}
};
-Pass *createUnteePass() {
- return new Untee();
-}
+Pass* createUnteePass() { return new Untee(); }
} // namespace wasm
-
diff --git a/src/passes/Vacuum.cpp b/src/passes/Vacuum.cpp
index 08581a3eb..8874ffed2 100644
--- a/src/passes/Vacuum.cpp
+++ b/src/passes/Vacuum.cpp
@@ -18,14 +18,14 @@
// Removes obviously unneeded code
//
-#include <wasm.h>
-#include <pass.h>
-#include <wasm-builder.h>
#include <ir/block-utils.h>
#include <ir/effects.h>
#include <ir/literal-utils.h>
#include <ir/type-updating.h>
#include <ir/utils.h>
+#include <pass.h>
+#include <wasm-builder.h>
+#include <wasm.h>
namespace wasm {
@@ -49,15 +49,17 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
walk(func->body);
}
- // Returns nullptr if curr is dead, curr if it must stay as is, or another node if it can be replaced.
- // Takes into account:
+ // Returns nullptr if curr is dead, curr if it must stay as is, or another
+ // node if it can be replaced. Takes into account:
// * The result may be used or unused.
// * The type may or may not matter (a drop can drop anything, for example).
Expression* optimize(Expression* curr, bool resultUsed, bool typeMatters) {
auto type = curr->type;
// An unreachable node must not be changed.
- if (type == unreachable) return curr;
- // We iterate on possible replacements. If a replacement changes the type, stop and go back.
+ if (type == unreachable)
+ return curr;
+ // We iterate on possible replacements. If a replacement changes the type,
+ // stop and go back.
auto* prev = curr;
while (1) {
if (typeMatters && curr->type != type) {
@@ -65,12 +67,17 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
}
prev = curr;
switch (curr->_id) {
- case Expression::Id::NopId: return nullptr; // never needed
+ case Expression::Id::NopId:
+ return nullptr; // never needed
- case Expression::Id::BlockId: return curr; // not always needed, but handled in visitBlock()
- case Expression::Id::IfId: return curr; // not always needed, but handled in visitIf()
- case Expression::Id::LoopId: return curr; // not always needed, but handled in visitLoop()
- case Expression::Id::DropId: return curr; // not always needed, but handled in visitDrop()
+ case Expression::Id::BlockId:
+ return curr; // not always needed, but handled in visitBlock()
+ case Expression::Id::IfId:
+ return curr; // not always needed, but handled in visitIf()
+ case Expression::Id::LoopId:
+ return curr; // not always needed, but handled in visitLoop()
+ case Expression::Id::DropId:
+ return curr; // not always needed, but handled in visitDrop()
case Expression::Id::BreakId:
case Expression::Id::SwitchId:
@@ -81,12 +88,15 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
case Expression::Id::ReturnId:
case Expression::Id::SetGlobalId:
case Expression::Id::HostId:
- case Expression::Id::UnreachableId: return curr; // always needed
+ case Expression::Id::UnreachableId:
+ return curr; // always needed
case Expression::Id::LoadId: {
// it is ok to remove a load if the result is not used, and it has no
- // side effects (the load itself may trap, if we are not ignoring such things)
- if (!resultUsed && !EffectAnalyzer(getPassOptions(), curr).hasSideEffects()) {
+ // side effects (the load itself may trap, if we are not ignoring such
+ // things)
+ if (!resultUsed &&
+ !EffectAnalyzer(getPassOptions(), curr).hasSideEffects()) {
return curr->cast<Load>()->ptr;
}
return curr;
@@ -94,7 +104,8 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
case Expression::Id::ConstId:
case Expression::Id::GetLocalId:
case Expression::Id::GetGlobalId: {
- if (!resultUsed) return nullptr;
+ if (!resultUsed)
+ return nullptr;
return curr;
}
@@ -104,15 +115,17 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
if (resultUsed) {
return curr; // used, keep it
}
- // for unary, binary, and select, we need to check their arguments for side effects,
- // as well as the node itself, as some unaries and binaries have implicit traps
+ // for unary, binary, and select, we need to check their arguments for
+ // side effects, as well as the node itself, as some unaries and
+ // binaries have implicit traps
if (auto* unary = curr->dynCast<Unary>()) {
EffectAnalyzer tester(getPassOptions());
tester.visitUnary(unary);
if (tester.hasSideEffects()) {
return curr;
}
- if (EffectAnalyzer(getPassOptions(), unary->value).hasSideEffects()) {
+ if (EffectAnalyzer(getPassOptions(), unary->value)
+ .hasSideEffects()) {
curr = unary->value;
continue;
} else {
@@ -124,15 +137,18 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
if (tester.hasSideEffects()) {
return curr;
}
- if (EffectAnalyzer(getPassOptions(), binary->left).hasSideEffects()) {
- if (EffectAnalyzer(getPassOptions(), binary->right).hasSideEffects()) {
+ if (EffectAnalyzer(getPassOptions(), binary->left)
+ .hasSideEffects()) {
+ if (EffectAnalyzer(getPassOptions(), binary->right)
+ .hasSideEffects()) {
return curr; // leave them
} else {
curr = binary->left;
continue;
}
} else {
- if (EffectAnalyzer(getPassOptions(), binary->right).hasSideEffects()) {
+ if (EffectAnalyzer(getPassOptions(), binary->right)
+ .hasSideEffects()) {
curr = binary->right;
continue;
} else {
@@ -140,13 +156,17 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
}
}
} else {
- // TODO: if two have side effects, we could replace the select with say an add?
+ // TODO: if two have side effects, we could replace the select with
+ // say an add?
auto* select = curr->cast<Select>();
- if (EffectAnalyzer(getPassOptions(), select->ifTrue).hasSideEffects()) {
- if (EffectAnalyzer(getPassOptions(), select->ifFalse).hasSideEffects()) {
+ if (EffectAnalyzer(getPassOptions(), select->ifTrue)
+ .hasSideEffects()) {
+ if (EffectAnalyzer(getPassOptions(), select->ifFalse)
+ .hasSideEffects()) {
return curr; // leave them
} else {
- if (EffectAnalyzer(getPassOptions(), select->condition).hasSideEffects()) {
+ if (EffectAnalyzer(getPassOptions(), select->condition)
+ .hasSideEffects()) {
return curr; // leave them
} else {
curr = select->ifTrue;
@@ -154,15 +174,18 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
}
}
} else {
- if (EffectAnalyzer(getPassOptions(), select->ifFalse).hasSideEffects()) {
- if (EffectAnalyzer(getPassOptions(), select->condition).hasSideEffects()) {
+ if (EffectAnalyzer(getPassOptions(), select->ifFalse)
+ .hasSideEffects()) {
+ if (EffectAnalyzer(getPassOptions(), select->condition)
+ .hasSideEffects()) {
return curr; // leave them
} else {
curr = select->ifFalse;
continue;
}
} else {
- if (EffectAnalyzer(getPassOptions(), select->condition).hasSideEffects()) {
+ if (EffectAnalyzer(getPassOptions(), select->condition)
+ .hasSideEffects()) {
curr = select->condition;
continue;
} else {
@@ -173,12 +196,13 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
}
}
- default: return curr; // assume needed
+ default:
+ return curr; // assume needed
}
}
}
- void visitBlock(Block *curr) {
+ void visitBlock(Block* curr) {
// compress out nops and other dead code
int skip = 0;
auto& list = curr->list;
@@ -186,18 +210,20 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
for (size_t z = 0; z < size; z++) {
auto* child = list[z];
// The last element may be used.
- bool used = z == size - 1 &&
- isConcreteType(curr->type) &&
- ExpressionAnalyzer::isResultUsed(expressionStack, getFunction());
+ bool used =
+ z == size - 1 && isConcreteType(curr->type) &&
+ ExpressionAnalyzer::isResultUsed(expressionStack, getFunction());
auto* optimized = optimize(child, used, true);
if (!optimized) {
if (isConcreteType(child->type)) {
- // We can't just skip a final concrete element, even if it isn't used. Instead,
- // replace it with something that's easy to optimize out (for example, code-folding
- // can merge out identical zeros at the end of if arms).
+ // We can't just skip a final concrete element, even if it isn't used.
+ // Instead, replace it with something that's easy to optimize out (for
+ // example, code-folding can merge out identical zeros at the end of
+ // if arms).
optimized = LiteralUtils::makeZero(child->type, *getModule());
} else if (child->type == unreachable) {
- // Don't try to optimize out an unreachable child (dce can do that properly).
+ // Don't try to optimize out an unreachable child (dce can do that
+ // properly).
optimized = child;
}
}
@@ -232,7 +258,8 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
list.resize(size - skip);
typeUpdater.maybeUpdateTypeToUnreachable(curr);
}
- // the block may now be a trivial one that we can get rid of and just leave its contents
+ // the block may now be a trivial one that we can get rid of and just leave
+ // its contents
replaceCurrent(BlockUtils::simplifyToContents(curr, this));
}
@@ -275,9 +302,11 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
} else if (curr->ifTrue->is<Nop>()) {
curr->ifTrue = curr->ifFalse;
curr->ifFalse = nullptr;
- curr->condition = Builder(*getModule()).makeUnary(EqZInt32, curr->condition);
+ curr->condition =
+ Builder(*getModule()).makeUnary(EqZInt32, curr->condition);
} else if (curr->ifTrue->is<Drop>() && curr->ifFalse->is<Drop>()) {
- // instead of dropping both sides, drop the if, if they are the same type
+ // instead of dropping both sides, drop the if, if they are the same
+ // type
auto* left = curr->ifTrue->cast<Drop>()->value;
auto* right = curr->ifFalse->cast<Drop>()->value;
if (left->type == right->type) {
@@ -297,7 +326,8 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
}
void visitLoop(Loop* curr) {
- if (curr->body->is<Nop>()) ExpressionManipulator::nop(curr);
+ if (curr->body->is<Nop>())
+ ExpressionManipulator::nop(curr);
}
void visitDrop(Drop* curr) {
@@ -314,12 +344,13 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
replaceCurrent(set);
return;
}
- // if we are dropping a block's return value, we might be able to remove it entirely
+ // if we are dropping a block's return value, we might be able to remove it
+ // entirely
if (auto* block = curr->value->dynCast<Block>()) {
auto* last = block->list.back();
// note that the last element may be concrete but not the block, if the
- // block has an unreachable element in the middle, making the block unreachable
- // despite later elements and in particular the last
+ // block has an unreachable element in the middle, making the block
+ // unreachable despite later elements and in particular the last
if (isConcreteType(last->type) && block->type == last->type) {
last = optimize(last, false, false);
if (!last) {
@@ -338,7 +369,8 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
block->list.back() = last;
block->list.pop_back();
block->type = none;
- // we don't need the drop anymore, let's see what we have left in the block
+ // we don't need the drop anymore, let's see what we have left in
+ // the block
if (block->list.size() > 1) {
replaceCurrent(block);
} else if (block->list.size() == 1) {
@@ -351,16 +383,20 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
}
}
}
- // sink a drop into an arm of an if-else if the other arm ends in an unreachable, as it if is a branch, this can make that branch optimizable and more vaccuming possible
+ // sink a drop into an arm of an if-else if the other arm ends in an
+ // unreachable, as it if is a branch, this can make that branch optimizable
+ // and more vaccuming possible
auto* iff = curr->value->dynCast<If>();
if (iff && iff->ifFalse && isConcreteType(iff->type)) {
// reuse the drop in both cases
- if (iff->ifTrue->type == unreachable && isConcreteType(iff->ifFalse->type)) {
+ if (iff->ifTrue->type == unreachable &&
+ isConcreteType(iff->ifFalse->type)) {
curr->value = iff->ifFalse;
iff->ifFalse = curr;
iff->type = none;
replaceCurrent(iff);
- } else if (iff->ifFalse->type == unreachable && isConcreteType(iff->ifTrue->type)) {
+ } else if (iff->ifFalse->type == unreachable &&
+ isConcreteType(iff->ifTrue->type)) {
curr->value = iff->ifTrue;
iff->ifTrue = curr;
iff->type = none;
@@ -376,15 +412,13 @@ struct Vacuum : public WalkerPass<ExpressionStackWalker<Vacuum>> {
} else {
ExpressionManipulator::nop(curr->body);
}
- if (curr->result == none && !EffectAnalyzer(getPassOptions(), curr->body).hasSideEffects()) {
+ if (curr->result == none &&
+ !EffectAnalyzer(getPassOptions(), curr->body).hasSideEffects()) {
ExpressionManipulator::nop(curr->body);
}
}
};
-Pass *createVacuumPass() {
- return new Vacuum();
-}
+Pass* createVacuumPass() { return new Vacuum(); }
} // namespace wasm
-
diff --git a/src/passes/intrinsics-module.h b/src/passes/intrinsics-module.h
index c9a757dc0..e7f7a3a6e 100644
--- a/src/passes/intrinsics-module.h
+++ b/src/passes/intrinsics-module.h
@@ -24,4 +24,3 @@ extern const char* IntrinsicsModuleWast;
} // namespace wasm
#endif // passes_intrinsics_module_h
-
diff --git a/src/passes/opt-utils.h b/src/passes/opt-utils.h
index a880e2623..9ac6da4a2 100644
--- a/src/passes/opt-utils.h
+++ b/src/passes/opt-utils.h
@@ -19,8 +19,8 @@
#include <unordered_set>
-#include <wasm.h>
#include <pass.h>
+#include <wasm.h>
namespace wasm {
@@ -28,7 +28,9 @@ namespace OptUtils {
// Run useful optimizations after inlining new code into a set
// of functions.
-inline void optimizeAfterInlining(std::unordered_set<Function*>& funcs, Module* module, PassRunner* parentRunner) {
+inline void optimizeAfterInlining(std::unordered_set<Function*>& funcs,
+ Module* module,
+ PassRunner* parentRunner) {
// save the full list of functions on the side
std::vector<std::unique_ptr<Function>> all;
all.swap(module->functions);
@@ -39,7 +41,8 @@ inline void optimizeAfterInlining(std::unordered_set<Function*>& funcs, Module*
PassRunner runner(module, parentRunner->options);
runner.setIsNested(true);
runner.setValidateGlobally(false); // not a full valid module
- runner.add("precompute-propagate"); // this is especially useful after inlining
+ // this is especially useful after inlining
+ runner.add("precompute-propagate");
runner.addDefaultFunctionOptimizationPasses(); // do all the usual stuff
runner.run();
// restore all the funcs
diff --git a/src/passes/pass.cpp b/src/passes/pass.cpp
index 41d2026bc..ae940a56d 100644
--- a/src/passes/pass.cpp
+++ b/src/passes/pass.cpp
@@ -17,35 +17,34 @@
#include <chrono>
#include <sstream>
-#include "support/colors.h"
-#include "passes/passes.h"
-#include "pass.h"
-#include "wasm-validator.h"
-#include "wasm-io.h"
#include "ir/hashed.h"
#include "ir/module-utils.h"
+#include "pass.h"
+#include "passes/passes.h"
+#include "support/colors.h"
+#include "wasm-io.h"
+#include "wasm-validator.h"
namespace wasm {
// PassRegistry
-PassRegistry::PassRegistry() {
- registerPasses();
-}
+PassRegistry::PassRegistry() { registerPasses(); }
static PassRegistry singleton;
-PassRegistry* PassRegistry::get() {
- return &singleton;
-}
+PassRegistry* PassRegistry::get() { return &singleton; }
-void PassRegistry::registerPass(const char* name, const char *description, Creator create) {
+void PassRegistry::registerPass(const char* name,
+ const char* description,
+ Creator create) {
assert(passInfos.find(name) == passInfos.end());
passInfos[name] = PassInfo(description, create);
}
Pass* PassRegistry::createPass(std::string name) {
- if (passInfos.find(name) == passInfos.end()) return nullptr;
+ if (passInfos.find(name) == passInfos.end())
+ return nullptr;
auto ret = passInfos[name].create();
ret->name = name;
return ret;
@@ -67,87 +66,234 @@ std::string PassRegistry::getPassDescription(std::string name) {
// PassRunner
void PassRegistry::registerPasses() {
- registerPass("dae", "removes arguments to calls in an lto-like manner", createDAEPass);
- registerPass("dae-optimizing", "removes arguments to calls in an lto-like manner, and optimizes where we removed", createDAEOptimizingPass);
- registerPass("coalesce-locals", "reduce # of locals by coalescing", createCoalesceLocalsPass);
- registerPass("coalesce-locals-learning", "reduce # of locals by coalescing and learning", createCoalesceLocalsWithLearningPass);
- registerPass("code-pushing", "push code forward, potentially making it not always execute", createCodePushingPass);
- registerPass("code-folding", "fold code, merging duplicates", createCodeFoldingPass);
- registerPass("const-hoisting", "hoist repeated constants to a local", createConstHoistingPass);
- registerPass("dce", "removes unreachable code", createDeadCodeEliminationPass);
- registerPass("directize", "turns indirect calls into direct ones", createDirectizePass);
- registerPass("dfo", "optimizes using the DataFlow SSA IR", createDataFlowOptsPass);
- registerPass("duplicate-function-elimination", "removes duplicate functions", createDuplicateFunctionEliminationPass);
- registerPass("extract-function", "leaves just one function (useful for debugging)", createExtractFunctionPass);
- registerPass("flatten", "flattens out code, removing nesting", createFlattenPass);
- registerPass("fpcast-emu", "emulates function pointer casts, allowing incorrect indirect calls to (sometimes) work", createFuncCastEmulationPass);
- registerPass("func-metrics", "reports function metrics", createFunctionMetricsPass);
- registerPass("generate-stack-ir", "generate Stack IR", createGenerateStackIRPass);
- registerPass("inlining", "inline functions (you probably want inlining-optimizing)", createInliningPass);
- registerPass("inlining-optimizing", "inline functions and optimizes where we inlined", createInliningOptimizingPass);
- registerPass("legalize-js-interface", "legalizes i64 types on the import/export boundary", createLegalizeJSInterfacePass);
- registerPass("legalize-js-interface-minimally", "legalizes i64 types on the import/export boundary in a minimal manner, only on things only JS will call", createLegalizeJSInterfaceMinimallyPass);
- registerPass("local-cse", "common subexpression elimination inside basic blocks", createLocalCSEPass);
- registerPass("log-execution", "instrument the build with logging of where execution goes", createLogExecutionPass);
- registerPass("i64-to-i32-lowering", "lower all uses of i64s to use i32s instead", createI64ToI32LoweringPass);
- registerPass("instrument-locals", "instrument the build with code to intercept all loads and stores", createInstrumentLocalsPass);
- registerPass("instrument-memory", "instrument the build with code to intercept all loads and stores", createInstrumentMemoryPass);
- registerPass("licm", "loop invariant code motion", createLoopInvariantCodeMotionPass);
- registerPass("limit-segments", "attempt to merge segments to fit within web limits", createLimitSegmentsPass);
- registerPass("memory-packing", "packs memory into separate segments, skipping zeros", createMemoryPackingPass);
- registerPass("merge-blocks", "merges blocks to their parents", createMergeBlocksPass);
- registerPass("merge-locals", "merges locals when beneficial", createMergeLocalsPass);
+ registerPass(
+ "dae", "removes arguments to calls in an lto-like manner", createDAEPass);
+ registerPass("dae-optimizing",
+ "removes arguments to calls in an lto-like manner, and "
+ "optimizes where we removed",
+ createDAEOptimizingPass);
+ registerPass("coalesce-locals",
+ "reduce # of locals by coalescing",
+ createCoalesceLocalsPass);
+ registerPass("coalesce-locals-learning",
+ "reduce # of locals by coalescing and learning",
+ createCoalesceLocalsWithLearningPass);
+ registerPass("code-pushing",
+ "push code forward, potentially making it not always execute",
+ createCodePushingPass);
+ registerPass(
+ "code-folding", "fold code, merging duplicates", createCodeFoldingPass);
+ registerPass("const-hoisting",
+ "hoist repeated constants to a local",
+ createConstHoistingPass);
+ registerPass(
+ "dce", "removes unreachable code", createDeadCodeEliminationPass);
+ registerPass(
+ "directize", "turns indirect calls into direct ones", createDirectizePass);
+ registerPass(
+ "dfo", "optimizes using the DataFlow SSA IR", createDataFlowOptsPass);
+ registerPass("duplicate-function-elimination",
+ "removes duplicate functions",
+ createDuplicateFunctionEliminationPass);
+ registerPass("extract-function",
+ "leaves just one function (useful for debugging)",
+ createExtractFunctionPass);
+ registerPass(
+ "flatten", "flattens out code, removing nesting", createFlattenPass);
+ registerPass("fpcast-emu",
+ "emulates function pointer casts, allowing incorrect indirect "
+ "calls to (sometimes) work",
+ createFuncCastEmulationPass);
+ registerPass(
+ "func-metrics", "reports function metrics", createFunctionMetricsPass);
+ registerPass(
+ "generate-stack-ir", "generate Stack IR", createGenerateStackIRPass);
+ registerPass("inlining",
+ "inline functions (you probably want inlining-optimizing)",
+ createInliningPass);
+ registerPass("inlining-optimizing",
+ "inline functions and optimizes where we inlined",
+ createInliningOptimizingPass);
+ registerPass("legalize-js-interface",
+ "legalizes i64 types on the import/export boundary",
+ createLegalizeJSInterfacePass);
+ registerPass("legalize-js-interface-minimally",
+ "legalizes i64 types on the import/export boundary in a minimal "
+ "manner, only on things only JS will call",
+ createLegalizeJSInterfaceMinimallyPass);
+ registerPass("local-cse",
+ "common subexpression elimination inside basic blocks",
+ createLocalCSEPass);
+ registerPass("log-execution",
+ "instrument the build with logging of where execution goes",
+ createLogExecutionPass);
+ registerPass("i64-to-i32-lowering",
+ "lower all uses of i64s to use i32s instead",
+ createI64ToI32LoweringPass);
+ registerPass(
+ "instrument-locals",
+ "instrument the build with code to intercept all loads and stores",
+ createInstrumentLocalsPass);
+ registerPass(
+ "instrument-memory",
+ "instrument the build with code to intercept all loads and stores",
+ createInstrumentMemoryPass);
+ registerPass(
+ "licm", "loop invariant code motion", createLoopInvariantCodeMotionPass);
+ registerPass("limit-segments",
+ "attempt to merge segments to fit within web limits",
+ createLimitSegmentsPass);
+ registerPass("memory-packing",
+ "packs memory into separate segments, skipping zeros",
+ createMemoryPackingPass);
+ registerPass(
+ "merge-blocks", "merges blocks to their parents", createMergeBlocksPass);
+ registerPass(
+ "merge-locals", "merges locals when beneficial", createMergeLocalsPass);
registerPass("metrics", "reports metrics", createMetricsPass);
- registerPass("minify-imports", "minifies import names (only those, and not export names), and emits a mapping to the minified ones", createMinifyImportsPass);
- registerPass("minify-imports-and-exports", "minifies both import and export names, and emits a mapping to the minified ones", createMinifyImportsAndExportsPass);
+ registerPass("minify-imports",
+ "minifies import names (only those, and not export names), and "
+ "emits a mapping to the minified ones",
+ createMinifyImportsPass);
+ registerPass("minify-imports-and-exports",
+ "minifies both import and export names, and emits a mapping to "
+ "the minified ones",
+ createMinifyImportsAndExportsPass);
registerPass("nm", "name list", createNameListPass);
- registerPass("no-exit-runtime", "removes calls to atexit(), which is valid if the C runtime will never be exited", createNoExitRuntimePass);
- registerPass("optimize-added-constants", "optimizes added constants into load/store offsets", createOptimizeAddedConstantsPass);
- registerPass("optimize-added-constants-propagate", "optimizes added constants into load/store offsets, propagating them across locals too", createOptimizeAddedConstantsPropagatePass);
- registerPass("optimize-instructions", "optimizes instruction combinations", createOptimizeInstructionsPass);
- registerPass("optimize-stack-ir", "optimize Stack IR", createOptimizeStackIRPass);
- registerPass("pick-load-signs", "pick load signs based on their uses", createPickLoadSignsPass);
- registerPass("post-emscripten", "miscellaneous optimizations for Emscripten-generated code", createPostEmscriptenPass);
- registerPass("precompute", "computes compile-time evaluatable expressions", createPrecomputePass);
- registerPass("precompute-propagate", "computes compile-time evaluatable expressions and propagates them through locals", createPrecomputePropagatePass);
+ registerPass("no-exit-runtime",
+ "removes calls to atexit(), which is valid if the C runtime "
+ "will never be exited",
+ createNoExitRuntimePass);
+ registerPass("optimize-added-constants",
+ "optimizes added constants into load/store offsets",
+ createOptimizeAddedConstantsPass);
+ registerPass("optimize-added-constants-propagate",
+ "optimizes added constants into load/store offsets, propagating "
+ "them across locals too",
+ createOptimizeAddedConstantsPropagatePass);
+ registerPass("optimize-instructions",
+ "optimizes instruction combinations",
+ createOptimizeInstructionsPass);
+ registerPass(
+ "optimize-stack-ir", "optimize Stack IR", createOptimizeStackIRPass);
+ registerPass("pick-load-signs",
+ "pick load signs based on their uses",
+ createPickLoadSignsPass);
+ registerPass("post-emscripten",
+ "miscellaneous optimizations for Emscripten-generated code",
+ createPostEmscriptenPass);
+ registerPass("precompute",
+ "computes compile-time evaluatable expressions",
+ createPrecomputePass);
+ registerPass("precompute-propagate",
+ "computes compile-time evaluatable expressions and propagates "
+ "them through locals",
+ createPrecomputePropagatePass);
registerPass("print", "print in s-expression format", createPrinterPass);
- registerPass("print-minified", "print in minified s-expression format", createMinifiedPrinterPass);
- registerPass("print-features", "print options for enabled features", createPrintFeaturesPass);
- registerPass("print-full", "print in full s-expression format", createFullPrinterPass);
- registerPass("print-call-graph", "print call graph", createPrintCallGraphPass);
- registerPass("print-stack-ir", "print out Stack IR (useful for internal debugging)", createPrintStackIRPass);
- registerPass("relooper-jump-threading", "thread relooper jumps (fastcomp output only)", createRelooperJumpThreadingPass);
- registerPass("remove-non-js-ops", "removes operations incompatible with js", createRemoveNonJSOpsPass);
- registerPass("remove-imports", "removes imports and replaces them with nops", createRemoveImportsPass);
- registerPass("remove-memory", "removes memory segments", createRemoveMemoryPass);
- registerPass("remove-unused-brs", "removes breaks from locations that are not needed", createRemoveUnusedBrsPass);
- registerPass("remove-unused-module-elements", "removes unused module elements", createRemoveUnusedModuleElementsPass);
- registerPass("remove-unused-nonfunction-module-elements", "removes unused module elements that are not functions", createRemoveUnusedNonFunctionModuleElementsPass);
- registerPass("remove-unused-names", "removes names from locations that are never branched to", createRemoveUnusedNamesPass);
- registerPass("reorder-functions", "sorts functions by access frequency", createReorderFunctionsPass);
- registerPass("reorder-locals", "sorts locals by access frequency", createReorderLocalsPass);
- registerPass("rereloop", "re-optimize control flow using the relooper algorithm", createReReloopPass);
- registerPass("rse", "remove redundant local.sets", createRedundantSetEliminationPass);
- registerPass("safe-heap", "instrument loads and stores to check for invalid behavior", createSafeHeapPass);
- registerPass("simplify-locals", "miscellaneous locals-related optimizations", createSimplifyLocalsPass);
- registerPass("simplify-locals-nonesting", "miscellaneous locals-related optimizations (no nesting at all; preserves flatness)", createSimplifyLocalsNoNestingPass);
- registerPass("simplify-locals-notee", "miscellaneous locals-related optimizations (no tees)", createSimplifyLocalsNoTeePass);
- registerPass("simplify-locals-nostructure", "miscellaneous locals-related optimizations (no structure)", createSimplifyLocalsNoStructurePass);
- registerPass("simplify-locals-notee-nostructure", "miscellaneous locals-related optimizations (no tees or structure)", createSimplifyLocalsNoTeeNoStructurePass);
+ registerPass("print-minified",
+ "print in minified s-expression format",
+ createMinifiedPrinterPass);
+ registerPass("print-features",
+ "print options for enabled features",
+ createPrintFeaturesPass);
+ registerPass(
+ "print-full", "print in full s-expression format", createFullPrinterPass);
+ registerPass(
+ "print-call-graph", "print call graph", createPrintCallGraphPass);
+ registerPass("print-stack-ir",
+ "print out Stack IR (useful for internal debugging)",
+ createPrintStackIRPass);
+ registerPass("relooper-jump-threading",
+ "thread relooper jumps (fastcomp output only)",
+ createRelooperJumpThreadingPass);
+ registerPass("remove-non-js-ops",
+ "removes operations incompatible with js",
+ createRemoveNonJSOpsPass);
+ registerPass("remove-imports",
+ "removes imports and replaces them with nops",
+ createRemoveImportsPass);
+ registerPass(
+ "remove-memory", "removes memory segments", createRemoveMemoryPass);
+ registerPass("remove-unused-brs",
+ "removes breaks from locations that are not needed",
+ createRemoveUnusedBrsPass);
+ registerPass("remove-unused-module-elements",
+ "removes unused module elements",
+ createRemoveUnusedModuleElementsPass);
+ registerPass("remove-unused-nonfunction-module-elements",
+ "removes unused module elements that are not functions",
+ createRemoveUnusedNonFunctionModuleElementsPass);
+ registerPass("remove-unused-names",
+ "removes names from locations that are never branched to",
+ createRemoveUnusedNamesPass);
+ registerPass("reorder-functions",
+ "sorts functions by access frequency",
+ createReorderFunctionsPass);
+ registerPass("reorder-locals",
+ "sorts locals by access frequency",
+ createReorderLocalsPass);
+ registerPass("rereloop",
+ "re-optimize control flow using the relooper algorithm",
+ createReReloopPass);
+ registerPass(
+ "rse", "remove redundant local.sets", createRedundantSetEliminationPass);
+ registerPass("safe-heap",
+ "instrument loads and stores to check for invalid behavior",
+ createSafeHeapPass);
+ registerPass("simplify-locals",
+ "miscellaneous locals-related optimizations",
+ createSimplifyLocalsPass);
+ registerPass("simplify-locals-nonesting",
+ "miscellaneous locals-related optimizations (no nesting at all; "
+ "preserves flatness)",
+ createSimplifyLocalsNoNestingPass);
+ registerPass("simplify-locals-notee",
+ "miscellaneous locals-related optimizations (no tees)",
+ createSimplifyLocalsNoTeePass);
+ registerPass("simplify-locals-nostructure",
+ "miscellaneous locals-related optimizations (no structure)",
+ createSimplifyLocalsNoStructurePass);
+ registerPass(
+ "simplify-locals-notee-nostructure",
+ "miscellaneous locals-related optimizations (no tees or structure)",
+ createSimplifyLocalsNoTeeNoStructurePass);
registerPass("souperify", "emit Souper IR in text form", createSouperifyPass);
- registerPass("souperify-single-use", "emit Souper IR in text form (single-use nodes only)", createSouperifySingleUsePass);
- registerPass("spill-pointers", "spill pointers to the C stack (useful for Boehm-style GC)", createSpillPointersPass);
- registerPass("ssa", "ssa-ify variables so that they have a single assignment", createSSAifyPass);
- registerPass("ssa-nomerge", "ssa-ify variables so that they have a single assignment, ignoring merges", createSSAifyNoMergePass);
- registerPass("strip", "deprecated; same as strip-debug", createStripDebugPass);
- registerPass("strip-debug", "strip debug info (including the names section)", createStripDebugPass);
- registerPass("strip-producers", "strip the wasm producers section", createStripProducersPass);
- registerPass("strip-target-features", "strip the wasm target features section", createStripTargetFeaturesPass);
- registerPass("trap-mode-clamp", "replace trapping operations with clamping semantics", createTrapModeClamp);
- registerPass("trap-mode-js", "replace trapping operations with js semantics", createTrapModeJS);
- registerPass("untee", "removes local.tees, replacing them with sets and gets", createUnteePass);
+ registerPass("souperify-single-use",
+ "emit Souper IR in text form (single-use nodes only)",
+ createSouperifySingleUsePass);
+ registerPass("spill-pointers",
+ "spill pointers to the C stack (useful for Boehm-style GC)",
+ createSpillPointersPass);
+ registerPass("ssa",
+ "ssa-ify variables so that they have a single assignment",
+ createSSAifyPass);
+ registerPass(
+ "ssa-nomerge",
+ "ssa-ify variables so that they have a single assignment, ignoring merges",
+ createSSAifyNoMergePass);
+ registerPass(
+ "strip", "deprecated; same as strip-debug", createStripDebugPass);
+ registerPass("strip-debug",
+ "strip debug info (including the names section)",
+ createStripDebugPass);
+ registerPass("strip-producers",
+ "strip the wasm producers section",
+ createStripProducersPass);
+ registerPass("strip-target-features",
+ "strip the wasm target features section",
+ createStripTargetFeaturesPass);
+ registerPass("trap-mode-clamp",
+ "replace trapping operations with clamping semantics",
+ createTrapModeClamp);
+ registerPass("trap-mode-js",
+ "replace trapping operations with js semantics",
+ createTrapModeJS);
+ registerPass("untee",
+ "removes local.tees, replacing them with sets and gets",
+ createUnteePass);
registerPass("vacuum", "removes obviously unneeded code", createVacuumPass);
-// registerPass("lower-i64", "lowers i64 into pairs of i32s", createLowerInt64Pass);
+ // registerPass(
+ // "lower-i64", "lowers i64 into pairs of i32s", createLowerInt64Pass);
}
void PassRunner::addDefaultOptimizationPasses() {
@@ -191,10 +337,13 @@ void PassRunner::addDefaultFunctionOptimizationPasses() {
if (options.optimizeLevel >= 2 || options.shrinkLevel >= 2) {
add("code-pushing");
}
- add("simplify-locals-nostructure"); // don't create if/block return values yet, as coalesce can remove copies that that could inhibit
+ // don't create if/block return values yet, as coalesce can remove copies that
+ // that could inhibit
+ add("simplify-locals-nostructure");
add("vacuum"); // previous pass creates garbage
add("reorder-locals");
- add("remove-unused-brs"); // simplify-locals opens opportunities for optimizations
+ // simplify-locals opens opportunities for optimizations
+ add("remove-unused-brs");
// if we are willing to work hard, also optimize copies before coalescing
if (options.optimizeLevel >= 3 || options.shrinkLevel >= 2) {
add("merge-locals"); // very slow on e.g. sqlite
@@ -209,10 +358,10 @@ void PassRunner::addDefaultFunctionOptimizationPasses() {
if (options.optimizeLevel >= 3 || options.shrinkLevel >= 1) {
add("code-folding");
}
- add("merge-blocks"); // makes remove-unused-brs more effective
- add("remove-unused-brs"); // coalesce-locals opens opportunities
+ add("merge-blocks"); // makes remove-unused-brs more effective
+ add("remove-unused-brs"); // coalesce-locals opens opportunities
add("remove-unused-names"); // remove-unused-brs opens opportunities
- add("merge-blocks"); // clean up remove-unused-brs new blocks
+ add("merge-blocks"); // clean up remove-unused-brs new blocks
// late propagation
if (options.optimizeLevel >= 3 || options.shrinkLevel >= 2) {
add("precompute-propagate");
@@ -237,10 +386,12 @@ void PassRunner::addDefaultGlobalOptimizationPostPasses() {
if (options.optimizeLevel >= 2 || options.shrinkLevel >= 2) {
add("inlining-optimizing");
}
- add("duplicate-function-elimination"); // optimizations show more functions as duplicate
+ // optimizations show more functions as duplicate
+ add("duplicate-function-elimination");
add("remove-unused-module-elements");
add("memory-packing");
- add("directize"); // may allow more inlining/dae/etc., need --converge for that
+ // may allow more inlining/dae/etc., need --converge for that
+ add("directize");
// perform Stack IR optimizations here, at the very end of the
// optimization pipeline
if (options.optimizeLevel >= 2 || options.shrinkLevel >= 1) {
@@ -266,7 +417,8 @@ static void dumpWast(Name name, Module* wasm) {
void PassRunner::run() {
static const int passDebug = getPassDebug();
if (!isNested && (options.debug || passDebug)) {
- // for debug logging purposes, run each pass in full before running the other
+ // for debug logging purposes, run each pass in full before running the
+ // other
auto totalTime = std::chrono::duration<double>(0);
size_t padding = 0;
WasmValidator::Flags validationFlags = WasmValidator::Minimal;
@@ -281,7 +433,8 @@ void PassRunner::run() {
dumpWast("before", wasm);
}
for (auto* pass : passes) {
- // ignoring the time, save a printout of the module before, in case this pass breaks it, so we can print the before and after
+ // ignoring the time, save a printout of the module before, in case this
+ // pass breaks it, so we can print the before and after
std::stringstream moduleBefore;
if (passDebug == 2) {
WasmPrinter::printModule(wasm, moduleBefore);
@@ -294,9 +447,8 @@ void PassRunner::run() {
auto before = std::chrono::steady_clock::now();
if (pass->isFunctionParallel()) {
// function-parallel passes should get a new instance per function
- ModuleUtils::iterDefinedFunctions(*wasm, [&](Function* func) {
- runPassOnFunction(pass, func);
- });
+ ModuleUtils::iterDefinedFunctions(
+ *wasm, [&](Function* func) { runPassOnFunction(pass, func); });
} else {
runPass(pass);
}
@@ -310,9 +462,14 @@ void PassRunner::run() {
if (!WasmValidator().validate(*wasm, validationFlags)) {
WasmPrinter::printModule(wasm);
if (passDebug >= 2) {
- std::cerr << "Last pass (" << pass->name << ") broke validation. Here is the module before: \n" << moduleBefore.str() << "\n";
+ std::cerr << "Last pass (" << pass->name
+ << ") broke validation. Here is the module before: \n"
+ << moduleBefore.str() << "\n";
} else {
- std::cerr << "Last pass (" << pass->name << ") broke validation. Run with BINARYEN_PASS_DEBUG=2 in the env to see the earlier state, or 3 to dump byn-* files for each pass\n";
+ std::cerr << "Last pass (" << pass->name
+ << ") broke validation. Run with BINARYEN_PASS_DEBUG=2 "
+ "in the env to see the earlier state, or 3 to dump "
+ "byn-* files for each pass\n";
}
abort();
}
@@ -321,7 +478,8 @@ void PassRunner::run() {
dumpWast(pass->name, wasm);
}
}
- std::cerr << "[PassRunner] passes took " << totalTime.count() << " seconds." << std::endl;
+ std::cerr << "[PassRunner] passes took " << totalTime.count() << " seconds."
+ << std::endl;
if (options.validate) {
std::cerr << "[PassRunner] (final validation)\n";
if (!WasmValidator().validate(*wasm, validationFlags)) {
@@ -331,14 +489,15 @@ void PassRunner::run() {
}
}
} else {
- // non-debug normal mode, run them in an optimal manner - for locality it is better
- // to run as many passes as possible on a single function before moving to the next
+ // non-debug normal mode, run them in an optimal manner - for locality it is
+ // better to run as many passes as possible on a single function before
+ // moving to the next
std::vector<Pass*> stack;
auto flush = [&]() {
if (stack.size() > 0) {
// run the stack of passes on all the functions, in parallel
size_t num = ThreadPool::get()->size();
- std::vector<std::function<ThreadWorkState ()>> doWorkers;
+ std::vector<std::function<ThreadWorkState()>> doWorkers;
std::atomic<size_t> nextFunction;
nextFunction.store(0);
size_t numFunctions = wasm->functions.size();
@@ -380,7 +539,8 @@ void PassRunner::run() {
void PassRunner::runOnFunction(Function* func) {
if (options.debug) {
- std::cerr << "[PassRunner] running passes on function " << func->name << std::endl;
+ std::cerr << "[PassRunner] running passes on function " << func->name
+ << std::endl;
}
for (auto* pass : passes) {
runPassOnFunction(pass, func);
@@ -425,14 +585,18 @@ struct AfterEffectFunctionChecker {
if (beganWithStackIR && func->stackIR) {
auto after = FunctionHasher::hashFunction(func);
if (after != originalFunctionHash) {
- Fatal() << "[PassRunner] PASS_DEBUG check failed: had Stack IR before and after the pass ran, and the pass modified the main IR, which invalidates Stack IR - pass should have been marked 'modifiesBinaryenIR'";
+ Fatal() << "[PassRunner] PASS_DEBUG check failed: had Stack IR before "
+ "and after the pass ran, and the pass modified the main IR, "
+ "which invalidates Stack IR - pass should have been marked "
+ "'modifiesBinaryenIR'";
}
}
}
};
// Runs checks on the entire module, in a non-function-parallel pass.
-// In particular, in such a pass functions may be removed or renamed, track that.
+// In particular, in such a pass functions may be removed or renamed, track
+// that.
struct AfterEffectModuleChecker {
Module* module;
@@ -473,7 +637,9 @@ struct AfterEffectModuleChecker {
}
void error() {
- Fatal() << "[PassRunner] PASS_DEBUG check failed: had Stack IR before and after the pass ran, and the pass modified global function state - pass should have been marked 'modifiesBinaryenIR'";
+ Fatal() << "[PassRunner] PASS_DEBUG check failed: had Stack IR before and "
+ "after the pass ran, and the pass modified global function "
+ "state - pass should have been marked 'modifiesBinaryenIR'";
}
bool hasAnyStackIR() {
@@ -530,7 +696,8 @@ void PassRunner::handleAfterEffects(Pass* pass, Function* func) {
}
int PassRunner::getPassDebug() {
- static const int passDebug = getenv("BINARYEN_PASS_DEBUG") ? atoi(getenv("BINARYEN_PASS_DEBUG")) : 0;
+ static const int passDebug =
+ getenv("BINARYEN_PASS_DEBUG") ? atoi(getenv("BINARYEN_PASS_DEBUG")) : 0;
return passDebug;
}
diff --git a/src/passes/passes.h b/src/passes/passes.h
index af9141ac9..fc01c1cd5 100644
--- a/src/passes/passes.h
+++ b/src/passes/passes.h
@@ -102,6 +102,6 @@ Pass* createTrapModeJS();
Pass* createUnteePass();
Pass* createVacuumPass();
-}
+} // namespace wasm
#endif