Diffstat (limited to 'src/passes/OptimizeInstructions.cpp')
-rw-r--r--  src/passes/OptimizeInstructions.cpp  138
1 file changed, 79 insertions, 59 deletions
diff --git a/src/passes/OptimizeInstructions.cpp b/src/passes/OptimizeInstructions.cpp
index 24b7ee845..3952c765f 100644
--- a/src/passes/OptimizeInstructions.cpp
+++ b/src/passes/OptimizeInstructions.cpp
@@ -133,6 +133,57 @@ struct LocalScanner : PostWalker<LocalScanner> {
   }
 };
 
+namespace {
+// perform some final optimizations
+struct FinalOptimizer : public PostWalker<FinalOptimizer> {
+  const PassOptions& passOptions;
+
+  FinalOptimizer(const PassOptions& passOptions) : passOptions(passOptions) {}
+
+  void visitBinary(Binary* curr) {
+    if (auto* replacement = optimize(curr)) {
+      replaceCurrent(replacement);
+    }
+  }
+
+  Binary* optimize(Binary* curr) {
+    using namespace Abstract;
+    using namespace Match;
+    {
+      Const* c;
+      if (matches(curr, binary(Add, any(), ival(&c)))) {
+        // normalize x + (-C) ==> x - C
+        if (c->value.isNegative()) {
+          c->value = c->value.neg();
+          curr->op = Abstract::getBinary(c->type, Sub);
+        }
+        // Wasm binary encoding uses signed LEBs, which slightly favor negative
+        // numbers: -64 is more efficient than +64 etc., as well as other powers
+        // of two 7 bits etc. higher. we therefore prefer x - -64 over x + 64.
+        // in theory we could just prefer negative numbers over positive, but
+        // that can have bad effects on gzip compression (as it would mean more
+        // subtractions than the more common additions).
+        int64_t value = c->value.getInteger();
+        if (value == 0x40LL || value == 0x2000LL || value == 0x100000LL ||
+            value == 0x8000000LL || value == 0x400000000LL ||
+            value == 0x20000000000LL || value == 0x1000000000000LL ||
+            value == 0x80000000000000LL || value == 0x4000000000000000LL) {
+          c->value = c->value.neg();
+          if (curr->op == Abstract::getBinary(c->type, Add)) {
+            curr->op = Abstract::getBinary(c->type, Sub);
+          } else {
+            curr->op = Abstract::getBinary(c->type, Add);
+          }
+        }
+        return curr;
+      }
+    }
+    return nullptr;
+  }
+};
+
+} // anonymous namespace
+
 // Create a custom matcher for checking side effects
 template<class Opt> struct PureMatcherKind {};
 template<class Opt>
@@ -167,6 +218,10 @@ struct OptimizeInstructions
     }
     // main walk
     super::doWalkFunction(func);
+    {
+      FinalOptimizer optimizer(getPassOptions());
+      optimizer.walkFunction(func);
+    }
   }
 
   void visitExpression(Expression* curr) {
@@ -210,7 +265,7 @@ struct OptimizeInstructions
     FeatureSet features = getModule()->features;
 
     if (auto* binary = curr->dynCast<Binary>()) {
-      if (isSymmetricOrRelational(binary)) {
+      if (shouldCanonicalize(binary)) {
         canonicalize(binary);
       }
     }
@@ -517,11 +572,8 @@
       }
       // note that both left and right may be consts, but then we let
       // precompute compute the constant result
-    } else if (binary->op == AddInt32 || binary->op == AddInt64) {
-      if (auto* ret = optimizeAddedConstants(binary)) {
-        return ret;
-      }
-    } else if (binary->op == SubInt32 || binary->op == SubInt64) {
+    } else if (binary->op == AddInt32 || binary->op == AddInt64 ||
+               binary->op == SubInt32 || binary->op == SubInt64) {
       if (auto* ret = optimizeAddedConstants(binary)) {
         return ret;
       }
@@ -895,7 +947,7 @@ private:
   // Canonicalizing the order of a symmetric binary helps us
   // write more concise pattern matching code elsewhere.
   void canonicalize(Binary* binary) {
-    assert(isSymmetricOrRelational(binary));
+    assert(shouldCanonicalize(binary));
     auto swap = [&]() {
       assert(canReorder(binary->left, binary->right));
       if (binary->isRelational()) {
@@ -912,7 +964,13 @@ private:
     if (binary->left->is<Const>() && !binary->right->is<Const>()) {
       return swap();
     }
-    if (binary->right->is<Const>()) {
+    if (auto* c = binary->right->dynCast<Const>()) {
+      // x - C ==> x + (-C)
+      // Prefer use addition if there is a constant on the right.
+      if (binary->op == Abstract::getBinary(c->type, Abstract::Sub)) {
+        c->value = c->value.neg();
+        binary->op = Abstract::getBinary(c->type, Abstract::Add);
+      }
       return;
     }
     // Prefer a get on the right.
@@ -1199,6 +1257,10 @@ private:
     auto type = curr->type;
     auto* left = curr->left->dynCast<Const>();
    auto* right = curr->right->dynCast<Const>();
+    // Canonicalization prefers an add instead of a subtract wherever
+    // possible. That prevents a subtracted constant on the right,
+    // as it would be added. And for a zero on the left, it can't be
+    // removed (it is how we negate ints).
     if (curr->op == Abstract::getBinary(type, Abstract::Add)) {
       if (left && left->value.isZero()) {
         replaceCurrent(curr->right);
@@ -1208,12 +1270,6 @@ private:
         replaceCurrent(curr->left);
         return;
       }
-    } else if (curr->op == Abstract::getBinary(type, Abstract::Sub)) {
-      // we must leave a left zero, as it is how we negate ints
-      if (right && right->value.isZero()) {
-        replaceCurrent(curr->left);
-        return;
-      }
     } else if (curr->op == Abstract::getBinary(type, Abstract::Shl)) {
       // shifting a 0 is a 0, or anything by 0 has no effect, all unless the
       // shift has side effects
@@ -1713,30 +1769,6 @@ private:
       }
     }
     {
-      // Wasm binary encoding uses signed LEBs, which slightly favor negative
-      // numbers: -64 is more efficient than +64 etc., as well as other powers
-      // of two 7 bits etc. higher. we therefore prefer x - -64 over x + 64. in
-      // theory we could just prefer negative numbers over positive, but that
-      // can have bad effects on gzip compression (as it would mean more
-      // subtractions than the more common additions). TODO: Simplify this by
-      // adding an ival matcher than can bind int64_t vars.
-      int64_t value;
-      if ((matches(curr, binary(Add, any(), ival(&value))) ||
-           matches(curr, binary(Sub, any(), ival(&value)))) &&
-          (value == 0x40 || value == 0x2000 || value == 0x100000 ||
-           value == 0x8000000 || value == 0x400000000LL ||
-           value == 0x20000000000LL || value == 0x1000000000000LL ||
-           value == 0x80000000000000LL || value == 0x4000000000000000LL)) {
-        right->value = right->value.neg();
-        if (matches(curr, binary(Add, any(), constant()))) {
-          curr->op = Abstract::getBinary(type, Sub);
-        } else {
-          curr->op = Abstract::getBinary(type, Add);
-        }
-        return curr;
-      }
-    }
-    {
       double value;
       if (matches(curr, binary(Sub, any(), fval(&value))) && value == 0.0) {
         // x - (-0.0) ==> x + 0.0
@@ -1837,24 +1869,11 @@ private:
       curr->right = x;
       return curr;
     }
-    // C1 - (x - C2) ==> (C1 + C2) - x
-    if (matches(curr,
-                binary(Sub, ival(&c1), binary(Sub, any(&x), ival(&c2))))) {
-      left->value = c1->value.add(c2->value);
-      curr->right = x;
-      return curr;
-    }
     // C1 - (C2 - x) ==> x + (C1 - C2)
     if (matches(curr,
                 binary(Sub, ival(&c1), binary(Sub, ival(&c2), any(&x))))) {
       left->value = c1->value.sub(c2->value);
-      if (left->value.isNegative()) {
-        // -C1 - (C2 - x) ==> x - (C1 - C2)
-        left->value = left->value.neg();
-        curr->op = Abstract::getBinary(type, Sub);
-      } else {
-        curr->op = Abstract::getBinary(type, Add);
-      }
+      curr->op = Abstract::getBinary(type, Add);
       curr->right = x;
       std::swap(curr->left, curr->right);
       return curr;
@@ -1884,17 +1903,14 @@
       //   x + 5 == 7
       //   =>
       //   x == 2
-      if (left->op == Abstract::getBinary(type, Abstract::Add) ||
-          left->op == Abstract::getBinary(type, Abstract::Sub)) {
+      if (left->op == Abstract::getBinary(type, Abstract::Add)) {
         if (auto* leftConst = left->right->dynCast<Const>()) {
           if (auto* rightConst = curr->right->dynCast<Const>()) {
             return combineRelationalConstants(
               curr, left, leftConst, nullptr, rightConst);
           } else if (auto* rightBinary = curr->right->dynCast<Binary>()) {
             if (rightBinary->op ==
-                  Abstract::getBinary(type, Abstract::Add) ||
-                rightBinary->op ==
-                  Abstract::getBinary(type, Abstract::Sub)) {
+                  Abstract::getBinary(type, Abstract::Add)) {
               if (auto* rightConst = rightBinary->right->dynCast<Const>()) {
                 return combineRelationalConstants(
                   curr, left, leftConst, rightBinary, rightConst);
@@ -2389,7 +2405,11 @@
     }
   }
 
-  bool isSymmetricOrRelational(Binary* binary) {
+  bool shouldCanonicalize(Binary* binary) {
+    if ((binary->op == SubInt32 || binary->op == SubInt64) &&
+        binary->right->is<Const>() && !binary->left->is<Const>()) {
+      return true;
+    }
     if (Properties::isSymmetric(binary) || binary->isRelational()) {
       return true;
     }
@@ -2413,6 +2433,6 @@
   }
 };
 
-Pass* createOptimizeInstructionsPass() { return new OptimizeInstructions(); }
+Pass* createOptimizeInstructionsPass() { return new OptimizeInstructions; }
 
 } // namespace wasm
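A note on the size argument in the comments above: wasm encodes integer immediates as signed LEB128, where each byte carries 7 payload bits and bit 6 of the final byte doubles as the sign bit. -64 therefore fits in a single byte, while +64 needs a second byte just to hold a zero sign bit, and the same break-even recurs every 7 bits, which is exactly the list of constants FinalOptimizer checks (0x40, 0x2000, 0x100000, ... up to 0x4000000000000000). A minimal sizing sketch, assuming nothing from Binaryen (this is not its actual LEB writer):

#include <cstdint>
#include <cstdio>

// Count the bytes needed to encode `value` as signed LEB128: emit 7
// bits at a time until the remaining bits match the sign bit (bit 6)
// of the byte just emitted.
static int signedLEBSize(int64_t value) {
  int size = 0;
  bool more = true;
  while (more) {
    uint8_t byte = value & 0x7f;
    value >>= 7; // arithmetic shift on mainstream compilers, keeps the sign
    more = !((value == 0 && !(byte & 0x40)) || (value == -1 && (byte & 0x40)));
    size++;
  }
  return size;
}

int main() {
  // 2 bytes vs 1: the flip from x + 64 to x - -64 saves a byte.
  printf("+64 -> %d, -64 -> %d\n", signedLEBSize(64), signedLEBSize(-64));
  // 3 bytes vs 2: the same saving at the next 7-bit boundary.
  printf("+0x2000 -> %d, -0x2000 -> %d\n",
         signedLEBSize(0x2000), signedLEBSize(-0x2000));
}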
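The two halves of the patch cooperate. During the main walk, canonicalize() turns x - C into x + (-C) so that every later rule only has to match Add, which is what lets the various Sub-specific patterns elsewhere in the pass be deleted; the new FinalOptimizer then runs once at the end to restore a subtract wherever the negated constant encodes smaller. A rough model of that round trip on a bare (op, constant) pair, with hypothetical types standing in for Binaryen's Binary and Const nodes:

#include <cassert>
#include <cstdint>

enum class Op { Add, Sub };

// Stand-in for a binary expression whose right operand is a constant.
struct BinOp {
  Op op;
  int64_t c;
};

// Mirrors canonicalize(): x - C ==> x + (-C), so downstream rules
// only ever need to handle Add.
void canonicalize(BinOp& b) {
  if (b.op == Op::Sub) {
    b.c = -b.c;
    b.op = Op::Add;
  }
}

// Mirrors FinalOptimizer::optimize(): normalize x + (-C) back to
// x - C, then flip sign and opcode at the power-of-two boundaries
// where the negated constant's signed-LEB encoding is a byte shorter.
void finalize(BinOp& b) {
  if (b.op == Op::Add && b.c < 0) {
    b.c = -b.c;
    b.op = Op::Sub;
  }
  for (int64_t magic = 0x40;; magic <<= 7) { // 0x40, 0x2000, ..., 2^62
    if (b.c == magic) {
      b.c = -b.c;
      b.op = (b.op == Op::Add) ? Op::Sub : Op::Add;
      break;
    }
    if (magic == 0x4000000000000000LL) {
      break; // last value in the list; stop before the shift overflows
    }
  }
}

int main() {
  BinOp b{Op::Add, 64}; // x + 64: the constant costs 2 LEB bytes
  canonicalize(b);      // already an Add, unchanged
  finalize(b);          // x - -64: the constant now costs 1 byte
  assert(b.op == Op::Sub && b.c == -64);
}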