author    Alon Zakai <alonzakai@gmail.com>  2018-02-14 08:06:52 -0800
committer GitHub <noreply@github.com>       2018-02-14 08:06:52 -0800
commit    e97d485bb1f1818e2c2118d28507d49cb61ea57b (patch)
tree      e1ef9db3f9b95ba4d279ec6c421e0b1c4f2a3b22 /src
parent    41faf2409f3e1d8d2dcaf456141f4ce6ac6a3d75 (diff)
More simple math opts (#1414)
* optimize more simple math operations: mul of 0, or of 0, and of 0, mul of 1, mul of a power of 2, urem of a power of 2
* fix asm2wasm callImport parsing: the optimizer may get rid of the added offset to a function table
* update js builds
Diffstat (limited to 'src')
-rw-r--r--  src/asm2wasm.h                       |  7
-rw-r--r--  src/passes/OptimizeInstructions.cpp  | 47
-rw-r--r--  src/support/bits.h                   |  7
-rw-r--r--  src/support/utilities.h              |  8
4 files changed, 62 insertions(+), 7 deletions(-)
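
The identities this commit exploits are all standard i32 algebra. As a quick sanity check (a standalone sketch, not part of the commit), they can be spot-checked with a few values:

#include <cassert>
#include <cstdint>

int main() {
  // x is any i32 value; the "mul of 0" and "and of 0" rewrites
  // additionally require x to have no side effects (see the
  // EffectAnalyzer guard in the diff below).
  for (uint32_t x : {0u, 1u, 7u, 0x80000000u, 0xffffffffu}) {
    assert(x * 0 == 0);          // mul of 0
    assert((x | 0) == x);        // or of 0
    assert((x & 0) == 0);        // and of 0
    assert(x * 1 == x);          // mul of 1
    assert(x * 16 == (x << 4));  // mul of a power of 2 => shift
    assert(x % 16 == (x & 15));  // urem of a power of 2 => mask
  }
}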
diff --git a/src/asm2wasm.h b/src/asm2wasm.h
index 63b88f5fe..e2731eb08 100644
--- a/src/asm2wasm.h
+++ b/src/asm2wasm.h
@@ -1303,6 +1303,13 @@ void Asm2WasmBuilder::processAsm(Ref ast) {
if (auto* block = target->dynCast<Block>()) {
target = block->list.back();
}
+ // the added offset might have been optimized out, leaving only the call
+ if (auto* call = target->dynCast<CallImport>()) {
+ auto tableName = call->target;
+ if (parent->functionTableStarts.find(tableName) == parent->functionTableStarts.end()) return;
+ curr->target = parent->builder.makeConst(Literal((int32_t)parent->functionTableStarts[tableName]));
+ return;
+ }
auto* add = target->dynCast<Binary>();
if (!add) return;
if (add->right->is<CallImport>()) {
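
For context on the parsing fix: asm2wasm encodes an asm.js function-table call as an import call plus the table's start offset, and the pattern matcher expected an add node. Once the optimizer folds the offset away, only the bare CallImport remains. A minimal sketch of the newly handled shape (hypothetical mini-IR and table name; the real code uses Binaryen's Expression hierarchy and dynCast<>):

#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

struct Expr {
  enum Kind { Add, CallImport } kind;
  std::string target; // import name, for CallImport
};

// hypothetical table-start map, like parent->functionTableStarts
std::map<std::string, int32_t> functionTableStarts = {{"ftCall_vi", 8}};

// Resolve the table start for a call target; returns -1 when the shape
// is unrecognized (the real code falls through to the existing
// (add (call ...) (const ...)) matching, elided here).
int32_t resolveTableStart(const Expr& target) {
  if (target.kind == Expr::CallImport) {
    // the offset was optimized out, leaving only the call
    auto it = functionTableStarts.find(target.target);
    return it == functionTableStarts.end() ? -1 : it->second;
  }
  return -1;
}

int main() {
  Expr bareCall{Expr::CallImport, "ftCall_vi"};
  printf("table start: %d\n", resolveTableStart(bareCall)); // 8
}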
diff --git a/src/passes/OptimizeInstructions.cpp b/src/passes/OptimizeInstructions.cpp
index 7bd7aaa51..8fd98cfda 100644
--- a/src/passes/OptimizeInstructions.cpp
+++ b/src/passes/OptimizeInstructions.cpp
@@ -567,9 +567,16 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
}
}
}
- // some operations have no effect TODO: many more
+ // some math operations have trivial results TODO: many more
if (right->value == Literal(int32_t(0))) {
- if (binary->op == ShlInt32 || binary->op == ShrUInt32 || binary->op == ShrSInt32) {
+ if (binary->op == ShlInt32 || binary->op == ShrUInt32 || binary->op == ShrSInt32 || binary->op == OrInt32) {
+ return binary->left;
+ } else if ((binary->op == MulInt32 || binary->op == AndInt32) &&
+ !EffectAnalyzer(getPassOptions(), binary->left).hasSideEffects()) {
+ return binary->right;
+ }
+ } else if (right->value == Literal(int32_t(1))) {
+ if (binary->op == MulInt32) {
return binary->left;
}
}
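
The EffectAnalyzer guard matters because the mul-of-0 and and-of-0 rewrites replace the whole expression with the constant 0, discarding the left operand entirely; or-of-0 and mul-of-1 keep the operand, so they are always safe. A sketch of the distinction in plain C++ (the hypothetical noisy() stands in for an operand with side effects):

#include <cstdio>

int calls = 0;
int noisy() { calls++; return 42; } // has a side effect

int main() {
  int a = noisy() * 0; // result is 0, but the call must still run
  int b = noisy() | 0; // x | 0 == x: the operand survives, always safe
  int c = noisy() * 1; // x * 1 == x: likewise always safe
  // replacing `noisy() * 0` with a plain `0` would drop a call;
  // that is exactly what the hasSideEffects() check prevents
  printf("%d %d %d, calls=%d\n", a, b, c, calls); // 0 42 42, calls=3
}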
@@ -597,6 +604,17 @@ struct OptimizeInstructions : public WalkerPass<PostWalker<OptimizeInstructions,
}
}
}
+ // math operations on a constant power of 2 right side can be optimized
+ if (right->type == i32) {
+ uint32_t c = right->value.geti32();
+ if (IsPowerOf2(c)) {
+ if (binary->op == MulInt32) {
+ return optimizePowerOf2Mul(binary, c);
+ } else if (binary->op == RemUInt32) {
+ return optimizePowerOf2URem(binary, c);
+ }
+ }
+ }
}
// bitwise operations
if (binary->op == AndInt32) {
@@ -1024,6 +1042,31 @@ private:
}
}
+ // Optimize a multiply by a power of two on the right, which
+ // can be a shift.
+ // This doesn't shrink code size, and VMs likely optimize it anyhow,
+ // but it's still worth doing since
+ // * Often shifts are more common than muls.
+ // * The constant is smaller.
+ Expression* optimizePowerOf2Mul(Binary* binary, uint32_t c) {
+ uint32_t shifts = CountTrailingZeroes(c);
+ binary->op = ShlInt32;
+ binary->right->cast<Const>()->value = Literal(int32_t(shifts));
+ return binary;
+ }
+
+ // Optimize an unsigned remainder by a power of two on the right,
+ // which can be an AND mask.
+ // This doesn't shrink code size, and VMs likely optimize it anyhow,
+ // but it's still worth doing since
+ // * Usually ands are more common than urems.
+ // * The constant is slightly smaller.
+ Expression* optimizePowerOf2URem(Binary* binary, uint32_t c) {
+ binary->op = AndInt32;
+ binary->right->cast<Const>()->value = Literal(int32_t(c - 1));
+ return binary;
+ }
+
Expression* makeZeroExt(Expression* curr, int32_t bits) {
Builder builder(*getModule());
return builder.makeBinary(AndInt32, curr, builder.makeConst(Literal(Bits::lowBitMask(bits))));
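
Both helpers rely on standard power-of-two identities: x * 2^k == x << k, and x % 2^k == x & (2^k - 1) for unsigned x (the mask form does not hold for signed remainders, which is why only RemUInt32 is rewritten). A sketch checking them, with the GCC/Clang builtin standing in for the portable CountTrailingZeroes:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t c = 8; // a power of two, so __builtin_ctz(c) == 3
  for (uint32_t x : {0u, 1u, 5u, 123u, 0xffffffffu}) {
    // optimizePowerOf2Mul: x * c  ==>  x << ctz(c)
    if (x * c != (x << __builtin_ctz(c))) return 1;
    // optimizePowerOf2URem: x % c  ==>  x & (c - 1), unsigned only
    if (x % c != (x & (c - 1))) return 1;
  }
  puts("shift and mask agree with mul and urem");
}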
diff --git a/src/support/bits.h b/src/support/bits.h
index 1fba58bf1..73d71e804 100644
--- a/src/support/bits.h
+++ b/src/support/bits.h
@@ -29,6 +29,9 @@
*
* We instead use portable and reasonably-fast implementations, while
* avoiding implementations with large lookup tables.
+ *
+ * TODO: The convention here should be changed PopCount => popCount,
+ * initial lowercase, to match the rest of the codebase.
*/
namespace wasm {
@@ -65,6 +68,10 @@ template <typename T>
int CountLeadingZeroes(T v) {
return CountLeadingZeroes(typename std::make_unsigned<T>::type(v));
}
+template <typename T>
+bool IsPowerOf2(T v) {
+ return v != 0 && PopCount(v) == 1;
+}
template <typename T, typename U>
inline static T RotateLeft(T val, U count) {
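
The new IsPowerOf2 matches the bit-trick it replaces in utilities.h (see the next hunk): a power of two is a nonzero value with exactly one set bit. A sketch comparing the two formulations, with the GCC/Clang builtin standing in for the portable PopCount:

#include <cstdint>
#include <cstdio>

static bool viaPopCount(uint32_t v) { return v != 0 && __builtin_popcount(v) == 1; }
static bool viaBitTrick(uint32_t v) { return v && !(v & (v - 1)); } // old utilities.h form

int main() {
  for (uint64_t v = 0; v <= (1u << 20); v++) {
    if (viaPopCount((uint32_t)v) != viaBitTrick((uint32_t)v)) {
      printf("mismatch at %llu\n", (unsigned long long)v);
      return 1;
    }
  }
  puts("both formulations agree");
}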
diff --git a/src/support/utilities.h b/src/support/utilities.h
index 66e7ed797..a2fff7f0a 100644
--- a/src/support/utilities.h
+++ b/src/support/utilities.h
@@ -26,6 +26,8 @@
#include <iostream>
#include <type_traits>
+#include "support/bits.h"
+
namespace wasm {
// Type punning needs to be done through this function to avoid undefined
@@ -41,12 +43,8 @@ inline Destination bit_cast(const Source& source) {
return destination;
}
-inline bool isPowerOf2(uint32_t v) {
- return v && !(v & (v - 1));
-}
-
inline size_t alignAddr(size_t address, size_t alignment) {
- assert(alignment && isPowerOf2((uint32_t)alignment) &&
+ assert(alignment && IsPowerOf2((uint32_t)alignment) &&
"Alignment is not a power of two!");
assert(address + alignment - 1 >= address);
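
alignAddr's rounding itself is not shown in this hunk, but the assertion guards the usual round-up idiom, which only works when the alignment's low mask bits form a contiguous run. A sketch of that idiom (an assumption about the elided body, not a quote of it):

#include <cassert>
#include <cstddef>

// standard round-up idiom, valid only for power-of-two alignments
inline size_t alignUp(size_t address, size_t alignment) {
  return (address + alignment - 1) & ~(alignment - 1);
}

int main() {
  assert(alignUp(13, 8) == 16);
  assert(alignUp(16, 8) == 16);
  // a non-power-of-two alignment silently misrounds: alignUp(13, 12)
  // yields 16 rather than 24 -- exactly the mistake that the
  // IsPowerOf2 assert rejects
}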