Diffstat (limited to 'src')
-rw-r--r--   src/ast/ExpressionManipulator.cpp    16
-rw-r--r--   src/ast/cost.h                       10
-rw-r--r--   src/ast/effects.h                    35
-rw-r--r--   src/ast_utils.h                       2
-rw-r--r--   src/passes/DeadCodeElimination.cpp   98
-rw-r--r--   src/passes/InstrumentMemory.cpp       1
-rw-r--r--   src/passes/MergeBlocks.cpp           27
-rw-r--r--   src/passes/Precompute.cpp             6
-rw-r--r--   src/wasm-builder.h                   35
9 files changed, 155 insertions, 75 deletions
diff --git a/src/ast/ExpressionManipulator.cpp b/src/ast/ExpressionManipulator.cpp
index 3868ca316..cca799e10 100644
--- a/src/ast/ExpressionManipulator.cpp
+++ b/src/ast/ExpressionManipulator.cpp
@@ -96,11 +96,27 @@ Expression* flexibleCopy(Expression* original, Module& wasm, CustomCopier custom
       return builder.makeSetGlobal(curr->name, copy(curr->value));
     }
     Expression* visitLoad(Load *curr) {
+      if (curr->isAtomic) {
+        return builder.makeAtomicLoad(curr->bytes, curr->signed_, curr->offset,
+                                      copy(curr->ptr), curr->type);
+      }
       return builder.makeLoad(curr->bytes, curr->signed_, curr->offset, curr->align, copy(curr->ptr), curr->type);
     }
     Expression* visitStore(Store *curr) {
+      if (curr->isAtomic) {
+        return builder.makeAtomicStore(curr->bytes, curr->offset, copy(curr->ptr), copy(curr->value), curr->valueType);
+      }
       return builder.makeStore(curr->bytes, curr->offset, curr->align, copy(curr->ptr), copy(curr->value), curr->valueType);
     }
+    Expression* visitAtomicRMW(AtomicRMW* curr) {
+      return builder.makeAtomicRMW(curr->op, curr->bytes, curr->offset,
+                                   copy(curr->ptr), copy(curr->value), curr->type);
+    }
+    Expression* visitAtomicCmpxchg(AtomicCmpxchg* curr) {
+      return builder.makeAtomicCmpxchg(curr->bytes, curr->offset,
+                                       copy(curr->ptr), copy(curr->expected), copy(curr->replacement),
+                                       curr->type);
+    }
     Expression* visitConst(Const *curr) {
       return builder.makeConst(curr->value);
     }
diff --git a/src/ast/cost.h b/src/ast/cost.h
index 151468650..56050b189 100644
--- a/src/ast/cost.h
+++ b/src/ast/cost.h
@@ -78,10 +78,16 @@ struct CostAnalyzer : public Visitor<CostAnalyzer, Index> {
     return 2;
   }
   Index visitLoad(Load *curr) {
-    return 1 + visit(curr->ptr);
+    return 1 + visit(curr->ptr) + 10 * curr->isAtomic;
   }
   Index visitStore(Store *curr) {
-    return 2 + visit(curr->ptr) + visit(curr->value);
+    return 2 + visit(curr->ptr) + visit(curr->value) + 10 * curr->isAtomic;
+  }
+  Index visitAtomicRMW(AtomicRMW *curr) {
+    return 100;
+  }
+  Index visitAtomicCmpxchg(AtomicCmpxchg* curr) {
+    return 100;
   }
   Index visitConst(Const *curr) {
     return 1;
diff --git a/src/ast/effects.h b/src/ast/effects.h
index 6e4bb617e..5392c0e50 100644
--- a/src/ast/effects.h
+++ b/src/ast/effects.h
@@ -53,12 +53,14 @@ struct EffectAnalyzer : public PostWalker<EffectAnalyzer> {
                              // (so a trap may occur later or earlier, if it is
                              // going to occur anyhow), but we can't remove them,
                              // they count as side effects
+  bool isAtomic = false; // An atomic load/store/RMW/Cmpxchg or an operator that
+                         // has a defined ordering wrt atomics (e.g. grow_memory)

   bool accessesLocal() { return localsRead.size() + localsWritten.size() > 0; }
   bool accessesGlobal() { return globalsRead.size() + globalsWritten.size() > 0; }
   bool accessesMemory() { return calls || readsMemory || writesMemory; }
-  bool hasSideEffects() { return calls || localsWritten.size() > 0 || writesMemory || branches || globalsWritten.size() > 0 || implicitTrap; }
-  bool hasAnything() { return branches || calls || accessesLocal() || readsMemory || writesMemory || accessesGlobal() || implicitTrap; }
+  bool hasSideEffects() { return calls || localsWritten.size() > 0 || writesMemory || branches || globalsWritten.size() > 0 || implicitTrap || isAtomic; }
+  bool hasAnything() { return branches || calls || accessesLocal() || readsMemory || writesMemory || accessesGlobal() || implicitTrap || isAtomic; }

   // checks if these effects would invalidate another set (e.g., if we write, we invalidate someone that reads, they can't be moved past us)
   bool invalidates(EffectAnalyzer& other) {
@@ -67,6 +69,12 @@ struct EffectAnalyzer : public PostWalker<EffectAnalyzer> {
         || (accessesMemory() && (other.writesMemory || other.calls))) {
       return true;
     }
+    // All atomics are sequentially consistent for now, and ordered wrt other
+    // memory references.
+    if ((isAtomic && other.accessesMemory()) ||
+        (other.isAtomic && accessesMemory())) {
+      return true;
+    }
     for (auto local : localsWritten) {
       if (other.localsWritten.count(local) || other.localsRead.count(local)) {
         return true;
@@ -176,10 +184,24 @@ struct EffectAnalyzer : public PostWalker<EffectAnalyzer> {
   }
   void visitLoad(Load *curr) {
     readsMemory = true;
+    isAtomic |= curr->isAtomic;
     if (!ignoreImplicitTraps) implicitTrap = true;
   }
   void visitStore(Store *curr) {
     writesMemory = true;
+    isAtomic |= curr->isAtomic;
+    if (!ignoreImplicitTraps) implicitTrap = true;
+  }
+  void visitAtomicRMW(AtomicRMW* curr) {
+    readsMemory = true;
+    writesMemory = true;
+    isAtomic = true;
+    if (!ignoreImplicitTraps) implicitTrap = true;
+  }
+  void visitAtomicCmpxchg(AtomicCmpxchg* curr) {
+    readsMemory = true;
+    writesMemory = true;
+    isAtomic = true;
     if (!ignoreImplicitTraps) implicitTrap = true;
   }
   void visitUnary(Unary *curr) {
@@ -219,11 +241,16 @@ struct EffectAnalyzer : public PostWalker<EffectAnalyzer> {
     }
   }
   void visitReturn(Return *curr) { branches = true; }
-  void visitHost(Host *curr) { calls = true; }
+  void visitHost(Host *curr) {
+    calls = true;
+    // grow_memory modifies the set of valid addresses, and thus can be modeled as modifying memory
+    writesMemory = true;
+    // Atomics are also sequentially consistent with grow_memory.
+    isAtomic = true;
+  }
   void visitUnreachable(Unreachable *curr) { branches = true; }
 };

 } // namespace wasm

 #endif // wasm_ast_effects_h
-
diff --git a/src/ast_utils.h b/src/ast_utils.h
index 253da8050..1f781b87e 100644
--- a/src/ast_utils.h
+++ b/src/ast_utils.h
@@ -154,6 +154,8 @@ struct ReFinalize : public WalkerPass<PostWalker<ReFinalize>> {
   void visitSetGlobal(SetGlobal *curr) { curr->finalize(); }
   void visitLoad(Load *curr) { curr->finalize(); }
   void visitStore(Store *curr) { curr->finalize(); }
+  void visitAtomicRMW(AtomicRMW *curr) { curr->finalize(); }
+  void visitAtomicCmpxchg(AtomicCmpxchg *curr) { curr->finalize(); }
   void visitConst(Const *curr) { curr->finalize(); }
   void visitUnary(Unary *curr) { curr->finalize(); }
   void visitBinary(Binary *curr) { curr->finalize(); }
diff --git a/src/passes/DeadCodeElimination.cpp b/src/passes/DeadCodeElimination.cpp
index 321bc0f9a..5017569aa 100644
--- a/src/passes/DeadCodeElimination.cpp
+++ b/src/passes/DeadCodeElimination.cpp
@@ -28,6 +28,7 @@
 // have no side effects.
 //

+#include <vector>
 #include <wasm.h>
 #include <pass.h>
 #include <wasm-builder.h>
@@ -321,84 +322,62 @@ struct DeadCodeElimination : public WalkerPass<PostWalker<DeadCodeElimination>>
     }
   }

-  void visitSetLocal(SetLocal* curr) {
-    if (isUnreachable(curr->value)) {
-      replaceCurrent(curr->value);
+  // Append the reachable operands of the current node to a block, and replace
+  // it with the block
+  void blockifyReachableOperands(std::vector<Expression*>&& list, WasmType type) {
+    for (size_t i = 0; i < list.size(); ++i) {
+      auto* elem = list[i];
+      if (isUnreachable(elem)) {
+        auto* replacement = elem;
+        if (i > 0) {
+          auto* block = getModule()->allocator.alloc<Block>();
+          for (size_t j = 0; j < i; ++j) {
+            block->list.push_back(drop(list[j]));
+          }
+          block->list.push_back(list[i]);
+          block->finalize(type);
+          replacement = block;
+        }
+        replaceCurrent(replacement);
+        return;
+      }
     }
   }

+  void visitSetLocal(SetLocal* curr) {
+    blockifyReachableOperands({ curr->value }, curr->type);
+  }
+
   void visitLoad(Load* curr) {
-    if (isUnreachable(curr->ptr)) {
-      replaceCurrent(curr->ptr);
-    }
+    blockifyReachableOperands({ curr->ptr}, curr->type);
   }

   void visitStore(Store* curr) {
-    if (isUnreachable(curr->ptr)) {
-      replaceCurrent(curr->ptr);
-      return;
-    }
-    if (isUnreachable(curr->value)) {
-      auto* block = getModule()->allocator.alloc<Block>();
-      block->list.resize(2);
-      block->list[0] = drop(curr->ptr);
-      block->list[1] = curr->value;
-      block->finalize(curr->type);
-      replaceCurrent(block);
-    }
+    blockifyReachableOperands({ curr->ptr, curr->value }, curr->type);
+  }
+
+  void visitAtomicRMW(AtomicRMW* curr) {
+    blockifyReachableOperands({ curr->ptr, curr->value }, curr->type);
+  }
+
+  void visitAtomicCmpxchg(AtomicCmpxchg* curr) {
+    blockifyReachableOperands({ curr->ptr, curr->expected, curr->replacement }, curr->type);
   }

   void visitUnary(Unary* curr) {
-    if (isUnreachable(curr->value)) {
-      replaceCurrent(curr->value);
-    }
+    blockifyReachableOperands({ curr->value }, curr->type);
   }

   void visitBinary(Binary* curr) {
-    if (isUnreachable(curr->left)) {
-      replaceCurrent(curr->left);
-      return;
-    }
-    if (isUnreachable(curr->right)) {
-      auto* block = getModule()->allocator.alloc<Block>();
-      block->list.resize(2);
-      block->list[0] = drop(curr->left);
-      block->list[1] = curr->right;
-      block->finalize(curr->type);
-      replaceCurrent(block);
-    }
+    blockifyReachableOperands({ curr->left, curr->right}, curr->type);
   }

   void visitSelect(Select* curr) {
-    if (isUnreachable(curr->ifTrue)) {
-      replaceCurrent(curr->ifTrue);
-      return;
-    }
-    if (isUnreachable(curr->ifFalse)) {
-      auto* block = getModule()->allocator.alloc<Block>();
-      block->list.resize(2);
-      block->list[0] = drop(curr->ifTrue);
-      block->list[1] = curr->ifFalse;
-      block->finalize(curr->type);
-      replaceCurrent(block);
-      return;
-    }
-    if (isUnreachable(curr->condition)) {
-      auto* block = getModule()->allocator.alloc<Block>();
-      block->list.resize(3);
-      block->list[0] = drop(curr->ifTrue);
-      block->list[1] = drop(curr->ifFalse);
-      block->list[2] = curr->condition;
-      block->finalize(curr->type);
-      replaceCurrent(block);
-      return;
-    }
+    blockifyReachableOperands({ curr->ifTrue, curr->ifFalse, curr->condition}, curr->type);
   }

   void visitDrop(Drop* curr) {
-    if (isUnreachable(curr->value)) {
-      replaceCurrent(curr->value);
-    }
+    blockifyReachableOperands({ curr->value }, curr->type);
   }

   void visitHost(Host* curr) {
@@ -415,4 +394,3 @@ Pass *createDeadCodeEliminationPass() {
 }

 } // namespace wasm
-
diff --git a/src/passes/InstrumentMemory.cpp b/src/passes/InstrumentMemory.cpp
index 536031064..d9a5a4316 100644
--- a/src/passes/InstrumentMemory.cpp
+++ b/src/passes/InstrumentMemory.cpp
@@ -66,6 +66,7 @@ namespace wasm {
 Name load("load");
 Name store("store");

+// TODO: Add support for atomicRMW/cmpxchg
 struct InstrumentMemory : public WalkerPass<PostWalker<InstrumentMemory>> {
   void visitLoad(Load* curr) {
diff --git a/src/passes/MergeBlocks.cpp b/src/passes/MergeBlocks.cpp
index bc5fea6fb..455e54971 100644
--- a/src/passes/MergeBlocks.cpp
+++ b/src/passes/MergeBlocks.cpp
@@ -286,17 +286,27 @@ struct MergeBlocks : public WalkerPass<PostWalker<MergeBlocks>> {
   void visitStore(Store* curr) {
     optimize(curr, curr->value, optimize(curr, curr->ptr), &curr->ptr);
   }
-
-  void visitSelect(Select* curr) {
+  void visitAtomicRMW(AtomicRMW* curr) {
+    optimize(curr, curr->value, optimize(curr, curr->ptr), &curr->ptr);
+  }
+  void optimizeTernary(Expression* curr,
+      Expression*& first, Expression*& second, Expression*& third) {
     // TODO: for now, just stop when we see any side effect. instead, we could
     //       check effects carefully for reordering
     Block* outer = nullptr;
-    if (EffectAnalyzer(getPassOptions(), curr->ifTrue).hasSideEffects()) return;
-    outer = optimize(curr, curr->ifTrue, outer);
-    if (EffectAnalyzer(getPassOptions(), curr->ifFalse).hasSideEffects()) return;
-    outer = optimize(curr, curr->ifFalse, outer);
-    if (EffectAnalyzer(getPassOptions(), curr->condition).hasSideEffects()) return;
-    optimize(curr, curr->condition, outer);
+    if (EffectAnalyzer(getPassOptions(), first).hasSideEffects()) return;
+    outer = optimize(curr, first, outer);
+    if (EffectAnalyzer(getPassOptions(), second).hasSideEffects()) return;
+    outer = optimize(curr, second, outer);
+    if (EffectAnalyzer(getPassOptions(), third).hasSideEffects()) return;
+    optimize(curr, third, outer);
+  }
+  void visitAtomicCmpxchg(AtomicCmpxchg* curr) {
+    optimizeTernary(curr, curr->ptr, curr->expected, curr->replacement);
+  }
+
+  void visitSelect(Select* curr) {
+    optimizeTernary(curr, curr->ifTrue, curr->ifFalse, curr->condition);
   }

   void visitDrop(Drop* curr) {
@@ -344,4 +354,3 @@ Pass *createMergeBlocksPass() {
 }

 } // namespace wasm
-
diff --git a/src/passes/Precompute.cpp b/src/passes/Precompute.cpp
index 122501de9..c4702fdeb 100644
--- a/src/passes/Precompute.cpp
+++ b/src/passes/Precompute.cpp
@@ -67,6 +67,12 @@ public:
   Flow visitStore(Store *curr) {
     return Flow(NONSTANDALONE_FLOW);
   }
+  Flow visitAtomicRMW(AtomicRMW *curr) {
+    return Flow(NONSTANDALONE_FLOW);
+  }
+  Flow visitAtomicCmpxchg(AtomicCmpxchg *curr) {
+    return Flow(NONSTANDALONE_FLOW);
+  }
   Flow visitHost(Host *curr) {
     return Flow(NONSTANDALONE_FLOW);
   }
diff --git a/src/wasm-builder.h b/src/wasm-builder.h
index f702342d2..a1f2ec9b3 100644
--- a/src/wasm-builder.h
+++ b/src/wasm-builder.h
@@ -193,6 +193,11 @@ public:
     ret->type = type;
     return ret;
   }
+  Load* makeAtomicLoad(unsigned bytes, bool signed_, uint32_t offset, Expression* ptr, WasmType type) {
+    Load* load = makeLoad(bytes, signed_, offset, getWasmTypeSize(type), ptr, type);
+    load->isAtomic = true;
+    return load;
+  }
   Store* makeStore(unsigned bytes, uint32_t offset, unsigned align, Expression *ptr, Expression *value, WasmType type) {
     auto* ret = allocator.alloc<Store>();
     ret->isAtomic = false;
@@ -201,6 +206,36 @@ public:
     assert(isConcreteWasmType(ret->value->type) ? ret->value->type == type : true);
     return ret;
   }
+  Store* makeAtomicStore(unsigned bytes, uint32_t offset, Expression* ptr, Expression* value, WasmType type) {
+    Store* store = makeStore(bytes, offset, getWasmTypeSize(type), ptr, value, type);
+    store->isAtomic = true;
+    return store;
+  }
+  AtomicRMW* makeAtomicRMW(AtomicRMWOp op, unsigned bytes, uint32_t offset,
+                           Expression* ptr, Expression* value, WasmType type) {
+    auto* ret = allocator.alloc<AtomicRMW>();
+    ret->op = op;
+    ret->bytes = bytes;
+    ret->offset = offset;
+    ret->ptr = ptr;
+    ret->value = value;
+    ret->type = type;
+    ret->finalize();
+    return ret;
+  }
+  AtomicCmpxchg* makeAtomicCmpxchg(unsigned bytes, uint32_t offset,
+                                   Expression* ptr, Expression* expected, Expression* replacement,
+                                   WasmType type) {
+    auto* ret = allocator.alloc<AtomicCmpxchg>();
+    ret->bytes = bytes;
+    ret->offset = offset;
+    ret->ptr = ptr;
+    ret->expected = expected;
+    ret->replacement = replacement;
+    ret->type = type;
+    ret->finalize();
+    return ret;
+  }
   Const* makeConst(Literal value) {
     assert(isConcreteWasmType(value.type));
     auto* ret = allocator.alloc<Const>();
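For orientation, here is a minimal sketch (not part of the patch) of how the new Builder helpers in src/wasm-builder.h might be used to construct an atomic operation. The wrapper function, the constant address and value, and the `Add` enumerator spelling are illustrative assumptions; only the `makeAtomicRMW` and `makeConst` signatures come from this diff.

```cpp
// Sketch only: build (i32.atomic.rmw.add (i32.const 1024) (i32.const 1)),
// assuming `Add` is the AtomicRMWOp enumerator for an atomic add in wasm.h.
#include <wasm.h>
#include <wasm-builder.h>

namespace wasm {

Expression* makeAtomicIncrement(Module& module) {
  Builder builder(module);
  return builder.makeAtomicRMW(
      Add,                                        // AtomicRMWOp (exact name assumed)
      4,                                          // bytes: a full i32 access
      0,                                          // constant offset
      builder.makeConst(Literal(int32_t(1024))),  // ptr (hypothetical address)
      builder.makeConst(Literal(int32_t(1))),     // value to add
      i32);                                       // operand/result type
}

} // namespace wasm
```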
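Likewise, a small sketch of what the new `isAtomic` flag in src/ast/effects.h means for pass authors: as soon as one of two expressions is atomic and the other reads or writes memory, `invalidates` reports a conflict and the two must not be reordered. The helper function and the include paths below are assumptions; `EffectAnalyzer`, `hasSideEffects`, `invalidates`, and the options object returned by `getPassOptions()` are the ones used in this diff.

```cpp
// Sketch only: atomics are treated as sequentially consistent and ordered
// with respect to all other memory accesses, so reordering is rejected.
#include <pass.h>
#include <ast/effects.h>

namespace wasm {

// Returns true only when it is safe to swap the evaluation order of a and b.
bool canReorder(PassOptions& options, Expression* a, Expression* b) {
  EffectAnalyzer ea(options, a);
  EffectAnalyzer eb(options, b);
  // If either side has isAtomic set and the other accesses memory,
  // invalidates() now returns true in both directions.
  return !ea.invalidates(eb) && !eb.invalidates(ea);
}

} // namespace wasm
```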