Diffstat (limited to 'src/passes/MemoryPacking.cpp')
-rw-r--r--  src/passes/MemoryPacking.cpp  69
1 file changed, 66 insertions(+), 3 deletions(-)
diff --git a/src/passes/MemoryPacking.cpp b/src/passes/MemoryPacking.cpp
index 27168942a..3ab170ea9 100644
--- a/src/passes/MemoryPacking.cpp
+++ b/src/passes/MemoryPacking.cpp
@@ -32,12 +32,15 @@
 #include "ir/module-utils.h"
 #include "ir/utils.h"
 #include "pass.h"
+#include "support/space.h"
 #include "wasm-binary.h"
 #include "wasm-builder.h"
 #include "wasm.h"
 
 namespace wasm {
 
+namespace {
+
 // A subsection of an original memory segment. If `isZero` is true, memory.fill
 // will be used instead of memory.init for this range.
 struct Range {
@@ -74,8 +77,6 @@ const size_t MEMORY_FILL_SIZE = 9;
 // data.drop: 2 byte opcode + ~1 byte index immediate
 const size_t DATA_DROP_SIZE = 3;
 
-namespace {
-
 Expression*
 makeGtShiftedMemorySize(Builder& builder, Module& module, MemoryInit* curr) {
   return builder.makeBinary(
@@ -97,6 +98,7 @@ struct MemoryPacking : public Pass {
   uint32_t maxSegments;
 
   void run(PassRunner* runner, Module* module) override;
+  bool canOptimize(const std::vector<Memory::Segment>& segments);
   void optimizeBulkMemoryOps(PassRunner* runner, Module* module);
   void getSegmentReferrers(Module* module, std::vector<Referrers>& referrers);
   void dropUnusedSegments(std::vector<Memory::Segment>& segments,
@@ -125,10 +127,15 @@ void MemoryPacking::run(PassRunner* runner, Module* module) {
     return;
   }
 
+  auto& segments = module->memory.segments;
+
+  if (!canOptimize(segments)) {
+    return;
+  }
+
   maxSegments = module->features.hasBulkMemory()
                   ? 63
                   : uint32_t(WebLimitations::MaxDataSegments);
-  auto& segments = module->memory.segments;
 
   // For each segment, a list of bulk memory instructions that refer to it
   std::vector<Referrers> referrers(segments.size());
@@ -176,6 +183,62 @@ void MemoryPacking::run(PassRunner* runner, Module* module) {
   }
 }
 
+bool MemoryPacking::canOptimize(const std::vector<Memory::Segment>& segments) {
+  // One segment is always ok to optimize, as it does not have the potential
+  // problems handled below.
+  if (segments.size() <= 1) {
+    return true;
+  }
+  // Check if it is ok for us to optimize.
+  Address maxAddress = 0;
+  for (auto& segment : segments) {
+    if (!segment.isPassive) {
+      auto* c = segment.offset->dynCast<Const>();
+      // If an active segment has a non-constant offset, then what gets
+      // written cannot be known until runtime. That is, the active segments
+      // are written out at startup, in order, and one may trample the data
+      // of another, like
+      //
+      //  (data (i32.const 100) "a")
+      //  (data (i32.const 100) "\00")
+      //
+      // It is *not* ok to optimize out the zero in the last segment, as it
+      // is actually needed, it will zero out the "a" that was written
+      // earlier. And if a segment has an imported offset,
+      //
+      //  (data (i32.const 100) "a")
+      //  (data (global.get $x) "\00")
+      //
+      // then we can't tell if that last segment will end up overwriting or
+      // not. The only case we can easily handle is if there is just a
+      // single segment, which we handled earlier. (Note that that includes
+      // the main case of having a non-constant offset, dynamic linking, in
+      // which we have a single segment.)
+      if (!c) {
+        return false;
+      }
+      // Note the maximum address so far.
+      maxAddress = std::max(
+        maxAddress, Address(c->value.getInteger() + segment.data.size()));
+    }
+  }
+  // All active segments have constant offsets, known at this time, so we
+  // may be able to optimize, but must still check for the trampling problem
+  // mentioned earlier.
+  // TODO: optimize in the trampling case
+  DisjointSpans space;
+  for (auto& segment : segments) {
+    if (!segment.isPassive) {
+      auto* c = segment.offset->cast<Const>();
+      Address start = c->value.getInteger();
+      DisjointSpans::Span span{start, start + segment.data.size()};
+      if (space.addAndCheckOverlap(span)) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
 bool MemoryPacking::canSplit(const Memory::Segment& segment,
                              const Referrers& referrers) {
   if (segment.isPassive) {
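For reference, the new include, support/space.h, supplies the DisjointSpans helper that the second loop relies on. The diff only exercises two pieces of its interface, DisjointSpans::Span and addAndCheckOverlap, so the following is a minimal sketch of how such a helper could work under those assumptions (half-open [left, right) spans over integer addresses). It is illustrative, not the actual contents of support/space.h:

// Sketch of a DisjointSpans-like helper matching the call sites above:
// DisjointSpans::Span{start, end} and addAndCheckOverlap(span).
// Illustrative only, not the actual contents of support/space.h.
#include <cstdint>
#include <iterator>
#include <set>

using Address = uint64_t; // stand-in for Binaryen's Address type

struct DisjointSpans {
  // A half-open range [left, right) of addresses.
  struct Span {
    Address left, right;
    bool overlaps(const Span& other) const {
      return left < other.right && other.left < right;
    }
    bool operator<(const Span& other) const { return left < other.left; }
  };

  std::set<Span> spans; // disjoint spans, ordered by left endpoint

  // Adds `span` and reports whether it overlaps a previously added span.
  bool addAndCheckOverlap(Span span) {
    // Because the stored spans are disjoint and sorted, only the nearest
    // neighbor on each side of `span` can possibly overlap it.
    auto it = spans.lower_bound(span);
    if (it != spans.end() && span.overlaps(*it)) {
      return true;
    }
    if (it != spans.begin() && span.overlaps(*std::prev(it))) {
      return true;
    }
    spans.insert(span);
    return false;
  }
};

With a helper of this shape, canOptimize inserts each active segment's [offset, offset + size) range in turn and returns false as soon as two constant-offset segments share any addresses, which is exactly the trampling scenario described in the comments above.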