Diffstat (limited to 'src/passes/MemoryPacking.cpp')
-rw-r--r--   src/passes/MemoryPacking.cpp   120
1 file changed, 65 insertions(+), 55 deletions(-)
diff --git a/src/passes/MemoryPacking.cpp b/src/passes/MemoryPacking.cpp
index 7b4caf11e..3be08e7b9 100644
--- a/src/passes/MemoryPacking.cpp
+++ b/src/passes/MemoryPacking.cpp
@@ -95,19 +95,22 @@ makeGtShiftedMemorySize(Builder& builder, Module& module, MemoryInit* curr) {
struct MemoryPacking : public Pass {
void run(PassRunner* runner, Module* module) override;
- bool canOptimize(const Memory& memory, const PassOptions& passOptions);
+ bool canOptimize(const Memory& memory,
+ std::vector<std::unique_ptr<DataSegment>>& dataSegments,
+ const PassOptions& passOptions);
void optimizeBulkMemoryOps(PassRunner* runner, Module* module);
void getSegmentReferrers(Module* module, ReferrersMap& referrers);
- void dropUnusedSegments(std::vector<Memory::Segment>& segments,
+ void dropUnusedSegments(std::vector<std::unique_ptr<DataSegment>>& segments,
ReferrersMap& referrers);
- bool canSplit(const Memory::Segment& segment, const Referrers& referrers);
- void calculateRanges(const Memory::Segment& segment,
+ bool canSplit(const std::unique_ptr<DataSegment>& segment,
+ const Referrers& referrers);
+ void calculateRanges(const std::unique_ptr<DataSegment>& segment,
const Referrers& referrers,
std::vector<Range>& ranges);
void createSplitSegments(Builder& builder,
- const Memory::Segment& segment,
+ const DataSegment* segment,
std::vector<Range>& ranges,
- std::vector<Memory::Segment>& packed,
+ std::vector<std::unique_ptr<DataSegment>>& packed,
size_t segmentsRemaining);
void createReplacements(Module* module,
const std::vector<Range>& ranges,
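Note on the new types: this diff migrates every use of Memory::Segment to heap-allocated DataSegment objects owned by the module. The sketch below reconstructs only the fields this pass touches (name, hasExplicitName, isPassive, offset, data); the authoritative definition lives in Binaryen's wasm.h, so treat this as an inferred outline, not the real declaration.

#include <memory>
#include <string>
#include <vector>

// Stand-ins so the sketch is self-contained; Binaryen has its own
// Name and Expression types.
using Name = std::string;
struct Expression;

// Approximate shape of DataSegment as used by this pass. Segments now
// live in Module::dataSegments as std::vector<std::unique_ptr<DataSegment>>
// instead of inside Memory itself.
struct DataSegment {
  Name name;                    // empty unless the segment is named
  bool hasExplicitName = false; // true if the name came from the input
  bool isPassive = false;       // passive segments are applied via memory.init
  Expression* offset = nullptr; // placement expression; active segments only
  std::vector<char> data;       // the segment's raw bytes
};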
@@ -120,11 +123,11 @@ struct MemoryPacking : public Pass {
};
void MemoryPacking::run(PassRunner* runner, Module* module) {
- if (!canOptimize(module->memory, runner->options)) {
+ if (!canOptimize(module->memory, module->dataSegments, runner->options)) {
return;
}
- auto& segments = module->memory.segments;
+ auto& segments = module->dataSegments;
// For each segment, a list of bulk memory instructions that refer to it
ReferrersMap referrers;
@@ -141,7 +144,7 @@ void MemoryPacking::run(PassRunner* runner, Module* module) {
}
// The new, split memory segments
- std::vector<Memory::Segment> packed;
+ std::vector<std::unique_ptr<DataSegment>> packed;
Replacements replacements;
Builder builder(*module);
@@ -156,12 +159,13 @@ void MemoryPacking::run(PassRunner* runner, Module* module) {
} else {
// A single range covers the entire segment. Set isZero to false so the
// original memory.init will be used even if segment is all zeroes.
- ranges.push_back({false, 0, segment.data.size()});
+ ranges.push_back({false, 0, segment->data.size()});
}
Index firstNewIndex = packed.size();
size_t segmentsRemaining = segments.size() - origIndex;
- createSplitSegments(builder, segment, ranges, packed, segmentsRemaining);
+ createSplitSegments(
+ builder, segment.get(), ranges, packed, segmentsRemaining);
createReplacements(
module, ranges, currReferrers, replacements, firstNewIndex);
}
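The Range values pushed in this hunk are declared elsewhere in the file; from the initializer {false, 0, segment->data.size()} and the later range.isZero / range.start / range.end accesses, their shape is plausibly the following (inferred, not copied from the source):

#include <cstddef>

// Inferred layout of Range: a half-open slice [start, end) of a
// segment's data, flagged when it holds only zero bytes.
struct Range {
  bool isZero;  // true if every byte in [start, end) is zero
  size_t start; // inclusive offset into the segment's data
  size_t end;   // exclusive offset
};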
@@ -173,8 +177,10 @@ void MemoryPacking::run(PassRunner* runner, Module* module) {
}
}
-bool MemoryPacking::canOptimize(const Memory& memory,
- const PassOptions& passOptions) {
+bool MemoryPacking::canOptimize(
+ const Memory& memory,
+ std::vector<std::unique_ptr<DataSegment>>& dataSegments,
+ const PassOptions& passOptions) {
if (!memory.exists) {
return false;
}
@@ -186,18 +192,16 @@ bool MemoryPacking::canOptimize(const Memory& memory,
return false;
}
- auto& segments = memory.segments;
-
// One segment is always ok to optimize, as it does not have the potential
// problems handled below.
- if (segments.size() <= 1) {
+ if (dataSegments.size() <= 1) {
return true;
}
// Check if it is ok for us to optimize.
Address maxAddress = 0;
- for (auto& segment : segments) {
- if (!segment.isPassive) {
- auto* c = segment.offset->dynCast<Const>();
+ for (auto& segment : dataSegments) {
+ if (!segment->isPassive) {
+ auto* c = segment->offset->dynCast<Const>();
// If an active segment has a non-constant offset, then what gets written
// cannot be known until runtime. That is, the active segments are written
// out at startup, in order, and one may trample the data of another, like
@@ -222,7 +226,7 @@ bool MemoryPacking::canOptimize(const Memory& memory,
}
// Note the maximum address so far.
maxAddress = std::max(
- maxAddress, Address(c->value.getUnsigned() + segment.data.size()));
+ maxAddress, Address(c->value.getUnsigned() + segment->data.size()));
}
}
// All active segments have constant offsets, known at this time, so we may be
@@ -230,11 +234,11 @@ bool MemoryPacking::canOptimize(const Memory& memory,
// earlier.
// TODO: optimize in the trampling case
DisjointSpans space;
- for (auto& segment : segments) {
- if (!segment.isPassive) {
- auto* c = segment.offset->cast<Const>();
+ for (auto& segment : dataSegments) {
+ if (!segment->isPassive) {
+ auto* c = segment->offset->cast<Const>();
Address start = c->value.getUnsigned();
- DisjointSpans::Span span{start, start + segment.data.size()};
+ DisjointSpans::Span span{start, start + segment->data.size()};
if (space.addAndCheckOverlap(span)) {
std::cerr << "warning: active memory segments have overlap, which "
<< "prevents some optimizations.\n";
@@ -245,17 +249,17 @@ bool MemoryPacking::canOptimize(const Memory& memory,
return true;
}
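The overlap guard above leans on Binaryen's DisjointSpans helper (src/support/space.h). Below is a minimal self-contained sketch of the behavior the pass depends on, assuming half-open [left, right) spans as the aggregate initialization in the hunk suggests; the real helper may be implemented differently.

#include <cstdint>
#include <vector>

using Address = uint64_t; // stand-in for Binaryen's Address type

// Sketch: record half-open address spans and report whether a newly
// added span overlaps any earlier one.
struct DisjointSpansSketch {
  struct Span {
    Address left, right; // [left, right)
  };
  std::vector<Span> spans;

  // Returns true if `span` overlaps an existing span; records it either way.
  bool addAndCheckOverlap(Span span) {
    bool overlaps = false;
    for (auto& s : spans) {
      if (span.left < s.right && s.left < span.right) {
        overlaps = true;
        break;
      }
    }
    spans.push_back(span);
    return overlaps;
  }
};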
-bool MemoryPacking::canSplit(const Memory::Segment& segment,
+bool MemoryPacking::canSplit(const std::unique_ptr<DataSegment>& segment,
const Referrers& referrers) {
// Don't mess with segments related to llvm coverage tools such as
// __llvm_covfun. These segments are expected/parsed by external downstream
// tools (llvm-cov) so they need to be left intact.
// See https://clang.llvm.org/docs/SourceBasedCodeCoverage.html
- if (segment.name.is() && segment.name.startsWith("__llvm")) {
+ if (segment->name.is() && segment->name.startsWith("__llvm")) {
return false;
}
- if (segment.isPassive) {
+ if (segment->isPassive) {
for (auto* referrer : referrers) {
if (auto* init = referrer->dynCast<MemoryInit>()) {
// Do not try to split if there is a nonconstant offset or size
@@ -268,13 +272,13 @@ bool MemoryPacking::canSplit(const Memory::Segment& segment,
}
// Active segments can only be split if they have constant offsets
- return segment.offset->is<Const>();
+ return segment->offset->is<Const>();
}
-void MemoryPacking::calculateRanges(const Memory::Segment& segment,
+void MemoryPacking::calculateRanges(const std::unique_ptr<DataSegment>& segment,
const Referrers& referrers,
std::vector<Range>& ranges) {
- auto& data = segment.data;
+ auto& data = segment->data;
if (data.size() == 0) {
return;
}
@@ -304,7 +308,7 @@ void MemoryPacking::calculateRanges(const Memory::Segment& segment,
// entire segment and that all its arguments are constants. These assumptions
// are true of all memory.inits generated by the tools.
size_t threshold = 0;
- if (segment.isPassive) {
+ if (segment->isPassive) {
// Passive segment metadata size
threshold += 2;
// Zeroes on the edge do not increase the number of segments or data.drops,
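The threshold computed here is a cost model: cutting at a run of zeroes only pays off when the run is longer than the metadata (and, for passive segments, the extra memory.init/data.drop bookkeeping) that one more segment would cost. The helper below is hypothetical, not part of the pass; it only illustrates how such a threshold folds short zero gaps back into their neighbors.

#include <cstddef>
#include <utility>
#include <vector>

struct Range {
  bool isZero;
  size_t start, end;
};

// Illustrative only: absorb zero gaps no longer than `threshold` into
// the surrounding nonzero ranges, since splitting at them would cost
// more metadata bytes than the zeroes themselves.
static void mergeShortGaps(std::vector<Range>& ranges, size_t threshold) {
  std::vector<Range> merged;
  for (auto& range : ranges) {
    bool shortGap = range.isZero && !merged.empty() &&
                    !merged.back().isZero &&
                    (range.end - range.start) <= threshold;
    bool glueNonzero =
      !range.isZero && !merged.empty() && !merged.back().isZero;
    if (shortGap || glueNonzero) {
      merged.back().end = range.end; // extend instead of starting a new range
    } else {
      merged.push_back(range);
    }
  }
  ranges = std::move(merged);
}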
@@ -375,8 +379,8 @@ void MemoryPacking::optimizeBulkMemoryOps(PassRunner* runner, Module* module) {
void visitMemoryInit(MemoryInit* curr) {
Builder builder(*getModule());
- Memory::Segment& segment = getModule()->memory.segments[curr->segment];
- size_t maxRuntimeSize = segment.isPassive ? segment.data.size() : 0;
+ auto& segment = getModule()->dataSegments[curr->segment];
+ size_t maxRuntimeSize = segment->isPassive ? segment->data.size() : 0;
bool mustNop = false;
bool mustTrap = false;
auto* offset = curr->offset->dynCast<Const>();
@@ -409,7 +413,7 @@ void MemoryPacking::optimizeBulkMemoryOps(PassRunner* runner, Module* module) {
builder.makeDrop(curr->size),
builder.makeUnreachable()));
needsRefinalizing = true;
- } else if (!segment.isPassive) {
+ } else if (!segment->isPassive) {
// trap if (dest > memory.size | offset | size) != 0
replaceCurrent(builder.makeIf(
builder.makeBinary(
@@ -420,7 +424,7 @@ void MemoryPacking::optimizeBulkMemoryOps(PassRunner* runner, Module* module) {
}
}
void visitDataDrop(DataDrop* curr) {
- if (!getModule()->memory.segments[curr->segment].isPassive) {
+ if (!getModule()->dataSegments[curr->segment]->isPassive) {
ExpressionManipulator::nop(curr);
}
}
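When an active segment's data has already been applied at instantiation, visitMemoryInit cannot simply delete the memory.init: the instruction must still trap for out-of-bounds arguments. In plain C++ terms the guard built here behaves roughly like the semantic sketch below; memorySizeInBytes stands in for memory.size scaled from pages to bytes (see makeGtShiftedMemorySize), and __builtin_trap models the emitted unreachable.

#include <cstdint>

// Semantic sketch of the replacement for memory.init on an active
// segment: the only non-trapping call writes nothing (offset and size
// both zero) with an in-bounds destination.
inline void memoryInitOnActiveSegment(uint64_t dest,
                                      uint64_t offset,
                                      uint64_t size,
                                      uint64_t memorySizeInBytes) {
  if ((dest > memorySizeInBytes) | (offset != 0) | (size != 0)) {
    __builtin_trap(); // corresponds to builder.makeUnreachable()
  }
  // otherwise: no-op, the segment's bytes are already in memory
}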
@@ -467,9 +471,10 @@ void MemoryPacking::getSegmentReferrers(Module* module,
}
}
-void MemoryPacking::dropUnusedSegments(std::vector<Memory::Segment>& segments,
- ReferrersMap& referrers) {
- std::vector<Memory::Segment> usedSegments;
+void MemoryPacking::dropUnusedSegments(
+ std::vector<std::unique_ptr<DataSegment>>& segments,
+ ReferrersMap& referrers) {
+ std::vector<std::unique_ptr<DataSegment>> usedSegments;
ReferrersMap usedReferrers;
// Remove segments that are never used
// TODO: remove unused portions of partially used segments as well
@@ -477,7 +482,7 @@ void MemoryPacking::dropUnusedSegments(std::vector<Memory::Segment>& segments,
bool used = false;
auto referrersIt = referrers.find(i);
bool hasReferrers = referrersIt != referrers.end();
- if (segments[i].isPassive) {
+ if (segments[i]->isPassive) {
if (hasReferrers) {
for (auto* referrer : referrersIt->second) {
if (referrer->is<MemoryInit>()) {
@@ -506,20 +511,22 @@ void MemoryPacking::dropUnusedSegments(std::vector<Memory::Segment>& segments,
std::swap(referrers, usedReferrers);
}
-void MemoryPacking::createSplitSegments(Builder& builder,
- const Memory::Segment& segment,
- std::vector<Range>& ranges,
- std::vector<Memory::Segment>& packed,
- size_t segmentsRemaining) {
+void MemoryPacking::createSplitSegments(
+ Builder& builder,
+ const DataSegment* segment,
+ std::vector<Range>& ranges,
+ std::vector<std::unique_ptr<DataSegment>>& packed,
+ size_t segmentsRemaining) {
size_t segmentCount = 0;
+ bool hasExplicitName = false;
for (size_t i = 0; i < ranges.size(); ++i) {
Range& range = ranges[i];
if (range.isZero) {
continue;
}
Expression* offset = nullptr;
- if (!segment.isPassive) {
- if (auto* c = segment.offset->dynCast<Const>()) {
+ if (!segment->isPassive) {
+ if (auto* c = segment->offset->dynCast<Const>()) {
if (c->value.type == Type::i32) {
offset = builder.makeConst(int32_t(c->value.geti32() + range.start));
} else {
@@ -528,7 +535,7 @@ void MemoryPacking::createSplitSegments(Builder& builder,
}
} else {
assert(ranges.size() == 1);
- offset = segment.offset;
+ offset = segment->offset;
}
}
if (WebLimitations::MaxDataSegments <= packed.size() + segmentsRemaining) {
@@ -541,24 +548,27 @@ void MemoryPacking::createSplitSegments(Builder& builder,
ranges.erase(ranges.begin() + i + 1, lastNonzero + 1);
}
Name name;
- if (segment.name.is()) {
+ if (segment->name.is()) {
// Name the first range after the original segment and all following
// ranges get numbered accordingly. This means that for segments that
// cannot be split (segments that contain a single range) the input and
// output segment have the same name.
if (!segmentCount) {
- name = segment.name;
+ name = segment->name;
+ hasExplicitName = segment->hasExplicitName;
} else {
- name = std::string(segment.name.c_str()) + "." +
+ name = std::string(segment->name.c_str()) + "." +
std::to_string(segmentCount);
}
segmentCount++;
}
- packed.emplace_back(name,
- segment.isPassive,
- offset,
- &segment.data[range.start],
- range.end - range.start);
+ auto curr = Builder::makeDataSegment(name,
+ segment->isPassive,
+ offset,
+ &segment->data[range.start],
+ range.end - range.start);
+ curr->hasExplicitName = hasExplicitName;
+ packed.push_back(std::move(curr));
}
}
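Two details in this final hunk are worth spelling out. First, the naming rule: the first emitted range keeps the original segment's name and its hasExplicitName flag, and every later range gets a numeric suffix, so an unsplittable segment round-trips with its name intact. Second, ownership: Builder::makeDataSegment returns a freshly allocated segment that is moved into packed rather than copied. The helper below is a hypothetical reconstruction of just the naming rule.

#include <cstddef>
#include <string>
#include <vector>

// Hypothetical illustration of the split-segment naming scheme in
// createSplitSegments: piece 0 keeps the original name, later pieces
// are suffixed ".1", ".2", ...
std::vector<std::string> splitSegmentNames(const std::string& original,
                                           size_t pieces) {
  std::vector<std::string> names;
  for (size_t i = 0; i < pieces; ++i) {
    names.push_back(i == 0 ? original
                           : original + "." + std::to_string(i));
  }
  return names;
}

// splitSegmentNames("rodata", 3) -> {"rodata", "rodata.1", "rodata.2"}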