Diffstat (limited to 'src/passes')
-rw-r--r--  src/passes/Memory64Lowering.cpp              15
-rw-r--r--  src/passes/MemoryPacking.cpp                120
-rw-r--r--  src/passes/Metrics.cpp                       22
-rw-r--r--  src/passes/Print.cpp                         97
-rw-r--r--  src/passes/RemoveMemory.cpp                   2
-rw-r--r--  src/passes/RemoveUnusedModuleElements.cpp    10
6 files changed, 141 insertions, 125 deletions
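
Across all six passes the change is the same: data segments move off the Memory object (formerly memory.segments, a std::vector<Memory::Segment>) and become module-level entities stored as std::vector<std::unique_ptr<DataSegment>>, with each segment carrying its own name, hasExplicitName, isPassive, offset, and data. Below is a minimal sketch of how a pass reads the new layout, assuming Binaryen's post-change wasm.h; the helper name activeDataSize is hypothetical.

#include "wasm.h"

namespace wasm {

// Hypothetical helper: tally the bytes held by active (non-passive) data
// segments, which now live on the Module rather than on module.memory.
size_t activeDataSize(Module& module) {
  size_t total = 0;
  for (auto& segment : module.dataSegments) { // std::unique_ptr<DataSegment>
    if (!segment->isPassive) {
      total += segment->data.size();
    }
  }
  return total;
}

} // namespace wasm
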
diff --git a/src/passes/Memory64Lowering.cpp b/src/passes/Memory64Lowering.cpp
index 95dbf4f17..4c2770e3a 100644
--- a/src/passes/Memory64Lowering.cpp
+++ b/src/passes/Memory64Lowering.cpp
@@ -99,19 +99,20 @@ struct Memory64Lowering : public WalkerPass<PostWalker<Memory64Lowering>> {
void visitAtomicNotify(AtomicNotify* curr) { wrapAddress64(curr->ptr); }
void visitMemory(Memory* memory) {
- for (auto& segment : memory->segments) {
- if (!segment.isPassive) {
- auto* c = segment.offset->cast<Const>();
- c->value = Literal(static_cast<uint32_t>(c->value.geti64()));
- c->type = Type::i32;
- }
- }
// This is visited last.
memory->indexType = Type::i32;
if (memory->hasMax() && memory->max > Memory::kMaxSize32) {
memory->max = Memory::kMaxSize32;
}
}
+
+ void visitDataSegment(DataSegment* segment) {
+ if (!segment->isPassive) {
+ auto* c = segment->offset->cast<Const>();
+ c->value = Literal(static_cast<uint32_t>(c->value.geti64()));
+ c->type = Type::i32;
+ }
+ }
};
Pass* createMemory64LoweringPass() { return new Memory64Lowering(); }
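
The offset truncation that previously ran inside visitMemory's segment loop now lives in its own visitDataSegment hook, since segments are visited separately from the memory. A minimal standalone sketch of the same lowering step, assuming the post-change DataSegment type; lowerSegmentOffsetTo32 is a hypothetical name.

#include "wasm.h"

namespace wasm {

// Hypothetical helper mirroring visitDataSegment above: for an active
// segment, narrow the 64-bit constant offset to a 32-bit one so the segment
// stays valid once the memory's index type becomes i32.
void lowerSegmentOffsetTo32(DataSegment& segment) {
  if (segment.isPassive) {
    return; // passive segments have no offset expression
  }
  auto* c = segment.offset->cast<Const>();
  c->value = Literal(static_cast<uint32_t>(c->value.geti64()));
  c->type = Type::i32;
}

} // namespace wasm
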
diff --git a/src/passes/MemoryPacking.cpp b/src/passes/MemoryPacking.cpp
index 7b4caf11e..3be08e7b9 100644
--- a/src/passes/MemoryPacking.cpp
+++ b/src/passes/MemoryPacking.cpp
@@ -95,19 +95,22 @@ makeGtShiftedMemorySize(Builder& builder, Module& module, MemoryInit* curr) {
struct MemoryPacking : public Pass {
void run(PassRunner* runner, Module* module) override;
- bool canOptimize(const Memory& memory, const PassOptions& passOptions);
+ bool canOptimize(const Memory& memory,
+ std::vector<std::unique_ptr<DataSegment>>& dataSegments,
+ const PassOptions& passOptions);
void optimizeBulkMemoryOps(PassRunner* runner, Module* module);
void getSegmentReferrers(Module* module, ReferrersMap& referrers);
- void dropUnusedSegments(std::vector<Memory::Segment>& segments,
+ void dropUnusedSegments(std::vector<std::unique_ptr<DataSegment>>& segments,
ReferrersMap& referrers);
- bool canSplit(const Memory::Segment& segment, const Referrers& referrers);
- void calculateRanges(const Memory::Segment& segment,
+ bool canSplit(const std::unique_ptr<DataSegment>& segment,
+ const Referrers& referrers);
+ void calculateRanges(const std::unique_ptr<DataSegment>& segment,
const Referrers& referrers,
std::vector<Range>& ranges);
void createSplitSegments(Builder& builder,
- const Memory::Segment& segment,
+ const DataSegment* segment,
std::vector<Range>& ranges,
- std::vector<Memory::Segment>& packed,
+ std::vector<std::unique_ptr<DataSegment>>& packed,
size_t segmentsRemaining);
void createReplacements(Module* module,
const std::vector<Range>& ranges,
@@ -120,11 +123,11 @@ struct MemoryPacking : public Pass {
};
void MemoryPacking::run(PassRunner* runner, Module* module) {
- if (!canOptimize(module->memory, runner->options)) {
+ if (!canOptimize(module->memory, module->dataSegments, runner->options)) {
return;
}
- auto& segments = module->memory.segments;
+ auto& segments = module->dataSegments;
// For each segment, a list of bulk memory instructions that refer to it
ReferrersMap referrers;
@@ -141,7 +144,7 @@ void MemoryPacking::run(PassRunner* runner, Module* module) {
}
// The new, split memory segments
- std::vector<Memory::Segment> packed;
+ std::vector<std::unique_ptr<DataSegment>> packed;
Replacements replacements;
Builder builder(*module);
@@ -156,12 +159,13 @@ void MemoryPacking::run(PassRunner* runner, Module* module) {
} else {
// A single range covers the entire segment. Set isZero to false so the
// original memory.init will be used even if the segment is all zeroes.
- ranges.push_back({false, 0, segment.data.size()});
+ ranges.push_back({false, 0, segment->data.size()});
}
Index firstNewIndex = packed.size();
size_t segmentsRemaining = segments.size() - origIndex;
- createSplitSegments(builder, segment, ranges, packed, segmentsRemaining);
+ createSplitSegments(
+ builder, segment.get(), ranges, packed, segmentsRemaining);
createReplacements(
module, ranges, currReferrers, replacements, firstNewIndex);
}
@@ -173,8 +177,10 @@ void MemoryPacking::run(PassRunner* runner, Module* module) {
}
}
-bool MemoryPacking::canOptimize(const Memory& memory,
- const PassOptions& passOptions) {
+bool MemoryPacking::canOptimize(
+ const Memory& memory,
+ std::vector<std::unique_ptr<DataSegment>>& dataSegments,
+ const PassOptions& passOptions) {
if (!memory.exists) {
return false;
}
@@ -186,18 +192,16 @@ bool MemoryPacking::canOptimize(const Memory& memory,
return false;
}
- auto& segments = memory.segments;
-
// One segment is always ok to optimize, as it does not have the potential
// problems handled below.
- if (segments.size() <= 1) {
+ if (dataSegments.size() <= 1) {
return true;
}
// Check if it is ok for us to optimize.
Address maxAddress = 0;
- for (auto& segment : segments) {
- if (!segment.isPassive) {
- auto* c = segment.offset->dynCast<Const>();
+ for (auto& segment : dataSegments) {
+ if (!segment->isPassive) {
+ auto* c = segment->offset->dynCast<Const>();
// If an active segment has a non-constant offset, then what gets written
// cannot be known until runtime. That is, the active segments are written
// out at startup, in order, and one may trample the data of another, like
@@ -222,7 +226,7 @@ bool MemoryPacking::canOptimize(const Memory& memory,
}
// Note the maximum address so far.
maxAddress = std::max(
- maxAddress, Address(c->value.getUnsigned() + segment.data.size()));
+ maxAddress, Address(c->value.getUnsigned() + segment->data.size()));
}
}
// All active segments have constant offsets, known at this time, so we may be
@@ -230,11 +234,11 @@ bool MemoryPacking::canOptimize(const Memory& memory,
// earlier.
// TODO: optimize in the trampling case
DisjointSpans space;
- for (auto& segment : segments) {
- if (!segment.isPassive) {
- auto* c = segment.offset->cast<Const>();
+ for (auto& segment : dataSegments) {
+ if (!segment->isPassive) {
+ auto* c = segment->offset->cast<Const>();
Address start = c->value.getUnsigned();
- DisjointSpans::Span span{start, start + segment.data.size()};
+ DisjointSpans::Span span{start, start + segment->data.size()};
if (space.addAndCheckOverlap(span)) {
std::cerr << "warning: active memory segments have overlap, which "
<< "prevents some optimizations.\n";
@@ -245,17 +249,17 @@ bool MemoryPacking::canOptimize(const Memory& memory,
return true;
}
-bool MemoryPacking::canSplit(const Memory::Segment& segment,
+bool MemoryPacking::canSplit(const std::unique_ptr<DataSegment>& segment,
const Referrers& referrers) {
// Don't mess with segments related to llvm coverage tools such as
// __llvm_covfun. These segments are expected/parsed by external downstream
// tools (llvm-cov) so they need to be left intact.
// See https://clang.llvm.org/docs/SourceBasedCodeCoverage.html
- if (segment.name.is() && segment.name.startsWith("__llvm")) {
+ if (segment->name.is() && segment->name.startsWith("__llvm")) {
return false;
}
- if (segment.isPassive) {
+ if (segment->isPassive) {
for (auto* referrer : referrers) {
if (auto* init = referrer->dynCast<MemoryInit>()) {
// Do not try to split if there is a nonconstant offset or size
@@ -268,13 +272,13 @@ bool MemoryPacking::canSplit(const Memory::Segment& segment,
}
// Active segments can only be split if they have constant offsets
- return segment.offset->is<Const>();
+ return segment->offset->is<Const>();
}
-void MemoryPacking::calculateRanges(const Memory::Segment& segment,
+void MemoryPacking::calculateRanges(const std::unique_ptr<DataSegment>& segment,
const Referrers& referrers,
std::vector<Range>& ranges) {
- auto& data = segment.data;
+ auto& data = segment->data;
if (data.size() == 0) {
return;
}
@@ -304,7 +308,7 @@ void MemoryPacking::calculateRanges(const Memory::Segment& segment,
// entire segment and that all its arguments are constants. These assumptions
// are true of all memory.inits generated by the tools.
size_t threshold = 0;
- if (segment.isPassive) {
+ if (segment->isPassive) {
// Passive segment metadata size
threshold += 2;
// Zeroes on the edge do not increase the number of segments or data.drops,
@@ -375,8 +379,8 @@ void MemoryPacking::optimizeBulkMemoryOps(PassRunner* runner, Module* module) {
void visitMemoryInit(MemoryInit* curr) {
Builder builder(*getModule());
- Memory::Segment& segment = getModule()->memory.segments[curr->segment];
- size_t maxRuntimeSize = segment.isPassive ? segment.data.size() : 0;
+ auto& segment = getModule()->dataSegments[curr->segment];
+ size_t maxRuntimeSize = segment->isPassive ? segment->data.size() : 0;
bool mustNop = false;
bool mustTrap = false;
auto* offset = curr->offset->dynCast<Const>();
@@ -409,7 +413,7 @@ void MemoryPacking::optimizeBulkMemoryOps(PassRunner* runner, Module* module) {
builder.makeDrop(curr->size),
builder.makeUnreachable()));
needsRefinalizing = true;
- } else if (!segment.isPassive) {
+ } else if (!segment->isPassive) {
// trap if (dest > memory.size | offset | size) != 0
replaceCurrent(builder.makeIf(
builder.makeBinary(
@@ -420,7 +424,7 @@ void MemoryPacking::optimizeBulkMemoryOps(PassRunner* runner, Module* module) {
}
}
void visitDataDrop(DataDrop* curr) {
- if (!getModule()->memory.segments[curr->segment].isPassive) {
+ if (!getModule()->dataSegments[curr->segment]->isPassive) {
ExpressionManipulator::nop(curr);
}
}
@@ -467,9 +471,10 @@ void MemoryPacking::getSegmentReferrers(Module* module,
}
}
-void MemoryPacking::dropUnusedSegments(std::vector<Memory::Segment>& segments,
- ReferrersMap& referrers) {
- std::vector<Memory::Segment> usedSegments;
+void MemoryPacking::dropUnusedSegments(
+ std::vector<std::unique_ptr<DataSegment>>& segments,
+ ReferrersMap& referrers) {
+ std::vector<std::unique_ptr<DataSegment>> usedSegments;
ReferrersMap usedReferrers;
// Remove segments that are never used
// TODO: remove unused portions of partially used segments as well
@@ -477,7 +482,7 @@ void MemoryPacking::dropUnusedSegments(std::vector<Memory::Segment>& segments,
bool used = false;
auto referrersIt = referrers.find(i);
bool hasReferrers = referrersIt != referrers.end();
- if (segments[i].isPassive) {
+ if (segments[i]->isPassive) {
if (hasReferrers) {
for (auto* referrer : referrersIt->second) {
if (referrer->is<MemoryInit>()) {
@@ -506,20 +511,22 @@ void MemoryPacking::dropUnusedSegments(std::vector<Memory::Segment>& segments,
std::swap(referrers, usedReferrers);
}
-void MemoryPacking::createSplitSegments(Builder& builder,
- const Memory::Segment& segment,
- std::vector<Range>& ranges,
- std::vector<Memory::Segment>& packed,
- size_t segmentsRemaining) {
+void MemoryPacking::createSplitSegments(
+ Builder& builder,
+ const DataSegment* segment,
+ std::vector<Range>& ranges,
+ std::vector<std::unique_ptr<DataSegment>>& packed,
+ size_t segmentsRemaining) {
size_t segmentCount = 0;
+ bool hasExplicitName = false;
for (size_t i = 0; i < ranges.size(); ++i) {
Range& range = ranges[i];
if (range.isZero) {
continue;
}
Expression* offset = nullptr;
- if (!segment.isPassive) {
- if (auto* c = segment.offset->dynCast<Const>()) {
+ if (!segment->isPassive) {
+ if (auto* c = segment->offset->dynCast<Const>()) {
if (c->value.type == Type::i32) {
offset = builder.makeConst(int32_t(c->value.geti32() + range.start));
} else {
@@ -528,7 +535,7 @@ void MemoryPacking::createSplitSegments(Builder& builder,
}
} else {
assert(ranges.size() == 1);
- offset = segment.offset;
+ offset = segment->offset;
}
}
if (WebLimitations::MaxDataSegments <= packed.size() + segmentsRemaining) {
@@ -541,24 +548,27 @@ void MemoryPacking::createSplitSegments(Builder& builder,
ranges.erase(ranges.begin() + i + 1, lastNonzero + 1);
}
Name name;
- if (segment.name.is()) {
+ if (segment->name.is()) {
// Name the first range after the original segment and all following
// ranges get numbered accordingly. This means that for segments that
cannot be split (segments that contain a single range) the input and
// output segment have the same name.
if (!segmentCount) {
- name = segment.name;
+ name = segment->name;
+ hasExplicitName = segment->hasExplicitName;
} else {
- name = std::string(segment.name.c_str()) + "." +
+ name = std::string(segment->name.c_str()) + "." +
std::to_string(segmentCount);
}
segmentCount++;
}
- packed.emplace_back(name,
- segment.isPassive,
- offset,
- &segment.data[range.start],
- range.end - range.start);
+ auto curr = Builder::makeDataSegment(name,
+ segment->isPassive,
+ offset,
+ &segment->data[range.start],
+ range.end - range.start);
+ curr->hasExplicitName = hasExplicitName;
+ packed.push_back(std::move(curr));
}
}
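
Where the old code emplace-constructed a Memory::Segment directly into the packed vector, the pass now builds each split piece with Builder::makeDataSegment and moves the resulting unique_ptr, carrying the explicit-name flag over by hand. A minimal sketch of that construction path under the same API; sliceSegment and its parameters are hypothetical.

#include "wasm.h"
#include "wasm-builder.h"

namespace wasm {

// Hypothetical helper: copy a byte range [start, end) out of an existing
// segment into a fresh DataSegment, as createSplitSegments now does.
std::unique_ptr<DataSegment> sliceSegment(const DataSegment& original,
                                          Expression* offset,
                                          size_t start,
                                          size_t end) {
  auto copy = Builder::makeDataSegment(original.name,
                                       original.isPassive,
                                       offset,
                                       &original.data[start],
                                       end - start);
  // The explicit-name flag is tracked separately and must be carried over.
  copy->hasExplicitName = original.hasExplicitName;
  return copy;
}

} // namespace wasm
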
diff --git a/src/passes/Metrics.cpp b/src/passes/Metrics.cpp
index 566738161..70d79c12c 100644
--- a/src/passes/Metrics.cpp
+++ b/src/passes/Metrics.cpp
@@ -53,7 +53,6 @@ struct Metrics
}
ModuleUtils::iterDefinedGlobals(*module,
[&](Global* curr) { walkGlobal(curr); });
- walkMemory(&module->memory);
// add imports / funcs / globals / exports / tables
counts["[imports]"] = imports.getNumImports();
@@ -62,23 +61,26 @@ struct Metrics
counts["[tags]"] = imports.getNumDefinedTags();
counts["[exports]"] = module->exports.size();
counts["[tables]"] = imports.getNumDefinedTables();
- // add memory and table
- if (module->memory.exists) {
- Index size = 0;
- for (auto& segment : module->memory.segments) {
- size += segment.data.size();
- }
+
+ // add memory
+ walkMemory(&module->memory);
+ Index size = 0;
+ for (auto& segment : module->dataSegments) {
+ walkDataSegment(segment.get());
+ size += segment->data.size();
+ }
+ if (!module->dataSegments.empty()) {
counts["[memory-data]"] = size;
}
- Index size = 0;
- ModuleUtils::iterActiveElementSegments(
- *module, [&](ElementSegment* segment) { size += segment->data.size(); });
+ // add table
+ size = 0;
for (auto& table : module->tables) {
walkTable(table.get());
}
for (auto& segment : module->elementSegments) {
walkElementSegment(segment.get());
+ size += segment->data.size();
}
if (!module->tables.empty()) {
counts["[table-data]"] = size;
diff --git a/src/passes/Print.cpp b/src/passes/Print.cpp
index a87b2e643..7260d7161 100644
--- a/src/passes/Print.cpp
+++ b/src/passes/Print.cpp
@@ -3006,57 +3006,57 @@ struct PrintSExpression : public UnifiedExpressionVisitor<PrintSExpression> {
printMemoryHeader(curr);
o << '\n';
}
- for (auto segment : curr->segments) {
- doIndent(o, indent);
- o << '(';
- printMajor(o, "data ");
- if (segment.name.is()) {
- printName(segment.name, o);
- o << ' ';
- }
- if (!segment.isPassive) {
- visit(segment.offset);
- o << ' ';
- }
- o << "\"";
- for (size_t i = 0; i < segment.data.size(); i++) {
- unsigned char c = segment.data[i];
- switch (c) {
- case '\n':
- o << "\\n";
- break;
- case '\r':
- o << "\\0d";
- break;
- case '\t':
- o << "\\t";
- break;
- case '\f':
- o << "\\0c";
- break;
- case '\b':
- o << "\\08";
- break;
- case '\\':
- o << "\\\\";
- break;
- case '"':
- o << "\\\"";
- break;
- case '\'':
- o << "\\'";
- break;
- default: {
- if (c >= 32 && c < 127) {
- o << c;
- } else {
- o << std::hex << '\\' << (c / 16) << (c % 16) << std::dec;
- }
+ }
+ void visitDataSegment(DataSegment* curr) {
+ doIndent(o, indent);
+ o << '(';
+ printMajor(o, "data ");
+ if (curr->hasExplicitName) {
+ printName(curr->name, o);
+ o << ' ';
+ }
+ if (!curr->isPassive) {
+ visit(curr->offset);
+ o << ' ';
+ }
+ o << "\"";
+ for (size_t i = 0; i < curr->data.size(); i++) {
+ unsigned char c = curr->data[i];
+ switch (c) {
+ case '\n':
+ o << "\\n";
+ break;
+ case '\r':
+ o << "\\0d";
+ break;
+ case '\t':
+ o << "\\t";
+ break;
+ case '\f':
+ o << "\\0c";
+ break;
+ case '\b':
+ o << "\\08";
+ break;
+ case '\\':
+ o << "\\\\";
+ break;
+ case '"':
+ o << "\\\"";
+ break;
+ case '\'':
+ o << "\\'";
+ break;
+ default: {
+ if (c >= 32 && c < 127) {
+ o << c;
+ } else {
+ o << std::hex << '\\' << (c / 16) << (c % 16) << std::dec;
}
}
}
- o << "\")" << maybeNewLine;
}
+ o << "\")" << maybeNewLine;
}
void printDylinkSection(const std::unique_ptr<DylinkSection>& dylinkSection) {
doIndent(o, indent) << ";; dylink section\n";
@@ -3134,6 +3134,9 @@ struct PrintSExpression : public UnifiedExpressionVisitor<PrintSExpression> {
*curr, [&](Global* global) { visitGlobal(global); });
ModuleUtils::iterDefinedMemories(
*curr, [&](Memory* memory) { visitMemory(memory); });
+ for (auto& segment : curr->dataSegments) {
+ visitDataSegment(segment.get());
+ }
ModuleUtils::iterDefinedTables(*curr,
[&](Table* table) { visitTable(table); });
for (auto& segment : curr->elementSegments) {
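
The data-printing logic itself is unchanged, only rehomed into visitDataSegment: segment bytes are emitted as a WAT string literal, with named escapes for a few control and quote characters and a two-digit hex escape for everything outside printable ASCII. A standalone sketch of that escaping rule in plain C++ (escapeDataString is a hypothetical name):

#include <sstream>
#include <string>
#include <vector>

// Sketch of the escaping rule used when printing (data ...) payloads: named
// escapes for a handful of characters, \XX hex for anything non-printable.
std::string escapeDataString(const std::vector<unsigned char>& data) {
  std::ostringstream o;
  o << '"';
  for (unsigned char c : data) {
    switch (c) {
      case '\n': o << "\\n"; break;
      case '\r': o << "\\0d"; break;
      case '\t': o << "\\t"; break;
      case '\f': o << "\\0c"; break;
      case '\b': o << "\\08"; break;
      case '\\': o << "\\\\"; break;
      case '"': o << "\\\""; break;
      case '\'': o << "\\'"; break;
      default:
        if (c >= 32 && c < 127) {
          o << c;
        } else {
          o << std::hex << '\\' << (c / 16) << (c % 16) << std::dec;
        }
    }
  }
  o << '"';
  return o.str();
}
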
diff --git a/src/passes/RemoveMemory.cpp b/src/passes/RemoveMemory.cpp
index 399a32933..6f2f9aee7 100644
--- a/src/passes/RemoveMemory.cpp
+++ b/src/passes/RemoveMemory.cpp
@@ -25,7 +25,7 @@ namespace wasm {
struct RemoveMemory : public Pass {
void run(PassRunner* runner, Module* module) override {
- module->memory.segments.clear();
+ module->removeDataSegments([&](DataSegment* curr) { return true; });
}
};
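
Clearing memory.segments becomes a call to Module::removeDataSegments, which removes every segment for which the predicate returns true. A minimal sketch of a more selective use of the same call, assuming the post-change Module API; dropUnnamedDataSegments is hypothetical.

#include "wasm.h"

namespace wasm {

// Hypothetical usage: the always-true predicate above drops every segment; a
// predicate can just as well keep some, e.g. remove only unnamed segments.
void dropUnnamedDataSegments(Module& module) {
  module.removeDataSegments(
    [](DataSegment* curr) { return !curr->hasExplicitName; });
}

} // namespace wasm
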
diff --git a/src/passes/RemoveUnusedModuleElements.cpp b/src/passes/RemoveUnusedModuleElements.cpp
index 686a95032..9268232b4 100644
--- a/src/passes/RemoveUnusedModuleElements.cpp
+++ b/src/passes/RemoveUnusedModuleElements.cpp
@@ -70,9 +70,9 @@ struct ReachabilityAnalyzer : public PostWalker<ReachabilityAnalyzer> {
: module(module) {
queue = roots;
// Globals used in memory/table init expressions are also roots
- for (auto& segment : module->memory.segments) {
- if (!segment.isPassive) {
- walk(segment.offset);
+ for (auto& segment : module->dataSegments) {
+ if (!segment->isPassive) {
+ walk(segment->offset);
}
}
for (auto& segment : module->elementSegments) {
@@ -365,9 +365,9 @@ struct RemoveUnusedModuleElements : public Pass {
if (!importsMemory) {
// The memory is unobservable to the outside, we can remove the
// contents.
- module->memory.segments.clear();
+ module->dataSegments.clear();
}
- if (module->memory.segments.empty()) {
+ if (module->dataSegments.empty()) {
module->memory.exists = false;
module->memory.module = module->memory.base = Name();
module->memory.initial = 0;
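
Both uses in this pass switch to module->dataSegments: offsets of active segments are walked as extra reachability roots, and the vector is cleared outright when the memory is unobservable from outside. A minimal sketch of the root-collection side, assuming the post-change layout; collectSegmentOffsetRoots and the Analyzer template parameter are hypothetical.

#include "wasm.h"

namespace wasm {

// Hypothetical helper mirroring the reachability setup above: globals used in
// an active segment's offset expression must stay live, so each such offset
// is handed to the analyzer's walk as an extra root.
template<typename Analyzer>
void collectSegmentOffsetRoots(Module& module, Analyzer& analyzer) {
  for (auto& segment : module.dataSegments) {
    if (!segment->isPassive) {
      analyzer.walk(segment->offset);
    }
  }
}

} // namespace wasm
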