Diffstat (limited to 'src')
-rw-r--r--  src/passes/MemoryPacking.cpp  69
-rw-r--r--  src/support/space.h           84
2 files changed, 150 insertions, 3 deletions
diff --git a/src/passes/MemoryPacking.cpp b/src/passes/MemoryPacking.cpp
index 27168942a..3ab170ea9 100644
--- a/src/passes/MemoryPacking.cpp
+++ b/src/passes/MemoryPacking.cpp
@@ -32,12 +32,15 @@
#include "ir/module-utils.h"
#include "ir/utils.h"
#include "pass.h"
+#include "support/space.h"
#include "wasm-binary.h"
#include "wasm-builder.h"
#include "wasm.h"
namespace wasm {
+namespace {
+
// A subsection of an original memory segment. If `isZero` is true, memory.fill
// will be used instead of memory.init for this range.
struct Range {
@@ -74,8 +77,6 @@ const size_t MEMORY_FILL_SIZE = 9;
// data.drop: 2 byte opcode + ~1 byte index immediate
const size_t DATA_DROP_SIZE = 3;
-namespace {
-
Expression*
makeGtShiftedMemorySize(Builder& builder, Module& module, MemoryInit* curr) {
return builder.makeBinary(
@@ -97,6 +98,7 @@ struct MemoryPacking : public Pass {
uint32_t maxSegments;
void run(PassRunner* runner, Module* module) override;
+ bool canOptimize(const std::vector<Memory::Segment>& segments);
void optimizeBulkMemoryOps(PassRunner* runner, Module* module);
void getSegmentReferrers(Module* module, std::vector<Referrers>& referrers);
void dropUnusedSegments(std::vector<Memory::Segment>& segments,
@@ -125,10 +127,15 @@ void MemoryPacking::run(PassRunner* runner, Module* module) {
return;
}
+ auto& segments = module->memory.segments;
+
+ if (!canOptimize(segments)) {
+ return;
+ }
+
maxSegments = module->features.hasBulkMemory()
? 63
: uint32_t(WebLimitations::MaxDataSegments);
- auto& segments = module->memory.segments;
// For each segment, a list of bulk memory instructions that refer to it
std::vector<Referrers> referrers(segments.size());
@@ -176,6 +183,62 @@ void MemoryPacking::run(PassRunner* runner, Module* module) {
}
}
+bool MemoryPacking::canOptimize(const std::vector<Memory::Segment>& segments) {
+ // One segment is always ok to optimize, as it does not have the potential
+ // problems handled below.
+ if (segments.size() <= 1) {
+ return true;
+ }
+ // Check if it is ok for us to optimize.
+ Address maxAddress = 0;
+ for (auto& segment : segments) {
+ if (!segment.isPassive) {
+ auto* c = segment.offset->dynCast<Const>();
+ // If an active segment has a non-constant offset, then what gets written
+ // cannot be known until runtime. That is, the active segments are written
+ // out at startup, in order, and one may trample the data of another, like
+ //
+ // (data (i32.const 100) "a")
+ // (data (i32.const 100) "\00")
+ //
+ // It is *not* ok to optimize out the zero in the last segment, as it is
+      // actually needed: it will zero out the "a" that was written earlier. And
+ // if a segment has an imported offset,
+ //
+ // (data (i32.const 100) "a")
+ // (data (global.get $x) "\00")
+ //
+ // then we can't tell if that last segment will end up overwriting or not.
+      // The only case we can easily handle is that of a single segment,
+      // which we handled earlier. (Note that this covers the main source of
+      // non-constant offsets, dynamic linking, where there is just a single
+      // segment.)
+ if (!c) {
+ return false;
+ }
+ // Note the maximum address so far.
+ maxAddress = std::max(
+ maxAddress, Address(c->value.getInteger() + segment.data.size()));
+ }
+ }
+ // All active segments have constant offsets, known at this time, so we may be
+ // able to optimize, but must still check for the trampling problem mentioned
+ // earlier.
+ // TODO: optimize in the trampling case
+ DisjointSpans space;
+ for (auto& segment : segments) {
+ if (!segment.isPassive) {
+ auto* c = segment.offset->cast<Const>();
+ Address start = c->value.getInteger();
+ DisjointSpans::Span span{start, start + segment.data.size()};
+ if (space.addAndCheckOverlap(span)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
bool MemoryPacking::canSplit(const Memory::Segment& segment,
const Referrers& referrers) {
if (segment.isPassive) {
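
For illustration, here is a minimal sketch of how the overlap check in canOptimize plays out on the wat example from the comments above. This is a hypothetical standalone harness, not part of the patch; it assumes only the DisjointSpans API added in src/support/space.h below:

    #include "support/space.h"
    #include <cassert>

    int main() {
      wasm::DisjointSpans space;
      // (data (i32.const 100) "a")   covers the half-open span [100, 101).
      assert(!space.addAndCheckOverlap({100, 101}));
      // (data (i32.const 100) "\00") covers the same [100, 101), so overlap
      // is reported and canOptimize bails out rather than risk dropping the
      // zero that tramples the "a".
      assert(space.addAndCheckOverlap({100, 101}));
    }
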
diff --git a/src/support/space.h b/src/support/space.h
new file mode 100644
index 000000000..9d940c872
--- /dev/null
+++ b/src/support/space.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2020 WebAssembly Community Group participants
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_support_space_h
+#define wasm_support_space_h
+
+#include "utilities.h"
+#include <wasm.h>
+
+namespace wasm {
+
+struct DisjointSpans {
+ // A span of form [a, b), i.e., that does not include the end point.
+ struct Span {
+ Address left, right;
+
+ bool checkOverlap(const Span& other) const {
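+      // Two half-open spans [a, b) and [c, d) overlap iff a < d && b > c;
+      // the check below is the negation of "entirely to one side".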
+ return !(left >= other.right || right <= other.left);
+ }
+ };
+
+ struct SortByLeft {
+ bool operator()(const Span& left, const Span& right) const {
+ return left.left < right.left ||
+ (left.left == right.left && left.right < right.right);
+ }
+ };
+
+ // The spans seen so far. Guaranteed to be disjoint.
+ std::set<Span, SortByLeft> spans;
+
+  // Adds a span and checks for overlap with the existing spans while doing
+  // so, returning true if such overlap exists.
+ bool addAndCheckOverlap(Span span) {
+    // Insert the new span, then find its predecessor and successor. The
+    // existing spans are disjoint by assumption, so the only question is
+    // whether the new span overlaps one of those immediate neighbors.
+ decltype(spans)::iterator iter;
+ bool inserted;
+ std::tie(iter, inserted) = spans.insert(span);
+ if (!inserted) {
+ // This exact span was already there, so there is definite overlap.
+ return true;
+ }
+ // Check predecessor and successor, if they exist.
+ if (iter != spans.begin() && std::prev(iter)->checkOverlap(span)) {
+ return true;
+ }
+ if (std::next(iter) != spans.end() && std::next(iter)->checkOverlap(span)) {
+ return true;
+ }
+ return false;
+ }
+
+ // Inefficient - mostly for testing.
+ void add(Span span) { addAndCheckOverlap(span); }
+
+ // Inefficient - mostly for testing.
+ bool checkOverlap(Span span) {
+ bool existsBefore = spans.find(span) != spans.end();
+ auto hasOverlap = addAndCheckOverlap(span);
+ if (!existsBefore) {
+ spans.erase(span);
+ }
+ return hasOverlap;
+ }
+};
+
+} // namespace wasm
+
+#endif // wasm_support_space_h
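
As a usage sketch for DisjointSpans itself (hypothetical, assuming only the API shown above):

    #include "support/space.h"
    #include <cassert>

    int main() {
      wasm::DisjointSpans space;
      space.add({0, 10});
      space.add({20, 30});
      // Spans are half-open, so touching at an endpoint is not overlap.
      assert(!space.checkOverlap({10, 20}));
      // A span crossing into an existing one does overlap.
      assert(space.checkOverlap({5, 15}));
      // The stored spans remain disjoint, so addAndCheckOverlap only needs
      // to compare a new span against its immediate neighbors in the set.
      assert(!space.addAndCheckOverlap({40, 50}));
    }

Because the set is ordered by left endpoint and its contents stay disjoint, any overlap with a newly inserted span must involve the element immediately before or after it; that is what makes the single predecessor/successor check in addAndCheckOverlap sufficient, keeping each insertion at O(log n).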