path: root/src/passes/MemoryPacking.cpp
/*
 * Copyright 2016 WebAssembly Community Group participants
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
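
//
// Reduces binary size by dropping trailing zeros from data segments and by
// splitting segments around long runs of zeros, taking advantage of the fact
// that wasm linear memory is guaranteed to be zero-initialized.
//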

#include "ir/manipulation.h"
#include "ir/utils.h"
#include "pass.h"
#include "wasm-binary.h"
#include "wasm-builder.h"
#include "wasm.h"

namespace wasm {

// Adding a new segment adds overhead; this is a rough estimate of that cost
// in bytes.
const Index OVERHEAD = 8;

struct MemoryPacking : public Pass {

  void run(PassRunner* runner, Module* module) override {
    if (!module->memory.exists) {
      return;
    }

    if (module->features.hasBulkMemory()) {
      // Remove any references to active segments that might be invalidated.
      optimizeTrappingBulkMemoryOps(runner, module);
      // Conservatively refuse to change segments if any are passive to avoid
      // invalidating segment indices or segment contents referenced from
      // memory.init and data.drop instructions.
      // TODO: optimize in the presence of memory.init and data.drop
      for (auto& segment : module->memory.segments) {
        if (segment.isPassive) {
          return;
        }
      }
    }

    std::vector<Memory::Segment> packed;

    // We can only split a segment whose offset is a constant
    auto isSplittable = [&](const Memory::Segment& segment) {
      return segment.offset->is<Const>();
    };

    for (auto& segment : module->memory.segments) {
      if (!isSplittable(segment)) {
        packed.push_back(segment);
      }
    }

    // The number of splittable segments that remain to be processed. If we
    // stop splitting now, we end up with packed.size() + numRemaining
    // segments in total, so we only split while that sum leaves room below
    // the limit on data segments.
    size_t numRemaining = module->memory.segments.size() - packed.size();

    // Split only if we have room for more segments
    auto shouldSplit = [&]() {
      return WebLimitations::MaxDataSegments > packed.size() + numRemaining;
    };

    for (auto& segment : module->memory.segments) {
      if (!isSplittable(segment)) {
        continue;
      }

      // skip final zeros
      while (segment.data.size() > 0 && segment.data.back() == 0) {
        segment.data.pop_back();
      }

      if (!shouldSplit()) {
        packed.push_back(segment);
        continue;
      }

      auto* offset = segment.offset->cast<Const>();
      // Find runs of zeros, and split
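      // For example, with OVERHEAD at 8, a segment containing
      // {1, <100 zeros>, 2} at offset 16 would become two segments:
      // {1} at offset 16 and {2} at offset 117.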
      auto& data = segment.data;
      auto base = offset->value.geti32();
      Index start = 0;
      // create new segments
      while (start < data.size()) {
        // skip initial zeros
        while (start < data.size() && data[start] == 0) {
          start++;
        }
        Index end = start; // end of data-containing part
        Index next = end;  // after zeros we can skip. preserves next >= end
        if (!shouldSplit()) {
          next = end = data.size();
        }
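        // Extend the segment greedily: a run of zeros shorter than OVERHEAD
        // costs less to keep inside the segment than ending it and starting
        // a new one would, so only stop at a sufficiently long run of zeros.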
        while (next < data.size() && (next - end < OVERHEAD)) {
          if (data[end] != 0) {
            end++;
            next = end; // we can try to skip zeros from here
          } else {
            // end is on a zero, we are looking to skip
            if (data[next] != 0) {
              end = next; // we must extend the segment, including some zeros
            } else {
              next++;
            }
          }
        }
        if (end != start) {
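          // Emit a new segment covering the packed bytes in [start, end).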
          packed.emplace_back(
            Builder(*module).makeConst(Literal(int32_t(base + start))),
            &data[start],
            end - start);
        }
        start = next;
      }
      numRemaining--;
    }
    module->memory.segments.swap(packed);
  }

  // Replaces memory.init and data.drop instructions that refer to active
  // segments with code that traps, since under the bulk memory proposal such
  // instructions trap at runtime anyway.
  void optimizeTrappingBulkMemoryOps(PassRunner* runner, Module* module) {
    struct Trapper : WalkerPass<PostWalker<Trapper>> {
      bool changed;
      void visitMemoryInit(MemoryInit* curr) {
        if (!getModule()->memory.segments[curr->segment].isPassive) {
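          // This memory.init will trap, but its operands must still be
          // evaluated for their side effects, so drop them before trapping.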
          Builder builder(*getModule());
          replaceCurrent(builder.blockify(builder.makeDrop(curr->dest),
                                          builder.makeDrop(curr->offset),
                                          builder.makeDrop(curr->size),
                                          builder.makeUnreachable()));
          changed = true;
        }
      }
      void visitDataDrop(DataDrop* curr) {
        if (!getModule()->memory.segments[curr->segment].isPassive) {
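          // data.drop has no operands, so it can be replaced with an
          // unreachable directly.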
          ExpressionManipulator::unreachable(curr);
          changed = true;
        }
      }
      void walkFunction(Function* func) {
        changed = false;
        PostWalker<Trapper>::walkFunction(func);
        if (changed) {
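          // Replacing instructions with unreachables changes types, so
          // re-finalize the function to propagate the new types.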
          ReFinalize().walkFunctionInModule(func, getModule());
        }
      }
      bool isFunctionParallel() override { return true; }
      Pass* create() override { return new Trapper; }
    } trapper;
    trapper.run(runner, module);
  }
};

Pass* createMemoryPackingPass() { return new MemoryPacking(); }

} // namespace wasm