-rw-r--r--  scripts/fuzz_opt.py                         14
-rw-r--r--  src/passes/RemoveUnusedModuleElements.cpp   13
-rw-r--r--  src/tools/wasm-opt.cpp                       1
-rw-r--r--  src/wasm-interpreter.h                      89
-rw-r--r--  src/wasm-traversal.h                        12
-rw-r--r--  src/wasm.h                                   1
-rw-r--r--  src/wasm/wasm-binary.cpp                     5
-rw-r--r--  test/spec/bulk-memory.wast                 183
8 files changed, 286 insertions, 32 deletions
diff --git a/scripts/fuzz_opt.py b/scripts/fuzz_opt.py
index b138209f5..cd4912475 100644
--- a/scripts/fuzz_opt.py
+++ b/scripts/fuzz_opt.py
@@ -138,12 +138,12 @@ def run_vm(cmd):
def run_bynterp(wasm):
- return fix_output(run_vm([in_bin('wasm-opt'), wasm, '--fuzz-exec-before']))
+ return fix_output(run_vm([in_bin('wasm-opt'), wasm, '--fuzz-exec-before'] + FUZZ_OPTS))
def run_wasm2js(wasm):
- wrapper = run([in_bin('wasm-opt'), wasm, '--emit-js-wrapper=/dev/stdout'])
- main = run([in_bin('wasm2js'), wasm, '--emscripten'])
+ wrapper = run([in_bin('wasm-opt'), wasm, '--emit-js-wrapper=/dev/stdout'] + FUZZ_OPTS)
+ main = run([in_bin('wasm2js'), wasm, '--emscripten'] + FUZZ_OPTS)
with open(os.path.join(options.binaryen_root, 'scripts', 'wasm2js.js')) as f:
glue = f.read()
with open('js.js', 'w') as f:
@@ -193,7 +193,7 @@ def test_one(infile, opts):
before = run_vms('a.')
print('----------------')
# gather VM outputs on processed file
- run([in_bin('wasm-opt'), 'a.wasm', '-o', 'b.wasm'] + opts)
+ run([in_bin('wasm-opt'), 'a.wasm', '-o', 'b.wasm'] + opts + FUZZ_OPTS)
wasm_size = os.stat('b.wasm').st_size
bytes += wasm_size
print('post js size:', os.stat('a.js').st_size, ' wasm size:', wasm_size)
@@ -205,10 +205,10 @@ def test_one(infile, opts):
if NANS:
break
# fuzz binaryen interpreter itself. separate invocation so result is easily fuzzable
- run([in_bin('wasm-opt'), 'a.wasm', '--fuzz-exec', '--fuzz-binary'] + opts)
+ run([in_bin('wasm-opt'), 'a.wasm', '--fuzz-exec', '--fuzz-binary'] + opts + FUZZ_OPTS)
# check for determinism
- run([in_bin('wasm-opt'), 'a.wasm', '-o', 'b.wasm'] + opts)
- run([in_bin('wasm-opt'), 'a.wasm', '-o', 'c.wasm'] + opts)
+ run([in_bin('wasm-opt'), 'a.wasm', '-o', 'b.wasm'] + opts + FUZZ_OPTS)
+ run([in_bin('wasm-opt'), 'a.wasm', '-o', 'c.wasm'] + opts + FUZZ_OPTS)
assert open('b.wasm').read() == open('c.wasm').read(), 'output must be deterministic'
return bytes
diff --git a/src/passes/RemoveUnusedModuleElements.cpp b/src/passes/RemoveUnusedModuleElements.cpp
index 777d2dacc..e91fafcba 100644
--- a/src/passes/RemoveUnusedModuleElements.cpp
+++ b/src/passes/RemoveUnusedModuleElements.cpp
@@ -39,6 +39,7 @@ enum class ModuleElementKind {
typedef std::pair<ModuleElementKind, Name> ModuleElement;
// Finds reachabilities
+// TODO: use Effects to determine if a memory is used
struct ReachabilityAnalyzer : public PostWalker<ReachabilityAnalyzer> {
Module* module;
@@ -119,6 +120,18 @@ struct ReachabilityAnalyzer : public PostWalker<ReachabilityAnalyzer> {
void visitAtomicNotify(AtomicNotify* curr) {
usesMemory = true;
}
+ void visitMemoryInit(MemoryInit* curr) {
+ usesMemory = true;
+ }
+ void visitDataDrop(DataDrop* curr) {
+ usesMemory = true;
+ }
+ void visitMemoryCopy(MemoryCopy* curr) {
+ usesMemory = true;
+ }
+ void visitMemoryFill(MemoryFill* curr) {
+ usesMemory = true;
+ }
void visitHost(Host* curr) {
if (curr->op == CurrentMemory || curr->op == GrowMemory) {
usesMemory = true;
diff --git a/src/tools/wasm-opt.cpp b/src/tools/wasm-opt.cpp
index 6fce0f89b..519713298 100644
--- a/src/tools/wasm-opt.cpp
+++ b/src/tools/wasm-opt.cpp
@@ -240,6 +240,7 @@ int main(int argc, const char* argv[]) {
auto input = buffer.getAsChars();
WasmBinaryBuilder parser(other, input, false);
parser.read();
+ options.applyFeatures(other);
if (options.passOptions.validate) {
bool valid = WasmValidator().validate(other);
if (!valid) {
diff --git a/src/wasm-interpreter.h b/src/wasm-interpreter.h
index fcdb44c42..23121a849 100644
--- a/src/wasm-interpreter.h
+++ b/src/wasm-interpreter.h
@@ -928,10 +928,6 @@ private:
}
void initializeMemoryContents() {
- // no way to create a Block without an ArenaAllocator, so use a builder
- // instead of creating it locally.
- Builder builder(wasm);
-
Const offset;
offset.value = Literal(uint32_t(0));
offset.finalize();
@@ -955,15 +951,16 @@ private:
init.finalize();
DataDrop drop;
- drop.segment = segment.index;
+ drop.segment = i;
drop.finalize();
- Function initializer;
- initializer.body = builder.blockify(&init, &drop);
-
- FunctionScope scope(&initializer, {});
-
- RuntimeExpressionRunner(*this, scope).visit(&init);
+ // we don't actually have a function, but we need one in order to visit
+ // the memory.init and data.drop instructions.
+ Function dummyFunc;
+ FunctionScope dummyScope(&dummyFunc, {});
+ RuntimeExpressionRunner runner(*this, dummyScope);
+ runner.visit(&init);
+ runner.visit(&drop);
}
}
@@ -1228,14 +1225,20 @@ private:
trap("memory.init of dropped segment");
}
- size_t destVal(dest.value.geti32());
- size_t offsetVal(offset.value.geti32());
- size_t sizeVal(size.value.geti32());
+ Address destVal(uint32_t(dest.value.geti32()));
+ Address offsetVal(uint32_t(offset.value.geti32()));
+ Address sizeVal(uint32_t(size.value.geti32()));
+
+ instance.checkLoadAddress(destVal, 0);
+ if (offsetVal > segment.data.size()) {
+ trap("segment offset out of bounds");
+ }
+
for (size_t i = 0; i < sizeVal; ++i) {
if (offsetVal + i >= segment.data.size()) {
trap("out of bounds segment access in memory.init");
}
- Literal addr = Literal(uint32_t(destVal + i));
+ Literal addr(uint32_t(destVal + i));
instance.externalInterface->store8(
instance.getFinalAddress(addr, 1),
segment.data[offsetVal + i]
@@ -1253,12 +1256,64 @@ private:
}
Flow visitMemoryCopy(MemoryCopy *curr) {
NOTE_ENTER("MemoryCopy");
- // TODO(tlively): implement me
+ Flow dest = this->visit(curr->dest);
+ if (dest.breaking()) return dest;
+ Flow source = this->visit(curr->source);
+ if (source.breaking()) return source;
+ Flow size = this->visit(curr->size);
+ if (size.breaking()) return size;
+ NOTE_EVAL1(dest);
+ NOTE_EVAL1(source);
+ NOTE_EVAL1(size);
+ Address destVal(uint32_t(dest.value.geti32()));
+ Address sourceVal(uint32_t(source.value.geti32()));
+ Address sizeVal(uint32_t(size.value.geti32()));
+
+ instance.checkLoadAddress(destVal, 0);
+ instance.checkLoadAddress(sourceVal, 0);
+
+ size_t start = 0;
+ size_t end = sizeVal;
+ int step = 1;
+ // Reverse direction if source is below dest and they overlap
+ if (sourceVal < destVal &&
+ (sourceVal + sizeVal > destVal || sourceVal + sizeVal < sourceVal)) {
+ start = sizeVal - 1;
+ end = -1;
+ step = -1;
+ }
+ for (size_t i = start; i != end; i += step) {
+ if (i + destVal >= std::numeric_limits<uint32_t>::max()) {
+ trap("Out of bounds memory access");
+ }
+ instance.externalInterface->store8(
+ instance.getFinalAddress(Literal(uint32_t(destVal + i)), 1),
+ instance.externalInterface->load8s(
+ instance.getFinalAddress(Literal(uint32_t(sourceVal + i)), 1)));
+ }
return {};
}
Flow visitMemoryFill(MemoryFill *curr) {
NOTE_ENTER("MemoryFill");
- // TODO(tlively): implement me
+ Flow dest = this->visit(curr->dest);
+ if (dest.breaking()) return dest;
+ Flow value = this->visit(curr->value);
+ if (value.breaking()) return value;
+ Flow size = this->visit(curr->size);
+ if (size.breaking()) return size;
+ NOTE_EVAL1(dest);
+ NOTE_EVAL1(value);
+ NOTE_EVAL1(size);
+ Address destVal(uint32_t(dest.value.geti32()));
+ Address sizeVal(uint32_t(size.value.geti32()));
+
+ instance.checkLoadAddress(destVal, 0);
+
+ uint8_t val(value.value.geti32());
+ for (size_t i = 0; i < sizeVal; ++i) {
+ instance.externalInterface->store8(
+ instance.getFinalAddress(Literal(uint32_t(destVal + i)), 1), val);
+ }
return {};
}
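
The visitMemoryCopy implementation above copies byte-by-byte and reverses direction when the source range starts below an overlapping destination, so each source byte is read before it can be overwritten (the same idea as memmove). A minimal standalone sketch of that direction-selection logic follows; the names (copyOverlapAware, mem) are hypothetical, and the interpreter's 32-bit wraparound guard and bounds checks are omitted.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative only: copies `size` bytes within one buffer, choosing the
// iteration direction so overlapping ranges behave like memmove. Mirrors
// the direction check used in visitMemoryCopy above.
void copyOverlapAware(std::vector<uint8_t>& mem,
                      size_t dest, size_t source, size_t size) {
  size_t start = 0, end = size;
  std::ptrdiff_t step = 1;
  // If the source starts below the destination and the ranges overlap,
  // copy from the end so source bytes are read before they are clobbered.
  if (source < dest && source + size > dest) {
    start = size - 1;
    end = size_t(-1); // loop ends when i wraps past zero, as in the diff
    step = -1;
  }
  for (size_t i = start; i != end; i += step) {
    mem[dest + i] = mem[source + i];
  }
}

int main() {
  std::vector<uint8_t> mem = {0xaa, 0xbb, 0xcc, 0xdd, 0, 0, 0, 0};
  copyOverlapAware(mem, 1, 0, 4); // overlapping copy, dest above source
  assert(mem[1] == 0xaa && mem[2] == 0xbb && mem[4] == 0xdd);
}
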
diff --git a/src/wasm-traversal.h b/src/wasm-traversal.h
index 8aa203fa7..38fcdb990 100644
--- a/src/wasm-traversal.h
+++ b/src/wasm-traversal.h
@@ -657,9 +657,9 @@ struct PostWalker : public Walker<SubType, VisitorType> {
}
case Expression::Id::MemoryInitId: {
self->pushTask(SubType::doVisitMemoryInit, currp);
- self->pushTask(SubType::scan, &curr->cast<MemoryInit>()->dest);
- self->pushTask(SubType::scan, &curr->cast<MemoryInit>()->offset);
self->pushTask(SubType::scan, &curr->cast<MemoryInit>()->size);
+ self->pushTask(SubType::scan, &curr->cast<MemoryInit>()->offset);
+ self->pushTask(SubType::scan, &curr->cast<MemoryInit>()->dest);
break;
}
case Expression::Id::DataDropId: {
@@ -668,16 +668,16 @@ struct PostWalker : public Walker<SubType, VisitorType> {
}
case Expression::Id::MemoryCopyId: {
self->pushTask(SubType::doVisitMemoryCopy, currp);
- self->pushTask(SubType::scan, &curr->cast<MemoryCopy>()->dest);
- self->pushTask(SubType::scan, &curr->cast<MemoryCopy>()->source);
self->pushTask(SubType::scan, &curr->cast<MemoryCopy>()->size);
+ self->pushTask(SubType::scan, &curr->cast<MemoryCopy>()->source);
+ self->pushTask(SubType::scan, &curr->cast<MemoryCopy>()->dest);
break;
}
case Expression::Id::MemoryFillId: {
self->pushTask(SubType::doVisitMemoryFill, currp);
- self->pushTask(SubType::scan, &curr->cast<MemoryFill>()->dest);
- self->pushTask(SubType::scan, &curr->cast<MemoryFill>()->value);
self->pushTask(SubType::scan, &curr->cast<MemoryFill>()->size);
+ self->pushTask(SubType::scan, &curr->cast<MemoryFill>()->value);
+ self->pushTask(SubType::scan, &curr->cast<MemoryFill>()->dest);
break;
}
case Expression::Id::ConstId: {
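
PostWalker runs its tasks from a LIFO stack, which is why the reordering above pushes each instruction's children in reverse operand order: the doVisit* task goes on first (so it runs last, giving post-order), then size, then source, then dest, which pop and execute as dest, source, size. A minimal sketch of that push-in-reverse pattern, using std::function tasks rather than Binaryen's Task type:

#include <functional>
#include <iostream>
#include <vector>

// Illustrative only: a stack-based walker pops tasks LIFO, so tasks pushed
// last run first. To visit dest, source, size in operand order and only
// then the node itself, push the visit task first and the children in
// reverse: size, source, dest.
int main() {
  std::vector<std::function<void()>> tasks;
  tasks.push_back([] { std::cout << "visit MemoryCopy\n"; });
  tasks.push_back([] { std::cout << "scan size\n"; });
  tasks.push_back([] { std::cout << "scan source\n"; });
  tasks.push_back([] { std::cout << "scan dest\n"; });
  while (!tasks.empty()) {
    auto task = tasks.back();
    tasks.pop_back();
    task(); // prints: scan dest, scan source, scan size, visit MemoryCopy
  }
}
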
diff --git a/src/wasm.h b/src/wasm.h
index f4180f7a0..83809243c 100644
--- a/src/wasm.h
+++ b/src/wasm.h
@@ -859,7 +859,6 @@ public:
struct Segment {
bool isPassive = false;
- Index index = 0;
Expression* offset = nullptr;
std::vector<char> data; // TODO: optimize
Segment() = default;
diff --git a/src/wasm/wasm-binary.cpp b/src/wasm/wasm-binary.cpp
index f3642203d..b8c3f1049 100644
--- a/src/wasm/wasm-binary.cpp
+++ b/src/wasm/wasm-binary.cpp
@@ -1540,7 +1540,10 @@ void WasmBinaryBuilder::readDataSegments() {
}
curr.isPassive = flags & BinaryConsts::IsPassive;
if (flags & BinaryConsts::HasMemIndex) {
- curr.index = getU32LEB();
+ auto memIndex = getU32LEB();
+ if (memIndex != 0) {
+ throwError("nonzero memory index");
+ }
}
if (!curr.isPassive) {
curr.offset = readExpression();
diff --git a/test/spec/bulk-memory.wast b/test/spec/bulk-memory.wast
new file mode 100644
index 000000000..474a5fbe1
--- /dev/null
+++ b/test/spec/bulk-memory.wast
@@ -0,0 +1,183 @@
+;; Passive segment syntax
+(module
+ (memory 1)
+ (data passive "foo"))
+
+;; memory.fill
+(module
+ (memory 1)
+
+ (func (export "fill") (param i32 i32 i32)
+ (memory.fill
+ (local.get 0)
+ (local.get 1)
+ (local.get 2)))
+
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0)))
+)
+
+;; Basic fill test.
+(invoke "fill" (i32.const 1) (i32.const 0xff) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0xff))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 0xff))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 0xff))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 0))
+
+;; Fill value is stored as a byte.
+(invoke "fill" (i32.const 0) (i32.const 0xbbaa) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0xaa))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0xaa))
+
+;; Fill all of memory
+(invoke "fill" (i32.const 0) (i32.const 0) (i32.const 0x10000))
+
+;; Out-of-bounds writes trap, but all previous writes succeed.
+(assert_trap (invoke "fill" (i32.const 0xff00) (i32.const 1) (i32.const 0x101))
+ "out of bounds memory access")
+(assert_return (invoke "load8_u" (i32.const 0xff00)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 0xffff)) (i32.const 1))
+
+;; Succeed when writing 0 bytes at the end of the region.
+(invoke "fill" (i32.const 0x10000) (i32.const 0) (i32.const 0))
+
+;; Fail on out-of-bounds when writing 0 bytes outside of memory.
+(assert_trap (invoke "fill" (i32.const 0x10001) (i32.const 0) (i32.const 0))
+ "out of bounds memory access")
+
+
+;; memory.copy
+(module
+ (memory 1 1)
+ (data (i32.const 0) "\aa\bb\cc\dd")
+
+ (func (export "copy") (param i32 i32 i32)
+ (memory.copy
+ (local.get 0)
+ (local.get 1)
+ (local.get 2)))
+
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0)))
+)
+
+;; Non-overlapping copy.
+(invoke "copy" (i32.const 10) (i32.const 0) (i32.const 4))
+
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0xaa))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0xbb))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 0xcc))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 0xdd))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 0))
+
+;; Overlap, source > dest
+(invoke "copy" (i32.const 8) (i32.const 10) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0xaa))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0xbb))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0xcc))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0xdd))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 0xcc))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 0xdd))
+
+;; Overlap, source < dest
+(invoke "copy" (i32.const 10) (i32.const 7) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0xaa))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 0xbb))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 0xcc))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 0xdd))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 0xcc))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 0))
+
+;; Overlap, source < dest but size is out of bounds
+(assert_trap (invoke "copy" (i32.const 13) (i32.const 11) (i32.const -1)))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0xaa))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 0xbb))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 0xcc))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 0xdd))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 0xcc))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 0))
+
+;; Copy ending at memory limit is ok.
+(invoke "copy" (i32.const 0xff00) (i32.const 0) (i32.const 0x100))
+(invoke "copy" (i32.const 0xfe00) (i32.const 0xff00) (i32.const 0x100))
+
+;; Out-of-bounds writes trap, but all previous writes succeed.
+(assert_trap (invoke "copy" (i32.const 0xfffe) (i32.const 0) (i32.const 3))
+ "out of bounds memory access")
+(assert_return (invoke "load8_u" (i32.const 0xfffe)) (i32.const 0xaa))
+(assert_return (invoke "load8_u" (i32.const 0xffff)) (i32.const 0xbb))
+
+;; Succeed when copying 0 bytes at the end of the region.
+(invoke "copy" (i32.const 0x10000) (i32.const 0) (i32.const 0))
+(invoke "copy" (i32.const 0) (i32.const 0x10000) (i32.const 0))
+
+;; Fail on out-of-bounds when copying 0 bytes outside of memory.
+(assert_trap (invoke "copy" (i32.const 0x10001) (i32.const 0) (i32.const 0))
+ "out of bounds memory access")
+(assert_trap (invoke "copy" (i32.const 0) (i32.const 0x10001) (i32.const 0))
+ "out of bounds memory access")
+
+;; memory.init
+(module
+ (memory 1)
+ (data passive "\aa\bb\cc\dd")
+
+ (func (export "init") (param i32 i32 i32)
+ (memory.init 0
+ (local.get 0)
+ (local.get 1)
+ (local.get 2)))
+
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0)))
+)
+
+(invoke "init" (i32.const 0) (i32.const 1) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0xbb))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0xcc))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 0))
+
+;; Init ending at memory limit and segment limit is ok.
+(invoke "init" (i32.const 0xfffc) (i32.const 0) (i32.const 4))
+
+;; Out-of-bounds writes trap, but all previous writes succeed.
+(assert_trap (invoke "init" (i32.const 0xfffe) (i32.const 0) (i32.const 3))
+ "out of bounds memory access")
+(assert_return (invoke "load8_u" (i32.const 0xfffe)) (i32.const 0xaa))
+(assert_return (invoke "load8_u" (i32.const 0xffff)) (i32.const 0xbb))
+
+;; Succeed when writing 0 bytes at the end of either region.
+(invoke "init" (i32.const 0x10000) (i32.const 0) (i32.const 0))
+(invoke "init" (i32.const 0) (i32.const 4) (i32.const 0))
+
+;; Fail on out-of-bounds when writing 0 bytes outside of memory or segment.
+(assert_trap (invoke "init" (i32.const 0x10001) (i32.const 0) (i32.const 0))
+ "out of bounds memory access")
+(assert_trap (invoke "init" (i32.const 0) (i32.const 5) (i32.const 0))
+ "out of bounds memory access")
+
+;; data.drop
+(module
+ (memory 1)
+ (data passive "")
+ (data (i32.const 0) "")
+
+ (func (export "drop_passive") (data.drop 0))
+ (func (export "init_passive")
+ (memory.init 0 (i32.const 0) (i32.const 0) (i32.const 0)))
+
+ (func (export "drop_active") (data.drop 1))
+ (func (export "init_active")
+ (memory.init 1 (i32.const 0) (i32.const 0) (i32.const 0)))
+)
+
+(invoke "init_passive")
+(invoke "drop_passive")
+(assert_trap (invoke "drop_passive") "data segment dropped")
+(assert_trap (invoke "init_passive") "data segment dropped")
+(assert_trap (invoke "drop_active") "data segment dropped")
+(assert_trap (invoke "init_active") "data segment dropped")