author     Alon Zakai <azakai@google.com>    2020-04-13 15:57:51 -0700
committer  GitHub <noreply@github.com>       2020-04-13 15:57:51 -0700
commit     1bddd6be5416be08d92d0b4a8248decb400b60e0 (patch)
tree       426df38afc548929139c1771571a023c768e02ae /src/wasm-interpreter.h
parent     c16bfeebb5879e9512f2bbf7d611b3b1e0be7dee (diff)
Fix Atomics fuzz bugs in interpreter (#2760)
I am working to bring up the fuzzer on comparisons between VMs. Comparing the binaryen interpreter against v8, it found several atomics issues:

- Atomic operations, including loads and stores, must be aligned, or they trap (see the alignment sketch below).
- AtomicRMW combined the wrong operands: it ignored the loaded value.
- AtomicCmpxchg must wrap the expected value to the access size (if we only load 1 byte, only compare 1 byte of the expected value too).
- AtomicWait and AtomicNotify must take their offsets into account. SIMDLoadExtend was missing that as well. This was confusing in the code, as two getFinalAddress variants existed: one that adds the instruction's offset and one that does not. I renamed the latter to getFinalAddressWithoutOffset so it is explicit, and we can easily see that it is only called for instructions without an offset (which is the case for MemoryInit, MemoryCopy, and MemoryFill).
- AtomicNotify must check its address to see if it should trap, even though we don't actually have multiple threads running.
- Atomic loads of fewer bytes than the type always do an unsigned extension, never a signed one.
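For reference, a minimal sketch of the alignment rule the patch enforces; isAlignedAtomicAccess is an illustrative name, not the interpreter's API (the real check lives in checkAtomicAddress, shown in the diff below):

    #include <cstdint>

    // Wasm atomics must be naturally aligned. For a power-of-two access
    // size, addr % bytes == 0 reduces to a bit test on the low bits.
    bool isAlignedAtomicAccess(uint64_t addr, uint32_t bytes) {
      return bytes <= 1 || (addr & (bytes - 1)) == 0; // false means trap
    }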
Diffstat (limited to 'src/wasm-interpreter.h')
-rw-r--r--  src/wasm-interpreter.h | 113
1 file changed, 91 insertions(+), 22 deletions(-)
diff --git a/src/wasm-interpreter.h b/src/wasm-interpreter.h
index ca8041d5a..7a1fc1784 100644
--- a/src/wasm-interpreter.h
+++ b/src/wasm-interpreter.h
@@ -1689,6 +1689,9 @@ private:
}
NOTE_EVAL1(flow);
auto addr = instance.getFinalAddress(curr, flow.getSingleValue());
+ if (curr->isAtomic) {
+ instance.checkAtomicAddress(addr, curr->bytes);
+ }
auto ret = instance.externalInterface->load(curr, addr);
NOTE_EVAL1(addr);
NOTE_EVAL1(ret);
@@ -1705,6 +1708,9 @@ private:
return value;
}
auto addr = instance.getFinalAddress(curr, ptr.getSingleValue());
+ if (curr->isAtomic) {
+ instance.checkAtomicAddress(addr, curr->bytes);
+ }
NOTE_EVAL1(addr);
NOTE_EVAL1(value);
instance.externalInterface->store(curr, addr, value.getSingleValue());
@@ -1730,22 +1736,21 @@ private:
auto computed = value.getSingleValue();
switch (curr->op) {
case Add:
- computed = computed.add(value.getSingleValue());
+ computed = loaded.add(computed);
break;
case Sub:
- computed = computed.sub(value.getSingleValue());
+ computed = loaded.sub(computed);
break;
case And:
- computed = computed.and_(value.getSingleValue());
+ computed = loaded.and_(computed);
break;
case Or:
- computed = computed.or_(value.getSingleValue());
+ computed = loaded.or_(computed);
break;
case Xor:
- computed = computed.xor_(value.getSingleValue());
+ computed = loaded.xor_(computed);
break;
case Xchg:
- computed = value.getSingleValue();
break;
}
instance.doAtomicStore(addr, curr->bytes, computed);
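The hunk above fixes the combine step: each arm previously computed `operand OP operand` and ignored the loaded value. A hedged sketch of the intended read-modify-write semantics, using plain integers rather than the interpreter's Literal type (rmwAdd is illustrative, not the patch's code):

    #include <cstdint>

    // An atomic RMW returns the old cell contents and stores
    // `loaded OP operand`; for Xchg it stores the operand itself.
    uint32_t rmwAdd(uint32_t& cell, uint32_t operand) {
      uint32_t loaded = cell;    // old value, returned to the program
      cell = loaded + operand;   // the buggy code stored operand + operand
      return loaded;
    }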
@@ -1767,6 +1772,8 @@ private:
return replacement;
}
auto addr = instance.getFinalAddress(curr, ptr.getSingleValue());
+ expected =
+ Flow(wrapToSmallerSize(expected.getSingleValue(), curr->bytes));
NOTE_EVAL1(addr);
NOTE_EVAL1(expected);
NOTE_EVAL1(replacement);
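Why the expected value is wrapped first: a narrow cmpxchg loads only curr->bytes bytes, so only that many bytes of the expected operand can ever match. A small self-contained check (values chosen purely for illustration):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t loaded = 0xFF;    // what a 1-byte atomic load can produce
      uint32_t expected = 0x1FF; // caller's full-width expected value
      assert(loaded != expected);          // unwrapped: never matches
      assert(loaded == (expected & 0xFF)); // wrapped to 1 byte: matches
      return 0;
    }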
@@ -1795,7 +1802,7 @@ private:
return timeout;
}
auto bytes = curr->expectedType.getByteSize();
- auto addr = instance.getFinalAddress(ptr.getSingleValue(), bytes);
+ auto addr = instance.getFinalAddress(curr, ptr.getSingleValue(), bytes);
auto loaded = instance.doAtomicLoad(addr, bytes, curr->expectedType);
NOTE_EVAL1(loaded);
if (loaded != expected.getSingleValue()) {
@@ -1817,7 +1824,9 @@ private:
if (count.breaking()) {
return count;
}
- // TODO: add threads support!
+ auto addr = instance.getFinalAddress(curr, ptr.getSingleValue(), 4);
+ // Just check TODO actual threads support
+ instance.checkAtomicAddress(addr, 4);
return Literal(int32_t(0)); // none woken up
}
Flow visitSIMDLoad(SIMDLoad* curr) {
@@ -1901,7 +1910,7 @@ private:
auto fillLanes = [&](auto lanes, size_t laneBytes) {
for (auto& lane : lanes) {
lane = loadLane(
- instance.getFinalAddress(Literal(uint32_t(src)), laneBytes));
+ instance.getFinalAddress(curr, Literal(uint32_t(src)), laneBytes));
src = Address(uint32_t(src) + laneBytes);
}
return Literal(lanes);
@@ -1997,8 +2006,9 @@ private:
}
for (size_t i = 0; i < sizeVal; ++i) {
Literal addr(uint32_t(destVal + i));
- instance.externalInterface->store8(instance.getFinalAddress(addr, 1),
- segment.data[offsetVal + i]);
+ instance.externalInterface->store8(
+ instance.getFinalAddressWithoutOffset(addr, 1),
+ segment.data[offsetVal + i]);
}
return {};
}
@@ -2046,9 +2056,11 @@ private:
}
for (int64_t i = start; i != end; i += step) {
instance.externalInterface->store8(
- instance.getFinalAddress(Literal(uint32_t(destVal + i)), 1),
+ instance.getFinalAddressWithoutOffset(Literal(uint32_t(destVal + i)),
+ 1),
instance.externalInterface->load8s(
- instance.getFinalAddress(Literal(uint32_t(sourceVal + i)), 1)));
+ instance.getFinalAddressWithoutOffset(
+ Literal(uint32_t(sourceVal + i)), 1)));
}
return {};
}
@@ -2079,7 +2091,9 @@ private:
uint8_t val(value.getSingleValue().geti32());
for (size_t i = 0; i < sizeVal; ++i) {
instance.externalInterface->store8(
- instance.getFinalAddress(Literal(uint32_t(destVal + i)), 1), val);
+ instance.getFinalAddressWithoutOffset(Literal(uint32_t(destVal + i)),
+ 1),
+ val);
}
return {};
}
@@ -2103,6 +2117,44 @@ private:
void trap(const char* why) override {
instance.externalInterface->trap(why);
}
+
+ // Given a value, wrap it to a smaller given number of bytes.
+ Literal wrapToSmallerSize(Literal value, Index bytes) {
+ if (value.type == Type::i32) {
+ switch (bytes) {
+ case 1: {
+ return value.and_(Literal(uint32_t(0xff)));
+ }
+ case 2: {
+ return value.and_(Literal(uint32_t(0xffff)));
+ }
+ case 4: {
+ break;
+ }
+ default:
+ WASM_UNREACHABLE("unexpected bytes");
+ }
+ } else {
+ assert(value.type == Type::i64);
+ switch (bytes) {
+ case 1: {
+ return value.and_(Literal(uint64_t(0xff)));
+ }
+ case 2: {
+ return value.and_(Literal(uint64_t(0xffff)));
+ }
+ case 4: {
+ return value.and_(Literal(uint64_t(0xffffffffUL)));
+ }
+ case 8: {
+ break;
+ }
+ default:
+ WASM_UNREACHABLE("unexpected bytes");
+ }
+ }
+ return value;
+ }
};
public:
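A few expected input/output pairs for the new helper (worked out from the code above, not taken from the patch):

    // wrapToSmallerSize(Literal(uint32_t(0x12345678)), 1) -> 0x78
    // wrapToSmallerSize(Literal(uint32_t(0x12345678)), 2) -> 0x5678
    // wrapToSmallerSize(Literal(uint32_t(0x12345678)), 4) -> 0x12345678 (unchanged)
    // wrapToSmallerSize(Literal(uint64_t(0xAABBCCDDEEFF0011)), 4) -> 0xEEFF0011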
@@ -2173,21 +2225,25 @@ protected:
}
}
- template<class LS> Address getFinalAddress(LS* curr, Literal ptr) {
+ template<class LS>
+ Address getFinalAddress(LS* curr, Literal ptr, Index bytes) {
Address memorySizeBytes = memorySize * Memory::kPageSize;
uint64_t addr = ptr.type == Type::i32 ? ptr.geti32() : ptr.geti64();
trapIfGt(curr->offset, memorySizeBytes, "offset > memory");
trapIfGt(addr, memorySizeBytes - curr->offset, "final > memory");
addr += curr->offset;
- trapIfGt(curr->bytes, memorySizeBytes, "bytes > memory");
- checkLoadAddress(addr, curr->bytes);
+ trapIfGt(bytes, memorySizeBytes, "bytes > memory");
+ checkLoadAddress(addr, bytes);
return addr;
}
- Address getFinalAddress(Literal ptr, Index bytes) {
- Address memorySizeBytes = memorySize * Memory::kPageSize;
+ template<class LS> Address getFinalAddress(LS* curr, Literal ptr) {
+ return getFinalAddress(curr, ptr, curr->bytes);
+ }
+
+ Address getFinalAddressWithoutOffset(Literal ptr, Index bytes) {
uint64_t addr = ptr.type == Type::i32 ? ptr.geti32() : ptr.geti64();
- trapIfGt(addr, memorySizeBytes - bytes, "highest > memory");
+ checkLoadAddress(addr, bytes);
return addr;
}
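A sketch of the bounds logic in getFinalAddress, as a standalone predicate (inBounds is hypothetical; the real code traps through externalInterface instead of returning a bool). Checking offset and ptr separately before adding them is what keeps `ptr + offset` from wrapping around in 64 bits:

    #include <cstdint>

    bool inBounds(uint64_t ptr, uint64_t offset, uint64_t bytes,
                  uint64_t memorySizeBytes) {
      if (offset > memorySizeBytes) return false;       // "offset > memory"
      if (ptr > memorySizeBytes - offset) return false; // "final > memory"
      uint64_t addr = ptr + offset;                     // now cannot overflow
      if (bytes > memorySizeBytes) return false;        // "bytes > memory"
      return addr <= memorySizeBytes - bytes;           // highest byte in bounds
    }

getFinalAddressWithoutOffset, by contrast, skips the first three steps entirely and only checks that the access itself fits in memory.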
@@ -2196,14 +2252,26 @@ protected:
trapIfGt(addr, memorySizeBytes - bytes, "highest > memory");
}
- Literal doAtomicLoad(Address addr, Index bytes, Type type) {
+ void checkAtomicAddress(Address addr, Index bytes) {
checkLoadAddress(addr, bytes);
+ // Unaligned atomics trap.
+ if (bytes > 1) {
+ if (addr & (bytes - 1)) {
+ externalInterface->trap("unaligned atomic operation");
+ }
+ }
+ }
+
+ Literal doAtomicLoad(Address addr, Index bytes, Type type) {
+ checkAtomicAddress(addr, bytes);
Const ptr;
ptr.value = Literal(int32_t(addr));
ptr.type = Type::i32;
Load load;
load.bytes = bytes;
- load.signed_ = true;
+ // When an atomic loads a partial number of bytes for the type, it is
+ // always an unsigned extension.
+ load.signed_ = false;
load.align = bytes;
load.isAtomic = true; // understatement
load.ptr = &ptr;
@@ -2212,6 +2280,7 @@ protected:
}
void doAtomicStore(Address addr, Index bytes, Literal toStore) {
+ checkAtomicAddress(addr, bytes);
Const ptr;
ptr.value = Literal(int32_t(addr));
ptr.type = Type::i32;
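The signedness change in doAtomicLoad is visible whenever the access is narrower than the type: wasm's narrow atomic loads are all zero-extending (i32.atomic.load8_u and friends), so loading byte 0x80 into an i32 must give 0x80, not the sign-extended 0xFFFFFF80. A minimal check of the two behaviors:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t inMemory = 0x80;
      uint32_t zeroExtended = inMemory;  // 0x00000080, the spec's behavior
      uint32_t signExtended =
          uint32_t(int32_t(int8_t(inMemory))); // 0xFFFFFF80, the old bug
      assert(zeroExtended == 0x80u);
      assert(signExtended == 0xFFFFFF80u);
      return 0;
    }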