author    Alex Reinking <reinking@google.com>    2022-09-28 07:24:49 -0700
committer GitHub <noreply@github.com>            2022-09-28 07:24:49 -0700
commit    a4a77c18df16d6ee672f2a2564969bc9b2beef3a (patch)
tree      e4368558b4b44a7421761341e3b18edbd93f9785 /include/wabt/interp
parent    520614a5f83878a4d26702a3ad67c44302c2b073 (diff)
Move headers to include/wabt/ (#1998)
This makes things easier for users and packagers of libwabt.
Diffstat (limited to 'include/wabt/interp')
-rw-r--r--  include/wabt/interp/binary-reader-interp.h      40
-rw-r--r--  include/wabt/interp/interp-inl.h               1000
-rw-r--r--  include/wabt/interp/interp-math.h               412
-rw-r--r--  include/wabt/interp/interp-util.h                50
-rw-r--r--  include/wabt/interp/interp-wasi.h                46
-rw-r--r--  include/wabt/interp/interp.h                   1275
-rw-r--r--  include/wabt/interp/istream.h                   165
-rw-r--r--  include/wabt/interp/wasi_api.def                 28
8 files changed, 3016 insertions(+), 0 deletions(-)
diff --git a/include/wabt/interp/binary-reader-interp.h b/include/wabt/interp/binary-reader-interp.h
new file mode 100644
index 00000000..74d8e15f
--- /dev/null
+++ b/include/wabt/interp/binary-reader-interp.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 WebAssembly Community Group participants
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WABT_BINARY_READER_INTERP_H_
+#define WABT_BINARY_READER_INTERP_H_
+
+#include "wabt/common.h"
+#include "wabt/error.h"
+#include "wabt/interp/interp.h"
+
+namespace wabt {
+
+struct ReadBinaryOptions;
+
+namespace interp {
+
+Result ReadBinaryInterp(std::string_view filename,
+ const void* data,
+ size_t size,
+ const ReadBinaryOptions& options,
+ Errors*,
+ ModuleDesc* out_module);
+
+} // namespace interp
+} // namespace wabt
+
+#endif /* WABT_BINARY_READER_INTERP_H_ */
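For orientation, here is a minimal sketch of how the ReadBinaryInterp entry point above is typically driven, using the post-move include paths introduced by this commit. It assumes a default-constructed ReadBinaryOptions from wabt/binary-reader.h and is an illustration, not part of the patch:

    // Sketch only: parse raw wasm bytes into an interp::ModuleDesc.
    #include "wabt/binary-reader.h"    // for ReadBinaryOptions (assumed here)
    #include "wabt/error.h"
    #include "wabt/interp/binary-reader-interp.h"

    wabt::interp::ModuleDesc ParseModule(const void* data, size_t size) {
      wabt::Errors errors;
      wabt::interp::ModuleDesc module;
      wabt::ReadBinaryOptions options;   // defaults; enable features as needed
      wabt::Result result = wabt::interp::ReadBinaryInterp(
          "<memory>", data, size, options, &errors, &module);
      (void)result;   // real code should check the result and report errors
      return module;
    }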
diff --git a/include/wabt/interp/interp-inl.h b/include/wabt/interp/interp-inl.h
new file mode 100644
index 00000000..d985bea4
--- /dev/null
+++ b/include/wabt/interp/interp-inl.h
@@ -0,0 +1,1000 @@
+/*
+ * Copyright 2020 WebAssembly Community Group participants
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <limits>
+#include <string>
+
+namespace wabt {
+namespace interp {
+
+//// Ref ////
+inline Ref::Ref(size_t index) : index(index) {}
+
+inline bool operator==(Ref lhs, Ref rhs) {
+ return lhs.index == rhs.index;
+}
+
+inline bool operator!=(Ref lhs, Ref rhs) {
+ return lhs.index != rhs.index;
+}
+
+//// ExternType ////
+inline ExternType::ExternType(ExternKind kind) : kind(kind) {}
+
+//// FuncType ////
+// static
+inline bool FuncType::classof(const ExternType* type) {
+ return type->kind == skind;
+}
+
+inline FuncType::FuncType(ValueTypes params, ValueTypes results)
+ : ExternType(ExternKind::Func), params(params), results(results) {}
+
+//// TableType ////
+// static
+inline bool TableType::classof(const ExternType* type) {
+ return type->kind == skind;
+}
+
+inline TableType::TableType(ValueType element, Limits limits)
+ : ExternType(ExternKind::Table), element(element), limits(limits) {
+ // Always set max.
+ if (!limits.has_max) {
+ this->limits.max = std::numeric_limits<u32>::max();
+ }
+}
+
+//// MemoryType ////
+// static
+inline bool MemoryType::classof(const ExternType* type) {
+ return type->kind == skind;
+}
+
+inline MemoryType::MemoryType(Limits limits)
+ : ExternType(ExternKind::Memory), limits(limits) {
+ // Always set max.
+ if (!limits.has_max) {
+ this->limits.max = limits.is_64 ? WABT_MAX_PAGES64 : WABT_MAX_PAGES32;
+ }
+}
+
+//// GlobalType ////
+// static
+inline bool GlobalType::classof(const ExternType* type) {
+ return type->kind == skind;
+}
+
+inline GlobalType::GlobalType(ValueType type, Mutability mut)
+ : ExternType(ExternKind::Global), type(type), mut(mut) {}
+
+//// TagType ////
+// static
+inline bool TagType::classof(const ExternType* type) {
+ return type->kind == skind;
+}
+
+inline TagType::TagType(TagAttr attr, const ValueTypes& signature)
+ : ExternType(ExternKind::Tag), attr(attr), signature(signature) {}
+
+//// ImportType ////
+inline ImportType::ImportType(std::string module,
+ std::string name,
+ std::unique_ptr<ExternType> type)
+ : module(module), name(name), type(std::move(type)) {}
+
+inline ImportType::ImportType(const ImportType& other)
+ : module(other.module), name(other.name), type(other.type->Clone()) {}
+
+inline ImportType& ImportType::operator=(const ImportType& other) {
+ if (this != &other) {
+ module = other.module;
+ name = other.name;
+ type = other.type->Clone();
+ }
+ return *this;
+}
+
+//// ExportType ////
+inline ExportType::ExportType(std::string name,
+ std::unique_ptr<ExternType> type)
+ : name(name), type(std::move(type)) {}
+
+inline ExportType::ExportType(const ExportType& other)
+ : name(other.name), type(other.type->Clone()) {}
+
+inline ExportType& ExportType::operator=(const ExportType& other) {
+ if (this != &other) {
+ name = other.name;
+ type = other.type->Clone();
+ }
+ return *this;
+}
+
+//// Frame ////
+inline Frame::Frame(Ref func,
+ u32 values,
+ u32 exceptions,
+ u32 offset,
+ Instance* inst,
+ Module* mod)
+ : func(func),
+ values(values),
+ exceptions(exceptions),
+ offset(offset),
+ inst(inst),
+ mod(mod) {}
+
+//// FreeList ////
+template <>
+inline bool FreeList<Ref>::IsUsed(Index index) const {
+ return (list_[index].index & refFreeBit) == 0;
+}
+
+template <>
+inline FreeList<Ref>::~FreeList<Ref>() {}
+
+template <>
+template <typename... Args>
+auto FreeList<Ref>::New(Args&&... args) -> Index {
+ if (free_head_ == 0) {
+ list_.push_back(Ref(std::forward<Args>(args)...));
+ return list_.size() - 1;
+ }
+
+ Index index = free_head_ - 1;
+
+ assert(!IsUsed(index));
+ assert(free_items_ > 0);
+
+ free_head_ = list_[index].index & (refFreeBit - 1);
+ list_[index] = Ref(std::forward<Args>(args)...);
+ free_items_--;
+ return index;
+}
+
+template <>
+inline void FreeList<Ref>::Delete(Index index) {
+ assert(IsUsed(index));
+
+ list_[index].index = free_head_ | refFreeBit;
+ free_head_ = index + 1;
+ free_items_++;
+}
+
+template <typename T>
+bool FreeList<T>::IsUsed(Index index) const {
+ return (reinterpret_cast<uintptr_t>(list_[index]) & ptrFreeBit) == 0;
+}
+
+template <typename T>
+FreeList<T>::~FreeList<T>() {
+ for (auto object : list_) {
+ if ((reinterpret_cast<uintptr_t>(object) & ptrFreeBit) == 0) {
+ delete object;
+ }
+ }
+}
+
+template <typename T>
+template <typename... Args>
+auto FreeList<T>::New(Args&&... args) -> Index {
+ if (free_head_ == 0) {
+ list_.push_back(T(std::forward<Args>(args)...));
+ return list_.size() - 1;
+ }
+
+ Index index = free_head_ - 1;
+
+ assert(!IsUsed(index));
+ assert(free_items_ > 0);
+
+ free_head_ = reinterpret_cast<uintptr_t>(list_[index]) >> ptrFreeShift;
+ list_[index] = T(std::forward<Args>(args)...);
+ free_items_--;
+ return index;
+}
+
+template <typename T>
+void FreeList<T>::Delete(Index index) {
+ assert(IsUsed(index));
+
+ delete list_[index];
+ list_[index] = reinterpret_cast<T>((free_head_ << ptrFreeShift) | ptrFreeBit);
+ free_head_ = index + 1;
+ free_items_++;
+}
+
+template <typename T>
+const T& FreeList<T>::Get(Index index) const {
+ assert(IsUsed(index));
+ return list_[index];
+}
+
+template <typename T>
+T& FreeList<T>::Get(Index index) {
+ assert(IsUsed(index));
+ return list_[index];
+}
+
+template <typename T>
+auto FreeList<T>::size() const -> Index {
+ return list_.size();
+}
+
+template <typename T>
+auto FreeList<T>::count() const -> Index {
+ return list_.size() - free_items_;
+}
+
+//// RefPtr ////
+template <typename T>
+RefPtr<T>::RefPtr() : obj_(nullptr), store_(nullptr), root_index_(0) {}
+
+template <typename T>
+RefPtr<T>::RefPtr(Store& store, Ref ref) {
+#ifndef NDEBUG
+ if (!store.Is<T>(ref)) {
+ ObjectKind ref_kind;
+ if (ref == Ref::Null) {
+ ref_kind = ObjectKind::Null;
+ } else {
+ ref_kind = store.objects_.Get(ref.index)->kind();
+ }
+ fprintf(stderr, "Invalid conversion from Ref (%s) to RefPtr<%s>!\n",
+ GetName(ref_kind), T::GetTypeName());
+ abort();
+ }
+#endif
+ root_index_ = store.NewRoot(ref);
+ obj_ = static_cast<T*>(store.objects_.Get(ref.index));
+ store_ = &store;
+}
+
+template <typename T>
+RefPtr<T>::RefPtr(const RefPtr& other)
+ : obj_(other.obj_), store_(other.store_) {
+ root_index_ = store_ ? store_->CopyRoot(other.root_index_) : 0;
+}
+
+template <typename T>
+RefPtr<T>& RefPtr<T>::operator=(const RefPtr& other) {
+ obj_ = other.obj_;
+ store_ = other.store_;
+ root_index_ = store_ ? store_->CopyRoot(other.root_index_) : 0;
+ return *this;
+}
+
+template <typename T>
+RefPtr<T>::RefPtr(RefPtr&& other)
+ : obj_(other.obj_), store_(other.store_), root_index_(other.root_index_) {
+ other.obj_ = nullptr;
+ other.store_ = nullptr;
+ other.root_index_ = 0;
+}
+
+template <typename T>
+RefPtr<T>& RefPtr<T>::operator=(RefPtr&& other) {
+ obj_ = other.obj_;
+ store_ = other.store_;
+ root_index_ = other.root_index_;
+ other.obj_ = nullptr;
+ other.store_ = nullptr;
+ other.root_index_ = 0;
+ return *this;
+}
+
+template <typename T>
+RefPtr<T>::~RefPtr() {
+ reset();
+}
+
+template <typename T>
+template <typename U>
+RefPtr<T>::RefPtr(const RefPtr<U>& other)
+ : obj_(other.obj_), store_(other.store_) {
+ root_index_ = store_ ? store_->CopyRoot(other.root_index_) : 0;
+}
+
+template <typename T>
+template <typename U>
+RefPtr<T>& RefPtr<T>::operator=(const RefPtr<U>& other) {
+ obj_ = other.obj_;
+ store_ = other.store_;
+ root_index_ = store_ ? store_->CopyRoot(other.root_index_) : 0;
+ return *this;
+}
+
+template <typename T>
+template <typename U>
+RefPtr<T>::RefPtr(RefPtr&& other)
+ : obj_(other.obj_), store_(other.store_), root_index_(other.root_index_) {
+ other.obj_ = nullptr;
+ other.store_ = nullptr;
+ other.root_index_ = 0;
+}
+
+template <typename T>
+template <typename U>
+RefPtr<T>& RefPtr<T>::operator=(RefPtr&& other) {
+ obj_ = other.obj_;
+ store_ = other.store_;
+ root_index_ = other.root_index_;
+ other.obj_ = nullptr;
+ other.store_ = nullptr;
+ other.root_index_ = 0;
+ return *this;
+}
+
+template <typename T>
+template <typename U>
+RefPtr<U> RefPtr<T>::As() {
+ static_assert(std::is_base_of<T, U>::value, "T must be base class of U");
+ assert(store_->Is<U>(obj_->self()));
+ RefPtr<U> result;
+ result.obj_ = static_cast<U*>(obj_);
+ result.store_ = store_;
+ result.root_index_ = store_->CopyRoot(root_index_);
+ return result;
+}
+
+template <typename T>
+bool RefPtr<T>::empty() const {
+ return obj_ == nullptr;
+}
+
+template <typename T>
+void RefPtr<T>::reset() {
+ if (obj_) {
+ store_->DeleteRoot(root_index_);
+ obj_ = nullptr;
+ root_index_ = 0;
+ store_ = nullptr;
+ }
+}
+
+template <typename T>
+T* RefPtr<T>::get() const {
+ return obj_;
+}
+
+template <typename T>
+T* RefPtr<T>::operator->() const {
+ return obj_;
+}
+
+template <typename T>
+T& RefPtr<T>::operator*() const {
+ return *obj_;
+}
+
+template <typename T>
+RefPtr<T>::operator bool() const {
+ return obj_ != nullptr;
+}
+
+template <typename T>
+Ref RefPtr<T>::ref() const {
+ return store_ ? store_->roots_.Get(root_index_) : Ref::Null;
+}
+
+template <typename T>
+Store* RefPtr<T>::store() const {
+ return store_;
+}
+
+template <typename U, typename V>
+bool operator==(const RefPtr<U>& lhs, const RefPtr<V>& rhs) {
+ return lhs.obj_->self() == rhs.obj_->self();
+}
+
+template <typename U, typename V>
+bool operator!=(const RefPtr<U>& lhs, const RefPtr<V>& rhs) {
+ return lhs.obj_->self() != rhs.obj_->self();
+}
+
+//// ValueType ////
+inline bool IsReference(ValueType type) { return type.IsRef(); }
+template <> inline bool HasType<s32>(ValueType type) { return type == ValueType::I32; }
+template <> inline bool HasType<u32>(ValueType type) { return type == ValueType::I32; }
+template <> inline bool HasType<s64>(ValueType type) { return type == ValueType::I64; }
+template <> inline bool HasType<u64>(ValueType type) { return type == ValueType::I64; }
+template <> inline bool HasType<f32>(ValueType type) { return type == ValueType::F32; }
+template <> inline bool HasType<f64>(ValueType type) { return type == ValueType::F64; }
+template <> inline bool HasType<Ref>(ValueType type) { return IsReference(type); }
+
+template <typename T>
+void RequireType(ValueType type) {
+ assert(HasType<T>(type));
+}
+
+inline bool TypesMatch(ValueType expected, ValueType actual) {
+ // Currently there is no subtyping, so expected and actual must match
+ // exactly. In the future this may be expanded.
+ return expected == actual;
+}
+
+//// Value ////
+inline Value WABT_VECTORCALL Value::Make(s32 val) { Value res; res.i32_ = val; res.SetType(ValueType::I32); return res; }
+inline Value WABT_VECTORCALL Value::Make(u32 val) { Value res; res.i32_ = val; res.SetType(ValueType::I32); return res; }
+inline Value WABT_VECTORCALL Value::Make(s64 val) { Value res; res.i64_ = val; res.SetType(ValueType::I64); return res; }
+inline Value WABT_VECTORCALL Value::Make(u64 val) { Value res; res.i64_ = val; res.SetType(ValueType::I64); return res; }
+inline Value WABT_VECTORCALL Value::Make(f32 val) { Value res; res.f32_ = val; res.SetType(ValueType::F32); return res; }
+inline Value WABT_VECTORCALL Value::Make(f64 val) { Value res; res.f64_ = val; res.SetType(ValueType::F64); return res; }
+inline Value WABT_VECTORCALL Value::Make(v128 val) { Value res; res.v128_ = val; res.SetType(ValueType::V128); return res; }
+inline Value WABT_VECTORCALL Value::Make(Ref val) { Value res; res.ref_ = val; res.SetType(ValueType::ExternRef); return res; }
+template <typename T, u8 L>
+Value WABT_VECTORCALL Value::Make(Simd<T, L> val) {
+ Value res;
+ res.v128_ = Bitcast<v128>(val);
+ res.SetType(ValueType::V128);
+ return res;
+}
+
+template <> inline s8 WABT_VECTORCALL Value::Get<s8>() const { CheckType(ValueType::I32); return i32_; }
+template <> inline u8 WABT_VECTORCALL Value::Get<u8>() const { CheckType(ValueType::I32); return i32_; }
+template <> inline s16 WABT_VECTORCALL Value::Get<s16>() const { CheckType(ValueType::I32); return i32_; }
+template <> inline u16 WABT_VECTORCALL Value::Get<u16>() const { CheckType(ValueType::I32); return i32_; }
+template <> inline s32 WABT_VECTORCALL Value::Get<s32>() const { CheckType(ValueType::I32); return i32_; }
+template <> inline u32 WABT_VECTORCALL Value::Get<u32>() const { CheckType(ValueType::I32); return i32_; }
+template <> inline s64 WABT_VECTORCALL Value::Get<s64>() const { CheckType(ValueType::I64); return i64_; }
+template <> inline u64 WABT_VECTORCALL Value::Get<u64>() const { CheckType(ValueType::I64); return i64_; }
+template <> inline f32 WABT_VECTORCALL Value::Get<f32>() const { CheckType(ValueType::F32); return f32_; }
+template <> inline f64 WABT_VECTORCALL Value::Get<f64>() const { CheckType(ValueType::F64); return f64_; }
+template <> inline v128 WABT_VECTORCALL Value::Get<v128>() const { CheckType(ValueType::V128); return v128_; }
+template <> inline Ref WABT_VECTORCALL Value::Get<Ref>() const { CheckType(ValueType::ExternRef); return ref_; }
+
+template <> inline s8x16 WABT_VECTORCALL Value::Get<s8x16>() const { CheckType(ValueType::V128); return Bitcast<s8x16>(v128_); }
+template <> inline u8x16 WABT_VECTORCALL Value::Get<u8x16>() const { CheckType(ValueType::V128); return Bitcast<u8x16>(v128_); }
+template <> inline s16x8 WABT_VECTORCALL Value::Get<s16x8>() const { CheckType(ValueType::V128); return Bitcast<s16x8>(v128_); }
+template <> inline u16x8 WABT_VECTORCALL Value::Get<u16x8>() const { CheckType(ValueType::V128); return Bitcast<u16x8>(v128_); }
+template <> inline s32x4 WABT_VECTORCALL Value::Get<s32x4>() const { CheckType(ValueType::V128); return Bitcast<s32x4>(v128_); }
+template <> inline u32x4 WABT_VECTORCALL Value::Get<u32x4>() const { CheckType(ValueType::V128); return Bitcast<u32x4>(v128_); }
+template <> inline s64x2 WABT_VECTORCALL Value::Get<s64x2>() const { CheckType(ValueType::V128); return Bitcast<s64x2>(v128_); }
+template <> inline u64x2 WABT_VECTORCALL Value::Get<u64x2>() const { CheckType(ValueType::V128); return Bitcast<u64x2>(v128_); }
+template <> inline f32x4 WABT_VECTORCALL Value::Get<f32x4>() const { CheckType(ValueType::V128); return Bitcast<f32x4>(v128_); }
+template <> inline f64x2 WABT_VECTORCALL Value::Get<f64x2>() const { CheckType(ValueType::V128); return Bitcast<f64x2>(v128_); }
+
+template <> inline void WABT_VECTORCALL Value::Set<s32>(s32 val) { i32_ = val; SetType(ValueType::I32); }
+template <> inline void WABT_VECTORCALL Value::Set<u32>(u32 val) { i32_ = val; SetType(ValueType::I32); }
+template <> inline void WABT_VECTORCALL Value::Set<s64>(s64 val) { i64_ = val; SetType(ValueType::I64); }
+template <> inline void WABT_VECTORCALL Value::Set<u64>(u64 val) { i64_ = val; SetType(ValueType::I64); }
+template <> inline void WABT_VECTORCALL Value::Set<f32>(f32 val) { f32_ = val; SetType(ValueType::F32); }
+template <> inline void WABT_VECTORCALL Value::Set<f64>(f64 val) { f64_ = val; SetType(ValueType::F64); }
+template <> inline void WABT_VECTORCALL Value::Set<v128>(v128 val) { v128_ = val; SetType(ValueType::V128); }
+template <> inline void WABT_VECTORCALL Value::Set<Ref>(Ref val) { ref_ = val; SetType(ValueType::ExternRef); }
+
+//// Store ////
+inline bool Store::IsValid(Ref ref) const {
+ return objects_.IsUsed(ref.index) && objects_.Get(ref.index);
+}
+
+template <typename T>
+bool Store::Is(Ref ref) const {
+ return objects_.IsUsed(ref.index) && isa<T>(objects_.Get(ref.index));
+}
+
+template <typename T>
+Result Store::Get(Ref ref, RefPtr<T>* out) {
+ if (Is<T>(ref)) {
+ *out = RefPtr<T>(*this, ref);
+ return Result::Ok;
+ }
+ return Result::Error;
+}
+
+template <typename T>
+RefPtr<T> Store::UnsafeGet(Ref ref) {
+ return RefPtr<T>(*this, ref);
+}
+
+template <typename T, typename... Args>
+RefPtr<T> Store::Alloc(Args&&... args) {
+ Ref ref{objects_.New(new T(std::forward<Args>(args)...))};
+ RefPtr<T> ptr{*this, ref};
+ ptr->self_ = ref;
+ return ptr;
+}
+
+inline Store::ObjectList::Index Store::object_count() const {
+ return objects_.count();
+}
+
+inline const Features& Store::features() const {
+ return features_;
+}
+
+inline std::set<Thread*>& Store::threads() {
+ return threads_;
+}
+
+//// Object ////
+// static
+inline bool Object::classof(const Object* obj) {
+ return true;
+}
+
+inline Object::Object(ObjectKind kind) : kind_(kind) {}
+
+inline ObjectKind Object::kind() const {
+ return kind_;
+}
+
+inline Ref Object::self() const {
+ return self_;
+}
+
+inline void* Object::host_info() const {
+ return host_info_;
+}
+
+inline void Object::set_host_info(void* host_info) {
+ host_info_ = host_info;
+}
+
+inline Finalizer Object::get_finalizer() const {
+ return finalizer_;
+}
+
+inline void Object::set_finalizer(Finalizer finalizer) {
+ finalizer_ = finalizer;
+}
+
+//// Foreign ////
+// static
+inline bool Foreign::classof(const Object* obj) {
+ return obj->kind() == skind;
+}
+
+// static
+inline Foreign::Ptr Foreign::New(Store& store, void* ptr) {
+ return store.Alloc<Foreign>(store, ptr);
+}
+
+inline void* Foreign::ptr() {
+ return ptr_;
+}
+
+//// Trap ////
+// static
+inline bool Trap::classof(const Object* obj) {
+ return obj->kind() == skind;
+}
+
+// static
+inline Trap::Ptr Trap::New(Store& store,
+ const std::string& msg,
+ const std::vector<Frame>& trace) {
+ return store.Alloc<Trap>(store, msg, trace);
+}
+
+inline std::string Trap::message() const {
+ return message_;
+}
+
+//// Exception ////
+// static
+inline bool Exception::classof(const Object* obj) {
+ return obj->kind() == skind;
+}
+
+// static
+inline Exception::Ptr Exception::New(Store& store, Ref tag, Values& args) {
+ return store.Alloc<Exception>(store, tag, args);
+}
+
+inline Ref Exception::tag() const {
+ return tag_;
+}
+
+inline Values& Exception::args() {
+ return args_;
+}
+
+//// Extern ////
+// static
+inline bool Extern::classof(const Object* obj) {
+ switch (obj->kind()) {
+ case ObjectKind::DefinedFunc:
+ case ObjectKind::HostFunc:
+ case ObjectKind::Table:
+ case ObjectKind::Memory:
+ case ObjectKind::Global:
+ case ObjectKind::Tag:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline Extern::Extern(ObjectKind kind) : Object(kind) {}
+
+//// Func ////
+// static
+inline bool Func::classof(const Object* obj) {
+ switch (obj->kind()) {
+ case ObjectKind::DefinedFunc:
+ case ObjectKind::HostFunc:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline const ExternType& Func::extern_type() {
+ return type_;
+}
+
+inline const FuncType& Func::type() const {
+ return type_;
+}
+
+//// DefinedFunc ////
+// static
+inline bool DefinedFunc::classof(const Object* obj) {
+ return obj->kind() == skind;
+}
+
+// static
+inline DefinedFunc::Ptr DefinedFunc::New(Store& store,
+ Ref instance,
+ FuncDesc desc) {
+ return store.Alloc<DefinedFunc>(store, instance, desc);
+}
+
+inline Ref DefinedFunc::instance() const {
+ return instance_;
+}
+
+inline const FuncDesc& DefinedFunc::desc() const {
+ return desc_;
+}
+
+//// HostFunc ////
+// static
+inline bool HostFunc::classof(const Object* obj) {
+ return obj->kind() == skind;
+}
+
+// static
+inline HostFunc::Ptr HostFunc::New(Store& store, FuncType type, Callback cb) {
+ return store.Alloc<HostFunc>(store, type, cb);
+}
+
+//// Table ////
+// static
+inline bool Table::classof(const Object* obj) {
+ return obj->kind() == skind;
+}
+
+// static
+inline Table::Ptr Table::New(Store& store, TableType type) {
+ return store.Alloc<Table>(store, type);
+}
+
+inline const ExternType& Table::extern_type() {
+ return type_;
+}
+
+inline const TableType& Table::type() const {
+ return type_;
+}
+
+inline const RefVec& Table::elements() const {
+ return elements_;
+}
+
+inline u32 Table::size() const {
+ return static_cast<u32>(elements_.size());
+}
+
+//// Memory ////
+// static
+inline bool Memory::classof(const Object* obj) {
+ return obj->kind() == skind;
+}
+
+// static
+inline Memory::Ptr Memory::New(interp::Store& store, MemoryType type) {
+ return store.Alloc<Memory>(store, type);
+}
+
+inline bool Memory::IsValidAccess(u64 offset, u64 addend, u64 size) const {
+ // FIXME: make this faster.
+ return offset <= data_.size() && addend <= data_.size() &&
+ size <= data_.size() && offset + addend + size <= data_.size();
+}
+
+inline bool Memory::IsValidAtomicAccess(u64 offset,
+ u64 addend,
+ u64 size) const {
+ return IsValidAccess(offset, addend, size) &&
+ ((offset + addend) & (size - 1)) == 0;
+}
+
+template <typename T>
+Result Memory::Load(u64 offset, u64 addend, T* out) const {
+ if (!IsValidAccess(offset, addend, sizeof(T))) {
+ return Result::Error;
+ }
+ wabt::MemcpyEndianAware(out, data_.data(), sizeof(T), data_.size(), 0,
+ offset + addend, sizeof(T));
+ return Result::Ok;
+}
+
+template <typename T>
+T WABT_VECTORCALL Memory::UnsafeLoad(u64 offset, u64 addend) const {
+ assert(IsValidAccess(offset, addend, sizeof(T)));
+ T val;
+ wabt::MemcpyEndianAware(&val, data_.data(), sizeof(T), data_.size(), 0,
+ offset + addend, sizeof(T));
+ return val;
+}
+
+template <typename T>
+Result WABT_VECTORCALL Memory::Store(u64 offset, u64 addend, T val) {
+ if (!IsValidAccess(offset, addend, sizeof(T))) {
+ return Result::Error;
+ }
+ wabt::MemcpyEndianAware(data_.data(), &val, data_.size(), sizeof(T),
+ offset + addend, 0, sizeof(T));
+ return Result::Ok;
+}
+
+template <typename T>
+Result Memory::AtomicLoad(u64 offset, u64 addend, T* out) const {
+ if (!IsValidAtomicAccess(offset, addend, sizeof(T))) {
+ return Result::Error;
+ }
+ wabt::MemcpyEndianAware(out, data_.data(), sizeof(T), data_.size(), 0,
+ offset + addend, sizeof(T));
+ return Result::Ok;
+}
+
+template <typename T>
+Result Memory::AtomicStore(u64 offset, u64 addend, T val) {
+ if (!IsValidAtomicAccess(offset, addend, sizeof(T))) {
+ return Result::Error;
+ }
+ wabt::MemcpyEndianAware(data_.data(), &val, data_.size(), sizeof(T),
+ offset + addend, 0, sizeof(T));
+ return Result::Ok;
+}
+
+template <typename T, typename F>
+Result Memory::AtomicRmw(u64 offset, u64 addend, T rhs, F&& func, T* out) {
+ T lhs;
+ CHECK_RESULT(AtomicLoad(offset, addend, &lhs));
+ CHECK_RESULT(AtomicStore(offset, addend, func(lhs, rhs)));
+ *out = lhs;
+ return Result::Ok;
+}
+
+template <typename T>
+Result Memory::AtomicRmwCmpxchg(u64 offset,
+ u64 addend,
+ T expect,
+ T replace,
+ T* out) {
+ T read;
+ CHECK_RESULT(AtomicLoad(offset, addend, &read));
+ if (read == expect) {
+ CHECK_RESULT(AtomicStore(offset, addend, replace));
+ }
+ *out = read;
+ return Result::Ok;
+}
+
+inline u8* Memory::UnsafeData() {
+ return data_.data();
+}
+
+inline u64 Memory::ByteSize() const {
+ return data_.size();
+}
+
+inline u64 Memory::PageSize() const {
+ return pages_;
+}
+
+inline const ExternType& Memory::extern_type() {
+ return type_;
+}
+
+inline const MemoryType& Memory::type() const {
+ return type_;
+}
+
+//// Global ////
+// static
+inline bool Global::classof(const Object* obj) {
+ return obj->kind() == skind;
+}
+
+// static
+inline Global::Ptr Global::New(Store& store, GlobalType type, Value value) {
+ return store.Alloc<Global>(store, type, value);
+}
+
+inline Value Global::Get() const {
+ return value_;
+}
+
+template <typename T>
+Result Global::Get(T* out) const {
+ if (HasType<T>(type_.type)) {
+ *out = value_.Get<T>();
+ return Result::Ok;
+ }
+ return Result::Error;
+}
+
+template <typename T>
+T WABT_VECTORCALL Global::UnsafeGet() const {
+ RequireType<T>(type_.type);
+ return value_.Get<T>();
+}
+
+template <typename T>
+Result WABT_VECTORCALL Global::Set(T val) {
+ if (type_.mut == Mutability::Var && HasType<T>(type_.type)) {
+ value_.Set(val);
+ return Result::Ok;
+ }
+ return Result::Error;
+}
+
+inline const ExternType& Global::extern_type() {
+ return type_;
+}
+
+inline const GlobalType& Global::type() const {
+ return type_;
+}
+
+//// Tag ////
+// static
+inline bool Tag::classof(const Object* obj) {
+ return obj->kind() == skind;
+}
+
+// static
+inline Tag::Ptr Tag::New(Store& store, TagType type) {
+ return store.Alloc<Tag>(store, type);
+}
+
+inline const ExternType& Tag::extern_type() {
+ return type_;
+}
+
+inline const TagType& Tag::type() const {
+ return type_;
+}
+
+//// ElemSegment ////
+inline void ElemSegment::Drop() {
+ elements_.clear();
+}
+
+inline const ElemDesc& ElemSegment::desc() const {
+ return *desc_;
+}
+
+inline const RefVec& ElemSegment::elements() const {
+ return elements_;
+}
+
+inline u32 ElemSegment::size() const {
+ return elements_.size();
+}
+
+//// DataSegment ////
+inline void DataSegment::Drop() {
+ size_ = 0;
+}
+
+inline const DataDesc& DataSegment::desc() const {
+ return *desc_;
+}
+
+inline u64 DataSegment::size() const {
+ return size_;
+}
+
+//// Module ////
+// static
+inline bool Module::classof(const Object* obj) {
+ return obj->kind() == skind;
+}
+
+// static
+inline Module::Ptr Module::New(Store& store, ModuleDesc desc) {
+ return store.Alloc<Module>(store, std::move(desc));
+}
+
+inline const ModuleDesc& Module::desc() const {
+ return desc_;
+}
+
+inline const std::vector<ImportType>& Module::import_types() const {
+ return import_types_;
+}
+
+inline const std::vector<ExportType>& Module::export_types() const {
+ return export_types_;
+}
+
+//// Instance ////
+// static
+inline bool Instance::classof(const Object* obj) {
+ return obj->kind() == skind;
+}
+
+inline Ref Instance::module() const {
+ return module_;
+}
+
+inline const RefVec& Instance::imports() const {
+ return imports_;
+}
+
+inline const RefVec& Instance::funcs() const {
+ return funcs_;
+}
+
+inline const RefVec& Instance::tables() const {
+ return tables_;
+}
+
+inline const RefVec& Instance::memories() const {
+ return memories_;
+}
+
+inline const RefVec& Instance::globals() const {
+ return globals_;
+}
+
+inline const RefVec& Instance::tags() const {
+ return tags_;
+}
+
+inline const RefVec& Instance::exports() const {
+ return exports_;
+}
+
+inline const std::vector<ElemSegment>& Instance::elems() const {
+ return elems_;
+}
+
+inline std::vector<ElemSegment>& Instance::elems() {
+ return elems_;
+}
+
+inline const std::vector<DataSegment>& Instance::datas() const {
+ return datas_;
+}
+
+inline std::vector<DataSegment>& Instance::datas() {
+ return datas_;
+}
+
+//// Thread ////
+inline Store& Thread::store() {
+ return store_;
+}
+
+} // namespace interp
+} // namespace wabt
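A note on the Store/RefPtr machinery defined above: a RefPtr roots its object in the Store (via NewRoot/CopyRoot/DeleteRoot), so anything reachable through a live RefPtr survives garbage collection. Below is a minimal usage sketch based only on the declarations in this file; it is illustrative, not part of the patch (Succeeded comes from wabt/result.h):

    // Sketch only: allocate a host-object wrapper and look it up again by Ref.
    #include <cassert>
    #include "wabt/interp/interp.h"   // also brings in the inline definitions above

    using namespace wabt;
    using namespace wabt::interp;

    void Demo() {
      Store store;            // default Features
      int host_data = 42;     // placeholder host object

      Foreign::Ptr foreign = Foreign::New(store, &host_data);
      Ref ref = foreign.ref();   // stable handle, rooted by `foreign`

      RefPtr<Foreign> again;
      if (Succeeded(store.Get<Foreign>(ref, &again))) {   // checked, typed lookup
        assert(again->ptr() == &host_data);
      }
    }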
diff --git a/include/wabt/interp/interp-math.h b/include/wabt/interp/interp-math.h
new file mode 100644
index 00000000..ef93e336
--- /dev/null
+++ b/include/wabt/interp/interp-math.h
@@ -0,0 +1,412 @@
+/*
+ * Copyright 2020 WebAssembly Community Group participants
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WABT_INTERP_MATH_H_
+#define WABT_INTERP_MATH_H_
+
+#include <cmath>
+#include <limits>
+#include <string>
+#include <type_traits>
+
+#if COMPILER_IS_MSVC
+#include <emmintrin.h>
+#include <immintrin.h>
+#endif
+
+#include "wabt/common.h"
+#include "wabt/interp/interp.h"
+
+namespace wabt {
+namespace interp {
+
+template <
+ typename T,
+ typename std::enable_if<!std::is_floating_point<T>::value, int>::type = 0>
+bool WABT_VECTORCALL IsNaN(T val) {
+ return false;
+}
+
+template <
+ typename T,
+ typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
+bool WABT_VECTORCALL IsNaN(T val) {
+ return std::isnan(val);
+}
+
+template <
+ typename T,
+ typename std::enable_if<!std::is_floating_point<T>::value, int>::type = 0>
+T WABT_VECTORCALL CanonNaN(T val) {
+ return val;
+}
+
+template <
+ typename T,
+ typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
+T WABT_VECTORCALL CanonNaN(T val) {
+ if (WABT_UNLIKELY(std::isnan(val))) {
+ return std::numeric_limits<f32>::quiet_NaN();
+ }
+ return val;
+}
+
+template <typename T> T ShiftMask(T val) { return val & (sizeof(T)*8-1); }
+
+template <typename T> bool WABT_VECTORCALL IntEqz(T val) { return val == 0; }
+template <typename T> bool WABT_VECTORCALL Eq(T lhs, T rhs) { return lhs == rhs; }
+template <typename T> bool WABT_VECTORCALL Ne(T lhs, T rhs) { return lhs != rhs; }
+template <typename T> bool WABT_VECTORCALL Lt(T lhs, T rhs) { return lhs < rhs; }
+template <typename T> bool WABT_VECTORCALL Le(T lhs, T rhs) { return lhs <= rhs; }
+template <typename T> bool WABT_VECTORCALL Gt(T lhs, T rhs) { return lhs > rhs; }
+template <typename T> bool WABT_VECTORCALL Ge(T lhs, T rhs) { return lhs >= rhs; }
+template <typename T> T WABT_VECTORCALL IntClz(T val) { return Clz(val); }
+template <typename T> T WABT_VECTORCALL IntCtz(T val) { return Ctz(val); }
+template <typename T> T WABT_VECTORCALL IntPopcnt(T val) { return Popcount(val); }
+template <typename T> T WABT_VECTORCALL IntNot(T val) { return ~val; }
+template <typename T> T WABT_VECTORCALL IntNeg(T val) { return ~val + 1; }
+template <typename T> T WABT_VECTORCALL Add(T lhs, T rhs) { return CanonNaN(lhs + rhs); }
+template <typename T> T WABT_VECTORCALL Sub(T lhs, T rhs) { return CanonNaN(lhs - rhs); }
+template <typename T> T WABT_VECTORCALL IntAnd(T lhs, T rhs) { return lhs & rhs; }
+template <typename T> T WABT_VECTORCALL IntOr(T lhs, T rhs) { return lhs | rhs; }
+template <typename T> T WABT_VECTORCALL IntXor(T lhs, T rhs) { return lhs ^ rhs; }
+template <typename T> T WABT_VECTORCALL IntShl(T lhs, T rhs) { return lhs << ShiftMask(rhs); }
+template <typename T> T WABT_VECTORCALL IntShr(T lhs, T rhs) { return lhs >> ShiftMask(rhs); }
+template <typename T> T WABT_VECTORCALL IntMin(T lhs, T rhs) { return std::min(lhs, rhs); }
+template <typename T> T WABT_VECTORCALL IntMax(T lhs, T rhs) { return std::max(lhs, rhs); }
+template <typename T> T WABT_VECTORCALL IntAndNot(T lhs, T rhs) { return lhs & ~rhs; }
+template <typename T> T WABT_VECTORCALL IntAvgr(T lhs, T rhs) { return (lhs + rhs + 1) / 2; }
+template <typename T> T WABT_VECTORCALL Xchg(T lhs, T rhs) { return rhs; }
+
+// This is a wrapping absolute value function, so a negative number that is not
+// representable as a positive number will be unchanged (e.g. abs(-128) = 128).
+//
+// Note that std::abs() does not have this behavior (e.g. abs(-128) is UB).
+// Similarly, using unary minus is also UB.
+template <typename T>
+T WABT_VECTORCALL IntAbs(T val) {
+ static_assert(std::is_unsigned<T>::value, "T must be unsigned.");
+ const auto signbit = T(-1) << (sizeof(T) * 8 - 1);
+ return (val & signbit) ? ~val + 1 : val;
+}
+
+// Because of the integer promotion rules [1], any value of a type T which is
+// smaller than `int` will be converted to an `int`, as long as `int` can hold
+// any value of type T.
+//
+// So type `u16` will be promoted to `int`, since all values can be stored in
+// an int. Unfortunately, the product of two `u16` values cannot always be
+// stored in an `int` (e.g. 65535 * 65535). This triggers an error in UBSan.
+//
+// As a result, we make sure to promote the type ahead of time for `u16`. Note
+// that this isn't a problem for any other unsigned types.
+//
+// [1]: https://en.cppreference.com/w/cpp/language/implicit_conversion#Integral_promotion
+template <typename T> struct PromoteMul { using type = T; };
+template <> struct PromoteMul<u16> { using type = u32; };
+
+template <typename T>
+T WABT_VECTORCALL Mul(T lhs, T rhs) {
+ using U = typename PromoteMul<T>::type;
+ return CanonNaN(U(lhs) * U(rhs));
+}
+
+template <typename T> struct Mask { using Type = T; };
+template <> struct Mask<f32> { using Type = u32; };
+template <> struct Mask<f64> { using Type = u64; };
+
+template <typename T> typename Mask<T>::Type WABT_VECTORCALL EqMask(T lhs, T rhs) { return lhs == rhs ? -1 : 0; }
+template <typename T> typename Mask<T>::Type WABT_VECTORCALL NeMask(T lhs, T rhs) { return lhs != rhs ? -1 : 0; }
+template <typename T> typename Mask<T>::Type WABT_VECTORCALL LtMask(T lhs, T rhs) { return lhs < rhs ? -1 : 0; }
+template <typename T> typename Mask<T>::Type WABT_VECTORCALL LeMask(T lhs, T rhs) { return lhs <= rhs ? -1 : 0; }
+template <typename T> typename Mask<T>::Type WABT_VECTORCALL GtMask(T lhs, T rhs) { return lhs > rhs ? -1 : 0; }
+template <typename T> typename Mask<T>::Type WABT_VECTORCALL GeMask(T lhs, T rhs) { return lhs >= rhs ? -1 : 0; }
+
+template <typename T>
+T WABT_VECTORCALL IntRotl(T lhs, T rhs) {
+ return (lhs << ShiftMask(rhs)) | (lhs >> ShiftMask<T>(0 - rhs));
+}
+
+template <typename T>
+T WABT_VECTORCALL IntRotr(T lhs, T rhs) {
+ return (lhs >> ShiftMask(rhs)) | (lhs << ShiftMask<T>(0 - rhs));
+}
+
+// i{32,64}.{div,rem}_s are special-cased because they trap when dividing the
+// minimum signed value by -1. The modulo operation on x86 uses the same
+// instruction to generate the quotient and the remainder.
+template <typename T,
+ typename std::enable_if<std::is_signed<T>::value, int>::type = 0>
+bool IsNormalDivRem(T lhs, T rhs) {
+ return !(lhs == std::numeric_limits<T>::min() && rhs == -1);
+}
+
+template <typename T,
+ typename std::enable_if<!std::is_signed<T>::value, int>::type = 0>
+bool IsNormalDivRem(T lhs, T rhs) {
+ return true;
+}
+
+template <typename T>
+RunResult WABT_VECTORCALL IntDiv(T lhs, T rhs, T* out, std::string* out_msg) {
+ if (WABT_UNLIKELY(rhs == 0)) {
+ *out_msg = "integer divide by zero";
+ return RunResult::Trap;
+ }
+ if (WABT_LIKELY(IsNormalDivRem(lhs, rhs))) {
+ *out = lhs / rhs;
+ return RunResult::Ok;
+ } else {
+ *out_msg = "integer overflow";
+ return RunResult::Trap;
+ }
+}
+
+template <typename T>
+RunResult WABT_VECTORCALL IntRem(T lhs, T rhs, T* out, std::string* out_msg) {
+ if (WABT_UNLIKELY(rhs == 0)) {
+ *out_msg = "integer divide by zero";
+ return RunResult::Trap;
+ }
+ if (WABT_LIKELY(IsNormalDivRem(lhs, rhs))) {
+ *out = lhs % rhs;
+ } else {
+ *out = 0;
+ }
+ return RunResult::Ok;
+}
+
+#if COMPILER_IS_MSVC
+template <typename T> T WABT_VECTORCALL FloatAbs(T val);
+template <typename T> T WABT_VECTORCALL FloatCopysign(T lhs, T rhs);
+
+// Don't use std::{abs,copysign} directly on MSVC, since that seems to lose
+// the NaN tag.
+template <>
+inline f32 WABT_VECTORCALL FloatAbs(f32 val) {
+ return _mm_cvtss_f32(_mm_and_ps(
+ _mm_set1_ps(val), _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff))));
+}
+
+template <>
+inline f64 WABT_VECTORCALL FloatAbs(f64 val) {
+ return _mm_cvtsd_f64(
+ _mm_and_pd(_mm_set1_pd(val),
+ _mm_castsi128_pd(_mm_set1_epi64x(0x7fffffffffffffffull))));
+}
+
+template <>
+inline f32 WABT_VECTORCALL FloatCopysign(f32 lhs, f32 rhs) {
+ return _mm_cvtss_f32(
+ _mm_or_ps(_mm_and_ps(_mm_set1_ps(lhs),
+ _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff))),
+ _mm_and_ps(_mm_set1_ps(rhs),
+ _mm_castsi128_ps(_mm_set1_epi32(0x80000000)))));
+}
+
+template <>
+inline f64 WABT_VECTORCALL FloatCopysign(f64 lhs, f64 rhs) {
+ return _mm_cvtsd_f64(_mm_or_pd(
+ _mm_and_pd(_mm_set1_pd(lhs),
+ _mm_castsi128_pd(_mm_set1_epi64x(0x7fffffffffffffffull))),
+ _mm_and_pd(_mm_set1_pd(rhs),
+ _mm_castsi128_pd(_mm_set1_epi64x(0x8000000000000000ull)))));
+}
+
+#else
+template <typename T>
+T WABT_VECTORCALL FloatAbs(T val) {
+ return std::abs(val);
+}
+
+template <typename T>
+T WABT_VECTORCALL FloatCopysign(T lhs, T rhs) {
+ return std::copysign(lhs, rhs);
+}
+#endif
+
+#if COMPILER_IS_MSVC
+#else
+#endif
+
+template <typename T> T WABT_VECTORCALL FloatNeg(T val) { return -val; }
+template <typename T> T WABT_VECTORCALL FloatCeil(T val) { return CanonNaN(std::ceil(val)); }
+template <typename T> T WABT_VECTORCALL FloatFloor(T val) { return CanonNaN(std::floor(val)); }
+template <typename T> T WABT_VECTORCALL FloatTrunc(T val) { return CanonNaN(std::trunc(val)); }
+template <typename T> T WABT_VECTORCALL FloatNearest(T val) { return CanonNaN(std::nearbyint(val)); }
+template <typename T> T WABT_VECTORCALL FloatSqrt(T val) { return CanonNaN(std::sqrt(val)); }
+
+template <typename T>
+T WABT_VECTORCALL FloatDiv(T lhs, T rhs) {
+  // IEEE 754 specifies what should happen when dividing a float by zero, but
+ // C/C++ says it is undefined behavior.
+ if (WABT_UNLIKELY(rhs == 0)) {
+ return std::isnan(lhs) || lhs == 0
+ ? std::numeric_limits<T>::quiet_NaN()
+ : ((std::signbit(lhs) ^ std::signbit(rhs))
+ ? -std::numeric_limits<T>::infinity()
+ : std::numeric_limits<T>::infinity());
+ }
+ return CanonNaN(lhs / rhs);
+}
+
+template <typename T>
+T WABT_VECTORCALL FloatMin(T lhs, T rhs) {
+ if (WABT_UNLIKELY(std::isnan(lhs) || std::isnan(rhs))) {
+ return std::numeric_limits<T>::quiet_NaN();
+ } else if (WABT_UNLIKELY(lhs == 0 && rhs == 0)) {
+ return std::signbit(lhs) ? lhs : rhs;
+ } else {
+ return std::min(lhs, rhs);
+ }
+}
+
+template <typename T>
+T WABT_VECTORCALL FloatPMin(T lhs, T rhs) {
+ return std::min(lhs, rhs);
+}
+
+template <typename T>
+T WABT_VECTORCALL FloatMax(T lhs, T rhs) {
+ if (WABT_UNLIKELY(std::isnan(lhs) || std::isnan(rhs))) {
+ return std::numeric_limits<T>::quiet_NaN();
+ } else if (WABT_UNLIKELY(lhs == 0 && rhs == 0)) {
+ return std::signbit(lhs) ? rhs : lhs;
+ } else {
+ return std::max(lhs, rhs);
+ }
+}
+
+template <typename T>
+T WABT_VECTORCALL FloatPMax(T lhs, T rhs) {
+ return std::max(lhs, rhs);
+}
+
+template <typename R, typename T> bool WABT_VECTORCALL CanConvert(T val) { return true; }
+template <> inline bool WABT_VECTORCALL CanConvert<s32, f32>(f32 val) { return val >= -2147483648.f && val < 2147483648.f; }
+template <> inline bool WABT_VECTORCALL CanConvert<s32, f64>(f64 val) { return val > -2147483649. && val < 2147483648.; }
+template <> inline bool WABT_VECTORCALL CanConvert<u32, f32>(f32 val) { return val > -1.f && val < 4294967296.f; }
+template <> inline bool WABT_VECTORCALL CanConvert<u32, f64>(f64 val) { return val > -1. && val < 4294967296.; }
+template <> inline bool WABT_VECTORCALL CanConvert<s64, f32>(f32 val) { return val >= -9223372036854775808.f && val < 9223372036854775808.f; }
+template <> inline bool WABT_VECTORCALL CanConvert<s64, f64>(f64 val) { return val >= -9223372036854775808. && val < 9223372036854775808.; }
+template <> inline bool WABT_VECTORCALL CanConvert<u64, f32>(f32 val) { return val > -1.f && val < 18446744073709551616.f; }
+template <> inline bool WABT_VECTORCALL CanConvert<u64, f64>(f64 val) { return val > -1. && val < 18446744073709551616.; }
+
+template <typename R, typename T>
+R WABT_VECTORCALL Convert(T val) {
+ assert((CanConvert<R, T>(val)));
+ return static_cast<R>(val);
+}
+
+template <>
+inline f32 WABT_VECTORCALL Convert(f64 val) {
+ // The WebAssembly rounding mode means that these values (which are > F32_MAX)
+ // should be rounded to F32_MAX and not set to infinity. Unfortunately, UBSAN
+ // complains that the value is not representable as a float, so we'll special
+ // case them.
+ const f64 kMin = 3.4028234663852886e38;
+ const f64 kMax = 3.4028235677973366e38;
+ if (WABT_LIKELY(val >= -kMin && val <= kMin)) {
+ return val;
+ } else if (WABT_UNLIKELY(val > kMin && val < kMax)) {
+ return std::numeric_limits<f32>::max();
+ } else if (WABT_UNLIKELY(val > -kMax && val < -kMin)) {
+ return -std::numeric_limits<f32>::max();
+ } else if (WABT_UNLIKELY(std::isnan(val))) {
+ return std::numeric_limits<f32>::quiet_NaN();
+ } else {
+ return std::copysign(std::numeric_limits<f32>::infinity(), val);
+ }
+}
+
+template <>
+inline f32 WABT_VECTORCALL Convert(u64 val) {
+ return wabt_convert_uint64_to_float(val);
+}
+
+template <>
+inline f64 WABT_VECTORCALL Convert(u64 val) {
+ return wabt_convert_uint64_to_double(val);
+}
+
+template <>
+inline f32 WABT_VECTORCALL Convert(s64 val) {
+ return wabt_convert_int64_to_float(val);
+}
+
+template <>
+inline f64 WABT_VECTORCALL Convert(s64 val) {
+ return wabt_convert_int64_to_double(val);
+}
+
+template <typename T, int N>
+T WABT_VECTORCALL IntExtend(T val) {
+ // Hacker's delight 2.6 - sign extension
+ auto bit = T{1} << N;
+ auto mask = (bit << 1) - 1;
+ return ((val & mask) ^ bit) - bit;
+}
+
+template <typename R, typename T>
+R WABT_VECTORCALL IntTruncSat(T val) {
+ if (WABT_UNLIKELY(std::isnan(val))) {
+ return 0;
+ } else if (WABT_UNLIKELY(!CanConvert<R>(val))) {
+ return std::signbit(val) ? std::numeric_limits<R>::min()
+ : std::numeric_limits<R>::max();
+ } else {
+ return static_cast<R>(val);
+ }
+}
+
+template <typename T> struct SatPromote;
+template <> struct SatPromote<s8> { using type = s32; };
+template <> struct SatPromote<s16> { using type = s32; };
+template <> struct SatPromote<u8> { using type = s32; };
+template <> struct SatPromote<u16> { using type = s32; };
+
+template <typename R, typename T>
+R WABT_VECTORCALL Saturate(T val) {
+ static_assert(sizeof(R) < sizeof(T), "Incorrect types for Saturate");
+ const T min = std::numeric_limits<R>::min();
+ const T max = std::numeric_limits<R>::max();
+ return val > max ? max : val < min ? min : val;
+}
+
+template <typename T, typename U = typename SatPromote<T>::type>
+T WABT_VECTORCALL IntAddSat(T lhs, T rhs) {
+ return Saturate<T, U>(lhs + rhs);
+}
+
+template <typename T, typename U = typename SatPromote<T>::type>
+T WABT_VECTORCALL IntSubSat(T lhs, T rhs) {
+ return Saturate<T, U>(lhs - rhs);
+}
+
+template <typename T>
+T WABT_VECTORCALL SaturatingRoundingQMul(T lhs, T rhs) {
+ constexpr int size_in_bits = sizeof(T) * 8;
+ int round_const = 1 << (size_in_bits - 2);
+ int64_t product = lhs * rhs;
+ product += round_const;
+ product >>= (size_in_bits - 1);
+ return Saturate<T, int64_t>(product);
+}
+
+} // namespace interp
+} // namespace wabt
+
+#endif // WABT_INTERP_MATH_H_
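The IntDiv/IntRem and IntTruncSat helpers above exist to pin down cases that are undefined behavior in C or C++ but fully specified by WebAssembly. A small illustrative check of those semantics follows (not part of the patch; RunResult is declared in interp.h):

    // Sketch only: exercise the trap and saturation paths defined above.
    #include <cassert>
    #include <limits>
    #include <string>
    #include "wabt/interp/interp-math.h"

    using namespace wabt::interp;

    void CheckMathHelpers() {
      s32 out;
      std::string msg;
      const s32 min32 = std::numeric_limits<s32>::min();

      // INT32_MIN / -1 overflows, so IntDiv reports a trap instead of UB.
      assert(IntDiv<s32>(min32, -1, &out, &msg) == RunResult::Trap);

      // The matching rem is defined to be 0 rather than trapping.
      assert(IntRem<s32>(min32, -1, &out, &msg) == RunResult::Ok && out == 0);

      // Saturating truncation clamps out-of-range floats and maps NaN to 0.
      assert(IntTruncSat<s32>(1e10f) == std::numeric_limits<s32>::max());
      assert(IntTruncSat<u32>(-1.0f) == 0u);
      assert(IntTruncSat<s32>(std::numeric_limits<f32>::quiet_NaN()) == 0);
    }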
diff --git a/include/wabt/interp/interp-util.h b/include/wabt/interp/interp-util.h
new file mode 100644
index 00000000..383e96a6
--- /dev/null
+++ b/include/wabt/interp/interp-util.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2020 WebAssembly Community Group participants
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WABT_INTERP_UTIL_H_
+#define WABT_INTERP_UTIL_H_
+
+#include <string>
+#include <string_view>
+#include <vector>
+
+#include "wabt/interp/interp.h"
+
+namespace wabt {
+
+class Stream;
+
+namespace interp {
+
+std::string TypedValueToString(const TypedValue&);
+
+void WriteValue(Stream* stream, const TypedValue&);
+
+void WriteValues(Stream* stream, const ValueTypes&, const Values&);
+
+void WriteTrap(Stream* stream, const char* desc, const Trap::Ptr&);
+
+void WriteCall(Stream* stream,
+ std::string_view name,
+ const FuncType& func_type,
+ const Values& params,
+ const Values& results,
+ const Trap::Ptr& trap);
+
+} // namespace interp
+} // namespace wabt
+
+#endif // WABT_INTERP_UTIL_H_
diff --git a/include/wabt/interp/interp-wasi.h b/include/wabt/interp/interp-wasi.h
new file mode 100644
index 00000000..7eaaedf7
--- /dev/null
+++ b/include/wabt/interp/interp-wasi.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2020 WebAssembly Community Group participants
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WABT_INTERP_WASI_H_
+#define WABT_INTERP_WASI_H_
+
+#include "wabt/common.h"
+#include "wabt/error.h"
+#include "wabt/interp/interp.h"
+
+#ifdef WITH_WASI
+
+struct uvwasi_s;
+
+namespace wabt {
+namespace interp {
+
+Result WasiBindImports(const Module::Ptr& module,
+ RefVec& imports,
+ Stream* err_stream,
+ Stream* trace_stream);
+
+Result WasiRunStart(const Instance::Ptr& instance,
+ uvwasi_s* uvwasi,
+ Stream* stream,
+ Stream* trace_stream);
+
+} // namespace interp
+} // namespace wabt
+
+#endif
+
+#endif /* WABT_INTERP_WASI_H_ */
diff --git a/include/wabt/interp/interp.h b/include/wabt/interp/interp.h
new file mode 100644
index 00000000..418d00e0
--- /dev/null
+++ b/include/wabt/interp/interp.h
@@ -0,0 +1,1275 @@
+/*
+ * Copyright 2020 WebAssembly Community Group participants
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WABT_INTERP_H_
+#define WABT_INTERP_H_
+
+#include <cstdint>
+#include <functional>
+#include <memory>
+#include <set>
+#include <string>
+#include <string_view>
+#include <type_traits>
+#include <vector>
+
+#include "wabt/cast.h"
+#include "wabt/common.h"
+#include "wabt/feature.h"
+#include "wabt/opcode.h"
+#include "wabt/result.h"
+
+#include "wabt/interp/istream.h"
+
+namespace wabt {
+namespace interp {
+
+class Store;
+class Object;
+class Trap;
+class DataSegment;
+class ElemSegment;
+class Module;
+class Instance;
+class Thread;
+template <typename T>
+class RefPtr;
+
+using s8 = int8_t;
+using u8 = uint8_t;
+using s16 = int16_t;
+using u16 = uint16_t;
+using s32 = int32_t;
+using u32 = uint32_t;
+using Index = uint32_t;
+using s64 = int64_t;
+using u64 = uint64_t;
+using f32 = float;
+using f64 = double;
+
+using Buffer = std::vector<u8>;
+
+using ValueType = wabt::Type;
+using ValueTypes = std::vector<ValueType>;
+
+template <typename T>
+bool HasType(ValueType);
+template <typename T>
+void RequireType(ValueType);
+bool IsReference(ValueType);
+bool TypesMatch(ValueType expected, ValueType actual);
+
+using ExternKind = ExternalKind;
+enum class Mutability { Const, Var };
+enum class TagAttr { Exception };
+using SegmentMode = SegmentKind;
+enum class ElemKind { RefNull, RefFunc };
+
+enum class ObjectKind {
+ Null,
+ Foreign,
+ Trap,
+ Exception,
+ DefinedFunc,
+ HostFunc,
+ Table,
+ Memory,
+ Global,
+ Tag,
+ Module,
+ Instance,
+
+ First = Null,
+ Last = Instance,
+};
+
+static const int kCommandTypeCount = WABT_ENUM_COUNT(ObjectKind);
+
+const char* GetName(Mutability);
+const std::string GetName(ValueType);
+const char* GetName(ExternKind);
+const char* GetName(ObjectKind);
+
+struct Ref {
+ static const Ref Null;
+
+ Ref() = default;
+ explicit Ref(size_t index);
+
+ friend bool operator==(Ref, Ref);
+ friend bool operator!=(Ref, Ref);
+
+ size_t index;
+};
+using RefVec = std::vector<Ref>;
+
+template <typename T, u8 L>
+struct Simd {
+ using LaneType = T;
+ static const u8 lanes = L;
+
+ T v[L];
+
+ inline T& operator[](u8 idx) {
+#if WABT_BIG_ENDIAN
+ idx = (~idx) & (L - 1);
+#endif
+ return v[idx];
+ }
+ inline T operator[](u8 idx) const {
+#if WABT_BIG_ENDIAN
+ idx = (~idx) & (L - 1);
+#endif
+ return v[idx];
+ }
+};
+using s8x16 = Simd<s8, 16>;
+using u8x16 = Simd<u8, 16>;
+using s16x8 = Simd<s16, 8>;
+using u16x8 = Simd<u16, 8>;
+using s32x4 = Simd<s32, 4>;
+using u32x4 = Simd<u32, 4>;
+using s64x2 = Simd<s64, 2>;
+using u64x2 = Simd<u64, 2>;
+using f32x4 = Simd<f32, 4>;
+using f64x2 = Simd<f64, 2>;
+
+// Used for load extend instructions.
+using s8x8 = Simd<s8, 8>;
+using u8x8 = Simd<u8, 8>;
+using s16x4 = Simd<s16, 4>;
+using u16x4 = Simd<u16, 4>;
+using s32x2 = Simd<s32, 2>;
+using u32x2 = Simd<u32, 2>;
+
+//// Types ////
+
+bool CanGrow(const Limits&, u32 old_size, u32 delta, u32* new_size);
+Result Match(const Limits& expected,
+ const Limits& actual,
+ std::string* out_msg);
+
+struct ExternType {
+ explicit ExternType(ExternKind);
+ virtual ~ExternType() {}
+ virtual std::unique_ptr<ExternType> Clone() const = 0;
+
+ ExternKind kind;
+};
+
+struct FuncType : ExternType {
+ static const ExternKind skind = ExternKind::Func;
+ static bool classof(const ExternType* type);
+
+ explicit FuncType(ValueTypes params, ValueTypes results);
+
+ std::unique_ptr<ExternType> Clone() const override;
+
+ friend Result Match(const FuncType& expected,
+ const FuncType& actual,
+ std::string* out_msg);
+
+ ValueTypes params;
+ ValueTypes results;
+};
+
+struct TableType : ExternType {
+ static const ExternKind skind = ExternKind::Table;
+ static bool classof(const ExternType* type);
+
+ explicit TableType(ValueType, Limits);
+
+ std::unique_ptr<ExternType> Clone() const override;
+
+ friend Result Match(const TableType& expected,
+ const TableType& actual,
+ std::string* out_msg);
+
+ ValueType element;
+ Limits limits;
+};
+
+struct MemoryType : ExternType {
+ static const ExternKind skind = ExternKind::Memory;
+ static bool classof(const ExternType* type);
+
+ explicit MemoryType(Limits);
+
+ std::unique_ptr<ExternType> Clone() const override;
+
+ friend Result Match(const MemoryType& expected,
+ const MemoryType& actual,
+ std::string* out_msg);
+
+ Limits limits;
+};
+
+struct GlobalType : ExternType {
+ static const ExternKind skind = ExternKind::Global;
+ static bool classof(const ExternType* type);
+
+ explicit GlobalType(ValueType, Mutability);
+
+ std::unique_ptr<ExternType> Clone() const override;
+
+ friend Result Match(const GlobalType& expected,
+ const GlobalType& actual,
+ std::string* out_msg);
+
+ ValueType type;
+ Mutability mut;
+};
+
+struct TagType : ExternType {
+ static const ExternKind skind = ExternKind::Tag;
+ static bool classof(const ExternType* type);
+
+ explicit TagType(TagAttr, const ValueTypes&);
+
+ std::unique_ptr<ExternType> Clone() const override;
+
+ friend Result Match(const TagType& expected,
+ const TagType& actual,
+ std::string* out_msg);
+
+ TagAttr attr;
+ ValueTypes signature;
+};
+
+struct ImportType {
+ explicit ImportType(std::string module,
+ std::string name,
+ std::unique_ptr<ExternType>);
+ ImportType(const ImportType&);
+ ImportType& operator=(const ImportType&);
+
+ std::string module;
+ std::string name;
+ std::unique_ptr<ExternType> type;
+};
+
+struct ExportType {
+ explicit ExportType(std::string name, std::unique_ptr<ExternType>);
+ ExportType(const ExportType&);
+ ExportType& operator=(const ExportType&);
+
+ std::string name;
+ std::unique_ptr<ExternType> type;
+};
+
+//// Structure ////
+
+struct ImportDesc {
+ ImportType type;
+};
+
+struct LocalDesc {
+ ValueType type;
+ u32 count;
+ // One past the last local index that has this type. For example, a vector of
+ // LocalDesc might look like:
+ //
+ // {{I32, 2, 2}, {I64, 3, 5}, {F32, 1, 6}, ...}
+ //
+ // This makes it possible to use a binary search to find the type of a local
+ // at a given index.
+ u32 end;
+};
+
+// Metadata for representing exception handlers associated with a function's
+// code. This is needed to look up exceptions from call frames from interpreter
+// instructions.
+struct CatchDesc {
+ Index tag_index;
+ u32 offset;
+};
+
+// Handlers for a catch-less `try` or `try-catch` block are included in the
+// Catch kind. `try-delegate` instructions create a Delegate handler.
+enum class HandlerKind { Catch, Delegate };
+
+struct HandlerDesc {
+ HandlerKind kind;
+ u32 try_start_offset;
+ u32 try_end_offset;
+ std::vector<CatchDesc> catches;
+ union {
+ u32 catch_all_offset;
+ u32 delegate_handler_index;
+ };
+ // Local stack heights at the handler site that need to be restored.
+ u32 values;
+ u32 exceptions;
+};
+
+struct FuncDesc {
+ // Includes params.
+ ValueType GetLocalType(Index) const;
+
+ FuncType type;
+ std::vector<LocalDesc> locals;
+ u32 code_offset; // Istream offset.
+ std::vector<HandlerDesc> handlers;
+};
+
+struct TableDesc {
+ TableType type;
+};
+
+struct MemoryDesc {
+ MemoryType type;
+};
+
+struct GlobalDesc {
+ GlobalType type;
+ FuncDesc init_func;
+};
+
+struct TagDesc {
+ TagType type;
+};
+
+struct ExportDesc {
+ ExportType type;
+ Index index;
+};
+
+struct StartDesc {
+ Index func_index;
+};
+
+struct DataDesc {
+ Buffer data;
+ SegmentMode mode;
+ Index memory_index;
+ FuncDesc init_func;
+};
+
+struct ElemExpr {
+ ElemKind kind;
+ Index index;
+};
+
+struct ElemDesc {
+ std::vector<ElemExpr> elements;
+ ValueType type;
+ SegmentMode mode;
+ Index table_index;
+ FuncDesc init_func;
+};
+
+struct ModuleDesc {
+ std::vector<FuncType> func_types;
+ std::vector<ImportDesc> imports;
+ std::vector<FuncDesc> funcs;
+ std::vector<TableDesc> tables;
+ std::vector<MemoryDesc> memories;
+ std::vector<GlobalDesc> globals;
+ std::vector<TagDesc> tags;
+ std::vector<ExportDesc> exports;
+ std::vector<StartDesc> starts;
+ std::vector<ElemDesc> elems;
+ std::vector<DataDesc> datas;
+ Istream istream;
+};
+
+//// Runtime ////
+
+struct Frame {
+ explicit Frame(Ref func,
+ u32 values,
+ u32 exceptions,
+ u32 offset,
+ Instance*,
+ Module*);
+
+ void Mark(Store&);
+
+ Ref func;
+ u32 values; // Height of the value stack at this activation.
+ u32 exceptions; // Height of the exception stack at this activation.
+ u32 offset; // Istream offset; either the return PC, or the current PC.
+
+ // Cached for convenience. Both are null if func is a HostFunc.
+ Instance* inst;
+ Module* mod;
+};
+
+template <typename T>
+class FreeList {
+ public:
+ using Index = size_t;
+
+ ~FreeList();
+
+ template <typename... Args>
+ Index New(Args&&...);
+ void Delete(Index);
+
+ bool IsUsed(Index) const;
+
+ const T& Get(Index) const;
+ T& Get(Index);
+
+ Index size() const; // 1 greater than the maximum index.
+ Index count() const; // The number of used elements.
+
+ private:
+  // For Refs, the free bit is the most significant bit (0x80..0). It is never
+  // set for a valid Ref, since that would require more objects to be allocated
+  // than the address space can hold.
+ static const Index refFreeBit = (SIZE_MAX >> 1) + 1;
+
+  // For Object pointers, the free bit is 0x1. It is never set for a valid
+  // Object, since pointers are aligned to at least four bytes.
+ static const Index ptrFreeBit = 1;
+ static const int ptrFreeShift = 1;
+
+ std::vector<T> list_;
+  // If free_head_ is zero, there are no free slots in list_;
+  // otherwise free_head_ - 1 is the index of the first free slot.
+ Index free_head_ = 0;
+ Index free_items_ = 0;
+};
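
To make the two free-bit encodings in the comments above concrete, the constants work out as follows on a 64-bit target. This is a worked illustration only, not additional API, and the free-slot linking scheme mentioned in the last comment is an inference from the presence of ptrFreeShift.

// Worked illustration only, assuming a 64-bit size_t.
#include <cstdint>
static_assert(sizeof(size_t) == 8, "illustration assumes 64-bit size_t");
// Ref free bit: the top bit. A live Ref index that large would imply more than
// 2^63 allocated objects, which cannot happen, so the bit can mark free slots.
constexpr size_t kRefFreeBitExample = (SIZE_MAX >> 1) + 1;  // 0x8000000000000000
// Object* free bit: the low bit. Objects are aligned to at least 4 bytes, so a
// real pointer never has it set; a freed slot can then reuse its storage to
// link to the next free index (ptrFreeShift suggests the index is stored
// shifted past this bit).
constexpr size_t kPtrFreeBitExample = 1;
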
+
+class Store {
+ public:
+ using ObjectList = FreeList<Object*>;
+ using RootList = FreeList<Ref>;
+
+ explicit Store(const Features& = Features{});
+
+ Store(const Store&) = delete;
+ Store& operator=(const Store&) = delete;
+ Store& operator=(const Store&&) = delete;
+
+ bool IsValid(Ref) const;
+ bool HasValueType(Ref, ValueType) const;
+ template <typename T>
+ bool Is(Ref) const;
+
+ template <typename T, typename... Args>
+ RefPtr<T> Alloc(Args&&...);
+ template <typename T>
+ Result Get(Ref, RefPtr<T>* out);
+ template <typename T>
+ RefPtr<T> UnsafeGet(Ref);
+
+ RootList::Index NewRoot(Ref);
+ RootList::Index CopyRoot(RootList::Index);
+ void DeleteRoot(RootList::Index);
+
+ void Collect();
+ void Mark(Ref);
+ void Mark(const RefVec&);
+
+ ObjectList::Index object_count() const;
+
+ const Features& features() const;
+ void setFeatures(const Features& features) { features_ = features; }
+
+ std::set<Thread*>& threads();
+
+ private:
+ template <typename T>
+ friend class RefPtr;
+
+ struct GCContext {
+ int call_depth = 0;
+ std::vector<bool> marks;
+ std::vector<size_t> untraced_objects;
+ };
+
+ static const int max_call_depth = 10;
+
+ Features features_;
+ GCContext gc_context_;
+ // This set contains the currently active Thread objects.
+ std::set<Thread*> threads_;
+ ObjectList objects_;
+ RootList roots_;
+};
+
+template <typename T>
+class RefPtr {
+ public:
+ RefPtr();
+ RefPtr(Store&, Ref);
+ RefPtr(const RefPtr&);
+ RefPtr& operator=(const RefPtr&);
+ RefPtr(RefPtr&&);
+ RefPtr& operator=(RefPtr&&);
+ ~RefPtr();
+
+ template <typename U>
+ RefPtr(const RefPtr<U>&);
+ template <typename U>
+ RefPtr& operator=(const RefPtr<U>&);
+ template <typename U>
+ RefPtr(RefPtr&&);
+ template <typename U>
+ RefPtr& operator=(RefPtr&&);
+
+ template <typename U>
+ RefPtr<U> As();
+
+ bool empty() const;
+ void reset();
+
+ T* get() const;
+ T* operator->() const;
+ T& operator*() const;
+ explicit operator bool() const;
+
+ Ref ref() const;
+ Store* store() const;
+
+ template <typename U, typename V>
+ friend bool operator==(const RefPtr<U>& lhs, const RefPtr<V>& rhs);
+ template <typename U, typename V>
+ friend bool operator!=(const RefPtr<U>& lhs, const RefPtr<V>& rhs);
+
+ private:
+ template <typename U>
+ friend class RefPtr;
+
+ T* obj_;
+ Store* store_;
+ Store::RootList::Index root_index_;
+};
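
A minimal usage sketch tying Store, an object factory, and RefPtr rooting together; the Foreign object and host pointer are arbitrary stand-ins chosen for illustration.

// Illustrative only: a RefPtr holds a slot in the Store's RootList, so the
// object it points at survives Collect().
#include <cassert>
#include "wabt/interp/interp.h"

void RefPtrSketch() {
  using namespace wabt::interp;
  Store store;
  int host_data = 42;
  Foreign::Ptr foreign = Foreign::New(store, &host_data);  // Rooted RefPtr.
  store.Collect();                      // GC runs; `foreign` keeps it alive.
  assert(store.IsValid(foreign.ref()));
  assert(foreign->ptr() == &host_data);
}  // `foreign` goes out of scope here, releasing its root.
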
+
+struct Value {
+ static Value WABT_VECTORCALL Make(s32);
+ static Value WABT_VECTORCALL Make(u32);
+ static Value WABT_VECTORCALL Make(s64);
+ static Value WABT_VECTORCALL Make(u64);
+ static Value WABT_VECTORCALL Make(f32);
+ static Value WABT_VECTORCALL Make(f64);
+ static Value WABT_VECTORCALL Make(v128);
+ static Value WABT_VECTORCALL Make(Ref);
+ template <typename T, u8 L>
+ static Value WABT_VECTORCALL Make(Simd<T, L>);
+
+ template <typename T>
+ T WABT_VECTORCALL Get() const;
+ template <typename T>
+ void WABT_VECTORCALL Set(T);
+
+ private:
+ union {
+ u32 i32_;
+ u64 i64_;
+ f32 f32_;
+ f64 f64_;
+ v128 v128_;
+ Ref ref_;
+ };
+
+ public:
+#ifndef NDEBUG
+ Value() : v128_(0, 0, 0, 0), type(ValueType::Any) {}
+ void SetType(ValueType t) { type = t; }
+ void CheckType(ValueType t) const {
+ // Sadly we must allow Any here, since locals may be uninitialized.
+ // Alternatively we could modify InterpAlloca to set the type.
+ assert(t == type || type == ValueType::Any);
+ }
+ ValueType type;
+#else
+ Value() : v128_(0, 0, 0, 0) {}
+ void SetType(ValueType) {}
+ void CheckType(ValueType) const {}
+#endif
+};
+using Values = std::vector<Value>;
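
A short sketch of the Make/Get round trip; the concrete number is arbitrary, and the debug-only tag described above is what CheckType() asserts against.

// Illustrative only: construct an i32 Value and read it back.
#include <cassert>
#include "wabt/interp/interp.h"

void ValueSketch() {
  using namespace wabt::interp;
  Value v = Value::Make(u32{42});  // Stores into the union's i32_ member.
  u32 x = v.Get<u32>();
  assert(x == 42);
}
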
+
+struct TypedValue {
+ ValueType type;
+ Value value;
+};
+using TypedValues = std::vector<TypedValue>;
+
+using Finalizer = std::function<void(Object*)>;
+
+class Object {
+ public:
+ static bool classof(const Object* obj);
+ static const char* GetTypeName() { return "Object"; }
+ using Ptr = RefPtr<Object>;
+
+ Object(const Object&) = delete;
+ Object& operator=(const Object&) = delete;
+
+ virtual ~Object();
+
+ ObjectKind kind() const;
+ Ref self() const;
+
+ void* host_info() const;
+ void set_host_info(void*);
+
+ Finalizer get_finalizer() const;
+ void set_finalizer(Finalizer);
+
+ protected:
+ friend Store;
+ explicit Object(ObjectKind);
+ virtual void Mark(Store&) {}
+
+ ObjectKind kind_;
+ Finalizer finalizer_ = nullptr;
+ void* host_info_ = nullptr;
+ Ref self_ = Ref::Null;
+};
+
+class Foreign : public Object {
+ public:
+ static const ObjectKind skind = ObjectKind::Foreign;
+ static bool classof(const Object* obj);
+ static const char* GetTypeName() { return "Foreign"; }
+ using Ptr = RefPtr<Foreign>;
+
+ static Foreign::Ptr New(Store&, void*);
+
+ void* ptr();
+
+ private:
+ friend Store;
+ explicit Foreign(Store&, void*);
+ void Mark(Store&) override;
+
+ void* ptr_;
+};
+
+class Trap : public Object {
+ public:
+ static const ObjectKind skind = ObjectKind::Trap;
+ static bool classof(const Object* obj);
+ using Ptr = RefPtr<Trap>;
+
+ static Trap::Ptr New(Store&,
+ const std::string& msg,
+ const std::vector<Frame>& trace = std::vector<Frame>());
+
+ std::string message() const;
+
+ private:
+ friend Store;
+ explicit Trap(Store&,
+ const std::string& msg,
+ const std::vector<Frame>& trace = std::vector<Frame>());
+ void Mark(Store&) override;
+
+ std::string message_;
+ std::vector<Frame> trace_;
+};
+
+class Exception : public Object {
+ public:
+ static bool classof(const Object* obj);
+ static const ObjectKind skind = ObjectKind::Exception;
+ static const char* GetTypeName() { return "Exception"; }
+ using Ptr = RefPtr<Exception>;
+
+ static Exception::Ptr New(Store&, Ref tag, Values& args);
+
+ Ref tag() const;
+ Values& args();
+
+ private:
+ friend Store;
+ explicit Exception(Store&, Ref, Values&);
+ void Mark(Store&) override;
+
+ Ref tag_;
+ Values args_;
+};
+
+class Extern : public Object {
+ public:
+ static bool classof(const Object* obj);
+  static const char* GetTypeName() { return "Extern"; }
+ using Ptr = RefPtr<Extern>;
+
+ virtual Result Match(Store&, const ImportType&, Trap::Ptr* out_trap) = 0;
+ virtual const ExternType& extern_type() = 0;
+
+ protected:
+ friend Store;
+ explicit Extern(ObjectKind);
+
+ template <typename T>
+ Result MatchImpl(Store&,
+ const ImportType&,
+ const T& actual,
+ Trap::Ptr* out_trap);
+};
+
+class Func : public Extern {
+ public:
+ static bool classof(const Object* obj);
+ using Ptr = RefPtr<Func>;
+
+ Result Call(Thread& thread,
+ const Values& params,
+ Values& results,
+ Trap::Ptr* out_trap);
+
+  // Convenience function that creates a new Thread.
+ Result Call(Store&,
+ const Values& params,
+ Values& results,
+ Trap::Ptr* out_trap,
+ Stream* = nullptr);
+
+ const ExternType& extern_type() override;
+ const FuncType& type() const;
+
+ protected:
+ explicit Func(ObjectKind, FuncType);
+ virtual Result DoCall(Thread& thread,
+ const Values& params,
+ Values& results,
+ Trap::Ptr* out_trap) = 0;
+
+ FuncType type_;
+};
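
A sketch of calling through the convenience overload above. It assumes `func` wraps a function taking one i32 and returning one i32; that signature is purely an assumption for illustration.

// Illustrative only: call `func` with one i32 argument and read one i32 result.
#include "wabt/interp/interp.h"

wabt::Result CallSketch(wabt::interp::Store& store,
                        const wabt::interp::Func::Ptr& func) {
  using namespace wabt::interp;
  Values params{Value::Make(u32{7})};
  Values results;
  Trap::Ptr trap;
  if (Failed(func->Call(store, params, results, &trap))) {
    // The call trapped; `trap->message()` describes why.
    return wabt::Result::Error;
  }
  u32 answer = results[0].Get<u32>();  // One i32 result, per our assumption.
  (void)answer;
  return wabt::Result::Ok;
}
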
+
+class DefinedFunc : public Func {
+ public:
+ static bool classof(const Object* obj);
+ static const ObjectKind skind = ObjectKind::DefinedFunc;
+ static const char* GetTypeName() { return "DefinedFunc"; }
+ using Ptr = RefPtr<DefinedFunc>;
+
+ static DefinedFunc::Ptr New(Store&, Ref instance, FuncDesc);
+
+ Result Match(Store&, const ImportType&, Trap::Ptr* out_trap) override;
+
+ Ref instance() const;
+ const FuncDesc& desc() const;
+
+ protected:
+ Result DoCall(Thread& thread,
+ const Values& params,
+ Values& results,
+ Trap::Ptr* out_trap) override;
+
+ private:
+ friend Store;
+ explicit DefinedFunc(Store&, Ref instance, FuncDesc);
+ void Mark(Store&) override;
+
+ Ref instance_;
+ FuncDesc desc_;
+};
+
+class HostFunc : public Func {
+ public:
+ static bool classof(const Object* obj);
+ static const ObjectKind skind = ObjectKind::HostFunc;
+ static const char* GetTypeName() { return "HostFunc"; }
+ using Ptr = RefPtr<HostFunc>;
+
+ using Callback = std::function<Result(Thread& thread,
+ const Values& params,
+ Values& results,
+ Trap::Ptr* out_trap)>;
+
+ static HostFunc::Ptr New(Store&, FuncType, Callback);
+
+ Result Match(Store&, const ImportType&, Trap::Ptr* out_trap) override;
+
+ protected:
+ Result DoCall(Thread& thread,
+ const Values& params,
+ Values& results,
+ Trap::Ptr* out_trap) override;
+
+ private:
+ friend Store;
+ friend Thread;
+ explicit HostFunc(Store&, FuncType, Callback);
+ void Mark(Store&) override;
+
+ Callback callback_;
+};
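
A hedged sketch of registering a host function with the callback shape declared above. The two-vector FuncType construction is an assumption about its constructor, and the result vector is resized defensively in case the interpreter does not pre-size it.

// Illustrative only: a host "add" taking (i32, i32) and returning i32.
#include "wabt/interp/interp.h"

wabt::interp::HostFunc::Ptr MakeAddFunc(wabt::interp::Store& store) {
  using namespace wabt::interp;
  FuncType type{{ValueType::I32, ValueType::I32}, {ValueType::I32}};  // Assumed ctor shape.
  return HostFunc::New(
      store, type,
      [](Thread& thread, const Values& params, Values& results,
         Trap::Ptr* out_trap) -> wabt::Result {
        u32 lhs = params[0].Get<u32>();
        u32 rhs = params[1].Get<u32>();
        results.resize(1);  // Defensive; a no-op if already sized by the caller.
        results[0] = Value::Make(lhs + rhs);
        return wabt::Result::Ok;
      });
}
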
+
+class Table : public Extern {
+ public:
+ static bool classof(const Object* obj);
+ static const ObjectKind skind = ObjectKind::Table;
+ static const char* GetTypeName() { return "Table"; }
+ using Ptr = RefPtr<Table>;
+
+ static Table::Ptr New(Store&, TableType);
+
+ Result Match(Store&, const ImportType&, Trap::Ptr* out_trap) override;
+
+ bool IsValidRange(u32 offset, u32 size) const;
+
+ Result Get(u32 offset, Ref* out) const;
+ Result Set(Store&, u32 offset, Ref);
+ Result Grow(Store&, u32 count, Ref);
+ Result Fill(Store&, u32 offset, Ref, u32 size);
+ Result Init(Store&,
+ u32 dst_offset,
+ const ElemSegment&,
+ u32 src_offset,
+ u32 size);
+ static Result Copy(Store&,
+ Table& dst,
+ u32 dst_offset,
+ const Table& src,
+ u32 src_offset,
+ u32 size);
+
+ // Unsafe API.
+ Ref UnsafeGet(u32 offset) const;
+
+ const ExternType& extern_type() override;
+ const TableType& type() const;
+ const RefVec& elements() const;
+ u32 size() const;
+
+ private:
+ friend Store;
+ explicit Table(Store&, TableType);
+ void Mark(Store&) override;
+
+ TableType type_;
+ RefVec elements_;
+};
+
+class Memory : public Extern {
+ public:
+ static bool classof(const Object* obj);
+ static const ObjectKind skind = ObjectKind::Memory;
+ static const char* GetTypeName() { return "Memory"; }
+ using Ptr = RefPtr<Memory>;
+
+ static Memory::Ptr New(Store&, MemoryType);
+
+ Result Match(Store&, const ImportType&, Trap::Ptr* out_trap) override;
+
+ bool IsValidAccess(u64 offset, u64 addend, u64 size) const;
+ bool IsValidAtomicAccess(u64 offset, u64 addend, u64 size) const;
+
+ template <typename T>
+ Result Load(u64 offset, u64 addend, T* out) const;
+ template <typename T>
+ Result WABT_VECTORCALL Store(u64 offset, u64 addend, T);
+ Result Grow(u64 pages);
+ Result Fill(u64 offset, u8 value, u64 size);
+ Result Init(u64 dst_offset, const DataSegment&, u64 src_offset, u64 size);
+ static Result Copy(Memory& dst,
+ u64 dst_offset,
+ const Memory& src,
+ u64 src_offset,
+ u64 size);
+
+ // Fake atomics; just checks alignment.
+ template <typename T>
+ Result AtomicLoad(u64 offset, u64 addend, T* out) const;
+ template <typename T>
+ Result AtomicStore(u64 offset, u64 addend, T);
+ template <typename T, typename F>
+ Result AtomicRmw(u64 offset, u64 addend, T, F&& func, T* out);
+ template <typename T>
+ Result AtomicRmwCmpxchg(u64 offset, u64 addend, T expect, T replace, T* out);
+
+ u64 ByteSize() const;
+ u64 PageSize() const;
+
+ // Unsafe API.
+ template <typename T>
+ T WABT_VECTORCALL UnsafeLoad(u64 offset, u64 addend) const;
+ u8* UnsafeData();
+
+ const ExternType& extern_type() override;
+ const MemoryType& type() const;
+
+ private:
+ friend class Store;
+ explicit Memory(class Store&, MemoryType);
+ void Mark(class Store&) override;
+
+ MemoryType type_;
+ Buffer data_;
+ u64 pages_;
+};
+
+class Global : public Extern {
+ public:
+ static bool classof(const Object* obj);
+ static const ObjectKind skind = ObjectKind::Global;
+ static const char* GetTypeName() { return "Global"; }
+ using Ptr = RefPtr<Global>;
+
+ static Global::Ptr New(Store&, GlobalType, Value);
+
+ Result Match(Store&, const ImportType&, Trap::Ptr* out_trap) override;
+
+ Value Get() const;
+ template <typename T>
+ Result Get(T* out) const;
+ template <typename T>
+ Result WABT_VECTORCALL Set(T);
+ Result Set(Store&, Ref);
+
+ template <typename T>
+ T WABT_VECTORCALL UnsafeGet() const;
+ void UnsafeSet(Value);
+
+ const ExternType& extern_type() override;
+ const GlobalType& type() const;
+
+ private:
+ friend Store;
+ explicit Global(Store&, GlobalType, Value);
+ void Mark(Store&) override;
+
+ GlobalType type_;
+ Value value_;
+};
+
+class Tag : public Extern {
+ public:
+ static bool classof(const Object* obj);
+ static const ObjectKind skind = ObjectKind::Tag;
+ static const char* GetTypeName() { return "Tag"; }
+ using Ptr = RefPtr<Tag>;
+
+ static Tag::Ptr New(Store&, TagType);
+
+ Result Match(Store&, const ImportType&, Trap::Ptr* out_trap) override;
+
+ const ExternType& extern_type() override;
+ const TagType& type() const;
+
+ private:
+ friend Store;
+ explicit Tag(Store&, TagType);
+ void Mark(Store&) override;
+
+ TagType type_;
+};
+
+class ElemSegment {
+ public:
+ explicit ElemSegment(const ElemDesc*, RefPtr<Instance>&);
+
+ bool IsValidRange(u32 offset, u32 size) const;
+ void Drop();
+
+ const ElemDesc& desc() const;
+ const RefVec& elements() const;
+ u32 size() const;
+
+ private:
+ friend Instance;
+ void Mark(Store&);
+
+ const ElemDesc* desc_; // Borrowed from the Module.
+ RefVec elements_;
+};
+
+class DataSegment {
+ public:
+ explicit DataSegment(const DataDesc*);
+
+ bool IsValidRange(u64 offset, u64 size) const;
+ void Drop();
+
+ const DataDesc& desc() const;
+ u64 size() const;
+
+ private:
+ const DataDesc* desc_; // Borrowed from the Module.
+ u64 size_;
+};
+
+class Module : public Object {
+ public:
+ static bool classof(const Object* obj);
+ static const ObjectKind skind = ObjectKind::Module;
+ static const char* GetTypeName() { return "Module"; }
+ using Ptr = RefPtr<Module>;
+
+ static Module::Ptr New(Store&, ModuleDesc);
+
+ const ModuleDesc& desc() const;
+ const std::vector<ImportType>& import_types() const;
+ const std::vector<ExportType>& export_types() const;
+
+ private:
+ friend Store;
+ friend Instance;
+ explicit Module(Store&, ModuleDesc);
+ void Mark(Store&) override;
+
+ ModuleDesc desc_;
+ std::vector<ImportType> import_types_;
+ std::vector<ExportType> export_types_;
+};
+
+class Instance : public Object {
+ public:
+ static bool classof(const Object* obj);
+ static const ObjectKind skind = ObjectKind::Instance;
+ static const char* GetTypeName() { return "Instance"; }
+ using Ptr = RefPtr<Instance>;
+
+ static Instance::Ptr Instantiate(Store&,
+ Ref module,
+ const RefVec& imports,
+ Trap::Ptr* out_trap);
+
+ Ref module() const;
+ const RefVec& imports() const;
+ const RefVec& funcs() const;
+ const RefVec& tables() const;
+ const RefVec& memories() const;
+ const RefVec& globals() const;
+ const RefVec& tags() const;
+ const RefVec& exports() const;
+ const std::vector<ElemSegment>& elems() const;
+ std::vector<ElemSegment>& elems();
+ const std::vector<DataSegment>& datas() const;
+ std::vector<DataSegment>& datas();
+
+ private:
+ friend Store;
+ friend ElemSegment;
+ friend DataSegment;
+ explicit Instance(Store&, Ref module);
+ void Mark(Store&) override;
+
+ Result CallInitFunc(Store&,
+ const Ref func_ref,
+ Value* result,
+ Trap::Ptr* out_trap);
+
+ Ref module_;
+ RefVec imports_;
+ RefVec funcs_;
+ RefVec tables_;
+ RefVec memories_;
+ RefVec globals_;
+ RefVec tags_;
+ RefVec exports_;
+ std::vector<ElemSegment> elems_;
+ std::vector<DataSegment> datas_;
+};
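
A sketch of the instantiation flow, assuming a ModuleDesc already produced by ReadBinaryInterp and a module with no imports; treating a null result as "instantiation trapped" is also an assumption here.

// Illustrative only: turn a ModuleDesc into a Module, then an Instance.
#include <utility>
#include "wabt/interp/interp.h"

wabt::interp::Instance::Ptr InstantiateSketch(wabt::interp::Store& store,
                                              wabt::interp::ModuleDesc desc) {
  using namespace wabt::interp;
  Module::Ptr module = Module::New(store, std::move(desc));
  RefVec imports;  // Empty: assumes the module imports nothing.
  Trap::Ptr trap;
  Instance::Ptr instance =
      Instance::Instantiate(store, module.ref(), imports, &trap);
  if (!instance) {
    // Assumed behavior: instantiation trapped (e.g. in a start function or an
    // init expression); `trap` carries the details.
    return {};
  }
  // Exported objects are then reachable through instance->exports().
  return instance;
}
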
+
+enum class RunResult {
+ Ok,
+ Return,
+ Trap,
+ Exception,
+};
+
+class Thread {
+ public:
+ struct Options {
+ static const u32 kDefaultValueStackSize = 64 * 1024 / sizeof(Value);
+ static const u32 kDefaultCallStackSize = 64 * 1024 / sizeof(Frame);
+
+ u32 value_stack_size = kDefaultValueStackSize;
+ u32 call_stack_size = kDefaultCallStackSize;
+ Stream* trace_stream = nullptr;
+ };
+
+ Thread(Store& store, Stream* trace_stream = nullptr);
+ ~Thread();
+
+ RunResult Run(Trap::Ptr* out_trap);
+ RunResult Run(int num_instructions, Trap::Ptr* out_trap);
+ RunResult Step(Trap::Ptr* out_trap);
+
+ Store& store();
+ void Mark();
+
+ Instance* GetCallerInstance();
+
+ private:
+ friend Store;
+ friend DefinedFunc;
+
+ struct TraceSource;
+
+ RunResult PushCall(Ref func, u32 offset, Trap::Ptr* out_trap);
+ RunResult PushCall(const DefinedFunc&, Trap::Ptr* out_trap);
+ RunResult PushCall(const HostFunc&, Trap::Ptr* out_trap);
+ RunResult PopCall();
+ RunResult DoCall(const Func::Ptr&, Trap::Ptr* out_trap);
+ RunResult DoReturnCall(const Func::Ptr&, Trap::Ptr* out_trap);
+
+ void PushValues(const ValueTypes&, const Values&);
+ void PopValues(const ValueTypes&, Values*);
+
+ Value& Pick(Index);
+
+ template <typename T>
+ T WABT_VECTORCALL Pop();
+ Value Pop();
+ u64 PopPtr(const Memory::Ptr& memory);
+
+ template <typename T>
+ void WABT_VECTORCALL Push(T);
+ void Push(Value);
+ void Push(Ref);
+
+ template <typename R, typename T>
+ using UnopFunc = R WABT_VECTORCALL(T);
+ template <typename R, typename T>
+ using UnopTrapFunc = RunResult WABT_VECTORCALL(T, R*, std::string*);
+ template <typename R, typename T>
+ using BinopFunc = R WABT_VECTORCALL(T, T);
+ template <typename R, typename T>
+ using BinopTrapFunc = RunResult WABT_VECTORCALL(T, T, R*, std::string*);
+
+ template <typename R, typename T>
+ RunResult DoUnop(UnopFunc<R, T>);
+ template <typename R, typename T>
+ RunResult DoUnop(UnopTrapFunc<R, T>, Trap::Ptr* out_trap);
+ template <typename R, typename T>
+ RunResult DoBinop(BinopFunc<R, T>);
+ template <typename R, typename T>
+ RunResult DoBinop(BinopTrapFunc<R, T>, Trap::Ptr* out_trap);
+
+ template <typename R, typename T>
+ RunResult DoConvert(Trap::Ptr* out_trap);
+ template <typename R, typename T>
+ RunResult DoReinterpret();
+
+ template <typename T>
+ RunResult Load(Instr, T* out, Trap::Ptr* out_trap);
+ template <typename T, typename V = T>
+ RunResult DoLoad(Instr, Trap::Ptr* out_trap);
+ template <typename T, typename V = T>
+ RunResult DoStore(Instr, Trap::Ptr* out_trap);
+
+ RunResult DoMemoryInit(Instr, Trap::Ptr* out_trap);
+ RunResult DoDataDrop(Instr);
+ RunResult DoMemoryCopy(Instr, Trap::Ptr* out_trap);
+ RunResult DoMemoryFill(Instr, Trap::Ptr* out_trap);
+
+ RunResult DoTableInit(Instr, Trap::Ptr* out_trap);
+ RunResult DoElemDrop(Instr);
+ RunResult DoTableCopy(Instr, Trap::Ptr* out_trap);
+ RunResult DoTableGet(Instr, Trap::Ptr* out_trap);
+ RunResult DoTableSet(Instr, Trap::Ptr* out_trap);
+ RunResult DoTableGrow(Instr, Trap::Ptr* out_trap);
+ RunResult DoTableSize(Instr);
+ RunResult DoTableFill(Instr, Trap::Ptr* out_trap);
+
+ template <typename R, typename T>
+ RunResult DoSimdSplat();
+ template <typename R, typename T>
+ RunResult DoSimdExtract(Instr);
+ template <typename R, typename T>
+ RunResult DoSimdReplace(Instr);
+
+ template <typename R, typename T>
+ RunResult DoSimdUnop(UnopFunc<R, T>);
+ // Like DoSimdUnop but zeroes top half.
+ template <typename R, typename T>
+ RunResult DoSimdUnopZero(UnopFunc<R, T>);
+ template <typename R, typename T>
+ RunResult DoSimdBinop(BinopFunc<R, T>);
+ RunResult DoSimdBitSelect();
+ template <typename S, u8 count>
+ RunResult DoSimdIsTrue();
+ template <typename S>
+ RunResult DoSimdBitmask();
+ template <typename R, typename T>
+ RunResult DoSimdShift(BinopFunc<R, T>);
+ template <typename S>
+ RunResult DoSimdLoadSplat(Instr, Trap::Ptr* out_trap);
+ template <typename S>
+ RunResult DoSimdLoadLane(Instr, Trap::Ptr* out_trap);
+ template <typename S>
+ RunResult DoSimdStoreLane(Instr, Trap::Ptr* out_trap);
+ template <typename S, typename T>
+ RunResult DoSimdLoadZero(Instr, Trap::Ptr* out_trap);
+ RunResult DoSimdSwizzle();
+ RunResult DoSimdShuffle(Instr);
+ template <typename S, typename T>
+ RunResult DoSimdNarrow();
+ template <typename S, typename T, bool low>
+ RunResult DoSimdConvert();
+ template <typename S, typename T>
+ RunResult DoSimdDot();
+ template <typename S, typename T>
+ RunResult DoSimdLoadExtend(Instr, Trap::Ptr* out_trap);
+ template <typename S, typename T>
+ RunResult DoSimdExtaddPairwise();
+ template <typename S, typename T, bool low>
+ RunResult DoSimdExtmul();
+
+ template <typename T, typename V = T>
+ RunResult DoAtomicLoad(Instr, Trap::Ptr* out_trap);
+ template <typename T, typename V = T>
+ RunResult DoAtomicStore(Instr, Trap::Ptr* out_trap);
+ template <typename R, typename T>
+ RunResult DoAtomicRmw(BinopFunc<T, T>, Instr, Trap::Ptr* out_trap);
+ template <typename T, typename V = T>
+ RunResult DoAtomicRmwCmpxchg(Instr, Trap::Ptr* out_trap);
+
+ RunResult DoThrow(Exception::Ptr exn_ref);
+
+ RunResult StepInternal(Trap::Ptr* out_trap);
+
+ std::vector<Frame> frames_;
+ std::vector<Value> values_;
+ std::vector<u32> refs_; // Index into values_.
+
+ // Exception handling requires tracking a separate stack of caught
+ // exceptions for catch blocks.
+ RefVec exceptions_;
+
+ // Cached for convenience.
+ Store& store_;
+ Instance* inst_ = nullptr;
+ Module* mod_ = nullptr;
+
+ // Tracing.
+ Stream* trace_stream_;
+ std::unique_ptr<TraceSource> trace_source_;
+};
+
+struct Thread::TraceSource : Istream::TraceSource {
+ public:
+ explicit TraceSource(Thread*);
+ std::string Header(Istream::Offset) override;
+ std::string Pick(Index, Instr) override;
+
+ private:
+ ValueType GetLocalType(Index);
+ ValueType GetGlobalType(Index);
+ ValueType GetTableElementType(Index);
+
+ Thread* thread_;
+};
+
+} // namespace interp
+} // namespace wabt
+
+#include "wabt/interp/interp-inl.h"
+
+#endif // WABT_INTERP_H_
diff --git a/include/wabt/interp/istream.h b/include/wabt/interp/istream.h
new file mode 100644
index 00000000..06e1cfca
--- /dev/null
+++ b/include/wabt/interp/istream.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2020 WebAssembly Community Group participants
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WABT_INTERP_ISTREAM_H_
+#define WABT_INTERP_ISTREAM_H_
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+#include "wabt/common.h"
+#include "wabt/opcode.h"
+#include "wabt/stream.h"
+
+namespace wabt {
+namespace interp {
+
+using u8 = uint8_t;
+using u16 = uint16_t;
+using u32 = uint32_t;
+using u64 = uint64_t;
+using f32 = float;
+using f64 = double;
+
+using Buffer = std::vector<u8>;
+
+using ValueType = wabt::Type;
+
+// Group instructions by the shape of their immediates and operands. This lets
+// us simplify instruction decoding, disassembly, and tracing. Each enumerator
+// below is followed by an example instruction that uses that encoding.
+enum class InstrKind {
+ Imm_0_Op_0, // Nop
+ Imm_0_Op_1, // i32.eqz
+ Imm_0_Op_2, // i32.add
+ Imm_0_Op_3, // select
+ Imm_Jump_Op_0, // br
+ Imm_Jump_Op_1, // br_if
+ Imm_Index_Op_0, // global.get
+ Imm_Index_Op_1, // global.set
+ Imm_Index_Op_2, // table.set
+ Imm_Index_Op_3, // memory.fill
+ Imm_Index_Op_N, // call
+ Imm_Index_Index_Op_3, // memory.init
+ Imm_Index_Index_Op_N, // call_indirect
+ Imm_Index_Offset_Op_1, // i32.load
+ Imm_Index_Offset_Op_2, // i32.store
+ Imm_Index_Offset_Op_3, // i32.atomic.rmw.cmpxchg
+ Imm_Index_Offset_Lane_Op_2, // v128.load8_lane
+ Imm_I32_Op_0, // i32.const
+ Imm_I64_Op_0, // i64.const
+ Imm_F32_Op_0, // f32.const
+ Imm_F64_Op_0, // f64.const
+ Imm_I32_I32_Op_0, // drop_keep
+ Imm_I8_Op_1, // i32x4.extract_lane
+ Imm_I8_Op_2, // i32x4.replace_lane
+ Imm_V128_Op_0, // v128.const
+ Imm_V128_Op_2, // i8x16.shuffle
+};
+
+struct Instr {
+ Opcode op;
+ InstrKind kind;
+ union {
+ u8 imm_u8;
+ u32 imm_u32;
+ f32 imm_f32;
+ u64 imm_u64;
+ f64 imm_f64;
+ v128 imm_v128;
+ struct {
+ u32 fst, snd;
+ } imm_u32x2;
+ struct {
+ u32 fst, snd;
+ u8 idx;
+ } imm_u32x2_u8;
+ };
+};
+
+class Istream {
+ public:
+ using SerializedOpcode = u32; // TODO: change to u16
+ using Offset = u32;
+ static const Offset kInvalidOffset = ~0;
+ // Each br_table entry is made up of three instructions:
+ //
+ // interp_drop_keep $drop $keep
+ // interp_catch_drop $catches
+ // br $label
+ //
+ // Each opcode is a SerializedOpcode, and each immediate is a u32.
+ static const Offset kBrTableEntrySize =
+ sizeof(SerializedOpcode) * 3 + 4 * sizeof(u32);
+
+ // Emit API.
+ void Emit(u32);
+ void Emit(Opcode::Enum);
+ void Emit(Opcode::Enum, u8);
+ void Emit(Opcode::Enum, u32);
+ void Emit(Opcode::Enum, u64);
+ void Emit(Opcode::Enum, v128);
+ void Emit(Opcode::Enum, u32, u32);
+ void Emit(Opcode::Enum, u32, u32, u8);
+ void EmitDropKeep(u32 drop, u32 keep);
+ void EmitCatchDrop(u32 drop);
+
+ Offset EmitFixupU32();
+ void ResolveFixupU32(Offset);
+
+ Offset end() const;
+
+ // Read API.
+ Instr Read(Offset*) const;
+
+ // Disassemble/Trace API.
+ // TODO separate out disassembly/tracing?
+ struct TraceSource {
+ virtual ~TraceSource() {}
+ // Whatever content should go before the instruction on each line, e.g. the
+ // call stack size, value stack size, and istream offset.
+ virtual std::string Header(Offset) = 0;
+ virtual std::string Pick(Index, Instr) = 0;
+ };
+
+ struct DisassemblySource : TraceSource {
+ std::string Header(Offset) override;
+ std::string Pick(Index, Instr) override;
+ };
+
+ void Disassemble(Stream*) const;
+ Offset Disassemble(Stream*, Offset) const;
+ void Disassemble(Stream*, Offset from, Offset to) const;
+
+ Offset Trace(Stream*, Offset, TraceSource*) const;
+
+ private:
+ template <typename T>
+ void WABT_VECTORCALL EmitAt(Offset, T val);
+ template <typename T>
+ void WABT_VECTORCALL EmitInternal(T val);
+
+ template <typename T>
+ T WABT_VECTORCALL ReadAt(Offset*) const;
+
+ Buffer data_;
+};
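
For scale: with SerializedOpcode currently u32, kBrTableEntrySize works out to 3 * 4 + 4 * 4 = 28 bytes per entry. Below is a minimal sketch of walking an instruction stream with Read() and dispatching on InstrKind; the mapping of kinds to immediate fields in the comments is inferred from the enum's examples, not stated by the header.

// Illustrative only: linear decode loop over an Istream.
#include "wabt/interp/istream.h"

void WalkIstream(const wabt::interp::Istream& istream) {
  using namespace wabt::interp;
  Istream::Offset offset = 0;
  while (offset < istream.end()) {
    Instr instr = istream.Read(&offset);  // Advances `offset` past this instr.
    switch (instr.kind) {
      case InstrKind::Imm_I32_Op_0:           // e.g. i32.const
        /* instr.imm_u32 presumably holds the constant */
        break;
      case InstrKind::Imm_Index_Offset_Op_2:  // e.g. i32.store
        /* instr.imm_u32x2 presumably holds the memory index and offset */
        break;
      default:
        break;  // Remaining kinds elided in this sketch.
    }
  }
}
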
+
+} // namespace interp
+} // namespace wabt
+
+#endif // WABT_INTERP_ISTREAM_H_
diff --git a/include/wabt/interp/wasi_api.def b/include/wabt/interp/wasi_api.def
new file mode 100644
index 00000000..008e9fd6
--- /dev/null
+++ b/include/wabt/interp/wasi_api.def
@@ -0,0 +1,28 @@
+WASI_FUNC(proc_exit)
+WASI_FUNC(fd_read)
+WASI_FUNC(fd_pread)
+WASI_FUNC(fd_write)
+WASI_FUNC(fd_pwrite)
+WASI_FUNC(fd_close)
+WASI_FUNC(fd_seek)
+WASI_FUNC(fd_prestat_get)
+WASI_FUNC(fd_prestat_dir_name)
+WASI_FUNC(fd_fdstat_get)
+WASI_FUNC(fd_fdstat_set_flags)
+WASI_FUNC(fd_filestat_get)
+WASI_FUNC(fd_readdir)
+WASI_FUNC(environ_sizes_get)
+WASI_FUNC(environ_get)
+WASI_FUNC(args_sizes_get)
+WASI_FUNC(args_get)
+WASI_FUNC(path_open)
+WASI_FUNC(path_filestat_get)
+WASI_FUNC(path_symlink)
+WASI_FUNC(path_unlink_file)
+WASI_FUNC(path_remove_directory)
+WASI_FUNC(path_create_directory)
+WASI_FUNC(path_readlink)
+WASI_FUNC(path_rename)
+WASI_FUNC(clock_time_get)
+WASI_FUNC(poll_oneoff)
+WASI_FUNC(random_get)