author     Alon Zakai <azakai@google.com>    2021-10-06 14:23:48 -0700
committer  GitHub <noreply@github.com>       2021-10-06 21:23:48 +0000
commit     10efbdfbb39f3710a21746d28dbcf4aa7156f147 (patch)
tree       5d20027e0be1b3840cd85ed9814d15b9c0527b0b
parent     19a98619cbe402b39dc298604169f8dd994c082f (diff)
[Wasm GC] GlobalTypeOptimization: Turn fields immutable when possible (#4213)
Add a new pass to perform global type optimization. So far it does just one thing: find fields that have no struct.set and turn them immutable (where possible - sub- and supertypes must agree).

To do that, this adds a GlobalTypeRewriter utility which rewrites all the heap types in the module, allowing changes to be made while doing so. In this PR the change is to flip the mutable field. Otherwise, the utility handles all the boilerplate of creating temporary heap types using a TypeBuilder, and it handles replacing the types in every place they are used in the module.

This is not enabled by default yet, as I don't see enough of a benefit on j2cl. This PR is basically the simplest thing to do in the space of global type optimization, and the simplest way I can think of to fully test the GlobalTypeRewriter (which can't really be done as a unit test, since we want to emit a full module and validate it, etc.). It builds the foundation for more complicated things like removing unused fields, subtyping fields, and more.
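For orientation, here is a minimal sketch of how the new GlobalTypeRewriter hooks are intended to be used, based on the API added in src/ir/type-updating.h below. The subclass name and its blanket field-flipping policy are illustrative only; the actual pass (GlobalTypeOptimization.cpp) flips only those fields it has proven to have no struct.set.

#include "ir/type-updating.h"
#include "wasm.h"

namespace wasm {

// Illustrative subclass: flip every struct field to immutable. The real pass
// consults its analysis results before doing this.
struct MutabilityFlipper : public GlobalTypeRewriter {
  MutabilityFlipper(Module& wasm) : GlobalTypeRewriter(wasm) {}

  // Called for each struct type; struct_ is the temporary definition in the
  // TypeBuilder and may be modified freely before the new types are built.
  void modifyStruct(HeapType oldType, Struct& struct_) override {
    for (auto& field : struct_.fields) {
      field.mutable_ = Immutable;
    }
  }
};

} // namespace wasm

Calling MutabilityFlipper(module).update() then builds the new heap types with a TypeBuilder and replaces the old ones everywhere they appear in the module.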
-rw-r--r--  src/ir/type-updating.cpp               | 208
-rw-r--r--  src/ir/type-updating.h                 |  37
-rw-r--r--  src/passes/CMakeLists.txt              |   1
-rw-r--r--  src/passes/GlobalTypeOptimization.cpp  | 182
-rw-r--r--  src/passes/pass.cpp                    |   3
-rw-r--r--  src/passes/passes.h                    |   1
-rw-r--r--  test/lit/help/optimization-opts.test   |   2
-rw-r--r--  test/lit/passes/gto-mutability.wast    | 348
8 files changed, 782 insertions(+), 0 deletions(-)
diff --git a/src/ir/type-updating.cpp b/src/ir/type-updating.cpp
index a3ce8aad7..91f74cff5 100644
--- a/src/ir/type-updating.cpp
+++ b/src/ir/type-updating.cpp
@@ -16,9 +16,217 @@
#include "type-updating.h"
#include "find_all.h"
+#include "ir/module-utils.h"
+#include "wasm-type.h"
+#include "wasm.h"
namespace wasm {
+GlobalTypeRewriter::GlobalTypeRewriter(Module& wasm) : wasm(wasm) {}
+
+void GlobalTypeRewriter::update() {
+ ModuleUtils::collectHeapTypes(wasm, types, typeIndices);
+ typeBuilder.grow(types.size());
+
+ // Create the temporary heap types.
+ for (Index i = 0; i < types.size(); i++) {
+ auto type = types[i];
+ if (type.isSignature()) {
+ auto sig = type.getSignature();
+ TypeList newParams, newResults;
+ for (auto t : sig.params) {
+ newParams.push_back(getTempType(t));
+ }
+ for (auto t : sig.results) {
+ newResults.push_back(getTempType(t));
+ }
+ Signature newSig(typeBuilder.getTempTupleType(newParams),
+ typeBuilder.getTempTupleType(newResults));
+ modifySignature(types[i], newSig);
+ typeBuilder.setHeapType(i, newSig);
+ } else if (type.isStruct()) {
+ auto struct_ = type.getStruct();
+ // Start with a copy to get mutability/packing/etc.
+ auto newStruct = struct_;
+ for (auto& field : newStruct.fields) {
+ field.type = getTempType(field.type);
+ }
+ modifyStruct(types[i], newStruct);
+ typeBuilder.setHeapType(i, newStruct);
+ } else if (type.isArray()) {
+ auto array = type.getArray();
+ // Start with a copy to get mutability/packing/etc.
+ auto newArray = array;
+ newArray.element.type = getTempType(newArray.element.type);
+ modifyArray(types[i], newArray);
+ typeBuilder.setHeapType(i, newArray);
+ } else {
+ WASM_UNREACHABLE("bad type");
+ }
+
+ // Apply a supertype, if there is one.
+ HeapType super;
+ if (type.getSuperType(super)) {
+ typeBuilder.setSubType(i, typeIndices[super]);
+ }
+ }
+
+ auto newTypes = typeBuilder.build();
+
+ // Map the old types to the new ones. This uses the fact that type indices
+ // are the same in the old and new types, that is, we have not added or
+ // removed types, just modified them.
+ using OldToNewTypes = std::unordered_map<HeapType, HeapType>;
+ OldToNewTypes oldToNewTypes;
+ for (Index i = 0; i < types.size(); i++) {
+ oldToNewTypes[types[i]] = newTypes[i];
+ }
+
+ // Replace all the old types in the module with the new ones.
+ struct CodeUpdater
+ : public WalkerPass<
+ PostWalker<CodeUpdater, UnifiedExpressionVisitor<CodeUpdater>>> {
+ bool isFunctionParallel() override { return true; }
+
+ OldToNewTypes& oldToNewTypes;
+
+ CodeUpdater(OldToNewTypes& oldToNewTypes) : oldToNewTypes(oldToNewTypes) {}
+
+ CodeUpdater* create() override { return new CodeUpdater(oldToNewTypes); }
+
+ Type getNew(Type type) {
+ if (type.isRef()) {
+ return Type(getNew(type.getHeapType()), type.getNullability());
+ }
+ if (type.isRtt()) {
+ return Type(Rtt(type.getRtt().depth, getNew(type.getHeapType())));
+ }
+ return type;
+ }
+
+ HeapType getNew(HeapType type) {
+ if (type.isBasic()) {
+ return type;
+ }
+ if (type.isFunction() || type.isData()) {
+ assert(oldToNewTypes.count(type));
+ return oldToNewTypes[type];
+ }
+ return type;
+ }
+
+ Signature getNew(Signature sig) {
+ return Signature(getNew(sig.params), getNew(sig.results));
+ }
+
+ void visitExpression(Expression* curr) {
+ // Update the type to the new one.
+ curr->type = getNew(curr->type);
+
+ // Update any other type fields as well.
+
+#define DELEGATE_ID curr->_id
+
+#define DELEGATE_START(id) \
+ auto* cast = curr->cast<id>(); \
+ WASM_UNUSED(cast);
+
+#define DELEGATE_GET_FIELD(id, name) cast->name
+
+#define DELEGATE_FIELD_TYPE(id, name) cast->name = getNew(cast->name);
+
+#define DELEGATE_FIELD_HEAPTYPE(id, name) cast->name = getNew(cast->name);
+
+#define DELEGATE_FIELD_SIGNATURE(id, name) cast->name = getNew(cast->name);
+
+#define DELEGATE_FIELD_CHILD(id, name)
+#define DELEGATE_FIELD_OPTIONAL_CHILD(id, name)
+#define DELEGATE_FIELD_INT(id, name)
+#define DELEGATE_FIELD_INT_ARRAY(id, name)
+#define DELEGATE_FIELD_LITERAL(id, name)
+#define DELEGATE_FIELD_NAME(id, name)
+#define DELEGATE_FIELD_NAME_VECTOR(id, name)
+#define DELEGATE_FIELD_SCOPE_NAME_DEF(id, name)
+#define DELEGATE_FIELD_SCOPE_NAME_USE(id, name)
+#define DELEGATE_FIELD_SCOPE_NAME_USE_VECTOR(id, name)
+#define DELEGATE_FIELD_ADDRESS(id, name)
+
+#include "wasm-delegations-fields.def"
+ }
+ };
+
+ CodeUpdater updater(oldToNewTypes);
+ PassRunner runner(&wasm);
+ updater.run(&runner, &wasm);
+ updater.walkModuleCode(&wasm);
+
+ // Update global locations that refer to types.
+ for (auto& table : wasm.tables) {
+ table->type = updater.getNew(table->type);
+ }
+ for (auto& elementSegment : wasm.elementSegments) {
+ elementSegment->type = updater.getNew(elementSegment->type);
+ }
+ for (auto& global : wasm.globals) {
+ global->type = updater.getNew(global->type);
+ }
+ for (auto& func : wasm.functions) {
+ func->type = updater.getNew(func->type);
+ for (auto& var : func->vars) {
+ var = updater.getNew(var);
+ }
+ }
+ for (auto& tag : wasm.tags) {
+ tag->sig = updater.getNew(tag->sig);
+ }
+
+ // Update type names.
+ for (auto& kv : oldToNewTypes) {
+ auto old = kv.first;
+ auto new_ = kv.second;
+ if (wasm.typeNames.count(old)) {
+ wasm.typeNames[new_] = wasm.typeNames[old];
+ }
+ }
+}
+
+Type GlobalTypeRewriter::getTempType(Type type) {
+ if (type.isBasic()) {
+ return type;
+ }
+ if (type.isRef()) {
+ auto heapType = type.getHeapType();
+ if (!typeIndices.count(heapType)) {
+ // This type was not present in the module, but is now being used when
+ // defining new types. That is fine; just use it.
+ return type;
+ }
+ return typeBuilder.getTempRefType(
+ typeBuilder.getTempHeapType(typeIndices[heapType]),
+ type.getNullability());
+ }
+ if (type.isRtt()) {
+ auto rtt = type.getRtt();
+ auto newRtt = rtt;
+ auto heapType = type.getHeapType();
+ if (!typeIndices.count(heapType)) {
+ // See above with references.
+ return type;
+ }
+ newRtt.heapType = typeBuilder.getTempHeapType(typeIndices[heapType]);
+ return typeBuilder.getTempRttType(newRtt);
+ }
+ if (type.isTuple()) {
+ auto& tuple = type.getTuple();
+ auto newTuple = tuple;
+ for (auto& t : newTuple.types) {
+ t = getTempType(t);
+ }
+ return typeBuilder.getTempTupleType(newTuple);
+ }
+ WASM_UNREACHABLE("bad type");
+}
+
namespace TypeUpdating {
bool canHandleAsLocal(Type type) {
diff --git a/src/ir/type-updating.h b/src/ir/type-updating.h
index 4668c0ad5..83c1e1aa1 100644
--- a/src/ir/type-updating.h
+++ b/src/ir/type-updating.h
@@ -305,6 +305,43 @@ struct TypeUpdater
}
};
+// Rewrites global heap types across an entire module, allowing changes to be
+// made while doing so.
+class GlobalTypeRewriter {
+public:
+ GlobalTypeRewriter(Module& wasm);
+ virtual ~GlobalTypeRewriter() {}
+
+ // Main entry point. This performs the entire process of creating new heap
+ // types and calling the hooks below, then applies the new types throughout
+ // the module.
+ void update();
+
+ // Subclasses can implement these methods to modify the new set of types that
+ // we map to. By default, we simply copy over the types, and these functions
+ // are the hooks to apply changes through. The methods receive as input the
+ // old type, and a structure that they can modify. That structure is the one
+ // used to define the new type in the TypeBuilder.
+ virtual void modifyStruct(HeapType oldType, Struct& struct_) {}
+ virtual void modifyArray(HeapType oldType, Array& array) {}
+ virtual void modifySignature(HeapType oldType, Signature& sig) {}
+
+ // Map an old type to a temp type. This can be called from the above hooks,
+ // so that they can use a proper temp type of the TypeBuilder while modifying
+ // things.
+ Type getTempType(Type type);
+
+private:
+ Module& wasm;
+ TypeBuilder typeBuilder;
+
+ // The list of old types.
+ std::vector<HeapType> types;
+
+ // Type indices of the old types.
+ std::unordered_map<HeapType, Index> typeIndices;
+};
+
namespace TypeUpdating {
// Checks whether a type is valid as a local, or whether
diff --git a/src/passes/CMakeLists.txt b/src/passes/CMakeLists.txt
index a49ae8980..4ee7fe9d1 100644
--- a/src/passes/CMakeLists.txt
+++ b/src/passes/CMakeLists.txt
@@ -87,6 +87,7 @@ set(passes_SOURCES
SSAify.cpp
Untee.cpp
Vacuum.cpp
+ GlobalTypeOptimization.cpp
${CMAKE_CURRENT_BINARY_DIR}/WasmIntrinsics.cpp
${passes_HEADERS}
)
diff --git a/src/passes/GlobalTypeOptimization.cpp b/src/passes/GlobalTypeOptimization.cpp
new file mode 100644
index 000000000..89d5b8c0a
--- /dev/null
+++ b/src/passes/GlobalTypeOptimization.cpp
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2021 WebAssembly Community Group participants
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// Optimize types at the global level, altering fields etc. on the set of heap
+// types defined in the module.
+//
+// * Immutability: If a field has no struct.set, it can become immutable.
+//
+// TODO: Specialize field types.
+// TODO: Remove unused fields.
+//
+
+#include "ir/struct-utils.h"
+#include "ir/subtypes.h"
+#include "ir/type-updating.h"
+#include "ir/utils.h"
+#include "pass.h"
+#include "support/small_set.h"
+#include "wasm-builder.h"
+#include "wasm-type.h"
+#include "wasm.h"
+
+using namespace std;
+
+namespace wasm {
+
+namespace {
+
+// Information about usage of a field.
+struct FieldInfo {
+ bool hasWrite = false;
+
+ void noteWrite() { hasWrite = true; }
+
+ bool combine(const FieldInfo& other) {
+ if (!hasWrite && other.hasWrite) {
+ hasWrite = true;
+ return true;
+ }
+ return false;
+ }
+};
+
+struct FieldInfoScanner : public Scanner<FieldInfo, FieldInfoScanner> {
+ Pass* create() override {
+ return new FieldInfoScanner(functionNewInfos, functionSetInfos);
+ }
+
+ FieldInfoScanner(FunctionStructValuesMap<FieldInfo>& functionNewInfos,
+ FunctionStructValuesMap<FieldInfo>& functionSetInfos)
+ : Scanner<FieldInfo, FieldInfoScanner>(functionNewInfos, functionSetInfos) {
+ }
+
+ void noteExpression(Expression* expr,
+ HeapType type,
+ Index index,
+ FieldInfo& info) {
+ info.noteWrite();
+ }
+
+ void
+ noteDefault(Type fieldType, HeapType type, Index index, FieldInfo& info) {
+ info.noteWrite();
+ }
+
+ void noteCopy(HeapType type, Index index, FieldInfo& info) {
+ info.noteWrite();
+ }
+};
+
+struct GlobalTypeOptimization : public Pass {
+ void run(PassRunner* runner, Module* module) override {
+ if (getTypeSystem() != TypeSystem::Nominal) {
+ Fatal() << "GlobalTypeOptimization requires nominal typing";
+ }
+
+ // Find and analyze struct operations inside each function.
+ FunctionStructValuesMap<FieldInfo> functionNewInfos(*module),
+ functionSetInfos(*module);
+ FieldInfoScanner scanner(functionNewInfos, functionSetInfos);
+ scanner.run(runner, module);
+ scanner.walkModuleCode(module);
+
+ // Combine the data from the functions.
+ StructValuesMap<FieldInfo> combinedNewInfos, combinedSetInfos;
+ functionSetInfos.combineInto(combinedSetInfos);
+ // TODO: combine newInfos as well, once we have a need for that (we will
+ // when we do things like subtyping).
+
+ // Find which fields are immutable in all super- and sub-classes. To see
+ // that, propagate sets in both directions. This is necessary because we
+ // cannot have a supertype's field be immutable while a subtype's is not -
+ // they must match for us to preserve subtyping.
+ //
+ // Note that we do not need to care about types here: If the fields were
+ // mutable before, then they must have had identical types for them to be
+ // subtypes (as wasm only allows the type to differ if the fields are
+ // immutable). Note that by making more things immutable we therefore make
+ // it possible to apply more specific subtypes in subtype fields.
+ TypeHierarchyPropagator<FieldInfo> propagator(*module);
+ propagator.propagateToSuperAndSubTypes(combinedSetInfos);
+
+ // Maps types to a vector of booleans that indicate if we can turn the
+ // field immutable. To avoid eager allocation of memory, the vectors are
+ // only resized when we actually have a true to place in them (which is
+ // rare).
+ using CanBecomeImmutable = std::unordered_map<HeapType, std::vector<bool>>;
+ CanBecomeImmutable canBecomeImmutable;
+
+ for (auto type : propagator.subTypes.types) {
+ if (!type.isStruct()) {
+ continue;
+ }
+
+ auto& fields = type.getStruct().fields;
+ for (Index i = 0; i < fields.size(); i++) {
+ if (fields[i].mutable_ == Immutable) {
+ // Already immutable; nothing to do.
+ continue;
+ }
+
+ if (combinedSetInfos[type][i].hasWrite) {
+ // A set exists.
+ continue;
+ }
+
+ // No set exists. Mark it as something we can make immutable.
+ auto& vec = canBecomeImmutable[type];
+ vec.resize(i + 1);
+ vec[i] = true;
+ }
+ }
+
+ // We now know which fields can become immutable. Rewrite the type
+ // definitions to apply those changes.
+ class TypeRewriter : public GlobalTypeRewriter {
+ CanBecomeImmutable& canBecomeImmutable;
+
+ public:
+ TypeRewriter(Module& wasm, CanBecomeImmutable& canBecomeImmutable)
+ : GlobalTypeRewriter(wasm), canBecomeImmutable(canBecomeImmutable) {}
+
+ virtual void modifyStruct(HeapType oldStructType, Struct& struct_) {
+ if (!canBecomeImmutable.count(oldStructType)) {
+ return;
+ }
+
+ auto& newFields = struct_.fields;
+ auto& immutableVec = canBecomeImmutable[oldStructType];
+ for (Index i = 0; i < immutableVec.size(); i++) {
+ if (immutableVec[i]) {
+ newFields[i].mutable_ = Immutable;
+ }
+ }
+ }
+ };
+
+ TypeRewriter(*module, canBecomeImmutable).update();
+ }
+};
+
+} // anonymous namespace
+
+Pass* createGlobalTypeOptimizationPass() {
+ return new GlobalTypeOptimization();
+}
+
+} // namespace wasm
diff --git a/src/passes/pass.cpp b/src/passes/pass.cpp
index 5f53a1d78..67087c328 100644
--- a/src/passes/pass.cpp
+++ b/src/passes/pass.cpp
@@ -155,6 +155,8 @@ void PassRegistry::registerPasses() {
registerPass(
"generate-stack-ir", "generate Stack IR", createGenerateStackIRPass);
registerPass(
+ "gto", "globally optimize GC types", createGlobalTypeOptimizationPass);
+ registerPass(
"heap2local", "replace GC allocations with locals", createHeap2LocalPass);
registerPass(
"inline-main", "inline __original_main into main", createInlineMainPass);
@@ -524,6 +526,7 @@ void PassRunner::addDefaultGlobalOptimizationPrePasses() {
options.optimizeLevel >= 2) {
addIfNoDWARFIssues("cfp");
}
+ // TODO: investigate enabling --gto
}
void PassRunner::addDefaultGlobalOptimizationPostPasses() {
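As a usage note: the newly registered pass can be run programmatically like any other pass (the lit test below drives it from the command line as wasm-opt --nominal --gto). The helper here is a hypothetical sketch, not part of this patch; only the "gto" pass name and the PassRunner API are taken from the code above. Note that the pass aborts unless nominal typing is in use, as checked in GlobalTypeOptimization.cpp.

#include "pass.h"
#include "wasm.h"

// Sketch: run global type optimization on an already-constructed module.
void runGlobalTypeOptimization(wasm::Module& module) {
  wasm::PassRunner runner(&module);
  runner.add("gto"); // pass name registered in pass.cpp in this commit
  runner.run();
}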
diff --git a/src/passes/passes.h b/src/passes/passes.h
index 47b246bd4..a08e42f62 100644
--- a/src/passes/passes.h
+++ b/src/passes/passes.h
@@ -131,6 +131,7 @@ Pass* createTrapModeClamp();
Pass* createTrapModeJS();
Pass* createUnteePass();
Pass* createVacuumPass();
+Pass* createGlobalTypeOptimizationPass();
} // namespace wasm
diff --git a/test/lit/help/optimization-opts.test b/test/lit/help/optimization-opts.test
index 22a730128..db55e2bdc 100644
--- a/test/lit/help/optimization-opts.test
+++ b/test/lit/help/optimization-opts.test
@@ -271,6 +271,8 @@
;; CHECK-NEXT:
;; CHECK-NEXT: --generate-stack-ir generate Stack IR
;; CHECK-NEXT:
+;; CHECK-NEXT: --gto globally optimize GC types
+;; CHECK-NEXT:
;; CHECK-NEXT: --heap2local replace GC allocations with
;; CHECK-NEXT: locals
;; CHECK-NEXT:
diff --git a/test/lit/passes/gto-mutability.wast b/test/lit/passes/gto-mutability.wast
new file mode 100644
index 000000000..83b9f4bf5
--- /dev/null
+++ b/test/lit/passes/gto-mutability.wast
@@ -0,0 +1,348 @@
+;; NOTE: Assertions have been generated by update_lit_checks.py --all-items and should not be edited.
+;; RUN: foreach %s %t wasm-opt --nominal --gto -all -S -o - | filecheck %s
+;; (remove-unused-names is added to test fallthrough values without a block
+;; name getting in the way)
+
+(module
+ ;; The struct here has three fields, and the second of them has no struct.set
+ ;; which means we can make it immutable.
+
+ ;; CHECK: (type $struct (struct (field (mut funcref)) (field funcref) (field (mut funcref))))
+ (type $struct (struct (field (mut funcref)) (field (mut funcref)) (field (mut funcref))))
+
+ ;; Test that we update tag types properly.
+ ;; CHECK: (type $ref|$struct|_=>_none (func (param (ref $struct))))
+
+ ;; CHECK: (type $none_=>_ref?|$struct| (func (result (ref null $struct))))
+
+ ;; CHECK: (tag $tag (param (ref $struct)))
+ (tag $tag (param (ref $struct)))
+
+ ;; CHECK: (func $func (param $x (ref $struct))
+ ;; CHECK-NEXT: (local $temp (ref null $struct))
+ ;; CHECK-NEXT: (drop
+ ;; CHECK-NEXT: (struct.new $struct
+ ;; CHECK-NEXT: (ref.null func)
+ ;; CHECK-NEXT: (ref.null func)
+ ;; CHECK-NEXT: (ref.null func)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: (struct.set $struct 0
+ ;; CHECK-NEXT: (local.get $x)
+ ;; CHECK-NEXT: (ref.null func)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: (struct.set $struct 2
+ ;; CHECK-NEXT: (local.get $x)
+ ;; CHECK-NEXT: (ref.null func)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: (local.set $temp
+ ;; CHECK-NEXT: (local.get $x)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: (drop
+ ;; CHECK-NEXT: (struct.get $struct 0
+ ;; CHECK-NEXT: (local.get $x)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: (drop
+ ;; CHECK-NEXT: (struct.get $struct 1
+ ;; CHECK-NEXT: (local.get $x)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $func (param $x (ref $struct))
+ (local $temp (ref null $struct))
+ ;; The presence of a struct.new does not prevent this optimization: we just
+ ;; care about writes using struct.set.
+ (drop
+ (struct.new $struct
+ (ref.null func)
+ (ref.null func)
+ (ref.null func)
+ )
+ )
+ (struct.set $struct 0
+ (local.get $x)
+ (ref.null func)
+ )
+ (struct.set $struct 2
+ (local.get $x)
+ (ref.null func)
+ )
+ ;; Test that local types remain valid after our work (otherwise, we'd get a
+ ;; validation error).
+ (local.set $temp
+ (local.get $x)
+ )
+ ;; Test that struct.get types remain valid after our work.
+ (drop
+ (struct.get $struct 0
+ (local.get $x)
+ )
+ )
+ (drop
+ (struct.get $struct 1
+ (local.get $x)
+ )
+ )
+ )
+
+ ;; CHECK: (func $foo (result (ref null $struct))
+ ;; CHECK-NEXT: (try $try
+ ;; CHECK-NEXT: (do
+ ;; CHECK-NEXT: (nop)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: (catch $tag
+ ;; CHECK-NEXT: (return
+ ;; CHECK-NEXT: (pop (ref $struct))
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: (ref.null $struct)
+ ;; CHECK-NEXT: )
+ (func $foo (result (ref null $struct))
+ ;; Use a tag so that we test proper updating of its type after making
+ ;; changes.
+ (try
+ (do
+ (nop)
+ )
+ (catch $tag
+ (return
+ (pop (ref $struct))
+ )
+ )
+ )
+ (ref.null $struct)
+ )
+)
+
+(module
+ ;; Test recursion between structs where we only modify one. Specifically $B
+ ;; has no writes to either of its fields.
+
+ ;; CHECK: (type $A (struct (field (mut (ref null $B))) (field (mut i32))))
+ (type $A (struct (field (mut (ref null $B))) (field (mut i32)) ))
+ ;; CHECK: (type $B (struct (field (ref null $A)) (field f64)))
+ (type $B (struct (field (mut (ref null $A))) (field (mut f64)) ))
+
+ ;; CHECK: (type $ref|$A|_=>_none (func (param (ref $A))))
+
+ ;; CHECK: (func $func (param $x (ref $A))
+ ;; CHECK-NEXT: (struct.set $A 0
+ ;; CHECK-NEXT: (local.get $x)
+ ;; CHECK-NEXT: (ref.null $B)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: (struct.set $A 1
+ ;; CHECK-NEXT: (local.get $x)
+ ;; CHECK-NEXT: (i32.const 20)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $func (param $x (ref $A))
+ (struct.set $A 0
+ (local.get $x)
+ (ref.null $B)
+ )
+ (struct.set $A 1
+ (local.get $x)
+ (i32.const 20)
+ )
+ )
+)
+
+(module
+ ;; As before, but flipped so that $A's fields can become immutable.
+
+ ;; CHECK: (type $B (struct (field (mut (ref null $A))) (field (mut f64))))
+ (type $B (struct (field (mut (ref null $A))) (field (mut f64)) ))
+
+ ;; CHECK: (type $A (struct (field (ref null $B)) (field i32)))
+ (type $A (struct (field (mut (ref null $B))) (field (mut i32)) ))
+
+ ;; CHECK: (type $ref|$B|_=>_none (func (param (ref $B))))
+
+ ;; CHECK: (func $func (param $x (ref $B))
+ ;; CHECK-NEXT: (struct.set $B 0
+ ;; CHECK-NEXT: (local.get $x)
+ ;; CHECK-NEXT: (ref.null $A)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: (struct.set $B 1
+ ;; CHECK-NEXT: (local.get $x)
+ ;; CHECK-NEXT: (f64.const 3.14159)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $func (param $x (ref $B))
+ (struct.set $B 0
+ (local.get $x)
+ (ref.null $A)
+ )
+ (struct.set $B 1
+ (local.get $x)
+ (f64.const 3.14159)
+ )
+ )
+)
+
+(module
+ ;; As before, but now one field in each can become immutable.
+
+ ;; CHECK: (type $B (struct (field (ref null $A)) (field (mut f64))))
+ (type $B (struct (field (mut (ref null $A))) (field (mut f64)) ))
+
+ ;; CHECK: (type $A (struct (field (mut (ref null $B))) (field i32)))
+ (type $A (struct (field (mut (ref null $B))) (field (mut i32)) ))
+
+ ;; CHECK: (type $ref|$A|_ref|$B|_=>_none (func (param (ref $A) (ref $B))))
+
+ ;; CHECK: (func $func (param $x (ref $A)) (param $y (ref $B))
+ ;; CHECK-NEXT: (struct.set $A 0
+ ;; CHECK-NEXT: (local.get $x)
+ ;; CHECK-NEXT: (ref.null $B)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: (struct.set $B 1
+ ;; CHECK-NEXT: (local.get $y)
+ ;; CHECK-NEXT: (f64.const 3.14159)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $func (param $x (ref $A)) (param $y (ref $B))
+ (struct.set $A 0
+ (local.get $x)
+ (ref.null $B)
+ )
+ (struct.set $B 1
+ (local.get $y)
+ (f64.const 3.14159)
+ )
+ )
+)
+
+(module
+ ;; Field #0 is already immutable.
+ ;; Field #1 is mutable and can become so.
+ ;; Field #2 is mutable and must remain so.
+
+ ;; CHECK: (type $struct (struct (field i32) (field i32) (field (mut i32))))
+ (type $struct (struct (field i32) (field (mut i32)) (field (mut i32))))
+
+ ;; CHECK: (type $ref|$struct|_=>_none (func (param (ref $struct))))
+
+ ;; CHECK: (func $func (param $x (ref $struct))
+ ;; CHECK-NEXT: (struct.set $struct 2
+ ;; CHECK-NEXT: (local.get $x)
+ ;; CHECK-NEXT: (i32.const 1)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $func (param $x (ref $struct))
+ (struct.set $struct 2
+ (local.get $x)
+ (i32.const 1)
+ )
+ )
+)
+
+(module
+ ;; Subtyping. Without a write in either supertype or subtype, we can
+ ;; optimize the field to be immutable.
+
+ ;; CHECK: (type $none_=>_none (func))
+
+ ;; CHECK: (type $super (struct (field i32)))
+ (type $super (struct (field (mut i32))))
+ ;; CHECK: (type $sub (struct (field i32)) (extends $super))
+ (type $sub (struct (field (mut i32))) (extends $super))
+
+ ;; CHECK: (func $func
+ ;; CHECK-NEXT: (drop
+ ;; CHECK-NEXT: (struct.new $super
+ ;; CHECK-NEXT: (i32.const 1)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: (drop
+ ;; CHECK-NEXT: (struct.new $sub
+ ;; CHECK-NEXT: (i32.const 1)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $func
+ ;; The presence of struct.new does not prevent us from optimizing.
+ (drop
+ (struct.new $super
+ (i32.const 1)
+ )
+ )
+ (drop
+ (struct.new $sub
+ (i32.const 1)
+ )
+ )
+ )
+)
+
+(module
+ ;; As above, but add a write in the super, which prevents optimization.
+
+ ;; CHECK: (type $super (struct (field (mut i32))))
+ (type $super (struct (field (mut i32))))
+ ;; CHECK: (type $ref|$super|_=>_none (func (param (ref $super))))
+
+ ;; CHECK: (type $sub (struct (field (mut i32))) (extends $super))
+ (type $sub (struct (field (mut i32))) (extends $super))
+
+ ;; CHECK: (func $func (param $x (ref $super))
+ ;; CHECK-NEXT: (drop
+ ;; CHECK-NEXT: (struct.new $super
+ ;; CHECK-NEXT: (i32.const 1)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: (drop
+ ;; CHECK-NEXT: (struct.new $sub
+ ;; CHECK-NEXT: (i32.const 1)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: (struct.set $super 0
+ ;; CHECK-NEXT: (local.get $x)
+ ;; CHECK-NEXT: (i32.const 2)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $func (param $x (ref $super))
+ ;; The presence of struct.new does not prevent us from optimizing.
+ (drop
+ (struct.new $super
+ (i32.const 1)
+ )
+ )
+ (drop
+ (struct.new $sub
+ (i32.const 1)
+ )
+ )
+ (struct.set $super 0
+ (local.get $x)
+ (i32.const 2)
+ )
+ )
+)
+
+(module
+ ;; As above, but add a write in the sub, which prevents optimization.
+
+ ;; CHECK: (type $sub (struct (field (mut i32))) (extends $super))
+
+ ;; CHECK: (type $ref|$sub|_=>_none (func (param (ref $sub))))
+
+ ;; CHECK: (type $super (struct (field (mut i32))))
+ (type $super (struct (field (mut i32))))
+ (type $sub (struct (field (mut i32))) (extends $super))
+
+ ;; CHECK: (func $func (param $x (ref $sub))
+ ;; CHECK-NEXT: (struct.set $sub 0
+ ;; CHECK-NEXT: (local.get $x)
+ ;; CHECK-NEXT: (i32.const 2)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $func (param $x (ref $sub))
+ (struct.set $sub 0
+ (local.get $x)
+ (i32.const 2)
+ )
+ )
+)