authorAlon Zakai <azakai@google.com>2021-05-06 10:56:12 -0700
committerGitHub <noreply@github.com>2021-05-06 10:56:12 -0700
commitc79a0f8ab95e17203feeb919dcf4424ac6522110 (patch)
treebcfe7be651f884fa44352dbfb0f7dee3d440181e
parentc84116dd715c3e548f6de1b8d2e2c29b5e248132 (diff)
downloadbinaryen-c79a0f8ab95e17203feeb919dcf4424ac6522110.tar.gz
binaryen-c79a0f8ab95e17203feeb919dcf4424ac6522110.tar.bz2
binaryen-c79a0f8ab95e17203feeb919dcf4424ac6522110.zip
Fix interpretation of a ref.cast of a function that is not on the module (#3863)
Binaryen allows optimizing functions in function-parallel passes while the module is still being built, that is, while not all the other functions have even been added to the module yet. Since the removal of asm2wasm that path has not been heavily tested, but the fuzzer found a closely related bug: in passes like inlining-optimizing, which inline and then optimize the functions we inlined into, the mechanism for optimizing only the relevant functions is to create a module containing just some of them. (We only want to optimize the relevant ones - the ones we inlined into - because this happens after the main optimization pipeline: we don't want to re-optimize all the functions if we just inlined into one of them.)

The specific bug here is that ref.cast of a funcref looked up the target function on the module (in order to get its signature, to see if the cast has the right RTT for it). The fix is to return a nonconstant flow in that case, as it is something we cannot precompute. (This does mean we may miss some optimization opportunities, but as in the case where we optimize functions before the module is fully built up, we still get 99% of function-local optimizations, and a subsequent round of full optimizations can be done later if necessary.)
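To make the failure mode concrete, here is a minimal self-contained sketch; the Module and Function types below are simplified stand-ins for illustration, not Binaryen's real classes. When the working module holds only the functions being re-optimized, a lookup by name can legitimately come up empty, so the cast logic must fall back to "cannot precompute" rather than assert:

  #include <iostream>
  #include <string>
  #include <unordered_map>

  // Simplified stand-ins for illustration; not Binaryen's actual API.
  struct Function {
    std::string name;
  };

  struct Module {
    std::unordered_map<std::string, Function> functions;
    // Like Binaryen's getFunctionOrNull: returns nullptr if the function
    // has not been added (yet).
    Function* getFunctionOrNull(const std::string& name) {
      auto it = functions.find(name);
      return it == functions.end() ? nullptr : &it->second;
    }
  };

  // Resolve the cast target, returning nullptr to mean "give up and
  // report a nonconstant result" instead of asserting.
  Function* resolveCastTarget(Module* module, const std::string& target) {
    // The working module may be absent, or may contain only the
    // functions that inlining touched.
    return module ? module->getFunctionOrNull(target) : nullptr;
  }

  int main() {
    Module partial;
    partial.functions.emplace("1", Function{"1"}); // only the inlined-into function
    // "0" was inlined from but never added to this working module:
    Function* func = resolveCastTarget(&partial, "0");
    std::cout << (func ? "found" : "not found: skip precomputing") << "\n";
  }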
-rw-r--r--src/wasm-interpreter.h15
-rw-r--r--test/lit/passes/inlining-optimizing.wast41
2 files changed, 53 insertions, 3 deletions
diff --git a/src/wasm-interpreter.h b/src/wasm-interpreter.h
index 844716f95..42bcbaebc 100644
--- a/src/wasm-interpreter.h
+++ b/src/wasm-interpreter.h
@@ -1440,9 +1440,18 @@ public:
if (cast.originalRef.isFunction()) {
// Function casts are simple in that they have no RTT hierarchies; instead
// each reference has the canonical RTT for the signature.
- // We must have a module in order to perform the cast, to get the type.
- assert(module);
- auto* func = module->getFunction(cast.originalRef.getFunc());
+ // We must have a module in order to perform the cast, to get the type. If
+ // we do not have one, or if the function is not present (which may happen
+ // if we are optimizing a function before the entire module is built),
+ // then this is not something we can precompute.
+ auto* func = module
+ ? module->getFunctionOrNull(cast.originalRef.getFunc())
+ : nullptr;
+ if (!func) {
+ cast.outcome = cast.Break;
+ cast.breaking = NONCONSTANT_FLOW;
+ return cast;
+ }
seenRtt = Literal(Type(Rtt(0, func->sig)));
cast.castRef =
Literal(func->name, Type(intendedRtt.type.getHeapType(), NonNullable));
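For context on what the breaking flow accomplishes, a hedged sketch follows; Flow, NONCONSTANT_FLOW, and the outcome enum below are simplified stand-ins modeled on the fields visible in this hunk, not the verbatim Binaryen types. A breaking flow tagged with a reserved name propagates out of the expression runner, and the precompute machinery treats that tag as "leave the expression alone":

  #include <iostream>
  #include <string>

  // Simplified stand-ins for illustration; not the verbatim Binaryen types.
  const std::string NONCONSTANT_FLOW = "*nonconstant*";

  struct Flow {
    std::string breakTo; // empty means a normal, non-breaking flow
    bool breaking() const { return !breakTo.empty(); }
  };

  struct CastResult {
    enum Outcome { Success, Break } outcome = Success;
    Flow breaking;
  };

  // The runner forwards a breaking cast result unchanged up the tree.
  Flow runCast(const CastResult& cast) {
    if (cast.outcome == CastResult::Break) {
      return cast.breaking;
    }
    return Flow{}; // a successful cast would yield a concrete value here
  }

  int main() {
    CastResult cast;
    cast.outcome = CastResult::Break;
    cast.breaking = Flow{NONCONSTANT_FLOW};
    Flow flow = runCast(cast);
    if (flow.breaking() && flow.breakTo == NONCONSTANT_FLOW) {
      // The precompute pass keeps the original ref.cast rather than
      // replacing it with a constant.
      std::cout << "cannot precompute: keep the expression\n";
    }
  }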
diff --git a/test/lit/passes/inlining-optimizing.wast b/test/lit/passes/inlining-optimizing.wast
new file mode 100644
index 000000000..bbbb385f8
--- /dev/null
+++ b/test/lit/passes/inlining-optimizing.wast
@@ -0,0 +1,41 @@
+;; NOTE: Assertions have been generated by update_lit_checks.py and should not be edited.
+;; RUN: wasm-opt %s -all --inlining-optimizing -S -o - | filecheck %s
+
+(module
+ (type $none_=>_none (func))
+ (type $none_=>_i32 (func (result i32)))
+ ;; CHECK: (func $0
+ ;; CHECK-NEXT: (nop)
+ ;; CHECK-NEXT: )
+ (func $0
+ (nop)
+ )
+ ;; CHECK: (func $1
+ ;; CHECK-NEXT: (drop
+ ;; CHECK-NEXT: (call_ref
+ ;; CHECK-NEXT: (ref.cast
+ ;; CHECK-NEXT: (ref.func $0)
+ ;; CHECK-NEXT: (rtt.canon $none_=>_i32)
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ ;; CHECK-NEXT: )
+ (func $1
+ ;; $0 will be inlined into here. We will then optimize this function - but
+ ;; we do so *without* optimizing $0 (as inlining-optimizing only optimizes
+ ;; where it inlines, for efficiency). As part of the optimizations, we will
+ ;; try to precompute the cast here, which will try to look up $0. We should
+ ;; not hit an assertion; rather, we should skip precomputing it, the same as
+ ;; if we were optimizing $1 before $0 was added to the module.
+ (call $0)
+ (drop
+ (call_ref
+ (ref.cast
+ (ref.func $0)
+ (rtt.canon $none_=>_i32)
+ )
+ )
+ )
+ )
+)
+
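The comment in $1 above describes the partial-module mechanism that triggers the lookup failure. As a rough illustration of that setup - all names below are hypothetical, not Binaryen's actual inlining code - the idea is to run the function-local pipeline over a working set containing only the functions that inlining changed:

  #include <string>
  #include <unordered_map>
  #include <vector>

  // Hypothetical stand-ins for illustration; not Binaryen's actual API.
  struct Function {
    std::string name;
    bool changedByInlining = false;
  };

  struct Module {
    std::unordered_map<std::string, Function*> functions;
  };

  // Build a working module holding only the functions that inlining
  // changed, so the function-local pipeline re-runs on just those.
  // Functions like $0 in the test above are deliberately absent, which
  // is why precompute's lookup of them must be allowed to fail.
  Module makeWorkingModule(std::vector<Function>& all) {
    Module working;
    for (auto& func : all) {
      if (func.changedByInlining) {
        working.functions[func.name] = &func;
      }
    }
    return working;
  }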