-rw-r--r--   src/wasm/literal.cpp            | 43
-rw-r--r--   test/passes/fuzz-exec_O.txt     | 40
-rw-r--r--   test/passes/fuzz-exec_O.wast    | 29
-rw-r--r--   test/spec/old_float_exprs.wast  |  6
4 files changed, 75 insertions, 43 deletions
diff --git a/src/wasm/literal.cpp b/src/wasm/literal.cpp
index ec8642865..ffb98abe4 100644
--- a/src/wasm/literal.cpp
+++ b/src/wasm/literal.cpp
@@ -863,10 +863,35 @@ Literal Literal::mul(const Literal& other) const {
       return Literal(uint32_t(i32) * uint32_t(other.i32));
     case Type::i64:
       return Literal(uint64_t(i64) * uint64_t(other.i64));
-    case Type::f32:
-      return Literal(getf32() * other.getf32());
-    case Type::f64:
-      return Literal(getf64() * other.getf64());
+    case Type::f32: {
+      // Special-case multiplication by 1. nan * 1 can change nan bits per the
+      // wasm spec, but it is ok to just return that original nan, and we
+      // do that here so that we are consistent with the optimization of
+      // removing the * 1 and leaving just the nan. That is, if we just
+      // do a normal multiply and the CPU decides to change the bits, we'd
+      // give a different result on optimized code, which would look like
+      // it was a bad optimization. So out of all the valid results to
+      // return here, return the simplest one that is consistent with
+      // our optimization for the case of 1.
+      float lhs = getf32(), rhs = other.getf32();
+      if (rhs == 1) {
+        return Literal(lhs);
+      }
+      if (lhs == 1) {
+        return Literal(rhs);
+      }
+      return Literal(lhs * rhs);
+    }
+    case Type::f64: {
+      double lhs = getf64(), rhs = other.getf64();
+      if (rhs == 1) {
+        return Literal(lhs);
+      }
+      if (lhs == 1) {
+        return Literal(rhs);
+      }
+      return Literal(lhs * rhs);
+    }
     case Type::v128:
     case Type::funcref:
     case Type::externref:
@@ -903,15 +928,7 @@ Literal Literal::div(const Literal& other) const {
         case FP_INFINITE: // fallthrough
         case FP_NORMAL:   // fallthrough
         case FP_SUBNORMAL:
-          // Special-case division by 1. nan / 1 can change nan bits per the
-          // wasm spec, but it is ok to just return that original nan, and we
-          // do that here so that we are consistent with the optimization of
-          // removing the / 1 and leaving just the nan. That is, if we just
-          // do a normal divide and the CPU decides to change the bits, we'd
-          // give a different result on optimized code, which would look like
-          // it was a bad optimization. So out of all the valid results to
-          // return here, return the simplest one that is consistent with
-          // optimization.
+          // Special-case division by 1, similar to multiply from earlier.
           if (rhs == 1) {
             return Literal(lhs);
           }
diff --git a/test/passes/fuzz-exec_O.txt b/test/passes/fuzz-exec_O.txt
index 9c8280586..ef8e165bb 100644
--- a/test/passes/fuzz-exec_O.txt
+++ b/test/passes/fuzz-exec_O.txt
@@ -30,22 +30,30 @@
 [trap final > memory: 18446744073709551615 > 65514]
 [fuzz-exec] comparing func_0
 [fuzz-exec] comparing func_1
-[fuzz-exec] calling func_113
-[LoggingExternalInterface logging -nan:0x23017a]
-[fuzz-exec] note result: func_113 => 113
+[fuzz-exec] calling div
+[fuzz-exec] note result: div => -nan:0x23017a
+[fuzz-exec] calling mul1
+[fuzz-exec] note result: mul1 => -nan:0x34546d
+[fuzz-exec] calling mul2
+[fuzz-exec] note result: mul2 => -nan:0x34546d
 (module
-  (type $f32_=>_none (func (param f32)))
-  (type $none_=>_i64 (func (result i64)))
-  (import "fuzzing-support" "log-f32" (func $fimport$0 (param f32)))
-  (export "func_113" (func $0))
-  (func $0 (; has Stack IR ;) (result i64)
-    (call $fimport$0
-      (f32.const -nan:0x23017a)
-    )
-    (i64.const 113)
+  (type $none_=>_f32 (func (result f32)))
+  (export "div" (func $0))
+  (export "mul1" (func $1))
+  (export "mul2" (func $1))
+  (func $0 (; has Stack IR ;) (result f32)
+    (f32.const -nan:0x23017a)
+  )
+  (func $1 (; has Stack IR ;) (result f32)
+    (f32.const -nan:0x34546d)
   )
 )
-[fuzz-exec] calling func_113
-[LoggingExternalInterface logging -nan:0x23017a]
-[fuzz-exec] note result: func_113 => 113
-[fuzz-exec] comparing func_113
+[fuzz-exec] calling div
+[fuzz-exec] note result: div => -nan:0x23017a
+[fuzz-exec] calling mul1
+[fuzz-exec] note result: mul1 => -nan:0x34546d
+[fuzz-exec] calling mul2
+[fuzz-exec] note result: mul2 => -nan:0x34546d
+[fuzz-exec] comparing div
+[fuzz-exec] comparing mul1
+[fuzz-exec] comparing mul2
diff --git a/test/passes/fuzz-exec_O.wast b/test/passes/fuzz-exec_O.wast
index 44bbd7101..5c739c548 100644
--- a/test/passes/fuzz-exec_O.wast
+++ b/test/passes/fuzz-exec_O.wast
@@ -21,18 +21,23 @@
   )
 )
 (module
-  (type $f32_=>_none (func (param f32)))
-  (type $none_=>_i64 (func (result i64)))
-  (import "fuzzing-support" "log-f32" (func $fimport$0 (param f32)))
-  (export "func_113" (func $0))
-  (func $0 (result i64)
-    (call $fimport$0
-      (f32.div
-        (f32.const -nan:0x23017a) ;; div by 1 can be removed, leaving this nan
-        (f32.const 1)             ;; as it is. wasm semantics allow nan bits to
-      )                           ;; change, but the interpreter should not do so,
-    )                             ;; so that it does not fail on that opt.
-    (i64.const 113)
+  (func "div" (result f32)
+    (f32.div                    ;; div by 1 can be removed, leaving this nan
+      (f32.const -nan:0x23017a) ;; as it is. wasm semantics allow nan bits to
+      (f32.const 1)             ;; change, but the interpreter should not do so,
+    )                           ;; so that it does not fail on that opt.
+  )
+  (func "mul1" (result f32)
+    (f32.mul
+      (f32.const -nan:0x34546d)
+      (f32.const 1)
+    )
+  )
+  (func "mul2" (result f32)
+    (f32.mul
+      (f32.const 1)
+      (f32.const -nan:0x34546d)
+    )
   )
 )
diff --git a/test/spec/old_float_exprs.wast b/test/spec/old_float_exprs.wast
index 44515a32e..ca031114f 100644
--- a/test/spec/old_float_exprs.wast
+++ b/test/spec/old_float_exprs.wast
@@ -103,8 +103,10 @@
     (f64.mul (local.get $x) (f64.const 1.0)))
 )

-(assert_return (invoke "f32.no_fold_mul_one" (f32.const nan:0x200000)) (f32.const nan:0x600000))
-(assert_return (invoke "f64.no_fold_mul_one" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000))
+;; XXX BINARYEN: disable this test, as we have testing for the more strict property
+;; of not changing the bits at all in our interpreter
+;; (assert_return (invoke "f32.no_fold_mul_one" (f32.const nan:0x200000)) (f32.const nan:0x600000))
+;; (assert_return (invoke "f64.no_fold_mul_one" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000))

 ;; Test that 0.0/x is not folded to 0.0.
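
Note: the program below is an illustrative sketch only, not part of the patch and not the Binaryen API. The helper names mulPreservingNan and bitsOf are invented for this example. It mirrors the identity-operand check added to Literal::mul in standalone C++, to show why returning the untouched operand keeps a nan's payload bits stable, whereas a plain hardware multiply by 1 is permitted by the wasm spec to change them, which is the divergence between optimized and interpreted code that the fuzzer observed.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical helper mirroring the patched Literal::mul logic for f32:
// if either operand is exactly 1, hand back the other operand unchanged,
// so a nan's payload bits survive, matching the optimizer that drops the
// "* 1" entirely.
static float mulPreservingNan(float lhs, float rhs) {
  if (rhs == 1) {
    return lhs;
  }
  if (lhs == 1) {
    return rhs;
  }
  return lhs * rhs;
}

static std::uint32_t bitsOf(float f) {
  std::uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  return bits;
}

int main() {
  // Construct -nan:0x34546d, the payload used in the mul1/mul2 tests:
  // sign = 1, exponent = all ones, fraction = 0x34546d.
  std::uint32_t nanBits = 0xffb4546d;
  float nanValue;
  std::memcpy(&nanValue, &nanBits, sizeof(nanValue));

  // The special-cased path returns the operand as-is, so the bits match.
  std::printf("special-cased nan * 1: 0x%08x\n",
              (unsigned)bitsOf(mulPreservingNan(nanValue, 1.0f)));
  // A plain multiply may legally produce different nan bits on some hosts
  // (for example by quieting the nan), which is what the interpreter
  // change avoids.
  std::printf("plain         nan * 1: 0x%08x\n",
              (unsigned)bitsOf(nanValue * 1.0f));
  return 0;
}

On a typical host the first line prints the original bit pattern, while the second may print a quieted variant; the patch makes the interpreter always report the first, consistent with the "* 1" / "/ 1" removal in the optimizer.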