diff options
-rw-r--r-- | src/wasm/literal.cpp | 134 | ||||
-rw-r--r-- | test/passes/O_fast-math.txt | 2 | ||||
-rw-r--r-- | test/passes/fuzz-exec_O.txt | 57 | ||||
-rw-r--r-- | test/passes/optimize-instructions_fuzz-exec.txt | 255 | ||||
-rw-r--r-- | test/passes/optimize-instructions_fuzz-exec.wast | 206 | ||||
-rw-r--r-- | test/spec/old_float_exprs.wast | 48 | ||||
-rw-r--r-- | test/spec/simd.wast | 16 |
7 files changed, 593 insertions, 125 deletions
diff --git a/src/wasm/literal.cpp b/src/wasm/literal.cpp index 6d0a0fee7..6613bc1c7 100644 --- a/src/wasm/literal.cpp +++ b/src/wasm/literal.cpp @@ -837,6 +837,40 @@ Literal Literal::demote() const { return Literal(float(getf64())); } +// Wasm has nondeterministic rules for NaN propagation in some operations. For +// example, f32.neg is deterministic and just flips the sign, even of a NaN, but +// f32.add is nondeterministic, and if one or more of the inputs is a NaN, then +// +// * if all NaNs are canonical NaNs, the output is some arbitrary canonical NaN +// * otherwise the output is some arbitrary arithmetic NaN +// +// (canonical = NaN payload is 1000..000; arithmetic: 1???..???, that is, the +// high bit is 1 and all others can be 0 or 1) +// +// For many things we don't need to care, and can just do a normal C++ add for +// an f32.add, for example - the wasm rules are specified so that things like +// that just work (in order for such math to be fast). However, for our +// optimizer, it is useful to "standardize" NaNs when there is nondeterminism. +// That is, when there are multiple valid outputs, it's nice to emit the same +// one consistently, so that it doesn't look like the optimization changed +// something. In other words, if the valid output of an expression is a set of +// valid NaNs, and after optimization the output is still that same set, then +// the optimization is valid. And if the interpreter picks the same NaN in both +// cases from that identical set then nothing looks wrong to the fuzzer. +template<typename T> static Literal standardizeNaN(T result) { + if (!std::isnan(result)) { + return Literal(result); + } + // Pick a simple canonical payload, and positive. 
+ if (sizeof(T) == 4) { + return Literal(Literal(uint32_t(0x7fc00000u)).reinterpretf32()); + } else if (sizeof(T) == 8) { + return Literal(Literal(uint64_t(0x7ff8000000000000ull)).reinterpretf64()); + } else { + WASM_UNREACHABLE("invalid float"); + } +} + Literal Literal::add(const Literal& other) const { switch (type.getBasic()) { case Type::i32: @@ -844,9 +878,9 @@ Literal Literal::add(const Literal& other) const { case Type::i64: return Literal(uint64_t(i64) + uint64_t(other.i64)); case Type::f32: - return Literal(getf32() + other.getf32()); + return standardizeNaN(getf32() + other.getf32()); case Type::f64: - return Literal(getf64() + other.getf64()); + return standardizeNaN(getf64() + other.getf64()); case Type::v128: case Type::funcref: case Type::externref: @@ -868,9 +902,9 @@ Literal Literal::sub(const Literal& other) const { case Type::i64: return Literal(uint64_t(i64) - uint64_t(other.i64)); case Type::f32: - return Literal(getf32() - other.getf32()); + return standardizeNaN(getf32() - other.getf32()); case Type::f64: - return Literal(getf64() - other.getf64()); + return standardizeNaN(getf64() - other.getf64()); case Type::v128: case Type::funcref: case Type::externref: @@ -963,9 +997,9 @@ Literal Literal::mul(const Literal& other) const { case Type::i64: return Literal(uint64_t(i64) * uint64_t(other.i64)); case Type::f32: - return Literal(getf32() * other.getf32()); + return standardizeNaN(getf32() * other.getf32()); case Type::f64: - return Literal(getf64() * other.getf64()); + return standardizeNaN(getf64() * other.getf64()); case Type::v128: case Type::funcref: case Type::externref: @@ -989,10 +1023,8 @@ Literal Literal::div(const Literal& other) const { case FP_ZERO: switch (std::fpclassify(lhs)) { case FP_NAN: - return Literal(setQuietNaN(lhs)); case FP_ZERO: - return Literal( - std::copysign(std::numeric_limits<float>::quiet_NaN(), sign)); + return standardizeNaN(lhs / rhs); case FP_NORMAL: // fallthrough case FP_SUBNORMAL: // fallthrough case 
FP_INFINITE: @@ -1005,7 +1037,7 @@ Literal Literal::div(const Literal& other) const { case FP_INFINITE: // fallthrough case FP_NORMAL: // fallthrough case FP_SUBNORMAL: - return Literal(lhs / rhs); + return standardizeNaN(lhs / rhs); default: WASM_UNREACHABLE("invalid fp classification"); } @@ -1017,10 +1049,8 @@ Literal Literal::div(const Literal& other) const { case FP_ZERO: switch (std::fpclassify(lhs)) { case FP_NAN: - return Literal(setQuietNaN(lhs)); case FP_ZERO: - return Literal( - std::copysign(std::numeric_limits<double>::quiet_NaN(), sign)); + return standardizeNaN(lhs / rhs); case FP_NORMAL: // fallthrough case FP_SUBNORMAL: // fallthrough case FP_INFINITE: @@ -1033,7 +1063,7 @@ Literal Literal::div(const Literal& other) const { case FP_INFINITE: // fallthrough case FP_NORMAL: // fallthrough case FP_SUBNORMAL: - return Literal(lhs / rhs); + return standardizeNaN(lhs / rhs); default: WASM_UNREACHABLE("invalid fp classification"); } @@ -1380,39 +1410,29 @@ Literal Literal::min(const Literal& other) const { switch (type.getBasic()) { case Type::f32: { auto l = getf32(), r = other.getf32(); - if (l == r && l == 0) { - return Literal(std::signbit(l) ? l : r); + if (std::isnan(l)) { + return standardizeNaN(l); } - auto result = std::min(l, r); - bool lnan = std::isnan(l), rnan = std::isnan(r); - if (!std::isnan(result) && !lnan && !rnan) { - return Literal(result); + if (std::isnan(r)) { + return standardizeNaN(r); } - if (!lnan && !rnan) { - return Literal((int32_t)0x7fc00000).castToF32(); + if (l == r && l == 0) { + return Literal(std::signbit(l) ? l : r); } - return Literal(lnan ? l : r) - .castToI32() - .or_(Literal(0xc00000)) - .castToF32(); + return Literal(std::min(l, r)); } case Type::f64: { auto l = getf64(), r = other.getf64(); - if (l == r && l == 0) { - return Literal(std::signbit(l) ? 
l : r); + if (std::isnan(l)) { + return standardizeNaN(l); } - auto result = std::min(l, r); - bool lnan = std::isnan(l), rnan = std::isnan(r); - if (!std::isnan(result) && !lnan && !rnan) { - return Literal(result); + if (std::isnan(r)) { + return standardizeNaN(r); } - if (!lnan && !rnan) { - return Literal((int64_t)0x7ff8000000000000LL).castToF64(); + if (l == r && l == 0) { + return Literal(std::signbit(l) ? l : r); } - return Literal(lnan ? l : r) - .castToI64() - .or_(Literal(int64_t(0x8000000000000LL))) - .castToF64(); + return Literal(std::min(l, r)); } default: WASM_UNREACHABLE("unexpected type"); @@ -1423,39 +1443,29 @@ Literal Literal::max(const Literal& other) const { switch (type.getBasic()) { case Type::f32: { auto l = getf32(), r = other.getf32(); - if (l == r && l == 0) { - return Literal(std::signbit(l) ? r : l); + if (std::isnan(l)) { + return standardizeNaN(l); } - auto result = std::max(l, r); - bool lnan = std::isnan(l), rnan = std::isnan(r); - if (!std::isnan(result) && !lnan && !rnan) { - return Literal(result); + if (std::isnan(r)) { + return standardizeNaN(r); } - if (!lnan && !rnan) { - return Literal((int32_t)0x7fc00000).castToF32(); + if (l == r && l == 0) { + return Literal(std::signbit(l) ? r : l); } - return Literal(lnan ? l : r) - .castToI32() - .or_(Literal(0xc00000)) - .castToF32(); + return Literal(std::max(l, r)); } case Type::f64: { auto l = getf64(), r = other.getf64(); - if (l == r && l == 0) { - return Literal(std::signbit(l) ? r : l); + if (std::isnan(l)) { + return standardizeNaN(l); } - auto result = std::max(l, r); - bool lnan = std::isnan(l), rnan = std::isnan(r); - if (!std::isnan(result) && !lnan && !rnan) { - return Literal(result); + if (std::isnan(r)) { + return standardizeNaN(r); } - if (!lnan && !rnan) { - return Literal((int64_t)0x7ff8000000000000LL).castToF64(); + if (l == r && l == 0) { + return Literal(std::signbit(l) ? r : l); } - return Literal(lnan ? 
l : r) - .castToI64() - .or_(Literal(int64_t(0x8000000000000LL))) - .castToF64(); + return Literal(std::max(l, r)); } default: WASM_UNREACHABLE("unexpected type"); diff --git a/test/passes/O_fast-math.txt b/test/passes/O_fast-math.txt index e3833dbba..c6ae6ec16 100644 --- a/test/passes/O_fast-math.txt +++ b/test/passes/O_fast-math.txt @@ -22,7 +22,7 @@ (f32.const -nan:0x34546d) ) (func $2 (; has Stack IR ;) (result f32) - (f32.const -nan:0x74546d) + (f32.const nan:0x400000) ) (func $9 (; has Stack IR ;) (param $0 f32) (result f32) (f32.neg diff --git a/test/passes/fuzz-exec_O.txt b/test/passes/fuzz-exec_O.txt index f17b04650..2a32e23e8 100644 --- a/test/passes/fuzz-exec_O.txt +++ b/test/passes/fuzz-exec_O.txt @@ -31,59 +31,56 @@ [fuzz-exec] comparing func_0 [fuzz-exec] comparing func_1 [fuzz-exec] calling div -[fuzz-exec] note result: div => -nan:0x63017a +[fuzz-exec] note result: div => nan:0x400000 [fuzz-exec] calling mul1 -[fuzz-exec] note result: mul1 => -nan:0x74546d +[fuzz-exec] note result: mul1 => nan:0x400000 [fuzz-exec] calling mul2 -[fuzz-exec] note result: mul2 => -nan:0x74546d +[fuzz-exec] note result: mul2 => nan:0x400000 [fuzz-exec] calling add1 -[fuzz-exec] note result: add1 => -nan:0x74546d +[fuzz-exec] note result: add1 => nan:0x400000 [fuzz-exec] calling add2 -[fuzz-exec] note result: add2 => -nan:0x74546d +[fuzz-exec] note result: add2 => nan:0x400000 [fuzz-exec] calling add3 -[fuzz-exec] note result: add3 => -nan:0x74546d +[fuzz-exec] note result: add3 => nan:0x400000 [fuzz-exec] calling add4 -[fuzz-exec] note result: add4 => -nan:0x74546d +[fuzz-exec] note result: add4 => nan:0x400000 [fuzz-exec] calling sub1 -[fuzz-exec] note result: sub1 => -nan:0x74546d +[fuzz-exec] note result: sub1 => nan:0x400000 [fuzz-exec] calling sub2 -[fuzz-exec] note result: sub2 => -nan:0x74546d +[fuzz-exec] note result: sub2 => nan:0x400000 (module (type $none_=>_f32 (func (result f32))) (export "div" (func $0)) - (export "mul1" (func $1)) - (export "mul2" (func 
$1)) - (export "add1" (func $1)) - (export "add2" (func $1)) - (export "add3" (func $1)) - (export "add4" (func $1)) - (export "sub1" (func $1)) - (export "sub2" (func $1)) + (export "mul1" (func $0)) + (export "mul2" (func $0)) + (export "add1" (func $0)) + (export "add2" (func $0)) + (export "add3" (func $0)) + (export "add4" (func $0)) + (export "sub1" (func $0)) + (export "sub2" (func $0)) (func $0 (; has Stack IR ;) (result f32) - (f32.const -nan:0x63017a) - ) - (func $1 (; has Stack IR ;) (result f32) - (f32.const -nan:0x74546d) + (f32.const nan:0x400000) ) ) [fuzz-exec] calling div -[fuzz-exec] note result: div => -nan:0x63017a +[fuzz-exec] note result: div => nan:0x400000 [fuzz-exec] calling mul1 -[fuzz-exec] note result: mul1 => -nan:0x74546d +[fuzz-exec] note result: mul1 => nan:0x400000 [fuzz-exec] calling mul2 -[fuzz-exec] note result: mul2 => -nan:0x74546d +[fuzz-exec] note result: mul2 => nan:0x400000 [fuzz-exec] calling add1 -[fuzz-exec] note result: add1 => -nan:0x74546d +[fuzz-exec] note result: add1 => nan:0x400000 [fuzz-exec] calling add2 -[fuzz-exec] note result: add2 => -nan:0x74546d +[fuzz-exec] note result: add2 => nan:0x400000 [fuzz-exec] calling add3 -[fuzz-exec] note result: add3 => -nan:0x74546d +[fuzz-exec] note result: add3 => nan:0x400000 [fuzz-exec] calling add4 -[fuzz-exec] note result: add4 => -nan:0x74546d +[fuzz-exec] note result: add4 => nan:0x400000 [fuzz-exec] calling sub1 -[fuzz-exec] note result: sub1 => -nan:0x74546d +[fuzz-exec] note result: sub1 => nan:0x400000 [fuzz-exec] calling sub2 -[fuzz-exec] note result: sub2 => -nan:0x74546d +[fuzz-exec] note result: sub2 => nan:0x400000 [fuzz-exec] comparing add1 [fuzz-exec] comparing add2 [fuzz-exec] comparing add3 diff --git a/test/passes/optimize-instructions_fuzz-exec.txt b/test/passes/optimize-instructions_fuzz-exec.txt new file mode 100644 index 000000000..548148e09 --- /dev/null +++ b/test/passes/optimize-instructions_fuzz-exec.txt @@ -0,0 +1,255 @@ +[fuzz-exec] calling 
test32 +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x7fff82] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[fuzz-exec] calling test64 +[LoggingExternalInterface logging nan:0x8000000000000] +[LoggingExternalInterface logging nan:0x8000000000000] +[LoggingExternalInterface logging nan:0x8000000000000] +[LoggingExternalInterface logging nan:0x8000000000000] +[LoggingExternalInterface logging nan:0xfffffffffff82] +[LoggingExternalInterface logging nan:0x8000000000000] +[LoggingExternalInterface logging nan:0x8000000000000] +[fuzz-exec] calling just-one-nan +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[fuzz-exec] calling ignore +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +(module + (type $none_=>_none (func)) + (type $f32_=>_none (func (param f32))) + (type $f64_=>_none (func (param f64))) + (import "fuzzing-support" "log-f32" (func $logf32 (param f32))) + (import "fuzzing-support" "log-f64" (func $logf64 (param f64))) + (export "test32" (func $0)) + (export "test64" (func $1)) + (export "just-one-nan" (func $2)) + (export "ignore" (func $3)) + (func $0 + (call $logf32 + (f32.add + (f32.const -nan:0x7fff82) + (f32.neg + (f32.const -nan:0x7ff622) + ) + ) + ) + (call $logf32 + (f32.sub + (f32.const -nan:0x7fff82) + (f32.neg + (f32.const -nan:0x7ff622) + ) + ) + ) + (call $logf32 + (f32.mul + (f32.const -nan:0x7fff82) + (f32.neg + 
(f32.const -nan:0x7ff622) + ) + ) + ) + (call $logf32 + (f32.div + (f32.const nan:0x7fff82) + (f32.const -nan:0x7ff622) + ) + ) + (call $logf32 + (f32.copysign + (f32.const -nan:0x7fff82) + (f32.neg + (f32.const -nan:0x7ff622) + ) + ) + ) + (call $logf32 + (f32.min + (f32.const -nan:0x7fff82) + (f32.neg + (f32.const -nan:0x7ff622) + ) + ) + ) + (call $logf32 + (f32.max + (f32.const -nan:0x7fff82) + (f32.neg + (f32.const -nan:0x7ff622) + ) + ) + ) + ) + (func $1 + (call $logf64 + (f64.add + (f64.const -nan:0xfffffffffff82) + (f64.neg + (f64.const -nan:0xfffffffffa622) + ) + ) + ) + (call $logf64 + (f64.sub + (f64.const -nan:0xfffffffffff82) + (f64.neg + (f64.const -nan:0xfffffffffa622) + ) + ) + ) + (call $logf64 + (f64.mul + (f64.const -nan:0xfffffffffff82) + (f64.neg + (f64.const -nan:0xfffffffffa622) + ) + ) + ) + (call $logf64 + (f64.div + (f64.const nan:0xfffffffffff82) + (f64.const -nan:0xfffffffffa622) + ) + ) + (call $logf64 + (f64.copysign + (f64.const -nan:0xfffffffffff82) + (f64.neg + (f64.const -nan:0xfffffffffa622) + ) + ) + ) + (call $logf64 + (f64.min + (f64.const -nan:0xfffffffffff82) + (f64.neg + (f64.const -nan:0xfffffffffa622) + ) + ) + ) + (call $logf64 + (f64.max + (f64.const -nan:0xfffffffffff82) + (f64.neg + (f64.const -nan:0xfffffffffa622) + ) + ) + ) + ) + (func $2 + (call $logf32 + (f32.add + (f32.neg + (f32.const -nan:0x7ff622) + ) + (f32.const 0) + ) + ) + (call $logf32 + (f32.add + (f32.const -nan:0x7ff622) + (f32.neg + (f32.const 0) + ) + ) + ) + (call $logf32 + (f32.add + (f32.neg + (f32.const -nan:0x7ff622) + ) + (f32.const -0) + ) + ) + (call $logf32 + (f32.add + (f32.const -nan:0x7ff622) + (f32.neg + (f32.const -0) + ) + ) + ) + (call $logf32 + (f32.add + (f32.neg + (f32.const nan:0x7ff622) + ) + (f32.const 0) + ) + ) + (call $logf32 + (f32.add + (f32.const nan:0x7ff622) + (f32.neg + (f32.const 0) + ) + ) + ) + ) + (func $3 + (call $logf32 + (f32.div + (f32.const -0) + (f32.const 0) + ) + ) + (call $logf32 + (f32.div + (f32.const 0) 
+ (f32.const 0) + ) + ) + (call $logf32 + (f32.div + (f32.const -0) + (f32.const -0) + ) + ) + (call $logf32 + (f32.div + (f32.const 0) + (f32.const -0) + ) + ) + ) +) +[fuzz-exec] calling test32 +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x7fff82] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[fuzz-exec] calling test64 +[LoggingExternalInterface logging nan:0x8000000000000] +[LoggingExternalInterface logging nan:0x8000000000000] +[LoggingExternalInterface logging nan:0x8000000000000] +[LoggingExternalInterface logging nan:0x8000000000000] +[LoggingExternalInterface logging nan:0xfffffffffff82] +[LoggingExternalInterface logging nan:0x8000000000000] +[LoggingExternalInterface logging nan:0x8000000000000] +[fuzz-exec] calling just-one-nan +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[fuzz-exec] calling ignore +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] +[LoggingExternalInterface logging nan:0x400000] diff --git a/test/passes/optimize-instructions_fuzz-exec.wast b/test/passes/optimize-instructions_fuzz-exec.wast new file mode 100644 index 000000000..317db04e4 --- /dev/null +++ b/test/passes/optimize-instructions_fuzz-exec.wast @@ -0,0 +1,206 @@ +(module + (import "fuzzing-support" "log-f32" (func $logf32 (param f32))) + (import "fuzzing-support" "log-f64" (func $logf64 (param f64))) + (func "test32" + (call $logf32 + (f32.add + (f32.const -nan:0xffff82) + (f32.neg + (f32.const 
-nan:0xfff622) + ) + ) + ) + (call $logf32 + (f32.sub + (f32.const -nan:0xffff82) + (f32.neg + (f32.const -nan:0xfff622) + ) + ) + ) + (call $logf32 + (f32.mul + (f32.const -nan:0xffff82) + (f32.neg + (f32.const -nan:0xfff622) + ) + ) + ) + (call $logf32 + (f32.div + (f32.const -nan:0xffff82) + (f32.neg + (f32.const -nan:0xfff622) + ) + ) + ) + (call $logf32 + (f32.copysign + (f32.const -nan:0xffff82) + (f32.neg + (f32.const -nan:0xfff622) + ) + ) + ) + (call $logf32 + (f32.min + (f32.const -nan:0xffff82) + (f32.neg + (f32.const -nan:0xfff622) + ) + ) + ) + (call $logf32 + (f32.max + (f32.const -nan:0xffff82) + (f32.neg + (f32.const -nan:0xfff622) + ) + ) + ) + ) + (func "test64" + (call $logf64 + (f64.add + (f64.const -nan:0xfffffffffff82) + (f64.neg + (f64.const -nan:0xfffffffffa622) + ) + ) + ) + (call $logf64 + (f64.sub + (f64.const -nan:0xfffffffffff82) + (f64.neg + (f64.const -nan:0xfffffffffa622) + ) + ) + ) + (call $logf64 + (f64.mul + (f64.const -nan:0xfffffffffff82) + (f64.neg + (f64.const -nan:0xfffffffffa622) + ) + ) + ) + (call $logf64 + (f64.div + (f64.const -nan:0xfffffffffff82) + (f64.neg + (f64.const -nan:0xfffffffffa622) + ) + ) + ) + (call $logf64 + (f64.copysign + (f64.const -nan:0xfffffffffff82) + (f64.neg + (f64.const -nan:0xfffffffffa622) + ) + ) + ) + (call $logf64 + (f64.min + (f64.const -nan:0xfffffffffff82) + (f64.neg + (f64.const -nan:0xfffffffffa622) + ) + ) + ) + (call $logf64 + (f64.max + (f64.const -nan:0xfffffffffff82) + (f64.neg + (f64.const -nan:0xfffffffffa622) + ) + ) + ) + ) + (func "just-one-nan" + (call $logf32 + (f32.add + (f32.const 0) + (f32.neg + (f32.const -nan:0xfff622) + ) + ) + ) + (call $logf32 + (f32.add + (f32.const -nan:0xfff622) + (f32.neg + (f32.const 0) + ) + ) + ) + (call $logf32 + (f32.add + (f32.const -0) + (f32.neg + (f32.const -nan:0xfff622) + ) + ) + ) + (call $logf32 + (f32.add + (f32.const -nan:0xfff622) + (f32.neg + (f32.const -0) + ) + ) + ) + (call $logf32 + (f32.add + (f32.const 0) + (f32.neg + 
(f32.const nan:0xfff622) + ) + ) + ) + (call $logf32 + (f32.add + (f32.const nan:0xfff622) + (f32.neg + (f32.const 0) + ) + ) + ) + ) + (func "ignore" + ;; none of these are nan inputs, so the interpreter must not change the sign + (call $logf32 + (f32.div + (f32.const 0) + (f32.neg + (f32.const 0) + ) + ) + ) + (call $logf32 + (f32.div + (f32.const -0) + (f32.neg + (f32.const 0) + ) + ) + ) + (call $logf32 + (f32.div + (f32.const 0) + (f32.neg + (f32.const -0) + ) + ) + ) + (call $logf32 + (f32.div + (f32.const -0) + (f32.neg + (f32.const -0) + ) + ) + ) + ) +) + diff --git a/test/spec/old_float_exprs.wast b/test/spec/old_float_exprs.wast index 7900832b0..854e21a62 100644 --- a/test/spec/old_float_exprs.wast +++ b/test/spec/old_float_exprs.wast @@ -46,8 +46,8 @@ (assert_return (invoke "f32.no_fold_add_zero" (f32.const -0.0)) (f32.const 0.0)) (assert_return (invoke "f64.no_fold_add_zero" (f64.const -0.0)) (f64.const 0.0)) -(assert_return (invoke "f32.no_fold_add_zero" (f32.const nan:0x200000)) (f32.const nan:0x600000)) -(assert_return (invoke "f64.no_fold_add_zero" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000)) +(assert_return (invoke "f32.no_fold_add_zero" (f32.const nan:0x200000)) (f32.const nan:0x400000)) +(assert_return (invoke "f64.no_fold_add_zero" (f64.const nan:0x4000000000000)) (f64.const nan:0x8000000000000)) ;; Test that 0.0 - x is not folded to -x. 
@@ -60,8 +60,8 @@ (assert_return (invoke "f32.no_fold_zero_sub" (f32.const 0.0)) (f32.const 0.0)) (assert_return (invoke "f64.no_fold_zero_sub" (f64.const 0.0)) (f64.const 0.0)) -(assert_return (invoke "f32.no_fold_zero_sub" (f32.const nan:0x200000)) (f32.const nan:0x600000)) -(assert_return (invoke "f64.no_fold_zero_sub" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000)) +(assert_return (invoke "f32.no_fold_zero_sub" (f32.const nan:0x200000)) (f32.const nan:0x400000)) +(assert_return (invoke "f64.no_fold_zero_sub" (f64.const nan:0x4000000000000)) (f64.const nan:0x8000000000000)) ;; Test that x - 0.0 is not folded to x. @@ -72,8 +72,8 @@ (f64.sub (local.get $x) (f64.const 0.0))) ) -(assert_return (invoke "f32.no_fold_sub_zero" (f32.const nan:0x200000)) (f32.const nan:0x600000)) -(assert_return (invoke "f64.no_fold_sub_zero" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000)) +(assert_return (invoke "f32.no_fold_sub_zero" (f32.const nan:0x200000)) (f32.const nan:0x400000)) +(assert_return (invoke "f64.no_fold_sub_zero" (f64.const nan:0x4000000000000)) (f64.const nan:0x8000000000000)) ;; Test that x*0.0 is not folded to 0.0. 
@@ -87,11 +87,11 @@ (assert_return (invoke "f32.no_fold_mul_zero" (f32.const -0.0)) (f32.const -0.0)) (assert_return (invoke "f32.no_fold_mul_zero" (f32.const -1.0)) (f32.const -0.0)) (assert_return (invoke "f32.no_fold_mul_zero" (f32.const -2.0)) (f32.const -0.0)) -(assert_return (invoke "f32.no_fold_mul_zero" (f32.const nan:0x200000)) (f32.const nan:0x600000)) +(assert_return (invoke "f32.no_fold_mul_zero" (f32.const nan:0x200000)) (f32.const nan:0x400000)) (assert_return (invoke "f64.no_fold_mul_zero" (f64.const -0.0)) (f64.const -0.0)) (assert_return (invoke "f64.no_fold_mul_zero" (f64.const -1.0)) (f64.const -0.0)) (assert_return (invoke "f64.no_fold_mul_zero" (f64.const -2.0)) (f64.const -0.0)) -(assert_return (invoke "f64.no_fold_mul_zero" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000)) +(assert_return (invoke "f64.no_fold_mul_zero" (f64.const nan:0x4000000000000)) (f64.const nan:0x8000000000000)) ;; Test that x*1.0 is not folded to x. ;; See IEEE 754-2008 10.4 "Literal meaning and value-changing optimizations". @@ -103,8 +103,8 @@ (f64.mul (local.get $x) (f64.const 1.0))) ) -(assert_return (invoke "f32.no_fold_mul_one" (f32.const nan:0x200000)) (f32.const nan:0x600000)) -(assert_return (invoke "f64.no_fold_mul_one" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000)) +(assert_return (invoke "f32.no_fold_mul_one" (f32.const nan:0x200000)) (f32.const nan:0x400000)) +(assert_return (invoke "f64.no_fold_mul_one" (f64.const nan:0x4000000000000)) (f64.const nan:0x8000000000000)) ;; Test that 0.0/x is not folded to 0.0. 
@@ -118,11 +118,11 @@ (assert_return_nan (invoke "f32.no_fold_zero_div" (f32.const 0.0))) (assert_return_nan (invoke "f32.no_fold_zero_div" (f32.const -0.0))) (assert_return (invoke "f32.no_fold_zero_div" (f32.const nan)) (f32.const nan)) -(assert_return (invoke "f32.no_fold_zero_div" (f32.const nan:0x200000)) (f32.const nan:0x600000)) +(assert_return (invoke "f32.no_fold_zero_div" (f32.const nan:0x200000)) (f32.const nan:0x400000)) (assert_return_nan (invoke "f64.no_fold_zero_div" (f64.const 0.0))) (assert_return_nan (invoke "f64.no_fold_zero_div" (f64.const -0.0))) (assert_return (invoke "f64.no_fold_zero_div" (f64.const nan)) (f64.const nan)) -(assert_return (invoke "f64.no_fold_zero_div" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000)) +(assert_return (invoke "f64.no_fold_zero_div" (f64.const nan:0x4000000000000)) (f64.const nan:0x8000000000000)) ;; Test that x/1.0 is not folded to x. @@ -133,8 +133,8 @@ (f64.div (local.get $x) (f64.const 1.0))) ) -(assert_return (invoke "f32.no_fold_div_one" (f32.const nan:0x200000)) (f32.const nan:0x600000)) -(assert_return (invoke "f64.no_fold_div_one" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000)) +(assert_return (invoke "f32.no_fold_div_one" (f32.const nan:0x200000)) (f32.const nan:0x400000)) +(assert_return (invoke "f64.no_fold_div_one" (f64.const nan:0x4000000000000)) (f64.const nan:0x8000000000000)) ;; Test that x/-1.0 is not folded to -x. @@ -145,8 +145,8 @@ (f64.div (local.get $x) (f64.const -1.0))) ) -(assert_return (invoke "f32.no_fold_div_neg1" (f32.const nan:0x200000)) (f32.const nan:0x600000)) -(assert_return (invoke "f64.no_fold_div_neg1" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000)) +(assert_return (invoke "f32.no_fold_div_neg1" (f32.const nan:0x200000)) (f32.const nan:0x400000)) +(assert_return (invoke "f64.no_fold_div_neg1" (f64.const nan:0x4000000000000)) (f64.const nan:0x8000000000000)) ;; Test that -0.0 - x is not folded to -x. 
@@ -157,8 +157,8 @@ (f64.sub (f64.const -0.0) (local.get $x))) ) -(assert_return (invoke "f32.no_fold_neg0_sub" (f32.const nan:0x200000)) (f32.const nan:0x600000)) -(assert_return (invoke "f64.no_fold_neg0_sub" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000)) +(assert_return (invoke "f32.no_fold_neg0_sub" (f32.const nan:0x200000)) (f32.const nan:0x400000)) +(assert_return (invoke "f64.no_fold_neg0_sub" (f64.const nan:0x4000000000000)) (f64.const nan:0x8000000000000)) ;; Test that -1.0 * x is not folded to -x. @@ -169,8 +169,8 @@ (f64.mul (f64.const -1.0) (local.get $x))) ) -(assert_return (invoke "f32.no_fold_neg1_mul" (f32.const nan:0x200000)) (f32.const nan:0x600000)) -(assert_return (invoke "f64.no_fold_neg1_mul" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000)) +(assert_return (invoke "f32.no_fold_neg1_mul" (f32.const nan:0x200000)) (f32.const nan:0x400000)) +(assert_return (invoke "f64.no_fold_neg1_mul" (f64.const nan:0x4000000000000)) (f64.const nan:0x8000000000000)) ;; Test that x == x is not folded to true. 
@@ -365,7 +365,7 @@ (assert_return (invoke "f32.no_fold_div_0" (f32.const -infinity)) (f32.const -infinity)) (assert_return_nan (invoke "f32.no_fold_div_0" (f32.const 0))) (assert_return_nan (invoke "f32.no_fold_div_0" (f32.const -0))) -(assert_return (invoke "f32.no_fold_div_0" (f32.const nan:0x200000)) (f32.const nan:0x600000)) +(assert_return (invoke "f32.no_fold_div_0" (f32.const nan:0x200000)) (f32.const nan:0x400000)) (assert_return (invoke "f32.no_fold_div_0" (f32.const nan)) (f32.const nan)) (assert_return (invoke "f64.no_fold_div_0" (f64.const 1.0)) (f64.const infinity)) (assert_return (invoke "f64.no_fold_div_0" (f64.const -1.0)) (f64.const -infinity)) @@ -374,7 +374,7 @@ (assert_return_nan (invoke "f64.no_fold_div_0" (f64.const 0))) (assert_return_nan (invoke "f64.no_fold_div_0" (f64.const -0))) (assert_return (invoke "f64.no_fold_div_0" (f64.const nan)) (f64.const nan)) -(assert_return (invoke "f64.no_fold_div_0" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000)) +(assert_return (invoke "f64.no_fold_div_0" (f64.const nan:0x4000000000000)) (f64.const nan:0x8000000000000)) ;; Test that x/-0 is not folded away. 
@@ -391,7 +391,7 @@ (assert_return (invoke "f32.no_fold_div_neg0" (f32.const -infinity)) (f32.const infinity)) (assert_return_nan (invoke "f32.no_fold_div_neg0" (f32.const 0))) (assert_return_nan (invoke "f32.no_fold_div_neg0" (f32.const -0))) -(assert_return (invoke "f32.no_fold_div_neg0" (f32.const nan:0x200000)) (f32.const nan:0x600000)) +(assert_return (invoke "f32.no_fold_div_neg0" (f32.const nan:0x200000)) (f32.const nan:0x400000)) (assert_return (invoke "f32.no_fold_div_neg0" (f32.const nan)) (f32.const nan)) (assert_return (invoke "f64.no_fold_div_neg0" (f64.const 1.0)) (f64.const -infinity)) (assert_return (invoke "f64.no_fold_div_neg0" (f64.const -1.0)) (f64.const infinity)) @@ -400,7 +400,7 @@ (assert_return_nan (invoke "f64.no_fold_div_neg0" (f64.const 0))) (assert_return_nan (invoke "f64.no_fold_div_neg0" (f64.const -0))) (assert_return (invoke "f64.no_fold_div_neg0" (f64.const nan)) (f64.const nan)) -(assert_return (invoke "f64.no_fold_div_neg0" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000)) +(assert_return (invoke "f64.no_fold_div_neg0" (f64.const nan:0x4000000000000)) (f64.const nan:0x8000000000000)) ;; Test that sqrt(x*x+y*y) is not folded to hypot. 
diff --git a/test/spec/simd.wast b/test/spec/simd.wast index 41b47170b..332d48d01 100644 --- a/test/spec/simd.wast +++ b/test/spec/simd.wast @@ -866,10 +866,10 @@ (assert_return (invoke "f32x4.neg" (v128.const f32x4 -0 nan -infinity 5)) (v128.const f32x4 0 -nan infinity -5)) (assert_return (invoke "f32x4.sqrt" (v128.const f32x4 -0 nan infinity 4)) (v128.const f32x4 -0 nan infinity 2)) ;; TODO: qfma/qfms tests -(assert_return (invoke "f32x4.add" (v128.const f32x4 nan -nan infinity 42) (v128.const f32x4 42 infinity infinity 1)) (v128.const f32x4 nan -nan infinity 43)) -(assert_return (invoke "f32x4.sub" (v128.const f32x4 nan -nan infinity 42) (v128.const f32x4 42 infinity -infinity 1)) (v128.const f32x4 nan -nan infinity 41)) -(assert_return (invoke "f32x4.mul" (v128.const f32x4 nan -nan infinity 42) (v128.const f32x4 42 infinity infinity 2)) (v128.const f32x4 nan -nan infinity 84)) -(assert_return (invoke "f32x4.div" (v128.const f32x4 nan -nan infinity 42) (v128.const f32x4 42 infinity 2 2)) (v128.const f32x4 nan -nan infinity 21)) +(assert_return (invoke "f32x4.add" (v128.const f32x4 nan -nan infinity 42) (v128.const f32x4 42 infinity infinity 1)) (v128.const f32x4 nan nan infinity 43)) +(assert_return (invoke "f32x4.sub" (v128.const f32x4 nan -nan infinity 42) (v128.const f32x4 42 infinity -infinity 1)) (v128.const f32x4 nan nan infinity 41)) +(assert_return (invoke "f32x4.mul" (v128.const f32x4 nan -nan infinity 42) (v128.const f32x4 42 infinity infinity 2)) (v128.const f32x4 nan nan infinity 84)) +(assert_return (invoke "f32x4.div" (v128.const f32x4 nan -nan infinity 42) (v128.const f32x4 42 infinity 2 2)) (v128.const f32x4 nan nan infinity 21)) (assert_return (invoke "f32x4.min" (v128.const f32x4 -0 0 nan 5) (v128.const f32x4 0 -0 5 nan)) (v128.const f32x4 -0 -0 nan nan)) (assert_return (invoke "f32x4.max" (v128.const f32x4 -0 0 nan 5) (v128.const f32x4 0 -0 5 nan)) (v128.const f32x4 0 0 nan nan)) (assert_return (invoke "f32x4.pmin" (v128.const f32x4 -0 0 nan 
5) (v128.const f32x4 0 -0 5 nan)) (v128.const f32x4 -0 0 nan 5)) @@ -895,13 +895,13 @@ (assert_return (invoke "f64x2.sqrt" (v128.const f64x2 -0 nan)) (v128.const f64x2 -0 nan)) (assert_return (invoke "f64x2.sqrt" (v128.const f64x2 infinity 4)) (v128.const f64x2 infinity 2)) ;; TODO: qfma/qfms tests -(assert_return (invoke "f64x2.add" (v128.const f64x2 nan -nan) (v128.const f64x2 42 infinity)) (v128.const f64x2 nan -nan)) +(assert_return (invoke "f64x2.add" (v128.const f64x2 nan -nan) (v128.const f64x2 42 infinity)) (v128.const f64x2 nan nan)) (assert_return (invoke "f64x2.add" (v128.const f64x2 infinity 42) (v128.const f64x2 infinity 1)) (v128.const f64x2 infinity 43)) -(assert_return (invoke "f64x2.sub" (v128.const f64x2 nan -nan) (v128.const f64x2 42 infinity)) (v128.const f64x2 nan -nan)) +(assert_return (invoke "f64x2.sub" (v128.const f64x2 nan -nan) (v128.const f64x2 42 infinity)) (v128.const f64x2 nan nan)) (assert_return (invoke "f64x2.sub" (v128.const f64x2 infinity 42) (v128.const f64x2 -infinity 1)) (v128.const f64x2 infinity 41)) -(assert_return (invoke "f64x2.mul" (v128.const f64x2 nan -nan) (v128.const f64x2 42 infinity)) (v128.const f64x2 nan -nan)) +(assert_return (invoke "f64x2.mul" (v128.const f64x2 nan -nan) (v128.const f64x2 42 infinity)) (v128.const f64x2 nan nan)) (assert_return (invoke "f64x2.mul" (v128.const f64x2 infinity 42) (v128.const f64x2 infinity 2)) (v128.const f64x2 infinity 84)) -(assert_return (invoke "f64x2.div" (v128.const f64x2 nan -nan) (v128.const f64x2 42 infinity)) (v128.const f64x2 nan -nan)) +(assert_return (invoke "f64x2.div" (v128.const f64x2 nan -nan) (v128.const f64x2 42 infinity)) (v128.const f64x2 nan nan)) (assert_return (invoke "f64x2.div" (v128.const f64x2 infinity 42) (v128.const f64x2 2 2)) (v128.const f64x2 infinity 21)) (assert_return (invoke "f64x2.min" (v128.const f64x2 -0 0) (v128.const f64x2 0 -0)) (v128.const f64x2 -0 -0)) (assert_return (invoke "f64x2.min" (v128.const f64x2 nan 5) (v128.const f64x2 5 
nan)) (v128.const f64x2 nan nan)) |