summaryrefslogtreecommitdiff
path: root/test/spec/float_exprs.wast
diff options
context:
space:
mode:
authorHeejin Ahn <aheejin@gmail.com>2019-11-29 18:33:41 -0800
committerGitHub <noreply@github.com>2019-11-29 18:33:41 -0800
commit24d274983df9f7dbeebe8a890297d4f30d5bbca7 (patch)
treeb76f10f6de34e94b8c0e449b18427daeb34bee0c /test/spec/float_exprs.wast
parent8c97dc61a713768d7f8302ec3a695c1207ce7239 (diff)
downloadbinaryen-24d274983df9f7dbeebe8a890297d4f30d5bbca7.tar.gz
binaryen-24d274983df9f7dbeebe8a890297d4f30d5bbca7.tar.bz2
binaryen-24d274983df9f7dbeebe8a890297d4f30d5bbca7.zip
Update spec test suite (#2484)
This updates spec test suite to that of the current up-to-date version of https://github.com/WebAssembly/spec repo. - All failing tests are added in `BLACKLIST` in shared.py with reasons. - For tests that already existed and was passing and started failing after the update, we add the new test to the blacklist and preserve the old file by renaming it to 'old_[FILENAME].wast' not to lose test coverage. When the cause of the error is fixed or the unsupported construct gets support so the new test passes, we can delete the corresponding 'old_[FILENAME].wast' file. - Adds support for `spectest.print_[type] style imports.
Diffstat (limited to 'test/spec/float_exprs.wast')
-rw-r--r--test/spec/float_exprs.wast849
1 files changed, 720 insertions, 129 deletions
diff --git a/test/spec/float_exprs.wast b/test/spec/float_exprs.wast
index 7900832b0..e6583634f 100644
--- a/test/spec/float_exprs.wast
+++ b/test/spec/float_exprs.wast
@@ -46,8 +46,8 @@
(assert_return (invoke "f32.no_fold_add_zero" (f32.const -0.0)) (f32.const 0.0))
(assert_return (invoke "f64.no_fold_add_zero" (f64.const -0.0)) (f64.const 0.0))
-(assert_return (invoke "f32.no_fold_add_zero" (f32.const nan:0x200000)) (f32.const nan:0x600000))
-(assert_return (invoke "f64.no_fold_add_zero" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000))
+(assert_return_arithmetic_nan (invoke "f32.no_fold_add_zero" (f32.const nan:0x200000)))
+(assert_return_arithmetic_nan (invoke "f64.no_fold_add_zero" (f64.const nan:0x4000000000000)))
;; Test that 0.0 - x is not folded to -x.
@@ -60,8 +60,8 @@
(assert_return (invoke "f32.no_fold_zero_sub" (f32.const 0.0)) (f32.const 0.0))
(assert_return (invoke "f64.no_fold_zero_sub" (f64.const 0.0)) (f64.const 0.0))
-(assert_return (invoke "f32.no_fold_zero_sub" (f32.const nan:0x200000)) (f32.const nan:0x600000))
-(assert_return (invoke "f64.no_fold_zero_sub" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000))
+(assert_return_arithmetic_nan (invoke "f32.no_fold_zero_sub" (f32.const nan:0x200000)))
+(assert_return_arithmetic_nan (invoke "f64.no_fold_zero_sub" (f64.const nan:0x4000000000000)))
;; Test that x - 0.0 is not folded to x.
@@ -72,8 +72,8 @@
(f64.sub (local.get $x) (f64.const 0.0)))
)
-(assert_return (invoke "f32.no_fold_sub_zero" (f32.const nan:0x200000)) (f32.const nan:0x600000))
-(assert_return (invoke "f64.no_fold_sub_zero" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000))
+(assert_return_arithmetic_nan (invoke "f32.no_fold_sub_zero" (f32.const nan:0x200000)))
+(assert_return_arithmetic_nan (invoke "f64.no_fold_sub_zero" (f64.const nan:0x4000000000000)))
;; Test that x*0.0 is not folded to 0.0.
@@ -87,11 +87,11 @@
(assert_return (invoke "f32.no_fold_mul_zero" (f32.const -0.0)) (f32.const -0.0))
(assert_return (invoke "f32.no_fold_mul_zero" (f32.const -1.0)) (f32.const -0.0))
(assert_return (invoke "f32.no_fold_mul_zero" (f32.const -2.0)) (f32.const -0.0))
-(assert_return (invoke "f32.no_fold_mul_zero" (f32.const nan:0x200000)) (f32.const nan:0x600000))
+(assert_return_arithmetic_nan (invoke "f32.no_fold_mul_zero" (f32.const nan:0x200000)))
(assert_return (invoke "f64.no_fold_mul_zero" (f64.const -0.0)) (f64.const -0.0))
(assert_return (invoke "f64.no_fold_mul_zero" (f64.const -1.0)) (f64.const -0.0))
(assert_return (invoke "f64.no_fold_mul_zero" (f64.const -2.0)) (f64.const -0.0))
-(assert_return (invoke "f64.no_fold_mul_zero" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000))
+(assert_return_arithmetic_nan (invoke "f64.no_fold_mul_zero" (f64.const nan:0x4000000000000)))
;; Test that x*1.0 is not folded to x.
;; See IEEE 754-2008 10.4 "Literal meaning and value-changing optimizations".
@@ -103,8 +103,8 @@
(f64.mul (local.get $x) (f64.const 1.0)))
)
-(assert_return (invoke "f32.no_fold_mul_one" (f32.const nan:0x200000)) (f32.const nan:0x600000))
-(assert_return (invoke "f64.no_fold_mul_one" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000))
+(assert_return_arithmetic_nan (invoke "f32.no_fold_mul_one" (f32.const nan:0x200000)))
+(assert_return_arithmetic_nan (invoke "f64.no_fold_mul_one" (f64.const nan:0x4000000000000)))
;; Test that 0.0/x is not folded to 0.0.
@@ -115,14 +115,14 @@
(f64.div (f64.const 0.0) (local.get $x)))
)
-(assert_return_nan (invoke "f32.no_fold_zero_div" (f32.const 0.0)))
-(assert_return_nan (invoke "f32.no_fold_zero_div" (f32.const -0.0)))
-(assert_return (invoke "f32.no_fold_zero_div" (f32.const nan)) (f32.const nan))
-(assert_return (invoke "f32.no_fold_zero_div" (f32.const nan:0x200000)) (f32.const nan:0x600000))
-(assert_return_nan (invoke "f64.no_fold_zero_div" (f64.const 0.0)))
-(assert_return_nan (invoke "f64.no_fold_zero_div" (f64.const -0.0)))
-(assert_return (invoke "f64.no_fold_zero_div" (f64.const nan)) (f64.const nan))
-(assert_return (invoke "f64.no_fold_zero_div" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000))
+(assert_return_canonical_nan (invoke "f32.no_fold_zero_div" (f32.const 0.0)))
+(assert_return_canonical_nan (invoke "f32.no_fold_zero_div" (f32.const -0.0)))
+(assert_return_canonical_nan (invoke "f32.no_fold_zero_div" (f32.const nan)))
+(assert_return_arithmetic_nan (invoke "f32.no_fold_zero_div" (f32.const nan:0x200000)))
+(assert_return_canonical_nan (invoke "f64.no_fold_zero_div" (f64.const 0.0)))
+(assert_return_canonical_nan (invoke "f64.no_fold_zero_div" (f64.const -0.0)))
+(assert_return_canonical_nan (invoke "f64.no_fold_zero_div" (f64.const nan)))
+(assert_return_arithmetic_nan (invoke "f64.no_fold_zero_div" (f64.const nan:0x4000000000000)))
;; Test that x/1.0 is not folded to x.
@@ -133,8 +133,8 @@
(f64.div (local.get $x) (f64.const 1.0)))
)
-(assert_return (invoke "f32.no_fold_div_one" (f32.const nan:0x200000)) (f32.const nan:0x600000))
-(assert_return (invoke "f64.no_fold_div_one" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000))
+(assert_return_arithmetic_nan (invoke "f32.no_fold_div_one" (f32.const nan:0x200000)))
+(assert_return_arithmetic_nan (invoke "f64.no_fold_div_one" (f64.const nan:0x4000000000000)))
;; Test that x/-1.0 is not folded to -x.
@@ -145,8 +145,8 @@
(f64.div (local.get $x) (f64.const -1.0)))
)
-(assert_return (invoke "f32.no_fold_div_neg1" (f32.const nan:0x200000)) (f32.const nan:0x600000))
-(assert_return (invoke "f64.no_fold_div_neg1" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000))
+(assert_return_arithmetic_nan (invoke "f32.no_fold_div_neg1" (f32.const nan:0x200000)))
+(assert_return_arithmetic_nan (invoke "f64.no_fold_div_neg1" (f64.const nan:0x4000000000000)))
;; Test that -0.0 - x is not folded to -x.
@@ -157,8 +157,8 @@
(f64.sub (f64.const -0.0) (local.get $x)))
)
-(assert_return (invoke "f32.no_fold_neg0_sub" (f32.const nan:0x200000)) (f32.const nan:0x600000))
-(assert_return (invoke "f64.no_fold_neg0_sub" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000))
+(assert_return_arithmetic_nan (invoke "f32.no_fold_neg0_sub" (f32.const nan:0x200000)))
+(assert_return_arithmetic_nan (invoke "f64.no_fold_neg0_sub" (f64.const nan:0x4000000000000)))
;; Test that -1.0 * x is not folded to -x.
@@ -169,8 +169,8 @@
(f64.mul (f64.const -1.0) (local.get $x)))
)
-(assert_return (invoke "f32.no_fold_neg1_mul" (f32.const nan:0x200000)) (f32.const nan:0x600000))
-(assert_return (invoke "f64.no_fold_neg1_mul" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000))
+(assert_return_arithmetic_nan (invoke "f32.no_fold_neg1_mul" (f32.const nan:0x200000)))
+(assert_return_arithmetic_nan (invoke "f64.no_fold_neg1_mul" (f64.const nan:0x4000000000000)))
;; Test that x == x is not folded to true.
@@ -205,10 +205,28 @@
(f64.sub (local.get $x) (local.get $x)))
)
-(assert_return_nan (invoke "f32.no_fold_sub_self" (f32.const infinity)))
-(assert_return (invoke "f32.no_fold_sub_self" (f32.const nan)) (f32.const nan))
-(assert_return_nan (invoke "f64.no_fold_sub_self" (f64.const infinity)))
-(assert_return (invoke "f64.no_fold_sub_self" (f64.const nan)) (f64.const nan))
+(assert_return_canonical_nan (invoke "f32.no_fold_sub_self" (f32.const inf)))
+(assert_return_canonical_nan (invoke "f32.no_fold_sub_self" (f32.const nan)))
+(assert_return_canonical_nan (invoke "f64.no_fold_sub_self" (f64.const inf)))
+(assert_return_canonical_nan (invoke "f64.no_fold_sub_self" (f64.const nan)))
+
+;; Test that x / x is not folded to 1.0.
+
+(module
+ (func (export "f32.no_fold_div_self") (param $x f32) (result f32)
+ (f32.div (local.get $x) (local.get $x)))
+ (func (export "f64.no_fold_div_self") (param $x f64) (result f64)
+ (f64.div (local.get $x) (local.get $x)))
+)
+
+(assert_return_canonical_nan (invoke "f32.no_fold_div_self" (f32.const inf)))
+(assert_return_canonical_nan (invoke "f32.no_fold_div_self" (f32.const nan)))
+(assert_return_canonical_nan (invoke "f32.no_fold_div_self" (f32.const 0.0)))
+(assert_return_canonical_nan (invoke "f32.no_fold_div_self" (f32.const -0.0)))
+(assert_return_canonical_nan (invoke "f64.no_fold_div_self" (f64.const inf)))
+(assert_return_canonical_nan (invoke "f64.no_fold_div_self" (f64.const nan)))
+(assert_return_canonical_nan (invoke "f64.no_fold_div_self" (f64.const 0.0)))
+(assert_return_canonical_nan (invoke "f64.no_fold_div_self" (f64.const -0.0)))
;; Test that x/3 is not folded to x*(1/3).
@@ -230,7 +248,7 @@
(assert_return (invoke "f64.no_fold_div_3" (f64.const -0x1.0138bf530a53cp+1007)) (f64.const -0x1.56f6546eb86fbp+1005))
(assert_return (invoke "f64.no_fold_div_3" (f64.const 0x1.052b87f9d794dp+415)) (f64.const 0x1.5c3a0aa274c67p+413))
-;; Test that (x*z)+(y*z) is not folded to (x+y)*z
+;; Test that (x*z)+(y*z) is not folded to (x+y)*z.
(module
(func (export "f32.no_factor") (param $x f32) (param $y f32) (param $z f32) (result f32)
@@ -250,7 +268,7 @@
(assert_return (invoke "f64.no_factor" (f64.const -0x1.c4ded58a6f389p-289) (f64.const 0x1.ba6fdef5d59c9p-260) (f64.const -0x1.c1201c0470205p-253)) (f64.const -0x1.841ada2e0f184p-512))
(assert_return (invoke "f64.no_factor" (f64.const 0x1.9d3688f8e375ap-608) (f64.const 0x1.bf91311588256p-579) (f64.const -0x1.1605a6b5d5ff8p+489)) (f64.const -0x1.e6118ca76af53p-90))
-;; Test that (x+y)*z is not folded to (x*z)+(y*z)
+;; Test that (x+y)*z is not folded to (x*z)+(y*z).
(module
(func (export "f32.no_distribute") (param $x f32) (param $y f32) (param $z f32) (result f32)
@@ -270,7 +288,7 @@
(assert_return (invoke "f64.no_distribute" (f64.const -0x1.c4ded58a6f389p-289) (f64.const 0x1.ba6fdef5d59c9p-260) (f64.const -0x1.c1201c0470205p-253)) (f64.const -0x1.841ada2e0f183p-512))
(assert_return (invoke "f64.no_distribute" (f64.const 0x1.9d3688f8e375ap-608) (f64.const 0x1.bf91311588256p-579) (f64.const -0x1.1605a6b5d5ff8p+489)) (f64.const -0x1.e6118ca76af52p-90))
-;; Test that x*(y/z) is not folded to (x*y)/z
+;; Test that x*(y/z) is not folded to (x*y)/z.
(module
(func (export "f32.no_regroup_div_mul") (param $x f32) (param $y f32) (param $z f32) (result f32)
@@ -286,11 +304,11 @@
(assert_return (invoke "f32.no_regroup_div_mul" (f32.const 0x1.c6ae76p+112) (f32.const 0x1.fc953cp+24) (f32.const -0x1.60b3e8p+71)) (f32.const -0x1.47d0eap+66))
(assert_return (invoke "f64.no_regroup_div_mul" (f64.const 0x1.3c04b815e30bp-423) (f64.const -0x1.379646fd98127p-119) (f64.const 0x1.bddb158506031p-642)) (f64.const -0x1.b9b3301f2dd2dp+99))
(assert_return (invoke "f64.no_regroup_div_mul" (f64.const 0x1.46b3a402f86d5p+337) (f64.const 0x1.6fbf1b9e1798dp-447) (f64.const -0x1.bd9704a5a6a06p+797)) (f64.const -0x0p+0))
-(assert_return (invoke "f64.no_regroup_div_mul" (f64.const 0x1.6c9765bb4347fp-479) (f64.const 0x1.a4af42e34a141p+902) (f64.const 0x1.d2dde70eb68f9p-448)) (f64.const infinity))
+(assert_return (invoke "f64.no_regroup_div_mul" (f64.const 0x1.6c9765bb4347fp-479) (f64.const 0x1.a4af42e34a141p+902) (f64.const 0x1.d2dde70eb68f9p-448)) (f64.const inf))
(assert_return (invoke "f64.no_regroup_div_mul" (f64.const -0x1.706023645be72p+480) (f64.const -0x1.6c229f7d9101dp+611) (f64.const -0x1.4d50fa68d3d9ep+836)) (f64.const -0x1.926fa3cacc651p+255))
(assert_return (invoke "f64.no_regroup_div_mul" (f64.const 0x1.8cc63d8caf4c7p-599) (f64.const 0x1.8671ac4c35753p-878) (f64.const -0x1.ef35b1695e659p-838)) (f64.const -0x1.38d55f56406dp-639))
-;; Test that (x*y)/z is not folded to x*(y/z)
+;; Test that (x*y)/z is not folded to x*(y/z).
(module
(func (export "f32.no_regroup_mul_div") (param $x f32) (param $y f32) (param $z f32) (result f32)
@@ -303,11 +321,11 @@
(assert_return (invoke "f32.no_regroup_mul_div" (f32.const -0x1.454738p+91) (f32.const -0x1.b28a66p-115) (f32.const -0x1.f53908p+72)) (f32.const -0x1.1a00e8p-96))
(assert_return (invoke "f32.no_regroup_mul_div" (f32.const -0x1.6be56ep+16) (f32.const -0x1.b46fc6p-21) (f32.const -0x1.a51df6p-123)) (f32.const -0x1.79225ap+118))
(assert_return (invoke "f32.no_regroup_mul_div" (f32.const -0x1.c343f8p-94) (f32.const 0x1.e4d906p+73) (f32.const 0x1.be69f8p+68)) (f32.const -0x1.ea1df4p-89))
-(assert_return (invoke "f32.no_regroup_mul_div" (f32.const 0x1.c6ae76p+112) (f32.const 0x1.fc953cp+24) (f32.const -0x1.60b3e8p+71)) (f32.const -infinity))
+(assert_return (invoke "f32.no_regroup_mul_div" (f32.const 0x1.c6ae76p+112) (f32.const 0x1.fc953cp+24) (f32.const -0x1.60b3e8p+71)) (f32.const -inf))
(assert_return (invoke "f64.no_regroup_mul_div" (f64.const 0x1.3c04b815e30bp-423) (f64.const -0x1.379646fd98127p-119) (f64.const 0x1.bddb158506031p-642)) (f64.const -0x1.b9b3301f2dd2ep+99))
(assert_return (invoke "f64.no_regroup_mul_div" (f64.const 0x1.46b3a402f86d5p+337) (f64.const 0x1.6fbf1b9e1798dp-447) (f64.const -0x1.bd9704a5a6a06p+797)) (f64.const -0x1.0da0b6328e09p-907))
(assert_return (invoke "f64.no_regroup_mul_div" (f64.const 0x1.6c9765bb4347fp-479) (f64.const 0x1.a4af42e34a141p+902) (f64.const 0x1.d2dde70eb68f9p-448)) (f64.const 0x1.4886b6d9a9a79p+871))
-(assert_return (invoke "f64.no_regroup_mul_div" (f64.const -0x1.706023645be72p+480) (f64.const -0x1.6c229f7d9101dp+611) (f64.const -0x1.4d50fa68d3d9ep+836)) (f64.const -infinity))
+(assert_return (invoke "f64.no_regroup_mul_div" (f64.const -0x1.706023645be72p+480) (f64.const -0x1.6c229f7d9101dp+611) (f64.const -0x1.4d50fa68d3d9ep+836)) (f64.const -inf))
(assert_return (invoke "f64.no_regroup_mul_div" (f64.const 0x1.8cc63d8caf4c7p-599) (f64.const 0x1.8671ac4c35753p-878) (f64.const -0x1.ef35b1695e659p-838)) (f64.const -0x0p+0))
;; Test that x+y+z+w is not reassociated.
@@ -359,22 +377,22 @@
(f64.div (local.get $x) (f64.const 0.0)))
)
-(assert_return (invoke "f32.no_fold_div_0" (f32.const 1.0)) (f32.const infinity))
-(assert_return (invoke "f32.no_fold_div_0" (f32.const -1.0)) (f32.const -infinity))
-(assert_return (invoke "f32.no_fold_div_0" (f32.const infinity)) (f32.const infinity))
-(assert_return (invoke "f32.no_fold_div_0" (f32.const -infinity)) (f32.const -infinity))
-(assert_return_nan (invoke "f32.no_fold_div_0" (f32.const 0)))
-(assert_return_nan (invoke "f32.no_fold_div_0" (f32.const -0)))
-(assert_return (invoke "f32.no_fold_div_0" (f32.const nan:0x200000)) (f32.const nan:0x600000))
-(assert_return (invoke "f32.no_fold_div_0" (f32.const nan)) (f32.const nan))
-(assert_return (invoke "f64.no_fold_div_0" (f64.const 1.0)) (f64.const infinity))
-(assert_return (invoke "f64.no_fold_div_0" (f64.const -1.0)) (f64.const -infinity))
-(assert_return (invoke "f64.no_fold_div_0" (f64.const infinity)) (f64.const infinity))
-(assert_return (invoke "f64.no_fold_div_0" (f64.const -infinity)) (f64.const -infinity))
-(assert_return_nan (invoke "f64.no_fold_div_0" (f64.const 0)))
-(assert_return_nan (invoke "f64.no_fold_div_0" (f64.const -0)))
-(assert_return (invoke "f64.no_fold_div_0" (f64.const nan)) (f64.const nan))
-(assert_return (invoke "f64.no_fold_div_0" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000))
+(assert_return (invoke "f32.no_fold_div_0" (f32.const 1.0)) (f32.const inf))
+(assert_return (invoke "f32.no_fold_div_0" (f32.const -1.0)) (f32.const -inf))
+(assert_return (invoke "f32.no_fold_div_0" (f32.const inf)) (f32.const inf))
+(assert_return (invoke "f32.no_fold_div_0" (f32.const -inf)) (f32.const -inf))
+(assert_return_canonical_nan (invoke "f32.no_fold_div_0" (f32.const 0)))
+(assert_return_canonical_nan (invoke "f32.no_fold_div_0" (f32.const -0)))
+(assert_return_arithmetic_nan (invoke "f32.no_fold_div_0" (f32.const nan:0x200000)))
+(assert_return_canonical_nan (invoke "f32.no_fold_div_0" (f32.const nan)))
+(assert_return (invoke "f64.no_fold_div_0" (f64.const 1.0)) (f64.const inf))
+(assert_return (invoke "f64.no_fold_div_0" (f64.const -1.0)) (f64.const -inf))
+(assert_return (invoke "f64.no_fold_div_0" (f64.const inf)) (f64.const inf))
+(assert_return (invoke "f64.no_fold_div_0" (f64.const -inf)) (f64.const -inf))
+(assert_return_canonical_nan (invoke "f64.no_fold_div_0" (f64.const 0)))
+(assert_return_canonical_nan (invoke "f64.no_fold_div_0" (f64.const -0)))
+(assert_return_canonical_nan (invoke "f64.no_fold_div_0" (f64.const nan)))
+(assert_return_arithmetic_nan (invoke "f64.no_fold_div_0" (f64.const nan:0x4000000000000)))
;; Test that x/-0 is not folded away.
@@ -385,22 +403,22 @@
(f64.div (local.get $x) (f64.const -0.0)))
)
-(assert_return (invoke "f32.no_fold_div_neg0" (f32.const 1.0)) (f32.const -infinity))
-(assert_return (invoke "f32.no_fold_div_neg0" (f32.const -1.0)) (f32.const infinity))
-(assert_return (invoke "f32.no_fold_div_neg0" (f32.const infinity)) (f32.const -infinity))
-(assert_return (invoke "f32.no_fold_div_neg0" (f32.const -infinity)) (f32.const infinity))
-(assert_return_nan (invoke "f32.no_fold_div_neg0" (f32.const 0)))
-(assert_return_nan (invoke "f32.no_fold_div_neg0" (f32.const -0)))
-(assert_return (invoke "f32.no_fold_div_neg0" (f32.const nan:0x200000)) (f32.const nan:0x600000))
-(assert_return (invoke "f32.no_fold_div_neg0" (f32.const nan)) (f32.const nan))
-(assert_return (invoke "f64.no_fold_div_neg0" (f64.const 1.0)) (f64.const -infinity))
-(assert_return (invoke "f64.no_fold_div_neg0" (f64.const -1.0)) (f64.const infinity))
-(assert_return (invoke "f64.no_fold_div_neg0" (f64.const infinity)) (f64.const -infinity))
-(assert_return (invoke "f64.no_fold_div_neg0" (f64.const -infinity)) (f64.const infinity))
-(assert_return_nan (invoke "f64.no_fold_div_neg0" (f64.const 0)))
-(assert_return_nan (invoke "f64.no_fold_div_neg0" (f64.const -0)))
-(assert_return (invoke "f64.no_fold_div_neg0" (f64.const nan)) (f64.const nan))
-(assert_return (invoke "f64.no_fold_div_neg0" (f64.const nan:0x4000000000000)) (f64.const nan:0xc000000000000))
+(assert_return (invoke "f32.no_fold_div_neg0" (f32.const 1.0)) (f32.const -inf))
+(assert_return (invoke "f32.no_fold_div_neg0" (f32.const -1.0)) (f32.const inf))
+(assert_return (invoke "f32.no_fold_div_neg0" (f32.const inf)) (f32.const -inf))
+(assert_return (invoke "f32.no_fold_div_neg0" (f32.const -inf)) (f32.const inf))
+(assert_return_canonical_nan (invoke "f32.no_fold_div_neg0" (f32.const 0)))
+(assert_return_canonical_nan (invoke "f32.no_fold_div_neg0" (f32.const -0)))
+(assert_return_arithmetic_nan (invoke "f32.no_fold_div_neg0" (f32.const nan:0x200000)))
+(assert_return_canonical_nan (invoke "f32.no_fold_div_neg0" (f32.const nan)))
+(assert_return (invoke "f64.no_fold_div_neg0" (f64.const 1.0)) (f64.const -inf))
+(assert_return (invoke "f64.no_fold_div_neg0" (f64.const -1.0)) (f64.const inf))
+(assert_return (invoke "f64.no_fold_div_neg0" (f64.const inf)) (f64.const -inf))
+(assert_return (invoke "f64.no_fold_div_neg0" (f64.const -inf)) (f64.const inf))
+(assert_return_canonical_nan (invoke "f64.no_fold_div_neg0" (f64.const 0)))
+(assert_return_canonical_nan (invoke "f64.no_fold_div_neg0" (f64.const -0)))
+(assert_return_canonical_nan (invoke "f64.no_fold_div_neg0" (f64.const nan)))
+(assert_return_arithmetic_nan (invoke "f64.no_fold_div_neg0" (f64.const nan:0x4000000000000)))
;; Test that sqrt(x*x+y*y) is not folded to hypot.
@@ -471,7 +489,7 @@
(assert_return (invoke "f32.no_approximate_sqrt_reciprocal" (f32.const 0x1.ba4c5p+13)) (f32.const 0x1.136f16p-7))
(assert_return (invoke "f32.no_approximate_sqrt_reciprocal" (f32.const 0x1.4a5be2p+104)) (f32.const 0x1.c2b5bp-53))
-;; Test that converting i32/i64 to f32/f64 and back isn't folded away
+;; Test that converting i32/i64 to f32/f64 and back isn't folded away.
(module
(func (export "i32.no_fold_f32_s") (param i32) (result i32)
@@ -584,6 +602,18 @@
(assert_return (invoke "f64.no_fold_div_mul" (f64.const -0x1.e75859d2f0765p-278) (f64.const -0x1.5f19b6ab497f9p+283)) (f64.const -0x1.e75859d2f0764p-278))
(assert_return (invoke "f64.no_fold_div_mul" (f64.const -0x1.515fe9c3b5f5p+620) (f64.const 0x1.36be869c99f7ap+989)) (f64.const -0x1.515fe9c3b5f4fp+620))
+;; Test that x/2*2 is not folded to x.
+
+(module
+ (func (export "f32.no_fold_div2_mul2") (param $x f32) (result f32)
+ (f32.mul (f32.div (local.get $x) (f32.const 2.0)) (f32.const 2.0)))
+ (func (export "f64.no_fold_div2_mul2") (param $x f64) (result f64)
+ (f64.mul (f64.div (local.get $x) (f64.const 2.0)) (f64.const 2.0)))
+)
+
+(assert_return (invoke "f32.no_fold_div2_mul2" (f32.const 0x1.fffffep-126)) (f32.const 0x1p-125))
+(assert_return (invoke "f64.no_fold_div2_mul2" (f64.const 0x1.fffffffffffffp-1022)) (f64.const 0x1p-1021))
+
;; Test that promote(demote(x)) is not folded to x.
(module
@@ -605,7 +635,7 @@
(f32.demote_f64 (f64.promote_f32 (local.get $x))))
)
-(assert_return (invoke "no_fold_promote_demote" (f32.const nan:0x200000)) (f32.const nan:0x600000))
+(assert_return_arithmetic_nan (invoke "no_fold_promote_demote" (f32.const nan:0x200000)))
(assert_return (invoke "no_fold_promote_demote" (f32.const 0x0p+0)) (f32.const 0x0p+0))
(assert_return (invoke "no_fold_promote_demote" (f32.const -0x0p+0)) (f32.const -0x0p+0))
(assert_return (invoke "no_fold_promote_demote" (f32.const 0x1p-149)) (f32.const 0x1p-149))
@@ -616,8 +646,8 @@
(assert_return (invoke "no_fold_promote_demote" (f32.const -0x1p-126)) (f32.const -0x1p-126))
(assert_return (invoke "no_fold_promote_demote" (f32.const 0x1.fffffep+127)) (f32.const 0x1.fffffep+127))
(assert_return (invoke "no_fold_promote_demote" (f32.const -0x1.fffffep+127)) (f32.const -0x1.fffffep+127))
-(assert_return (invoke "no_fold_promote_demote" (f32.const infinity)) (f32.const infinity))
-(assert_return (invoke "no_fold_promote_demote" (f32.const -infinity)) (f32.const -infinity))
+(assert_return (invoke "no_fold_promote_demote" (f32.const inf)) (f32.const inf))
+(assert_return (invoke "no_fold_promote_demote" (f32.const -inf)) (f32.const -inf))
;; Test that demote(x+promote(y)) is not folded to demote(x)+y.
@@ -795,8 +825,8 @@
(assert_return (invoke "check" (i32.const 16)) (f64.const 15.3))
(assert_return (invoke "check" (i32.const 24)) (f64.const 15.4))
(invoke "run" (i32.const 32) (f64.const 3.0))
-(assert_return (invoke "check" (i32.const 0)) (f64.const 0x1.4222222222222p+2))
-(assert_return (invoke "check" (i32.const 8)) (f64.const 0x1.4444444444444p+2))
+(assert_return (invoke "check" (i32.const 0)) (f64.const 0x1.4222222222222p+2))
+(assert_return (invoke "check" (i32.const 8)) (f64.const 0x1.4444444444444p+2))
(assert_return (invoke "check" (i32.const 16)) (f64.const 0x1.4666666666667p+2))
(assert_return (invoke "check" (i32.const 24)) (f64.const 0x1.4888888888889p+2))
@@ -897,15 +927,47 @@
;; Test that x<y?x:y, etc. using if and else aren't folded to min, etc.
(module
- (func (export "f32.no_fold_lt_if") (param $x f32) (param $y f32) (result f32) (if f32 (f32.lt (local.get $x) (local.get $y)) (local.get $x) (local.get $y)))
- (func (export "f32.no_fold_le_if") (param $x f32) (param $y f32) (result f32) (if f32 (f32.le (local.get $x) (local.get $y)) (local.get $x) (local.get $y)))
- (func (export "f32.no_fold_gt_if") (param $x f32) (param $y f32) (result f32) (if f32 (f32.gt (local.get $x) (local.get $y)) (local.get $x) (local.get $y)))
- (func (export "f32.no_fold_ge_if") (param $x f32) (param $y f32) (result f32) (if f32 (f32.ge (local.get $x) (local.get $y)) (local.get $x) (local.get $y)))
+ (func (export "f32.no_fold_lt_if") (param $x f32) (param $y f32) (result f32)
+ (if (result f32) (f32.lt (local.get $x) (local.get $y))
+ (then (local.get $x)) (else (local.get $y))
+ )
+ )
+ (func (export "f32.no_fold_le_if") (param $x f32) (param $y f32) (result f32)
+ (if (result f32) (f32.le (local.get $x) (local.get $y))
+ (then (local.get $x)) (else (local.get $y))
+ )
+ )
+ (func (export "f32.no_fold_gt_if") (param $x f32) (param $y f32) (result f32)
+ (if (result f32) (f32.gt (local.get $x) (local.get $y))
+ (then (local.get $x)) (else (local.get $y))
+ )
+ )
+ (func (export "f32.no_fold_ge_if") (param $x f32) (param $y f32) (result f32)
+ (if (result f32) (f32.ge (local.get $x) (local.get $y))
+ (then (local.get $x)) (else (local.get $y))
+ )
+ )
- (func (export "f64.no_fold_lt_if") (param $x f64) (param $y f64) (result f64) (if f64 (f64.lt (local.get $x) (local.get $y)) (local.get $x) (local.get $y)))
- (func (export "f64.no_fold_le_if") (param $x f64) (param $y f64) (result f64) (if f64 (f64.le (local.get $x) (local.get $y)) (local.get $x) (local.get $y)))
- (func (export "f64.no_fold_gt_if") (param $x f64) (param $y f64) (result f64) (if f64 (f64.gt (local.get $x) (local.get $y)) (local.get $x) (local.get $y)))
- (func (export "f64.no_fold_ge_if") (param $x f64) (param $y f64) (result f64) (if f64 (f64.ge (local.get $x) (local.get $y)) (local.get $x) (local.get $y)))
+ (func (export "f64.no_fold_lt_if") (param $x f64) (param $y f64) (result f64)
+ (if (result f64) (f64.lt (local.get $x) (local.get $y))
+ (then (local.get $x)) (else (local.get $y))
+ )
+ )
+ (func (export "f64.no_fold_le_if") (param $x f64) (param $y f64) (result f64)
+ (if (result f64) (f64.le (local.get $x) (local.get $y))
+ (then (local.get $x)) (else (local.get $y))
+ )
+ )
+ (func (export "f64.no_fold_gt_if") (param $x f64) (param $y f64) (result f64)
+ (if (result f64) (f64.gt (local.get $x) (local.get $y))
+ (then (local.get $x)) (else (local.get $y))
+ )
+ )
+ (func (export "f64.no_fold_ge_if") (param $x f64) (param $y f64) (result f64)
+ (if (result f64) (f64.ge (local.get $x) (local.get $y))
+ (then (local.get $x)) (else (local.get $y))
+ )
+ )
)
(assert_return (invoke "f32.no_fold_lt_if" (f32.const 0.0) (f32.const nan)) (f32.const nan))
@@ -941,7 +1003,7 @@
(assert_return (invoke "f64.no_fold_ge_if" (f64.const 0.0) (f64.const -0.0)) (f64.const 0.0))
(assert_return (invoke "f64.no_fold_ge_if" (f64.const -0.0) (f64.const 0.0)) (f64.const -0.0))
-;; Test that x<0?-x:0, etc. using select aren't folded to abs
+;; Test that x<0?-x:x, etc. using select aren't folded to abs.
(module
(func (export "f32.no_fold_lt_select_to_abs") (param $x f32) (result f32) (select (f32.neg (local.get $x)) (local.get $x) (f32.lt (local.get $x) (f32.const 0.0))))
@@ -988,18 +1050,50 @@
(assert_return (invoke "f64.no_fold_ge_select_to_abs" (f64.const 0.0)) (f64.const 0.0))
(assert_return (invoke "f64.no_fold_ge_select_to_abs" (f64.const -0.0)) (f64.const -0.0))
-;; Test that x<0?-x:0, etc. using if aren't folded to abs
+;; Test that x<0?-x:x, etc. using if aren't folded to abs.
(module
- (func (export "f32.no_fold_lt_if_to_abs") (param $x f32) (result f32) (if f32 (f32.lt (local.get $x) (f32.const 0.0)) (f32.neg (local.get $x)) (local.get $x)))
- (func (export "f32.no_fold_le_if_to_abs") (param $x f32) (result f32) (if f32 (f32.le (local.get $x) (f32.const -0.0)) (f32.neg (local.get $x)) (local.get $x)))
- (func (export "f32.no_fold_gt_if_to_abs") (param $x f32) (result f32) (if f32 (f32.gt (local.get $x) (f32.const -0.0)) (local.get $x) (f32.neg (local.get $x))))
- (func (export "f32.no_fold_ge_if_to_abs") (param $x f32) (result f32) (if f32 (f32.ge (local.get $x) (f32.const 0.0)) (local.get $x) (f32.neg (local.get $x))))
+ (func (export "f32.no_fold_lt_if_to_abs") (param $x f32) (result f32)
+ (if (result f32) (f32.lt (local.get $x) (f32.const 0.0))
+ (then (f32.neg (local.get $x))) (else (local.get $x))
+ )
+ )
+ (func (export "f32.no_fold_le_if_to_abs") (param $x f32) (result f32)
+ (if (result f32) (f32.le (local.get $x) (f32.const -0.0))
+ (then (f32.neg (local.get $x))) (else (local.get $x))
+ )
+ )
+ (func (export "f32.no_fold_gt_if_to_abs") (param $x f32) (result f32)
+ (if (result f32) (f32.gt (local.get $x) (f32.const -0.0))
+ (then (local.get $x)) (else (f32.neg (local.get $x)))
+ )
+ )
+ (func (export "f32.no_fold_ge_if_to_abs") (param $x f32) (result f32)
+ (if (result f32) (f32.ge (local.get $x) (f32.const 0.0))
+ (then (local.get $x)) (else (f32.neg (local.get $x)))
+ )
+ )
- (func (export "f64.no_fold_lt_if_to_abs") (param $x f64) (result f64) (if f64 (f64.lt (local.get $x) (f64.const 0.0)) (f64.neg (local.get $x)) (local.get $x)))
- (func (export "f64.no_fold_le_if_to_abs") (param $x f64) (result f64) (if f64 (f64.le (local.get $x) (f64.const -0.0)) (f64.neg (local.get $x)) (local.get $x)))
- (func (export "f64.no_fold_gt_if_to_abs") (param $x f64) (result f64) (if f64 (f64.gt (local.get $x) (f64.const -0.0)) (local.get $x) (f64.neg (local.get $x))))
- (func (export "f64.no_fold_ge_if_to_abs") (param $x f64) (result f64) (if f64 (f64.ge (local.get $x) (f64.const 0.0)) (local.get $x) (f64.neg (local.get $x))))
+ (func (export "f64.no_fold_lt_if_to_abs") (param $x f64) (result f64)
+ (if (result f64) (f64.lt (local.get $x) (f64.const 0.0))
+ (then (f64.neg (local.get $x))) (else (local.get $x))
+ )
+ )
+ (func (export "f64.no_fold_le_if_to_abs") (param $x f64) (result f64)
+ (if (result f64) (f64.le (local.get $x) (f64.const -0.0))
+ (then (f64.neg (local.get $x))) (else (local.get $x))
+ )
+ )
+ (func (export "f64.no_fold_gt_if_to_abs") (param $x f64) (result f64)
+ (if (result f64) (f64.gt (local.get $x) (f64.const -0.0))
+ (then (local.get $x)) (else (f64.neg (local.get $x)))
+ )
+ )
+ (func (export "f64.no_fold_ge_if_to_abs") (param $x f64) (result f64)
+ (if (result f64) (f64.ge (local.get $x) (f64.const 0.0))
+ (then (local.get $x)) (else (f64.neg (local.get $x)))
+ )
+ )
)
(assert_return (invoke "f32.no_fold_lt_if_to_abs" (f32.const nan:0x200000)) (f32.const nan:0x200000))
@@ -1039,20 +1133,16 @@
;; https://support.microsoft.com/en-us/kb/78113
(module
- (func (export "incorrect_correction") (result f32)
+ (func (export "f32.incorrect_correction") (result f32)
(f32.sub (f32.sub (f32.add (f32.const 1.333) (f32.const 1.225)) (f32.const 1.333)) (f32.const 1.225))
)
-)
-
-(assert_return (invoke "incorrect_correction") (f32.const 0x1p-23))
-
-(module
- (func (export "incorrect_correction") (result f64)
+ (func (export "f64.incorrect_correction") (result f64)
(f64.sub (f64.sub (f64.add (f64.const 1.333) (f64.const 1.225)) (f64.const 1.333)) (f64.const 1.225))
)
)
-(assert_return (invoke "incorrect_correction") (f64.const -0x1p-52))
+(assert_return (invoke "f32.incorrect_correction") (f32.const 0x1p-23))
+(assert_return (invoke "f64.incorrect_correction") (f64.const -0x1p-52))
;; Test for a historical calculator bug.
;; http://www.hpmuseum.org/cgi-sys/cgiwrap/hpmuseum/articles.cgi?read=735
@@ -1200,8 +1290,8 @@
(assert_return (invoke "f32.no_fold_recip_recip" (f32.const -0x0p+0)) (f32.const -0x0p+0))
(assert_return (invoke "f32.no_fold_recip_recip" (f32.const 0x0p+0)) (f32.const 0x0p+0))
-(assert_return (invoke "f32.no_fold_recip_recip" (f32.const -infinity)) (f32.const -infinity))
-(assert_return (invoke "f32.no_fold_recip_recip" (f32.const infinity)) (f32.const infinity))
+(assert_return (invoke "f32.no_fold_recip_recip" (f32.const -inf)) (f32.const -inf))
+(assert_return (invoke "f32.no_fold_recip_recip" (f32.const inf)) (f32.const inf))
(assert_return (invoke "f64.no_fold_recip_recip" (f64.const -0x1.d81248dda63dp+148)) (f64.const -0x1.d81248dda63d1p+148))
(assert_return (invoke "f64.no_fold_recip_recip" (f64.const -0x1.f4750312039e3p+66)) (f64.const -0x1.f4750312039e2p+66))
@@ -1211,8 +1301,8 @@
(assert_return (invoke "f64.no_fold_recip_recip" (f64.const -0x0p+0)) (f64.const -0x0p+0))
(assert_return (invoke "f64.no_fold_recip_recip" (f64.const 0x0p+0)) (f64.const 0x0p+0))
-(assert_return (invoke "f64.no_fold_recip_recip" (f64.const -infinity)) (f64.const -infinity))
-(assert_return (invoke "f64.no_fold_recip_recip" (f64.const infinity)) (f64.const infinity))
+(assert_return (invoke "f64.no_fold_recip_recip" (f64.const -inf)) (f64.const -inf))
+(assert_return (invoke "f64.no_fold_recip_recip" (f64.const inf)) (f64.const inf))
;; Test that (x+y) * (x-y) is not optimized to x*x - y*y.
@@ -1262,6 +1352,87 @@
(assert_return (invoke "f64.no_algebraic_factoring" (f64.const -0x1.be663e4c0e4b2p+182) (f64.const -0x1.da85703760d25p+166)) (f64.const 0x1.853434f1a2ffep+365))
(assert_return (invoke "f64.no_algebraic_factoring" (f64.const -0x1.230e09952df1cp-236) (f64.const -0x1.fa2752adfadc9p-237)) (f64.const 0x1.42e43156bd1b8p-474))
+;; Test that platforms where SIMD instructions flush subnormals don't implicitly
+;; optimize using SIMD instructions.
+
+(module
+ (memory (data
+ "\01\00\00\00\01\00\00\80\01\00\00\00\01\00\00\80"
+ "\01\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00"
+ "\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00"
+ ))
+
+ (func (export "f32.simple_x4_sum")
+ (param $i i32)
+ (param $j i32)
+ (param $k i32)
+ (local $x0 f32) (local $x1 f32) (local $x2 f32) (local $x3 f32)
+ (local $y0 f32) (local $y1 f32) (local $y2 f32) (local $y3 f32)
+ (local.set $x0 (f32.load offset=0 (local.get $i)))
+ (local.set $x1 (f32.load offset=4 (local.get $i)))
+ (local.set $x2 (f32.load offset=8 (local.get $i)))
+ (local.set $x3 (f32.load offset=12 (local.get $i)))
+ (local.set $y0 (f32.load offset=0 (local.get $j)))
+ (local.set $y1 (f32.load offset=4 (local.get $j)))
+ (local.set $y2 (f32.load offset=8 (local.get $j)))
+ (local.set $y3 (f32.load offset=12 (local.get $j)))
+ (f32.store offset=0 (local.get $k) (f32.add (local.get $x0) (local.get $y0)))
+ (f32.store offset=4 (local.get $k) (f32.add (local.get $x1) (local.get $y1)))
+ (f32.store offset=8 (local.get $k) (f32.add (local.get $x2) (local.get $y2)))
+ (f32.store offset=12 (local.get $k) (f32.add (local.get $x3) (local.get $y3)))
+ )
+
+ (func (export "f32.load")
+ (param $k i32) (result f32)
+ (f32.load (local.get $k))
+ )
+)
+
+(assert_return (invoke "f32.simple_x4_sum" (i32.const 0) (i32.const 16) (i32.const 32)))
+(assert_return (invoke "f32.load" (i32.const 32)) (f32.const 0x1p-148))
+(assert_return (invoke "f32.load" (i32.const 36)) (f32.const 0x0p+0))
+(assert_return (invoke "f32.load" (i32.const 40)) (f32.const 0x1p-149))
+(assert_return (invoke "f32.load" (i32.const 44)) (f32.const -0x1p-149))
+
+(module
+ (memory (data
+ "\01\00\00\00\00\00\00\00\01\00\00\00\00\00\00\80\01\00\00\00\00\00\00\00\01\00\00\00\00\00\00\80"
+ "\01\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00"
+ "\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00"
+ ))
+
+ (func (export "f64.simple_x4_sum")
+ (param $i i32)
+ (param $j i32)
+ (param $k i32)
+ (local $x0 f64) (local $x1 f64) (local $x2 f64) (local $x3 f64)
+ (local $y0 f64) (local $y1 f64) (local $y2 f64) (local $y3 f64)
+ (local.set $x0 (f64.load offset=0 (local.get $i)))
+ (local.set $x1 (f64.load offset=8 (local.get $i)))
+ (local.set $x2 (f64.load offset=16 (local.get $i)))
+ (local.set $x3 (f64.load offset=24 (local.get $i)))
+ (local.set $y0 (f64.load offset=0 (local.get $j)))
+ (local.set $y1 (f64.load offset=8 (local.get $j)))
+ (local.set $y2 (f64.load offset=16 (local.get $j)))
+ (local.set $y3 (f64.load offset=24 (local.get $j)))
+ (f64.store offset=0 (local.get $k) (f64.add (local.get $x0) (local.get $y0)))
+ (f64.store offset=8 (local.get $k) (f64.add (local.get $x1) (local.get $y1)))
+ (f64.store offset=16 (local.get $k) (f64.add (local.get $x2) (local.get $y2)))
+ (f64.store offset=24 (local.get $k) (f64.add (local.get $x3) (local.get $y3)))
+ )
+
+ (func (export "f64.load")
+ (param $k i32) (result f64)
+ (f64.load (local.get $k))
+ )
+)
+
+(assert_return (invoke "f64.simple_x4_sum" (i32.const 0) (i32.const 32) (i32.const 64)))
+(assert_return (invoke "f64.load" (i32.const 64)) (f64.const 0x0.0000000000001p-1021))
+(assert_return (invoke "f64.load" (i32.const 72)) (f64.const 0x0p+0))
+(assert_return (invoke "f64.load" (i32.const 80)) (f64.const 0x0.0000000000001p-1022))
+(assert_return (invoke "f64.load" (i32.const 88)) (f64.const -0x0.0000000000001p-1022))
+
;; Test that plain summation is not reassociated, and that Kahan summation
;; isn't optimized into plain summation.
@@ -1430,6 +1601,46 @@
(assert_return (invoke "f64.no_fold_neg_sub" (f64.const -0.0) (f64.const 0.0)) (f64.const 0.0))
(assert_return (invoke "f64.no_fold_neg_sub" (f64.const 0.0) (f64.const 0.0)) (f64.const -0.0))
+;; Test that -(x + y) is not folded to (-x + -y).
+
+(module
+ (func (export "f32.no_fold_neg_add") (param $x f32) (param $y f32) (result f32)
+ (f32.neg (f32.add (local.get $x) (local.get $y))))
+
+ (func (export "f64.no_fold_neg_add") (param $x f64) (param $y f64) (result f64)
+ (f64.neg (f64.add (local.get $x) (local.get $y))))
+)
+
+(assert_return (invoke "f32.no_fold_neg_add" (f32.const -0.0) (f32.const -0.0)) (f32.const 0.0))
+(assert_return (invoke "f32.no_fold_neg_add" (f32.const 0.0) (f32.const -0.0)) (f32.const -0.0))
+(assert_return (invoke "f32.no_fold_neg_add" (f32.const -0.0) (f32.const 0.0)) (f32.const -0.0))
+(assert_return (invoke "f32.no_fold_neg_add" (f32.const 0.0) (f32.const 0.0)) (f32.const -0.0))
+
+(assert_return (invoke "f64.no_fold_neg_add" (f64.const -0.0) (f64.const -0.0)) (f64.const 0.0))
+(assert_return (invoke "f64.no_fold_neg_add" (f64.const 0.0) (f64.const -0.0)) (f64.const -0.0))
+(assert_return (invoke "f64.no_fold_neg_add" (f64.const -0.0) (f64.const 0.0)) (f64.const -0.0))
+(assert_return (invoke "f64.no_fold_neg_add" (f64.const 0.0) (f64.const 0.0)) (f64.const -0.0))
+
+;; Test that (-x + -y) is not folded to -(x + y).
+
+(module
+ (func (export "f32.no_fold_add_neg_neg") (param $x f32) (param $y f32) (result f32)
+ (f32.add (f32.neg (local.get $x)) (f32.neg (local.get $y))))
+
+ (func (export "f64.no_fold_add_neg_neg") (param $x f64) (param $y f64) (result f64)
+ (f64.add (f64.neg (local.get $x)) (f64.neg (local.get $y))))
+)
+
+(assert_return (invoke "f32.no_fold_add_neg_neg" (f32.const -0.0) (f32.const -0.0)) (f32.const 0.0))
+(assert_return (invoke "f32.no_fold_add_neg_neg" (f32.const 0.0) (f32.const -0.0)) (f32.const 0.0))
+(assert_return (invoke "f32.no_fold_add_neg_neg" (f32.const -0.0) (f32.const 0.0)) (f32.const 0.0))
+(assert_return (invoke "f32.no_fold_add_neg_neg" (f32.const 0.0) (f32.const 0.0)) (f32.const -0.0))
+
+(assert_return (invoke "f64.no_fold_add_neg_neg" (f64.const -0.0) (f64.const -0.0)) (f64.const 0.0))
+(assert_return (invoke "f64.no_fold_add_neg_neg" (f64.const 0.0) (f64.const -0.0)) (f64.const 0.0))
+(assert_return (invoke "f64.no_fold_add_neg_neg" (f64.const -0.0) (f64.const 0.0)) (f64.const 0.0))
+(assert_return (invoke "f64.no_fold_add_neg_neg" (f64.const 0.0) (f64.const 0.0)) (f64.const -0.0))
+
;; Test that -x + x is not folded to 0.0.
(module
@@ -1442,13 +1653,13 @@
(assert_return (invoke "f32.no_fold_add_neg" (f32.const 0.0)) (f32.const 0.0))
(assert_return (invoke "f32.no_fold_add_neg" (f32.const -0.0)) (f32.const 0.0))
-(assert_return_nan (invoke "f32.no_fold_add_neg" (f32.const infinity)))
-(assert_return_nan (invoke "f32.no_fold_add_neg" (f32.const -infinity)))
+(assert_return_canonical_nan (invoke "f32.no_fold_add_neg" (f32.const inf)))
+(assert_return_canonical_nan (invoke "f32.no_fold_add_neg" (f32.const -inf)))
(assert_return (invoke "f64.no_fold_add_neg" (f64.const 0.0)) (f64.const 0.0))
(assert_return (invoke "f64.no_fold_add_neg" (f64.const -0.0)) (f64.const 0.0))
-(assert_return_nan (invoke "f64.no_fold_add_neg" (f64.const infinity)))
-(assert_return_nan (invoke "f64.no_fold_add_neg" (f64.const -infinity)))
+(assert_return_canonical_nan (invoke "f64.no_fold_add_neg" (f64.const inf)))
+(assert_return_canonical_nan (invoke "f64.no_fold_add_neg" (f64.const -inf)))
;; Test that x+x+x+x+x+x is not folded to x * 6.
@@ -1491,14 +1702,14 @@
(assert_return (invoke "f32.no_fold_div_div" (f32.const -0x1.f70228p+78) (f32.const -0x1.fbc612p-16) (f32.const -0x1.8c379p+10)) (f32.const -0x1.47b43cp+83))
(assert_return (invoke "f32.no_fold_div_div" (f32.const 0x1.d29d2ep-70) (f32.const 0x1.f3a17ep+110) (f32.const -0x1.64d41p-112)) (f32.const -0x0p+0))
-(assert_return (invoke "f32.no_fold_div_div" (f32.const 0x1.867f98p+43) (f32.const 0x1.30acfcp-105) (f32.const 0x1.e210d8p+105)) (f32.const infinity))
+(assert_return (invoke "f32.no_fold_div_div" (f32.const 0x1.867f98p+43) (f32.const 0x1.30acfcp-105) (f32.const 0x1.e210d8p+105)) (f32.const inf))
(assert_return (invoke "f32.no_fold_div_div" (f32.const -0x1.c4001ap-14) (f32.const -0x1.9beb6cp+124) (f32.const -0x1.74f34cp-43)) (f32.const -0x1.819874p-96))
(assert_return (invoke "f32.no_fold_div_div" (f32.const 0x1.db0e6ep+46) (f32.const 0x1.55eea2p+56) (f32.const -0x1.f3134p+124)) (f32.const -0x1.6cep-135))
(assert_return (invoke "f64.no_fold_div_div" (f64.const 0x1.b4dc8ec3c7777p+337) (f64.const 0x1.9f95ac2d1863p+584) (f64.const -0x1.d4318abba341ep-782)) (f64.const -0x1.2649159d87e02p+534))
(assert_return (invoke "f64.no_fold_div_div" (f64.const -0x1.ac53af5eb445fp+791) (f64.const 0x1.8549c0a4ceb13p-29) (f64.const 0x1.64e384003c801p+316)) (f64.const -0x1.9417cdccbae91p+503))
(assert_return (invoke "f64.no_fold_div_div" (f64.const -0x1.d2685afb27327p+2) (f64.const -0x1.abb1eeed3dbebp+880) (f64.const 0x1.a543e2e6968a3p+170)) (f64.const 0x0.0000002a69a5fp-1022))
-(assert_return (invoke "f64.no_fold_div_div" (f64.const -0x1.47ddede78ad1cp+825) (f64.const 0x1.6d932d070a367p-821) (f64.const 0x1.79cf18cc64fp+961)) (f64.const -infinity))
+(assert_return (invoke "f64.no_fold_div_div" (f64.const -0x1.47ddede78ad1cp+825) (f64.const 0x1.6d932d070a367p-821) (f64.const 0x1.79cf18cc64fp+961)) (f64.const -inf))
(assert_return (invoke "f64.no_fold_div_div" (f64.const -0x1.f73d4979a9379p-888) (f64.const 0x1.4d83b53e97788p-596) (f64.const -0x1.f8f86c9603b5bp-139)) (f64.const 0x1.87a7bd89c586cp-154))
;; Test that (x/y)*(z/w) is not optimized to (x*z)/(y*w), example from
@@ -1516,11 +1727,11 @@
(assert_return (invoke "f32.no_fold_mul_divs" (f32.const -0x1.c483bep-109) (f32.const 0x1.ee1c3cp-92) (f32.const 0x1.800756p-88) (f32.const -0x1.95b972p+4)) (f32.const 0x1.bbd30cp-110))
(assert_return (invoke "f32.no_fold_mul_divs" (f32.const -0x1.0f4262p+102) (f32.const 0x1.248498p+25) (f32.const 0x1.f66a7cp-17) (f32.const 0x1.897fc8p-3)) (f32.const -0x1.2f1aa4p+63))
(assert_return (invoke "f32.no_fold_mul_divs" (f32.const -0x1.df5f22p+33) (f32.const -0x1.fcee3ep+39) (f32.const -0x1.9ea914p+29) (f32.const -0x1.2c4d3p+10)) (f32.const 0x1.4cf51cp+13))
-(assert_return (invoke "f32.no_fold_mul_divs" (f32.const -0x1.f568bcp+109) (f32.const 0x1.d9963p-34) (f32.const 0x1.37a87ap-16) (f32.const 0x1.a1524ap+78)) (f32.const -infinity))
+(assert_return (invoke "f32.no_fold_mul_divs" (f32.const -0x1.f568bcp+109) (f32.const 0x1.d9963p-34) (f32.const 0x1.37a87ap-16) (f32.const 0x1.a1524ap+78)) (f32.const -inf))
(assert_return (invoke "f32.no_fold_mul_divs" (f32.const 0x1.3dd592p-53) (f32.const -0x1.332c22p-64) (f32.const 0x1.b01064p-91) (f32.const 0x1.92bb3ap-36)) (f32.const -0x1.1c2dbp-44))
(assert_return (invoke "f64.no_fold_mul_divs" (f64.const -0x1.363d6764f7b12p-819) (f64.const -0x1.ed5471f660b5fp-464) (f64.const -0x1.671b0a7f3a42p+547) (f64.const 0x1.0633be34ba1f2p+186)) (f64.const -0x1.b8fa2b76baeebp+5))
-(assert_return (invoke "f64.no_fold_mul_divs" (f64.const -0x1.37880182e0fa8p+115) (f64.const 0x1.f842631576147p-920) (f64.const -0x1.999372231d156p+362) (f64.const -0x1.d5db481ab9554p+467)) (f64.const -infinity))
+(assert_return (invoke "f64.no_fold_mul_divs" (f64.const -0x1.37880182e0fa8p+115) (f64.const 0x1.f842631576147p-920) (f64.const -0x1.999372231d156p+362) (f64.const -0x1.d5db481ab9554p+467)) (f64.const -inf))
(assert_return (invoke "f64.no_fold_mul_divs" (f64.const -0x1.9a747c8d4b541p+308) (f64.const -0x1.99092ad6bbdc8p+192) (f64.const -0x1.cb23755c20101p-140) (f64.const -0x1.de8716f6b0b6ap+732)) (f64.const 0x1.ecf584c8466a5p-757))
(assert_return (invoke "f64.no_fold_mul_divs" (f64.const -0x1.c424b2ece903dp+129) (f64.const -0x1.568ce281db37fp-347) (f64.const 0x1.53900b99fd3dp-957) (f64.const 0x1.5c33952254dadp+223)) (f64.const 0x0p+0))
(assert_return (invoke "f64.no_fold_mul_divs" (f64.const 0x1.a8ec2cecb32a9p-18) (f64.const 0x1.58acab0051851p-277) (f64.const 0x1.35e87c9077f7fp-620) (f64.const -0x1.925ee37ffb386p+352)) (f64.const -0x1.e6286970b31bfp-714))
@@ -1561,13 +1772,13 @@
(assert_return (invoke "f32.no_fold_sqrt_square" (f32.const -0x1.b0f9e4p-73)) (f32.const 0x1.b211b2p-73))
(assert_return (invoke "f32.no_fold_sqrt_square" (f32.const -0x1.de417cp-71)) (f32.const 0x1.de65b8p-71))
(assert_return (invoke "f32.no_fold_sqrt_square" (f32.const 0x1.64c872p-86)) (f32.const 0x0p+0))
-(assert_return (invoke "f32.no_fold_sqrt_square" (f32.const 0x1.e199e4p+108)) (f32.const infinity))
+(assert_return (invoke "f32.no_fold_sqrt_square" (f32.const 0x1.e199e4p+108)) (f32.const inf))
(assert_return (invoke "f64.no_fold_sqrt_square" (f64.const 0x1.1759d657203fdp-529)) (f64.const 0x1.1759dd57545f3p-529))
(assert_return (invoke "f64.no_fold_sqrt_square" (f64.const -0x1.4c68de1c78d83p-514)) (f64.const 0x1.4c68de1c78d81p-514))
(assert_return (invoke "f64.no_fold_sqrt_square" (f64.const -0x1.214736edb6e1ep-521)) (f64.const 0x1.214736ed9cf8dp-521))
(assert_return (invoke "f64.no_fold_sqrt_square" (f64.const -0x1.0864b9f68457p-616)) (f64.const 0x0p+0))
-(assert_return (invoke "f64.no_fold_sqrt_square" (f64.const 0x1.b2a9855995abap+856)) (f64.const infinity))
+(assert_return (invoke "f64.no_fold_sqrt_square" (f64.const 0x1.b2a9855995abap+856)) (f64.const inf))
;; Test that sqrt(x)*sqrt(y) is not optimized to sqrt(x*y).
@@ -1579,13 +1790,13 @@
(f64.mul (f64.sqrt (local.get $x)) (f64.sqrt (local.get $y))))
)
-(assert_return_nan (invoke "f32.no_fold_mul_sqrts" (f32.const 0x1.dddda8p-125) (f32.const -0x1.25d22ap-83)))
+(assert_return_canonical_nan (invoke "f32.no_fold_mul_sqrts" (f32.const 0x1.dddda8p-125) (f32.const -0x1.25d22ap-83)))
(assert_return (invoke "f32.no_fold_mul_sqrts" (f32.const 0x1.418d14p-92) (f32.const 0x1.c6535cp-32)) (f32.const 0x1.7e373ap-62))
(assert_return (invoke "f32.no_fold_mul_sqrts" (f32.const 0x1.4de7ep-88) (f32.const 0x1.84ff18p+6)) (f32.const 0x1.686668p-41))
(assert_return (invoke "f32.no_fold_mul_sqrts" (f32.const 0x1.78091ep+101) (f32.const 0x1.81feb8p-9)) (f32.const 0x1.7cfb98p+46))
(assert_return (invoke "f32.no_fold_mul_sqrts" (f32.const 0x1.583ap-56) (f32.const 0x1.14ba2ap-9)) (f32.const 0x1.b47a8ep-33))
-(assert_return_nan (invoke "f64.no_fold_mul_sqrts" (f64.const -0x1.d1144cc28cdbep-635) (f64.const -0x1.bf9bc373d3b6ap-8)))
+(assert_return_canonical_nan (invoke "f64.no_fold_mul_sqrts" (f64.const -0x1.d1144cc28cdbep-635) (f64.const -0x1.bf9bc373d3b6ap-8)))
(assert_return (invoke "f64.no_fold_mul_sqrts" (f64.const 0x1.5a7eb976bebc9p-643) (f64.const 0x1.f30cb8865a4cap-404)) (f64.const 0x1.260a1032d6e76p-523))
(assert_return (invoke "f64.no_fold_mul_sqrts" (f64.const 0x1.711a0c1707935p-89) (f64.const 0x1.6fb5de51a20d3p-913)) (f64.const 0x1.7067ca28e31ecp-501))
(assert_return (invoke "f64.no_fold_mul_sqrts" (f64.const 0x1.fb0bbea33b076p-363) (f64.const 0x1.d963b34894158p-573)) (f64.const 0x1.e9edc1fa624afp-468))
@@ -1601,13 +1812,13 @@
(f64.div (f64.sqrt (local.get $x)) (f64.sqrt (local.get $y))))
)
-(assert_return_nan (invoke "f32.no_fold_div_sqrts" (f32.const -0x1.bea9bap+25) (f32.const -0x1.db776ep-58)))
+(assert_return_canonical_nan (invoke "f32.no_fold_div_sqrts" (f32.const -0x1.bea9bap+25) (f32.const -0x1.db776ep-58)))
(assert_return (invoke "f32.no_fold_div_sqrts" (f32.const 0x1.b983b6p+32) (f32.const 0x1.901f1ep+27)) (f32.const 0x1.7c4df6p+2))
(assert_return (invoke "f32.no_fold_div_sqrts" (f32.const 0x1.d45e72p-120) (f32.const 0x1.ab49ccp+15)) (f32.const 0x1.7b0b04p-68))
(assert_return (invoke "f32.no_fold_div_sqrts" (f32.const 0x1.b2e444p+59) (f32.const 0x1.5b8b16p-30)) (f32.const 0x1.94fca8p+44))
(assert_return (invoke "f32.no_fold_div_sqrts" (f32.const 0x1.835aa6p-112) (f32.const 0x1.d17128p-103)) (f32.const 0x1.4a468p-5))
-(assert_return_nan (invoke "f64.no_fold_div_sqrts" (f64.const -0x1.509fc16411167p-711) (f64.const -0x1.9c4255f5d6517p-187)))
+(assert_return_canonical_nan (invoke "f64.no_fold_div_sqrts" (f64.const -0x1.509fc16411167p-711) (f64.const -0x1.9c4255f5d6517p-187)))
(assert_return (invoke "f64.no_fold_div_sqrts" (f64.const 0x1.b6897bddac76p-587) (f64.const 0x1.104578b4c91f3p+541)) (f64.const 0x1.44e4f21f26cc9p-564))
(assert_return (invoke "f64.no_fold_div_sqrts" (f64.const 0x1.ac83451b08989p+523) (f64.const 0x1.8da575c6d12b8p-109)) (f64.const 0x1.09c003991ce17p+316))
(assert_return (invoke "f64.no_fold_div_sqrts" (f64.const 0x1.bab7836456417p-810) (f64.const 0x1.1ff60d03ba607p+291)) (f64.const 0x1.c0e6c833bf657p-551))
@@ -1623,13 +1834,13 @@
(f64.div (f64.mul (local.get $x) (f64.sqrt (local.get $y))) (local.get $y)))
)
-(assert_return (invoke "f32.no_fold_mul_sqrt_div" (f32.const -0x1.f4a7cap+81) (f32.const 0x1.c09adep+92)) (f32.const -infinity))
+(assert_return (invoke "f32.no_fold_mul_sqrt_div" (f32.const -0x1.f4a7cap+81) (f32.const 0x1.c09adep+92)) (f32.const -inf))
(assert_return (invoke "f32.no_fold_mul_sqrt_div" (f32.const -0x1.90bf1cp-120) (f32.const 0x1.8dbe88p-97)) (f32.const -0x0p+0))
(assert_return (invoke "f32.no_fold_mul_sqrt_div" (f32.const 0x1.8570e8p+29) (f32.const 0x1.217d3p-128)) (f32.const 0x1.6e391ap+93))
(assert_return (invoke "f32.no_fold_mul_sqrt_div" (f32.const -0x1.5b4652p+43) (f32.const 0x1.a9d71cp+112)) (f32.const -0x1.0d423ap-13))
(assert_return (invoke "f32.no_fold_mul_sqrt_div" (f32.const -0x1.910604p+8) (f32.const 0x1.0ca912p+7)) (f32.const -0x1.14cdecp+5))
-(assert_return (invoke "f64.no_fold_mul_sqrt_div" (f64.const 0x1.1dcdeb857305fp+698) (f64.const 0x1.a066171c40eb9p+758)) (f64.const infinity))
+(assert_return (invoke "f64.no_fold_mul_sqrt_div" (f64.const 0x1.1dcdeb857305fp+698) (f64.const 0x1.a066171c40eb9p+758)) (f64.const inf))
(assert_return (invoke "f64.no_fold_mul_sqrt_div" (f64.const -0x1.8b4f1c218e2abp-827) (f64.const 0x1.5e1ee65953b0bp-669)) (f64.const -0x0p+0))
(assert_return (invoke "f64.no_fold_mul_sqrt_div" (f64.const 0x1.74ee531ddba38p-425) (f64.const 0x1.f370f758857f3p+560)) (f64.const 0x1.0aff34269583ep-705))
(assert_return (invoke "f64.no_fold_mul_sqrt_div" (f64.const -0x1.27f216b0da6c5p+352) (f64.const 0x1.8e0b4e0b9fd7ep-483)) (f64.const -0x1.4fa558aad514ep+593))
@@ -1675,17 +1886,17 @@
(f32.mul (f32.demote_f64 (local.get $x)) (local.get $y)))
)
-(assert_return (invoke "f32.recoding_eq" (f32.const -infinity) (f32.const 3.0)) (i32.const 1))
-(assert_return (invoke "f32.recoding_le" (f32.const -infinity) (f32.const 3.0)) (i32.const 1))
-(assert_return (invoke "f32.recoding_lt" (f32.const -infinity) (f32.const 3.0)) (i32.const 0))
+(assert_return (invoke "f32.recoding_eq" (f32.const -inf) (f32.const 3.0)) (i32.const 1))
+(assert_return (invoke "f32.recoding_le" (f32.const -inf) (f32.const 3.0)) (i32.const 1))
+(assert_return (invoke "f32.recoding_lt" (f32.const -inf) (f32.const 3.0)) (i32.const 0))
(assert_return (invoke "f32.recoding_eq" (f32.const 0x0p+0) (f32.const 0x1p+0)) (i32.const 1))
(assert_return (invoke "f32.recoding_le" (f32.const 0x0p+0) (f32.const 0x1p+0)) (i32.const 1))
(assert_return (invoke "f32.recoding_lt" (f32.const 0x0p+0) (f32.const 0x1p+0)) (i32.const 0))
-(assert_return (invoke "f64.recoding_eq" (f64.const -infinity) (f64.const 3.0)) (i32.const 1))
-(assert_return (invoke "f64.recoding_le" (f64.const -infinity) (f64.const 3.0)) (i32.const 1))
-(assert_return (invoke "f64.recoding_lt" (f64.const -infinity) (f64.const 3.0)) (i32.const 0))
+(assert_return (invoke "f64.recoding_eq" (f64.const -inf) (f64.const 3.0)) (i32.const 1))
+(assert_return (invoke "f64.recoding_le" (f64.const -inf) (f64.const 3.0)) (i32.const 1))
+(assert_return (invoke "f64.recoding_lt" (f64.const -inf) (f64.const 3.0)) (i32.const 0))
(assert_return (invoke "f64.recoding_eq" (f64.const 0x0p+0) (f64.const 0x1p+0)) (i32.const 1))
(assert_return (invoke "f64.recoding_le" (f64.const 0x0p+0) (f64.const 0x1p+0)) (i32.const 1))
@@ -1890,9 +2101,9 @@
)
(assert_return (invoke "f32.no_fold_add_le_monotonicity" (f32.const 0.0) (f32.const 0.0) (f32.const nan)) (i32.const 0))
-(assert_return (invoke "f32.no_fold_add_le_monotonicity" (f32.const infinity) (f32.const -infinity) (f32.const infinity)) (i32.const 0))
+(assert_return (invoke "f32.no_fold_add_le_monotonicity" (f32.const inf) (f32.const -inf) (f32.const inf)) (i32.const 0))
(assert_return (invoke "f64.no_fold_add_le_monotonicity" (f64.const 0.0) (f64.const 0.0) (f64.const nan)) (i32.const 0))
-(assert_return (invoke "f64.no_fold_add_le_monotonicity" (f64.const infinity) (f64.const -infinity) (f64.const infinity)) (i32.const 0))
+(assert_return (invoke "f64.no_fold_add_le_monotonicity" (f64.const inf) (f64.const -inf) (f64.const inf)) (i32.const 0))
;; Test that !(x < y) and friends are not optimized to x >= y and friends.
@@ -1946,6 +2157,61 @@
(assert_return (invoke "f32.epsilon") (f32.const -0x1p-23))
(assert_return (invoke "f64.epsilon") (f64.const 0x1p-52))
+;; Test that a method for computing a "machine epsilon" produces the expected
+;; result.
+;; https://www.math.utah.edu/~beebe/software/ieee/
+
+(module
+ (func (export "f32.epsilon") (result f32)
+ (local $x f32)
+ (local $result f32)
+ (local.set $x (f32.const 1))
+ (loop $loop
+ (br_if $loop
+ (f32.gt
+ (f32.add
+ (local.tee $x
+ (f32.mul
+ (local.tee $result (local.get $x))
+ (f32.const 0.5)
+ )
+ )
+ (f32.const 1)
+ )
+ (f32.const 1)
+ )
+ )
+ )
+ (local.get $result)
+ )
+
+ (func (export "f64.epsilon") (result f64)
+ (local $x f64)
+ (local $result f64)
+ (local.set $x (f64.const 1))
+ (loop $loop
+ (br_if $loop
+ (f64.gt
+ (f64.add
+ (local.tee $x
+ (f64.mul
+ (local.tee $result (local.get $x))
+ (f64.const 0.5)
+ )
+ )
+ (f64.const 1)
+ )
+ (f64.const 1)
+ )
+ )
+ )
+ (local.get $result)
+ )
+)
+
+(assert_return (invoke "f32.epsilon") (f32.const 0x1p-23))
+(assert_return (invoke "f64.epsilon") (f64.const 0x1p-52))
+
;; Test that floating-point numbers are not optimized as if they form a
;; trichotomy.
@@ -1977,3 +2243,328 @@
(assert_return (invoke "f64.no_trichotomy_le" (f64.const 0.0) (f64.const nan)) (i32.const 0))
(assert_return (invoke "f64.no_trichotomy_gt" (f64.const 0.0) (f64.const nan)) (i32.const 0))
(assert_return (invoke "f64.no_trichotomy_ge" (f64.const 0.0) (f64.const nan)) (i32.const 0))
+
+;; Some test harnesses which can run this testsuite are unable to perform tests
+;; of NaN bitpatterns. The following tests whether the underlying platform is
+;; generally producing the kinds of NaNs expected.
+(module
+ (func (export "f32.arithmetic_nan_bitpattern")
+ (param $x i32) (param $y i32) (result i32)
+ (i32.and (i32.reinterpret_f32
+ (f32.div
+ (f32.reinterpret_i32 (local.get $x))
+ (f32.reinterpret_i32 (local.get $y))))
+ (i32.const 0x7fc00000)))
+ (func (export "f32.canonical_nan_bitpattern")
+ (param $x i32) (param $y i32) (result i32)
+ (i32.and (i32.reinterpret_f32
+ (f32.div
+ (f32.reinterpret_i32 (local.get $x))
+ (f32.reinterpret_i32 (local.get $y))))
+ (i32.const 0x7fffffff)))
+ (func (export "f32.nonarithmetic_nan_bitpattern")
+ (param $x i32) (result i32)
+ (i32.reinterpret_f32 (f32.neg (f32.reinterpret_i32 (local.get $x)))))
+
+ (func (export "f64.arithmetic_nan_bitpattern")
+ (param $x i64) (param $y i64) (result i64)
+ (i64.and (i64.reinterpret_f64
+ (f64.div
+ (f64.reinterpret_i64 (local.get $x))
+ (f64.reinterpret_i64 (local.get $y))))
+ (i64.const 0x7ff8000000000000)))
+ (func (export "f64.canonical_nan_bitpattern")
+ (param $x i64) (param $y i64) (result i64)
+ (i64.and (i64.reinterpret_f64
+ (f64.div
+ (f64.reinterpret_i64 (local.get $x))
+ (f64.reinterpret_i64 (local.get $y))))
+ (i64.const 0x7fffffffffffffff)))
+ (func (export "f64.nonarithmetic_nan_bitpattern")
+ (param $x i64) (result i64)
+ (i64.reinterpret_f64 (f64.neg (f64.reinterpret_i64 (local.get $x)))))
+
+ ;; Versions of no_fold testcases that only care about NaN bitpatterns.
+ (func (export "f32.no_fold_sub_zero") (param $x i32) (result i32)
+ (i32.and (i32.reinterpret_f32 (f32.sub (f32.reinterpret_i32 (local.get $x)) (f32.const 0.0)))
+ (i32.const 0x7fc00000)))
+ (func (export "f32.no_fold_neg0_sub") (param $x i32) (result i32)
+ (i32.and (i32.reinterpret_f32 (f32.sub (f32.const -0.0) (f32.reinterpret_i32 (local.get $x))))
+ (i32.const 0x7fc00000)))
+ (func (export "f32.no_fold_mul_one") (param $x i32) (result i32)
+ (i32.and (i32.reinterpret_f32 (f32.mul (f32.reinterpret_i32 (local.get $x)) (f32.const 1.0)))
+ (i32.const 0x7fc00000)))
+ (func (export "f32.no_fold_neg1_mul") (param $x i32) (result i32)
+ (i32.and (i32.reinterpret_f32 (f32.mul (f32.const -1.0) (f32.reinterpret_i32 (local.get $x))))
+ (i32.const 0x7fc00000)))
+ (func (export "f32.no_fold_div_one") (param $x i32) (result i32)
+ (i32.and (i32.reinterpret_f32 (f32.div (f32.reinterpret_i32 (local.get $x)) (f32.const 1.0)))
+ (i32.const 0x7fc00000)))
+ (func (export "f32.no_fold_div_neg1") (param $x i32) (result i32)
+ (i32.and (i32.reinterpret_f32 (f32.div (f32.reinterpret_i32 (local.get $x)) (f32.const -1.0)))
+ (i32.const 0x7fc00000)))
+ (func (export "f64.no_fold_sub_zero") (param $x i64) (result i64)
+ (i64.and (i64.reinterpret_f64 (f64.sub (f64.reinterpret_i64 (local.get $x)) (f64.const 0.0)))
+ (i64.const 0x7ff8000000000000)))
+ (func (export "f64.no_fold_neg0_sub") (param $x i64) (result i64)
+ (i64.and (i64.reinterpret_f64 (f64.sub (f64.const -0.0) (f64.reinterpret_i64 (local.get $x))))
+ (i64.const 0x7ff8000000000000)))
+ (func (export "f64.no_fold_mul_one") (param $x i64) (result i64)
+ (i64.and (i64.reinterpret_f64 (f64.mul (f64.reinterpret_i64 (local.get $x)) (f64.const 1.0)))
+ (i64.const 0x7ff8000000000000)))
+ (func (export "f64.no_fold_neg1_mul") (param $x i64) (result i64)
+ (i64.and (i64.reinterpret_f64 (f64.mul (f64.const -1.0) (f64.reinterpret_i64 (local.get $x))))
+ (i64.const 0x7ff8000000000000)))
+ (func (export "f64.no_fold_div_one") (param $x i64) (result i64)
+ (i64.and (i64.reinterpret_f64 (f64.div (f64.reinterpret_i64 (local.get $x)) (f64.const 1.0)))
+ (i64.const 0x7ff8000000000000)))
+ (func (export "f64.no_fold_div_neg1") (param $x i64) (result i64)
+ (i64.and (i64.reinterpret_f64 (f64.div (f64.reinterpret_i64 (local.get $x)) (f64.const -1.0)))
+ (i64.const 0x7ff8000000000000)))
+ (func (export "no_fold_promote_demote") (param $x i32) (result i32)
+ (i32.and (i32.reinterpret_f32 (f32.demote_f64 (f64.promote_f32 (f32.reinterpret_i32 (local.get $x)))))
+ (i32.const 0x7fc00000)))
+)
+
+(assert_return (invoke "f32.arithmetic_nan_bitpattern" (i32.const 0x7f803210) (i32.const 0x7f803210)) (i32.const 0x7fc00000))
+(assert_return (invoke "f32.canonical_nan_bitpattern" (i32.const 0) (i32.const 0)) (i32.const 0x7fc00000))
+(assert_return (invoke "f32.canonical_nan_bitpattern" (i32.const 0x7fc00000) (i32.const 0x7fc00000)) (i32.const 0x7fc00000))
+(assert_return (invoke "f32.canonical_nan_bitpattern" (i32.const 0xffc00000) (i32.const 0x7fc00000)) (i32.const 0x7fc00000))
+(assert_return (invoke "f32.canonical_nan_bitpattern" (i32.const 0x7fc00000) (i32.const 0xffc00000)) (i32.const 0x7fc00000))
+(assert_return (invoke "f32.canonical_nan_bitpattern" (i32.const 0xffc00000) (i32.const 0xffc00000)) (i32.const 0x7fc00000))
+(assert_return (invoke "f32.nonarithmetic_nan_bitpattern" (i32.const 0x7fc03210)) (i32.const 0xffc03210))
+(assert_return (invoke "f32.nonarithmetic_nan_bitpattern" (i32.const 0xffc03210)) (i32.const 0x7fc03210))
+(assert_return (invoke "f32.nonarithmetic_nan_bitpattern" (i32.const 0x7f803210)) (i32.const 0xff803210))
+(assert_return (invoke "f32.nonarithmetic_nan_bitpattern" (i32.const 0xff803210)) (i32.const 0x7f803210))
+(assert_return (invoke "f64.arithmetic_nan_bitpattern" (i64.const 0x7ff0000000003210) (i64.const 0x7ff0000000003210)) (i64.const 0x7ff8000000000000))
+(assert_return (invoke "f64.canonical_nan_bitpattern" (i64.const 0) (i64.const 0)) (i64.const 0x7ff8000000000000))
+(assert_return (invoke "f64.canonical_nan_bitpattern" (i64.const 0x7ff8000000000000) (i64.const 0x7ff8000000000000)) (i64.const 0x7ff8000000000000))
+(assert_return (invoke "f64.canonical_nan_bitpattern" (i64.const 0xfff8000000000000) (i64.const 0x7ff8000000000000)) (i64.const 0x7ff8000000000000))
+(assert_return (invoke "f64.canonical_nan_bitpattern" (i64.const 0x7ff8000000000000) (i64.const 0xfff8000000000000)) (i64.const 0x7ff8000000000000))
+(assert_return (invoke "f64.canonical_nan_bitpattern" (i64.const 0xfff8000000000000) (i64.const 0xfff8000000000000)) (i64.const 0x7ff8000000000000))
+(assert_return (invoke "f64.nonarithmetic_nan_bitpattern" (i64.const 0x7ff8000000003210)) (i64.const 0xfff8000000003210))
+(assert_return (invoke "f64.nonarithmetic_nan_bitpattern" (i64.const 0xfff8000000003210)) (i64.const 0x7ff8000000003210))
+(assert_return (invoke "f64.nonarithmetic_nan_bitpattern" (i64.const 0x7ff0000000003210)) (i64.const 0xfff0000000003210))
+(assert_return (invoke "f64.nonarithmetic_nan_bitpattern" (i64.const 0xfff0000000003210)) (i64.const 0x7ff0000000003210))
+(assert_return (invoke "f32.no_fold_sub_zero" (i32.const 0x7fa00000)) (i32.const 0x7fc00000))
+(assert_return (invoke "f32.no_fold_neg0_sub" (i32.const 0x7fa00000)) (i32.const 0x7fc00000))
+(assert_return (invoke "f32.no_fold_mul_one" (i32.const 0x7fa00000)) (i32.const 0x7fc00000))
+(assert_return (invoke "f32.no_fold_neg1_mul" (i32.const 0x7fa00000)) (i32.const 0x7fc00000))
+(assert_return (invoke "f32.no_fold_div_one" (i32.const 0x7fa00000)) (i32.const 0x7fc00000))
+(assert_return (invoke "f32.no_fold_div_neg1" (i32.const 0x7fa00000)) (i32.const 0x7fc00000))
+(assert_return (invoke "f64.no_fold_sub_zero" (i64.const 0x7ff4000000000000)) (i64.const 0x7ff8000000000000))
+(assert_return (invoke "f64.no_fold_neg0_sub" (i64.const 0x7ff4000000000000)) (i64.const 0x7ff8000000000000))
+(assert_return (invoke "f64.no_fold_mul_one" (i64.const 0x7ff4000000000000)) (i64.const 0x7ff8000000000000))
+(assert_return (invoke "f64.no_fold_neg1_mul" (i64.const 0x7ff4000000000000)) (i64.const 0x7ff8000000000000))
+(assert_return (invoke "f64.no_fold_div_one" (i64.const 0x7ff4000000000000)) (i64.const 0x7ff8000000000000))
+(assert_return (invoke "f64.no_fold_div_neg1" (i64.const 0x7ff4000000000000)) (i64.const 0x7ff8000000000000))
+(assert_return (invoke "no_fold_promote_demote" (i32.const 0x7fa00000)) (i32.const 0x7fc00000))
+
+;; Test that IEEE 754 double precision does, in fact, compute a certain dot
+;; product correctly.
+
+(module
+  (func (export "dot_product_example")
+    (param $x0 f64) (param $x1 f64) (param $x2 f64) (param $x3 f64)
+    (param $y0 f64) (param $y1 f64) (param $y2 f64) (param $y3 f64)
+    (result f64)
+    ;; Left-to-right accumulation: ((x0*y0 + x1*y1) + x2*y2) + x3*y3.
+    (f64.add (f64.add (f64.add
+      (f64.mul (local.get $x0) (local.get $y0))
+      (f64.mul (local.get $x1) (local.get $y1)))
+      (f64.mul (local.get $x2) (local.get $y2)))
+      (f64.mul (local.get $x3) (local.get $y3)))
+  )
+
+  (func (export "with_binary_sum_collapse")
+    (param $x0 f64) (param $x1 f64) (param $x2 f64) (param $x3 f64)
+    (param $y0 f64) (param $y1 f64) (param $y2 f64) (param $y3 f64)
+    (result f64)
+    ;; Same dot product with a balanced (pairwise) summation tree:
+    ;; (x0*y0 + x1*y1) + (x2*y2 + x3*y3).
+    (f64.add (f64.add (f64.mul (local.get $x0) (local.get $y0))
+                      (f64.mul (local.get $x1) (local.get $y1)))
+             (f64.add (f64.mul (local.get $x2) (local.get $y2))
+                      (f64.mul (local.get $x3) (local.get $y3))))
+  )
+)
+
+;; The big products 3.2e7*4.0e7 and 8.0e7*(-1.6e7) are both exactly 1.28e15
+;; (an integer below 2^53, so exactly representable) and cancel; the remaining
+;; 1.0*1.0 + (-1.0)*(-1.0) must leave exactly 2.0 under either summation order.
+(assert_return (invoke "dot_product_example"
+  (f64.const 3.2e7) (f64.const 1.0) (f64.const -1.0) (f64.const 8.0e7)
+  (f64.const 4.0e7) (f64.const 1.0) (f64.const -1.0) (f64.const -1.6e7))
+  (f64.const 2.0))
+(assert_return (invoke "with_binary_sum_collapse"
+  (f64.const 3.2e7) (f64.const 1.0) (f64.const -1.0) (f64.const 8.0e7)
+  (f64.const 4.0e7) (f64.const 1.0) (f64.const -1.0) (f64.const -1.6e7))
+  (f64.const 2.0))
+
+;; http://www.vinc17.org/research/fptest.en.html#contract2fma
+
+(module
+  ;; Computes sqrt(x*x - y*y). With x == y the subtraction must be exactly 0
+  ;; and sqrt must return 0. NOTE(review): per the linked page, contracting
+  ;; the mul+sub into a fused multiply-add can make x*x - y*y come out
+  ;; negative for these inputs, turning the sqrt into NaN — which is what
+  ;; this test is guarding against.
+  (func (export "f32.contract2fma")
+    (param $x f32) (param $y f32) (result f32)
+    (f32.sqrt (f32.sub (f32.mul (local.get $x) (local.get $x))
+                       (f32.mul (local.get $y) (local.get $y)))))
+  (func (export "f64.contract2fma")
+    (param $x f64) (param $y f64) (result f64)
+    (f64.sqrt (f64.sub (f64.mul (local.get $x) (local.get $x))
+                       (f64.mul (local.get $y) (local.get $y)))))
+)
+
+;; Each call passes x == y, so the exact (non-contracted) result is 0.0.
+(assert_return (invoke "f32.contract2fma" (f32.const 1.0) (f32.const 1.0)) (f32.const 0.0))
+(assert_return (invoke "f32.contract2fma" (f32.const 0x1.19999ap+0) (f32.const 0x1.19999ap+0)) (f32.const 0.0))
+(assert_return (invoke "f32.contract2fma" (f32.const 0x1.333332p+0) (f32.const 0x1.333332p+0)) (f32.const 0.0))
+(assert_return (invoke "f64.contract2fma" (f64.const 1.0) (f64.const 1.0)) (f64.const 0.0))
+(assert_return (invoke "f64.contract2fma" (f64.const 0x1.199999999999ap+0) (f64.const 0x1.199999999999ap+0)) (f64.const 0.0))
+(assert_return (invoke "f64.contract2fma" (f64.const 0x1.3333333333333p+0) (f64.const 0x1.3333333333333p+0)) (f64.const 0.0))
+
+;; Test that floating-point isn't implemented with QuickBasic for MS-DOS.
+;; https://support.microsoft.com/en-us/help/42980/-complete-tutorial-to-understand-ieee-floating-point-errors
+
+(module
+  ;; Computes a - b/c in the stated precision; division by the small
+  ;; divisor c must be correctly rounded, not approximated.
+  (func (export "f32.division_by_small_number")
+    (param $a f32) (param $b f32) (param $c f32) (result f32)
+    (f32.sub (local.get $a) (f32.div (local.get $b) (local.get $c))))
+  (func (export "f64.division_by_small_number")
+    (param $a f64) (param $b f64) (param $c f64) (result f64)
+    (f64.sub (local.get $a) (f64.div (local.get $b) (local.get $c))))
+)
+
+;; 112000000 - 100000/0.0009, rounded per the respective precision.
+(assert_return (invoke "f32.division_by_small_number" (f32.const 112000000) (f32.const 100000) (f32.const 0.0009)) (f32.const 888888))
+(assert_return (invoke "f64.division_by_small_number" (f64.const 112000000) (f64.const 100000) (f64.const 0.0009)) (f64.const 888888.8888888806))
+
+;; Test a simple golden ratio computation.
+;; http://mathworld.wolfram.com/GoldenRatio.html
+
+(module
+  ;; Computes a * (b + sqrt(c)); invoked below as 0.5 * (1 + sqrt(5)),
+  ;; the golden ratio phi.
+  (func (export "f32.golden_ratio") (param $a f32) (param $b f32) (param $c f32) (result f32)
+    (f32.mul (local.get 0) (f32.add (local.get 1) (f32.sqrt (local.get 2)))))
+  (func (export "f64.golden_ratio") (param $a f64) (param $b f64) (param $c f64) (result f64)
+    (f64.mul (local.get 0) (f64.add (local.get 1) (f64.sqrt (local.get 2)))))
+)
+
+(assert_return (invoke "f32.golden_ratio" (f32.const 0.5) (f32.const 1.0) (f32.const 5.0)) (f32.const 1.618034))
+(assert_return (invoke "f64.golden_ratio" (f64.const 0.5) (f64.const 1.0) (f64.const 5.0)) (f64.const 1.618033988749895))
+
+;; Test some silver means computations.
+;; http://mathworld.wolfram.com/SilverRatio.html
+
+(module
+  ;; Computes the n-th silver mean: (n + sqrt(n*n + 4)) / 2.
+  ;; n = 1 gives the golden ratio, n = 2 the silver ratio 1 + sqrt(2).
+  (func (export "f32.silver_means") (param $n f32) (result f32)
+    (f32.mul (f32.const 0.5)
+             (f32.add (local.get $n)
+                      (f32.sqrt (f32.add (f32.mul (local.get $n) (local.get $n))
+                                         (f32.const 4.0))))))
+  (func (export "f64.silver_means") (param $n f64) (result f64)
+    (f64.mul (f64.const 0.5)
+             (f64.add (local.get $n)
+                      (f64.sqrt (f64.add (f64.mul (local.get $n) (local.get $n))
+                                         (f64.const 4.0))))))
+)
+
+(assert_return (invoke "f32.silver_means" (f32.const 0.0)) (f32.const 1.0))
+(assert_return (invoke "f32.silver_means" (f32.const 1.0)) (f32.const 1.6180340))
+(assert_return (invoke "f32.silver_means" (f32.const 2.0)) (f32.const 2.4142136))
+(assert_return (invoke "f32.silver_means" (f32.const 3.0)) (f32.const 3.3027756))
+(assert_return (invoke "f32.silver_means" (f32.const 4.0)) (f32.const 4.2360680))
+(assert_return (invoke "f32.silver_means" (f32.const 5.0)) (f32.const 5.1925821))
+(assert_return (invoke "f64.silver_means" (f64.const 0.0)) (f64.const 1.0))
+(assert_return (invoke "f64.silver_means" (f64.const 1.0)) (f64.const 1.618033988749895))
+(assert_return (invoke "f64.silver_means" (f64.const 2.0)) (f64.const 2.414213562373095))
+(assert_return (invoke "f64.silver_means" (f64.const 3.0)) (f64.const 3.302775637731995))
+(assert_return (invoke "f64.silver_means" (f64.const 4.0)) (f64.const 4.236067977499790))
+(assert_return (invoke "f64.silver_means" (f64.const 5.0)) (f64.const 5.192582403567252))
+
+;; Test that an f64 0.4 isn't double-rounded as via extended precision.
+;; https://bugs.llvm.org/show_bug.cgi?id=11200
+
+(module
+  ;; Returns (four/ten) < 0.4. A correctly rounded f64 division of 4.0 by
+  ;; 10.0 yields exactly the same double as the literal 0.4, so the
+  ;; comparison must be false (0); a double-rounded extended-precision
+  ;; quotient could compare less-than.
+  (func (export "point_four") (param $four f64) (param $ten f64) (result i32)
+    (f64.lt (f64.div (local.get $four) (local.get $ten)) (f64.const 0.4)))
+)
+
+(assert_return (invoke "point_four" (f64.const 4.0) (f64.const 10.0)) (i32.const 0))
+
+;; Test an approximation function for tau; it should produce the correctly
+;; rounded result after (and only after) the expected number of iterations.
+
+(module
+  ;; Computes tau (= 2*pi) with the first N terms of a BBP-style series:
+  ;;   sum over k >= 0 of 16^-k * (8/(8k+1) - 4/(8k+4) - 2/(8k+5) - 2/(8k+6))
+  ;; (twice the classic Bailey-Borwein-Plouffe formula for pi).
+  ;; Locals: 0 = remaining iteration count N, 1 = accumulator,
+  ;;         2 = 16^-k, 3 = k, 4 = 8k.
+  (func (export "tau") (param i32) (result f64)
+    (local f64 f64 f64 f64)
+    f64.const 0x0p+0
+    local.set 1
+    block
+      ;; N < 1: skip the loop, return the 0.0 accumulator.
+      local.get 0
+      i32.const 1
+      i32.lt_s
+      br_if 0
+      f64.const 0x1p+0
+      local.set 2
+      f64.const 0x0p+0
+      local.set 3
+      loop
+        ;; acc += 16^-k * (8/(8k+1) - 4/(8k+4) - 2/(8k+5) - 2/(8k+6))
+        local.get 1
+        local.get 2
+        f64.const 0x1p+3
+        local.get 3
+        f64.const 0x1p+3
+        f64.mul
+        local.tee 4
+        f64.const 0x1p+0
+        f64.add
+        f64.div
+        f64.const 0x1p+2
+        local.get 4
+        f64.const 0x1p+2
+        f64.add
+        f64.div
+        f64.sub
+        f64.const 0x1p+1
+        local.get 4
+        f64.const 0x1.4p+2
+        f64.add
+        f64.div
+        f64.sub
+        f64.const 0x1p+1
+        local.get 4
+        f64.const 0x1.8p+2
+        f64.add
+        f64.div
+        f64.sub
+        f64.mul
+        f64.add
+        local.set 1
+        ;; k += 1; scale 16^-k down by 1/16; loop while --N != 0.
+        local.get 3
+        f64.const 0x1p+0
+        f64.add
+        local.set 3
+        local.get 2
+        f64.const 0x1p-4
+        f64.mul
+        local.set 2
+        local.get 0
+        i32.const -1
+        i32.add
+        local.tee 0
+        br_if 0
+      end
+    end
+    local.get 1
+  )
+)
+
+;; 10 iterations is still 4 ulps short; 11 iterations reaches the correctly
+;; rounded double for tau, 0x1.921fb54442d18p+2.
+(assert_return (invoke "tau" (i32.const 10)) (f64.const 0x1.921fb54442d14p+2))
+(assert_return (invoke "tau" (i32.const 11)) (f64.const 0x1.921fb54442d18p+2))
+
+;; Test that y < 0 ? x : (x + 1) is not folded to x + (y < 0).
+
+(module
+  ;; select(x, x + 1, y < 0): returns x when y < 0, else x + 1.
+  (func (export "f32.no_fold_conditional_inc") (param $x f32) (param $y f32) (result f32)
+    (select (local.get $x)
+            (f32.add (local.get $x) (f32.const 1.0))
+            (f32.lt (local.get $y) (f32.const 0.0))))
+  (func (export "f64.no_fold_conditional_inc") (param $x f64) (param $y f64) (result f64)
+    (select (local.get $x)
+            (f64.add (local.get $x) (f64.const 1.0))
+            (f64.lt (local.get $y) (f64.const 0.0))))
+)
+
+;; y = -1 selects the bare x, which must stay -0.0. The folded form
+;; x + (y < 0 ? 0 : 1) would compute -0.0 + 0.0 = +0.0, losing the sign.
+(assert_return (invoke "f32.no_fold_conditional_inc" (f32.const -0.0) (f32.const -1.0)) (f32.const -0.0))
+(assert_return (invoke "f64.no_fold_conditional_inc" (f64.const -0.0) (f64.const -1.0)) (f64.const -0.0))