Diffstat (limited to 'test/spec/simd.wast')
-rw-r--r--  test/spec/simd.wast  406
1 file changed, 203 insertions, 203 deletions
diff --git a/test/spec/simd.wast b/test/spec/simd.wast
index cec571387..351de4df2 100644
--- a/test/spec/simd.wast
+++ b/test/spec/simd.wast
@@ -297,13 +297,13 @@

 ;; i16x8 lane accesses
 (assert_return (invoke "i16x8.splat" (i32.const 5)) (v128.const i16x8 5 5 5 5 5 5 5 5))
-(assert_return (invoke "i16x8.splat" (i32.const 65537)) (v128.const i32x4 1 1 1 1 1 1 1 1))
-(assert_return (invoke "i16x8.extract_lane_s_first" (v128.const i32x4 65535 0 0 0 0 0 0 0)) (i32.const -1))
-(assert_return (invoke "i16x8.extract_lane_s_last" (v128.const i32x4 0 0 0 0 0 0 0 65535)) (i32.const -1))
-(assert_return (invoke "i16x8.extract_lane_u_first" (v128.const i32x4 65535 0 0 0 0 0 0 0)) (i32.const 65535))
-(assert_return (invoke "i16x8.extract_lane_u_last" (v128.const i32x4 0 0 0 0 0 0 0 65535)) (i32.const 65535))
-(assert_return (invoke "i16x8.replace_lane_first" (v128.const i64x2 0 0) (i32.const 7)) (v128.const i32x4 7 0 0 0 0 0 0 0))
-(assert_return (invoke "i16x8.replace_lane_last" (v128.const i64x2 0 0) (i32.const 7)) (v128.const i32x4 0 0 0 0 0 0 0 7))
+(assert_return (invoke "i16x8.splat" (i32.const 65537)) (v128.const i16x8 1 1 1 1 1 1 1 1))
+(assert_return (invoke "i16x8.extract_lane_s_first" (v128.const i16x8 65535 0 0 0 0 0 0 0)) (i32.const -1))
+(assert_return (invoke "i16x8.extract_lane_s_last" (v128.const i16x8 0 0 0 0 0 0 0 65535)) (i32.const -1))
+(assert_return (invoke "i16x8.extract_lane_u_first" (v128.const i16x8 65535 0 0 0 0 0 0 0)) (i32.const 65535))
+(assert_return (invoke "i16x8.extract_lane_u_last" (v128.const i16x8 0 0 0 0 0 0 0 65535)) (i32.const 65535))
+(assert_return (invoke "i16x8.replace_lane_first" (v128.const i64x2 0 0) (i32.const 7)) (v128.const i16x8 7 0 0 0 0 0 0 0))
+(assert_return (invoke "i16x8.replace_lane_last" (v128.const i64x2 0 0) (i32.const 7)) (v128.const i16x8 0 0 0 0 0 0 0 7))

 ;; i32x4 lane accesses
 (assert_return (invoke "i32x4.splat" (i32.const -5)) (v128.const i32x4 -5 -5 -5 -5))
@@ -336,144 +336,144 @@
 ;; i8x16 comparisons
 (assert_return
   (invoke "i8x16.eq"
-    (v128.const i32x4 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
-    (v128.const i32x4 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
+    (v128.const i8x16 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
+    (v128.const i8x16 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
   )
-  (v128.const i32x4 -1 0 -1 0 0 0 0 0 -1 0 0 -1 0 0 0 0)
+  (v128.const i8x16 -1 0 -1 0 0 0 0 0 -1 0 0 -1 0 0 0 0)
 )
 (assert_return
   (invoke "i8x16.ne"
-    (v128.const i32x4 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
-    (v128.const i32x4 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
+    (v128.const i8x16 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
+    (v128.const i8x16 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
   )
-  (v128.const i32x4 0 -1 0 -1 -1 -1 -1 -1 0 -1 -1 0 -1 -1 -1 -1)
+  (v128.const i8x16 0 -1 0 -1 -1 -1 -1 -1 0 -1 -1 0 -1 -1 -1 -1)
 )
 (assert_return
   (invoke "i8x16.lt_s"
-    (v128.const i32x4 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
-    (v128.const i32x4 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
+    (v128.const i8x16 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
+    (v128.const i8x16 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
   )
-  (v128.const i32x4 0 0 0 -1 0 -1 -1 0 0 0 -1 0 0 -1 -1 0)
+  (v128.const i8x16 0 0 0 -1 0 -1 -1 0 0 0 -1 0 0 -1 -1 0)
 )
 (assert_return
   (invoke "i8x16.lt_u"
-    (v128.const i32x4 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
-    (v128.const i32x4 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
+    (v128.const i8x16 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
+    (v128.const i8x16 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
   )
-  (v128.const i32x4 0 -1 0 0 -1 -1 0 -1 0 -1 0 0 -1 -1 0 -1)
+  (v128.const i8x16 0 -1 0 0 -1 -1 0 -1 0 -1 0 0 -1 -1 0 -1)
 )
 (assert_return
   (invoke "i8x16.gt_s"
-    (v128.const i32x4 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
-    (v128.const i32x4 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
+    (v128.const i8x16 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
+    (v128.const i8x16 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
   )
-  (v128.const i32x4 0 -1 0 0 -1 0 0 -1 0 -1 0 0 -1 0 0 -1)
+  (v128.const i8x16 0 -1 0 0 -1 0 0 -1 0 -1 0 0 -1 0 0 -1)
 )
 (assert_return
   (invoke "i8x16.gt_u"
-    (v128.const i32x4 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
-    (v128.const i32x4 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
+    (v128.const i8x16 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
+    (v128.const i8x16 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
   )
-  (v128.const i32x4 0 0 0 -1 0 0 -1 0 0 0 -1 0 0 0 -1 0)
+  (v128.const i8x16 0 0 0 -1 0 0 -1 0 0 0 -1 0 0 0 -1 0)
 )
 (assert_return
   (invoke "i8x16.le_s"
-    (v128.const i32x4 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
-    (v128.const i32x4 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
+    (v128.const i8x16 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
+    (v128.const i8x16 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
   )
-  (v128.const i32x4 -1 0 -1 -1 0 -1 -1 0 -1 0 -1 -1 0 -1 -1 0)
+  (v128.const i8x16 -1 0 -1 -1 0 -1 -1 0 -1 0 -1 -1 0 -1 -1 0)
 )
 (assert_return
   (invoke "i8x16.le_u"
-    (v128.const i32x4 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
-    (v128.const i32x4 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
+    (v128.const i8x16 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
+    (v128.const i8x16 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
  )
-  (v128.const i32x4 -1 -1 -1 0 -1 -1 0 -1 -1 -1 0 -1 -1 -1 0 -1)
+  (v128.const i8x16 -1 -1 -1 0 -1 -1 0 -1 -1 -1 0 -1 -1 -1 0 -1)
 )
 (assert_return
   (invoke "i8x16.ge_s"
-    (v128.const i32x4 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
-    (v128.const i32x4 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
+    (v128.const i8x16 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
+    (v128.const i8x16 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
   )
-  (v128.const i32x4 -1 -1 -1 0 -1 0 0 -1 -1 -1 0 -1 -1 0 0 -1)
+  (v128.const i8x16 -1 -1 -1 0 -1 0 0 -1 -1 -1 0 -1 -1 0 0 -1)
 )
 (assert_return
   (invoke "i8x16.ge_u"
-    (v128.const i32x4 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
-    (v128.const i32x4 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
  )
+    (v128.const i8x16 0 127 13 128 1 13 129 42 0 127 255 42 1 13 129 42)
+    (v128.const i8x16 0 255 13 42 129 127 0 128 0 255 13 42 129 127 0 128)
   )
-  (v128.const i32x4 -1 0 -1 -1 0 0 -1 0 -1 0 -1 -1 0 0 -1 0)
+  (v128.const i8x16 -1 0 -1 -1 0 0 -1 0 -1 0 -1 -1 0 0 -1 0)
 )

 ;; i16x8 comparisons
 (assert_return
   (invoke "i16x8.eq"
-    (v128.const i32x4 0 32767 13 32768 1 32769 42 40000)
-    (v128.const i32x4 0 13 1 32767 32769 42 40000 32767)
+    (v128.const i16x8 0 32767 13 32768 1 32769 42 40000)
+    (v128.const i16x8 0 13 1 32767 32769 42 40000 32767)
   )
-  (v128.const i32x4 -1 0 0 0 0 0 0 0)
+  (v128.const i16x8 -1 0 0 0 0 0 0 0)
 )
 (assert_return
   (invoke "i16x8.ne"
-    (v128.const i32x4 0 32767 13 32768 1 32769 42 40000)
-    (v128.const i32x4 0 13 1 32767 32769 42 40000 32767)
   )
-  (v128.const i32x4 0 -1 -1 -1 -1 -1 -1 -1)
+    (v128.const i16x8 0 32767 13 32768 1 32769 42 40000)
+    (v128.const i16x8 0 13 1 32767 32769 42 40000 32767)
+  )
+  (v128.const i16x8 0 -1 -1 -1 -1 -1 -1 -1)
 )
 (assert_return
   (invoke "i16x8.lt_s"
-    (v128.const i32x4 0 32767 13 32768 1 32769 42 40000)
-    (v128.const i32x4 0 13 1 32767 32769 42 40000 32767)
+    (v128.const i16x8 0 32767 13 32768 1 32769 42 40000)
+    (v128.const i16x8 0 13 1 32767 32769 42 40000 32767)
   )
-  (v128.const i32x4 0 0 0 -1 0 -1 0 -1)
+  (v128.const i16x8 0 0 0 -1 0 -1 0 -1)
 )
 (assert_return
   (invoke "i16x8.lt_u"
-    (v128.const i32x4 0 32767 13 32768 1 32769 42 40000)
-    (v128.const i32x4 0 13 1 32767 32769 42 40000 32767)
+    (v128.const i16x8 0 32767 13 32768 1 32769 42 40000)
+    (v128.const i16x8 0 13 1 32767 32769 42 40000 32767)
   )
-  (v128.const i32x4 0 0 0 0 -1 0 -1 0)
+  (v128.const i16x8 0 0 0 0 -1 0 -1 0)
 )
 (assert_return
   (invoke "i16x8.gt_s"
-    (v128.const i32x4 0 32767 13 32768 1 32769 42 40000)
-    (v128.const i32x4 0 13 1 32767 32769 42 40000 32767)
+    (v128.const i16x8 0 32767 13 32768 1 32769 42 40000)
+    (v128.const i16x8 0 13 1 32767 32769 42 40000 32767)
   )
-  (v128.const i32x4 0 -1 -1 0 -1 0 -1 0)
+  (v128.const i16x8 0 -1 -1 0 -1 0 -1 0)
 )
 (assert_return
   (invoke "i16x8.gt_u"
-    (v128.const i32x4 0 32767 13 32768 1 32769 42 40000)
-    (v128.const i32x4 0 13 1 32767 32769 42 40000 32767)
+    (v128.const i16x8 0 32767 13 32768 1 32769 42 40000)
+    (v128.const i16x8 0 13 1 32767 32769 42 40000 32767)
   )
-  (v128.const i32x4 0 -1 -1 -1 0 -1 0 -1)
+  (v128.const i16x8 0 -1 -1 -1 0 -1 0 -1)
 )
 (assert_return
   (invoke "i16x8.le_s"
-    (v128.const i32x4 0 32767 13 32768 1 32769 42 40000)
-    (v128.const i32x4 0 13 1 32767 32769 42 40000 32767)
+    (v128.const i16x8 0 32767 13 32768 1 32769 42 40000)
+    (v128.const i16x8 0 13 1 32767 32769 42 40000 32767)
   )
-  (v128.const i32x4 -1 0 0 -1 0 -1 0 -1)
+  (v128.const i16x8 -1 0 0 -1 0 -1 0 -1)
 )
 (assert_return
   (invoke "i16x8.le_u"
-    (v128.const i32x4 0 32767 13 32768 1 32769 42 40000)
-    (v128.const i32x4 0 13 1 32767 32769 42 40000 32767)
+    (v128.const i16x8 0 32767 13 32768 1 32769 42 40000)
+    (v128.const i16x8 0 13 1 32767 32769 42 40000 32767)
   )
-  (v128.const i32x4 -1 0 0 0 -1 0 -1 0)
+  (v128.const i16x8 -1 0 0 0 -1 0 -1 0)
 )
 (assert_return
   (invoke "i16x8.ge_s"
-    (v128.const i32x4 0 32767 13 32768 1 32769 42 40000)
-    (v128.const i32x4 0 13 1 32767 32769 42 40000 32767)
+    (v128.const i16x8 0 32767 13 32768 1 32769 42 40000)
+    (v128.const i16x8 0 13 1 32767 32769 42 40000 32767)
   )
-  (v128.const i32x4 -1 -1 -1 0 -1 0 -1 0)
+  (v128.const i16x8 -1 -1 -1 0 -1 0 -1 0)
 )
 (assert_return
   (invoke "i16x8.ge_u"
-    (v128.const i32x4 0 32767 13 32768 1 32769 42 40000)
-    (v128.const i32x4 0 13 1 32767 32769 42 40000 32767)
+    (v128.const i16x8 0 32767 13 32768 1 32769 42 40000)
+    (v128.const i16x8 0 13 1 32767 32769 42 40000 32767)
   )
-  (v128.const i32x4 -1 -1 -1 -1 0 -1 0 -1)
+  (v128.const i16x8 -1 -1 -1 -1 0 -1 0 -1)
 )
 ;; i32x4 comparisons
@@ -498,18 +498,18 @@
 (assert_return (invoke "f32x4.gt" (v128.const f32x4 0 -1 1 0) (v128.const f32x4 0 0 -1 1)) (v128.const i32x4 0 0 -1 0))
 (assert_return (invoke "f32x4.le" (v128.const f32x4 0 -1 1 0) (v128.const f32x4 0 0 -1 1)) (v128.const i32x4 -1 -1 0 -1))
 (assert_return (invoke "f32x4.ge" (v128.const f32x4 0 -1 1 0) (v128.const f32x4 0 0 -1 1)) (v128.const i32x4 -1 0 -1 0))
-(assert_return (invoke "f32x4.eq" (v128.const f32x4 nan 0 nan infinity) (v128.const f32x4 0 nan nan infinity)) (v128.const i32x4 0 0 0 -1))
-(assert_return (invoke "f32x4.ne" (v128.const f32x4 nan 0 nan infinity) (v128.const f32x4 0 nan nan infinity)) (v128.const i32x4 -1 -1 -1 0))
-(assert_return (invoke "f32x4.lt" (v128.const f32x4 nan 0 nan infinity) (v128.const f32x4 0 nan nan infinity)) (v128.const i32x4 0 0 0 0))
-(assert_return (invoke "f32x4.gt" (v128.const f32x4 nan 0 nan infinity) (v128.const f32x4 0 nan nan infinity)) (v128.const i32x4 0 0 0 0))
-(assert_return (invoke "f32x4.le" (v128.const f32x4 nan 0 nan infinity) (v128.const f32x4 0 nan nan infinity)) (v128.const i32x4 0 0 0 -1))
-(assert_return (invoke "f32x4.ge" (v128.const f32x4 nan 0 nan infinity) (v128.const f32x4 0 nan nan infinity)) (v128.const i32x4 0 0 0 -1))
-(assert_return (invoke "f32x4.eq" (v128.const f32x4 -infinity 0 nan -infinity) (v128.const f32x4 0 infinity infinity nan)) (v128.const i32x4 0 0 0 0))
-(assert_return (invoke "f32x4.ne" (v128.const f32x4 -infinity 0 nan -infinity) (v128.const f32x4 0 infinity infinity nan)) (v128.const i32x4 -1 -1 -1 -1))
-(assert_return (invoke "f32x4.lt" (v128.const f32x4 -infinity 0 nan -infinity) (v128.const f32x4 0 infinity infinity nan)) (v128.const i32x4 -1 -1 0 0))
-(assert_return (invoke "f32x4.gt" (v128.const f32x4 -infinity 0 nan -infinity) (v128.const f32x4 0 infinity infinity nan)) (v128.const i32x4 0 0 0 0))
-(assert_return (invoke "f32x4.le" (v128.const f32x4 -infinity 0 nan -infinity) (v128.const f32x4 0 infinity infinity nan)) (v128.const i32x4 -1 -1 0 0))
-(assert_return (invoke "f32x4.ge" (v128.const f32x4 -infinity 0 nan -infinity) (v128.const f32x4 0 infinity infinity nan)) (v128.const i32x4 0 0 0 0))
+(assert_return (invoke "f32x4.eq" (v128.const f32x4 nan 0 nan inf) (v128.const f32x4 0 nan nan inf)) (v128.const i32x4 0 0 0 -1))
+(assert_return (invoke "f32x4.ne" (v128.const f32x4 nan 0 nan inf) (v128.const f32x4 0 nan nan inf)) (v128.const i32x4 -1 -1 -1 0))
+(assert_return (invoke "f32x4.lt" (v128.const f32x4 nan 0 nan inf) (v128.const f32x4 0 nan nan inf)) (v128.const i32x4 0 0 0 0))
+(assert_return (invoke "f32x4.gt" (v128.const f32x4 nan 0 nan inf) (v128.const f32x4 0 nan nan inf)) (v128.const i32x4 0 0 0 0))
+(assert_return (invoke "f32x4.le" (v128.const f32x4 nan 0 nan inf) (v128.const f32x4 0 nan nan inf)) (v128.const i32x4 0 0 0 -1))
+(assert_return (invoke "f32x4.ge" (v128.const f32x4 nan 0 nan inf) (v128.const f32x4 0 nan nan inf)) (v128.const i32x4 0 0 0 -1))
+(assert_return (invoke "f32x4.eq" (v128.const f32x4 -inf 0 nan -inf) (v128.const f32x4 0 inf inf nan)) (v128.const i32x4 0 0 0 0))
+(assert_return (invoke "f32x4.ne" (v128.const f32x4 -inf 0 nan -inf) (v128.const f32x4 0 inf inf nan)) (v128.const i32x4 -1 -1 -1 -1))
+(assert_return (invoke "f32x4.lt" (v128.const f32x4 -inf 0 nan -inf) (v128.const f32x4 0 inf inf nan)) (v128.const i32x4 -1 -1 0 0))
+(assert_return (invoke "f32x4.gt" (v128.const f32x4 -inf 0 nan -inf) (v128.const f32x4 0 inf inf nan)) (v128.const i32x4 0 0 0 0))
+(assert_return (invoke "f32x4.le" (v128.const f32x4 -inf 0 nan -inf) (v128.const f32x4 0 inf inf nan)) (v128.const i32x4 -1 -1 0 0))
+(assert_return (invoke "f32x4.ge" (v128.const f32x4 -inf 0 nan -inf) (v128.const f32x4 0 inf inf nan)) (v128.const i32x4 0 0 0 0))

 ;; f64x2 comparisons
 (assert_return (invoke "f64x2.eq" (v128.const f64x2 0 1) (v128.const f64x2 0 0)) (v128.const i64x2 -1 0))
@@ -518,12 +518,12 @@
 (assert_return (invoke "f64x2.gt" (v128.const f64x2 0 1) (v128.const f64x2 0 0)) (v128.const i64x2 0 -1))
 (assert_return (invoke "f64x2.le" (v128.const f64x2 0 1) (v128.const f64x2 0 0)) (v128.const i64x2 -1 0))
 (assert_return (invoke "f64x2.ge" (v128.const f64x2 0 1) (v128.const f64x2 0 0)) (v128.const i64x2 -1 -1))
-(assert_return (invoke "f64x2.eq" (v128.const f64x2 nan 0) (v128.const f64x2 infinity infinity)) (v128.const i64x2 0 0))
-(assert_return (invoke "f64x2.ne" (v128.const f64x2 nan 0) (v128.const f64x2 infinity infinity)) (v128.const i64x2 -1 -1))
-(assert_return (invoke "f64x2.lt" (v128.const f64x2 nan 0) (v128.const f64x2 infinity infinity)) (v128.const i64x2 0 -1))
-(assert_return (invoke "f64x2.gt" (v128.const f64x2 nan 0) (v128.const f64x2 infinity infinity)) (v128.const i64x2 0 0))
-(assert_return (invoke "f64x2.le" (v128.const f64x2 nan 0) (v128.const f64x2 infinity infinity)) (v128.const i64x2 0 -1))
-(assert_return (invoke "f64x2.ge" (v128.const f64x2 nan 0) (v128.const f64x2 infinity infinity)) (v128.const i64x2 0 0))
+(assert_return (invoke "f64x2.eq" (v128.const f64x2 nan 0) (v128.const f64x2 inf inf)) (v128.const i64x2 0 0))
+(assert_return (invoke "f64x2.ne" (v128.const f64x2 nan 0) (v128.const f64x2 inf inf)) (v128.const i64x2 -1 -1))
+(assert_return (invoke "f64x2.lt" (v128.const f64x2 nan 0) (v128.const f64x2 inf inf)) (v128.const i64x2 0 -1))
+(assert_return (invoke "f64x2.gt" (v128.const f64x2 nan 0) (v128.const f64x2 inf inf)) (v128.const i64x2 0 0))
+(assert_return (invoke "f64x2.le" (v128.const f64x2 nan 0) (v128.const f64x2 inf inf)) (v128.const i64x2 0 -1))
+(assert_return (invoke "f64x2.ge" (v128.const f64x2 nan 0) (v128.const f64x2 inf inf)) (v128.const i64x2 0 0))

 ;; bitwise operations
 (assert_return (invoke "v128.not" (v128.const i32x4 0 -1 0 -1)) (v128.const i32x4 -1 0 -1 0))
@@ -597,99 +597,99 @@
 (assert_return (invoke "i8x16.abs" (v128.const i8x16 0 1 42 -3 -56 127 -128 -126 0 -1 -42 3 56 -127 -128 126))
   (v128.const i8x16 0 1 42 3 56 127 -128 126 0 1 42 3 56 127 -128 126)
 )
-(assert_return (invoke "i8x16.neg" (v128.const i32x4 0 1 42 -3 -56 127 -128 -126 0 -1 -42 3 56 -127 -128 126))
-  (v128.const i32x4 0 -1 -42 3 56 -127 -128 126 0 1 42 -3 -56 127 -128 -126)
+(assert_return (invoke "i8x16.neg" (v128.const i8x16 0 1 42 -3 -56 127 -128 -126 0 -1 -42 3 56 -127 -128 126))
+  (v128.const i8x16 0 -1 -42 3 56 -127 -128 126 0 1 42 -3 -56 127 -128 -126)
 )
-(assert_return (invoke "i8x16.all_true" (v128.const i32x4 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0)) (i32.const 0))
-(assert_return (invoke "i8x16.all_true" (v128.const i32x4 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0)) (i32.const 0))
-(assert_return (invoke "i8x16.all_true" (v128.const i32x4 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1)) (i32.const 0))
-(assert_return (invoke "i8x16.all_true" (v128.const i32x4 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) (i32.const 1))
+(assert_return (invoke "i8x16.all_true" (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0)) (i32.const 0))
+(assert_return (invoke "i8x16.all_true" (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0)) (i32.const 0))
+(assert_return (invoke "i8x16.all_true" (v128.const i8x16 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1)) (i32.const 0))
+(assert_return (invoke "i8x16.all_true" (v128.const i8x16 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1)) (i32.const 1))
 (assert_return (invoke "i8x16.bitmask" (v128.const i8x16 -1 0 1 -128 127 -127 0 128 -1 0 1 -128 127 -127 0 128)) (i32.const 43433))
-(assert_return (invoke "i8x16.shl" (v128.const i32x4 0 1 2 4 8 16 32 64 -128 3 6 12 24 48 96 -64) (i32.const 1))
-  (v128.const i32x4 0 2 4 8 16 32 64 -128 0 6 12 24 48 96 -64 -128)
+(assert_return (invoke "i8x16.shl" (v128.const i8x16 0 1 2 4 8 16 32 64 -128 3 6 12 24 48 96 -64) (i32.const 1))
+  (v128.const i8x16 0 2 4 8 16 32 64 -128 0 6 12 24 48 96 -64 -128)
 )
-(assert_return (invoke "i8x16.shl" (v128.const i32x4 0 1 2 4 8 16 32 64 -128 3 6 12 24 48 96 -64) (i32.const 8))
-  (v128.const i32x4 0 1 2 4 8 16 32 64 -128 3 6 12 24 48 96 -64)
+(assert_return (invoke "i8x16.shl" (v128.const i8x16 0 1 2 4 8 16 32 64 -128 3 6 12 24 48 96 -64) (i32.const 8))
+  (v128.const i8x16 0 1 2 4 8 16 32 64 -128 3 6 12 24 48 96 -64)
 )
-(assert_return (invoke "i8x16.shr_u" (v128.const i32x4 0 1 2 4 8 16 32 64 -128 3 6 12 24 48 96 -64) (i32.const 1))
-  (v128.const i32x4 0 0 1 2 4 8 16 32 64 1 3 6 12 24 48 96)
+(assert_return (invoke "i8x16.shr_u" (v128.const i8x16 0 1 2 4 8 16 32 64 -128 3 6 12 24 48 96 -64) (i32.const 1))
+  (v128.const i8x16 0 0 1 2 4 8 16 32 64 1 3 6 12 24 48 96)
 )
-(assert_return (invoke "i8x16.shr_u" (v128.const i32x4 0 1 2 4 8 16 32 64 -128 3 6 12 24 48 96 -64) (i32.const 8))
-  (v128.const i32x4 0 1 2 4 8 16 32 64 -128 3 6 12 24 48 96 -64)
+(assert_return (invoke "i8x16.shr_u" (v128.const i8x16 0 1 2 4 8 16 32 64 -128 3 6 12 24 48 96 -64) (i32.const 8))
+  (v128.const i8x16 0 1 2 4 8 16 32 64 -128 3 6 12 24 48 96 -64)
 )
-(assert_return (invoke "i8x16.shr_s" (v128.const i32x4 0 1 2 4 8 16 32 64 -128 3 6 12 24 48 96 -64) (i32.const 1))
-  (v128.const i32x4 0 0 1 2 4 8 16 32 -64 1 3 6 12 24 48 -32)
+(assert_return (invoke "i8x16.shr_s" (v128.const i8x16 0 1 2 4 8 16 32 64 -128 3 6 12 24 48 96 -64) (i32.const 1))
+  (v128.const i8x16 0 0 1 2 4 8 16 32 -64 1 3 6 12 24 48 -32)
 )
-(assert_return (invoke "i8x16.shr_s" (v128.const i32x4 0 1 2 4 8 16 32 64 -128 3 6 12 24 48 96 -64) (i32.const 8))
-  (v128.const i32x4 0 1 2 4 8 16 32 64 -128 3 6 12 24 48 96 -64)
+(assert_return (invoke "i8x16.shr_s" (v128.const i8x16 0 1 2 4 8 16 32 64 -128 3 6 12 24 48 96 -64) (i32.const 8))
+  (v128.const i8x16 0 1 2 4 8 16 32 64 -128 3 6 12 24 48 96 -64)
 )
 (assert_return
   (invoke "i8x16.add"
-    (v128.const i32x4 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
-    (v128.const i32x4 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
+    (v128.const i8x16 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
+    (v128.const i8x16 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
   )
-  (v128.const i32x4 3 17 0 0 0 135 109 46 145 225 48 184 17 249 128 215)
+  (v128.const i8x16 3 17 0 0 0 135 109 46 145 225 48 184 17 249 128 215)
 )
 (assert_return
   (invoke "i8x16.add_sat_s"
-    (v128.const i32x4 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
-    (v128.const i32x4 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
+    (v128.const i8x16 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
+    (v128.const i8x16 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
   )
-  (v128.const i32x4 3 17 0 128 0 135 109 46 127 225 48 184 17 249 127 215)
+  (v128.const i8x16 3 17 0 128 0 135 109 46 127 225 48 184 17 249 127 215)
 )
 (assert_return
   (invoke "i8x16.add_sat_u"
-    (v128.const i32x4 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
-    (v128.const i32x4 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
+    (v128.const i8x16 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
+    (v128.const i8x16 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
   )
-  (v128.const i32x4 3 255 255 255 255 135 109 46 145 225 255 184 17 255 128 215)
+  (v128.const i8x16 3 255 255 255 255 135 109 46 145 225 255 184 17 255 128 215)
 )
 (assert_return
   (invoke "i8x16.sub"
-    (v128.const i32x4 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
-    (v128.const i32x4 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
+    (v128.const i8x16 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
+    (v128.const i8x16 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
   )
-  (v128.const i32x4 253 67 254 0 254 123 159 12 61 167 158 100 17 251 130 187)
+  (v128.const i8x16 253 67 254 0 254 123 159 12 61 167 158 100 17 251 130 187)
 )
 (assert_return
   (invoke "i8x16.sub_sat_s"
-    (v128.const i32x4 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
-    (v128.const i32x4 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
+    (v128.const i8x16 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
+    (v128.const i8x16 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
  )
+  )
-  (v128.const i32x4 253 67 254 0 127 128 159 12 61 167 158 128 17 251 130 127)
+  (v128.const i8x16 253 67 254 0 127 128 159 12 61 167 158 128 17 251 130 127)
 )
 (assert_return
   (invoke "i8x16.sub_sat_u"
-    (v128.const i32x4 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
-    (v128.const i32x4 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
+    (v128.const i8x16 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
+    (v128.const i8x16 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
   )
-  (v128.const i32x4 0 0 254 0 0 123 0 12 61 167 158 100 17 0 0 0)
+  (v128.const i8x16 0 0 254 0 0 123 0 12 61 167 158 100 17 0 0 0)
 )
 (assert_return
   (invoke "i8x16.min_s"
-    (v128.const i32x4 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
-    (v128.const i32x4 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
+    (v128.const i8x16 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
+    (v128.const i8x16 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
   )
   (v128.const i8x16 0 231 255 128 129 129 6 17 42 196 231 142 0 250 1 142)
 )
 (assert_return
   (invoke "i8x16.min_u"
-    (v128.const i32x4 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
-    (v128.const i32x4 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
+    (v128.const i8x16 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
+    (v128.const i8x16 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
   )
   (v128.const i8x16 0 42 1 128 127 6 6 17 42 29 73 42 0 250 1 73)
 )
 (assert_return
   (invoke "i8x16.max_s"
-    (v128.const i32x4 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
-    (v128.const i32x4 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
+    (v128.const i8x16 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
+    (v128.const i8x16 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
   )
   (v128.const i8x16 3 42 1 128 127 6 103 29 103 29 73 42 17 255 127 73)
 )
 (assert_return
   (invoke "i8x16.max_u"
-    (v128.const i32x4 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
-    (v128.const i32x4 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
+    (v128.const i8x16 0 42 255 128 127 129 6 29 103 196 231 142 17 250 1 73)
+    (v128.const i8x16 3 231 1 128 129 6 103 17 42 29 73 42 0 255 127 142)
   )
   (v128.const i8x16 3 231 255 128 129 129 103 29 103 196 231 142 17 255 127 142)
 )
@@ -705,96 +705,96 @@
 (assert_return (invoke "i16x8.abs" (v128.const i16x8 0 1 42 -3 -56 32767 -32768 32766))
   (v128.const i16x8 0 1 42 3 56 32767 -32768 32766)
 )
-(assert_return (invoke "i16x8.neg" (v128.const i32x4 0 1 42 -3 -56 32767 -32768 32766))
-  (v128.const i32x4 0 -1 -42 3 56 -32767 -32768 -32766)
+(assert_return (invoke "i16x8.neg" (v128.const i16x8 0 1 42 -3 -56 32767 -32768 32766))
+  (v128.const i16x8 0 -1 -42 3 56 -32767 -32768 -32766)
 )
-(assert_return (invoke "i16x8.all_true" (v128.const i32x4 0 0 0 0 0 0 0 0)) (i32.const 0))
-(assert_return (invoke "i16x8.all_true" (v128.const i32x4 0 0 1 0 0 0 0 0)) (i32.const 0))
-(assert_return (invoke "i16x8.all_true" (v128.const i32x4 1 1 1 1 1 0 1 1)) (i32.const 0))
-(assert_return (invoke "i16x8.all_true" (v128.const i32x4 1 1 1 1 1 1 1 1)) (i32.const 1))
+(assert_return (invoke "i16x8.all_true" (v128.const i16x8 0 0 0 0 0 0 0 0)) (i32.const 0))
+(assert_return (invoke "i16x8.all_true" (v128.const i16x8 0 0 1 0 0 0 0 0)) (i32.const 0))
+(assert_return (invoke "i16x8.all_true" (v128.const i16x8 1 1 1 1 1 0 1 1)) (i32.const 0))
+(assert_return (invoke "i16x8.all_true" (v128.const i16x8 1 1 1 1 1 1 1 1)) (i32.const 1))
 (assert_return (invoke "i16x8.bitmask" (v128.const i16x8 -1 0 1 -32768 32767 -32767 0 32768)) (i32.const 169))
-(assert_return (invoke "i16x8.shl" (v128.const i32x4 0 8 16 128 256 2048 4096 -32768) (i32.const 1)) (v128.const i32x4 0 16 32 256 512 4096 8192 0))
-(assert_return (invoke "i16x8.shl" (v128.const i32x4 0 8 16 128 256 2048 4096 -32768) (i32.const 16)) (v128.const i32x4 0 8 16 128 256 2048 4096 -32768))
-(assert_return (invoke "i16x8.shr_u" (v128.const i32x4 0 8 16 128 256 2048 4096 -32768) (i32.const 1)) (v128.const i32x4 0 4 8 64 128 1024 2048 16384))
-(assert_return (invoke "i16x8.shr_u" (v128.const i32x4 0 8 16 128 256 2048 4096 -32768) (i32.const 16)) (v128.const i32x4 0 8 16 128 256 2048 4096 -32768))
-(assert_return (invoke "i16x8.shr_s" (v128.const i32x4 0 8 16 128 256 2048 4096 -32768) (i32.const 1)) (v128.const i32x4 0 4 8 64 128 1024 2048 -16384))
-(assert_return (invoke "i16x8.shr_s" (v128.const i32x4 0 8 16 128 256 2048 4096 -32768) (i32.const 16)) (v128.const i32x4 0 8 16 128 256 2048 4096 -32768))
+(assert_return (invoke "i16x8.shl" (v128.const i16x8 0 8 16 128 256 2048 4096 -32768) (i32.const 1)) (v128.const i16x8 0 16 32 256 512 4096 8192 0))
+(assert_return (invoke "i16x8.shl" (v128.const i16x8 0 8 16 128 256 2048 4096 -32768) (i32.const 16)) (v128.const i16x8 0 8 16 128 256 2048 4096 -32768))
+(assert_return (invoke "i16x8.shr_u" (v128.const i16x8 0 8 16 128 256 2048 4096 -32768) (i32.const 1)) (v128.const i16x8 0 4 8 64 128 1024 2048 16384))
+(assert_return (invoke "i16x8.shr_u" (v128.const i16x8 0 8 16 128 256 2048 4096 -32768) (i32.const 16)) (v128.const i16x8 0 8 16 128 256 2048 4096 -32768))
+(assert_return (invoke "i16x8.shr_s" (v128.const i16x8 0 8 16 128 256 2048 4096 -32768) (i32.const 1)) (v128.const i16x8 0 4 8 64 128 1024 2048 -16384))
+(assert_return (invoke "i16x8.shr_s" (v128.const i16x8 0 8 16 128 256 2048 4096 -32768) (i32.const 16)) (v128.const i16x8 0 8 16 128 256 2048 4096 -32768))
 (assert_return
   (invoke "i16x8.add"
-    (v128.const i32x4 0 65280 32768 32512 33024 59136 64000 32766)
-    (v128.const i32x4 768 1 32768 33024 1536 18688 65280 2)
+    (v128.const i16x8 0 65280 32768 32512 33024 59136 64000 32766)
+    (v128.const i16x8 768 1 32768 33024 1536 18688 65280 2)
   )
-  (v128.const i32x4 768 65281 0 0 34560 12288 63744 32768)
+  (v128.const i16x8 768 65281 0 0 34560 12288 63744 32768)
 )
 (assert_return
   (invoke "i16x8.add_sat_s"
-    (v128.const i32x4 0 65280 32768 32512 33024 59136 64000 32766)
-    (v128.const i32x4 768 1 32768 33024 1536 18688 65280 2)
+    (v128.const i16x8 0 65280 32768 32512 33024 59136 64000 32766)
+    (v128.const i16x8 768 1 32768 33024 1536 18688 65280 2)
   )
-  (v128.const i32x4 768 65281 32768 0 34560 12288 63744 32767)
+  (v128.const i16x8 768 65281 32768 0 34560 12288 63744 32767)
 )
 (assert_return
   (invoke "i16x8.add_sat_u"
-    (v128.const i32x4 0 65280 32768 32512 33024 59136 64000 32766)
-    (v128.const i32x4 768 1 32768 33024 1536 18688 65280 2)
+    (v128.const i16x8 0 65280 32768 32512 33024 59136 64000 32766)
+    (v128.const i16x8 768 1 32768 33024 1536 18688 65280 2)
   )
-  (v128.const i32x4 768 65281 65535 65535 34560 65535 65535 32768)
+  (v128.const i16x8 768 65281 65535 65535 34560 65535 65535 32768)
 )
 (assert_return
   (invoke "i16x8.sub"
-    (v128.const i32x4 0 65280 32768 32512 33024 59136 64000 32766)
-    (v128.const i32x4 768 1 32768 33024 1536 18688 65280 2)
+    (v128.const i16x8 0 65280 32768 32512 33024 59136 64000 32766)
+    (v128.const i16x8 768 1 32768 33024 1536 18688 65280 2)
  )
+  )
-  (v128.const i32x4 64768 65279 0 65024 31488 40448 64256 32764)
+  (v128.const i16x8 64768 65279 0 65024 31488 40448 64256 32764)
 )
 (assert_return
   (invoke "i16x8.sub_sat_s"
-    (v128.const i32x4 0 65280 32768 32512 33024 59136 64000 32766)
-    (v128.const i32x4 768 1 32768 33024 1536 18688 65280 2)
+    (v128.const i16x8 0 65280 32768 32512 33024 59136 64000 32766)
+    (v128.const i16x8 768 1 32768 33024 1536 18688 65280 2)
   )
-  (v128.const i32x4 64768 65279 0 32767 32768 40448 64256 32764)
+  (v128.const i16x8 64768 65279 0 32767 32768 40448 64256 32764)
 )
 (assert_return
   (invoke "i16x8.sub_sat_u"
-    (v128.const i32x4 0 65280 32768 32512 33024 59136 64000 32766)
-    (v128.const i32x4 768 1 32768 33024 1536 18688 65280 2)
+    (v128.const i16x8 0 65280 32768 32512 33024 59136 64000 32766)
+    (v128.const i16x8 768 1 32768 33024 1536 18688 65280 2)
   )
-  (v128.const i32x4 0 65279 0 0 31488 40448 0 32764)
+  (v128.const i16x8 0 65279 0 0 31488 40448 0 32764)
 )
 (assert_return
   (invoke "i16x8.mul"
-    (v128.const i32x4 0 65280 32768 32512 33024 59136 64000 32766)
-    (v128.const i32x4 768 1 32768 33024 1536 18688 65280 2)
+    (v128.const i16x8 0 65280 32768 32512 33024 59136 64000 32766)
+    (v128.const i16x8 768 1 32768 33024 1536 18688 65280 2)
   )
-  (v128.const i32x4 0 65280 0 0 0 0 0 65532)
+  (v128.const i16x8 0 65280 0 0 0 0 0 65532)
 )
 (assert_return
   (invoke "i16x8.min_s"
-    (v128.const i32x4 0 65280 32768 32512 33024 59136 64000 32766)
-    (v128.const i32x4 768 1 32768 33024 1536 18688 65280 2)
+    (v128.const i16x8 0 65280 32768 32512 33024 59136 64000 32766)
+    (v128.const i16x8 768 1 32768 33024 1536 18688 65280 2)
   )
-  (v128.const i32x4 0 65280 32768 33024 33024 59136 64000 2)
+  (v128.const i16x8 0 65280 32768 33024 33024 59136 64000 2)
 )
 (assert_return
   (invoke "i16x8.min_u"
-    (v128.const i32x4 0 65280 32768 32512 33024 59136 64000 32766)
-    (v128.const i32x4 768 1 32768 33024 1536 18688 65280 2)
+    (v128.const i16x8 0 65280 32768 32512 33024 59136 64000 32766)
+    (v128.const i16x8 768 1 32768 33024 1536 18688 65280 2)
   )
-  (v128.const i32x4 0 1 32768 32512 1536 18688 64000 2)
+  (v128.const i16x8 0 1 32768 32512 1536 18688 64000 2)
 )
 (assert_return
   (invoke "i16x8.max_s"
-    (v128.const i32x4 0 65280 32768 32512 33024 59136 64000 32766)
-    (v128.const i32x4 768 1 32768 33024 1536 18688 65280 2)
   )
-  (v128.const i32x4 768 1 32768 32512 1536 18688 65280 32766)
+    (v128.const i16x8 0 65280 32768 32512 33024 59136 64000 32766)
+    (v128.const i16x8 768 1 32768 33024 1536 18688 65280 2)
+  )
+  (v128.const i16x8 768 1 32768 32512 1536 18688 65280 32766)
 )
 (assert_return
   (invoke "i16x8.max_u"
-    (v128.const i32x4 0 65280 32768 32512 33024 59136 64000 32766)
-    (v128.const i32x4 768 1 32768 33024 1536 18688 65280 2)
+    (v128.const i16x8 0 65280 32768 32512 33024 59136 64000 32766)
+    (v128.const i16x8 768 1 32768 33024 1536 18688 65280 2)
   )
-  (v128.const i32x4 768 65280 32768 33024 33024 59136 65280 32766)
+  (v128.const i16x8 768 65280 32768 33024 33024 59136 65280 32766)
 )
 (assert_return
   (invoke "i16x8.avgr_u"
@@ -933,7 +933,7 @@
     (v128.const i32x4 0xffffffff 0x80000001 42 0xc0000000)
 )

 (assert_return
-  (invoke "i32x4.dot_i16x8_s" (v128.const i32x4 0 1 2 3 4 5 6 7) (v128.const i32x4 -1 2 -3 4 5 6 -7 -8))
+  (invoke "i32x4.dot_i16x8_s" (v128.const i16x8 0 1 2 3 4 5 6 7) (v128.const i16x8 -1 2 -3 4 5 6 -7 -8))
   (v128.const i32x4 2 6 50 -98)
 )
@@ -952,45 +952,45 @@
 (assert_return (invoke "i64x2.mul" (v128.const i64x2 2 42) (v128.const i64x2 0x8000000000000001 0)) (v128.const i64x2 2 0))

 ;; f32x4 arithmetic
-(assert_return (invoke "f32x4.abs" (v128.const f32x4 -0 nan -infinity 5)) (v128.const f32x4 0 nan infinity 5))
-(assert_return (invoke "f32x4.neg" (v128.const f32x4 -0 nan -infinity 5)) (v128.const f32x4 0 -nan infinity -5))
-(assert_return (invoke "f32x4.sqrt" (v128.const f32x4 -0 nan infinity 4)) (v128.const f32x4 -0 nan infinity 2))
-(assert_return (invoke "f32x4.add" (v128.const f32x4 nan -nan infinity 42) (v128.const f32x4 42 infinity infinity 1)) (v128.const f32x4 nan nan infinity 43))
-(assert_return (invoke "f32x4.sub" (v128.const f32x4 nan -nan infinity 42) (v128.const f32x4 42 infinity -infinity 1)) (v128.const f32x4 nan nan infinity 41))
-(assert_return (invoke "f32x4.mul" (v128.const f32x4 nan -nan infinity 42) (v128.const f32x4 42 infinity infinity 2)) (v128.const f32x4 nan nan infinity 84))
-(assert_return (invoke "f32x4.div" (v128.const f32x4 nan -nan infinity 42) (v128.const f32x4 42 infinity 2 2)) (v128.const f32x4 nan nan infinity 21))
+(assert_return (invoke "f32x4.abs" (v128.const f32x4 -0 nan -inf 5)) (v128.const f32x4 0 nan inf 5))
+(assert_return (invoke "f32x4.neg" (v128.const f32x4 -0 nan -inf 5)) (v128.const f32x4 0 -nan inf -5))
+(assert_return (invoke "f32x4.sqrt" (v128.const f32x4 -0 nan inf 4)) (v128.const f32x4 -0 nan inf 2))
+(assert_return (invoke "f32x4.add" (v128.const f32x4 nan -nan inf 42) (v128.const f32x4 42 inf inf 1)) (v128.const f32x4 nan nan inf 43))
+(assert_return (invoke "f32x4.sub" (v128.const f32x4 nan -nan inf 42) (v128.const f32x4 42 inf -inf 1)) (v128.const f32x4 nan nan inf 41))
+(assert_return (invoke "f32x4.mul" (v128.const f32x4 nan -nan inf 42) (v128.const f32x4 42 inf inf 2)) (v128.const f32x4 nan nan inf 84))
+(assert_return (invoke "f32x4.div" (v128.const f32x4 nan -nan inf 42) (v128.const f32x4 42 inf 2 2)) (v128.const f32x4 nan nan inf 21))
 (assert_return (invoke "f32x4.min" (v128.const f32x4 -0 0 nan 5) (v128.const f32x4 0 -0 5 nan)) (v128.const f32x4 -0 -0 nan nan))
 (assert_return (invoke "f32x4.max" (v128.const f32x4 -0 0 nan 5) (v128.const f32x4 0 -0 5 nan)) (v128.const f32x4 0 0 nan nan))
 (assert_return (invoke "f32x4.pmin" (v128.const f32x4 -0 0 nan 5) (v128.const f32x4 0 -0 5 nan)) (v128.const f32x4 -0 0 nan 5))
 (assert_return (invoke "f32x4.pmax" (v128.const f32x4 -0 0 nan 5) (v128.const f32x4 0 -0 5 nan)) (v128.const f32x4 -0 0 nan 5))
-(assert_return (invoke "f32x4.ceil" (v128.const f32x4 -0 0 infinity -infinity)) (v128.const f32x4 -0 0 infinity -infinity))
+(assert_return (invoke "f32x4.ceil" (v128.const f32x4 -0 0 inf -inf)) (v128.const f32x4 -0 0 inf -inf))
 (assert_return (invoke "f32x4.ceil" (v128.const f32x4 nan 42 0.5 -0.5)) (v128.const f32x4 nan 42 1 -0))
 (assert_return (invoke "f32x4.ceil" (v128.const f32x4 1.5 -1.5 4.2 -4.2)) (v128.const f32x4 2 -1 5 -4))
-(assert_return (invoke "f32x4.floor" (v128.const f32x4 -0 0 infinity -infinity)) (v128.const f32x4 -0 0 infinity -infinity))
+(assert_return (invoke "f32x4.floor" (v128.const f32x4 -0 0 inf -inf)) (v128.const f32x4 -0 0 inf -inf))
 (assert_return (invoke "f32x4.floor" (v128.const f32x4 nan 42 0.5 -0.5)) (v128.const f32x4 nan 42 0 -1))
 (assert_return (invoke "f32x4.floor" (v128.const f32x4 1.5 -1.5 4.2 -4.2)) (v128.const f32x4 1 -2 4 -5))
-(assert_return (invoke "f32x4.trunc" (v128.const f32x4 -0 0 infinity -infinity)) (v128.const f32x4 -0 0 infinity -infinity))
+(assert_return (invoke "f32x4.trunc" (v128.const f32x4 -0 0 inf -inf)) (v128.const f32x4 -0 0 inf -inf))
 (assert_return (invoke "f32x4.trunc" (v128.const f32x4 nan 42 0.5 -0.5)) (v128.const f32x4 nan 42 0 -0))
 (assert_return (invoke "f32x4.trunc" (v128.const f32x4 1.5 -1.5 4.2 -4.2)) (v128.const f32x4 1 -1 4 -4))
-(assert_return (invoke "f32x4.nearest" (v128.const f32x4 -0 0 infinity -infinity)) (v128.const f32x4 -0 0 infinity -infinity))
+(assert_return (invoke "f32x4.nearest" (v128.const f32x4 -0 0 inf -inf)) (v128.const f32x4 -0 0 inf -inf))
 (assert_return (invoke "f32x4.nearest" (v128.const f32x4 nan 42 0.5 -0.5)) (v128.const f32x4 nan 42 0 -0))
 (assert_return (invoke "f32x4.nearest" (v128.const f32x4 1.5 -1.5 4.2 -4.2)) (v128.const f32x4 2 -2 4 -4))

 ;; f64x2 arithmetic
 (assert_return (invoke "f64x2.abs" (v128.const f64x2 -0 nan)) (v128.const f64x2 0 nan))
-(assert_return (invoke "f64x2.abs" (v128.const f64x2 -infinity 5)) (v128.const f64x2 infinity 5))
+(assert_return (invoke "f64x2.abs" (v128.const f64x2 -inf 5)) (v128.const f64x2 inf 5))
 (assert_return (invoke "f64x2.neg" (v128.const f64x2 -0 nan)) (v128.const f64x2 0 -nan))
-(assert_return (invoke "f64x2.neg" (v128.const f64x2 -infinity 5)) (v128.const f64x2 infinity -5))
+(assert_return (invoke "f64x2.neg" (v128.const f64x2 -inf 5)) (v128.const f64x2 inf -5))
 (assert_return (invoke "f64x2.sqrt" (v128.const f64x2 -0 nan)) (v128.const f64x2 -0 nan))
-(assert_return (invoke "f64x2.sqrt" (v128.const f64x2 infinity 4)) (v128.const f64x2 infinity 2))
-(assert_return (invoke "f64x2.add" (v128.const f64x2 nan -nan) (v128.const f64x2 42 infinity)) (v128.const f64x2 nan nan))
-(assert_return (invoke "f64x2.add" (v128.const f64x2 infinity 42) (v128.const f64x2 infinity 1)) (v128.const f64x2 infinity 43))
-(assert_return (invoke "f64x2.sub" (v128.const f64x2 nan -nan) (v128.const f64x2 42 infinity)) (v128.const f64x2 nan nan))
-(assert_return (invoke "f64x2.sub" (v128.const f64x2 infinity 42) (v128.const f64x2 -infinity 1)) (v128.const f64x2 infinity 41))
-(assert_return (invoke "f64x2.mul" (v128.const f64x2 nan -nan) (v128.const f64x2 42 infinity)) (v128.const f64x2 nan nan))
-(assert_return (invoke "f64x2.mul" (v128.const f64x2 infinity 42) (v128.const f64x2 infinity 2)) (v128.const f64x2 infinity 84))
-(assert_return (invoke "f64x2.div" (v128.const f64x2 nan -nan) (v128.const f64x2 42 infinity)) (v128.const f64x2 nan nan))
-(assert_return (invoke "f64x2.div" (v128.const f64x2 infinity 42) (v128.const f64x2 2 2)) (v128.const f64x2 infinity 21))
+(assert_return (invoke "f64x2.sqrt" (v128.const f64x2 inf 4)) (v128.const f64x2 inf 2))
+(assert_return (invoke "f64x2.add" (v128.const f64x2 nan -nan) (v128.const f64x2 42 inf)) (v128.const f64x2 nan nan))
+(assert_return (invoke "f64x2.add" (v128.const f64x2 inf 42) (v128.const f64x2 inf 1)) (v128.const f64x2 inf 43))
+(assert_return (invoke "f64x2.sub" (v128.const f64x2 nan -nan) (v128.const f64x2 42 inf)) (v128.const f64x2 nan nan))
+(assert_return (invoke "f64x2.sub" (v128.const f64x2 inf 42) (v128.const f64x2 -inf 1)) (v128.const f64x2 inf 41))
+(assert_return (invoke "f64x2.mul" (v128.const f64x2 nan -nan) (v128.const f64x2 42 inf)) (v128.const f64x2 nan nan))
+(assert_return (invoke "f64x2.mul" (v128.const f64x2 inf 42) (v128.const f64x2 inf 2)) (v128.const f64x2 inf 84))
+(assert_return (invoke "f64x2.div" (v128.const f64x2 nan -nan) (v128.const f64x2 42 inf)) (v128.const f64x2 nan nan))
+(assert_return (invoke "f64x2.div" (v128.const f64x2 inf 42) (v128.const f64x2 2 2)) (v128.const f64x2 inf 21))
 (assert_return (invoke "f64x2.min" (v128.const f64x2 -0 0) (v128.const f64x2 0 -0)) (v128.const f64x2 -0 -0))
 (assert_return (invoke "f64x2.min" (v128.const f64x2 nan 5) (v128.const f64x2 5 nan)) (v128.const f64x2 nan nan))
 (assert_return (invoke "f64x2.max" (v128.const f64x2 -0 0) (v128.const f64x2 0 -0)) (v128.const f64x2 0 0))
@@ -1000,25 +1000,25 @@
 (assert_return (invoke "f64x2.pmax" (v128.const f64x2 -0 0) (v128.const f64x2 0 -0)) (v128.const f64x2 -0 0))
 (assert_return (invoke "f64x2.pmax" (v128.const f64x2 nan 5) (v128.const f64x2 5 nan)) (v128.const f64x2 nan 5))
 (assert_return (invoke "f64x2.ceil" (v128.const f64x2 -0 0)) (v128.const f64x2 -0 0))
-(assert_return (invoke "f64x2.ceil" (v128.const f64x2 infinity -infinity)) (v128.const f64x2 infinity -infinity))
+(assert_return (invoke "f64x2.ceil" (v128.const f64x2 inf -inf)) (v128.const f64x2 inf -inf))
 (assert_return (invoke "f64x2.ceil" (v128.const f64x2 nan 42)) (v128.const f64x2 nan 42))
 (assert_return (invoke "f64x2.ceil" (v128.const f64x2 0.5 -0.5)) (v128.const f64x2 1 -0))
 (assert_return (invoke "f64x2.ceil" (v128.const f64x2 1.5 -1.5)) (v128.const f64x2 2 -1))
 (assert_return (invoke "f64x2.ceil" (v128.const f64x2 4.2 -4.2)) (v128.const f64x2 5 -4))
 (assert_return (invoke "f64x2.floor" (v128.const f64x2 -0 0)) (v128.const f64x2 -0 0))
-(assert_return (invoke "f64x2.floor" (v128.const f64x2 infinity -infinity)) (v128.const f64x2 infinity -infinity))
+(assert_return (invoke "f64x2.floor" (v128.const f64x2 inf -inf)) (v128.const f64x2 inf -inf))
 (assert_return (invoke "f64x2.floor" (v128.const f64x2 nan 42)) (v128.const f64x2 nan 42))
 (assert_return (invoke "f64x2.floor" (v128.const f64x2 0.5 -0.5)) (v128.const f64x2 0 -1))
 (assert_return (invoke "f64x2.floor" (v128.const f64x2 1.5 -1.5)) (v128.const f64x2 1 -2))
 (assert_return (invoke "f64x2.floor" (v128.const f64x2 4.2 -4.2)) (v128.const f64x2 4 -5))
 (assert_return (invoke "f64x2.trunc" (v128.const f64x2 -0 0)) (v128.const f64x2 -0 0))
-(assert_return (invoke "f64x2.trunc" (v128.const f64x2 infinity -infinity)) (v128.const f64x2 infinity -infinity))
+(assert_return (invoke "f64x2.trunc" (v128.const f64x2 inf -inf)) (v128.const f64x2 inf -inf))
 (assert_return (invoke "f64x2.trunc" (v128.const f64x2 nan 42)) (v128.const f64x2 nan 42))
 (assert_return (invoke "f64x2.trunc" (v128.const f64x2 0.5 -0.5)) (v128.const f64x2 0 -0))
 (assert_return (invoke "f64x2.trunc" (v128.const f64x2 1.5 -1.5)) (v128.const f64x2 1 -1))
 (assert_return (invoke "f64x2.trunc" (v128.const f64x2 4.2 -4.2)) (v128.const f64x2 4 -4))
 (assert_return (invoke "f64x2.nearest" (v128.const f64x2 -0 0)) (v128.const f64x2 -0 0))
-(assert_return (invoke "f64x2.nearest" (v128.const f64x2 infinity -infinity)) (v128.const f64x2 infinity -infinity))
+(assert_return (invoke "f64x2.nearest" (v128.const f64x2 inf -inf)) (v128.const f64x2 inf -inf))
 (assert_return (invoke "f64x2.nearest" (v128.const f64x2 nan 42)) (v128.const f64x2 nan 42))
 (assert_return (invoke "f64x2.nearest" (v128.const f64x2 0.5 -0.5)) (v128.const f64x2 0 -0))
 (assert_return (invoke "f64x2.nearest" (v128.const f64x2 1.5 -1.5)) (v128.const f64x2 2 -2))
@@ -1050,8 +1050,8 @@
 )

 ;; conversions
-(assert_return (invoke "i32x4.trunc_sat_f32x4_s" (v128.const f32x4 42 nan infinity -infinity)) (v128.const i32x4 42 0 2147483647 -2147483648))
-(assert_return (invoke "i32x4.trunc_sat_f32x4_u" (v128.const f32x4 42 nan infinity -infinity)) (v128.const i32x4 42 0 4294967295 0))
+(assert_return (invoke "i32x4.trunc_sat_f32x4_s" (v128.const f32x4 42 nan inf -inf)) (v128.const i32x4 42 0 2147483647 -2147483648))
+(assert_return (invoke "i32x4.trunc_sat_f32x4_u" (v128.const f32x4 42 nan inf -inf)) (v128.const i32x4 42 0 4294967295 0))
 (assert_return (invoke "f32x4.convert_i32x4_s" (v128.const i32x4 0 -1 2147483647 -2147483648)) (v128.const f32x4 0 -1 2147483648 -2147483648))
 (assert_return (invoke "f32x4.convert_i32x4_u" (v128.const i32x4 0 -1 2147483647 -2147483648)) (v128.const f32x4 0 4294967296 2147483648 2147483648))
 (assert_return
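Every hunk above makes the same two corrections: each v128.const literal is now written with the lane shape the surrounding instruction actually operates on (i8x16 or i16x8 rather than a mismatched i32x4), and the infinite float values use the text-format spelling inf/-inf. A minimal standalone sketch of the lane semantics the corrected assertions rely on (illustrative only, not part of the diff; the module and export names are made up):

;; i16x8.splat truncates its i32 operand to the 16-bit lane width, so
;; 65537 (0x10001) becomes 1 in every lane, and i16x8.extract_lane_s
;; sign-extends a lane holding 65535 (0xffff) back to the i32 value -1.
(module
  (func (export "splat_65537") (result v128)
    (i16x8.splat (i32.const 65537)))
  (func (export "extract_s_first") (result i32)
    (i16x8.extract_lane_s 0 (v128.const i16x8 65535 0 0 0 0 0 0 0))))

(assert_return (invoke "splat_65537") (v128.const i16x8 1 1 1 1 1 1 1 1))
(assert_return (invoke "extract_s_first") (i32.const -1))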