author     Yvan Roux <yvan.roux@linaro.org>              2015-08-23 22:14:34 +0200
committer  Linaro Code Review <review@review.linaro.org> 2015-08-27 14:30:59 +0000
commit     baae47ab892450bae13199b3fbf17b3cf518facf (patch)
tree       d2ec74a18995398016812570a6c843f362e3892d
parent     e1d72a396ec6b00d7aec12670109aade69a3d42f (diff)
gcc/testsuite/
    Backport from trunk r223372.
    2015-05-19  Christophe Lyon  <christophe.lyon@linaro.org>
    * gcc.target/aarch64/advsimd-intrinsics/vqmovn.c: New file.

gcc/testsuite/
    Backport from trunk r223373.
    2015-05-19  Christophe Lyon  <christophe.lyon@linaro.org>
    * gcc.target/aarch64/advsimd-intrinsics/vqmovun.c: New file.

gcc/testsuite/
    Backport from trunk r223374.
    2015-05-19  Christophe Lyon  <christophe.lyon@linaro.org>
    * gcc.target/aarch64/advsimd-intrinsics/vqrdmulh.c: New file.

gcc/testsuite/
    Backport from trunk r223375.
    2015-05-19  Christophe Lyon  <christophe.lyon@linaro.org>
    * gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_lane.c: New file.

gcc/testsuite/
    Backport from trunk r223376.
    2015-05-19  Christophe Lyon  <christophe.lyon@linaro.org>
    * gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_n.c: New file.

gcc/testsuite/
    Backport from trunk r223377.
    2015-05-19  Christophe Lyon  <christophe.lyon@linaro.org>
    * gcc.target/aarch64/advsimd-intrinsics/vqrshl.c: New file.

gcc/testsuite/
    Backport from trunk r223379.
    2015-05-19  Christophe Lyon  <christophe.lyon@linaro.org>
    * gcc.target/aarch64/advsimd-intrinsics/vqrshrn_n.c: New file.

gcc/testsuite/
    Backport from trunk r223380.
    2015-05-19  Christophe Lyon  <christophe.lyon@linaro.org>
    * gcc.target/aarch64/advsimd-intrinsics/vqrshrun_n.c: New file.

gcc/testsuite/
    Backport from trunk r223381.
    2015-05-19  Christophe Lyon  <christophe.lyon@linaro.org>
    * gcc.target/aarch64/advsimd-intrinsics/vqshl.c: New file.

gcc/testsuite/
    Backport from trunk r223382.
    2015-05-19  Christophe Lyon  <christophe.lyon@linaro.org>
    * gcc.target/aarch64/advsimd-intrinsics/vqshl_n.c: New file.

gcc/testsuite/
    Backport from trunk r223384.
    2015-05-19  Christophe Lyon  <christophe.lyon@linaro.org>
    * gcc.target/aarch64/advsimd-intrinsics/vqshlu_n.c: New file.

gcc/testsuite/
    Backport from trunk r223385.
    2015-05-19  Christophe Lyon  <christophe.lyon@linaro.org>
    * gcc.target/aarch64/advsimd-intrinsics/vqshrn_n.c: New file.

gcc/testsuite/
    Backport from trunk r223386.
    2015-05-19  Christophe Lyon  <christophe.lyon@linaro.org>
    * gcc.target/aarch64/advsimd-intrinsics/vqshrun_n.c: New file.

Change-Id: I4eba75140dbb9a108f0159c0097360c64af79823
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqmovn.c          134
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqmovun.c          93
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh.c        161
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_lane.c   169
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_n.c      155
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshl.c         1090
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshrn_n.c       174
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshrun_n.c      189
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshl.c           829
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshl_n.c         234
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshlu_n.c        263
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshrn_n.c        177
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshrun_n.c       133
13 files changed, 3801 insertions, 0 deletions
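
As background for the first test below, here is a minimal sketch of the behaviour vqmovn is expected to show, written with the plain arm_neon.h intrinsics rather than the harness macros used by the patch: lanes that fit the narrower type pass through unchanged, while lanes that do not clamp to the destination limit and set the cumulative saturation (QC) flag.

#include <arm_neon.h>

/* Minimal sketch of saturating narrowing, independent of the test harness.  */
int16x4_t narrow_in_range (void)
{
  /* 0x1278 fits in int16_t, so each lane is narrowed to 0x1278 and the
     cumulative saturation (QC) flag is left untouched.  */
  return vqmovn_s32 (vdupq_n_s32 (0x1278));
}

int16x4_t narrow_saturating (void)
{
  /* 0x12345678 exceeds INT16_MAX, so each lane clamps to 0x7fff and the
     cumulative saturation (QC) flag is set.  */
  return vqmovn_s32 (vdupq_n_s32 (0x12345678));
}
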
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqmovn.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqmovn.c
new file mode 100644
index 00000000000..45c2db9480a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqmovn.c
@@ -0,0 +1,134 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected values of cumulative_saturation flag. */
+int VECT_VAR(expected_cumulative_sat,int,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat,uint,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat,uint,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat,uint,32,2) = 0;
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0x12, 0x12, 0x12, 0x12,
+ 0x12, 0x12, 0x12, 0x12 };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0x1278, 0x1278, 0x1278, 0x1278 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0x12345678, 0x12345678 };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0x82, 0x82, 0x82, 0x82,
+ 0x82, 0x82, 0x82, 0x82 };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0x8765, 0x8765, 0x8765, 0x8765 };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0x87654321, 0x87654321 };
+
+/* Expected values of cumulative_saturation flag when saturation occurs. */
+int VECT_VAR(expected_cumulative_sat1,int,8,8) = 1;
+int VECT_VAR(expected_cumulative_sat1,int,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat1,int,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat1,uint,8,8) = 1;
+int VECT_VAR(expected_cumulative_sat1,uint,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat1,uint,32,2) = 1;
+
+/* Expected results when saturation occurs. */
+VECT_VAR_DECL(expected1,int,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f };
+VECT_VAR_DECL(expected1,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected1,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected1,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected1,uint,16,4) [] = { 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected1,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+
+#define INSN_NAME vqmovn
+#define TEST_MSG "VQMOVN"
+
+#define FNNAME1(NAME) void exec_ ## NAME (void)
+#define FNNAME(NAME) FNNAME1(NAME)
+
+FNNAME (INSN_NAME)
+{
+ /* Basic test: y=OP(x), then store the result. */
+#define TEST_UNARY_OP1(INSN, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+ Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
+ VECT_VAR(vector_res, T1, W, N) = \
+ INSN##_##T2##W2(VECT_VAR(vector, T1, W2, N)); \
+ vst1##_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N)); \
+ CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+#define TEST_UNARY_OP(INSN, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_UNARY_OP1(INSN, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+  /* No need for 64-bit variants. */
+ DECL_VARIABLE(vector, int, 16, 8);
+ DECL_VARIABLE(vector, int, 32, 4);
+ DECL_VARIABLE(vector, int, 64, 2);
+ DECL_VARIABLE(vector, uint, 16, 8);
+ DECL_VARIABLE(vector, uint, 32, 4);
+ DECL_VARIABLE(vector, uint, 64, 2);
+
+ DECL_VARIABLE(vector_res, int, 8, 8);
+ DECL_VARIABLE(vector_res, int, 16, 4);
+ DECL_VARIABLE(vector_res, int, 32, 2);
+ DECL_VARIABLE(vector_res, uint, 8, 8);
+ DECL_VARIABLE(vector_res, uint, 16, 4);
+ DECL_VARIABLE(vector_res, uint, 32, 2);
+
+ clean_results ();
+
+ /* Fill input vector with arbitrary values. */
+ VDUP(vector, q, int, s, 16, 8, 0x12);
+ VDUP(vector, q, int, s, 32, 4, 0x1278);
+ VDUP(vector, q, int, s, 64, 2, 0x12345678);
+ VDUP(vector, q, uint, u, 16, 8, 0x82);
+ VDUP(vector, q, uint, u, 32, 4, 0x8765);
+ VDUP(vector, q, uint, u, 64, 2, 0x87654321);
+
+ /* Apply a unary operator named INSN_NAME. */
+#define CMT ""
+ TEST_UNARY_OP(INSN_NAME, int, s, 8, 16, 8, expected_cumulative_sat, CMT);
+ TEST_UNARY_OP(INSN_NAME, int, s, 16, 32, 4, expected_cumulative_sat, CMT);
+ TEST_UNARY_OP(INSN_NAME, int, s, 32, 64, 2, expected_cumulative_sat, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 8, 16, 8, expected_cumulative_sat, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 16, 32, 4, expected_cumulative_sat, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 32, 64, 2, expected_cumulative_sat, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, CMT);
+
+
+ /* Fill input vector with arbitrary values which cause cumulative
+ saturation. */
+ VDUP(vector, q, int, s, 16, 8, 0x1234);
+ VDUP(vector, q, int, s, 32, 4, 0x12345678);
+ VDUP(vector, q, int, s, 64, 2, 0x1234567890ABLL);
+ VDUP(vector, q, uint, u, 16, 8, 0x8234);
+ VDUP(vector, q, uint, u, 32, 4, 0x87654321);
+ VDUP(vector, q, uint, u, 64, 2, 0x8765432187654321ULL);
+
+ /* Apply a unary operator named INSN_NAME. */
+#undef CMT
+#define CMT " (with saturation)"
+ TEST_UNARY_OP(INSN_NAME, int, s, 8, 16, 8, expected_cumulative_sat1, CMT);
+ TEST_UNARY_OP(INSN_NAME, int, s, 16, 32, 4, expected_cumulative_sat1, CMT);
+ TEST_UNARY_OP(INSN_NAME, int, s, 32, 64, 2, expected_cumulative_sat1, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 8, 16, 8, expected_cumulative_sat1, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 16, 32, 4, expected_cumulative_sat1, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 32, 64, 2, expected_cumulative_sat1, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected1, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected1, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected1, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected1, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected1, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected1, CMT);
+}
+
+int main (void)
+{
+ exec_vqmovn ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqmovun.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqmovun.c
new file mode 100644
index 00000000000..1eeb4c8488c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqmovun.c
@@ -0,0 +1,93 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected values of cumulative_saturation flag. */
+int VECT_VAR(expected_cumulative_sat,uint,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat,uint,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat,uint,32,2) = 0;
+
+/* Expected results. */
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0x34, 0x34, 0x34, 0x34,
+ 0x34, 0x34, 0x34, 0x34 };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0x5678, 0x5678, 0x5678, 0x5678 };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0x12345678, 0x12345678 };
+
+/* Expected values of cumulative_saturation flag with negative input. */
+int VECT_VAR(expected_cumulative_sat_neg,uint,8,8) = 1;
+int VECT_VAR(expected_cumulative_sat_neg,uint,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat_neg,uint,32,2) = 1;
+
+/* Expected results with negative input. */
+VECT_VAR_DECL(expected_neg,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_neg,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_neg,uint,32,2) [] = { 0x0, 0x0 };
+
+#define INSN_NAME vqmovun
+#define TEST_MSG "VQMOVUN"
+
+#define FNNAME1(NAME) void exec_ ## NAME (void)
+#define FNNAME(NAME) FNNAME1(NAME)
+
+FNNAME (INSN_NAME)
+{
+ /* Basic test: y=OP(x), then store the result. */
+#define TEST_UNARY_OP1(INSN, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+ Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
+ VECT_VAR(vector_res, T1, W, N) = \
+ INSN##_s##W2(VECT_VAR(vector, int, W2, N)); \
+ vst1##_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N)); \
+ CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+#define TEST_UNARY_OP(INSN, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_UNARY_OP1(INSN, T1, T2, W, W2, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+ DECL_VARIABLE(vector, int, 16, 8);
+ DECL_VARIABLE(vector, int, 32, 4);
+ DECL_VARIABLE(vector, int, 64, 2);
+
+ DECL_VARIABLE(vector_res, uint, 8, 8);
+ DECL_VARIABLE(vector_res, uint, 16, 4);
+ DECL_VARIABLE(vector_res, uint, 32, 2);
+
+ clean_results ();
+
+ /* Fill input vector with arbitrary values. */
+ VDUP(vector, q, int, s, 16, 8, 0x34);
+ VDUP(vector, q, int, s, 32, 4, 0x5678);
+ VDUP(vector, q, int, s, 64, 2, 0x12345678);
+
+ /* Apply a unary operator named INSN_NAME. */
+#define CMT ""
+ TEST_UNARY_OP(INSN_NAME, uint, u, 8, 16, 8, expected_cumulative_sat, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 16, 32, 4, expected_cumulative_sat, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 32, 64, 2, expected_cumulative_sat, CMT);
+
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, CMT);
+
+ /* Fill input vector with negative values. */
+ VDUP(vector, q, int, s, 16, 8, 0x8234);
+ VDUP(vector, q, int, s, 32, 4, 0x87654321);
+ VDUP(vector, q, int, s, 64, 2, 0x8765432187654321LL);
+
+ /* Apply a unary operator named INSN_NAME. */
+#undef CMT
+#define CMT " (negative input)"
+ TEST_UNARY_OP(INSN_NAME, uint, u, 8, 16, 8, expected_cumulative_sat_neg, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 16, 32, 4, expected_cumulative_sat_neg, CMT);
+ TEST_UNARY_OP(INSN_NAME, uint, u, 32, 64, 2, expected_cumulative_sat_neg, CMT);
+
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_neg, CMT);
+}
+
+int main (void)
+{
+ exec_vqmovun ();
+ return 0;
+}
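
The negative-input case above relies on the signed-to-unsigned narrowing that vqmovun performs: non-negative lanes that fit are narrowed unchanged, while any negative lane clamps to 0 and sets the cumulative saturation (QC) flag, which is what expected_neg and expected_cumulative_sat_neg encode. A minimal sketch with the plain intrinsics:

#include <arm_neon.h>

/* Minimal sketch of signed-to-unsigned saturating narrowing.  */
uint8x8_t narrow_unsigned_in_range (void)
{
  /* 0x34 is non-negative and fits in uint8_t: lanes stay 0x34.  */
  return vqmovun_s16 (vdupq_n_s16 (0x34));
}

uint8x8_t narrow_unsigned_negative (void)
{
  /* (int16_t) 0x8234 is negative, so every lane clamps to 0 and the
     cumulative saturation (QC) flag is set.  */
  return vqmovun_s16 (vdupq_n_s16 ((int16_t) 0x8234));
}
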
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh.c
new file mode 100644
index 00000000000..915594a4a56
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh.c
@@ -0,0 +1,161 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected values of cumulative_saturation flag. */
+int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,16,4) [] = { 0xfff5, 0xfff6, 0xfff7, 0xfff7 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+
+/* Expected values of cumulative_saturation flag when multiplication
+ saturates. */
+int VECT_VAR(expected_cumulative_sat_mul,int,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat_mul,int,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat_mul,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_mul,int,32,4) = 1;
+
+/* Expected results when multiplication saturates. */
+VECT_VAR_DECL(expected_mul,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_mul,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected_mul,int,16,8) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff, 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_mul,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
+ 0x7fffffff, 0x7fffffff };
+
+/* Expected values of cumulative_saturation flag when rounding
+ should not cause saturation. */
+int VECT_VAR(expected_cumulative_sat_round,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_round,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_round,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_round,int,32,4) = 0;
+
+/* Expected results when rounding should not cause saturation. */
+VECT_VAR_DECL(expected_round,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_round,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected_round,int,16,8) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff, 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_round,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
+ 0x7fffffff, 0x7fffffff };
+
+#define INSN vqrdmulh
+#define TEST_MSG "VQRDMULH"
+
+#define FNNAME1(NAME) void exec_ ## NAME (void)
+#define FNNAME(NAME) FNNAME1(NAME)
+
+FNNAME (INSN)
+{
+ /* vector_res = vqrdmulh(vector,vector2), then store the result. */
+#define TEST_VQRDMULH2(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+ Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
+ VECT_VAR(vector_res, T1, W, N) = \
+ INSN##Q##_##T2##W(VECT_VAR(vector, T1, W, N), \
+ VECT_VAR(vector2, T1, W, N)); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N)); \
+ CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+  /* Two auxiliary macros are necessary to expand INSN */
+#define TEST_VQRDMULH1(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQRDMULH2(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+#define TEST_VQRDMULH(Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQRDMULH1(INSN, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+
+ DECL_VARIABLE(vector, int, 16, 4);
+ DECL_VARIABLE(vector, int, 32, 2);
+ DECL_VARIABLE(vector, int, 16, 8);
+ DECL_VARIABLE(vector, int, 32, 4);
+
+ DECL_VARIABLE(vector_res, int, 16, 4);
+ DECL_VARIABLE(vector_res, int, 32, 2);
+ DECL_VARIABLE(vector_res, int, 16, 8);
+ DECL_VARIABLE(vector_res, int, 32, 4);
+
+ DECL_VARIABLE(vector2, int, 16, 4);
+ DECL_VARIABLE(vector2, int, 32, 2);
+ DECL_VARIABLE(vector2, int, 16, 8);
+ DECL_VARIABLE(vector2, int, 32, 4);
+
+ clean_results ();
+
+ VLOAD(vector, buffer, , int, s, 16, 4);
+ VLOAD(vector, buffer, , int, s, 32, 2);
+ VLOAD(vector, buffer, q, int, s, 16, 8);
+ VLOAD(vector, buffer, q, int, s, 32, 4);
+
+ /* Initialize vector2. */
+ VDUP(vector2, , int, s, 16, 4, 0x5555);
+ VDUP(vector2, , int, s, 32, 2, 0xBB);
+ VDUP(vector2, q, int, s, 16, 8, 0x33);
+ VDUP(vector2, q, int, s, 32, 4, 0x22);
+
+#define CMT ""
+ TEST_VQRDMULH(, int, s, 16, 4, expected_cumulative_sat, CMT);
+ TEST_VQRDMULH(, int, s, 32, 2, expected_cumulative_sat, CMT);
+ TEST_VQRDMULH(q, int, s, 16, 8, expected_cumulative_sat, CMT);
+ TEST_VQRDMULH(q, int, s, 32, 4, expected_cumulative_sat, CMT);
+
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected, CMT);
+
+ /* Now use input values such that the multiplication causes
+ saturation. */
+#define TEST_MSG_MUL " (check mul cumulative saturation)"
+ VDUP(vector, , int, s, 16, 4, 0x8000);
+ VDUP(vector, , int, s, 32, 2, 0x80000000);
+ VDUP(vector, q, int, s, 16, 8, 0x8000);
+ VDUP(vector, q, int, s, 32, 4, 0x80000000);
+ VDUP(vector2, , int, s, 16, 4, 0x8000);
+ VDUP(vector2, , int, s, 32, 2, 0x80000000);
+ VDUP(vector2, q, int, s, 16, 8, 0x8000);
+ VDUP(vector2, q, int, s, 32, 4, 0x80000000);
+
+ TEST_VQRDMULH(, int, s, 16, 4, expected_cumulative_sat_mul, TEST_MSG_MUL);
+ TEST_VQRDMULH(, int, s, 32, 2, expected_cumulative_sat_mul, TEST_MSG_MUL);
+ TEST_VQRDMULH(q, int, s, 16, 8, expected_cumulative_sat_mul, TEST_MSG_MUL);
+ TEST_VQRDMULH(q, int, s, 32, 4, expected_cumulative_sat_mul, TEST_MSG_MUL);
+
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_mul, TEST_MSG_MUL);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_mul, TEST_MSG_MUL);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_mul, TEST_MSG_MUL);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_mul, TEST_MSG_MUL);
+
+ /* Use input values where rounding produces a result equal to the
+ saturation value, but does not set the saturation flag. */
+#define TEST_MSG_ROUND " (check rounding)"
+ VDUP(vector, , int, s, 16, 4, 0x8000);
+ VDUP(vector, , int, s, 32, 2, 0x80000000);
+ VDUP(vector, q, int, s, 16, 8, 0x8000);
+ VDUP(vector, q, int, s, 32, 4, 0x80000000);
+ VDUP(vector2, , int, s, 16, 4, 0x8001);
+ VDUP(vector2, , int, s, 32, 2, 0x80000001);
+ VDUP(vector2, q, int, s, 16, 8, 0x8001);
+ VDUP(vector2, q, int, s, 32, 4, 0x80000001);
+
+ TEST_VQRDMULH(, int, s, 16, 4, expected_cumulative_sat_round, TEST_MSG_ROUND);
+ TEST_VQRDMULH(, int, s, 32, 2, expected_cumulative_sat_round, TEST_MSG_ROUND);
+ TEST_VQRDMULH(q, int, s, 16, 8, expected_cumulative_sat_round, TEST_MSG_ROUND);
+ TEST_VQRDMULH(q, int, s, 32, 4, expected_cumulative_sat_round, TEST_MSG_ROUND);
+
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_round, TEST_MSG_ROUND);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_round, TEST_MSG_ROUND);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_round, TEST_MSG_ROUND);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_round, TEST_MSG_ROUND);
+}
+
+int main (void)
+{
+ exec_vqrdmulh ();
+ return 0;
+}
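
The two corner cases exercised above are easiest to see with a scalar model of the per-lane operation. For the 16-bit variant, vqrdmulh computes roughly (2*a*b + 0x8000) >> 16 and saturates the result: with a = b = 0x8000 the doubled product overflows, so the lane saturates to 0x7fff and the cumulative saturation (QC) flag is set, whereas with a = 0x8000 and b = 0x8001 the rounded result lands exactly on 0x7fff without overflowing, so QC stays clear. A standalone sketch of that model (not the intrinsic itself):

#include <stdint.h>
#include <stdio.h>

/* Scalar model of the saturating rounding doubling multiply-high that
   vqrdmulh_s16 performs on each lane; *qc reports whether the cumulative
   saturation flag would be set.  Illustrative sketch only.  */
static int16_t qrdmulh_s16_model (int16_t a, int16_t b, int *qc)
{
  int64_t rounded = 2 * (int64_t) a * (int64_t) b + 0x8000; /* double and round */
  int64_t res = rounded >> 16;                              /* keep the high half */
  if (res > INT16_MAX) { *qc = 1; return INT16_MAX; }
  if (res < INT16_MIN) { *qc = 1; return INT16_MIN; }
  return (int16_t) res;
}

int main (void)
{
  int qc = 0;
  /* 0x8000 * 0x8000: the doubled product is 0x80000000, which cannot be
     represented, so the result saturates to 0x7fff and QC is set.  */
  printf ("0x%04x qc=%d\n", (uint16_t) qrdmulh_s16_model (-0x8000, -0x8000, &qc), qc);

  qc = 0;
  /* 0x8000 * 0x8001: rounding lands exactly on 0x7fff with no overflow,
     so QC stays clear; this is the case the "check rounding" inputs hit.  */
  printf ("0x%04x qc=%d\n", (uint16_t) qrdmulh_s16_model (-0x8000, -0x7fff, &qc), qc);
  return 0;
}
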
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_lane.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_lane.c
new file mode 100644
index 00000000000..2235e745dc4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_lane.c
@@ -0,0 +1,169 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected values of cumulative_saturation flag. */
+int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+
+/* Expected values of cumulative_saturation flag when multiplication
+ saturates. */
+int VECT_VAR(expected_cumulative_sat_mul,int,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat_mul,int,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat_mul,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_mul,int,32,4) = 1;
+
+/* Expected results when multiplication saturates. */
+VECT_VAR_DECL(expected_mul,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_mul,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected_mul,int,16,8) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff, 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_mul,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
+ 0x7fffffff, 0x7fffffff };
+
+/* Expected values of cumulative_saturation flag when rounding
+ should not cause saturation. */
+int VECT_VAR(expected_cumulative_sat_round,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_round,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_round,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_round,int,32,4) = 0;
+
+/* Expected results when rounding should not cause saturation. */
+VECT_VAR_DECL(expected_round,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_round,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected_round,int,16,8) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff, 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_round,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
+ 0x7fffffff, 0x7fffffff };
+
+#define INSN vqrdmulh
+#define TEST_MSG "VQRDMULH_LANE"
+
+#define FNNAME1(NAME) void exec_ ## NAME ## _lane (void)
+#define FNNAME(NAME) FNNAME1(NAME)
+
+FNNAME (INSN)
+{
+ /* vector_res = vqrdmulh_lane(vector,vector2,lane), then store the result. */
+#define TEST_VQRDMULH_LANE2(INSN, Q, T1, T2, W, N, N2, L, EXPECTED_CUMULATIVE_SAT, CMT) \
+ Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
+ VECT_VAR(vector_res, T1, W, N) = \
+ INSN##Q##_lane_##T2##W(VECT_VAR(vector, T1, W, N), \
+ VECT_VAR(vector2, T1, W, N2), \
+ L); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N)); \
+ CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+  /* Two auxiliary macros are necessary to expand INSN */
+#define TEST_VQRDMULH_LANE1(INSN, Q, T1, T2, W, N, N2, L, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQRDMULH_LANE2(INSN, Q, T1, T2, W, N, N2, L, EXPECTED_CUMULATIVE_SAT, CMT)
+
+#define TEST_VQRDMULH_LANE(Q, T1, T2, W, N, N2, L, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQRDMULH_LANE1(INSN, Q, T1, T2, W, N, N2, L, EXPECTED_CUMULATIVE_SAT, CMT)
+
+
+ DECL_VARIABLE(vector, int, 16, 4);
+ DECL_VARIABLE(vector, int, 32, 2);
+ DECL_VARIABLE(vector, int, 16, 8);
+ DECL_VARIABLE(vector, int, 32, 4);
+
+ DECL_VARIABLE(vector_res, int, 16, 4);
+ DECL_VARIABLE(vector_res, int, 32, 2);
+ DECL_VARIABLE(vector_res, int, 16, 8);
+ DECL_VARIABLE(vector_res, int, 32, 4);
+
+ /* vector2: vqrdmulh_lane and vqrdmulhq_lane have a 2nd argument with
+ the same number of elements, so we need only one variable of each
+ type. */
+ DECL_VARIABLE(vector2, int, 16, 4);
+ DECL_VARIABLE(vector2, int, 32, 2);
+
+ clean_results ();
+
+ VLOAD(vector, buffer, , int, s, 16, 4);
+ VLOAD(vector, buffer, , int, s, 32, 2);
+
+ VLOAD(vector, buffer, q, int, s, 16, 8);
+ VLOAD(vector, buffer, q, int, s, 32, 4);
+
+ /* Initialize vector2. */
+ VDUP(vector2, , int, s, 16, 4, 0x55);
+ VDUP(vector2, , int, s, 32, 2, 0xBB);
+
+ /* Choose lane arbitrarily. */
+#define CMT ""
+ TEST_VQRDMULH_LANE(, int, s, 16, 4, 4, 2, expected_cumulative_sat, CMT);
+ TEST_VQRDMULH_LANE(, int, s, 32, 2, 2, 1, expected_cumulative_sat, CMT);
+ TEST_VQRDMULH_LANE(q, int, s, 16, 8, 4, 3, expected_cumulative_sat, CMT);
+ TEST_VQRDMULH_LANE(q, int, s, 32, 4, 2, 0, expected_cumulative_sat, CMT);
+
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected, CMT);
+
+ /* Now use input values such that the multiplication causes
+ saturation. */
+#define TEST_MSG_MUL " (check mul cumulative saturation)"
+ VDUP(vector, , int, s, 16, 4, 0x8000);
+ VDUP(vector, , int, s, 32, 2, 0x80000000);
+ VDUP(vector, q, int, s, 16, 8, 0x8000);
+ VDUP(vector, q, int, s, 32, 4, 0x80000000);
+ VDUP(vector2, , int, s, 16, 4, 0x8000);
+ VDUP(vector2, , int, s, 32, 2, 0x80000000);
+
+ TEST_VQRDMULH_LANE(, int, s, 16, 4, 4, 2, expected_cumulative_sat_mul,
+ TEST_MSG_MUL);
+ TEST_VQRDMULH_LANE(, int, s, 32, 2, 2, 1, expected_cumulative_sat_mul,
+ TEST_MSG_MUL);
+ TEST_VQRDMULH_LANE(q, int, s, 16, 8, 4, 3, expected_cumulative_sat_mul,
+ TEST_MSG_MUL);
+ TEST_VQRDMULH_LANE(q, int, s, 32, 4, 2, 0, expected_cumulative_sat_mul,
+ TEST_MSG_MUL);
+
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_mul, TEST_MSG_MUL);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_mul, TEST_MSG_MUL);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_mul, TEST_MSG_MUL);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_mul, TEST_MSG_MUL);
+
+ VDUP(vector, , int, s, 16, 4, 0x8000);
+ VDUP(vector, , int, s, 32, 2, 0x80000000);
+ VDUP(vector, q, int, s, 16, 8, 0x8000);
+ VDUP(vector, q, int, s, 32, 4, 0x80000000);
+ VDUP(vector2, , int, s, 16, 4, 0x8001);
+ VDUP(vector2, , int, s, 32, 2, 0x80000001);
+
+ /* Use input values where rounding produces a result equal to the
+ saturation value, but does not set the saturation flag. */
+#define TEST_MSG_ROUND " (check rounding)"
+ TEST_VQRDMULH_LANE(, int, s, 16, 4, 4, 2, expected_cumulative_sat_round,
+ TEST_MSG_ROUND);
+ TEST_VQRDMULH_LANE(, int, s, 32, 2, 2, 1, expected_cumulative_sat_round,
+ TEST_MSG_ROUND);
+ TEST_VQRDMULH_LANE(q, int, s, 16, 8, 4, 3, expected_cumulative_sat_round,
+ TEST_MSG_ROUND);
+ TEST_VQRDMULH_LANE(q, int, s, 32, 4, 2, 0, expected_cumulative_sat_round,
+ TEST_MSG_ROUND);
+
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_round, TEST_MSG_ROUND);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_round, TEST_MSG_ROUND);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_round, TEST_MSG_ROUND);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_round, TEST_MSG_ROUND);
+}
+
+int main (void)
+{
+ exec_vqrdmulh_lane ();
+ return 0;
+}
+
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_n.c
new file mode 100644
index 00000000000..7b43f71bf09
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrdmulh_n.c
@@ -0,0 +1,155 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected values of cumulative_saturation flag. */
+int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,16,4) [] = { 0xfffc, 0xfffc, 0xfffc, 0xfffd };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0xfffffffe, 0xfffffffe };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0x6, 0x6, 0x6, 0x5,
+ 0x5, 0x4, 0x4, 0x4 };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0xfffffffe, 0xfffffffe,
+ 0xfffffffe, 0xfffffffe };
+
+/* Expected values of cumulative_saturation flag when multiplication
+ saturates. */
+int VECT_VAR(expected_cumulative_sat_mul,int,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat_mul,int,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat_mul,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_mul,int,32,4) = 1;
+
+/* Expected results when multiplication saturates. */
+VECT_VAR_DECL(expected_mul,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_mul,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected_mul,int,16,8) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff, 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_mul,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
+ 0x7fffffff, 0x7fffffff };
+
+/* Expected values of cumulative_saturation flag when rounding
+ should not cause saturation. */
+int VECT_VAR(expected_cumulative_sat_round,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_round,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_round,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_round,int,32,4) = 0;
+
+/* Expected results when rounding should not cause saturation. */
+VECT_VAR_DECL(expected_round,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_round,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected_round,int,16,8) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff, 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_round,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
+ 0x7fffffff, 0x7fffffff };
+
+#define INSN vqrdmulh
+#define TEST_MSG "VQRDMULH_N"
+
+#define FNNAME1(NAME) void exec_ ## NAME ## _n (void)
+#define FNNAME(NAME) FNNAME1(NAME)
+
+FNNAME (INSN)
+{
+ int i;
+
+ /* vector_res = vqrdmulh_n(vector,val), then store the result. */
+#define TEST_VQRDMULH_N2(INSN, Q, T1, T2, W, N, L, EXPECTED_CUMULATIVE_SAT, CMT) \
+ Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
+ VECT_VAR(vector_res, T1, W, N) = \
+ INSN##Q##_n_##T2##W(VECT_VAR(vector, T1, W, N), \
+ L); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N)); \
+ CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+  /* Two auxiliary macros are necessary to expand INSN */
+#define TEST_VQRDMULH_N1(INSN, Q, T1, T2, W, N, L, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQRDMULH_N2(INSN, Q, T1, T2, W, N, L, EXPECTED_CUMULATIVE_SAT, CMT)
+
+#define TEST_VQRDMULH_N(Q, T1, T2, W, N, L, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQRDMULH_N1(INSN, Q, T1, T2, W, N, L, EXPECTED_CUMULATIVE_SAT, CMT)
+
+
+ DECL_VARIABLE(vector, int, 16, 4);
+ DECL_VARIABLE(vector, int, 32, 2);
+ DECL_VARIABLE(vector, int, 16, 8);
+ DECL_VARIABLE(vector, int, 32, 4);
+
+ DECL_VARIABLE(vector_res, int, 16, 4);
+ DECL_VARIABLE(vector_res, int, 32, 2);
+ DECL_VARIABLE(vector_res, int, 16, 8);
+ DECL_VARIABLE(vector_res, int, 32, 4);
+
+ clean_results ();
+
+ VLOAD(vector, buffer, , int, s, 16, 4);
+ VLOAD(vector, buffer, , int, s, 32, 2);
+ VLOAD(vector, buffer, q, int, s, 16, 8);
+ VLOAD(vector, buffer, q, int, s, 32, 4);
+
+ /* Choose multiplier arbitrarily. */
+#define CMT ""
+ TEST_VQRDMULH_N(, int, s, 16, 4, 0x2233, expected_cumulative_sat, CMT);
+ TEST_VQRDMULH_N(, int, s, 32, 2, 0x12345678, expected_cumulative_sat, CMT);
+ TEST_VQRDMULH_N(q, int, s, 16, 8, 0xCD12, expected_cumulative_sat, CMT);
+ TEST_VQRDMULH_N(q, int, s, 32, 4, 0xFA23456, expected_cumulative_sat, CMT);
+
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected, CMT);
+
+ /* Now use input values such that the multiplication causes
+ saturation. */
+#define TEST_MSG_MUL " (check mul cumulative saturation)"
+ VDUP(vector, , int, s, 16, 4, 0x8000);
+ VDUP(vector, , int, s, 32, 2, 0x80000000);
+ VDUP(vector, q, int, s, 16, 8, 0x8000);
+ VDUP(vector, q, int, s, 32, 4, 0x80000000);
+
+ TEST_VQRDMULH_N(, int, s, 16, 4, 0x8000, expected_cumulative_sat_mul,
+ TEST_MSG_MUL);
+ TEST_VQRDMULH_N(, int, s, 32, 2, 0x80000000, expected_cumulative_sat_mul,
+ TEST_MSG_MUL);
+ TEST_VQRDMULH_N(q, int, s, 16, 8, 0x8000, expected_cumulative_sat_mul,
+ TEST_MSG_MUL);
+ TEST_VQRDMULH_N(q, int, s, 32, 4, 0x80000000, expected_cumulative_sat_mul,
+ TEST_MSG_MUL);
+
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_mul, TEST_MSG_MUL);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_mul, TEST_MSG_MUL);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_mul, TEST_MSG_MUL);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_mul, TEST_MSG_MUL);
+
+ /* Use input values where rounding produces a result equal to the
+ saturation value, but does not set the saturation flag. */
+#define TEST_MSG_ROUND " (check rounding)"
+ VDUP(vector, , int, s, 16, 4, 0x8000);
+ VDUP(vector, , int, s, 32, 2, 0x80000000);
+ VDUP(vector, q, int, s, 16, 8, 0x8000);
+ VDUP(vector, q, int, s, 32, 4, 0x80000000);
+
+ TEST_VQRDMULH_N(, int, s, 16, 4, 0x8001, expected_cumulative_sat_round,
+ TEST_MSG_ROUND);
+ TEST_VQRDMULH_N(, int, s, 32, 2, 0x80000001, expected_cumulative_sat_round,
+ TEST_MSG_ROUND);
+ TEST_VQRDMULH_N(q, int, s, 16, 8, 0x8001, expected_cumulative_sat_round,
+ TEST_MSG_ROUND);
+ TEST_VQRDMULH_N(q, int, s, 32, 4, 0x80000001, expected_cumulative_sat_round,
+ TEST_MSG_ROUND);
+
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_round, TEST_MSG_ROUND);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_round, TEST_MSG_ROUND);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_round, TEST_MSG_ROUND);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_round, TEST_MSG_ROUND);
+}
+
+int main (void)
+{
+ exec_vqrdmulh_n ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshl.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshl.c
new file mode 100644
index 00000000000..3f0cb377072
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshl.c
@@ -0,0 +1,1090 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected values of cumulative_saturation flag with input=0. */
+int VECT_VAR(expected_cumulative_sat_0,int,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_0,int,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_0,uint,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0,uint,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0,uint,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_0,uint,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_0,int,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_0,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0,int,64,2) = 0;
+int VECT_VAR(expected_cumulative_sat_0,uint,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_0,uint,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0,uint,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0,uint,64,2) = 0;
+
+/* Expected results with input=0. */
+VECT_VAR_DECL(expected_0,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_0,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_0,int,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,64,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,64,2) [] = { 0x0, 0x0 };
+
+/* Expected values of cumulative_saturation flag with input=0 and
+ negative shift amount. */
+int VECT_VAR(expected_cumulative_sat_0_neg,int,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,int,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,uint,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,uint,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,uint,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,uint,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,int,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,int,64,2) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,uint,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,uint,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,uint,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,uint,64,2) = 0;
+
+/* Expected results with input=0 and negative shift amount. */
+VECT_VAR_DECL(expected_0_neg,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,int,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_0_neg,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,uint,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_0_neg,int,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,int,64,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,uint,64,2) [] = { 0x0, 0x0 };
+
+/* Expected values of cumulative_saturation flag. */
+int VECT_VAR(expected_cumulative_sat,int,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat,int,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat,uint,8,8) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,64,1) = 1;
+int VECT_VAR(expected_cumulative_sat,int,8,16) = 1;
+int VECT_VAR(expected_cumulative_sat,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat,int,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat,int,64,2) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,8,16) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,64,2) = 1;
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0xe0, 0xe2, 0xe4, 0xe6,
+ 0xe8, 0xea, 0xec, 0xee };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0xff80, 0xff88, 0xff90, 0xff98 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0xfffff000, 0xfffff100 };
+VECT_VAR_DECL(expected,int,64,1) [] = { 0xffffffffffffff80 };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0xffffffffffffffff };
+VECT_VAR_DECL(expected,int,8,16) [] = { 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80 };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0x8000, 0x8000, 0x8000, 0x8000,
+ 0x8000, 0x8000, 0x8000, 0x8000 };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0x80000000, 0x80000000,
+ 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0x8000000000000000, 0x8000000000000000 };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0xffffffffffffffff,
+ 0xffffffffffffffff };
+
+/* Expected values of cumulative_saturation flag with negative shift
+ amount. */
+int VECT_VAR(expected_cumulative_sat_neg,int,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,int,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,uint,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,uint,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,uint,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,uint,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,int,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,int,64,2) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,uint,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,uint,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,uint,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,uint,64,2) = 0;
+
+/* Expected results with negative shift amount. */
+VECT_VAR_DECL(expected_neg,int,8,8) [] = { 0xfc, 0xfc, 0xfd, 0xfd,
+ 0xfd, 0xfd, 0xfe, 0xfe };
+VECT_VAR_DECL(expected_neg,int,16,4) [] = { 0xfffc, 0xfffc, 0xfffd, 0xfffd };
+VECT_VAR_DECL(expected_neg,int,32,2) [] = { 0xfffffffe, 0xfffffffe };
+VECT_VAR_DECL(expected_neg,int,64,1) [] = { 0xffffffffffffffff };
+VECT_VAR_DECL(expected_neg,uint,8,8) [] = { 0x3c, 0x3c, 0x3d, 0x3d,
+ 0x3d, 0x3d, 0x3e, 0x3e };
+VECT_VAR_DECL(expected_neg,uint,16,4) [] = { 0x3ffc, 0x3ffc, 0x3ffd, 0x3ffd };
+VECT_VAR_DECL(expected_neg,uint,32,2) [] = { 0x1ffffffe, 0x1ffffffe };
+VECT_VAR_DECL(expected_neg,uint,64,1) [] = { 0xfffffffffffffff };
+VECT_VAR_DECL(expected_neg,int,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_neg,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_neg,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_neg,int,64,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_neg,uint,8,16) [] = { 0x2, 0x2, 0x2, 0x2,
+ 0x2, 0x2, 0x2, 0x2,
+ 0x2, 0x2, 0x2, 0x2,
+ 0x2, 0x2, 0x2, 0x2 };
+VECT_VAR_DECL(expected_neg,uint,16,8) [] = { 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20 };
+VECT_VAR_DECL(expected_neg,uint,32,4) [] = { 0x80000, 0x80000,
+ 0x80000, 0x80000 };
+VECT_VAR_DECL(expected_neg,uint,64,2) [] = { 0x100000000000, 0x100000000000 };
+
+/* Expected values of cumulative_saturation flag with input=max and
+ shift by -1. */
+int VECT_VAR(expected_cumulative_sat_minus1,int,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_minus1,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_minus1,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_minus1,int,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_minus1,uint,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_minus1,uint,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_minus1,uint,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_minus1,uint,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_minus1,int,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_minus1,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_minus1,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_minus1,int,64,2) = 0;
+int VECT_VAR(expected_cumulative_sat_minus1,uint,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_minus1,uint,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_minus1,uint,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_minus1,uint,64,2) = 0;
+
+/* Expected results with input=max and shift by -1. */
+VECT_VAR_DECL(expected_minus1,int,8,8) [] = { 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40 };
+VECT_VAR_DECL(expected_minus1,int,16,4) [] = { 0x4000, 0x4000, 0x4000, 0x4000 };
+VECT_VAR_DECL(expected_minus1,int,32,2) [] = { 0x40000000, 0x40000000 };
+VECT_VAR_DECL(expected_minus1,int,64,1) [] = { 0x4000000000000000 };
+VECT_VAR_DECL(expected_minus1,uint,8,8) [] = { 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80 };
+VECT_VAR_DECL(expected_minus1,uint,16,4) [] = { 0x8000, 0x8000, 0x8000, 0x8000 };
+VECT_VAR_DECL(expected_minus1,uint,32,2) [] = { 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected_minus1,uint,64,1) [] = { 0x8000000000000000 };
+VECT_VAR_DECL(expected_minus1,int,8,16) [] = { 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40 };
+VECT_VAR_DECL(expected_minus1,int,16,8) [] = { 0x4000, 0x4000, 0x4000, 0x4000,
+ 0x4000, 0x4000, 0x4000, 0x4000 };
+VECT_VAR_DECL(expected_minus1,int,32,4) [] = { 0x40000000, 0x40000000,
+ 0x40000000, 0x40000000 };
+VECT_VAR_DECL(expected_minus1,int,64,2) [] = { 0x4000000000000000,
+ 0x4000000000000000 };
+VECT_VAR_DECL(expected_minus1,uint,8,16) [] = { 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80 };
+VECT_VAR_DECL(expected_minus1,uint,16,8) [] = { 0x8000, 0x8000, 0x8000, 0x8000,
+ 0x8000, 0x8000, 0x8000, 0x8000 };
+VECT_VAR_DECL(expected_minus1,uint,32,4) [] = { 0x80000000, 0x80000000,
+ 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected_minus1,uint,64,2) [] = { 0x8000000000000000,
+ 0x8000000000000000 };
+
+/* Expected values of cumulative_saturation flag with input=max and
+ shift by -3. */
+int VECT_VAR(expected_cumulative_sat_minus3,int,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_minus3,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_minus3,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_minus3,int,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_minus3,uint,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_minus3,uint,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_minus3,uint,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_minus3,uint,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_minus3,int,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_minus3,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_minus3,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_minus3,int,64,2) = 0;
+int VECT_VAR(expected_cumulative_sat_minus3,uint,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_minus3,uint,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_minus3,uint,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_minus3,uint,64,2) = 0;
+
+/* Expected results with input=max and shift by -3. */
+VECT_VAR_DECL(expected_minus3,int,8,8) [] = { 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10 };
+VECT_VAR_DECL(expected_minus3,int,16,4) [] = { 0x1000, 0x1000, 0x1000, 0x1000 };
+VECT_VAR_DECL(expected_minus3,int,32,2) [] = { 0x10000000, 0x10000000 };
+VECT_VAR_DECL(expected_minus3,int,64,1) [] = { 0x1000000000000000 };
+VECT_VAR_DECL(expected_minus3,uint,8,8) [] = { 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20 };
+VECT_VAR_DECL(expected_minus3,uint,16,4) [] = { 0x2000, 0x2000, 0x2000, 0x2000 };
+VECT_VAR_DECL(expected_minus3,uint,32,2) [] = { 0x20000000, 0x20000000 };
+VECT_VAR_DECL(expected_minus3,uint,64,1) [] = { 0x2000000000000000 };
+VECT_VAR_DECL(expected_minus3,int,8,16) [] = { 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10 };
+VECT_VAR_DECL(expected_minus3,int,16,8) [] = { 0x1000, 0x1000, 0x1000, 0x1000,
+ 0x1000, 0x1000, 0x1000, 0x1000 };
+VECT_VAR_DECL(expected_minus3,int,32,4) [] = { 0x10000000, 0x10000000,
+ 0x10000000, 0x10000000 };
+VECT_VAR_DECL(expected_minus3,int,64,2) [] = { 0x1000000000000000,
+ 0x1000000000000000 };
+VECT_VAR_DECL(expected_minus3,uint,8,16) [] = { 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20 };
+VECT_VAR_DECL(expected_minus3,uint,16,8) [] = { 0x2000, 0x2000, 0x2000, 0x2000,
+ 0x2000, 0x2000, 0x2000, 0x2000 };
+VECT_VAR_DECL(expected_minus3,uint,32,4) [] = { 0x20000000, 0x20000000,
+ 0x20000000, 0x20000000 };
+VECT_VAR_DECL(expected_minus3,uint,64,2) [] = { 0x2000000000000000,
+ 0x2000000000000000 };
+
+/* Expected values of cumulative_saturation flag with input=max and
+ large shift amount. */
+int VECT_VAR(expected_cumulative_sat_large_sh,int,8,8) = 1;
+int VECT_VAR(expected_cumulative_sat_large_sh,int,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat_large_sh,int,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat_large_sh,int,64,1) = 1;
+int VECT_VAR(expected_cumulative_sat_large_sh,uint,8,8) = 1;
+int VECT_VAR(expected_cumulative_sat_large_sh,uint,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat_large_sh,uint,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat_large_sh,uint,64,1) = 1;
+int VECT_VAR(expected_cumulative_sat_large_sh,int,8,16) = 1;
+int VECT_VAR(expected_cumulative_sat_large_sh,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_large_sh,int,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_large_sh,int,64,2) = 1;
+int VECT_VAR(expected_cumulative_sat_large_sh,uint,8,16) = 1;
+int VECT_VAR(expected_cumulative_sat_large_sh,uint,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_large_sh,uint,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_large_sh,uint,64,2) = 1;
+
+/* Expected results with input=max and large shift amount. */
+VECT_VAR_DECL(expected_large_sh,int,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f };
+VECT_VAR_DECL(expected_large_sh,int,16,4) [] = { 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_large_sh,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected_large_sh,int,64,1) [] = { 0x7fffffffffffffff };
+VECT_VAR_DECL(expected_large_sh,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_large_sh,uint,16,4) [] = { 0xffff, 0xffff,
+ 0xffff, 0xffff };
+VECT_VAR_DECL(expected_large_sh,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_large_sh,uint,64,1) [] = { 0xffffffffffffffff };
+VECT_VAR_DECL(expected_large_sh,int,8,16) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f };
+VECT_VAR_DECL(expected_large_sh,int,16,8) [] = { 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_large_sh,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
+ 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected_large_sh,int,64,2) [] = { 0x7fffffffffffffff,
+ 0x7fffffffffffffff };
+VECT_VAR_DECL(expected_large_sh,uint,8,16) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_large_sh,uint,16,8) [] = { 0xffff, 0xffff,
+ 0xffff, 0xffff,
+ 0xffff, 0xffff,
+ 0xffff, 0xffff };
+VECT_VAR_DECL(expected_large_sh,uint,32,4) [] = { 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_large_sh,uint,64,2) [] = { 0xffffffffffffffff,
+ 0xffffffffffffffff };
+
+/* Expected values of cumulative_saturation flag with negative input and
+ large shift amount. */
+int VECT_VAR(expected_cumulative_sat_neg_large_sh,int,8,8) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large_sh,int,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large_sh,int,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large_sh,int,64,1) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large_sh,uint,8,8) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large_sh,uint,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large_sh,uint,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large_sh,uint,64,1) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large_sh,int,8,16) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large_sh,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large_sh,int,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large_sh,int,64,2) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large_sh,uint,8,16) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large_sh,uint,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large_sh,uint,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large_sh,uint,64,2) = 1;
+
+/* Expected results with negative input and large shift amount. */
+VECT_VAR_DECL(expected_neg_large_sh,int,8,8) [] = { 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80 };
+VECT_VAR_DECL(expected_neg_large_sh,int,16,4) [] = { 0x8000, 0x8000,
+ 0x8000, 0x8000 };
+VECT_VAR_DECL(expected_neg_large_sh,int,32,2) [] = { 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected_neg_large_sh,int,64,1) [] = { 0x8000000000000000 };
+VECT_VAR_DECL(expected_neg_large_sh,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_neg_large_sh,uint,16,4) [] = { 0xffff, 0xffff,
+ 0xffff, 0xffff };
+VECT_VAR_DECL(expected_neg_large_sh,uint,32,2) [] = { 0xffffffff,
+ 0xffffffff };
+VECT_VAR_DECL(expected_neg_large_sh,uint,64,1) [] = { 0xffffffffffffffff };
+VECT_VAR_DECL(expected_neg_large_sh,int,8,16) [] = { 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80 };
+VECT_VAR_DECL(expected_neg_large_sh,int,16,8) [] = { 0x8000, 0x8000,
+ 0x8000, 0x8000,
+ 0x8000, 0x8000,
+ 0x8000, 0x8000 };
+VECT_VAR_DECL(expected_neg_large_sh,int,32,4) [] = { 0x80000000, 0x80000000,
+ 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected_neg_large_sh,int,64,2) [] = { 0x8000000000000000,
+ 0x8000000000000000 };
+VECT_VAR_DECL(expected_neg_large_sh,uint,8,16) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_neg_large_sh,uint,16,8) [] = { 0xffff, 0xffff,
+ 0xffff, 0xffff,
+ 0xffff, 0xffff,
+ 0xffff, 0xffff };
+VECT_VAR_DECL(expected_neg_large_sh,uint,32,4) [] = { 0xffffffff,
+ 0xffffffff,
+ 0xffffffff,
+ 0xffffffff };
+VECT_VAR_DECL(expected_neg_large_sh,uint,64,2) [] = { 0xffffffffffffffff,
+ 0xffffffffffffffff };
+
+/* Expected values of cumulative_saturation flag with max/min input and
+ large negative shift amount. */
+int VECT_VAR(expected_cumulative_sat_large_neg_sh,int,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_large_neg_sh,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_large_neg_sh,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_large_neg_sh,int,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_large_neg_sh,uint,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_large_neg_sh,uint,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_large_neg_sh,uint,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_large_neg_sh,uint,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_large_neg_sh,int,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_large_neg_sh,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_large_neg_sh,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_large_neg_sh,int,64,2) = 0;
+int VECT_VAR(expected_cumulative_sat_large_neg_sh,uint,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_large_neg_sh,uint,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_large_neg_sh,uint,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_large_neg_sh,uint,64,2) = 0;
+
+/* Expected results with max/min input and large negative shift amount. */
+VECT_VAR_DECL(expected_large_neg_sh,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_large_neg_sh,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_large_neg_sh,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_large_neg_sh,int,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_large_neg_sh,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_large_neg_sh,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_large_neg_sh,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_large_neg_sh,uint,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_large_neg_sh,int,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_large_neg_sh,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_large_neg_sh,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_large_neg_sh,int,64,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_large_neg_sh,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_large_neg_sh,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_large_neg_sh,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_large_neg_sh,uint,64,2) [] = { 0x0, 0x0 };
+
+/* Expected values of cumulative_saturation flag with input=0 and
+ large negative shift amount. */
+int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,int,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,int,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,uint,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,uint,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,uint,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,uint,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,int,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,int,64,2) = 0;
+int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,uint,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,uint,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,uint,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0_large_neg_sh,uint,64,2) = 0;
+
+/* Expected results with input=0 and large negative shift amount. */
+VECT_VAR_DECL(expected_0_large_neg_sh,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_large_neg_sh,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_large_neg_sh,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_large_neg_sh,int,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_0_large_neg_sh,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_large_neg_sh,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_large_neg_sh,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_large_neg_sh,uint,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_0_large_neg_sh,int,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_large_neg_sh,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_large_neg_sh,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_large_neg_sh,int,64,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_large_neg_sh,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_large_neg_sh,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_large_neg_sh,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_large_neg_sh,uint,64,2) [] = { 0x0, 0x0 };
+
+#define INSN vqrshl
+#define TEST_MSG "VQRSHL/VQRSHLQ"
+
+#define FNNAME1(NAME) void exec_ ## NAME (void)
+#define FNNAME(NAME) FNNAME1(NAME)
+
+FNNAME (INSN)
+{
+ /* Basic test: v3=vqrshl(v1,v2), then store the result. */
+#define TEST_VQRSHL2(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+ Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
+ VECT_VAR(vector_res, T1, W, N) = \
+ INSN##Q##_##T2##W(VECT_VAR(vector, T1, W, N), \
+ VECT_VAR(vector_shift, T3, W, N)); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N)); \
+ CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+ /* Two auxiliary macros are necessary to expand INSN. */
+#define TEST_VQRSHL1(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQRSHL2(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+#define TEST_VQRSHL(T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQRSHL1(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+ DECL_VARIABLE_ALL_VARIANTS(vector);
+ DECL_VARIABLE_ALL_VARIANTS(vector_res);
+
+ DECL_VARIABLE_SIGNED_VARIANTS(vector_shift);
+
+ clean_results ();
+
+ /* Fill input vector with 0, to check saturation on limits. */
+ VDUP(vector, , int, s, 8, 8, 0);
+ VDUP(vector, , int, s, 16, 4, 0);
+ VDUP(vector, , int, s, 32, 2, 0);
+ VDUP(vector, , int, s, 64, 1, 0);
+ VDUP(vector, , uint, u, 8, 8, 0);
+ VDUP(vector, , uint, u, 16, 4, 0);
+ VDUP(vector, , uint, u, 32, 2, 0);
+ VDUP(vector, , uint, u, 64, 1, 0);
+ VDUP(vector, q, int, s, 8, 16, 0);
+ VDUP(vector, q, int, s, 16, 8, 0);
+ VDUP(vector, q, int, s, 32, 4, 0);
+ VDUP(vector, q, int, s, 64, 2, 0);
+ VDUP(vector, q, uint, u, 8, 16, 0);
+ VDUP(vector, q, uint, u, 16, 8, 0);
+ VDUP(vector, q, uint, u, 32, 4, 0);
+ VDUP(vector, q, uint, u, 64, 2, 0);
+
+ /* Choose init value arbitrarily; it will be used as shift amount. */
+ /* Use values equal to or one-less-than the type width to check
+ behaviour on limits. */
+ VDUP(vector_shift, , int, s, 8, 8, 7);
+ VDUP(vector_shift, , int, s, 16, 4, 15);
+ VDUP(vector_shift, , int, s, 32, 2, 31);
+ VDUP(vector_shift, , int, s, 64, 1, 63);
+ VDUP(vector_shift, q, int, s, 8, 16, 8);
+ VDUP(vector_shift, q, int, s, 16, 8, 16);
+ VDUP(vector_shift, q, int, s, 32, 4, 32);
+ VDUP(vector_shift, q, int, s, 64, 2, 64);
+
+#define CMT " (with input = 0)"
+ TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat_0, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat_0, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat_0, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat_0, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat_0, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat_0, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat_0, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat_0, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat_0, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat_0, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat_0, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat_0, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_0, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_0, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_0, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_0, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_0, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_0, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_0, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_0, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_0, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_0, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_0, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_0, CMT);
+
+
+ /* Use negative shift amounts. */
+ VDUP(vector_shift, , int, s, 8, 8, -1);
+ VDUP(vector_shift, , int, s, 16, 4, -2);
+ VDUP(vector_shift, , int, s, 32, 2, -3);
+ VDUP(vector_shift, , int, s, 64, 1, -4);
+ VDUP(vector_shift, q, int, s, 8, 16, -7);
+ VDUP(vector_shift, q, int, s, 16, 8, -11);
+ VDUP(vector_shift, q, int, s, 32, 4, -13);
+ VDUP(vector_shift, q, int, s, 64, 2, -20);
+
+#undef CMT
+#define CMT " (input 0 and negative shift amount)"
+ TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_0_neg, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_0_neg, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_0_neg, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_0_neg, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_0_neg, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_0_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_0_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_0_neg, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_0_neg, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_0_neg, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_0_neg, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_0_neg, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_0_neg, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_0_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_0_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_0_neg, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_0_neg, CMT);
+
+
+ /* Test again, with predefined input values. */
+ TEST_MACRO_ALL_VARIANTS_2_5(VLOAD, vector, buffer);
+
+ /* Choose init value arbitrarily; it will be used as shift amount. */
+ VDUP(vector_shift, , int, s, 8, 8, 1);
+ VDUP(vector_shift, , int, s, 16, 4, 3);
+ VDUP(vector_shift, , int, s, 32, 2, 8);
+ VDUP(vector_shift, , int, s, 64, 1, 3);
+ VDUP(vector_shift, q, int, s, 8, 16, 10);
+ VDUP(vector_shift, q, int, s, 16, 8, 12);
+ VDUP(vector_shift, q, int, s, 32, 4, 31);
+ VDUP(vector_shift, q, int, s, 64, 2, 63);
+
+#undef CMT
+#define CMT ""
+ TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected, CMT);
+
+
+ /* Use negative shift amounts. */
+ VDUP(vector_shift, , int, s, 8, 8, -2);
+ VDUP(vector_shift, , int, s, 16, 4, -2);
+ VDUP(vector_shift, , int, s, 32, 2, -3);
+ VDUP(vector_shift, , int, s, 64, 1, -4);
+ VDUP(vector_shift, q, int, s, 8, 16, -7);
+ VDUP(vector_shift, q, int, s, 16, 8, -11);
+ VDUP(vector_shift, q, int, s, 32, 4, -13);
+ VDUP(vector_shift, q, int, s, 64, 2, -20);
+
+#undef CMT
+#define CMT " (negative shift amount)"
+ TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_neg, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_neg, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_neg, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_neg, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_neg, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_neg, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_neg, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_neg, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_neg, CMT);
+
+
+ /* Fill input vector with max value, to check saturation on
+ limits. */
+ VDUP(vector, , int, s, 8, 8, 0x7F);
+ VDUP(vector, , int, s, 16, 4, 0x7FFF);
+ VDUP(vector, , int, s, 32, 2, 0x7FFFFFFF);
+ VDUP(vector, , int, s, 64, 1, 0x7FFFFFFFFFFFFFFFLL);
+ VDUP(vector, , uint, u, 8, 8, 0xFF);
+ VDUP(vector, , uint, u, 16, 4, 0xFFFF);
+ VDUP(vector, , uint, u, 32, 2, 0xFFFFFFFF);
+ VDUP(vector, , uint, u, 64, 1, 0xFFFFFFFFFFFFFFFFULL);
+ VDUP(vector, q, int, s, 8, 16, 0x7F);
+ VDUP(vector, q, int, s, 16, 8, 0x7FFF);
+ VDUP(vector, q, int, s, 32, 4, 0x7FFFFFFF);
+ VDUP(vector, q, int, s, 64, 2, 0x7FFFFFFFFFFFFFFFLL);
+ VDUP(vector, q, uint, u, 8, 16, 0xFF);
+ VDUP(vector, q, uint, u, 16, 8, 0xFFFF);
+ VDUP(vector, q, uint, u, 32, 4, 0xFFFFFFFF);
+ VDUP(vector, q, uint, u, 64, 2, 0xFFFFFFFFFFFFFFFFULL);
+
+ /* Use -1 shift amount to check cumulative saturation with
+ round_const. */
+ VDUP(vector_shift, , int, s, 8, 8, -1);
+ VDUP(vector_shift, , int, s, 16, 4, -1);
+ VDUP(vector_shift, , int, s, 32, 2, -1);
+ VDUP(vector_shift, , int, s, 64, 1, -1);
+ VDUP(vector_shift, q, int, s, 8, 16, -1);
+ VDUP(vector_shift, q, int, s, 16, 8, -1);
+ VDUP(vector_shift, q, int, s, 32, 4, -1);
+ VDUP(vector_shift, q, int, s, 64, 2, -1);
+
+#undef CMT
+#define CMT " (checking cumulative saturation: shift by -1)"
+ TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat_minus1, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat_minus1, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat_minus1, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat_minus1, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat_minus1, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat_minus1, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat_minus1, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat_minus1, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat_minus1, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat_minus1, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat_minus1, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat_minus1, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_minus1, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_minus1, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_minus1, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_minus1, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_minus1, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_minus1, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_minus1, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_minus1, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_minus1, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_minus1, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_minus1, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_minus1, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_minus1, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_minus1, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_minus1, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_minus1, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_minus1, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_minus1, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_minus1, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_minus1, CMT);
+
+
+ /* Use -3 shift amount to check cumulative saturation with
+ round_const. */
+ VDUP(vector_shift, , int, s, 8, 8, -3);
+ VDUP(vector_shift, , int, s, 16, 4, -3);
+ VDUP(vector_shift, , int, s, 32, 2, -3);
+ VDUP(vector_shift, , int, s, 64, 1, -3);
+ VDUP(vector_shift, q, int, s, 8, 16, -3);
+ VDUP(vector_shift, q, int, s, 16, 8, -3);
+ VDUP(vector_shift, q, int, s, 32, 4, -3);
+ VDUP(vector_shift, q, int, s, 64, 2, -3);
+
+#undef CMT
+#define CMT " (checking cumulative saturation: shift by -3)"
+ TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat_minus3, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat_minus3, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat_minus3, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat_minus3, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat_minus3, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat_minus3, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat_minus3, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat_minus3, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat_minus3, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat_minus3, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat_minus3, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat_minus3, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_minus3, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_minus3, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_minus3, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_minus3, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_minus3, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_minus3, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_minus3, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_minus3, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_minus3, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_minus3, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_minus3, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_minus3, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_minus3, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_minus3, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_minus3, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_minus3, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_minus3, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_minus3, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_minus3, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_minus3, CMT);
+
+
+ /* Use large shift amount. */
+ VDUP(vector_shift, , int, s, 8, 8, 10);
+ VDUP(vector_shift, , int, s, 16, 4, 20);
+ VDUP(vector_shift, , int, s, 32, 2, 40);
+ VDUP(vector_shift, , int, s, 64, 1, 70);
+ VDUP(vector_shift, q, int, s, 8, 16, 10);
+ VDUP(vector_shift, q, int, s, 16, 8, 20);
+ VDUP(vector_shift, q, int, s, 32, 4, 40);
+ VDUP(vector_shift, q, int, s, 64, 2, 70);
+
+#undef CMT
+#define CMT " (checking cumulative saturation: large shift amount)"
+ TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat_large_sh, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat_large_sh, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat_large_sh, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat_large_sh, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat_large_sh, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat_large_sh, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat_large_sh, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat_large_sh, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat_large_sh, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat_large_sh, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat_large_sh, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat_large_sh, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_large_sh, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_large_sh, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_large_sh, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_large_sh, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_large_sh, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_large_sh, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_large_sh, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_large_sh, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_large_sh, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_large_sh, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_large_sh, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_large_sh, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_large_sh, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_large_sh, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_large_sh, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_large_sh, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_large_sh, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_large_sh, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_large_sh, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_large_sh, CMT);
+
+
+ /* Fill input vector with negative values, to check saturation on
+ limits. */
+ VDUP(vector, , int, s, 8, 8, 0x80);
+ VDUP(vector, , int, s, 16, 4, 0x8000);
+ VDUP(vector, , int, s, 32, 2, 0x80000000);
+ VDUP(vector, , int, s, 64, 1, 0x8000000000000000LL);
+ VDUP(vector, q, int, s, 8, 16, 0x80);
+ VDUP(vector, q, int, s, 16, 8, 0x8000);
+ VDUP(vector, q, int, s, 32, 4, 0x80000000);
+ VDUP(vector, q, int, s, 64, 2, 0x8000000000000000LL);
+
+ /* Use large shift amount. */
+ VDUP(vector_shift, , int, s, 8, 8, 10);
+ VDUP(vector_shift, , int, s, 16, 4, 20);
+ VDUP(vector_shift, , int, s, 32, 2, 40);
+ VDUP(vector_shift, , int, s, 64, 1, 70);
+ VDUP(vector_shift, q, int, s, 8, 16, 10);
+ VDUP(vector_shift, q, int, s, 16, 8, 20);
+ VDUP(vector_shift, q, int, s, 32, 4, 40);
+ VDUP(vector_shift, q, int, s, 64, 2, 70);
+
+#undef CMT
+#define CMT " (checking cumulative saturation: large shift amount with negative input)"
+ TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat_neg_large_sh, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat_neg_large_sh, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat_neg_large_sh, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat_neg_large_sh, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat_neg_large_sh, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat_neg_large_sh, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat_neg_large_sh, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat_neg_large_sh, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat_neg_large_sh, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat_neg_large_sh, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat_neg_large_sh, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat_neg_large_sh, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_neg_large_sh, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_neg_large_sh, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_neg_large_sh, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_neg_large_sh, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_neg_large_sh, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_neg_large_sh, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_neg_large_sh, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_neg_large_sh, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_neg_large_sh, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_neg_large_sh, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_neg_large_sh, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_neg_large_sh, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_neg_large_sh, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_neg_large_sh, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_neg_large_sh, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_neg_large_sh, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_neg_large_sh, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_neg_large_sh, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_neg_large_sh, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_neg_large_sh, CMT);
+
+
+ /* Fill input vector with max positive values (64-bit vectors) and min
+ negative values (128-bit vectors), to check saturation on limits. */
+ VDUP(vector, , int, s, 8, 8, 0x7F);
+ VDUP(vector, , int, s, 16, 4, 0x7FFF);
+ VDUP(vector, , int, s, 32, 2, 0x7FFFFFFF);
+ VDUP(vector, , int, s, 64, 1, 0x7FFFFFFFFFFFFFFFLL);
+ VDUP(vector, q, int, s, 8, 16, 0x80);
+ VDUP(vector, q, int, s, 16, 8, 0x8000);
+ VDUP(vector, q, int, s, 32, 4, 0x80000000);
+ VDUP(vector, q, int, s, 64, 2, 0x8000000000000000LL);
+
+ /* Use large negative shift amount. */
+ VDUP(vector_shift, , int, s, 8, 8, -10);
+ VDUP(vector_shift, , int, s, 16, 4, -20);
+ VDUP(vector_shift, , int, s, 32, 2, -40);
+ VDUP(vector_shift, , int, s, 64, 1, -70);
+ VDUP(vector_shift, q, int, s, 8, 16, -10);
+ VDUP(vector_shift, q, int, s, 16, 8, -20);
+ VDUP(vector_shift, q, int, s, 32, 4, -40);
+ VDUP(vector_shift, q, int, s, 64, 2, -70);
+
+#undef CMT
+#define CMT " (checking cumulative saturation: large negative shift amount)"
+ TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_large_neg_sh, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_large_neg_sh, CMT);
+
+
+ /* Fill input vector with 0, to check saturation in case of large
+ negative shift amount. */
+ VDUP(vector, , int, s, 8, 8, 0);
+ VDUP(vector, , int, s, 16, 4, 0);
+ VDUP(vector, , int, s, 32, 2, 0);
+ VDUP(vector, , int, s, 64, 1, 0);
+ VDUP(vector, q, int, s, 8, 16, 0);
+ VDUP(vector, q, int, s, 16, 8, 0);
+ VDUP(vector, q, int, s, 32, 4, 0);
+ VDUP(vector, q, int, s, 64, 2, 0);
+
+ /* Use large negative shift amount. */
+ VDUP(vector_shift, , int, s, 8, 8, -10);
+ VDUP(vector_shift, , int, s, 16, 4, -20);
+ VDUP(vector_shift, , int, s, 32, 2, -40);
+ VDUP(vector_shift, , int, s, 64, 1, -70);
+ VDUP(vector_shift, q, int, s, 8, 16, -10);
+ VDUP(vector_shift, q, int, s, 16, 8, -20);
+ VDUP(vector_shift, q, int, s, 32, 4, -40);
+ VDUP(vector_shift, q, int, s, 64, 2, -70);
+
+#undef CMT
+#define CMT " (checking cumulative saturation: large negative shift amount with 0 input)"
+ TEST_VQRSHL(int, , int, s, 8, 8, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, , int, s, 16, 4, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, , int, s, 32, 2, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, , int, s, 64, 1, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, , uint, u, 8, 8, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, , uint, u, 16, 4, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, , uint, u, 32, 2, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, , uint, u, 64, 1, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, q, int, s, 8, 16, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, q, int, s, 16, 8, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, q, int, s, 32, 4, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, q, int, s, 64, 2, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_large_neg_sh, CMT);
+ TEST_VQRSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_large_neg_sh, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_large_neg_sh, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_large_neg_sh, CMT);
+}
+
+int main (void)
+{
+ exec_vqrshl ();
+ return 0;
+}
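For reference, the behaviour exercised by the vqrshl tests above can be modelled per lane as a rounding shift followed by clamping to the element range, with the QC (cumulative saturation) flag recording any clamp. The scalar sketch below is only an illustration of that model for signed 8-bit lanes and the small shift counts used here; the helper name and the simplified rounding are assumptions, not the intrinsic's definition, and the real vqrshl/vqrshlq intrinsics operate on whole vectors and update the QC flag in hardware.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical scalar model of one s8 lane of vqrshl: scale up for a
   non-negative shift count, rounding right shift for a negative one,
   then saturate to the int8_t range.  'sat' stands in for the QC flag. */
static int8_t qrshl_s8_model (int8_t a, int8_t shift, int *sat)
{
  int64_t v = a;
  if (shift >= 0)
    v *= (int64_t) 1 << shift;                  /* may leave the s8 range */
  else
    v = (v + (1LL << (-shift - 1))) >> -shift;  /* rounding right shift */
  if (v > INT8_MAX) { v = INT8_MAX; *sat = 1; }
  if (v < INT8_MIN) { v = INT8_MIN; *sat = 1; }
  return (int8_t) v;
}

int main (void)
{
  int sat = 0;
  /* Negative input with a large shift clamps to the minimum (0x80) and
     sets the flag, matching the expected_neg_large_sh values above. */
  printf ("%d sat=%d\n", qrshl_s8_model ((int8_t) 0x80, 10, &sat), sat);
  sat = 0;
  /* Input 0 with a large negative shift stays 0 without saturating,
     matching the all-zero results checked above for that case. */
  printf ("%d sat=%d\n", qrshl_s8_model (0, -10, &sat), sat);
  return 0;
}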
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshrn_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshrn_n.c
new file mode 100644
index 00000000000..7bbcb856f67
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshrn_n.c
@@ -0,0 +1,174 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected values of cumulative_saturation flag. */
+int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
+int VECT_VAR(expected_cumulative_sat,uint,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,64,2) = 1;
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0xf8, 0xf9, 0xf9, 0xfa,
+ 0xfa, 0xfb, 0xfb, 0xfc };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0xfff8, 0xfff9, 0xfff9, 0xfffa };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0xfffffffc, 0xfffffffc };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+
+/* Expected values of cumulative_saturation flag with shift by 3. */
+int VECT_VAR(expected_cumulative_sat_sh3,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_sh3,int,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_sh3,int,64,2) = 1;
+int VECT_VAR(expected_cumulative_sat_sh3,uint,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_sh3,uint,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_sh3,uint,64,2) = 1;
+
+/* Expected results with shift by 3. */
+VECT_VAR_DECL(expected_sh3,int,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f };
+VECT_VAR_DECL(expected_sh3,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_sh3,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected_sh3,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_sh3,uint,16,4) [] = { 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected_sh3,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+
+/* Expected values of cumulative_saturation flag with shift by max
+ amount. */
+int VECT_VAR(expected_cumulative_sat_shmax,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_shmax,int,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_shmax,int,64,2) = 1;
+int VECT_VAR(expected_cumulative_sat_shmax,uint,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_shmax,uint,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_shmax,uint,64,2) = 1;
+
+/* Expected results with shift by max amount. */
+VECT_VAR_DECL(expected_shmax,int,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f };
+VECT_VAR_DECL(expected_shmax,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_shmax,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected_shmax,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_shmax,uint,16,4) [] = { 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected_shmax,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+
+#define INSN vqrshrn_n
+#define TEST_MSG "VQRSHRN_N"
+
+#define FNNAME1(NAME) void exec_ ## NAME (void)
+#define FNNAME(NAME) FNNAME1(NAME)
+
+FNNAME (INSN)
+{
+ /* Basic test: y=vqrshrn_n(x,v), then store the result. */
+#define TEST_VQRSHRN_N2(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+ Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W2, N)); \
+ VECT_VAR(vector_res, T1, W2, N) = \
+ INSN##_##T2##W(VECT_VAR(vector, T1, W, N), \
+ V); \
+ vst1_##T2##W2(VECT_VAR(result, T1, W2, N), \
+ VECT_VAR(vector_res, T1, W2, N)); \
+ CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+ /* Two auxiliary macros are necessary to expand INSN. */
+#define TEST_VQRSHRN_N1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQRSHRN_N2(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+
+#define TEST_VQRSHRN_N(T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQRSHRN_N1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+
+
+ /* vector is twice as large as vector_res. */
+ DECL_VARIABLE(vector, int, 16, 8);
+ DECL_VARIABLE(vector, int, 32, 4);
+ DECL_VARIABLE(vector, int, 64, 2);
+ DECL_VARIABLE(vector, uint, 16, 8);
+ DECL_VARIABLE(vector, uint, 32, 4);
+ DECL_VARIABLE(vector, uint, 64, 2);
+
+ DECL_VARIABLE(vector_res, int, 8, 8);
+ DECL_VARIABLE(vector_res, int, 16, 4);
+ DECL_VARIABLE(vector_res, int, 32, 2);
+ DECL_VARIABLE(vector_res, uint, 8, 8);
+ DECL_VARIABLE(vector_res, uint, 16, 4);
+ DECL_VARIABLE(vector_res, uint, 32, 2);
+
+ clean_results ();
+
+ VLOAD(vector, buffer, q, int, s, 16, 8);
+ VLOAD(vector, buffer, q, int, s, 32, 4);
+ VLOAD(vector, buffer, q, int, s, 64, 2);
+ VLOAD(vector, buffer, q, uint, u, 16, 8);
+ VLOAD(vector, buffer, q, uint, u, 32, 4);
+ VLOAD(vector, buffer, q, uint, u, 64, 2);
+
+ /* Choose shift amount arbitrarily. */
+#define CMT ""
+ TEST_VQRSHRN_N(int, s, 16, 8, 8, 1, expected_cumulative_sat, CMT);
+ TEST_VQRSHRN_N(int, s, 32, 16, 4, 1, expected_cumulative_sat, CMT);
+ TEST_VQRSHRN_N(int, s, 64, 32, 2, 2, expected_cumulative_sat, CMT);
+ TEST_VQRSHRN_N(uint, u, 16, 8, 8, 2, expected_cumulative_sat, CMT);
+ TEST_VQRSHRN_N(uint, u, 32, 16, 4, 3, expected_cumulative_sat, CMT);
+ TEST_VQRSHRN_N(uint, u, 64, 32, 2, 3, expected_cumulative_sat, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, CMT);
+
+
+ /* Another set of tests, shifting max value by 3. */
+ VDUP(vector, q, int, s, 16, 8, 0x7FFF);
+ VDUP(vector, q, int, s, 32, 4, 0x7FFFFFFF);
+ VDUP(vector, q, int, s, 64, 2, 0x7FFFFFFFFFFFFFFFLL);
+ VDUP(vector, q, uint, u, 16, 8, 0xFFFF);
+ VDUP(vector, q, uint, u, 32, 4, 0xFFFFFFFF);
+ VDUP(vector, q, uint, u, 64, 2, 0xFFFFFFFFFFFFFFFFULL);
+
+#undef CMT
+#define CMT " (check saturation: shift by 3)"
+ TEST_VQRSHRN_N(int, s, 16, 8, 8, 3, expected_cumulative_sat_sh3, CMT);
+ TEST_VQRSHRN_N(int, s, 32, 16, 4, 3, expected_cumulative_sat_sh3, CMT);
+ TEST_VQRSHRN_N(int, s, 64, 32, 2, 3, expected_cumulative_sat_sh3, CMT);
+ TEST_VQRSHRN_N(uint, u, 16, 8, 8, 3, expected_cumulative_sat_sh3, CMT);
+ TEST_VQRSHRN_N(uint, u, 32, 16, 4, 3, expected_cumulative_sat_sh3, CMT);
+ TEST_VQRSHRN_N(uint, u, 64, 32, 2, 3, expected_cumulative_sat_sh3, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_sh3, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_sh3, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_sh3, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_sh3, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_sh3, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_sh3, CMT);
+
+
+ /* Shift by max amount. */
+#undef CMT
+#define CMT " (check saturation: shift by max)"
+ TEST_VQRSHRN_N(int, s, 16, 8, 8, 8, expected_cumulative_sat_shmax, CMT);
+ TEST_VQRSHRN_N(int, s, 32, 16, 4, 16, expected_cumulative_sat_shmax, CMT);
+ TEST_VQRSHRN_N(int, s, 64, 32, 2, 32, expected_cumulative_sat_shmax, CMT);
+ TEST_VQRSHRN_N(uint, u, 16, 8, 8, 8, expected_cumulative_sat_shmax, CMT);
+ TEST_VQRSHRN_N(uint, u, 32, 16, 4, 16, expected_cumulative_sat_shmax, CMT);
+ TEST_VQRSHRN_N(uint, u, 64, 32, 2, 32, expected_cumulative_sat_shmax, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_shmax, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_shmax, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_shmax, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_shmax, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_shmax, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_shmax, CMT);
+}
+
+int main (void)
+{
+ exec_vqrshrn_n ();
+ return 0;
+}
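The vqrshrn_n tests above follow the same per-lane model: a rounding right shift by an immediate, then narrowing with saturation to the half-width signed range. The sketch below shows that model for a single s16 -> s8 lane; the helper name is an assumption, and the intrinsic itself works on whole vectors and sets the QC flag.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical scalar model of one lane of vqrshrn_n_s16: add the rounding
   constant, shift right by n (1 <= n <= 8), then saturate to int8_t. */
static int8_t qrshrn_n_s16_model (int16_t a, int n, int *sat)
{
  int32_t v = ((int32_t) a + (1 << (n - 1))) >> n;
  if (v > INT8_MAX) { v = INT8_MAX; *sat = 1; }
  if (v < INT8_MIN) { v = INT8_MIN; *sat = 1; }
  return (int8_t) v;
}

int main (void)
{
  int sat = 0;
  /* 0x7FFF shifted by 3 does not fit in 8 bits, so it saturates to 0x7f
     with the flag set, matching expected_sh3 above. */
  printf ("%#x sat=%d\n",
          (unsigned) (uint8_t) qrshrn_n_s16_model (0x7FFF, 3, &sat), sat);
  return 0;
}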
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshrun_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshrun_n.c
new file mode 100644
index 00000000000..f5e431e7870
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqrshrun_n.c
@@ -0,0 +1,189 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected values of cumulative_saturation flag with negative input. */
+int VECT_VAR(expected_cumulative_sat_neg,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,int,64,2) = 1;
+
+/* Expected results with negative input. */
+VECT_VAR_DECL(expected_neg,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_neg,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_neg,uint,32,2) [] = { 0x0, 0x0 };
+
+/* Expected values of cumulative_saturation flag with max input value
+ shifted by 1. */
+int VECT_VAR(expected_cumulative_sat_max_sh1,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_max_sh1,int,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_max_sh1,int,64,2) = 1;
+
+/* Expected results with max input value shifted by 1. */
+VECT_VAR_DECL(expected_max_sh1,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_max_sh1,uint,16,4) [] = { 0xffff, 0xffff,
+ 0xffff, 0xffff };
+VECT_VAR_DECL(expected_max_sh1,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_max_sh1,uint,64,1) [] = { 0x3333333333333333 };
+
+/* Expected values of cumulative_saturation flag with max input value
+ shifted by max amount. */
+int VECT_VAR(expected_cumulative_sat_max_shmax,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_max_shmax,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_max_shmax,int,64,2) = 0;
+
+/* Expected results with max input value shifted by max amount. */
+VECT_VAR_DECL(expected_max_shmax,uint,8,8) [] = { 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80 };
+VECT_VAR_DECL(expected_max_shmax,uint,16,4) [] = { 0x8000, 0x8000,
+ 0x8000, 0x8000 };
+VECT_VAR_DECL(expected_max_shmax,uint,32,2) [] = { 0x80000000, 0x80000000 };
+
+/* Expected values of cumulative_saturation flag with min input value
+ shifted by max amount. */
+int VECT_VAR(expected_cumulative_sat_min_shmax,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_min_shmax,int,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_min_shmax,int,64,2) = 1;
+
+/* Expected results with min input value shifted by max amount. */
+VECT_VAR_DECL(expected_min_shmax,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_min_shmax,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_min_shmax,uint,32,2) [] = { 0x0, 0x0 };
+
+/* Expected values of cumulative_saturation flag with inputs in usual
+ range. */
+int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat,int,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
+
+/* Expected results with inputs in usual range. */
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0x49, 0x49, 0x49, 0x49,
+ 0x49, 0x49, 0x49, 0x49 };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xdeadbf, 0xdeadbf };
+
+#define INSN vqrshrun_n
+#define TEST_MSG "VQRSHRUN_N"
+
+#define FNNAME1(NAME) void exec_ ## NAME (void)
+#define FNNAME(NAME) FNNAME1(NAME)
+
+FNNAME (INSN)
+{
+ /* Basic test: y=vqrshrun_n(x,v), then store the result. */
+#define TEST_VQRSHRUN_N2(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+ Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, uint, W2, N)); \
+ VECT_VAR(vector_res, uint, W2, N) = \
+ INSN##_##T2##W(VECT_VAR(vector, T1, W, N), \
+ V); \
+ vst1_u##W2(VECT_VAR(result, uint, W2, N), \
+ VECT_VAR(vector_res, uint, W2, N)); \
+ CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+ /* Two auxiliary macros are necessary to expand INSN. */
+#define TEST_VQRSHRUN_N1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQRSHRUN_N2(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+
+#define TEST_VQRSHRUN_N(T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQRSHRUN_N1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+
+
+ /* vector is twice as large as vector_res. */
+ DECL_VARIABLE(vector, int, 16, 8);
+ DECL_VARIABLE(vector, int, 32, 4);
+ DECL_VARIABLE(vector, int, 64, 2);
+
+ DECL_VARIABLE(vector_res, uint, 8, 8);
+ DECL_VARIABLE(vector_res, uint, 16, 4);
+ DECL_VARIABLE(vector_res, uint, 32, 2);
+
+ clean_results ();
+
+ /* Fill input vector with negative values, to check saturation on
+ limits. */
+ VDUP(vector, q, int, s, 16, 8, -2);
+ VDUP(vector, q, int, s, 32, 4, -3);
+ VDUP(vector, q, int, s, 64, 2, -4);
+
+ /* Choose shift amount arbitrarily. */
+#define CMT " (negative input)"
+ TEST_VQRSHRUN_N(int, s, 16, 8, 8, 3, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHRUN_N(int, s, 32, 16, 4, 4, expected_cumulative_sat_neg, CMT);
+ TEST_VQRSHRUN_N(int, s, 64, 32, 2, 2, expected_cumulative_sat_neg, CMT);
+
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_neg, CMT);
+
+
+ /* Fill input vector with max value, to check saturation on
+ limits. */
+ VDUP(vector, q, int, s, 16, 8, 0x7FFF);
+ VDUP(vector, q, int, s, 32, 4, 0x7FFFFFFF);
+ VDUP(vector, q, int, s, 64, 2, 0x7FFFFFFFFFFFFFFFLL);
+
+ /* Shift by 1. */
+#undef CMT
+#define CMT " (check cumulative saturation: shift by 1)"
+ TEST_VQRSHRUN_N(int, s, 16, 8, 8, 1, expected_cumulative_sat_max_sh1, CMT);
+ TEST_VQRSHRUN_N(int, s, 32, 16, 4, 1, expected_cumulative_sat_max_sh1, CMT);
+ TEST_VQRSHRUN_N(int, s, 64, 32, 2, 1, expected_cumulative_sat_max_sh1, CMT);
+
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_sh1, CMT);
+
+
+ /* Shift by max. */
+#undef CMT
+#define CMT " (check cumulative saturation: shift by max, positive input)"
+ TEST_VQRSHRUN_N(int, s, 16, 8, 8, 8, expected_cumulative_sat_max_shmax, CMT);
+ TEST_VQRSHRUN_N(int, s, 32, 16, 4, 16, expected_cumulative_sat_max_shmax, CMT);
+ TEST_VQRSHRUN_N(int, s, 64, 32, 2, 32, expected_cumulative_sat_max_shmax, CMT);
+
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_shmax, CMT);
+
+
+ /* Fill input vector with min value, to check saturation on limits. */
+ VDUP(vector, q, int, s, 16, 8, 0x8000);
+ VDUP(vector, q, int, s, 32, 4, 0x80000000);
+ VDUP(vector, q, int, s, 64, 2, 0x8000000000000000LL);
+
+ /* Shift by max. */
+#undef CMT
+#define CMT " (check cumulative saturation: shift by max, negative input)"
+ TEST_VQRSHRUN_N(int, s, 16, 8, 8, 8, expected_cumulative_sat_min_shmax, CMT);
+ TEST_VQRSHRUN_N(int, s, 32, 16, 4, 16, expected_cumulative_sat_min_shmax, CMT);
+ TEST_VQRSHRUN_N(int, s, 64, 32, 2, 32, expected_cumulative_sat_min_shmax, CMT);
+
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_min_shmax, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_min_shmax, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_min_shmax, CMT);
+
+
+ /* Fill input vector with arbitrary values to check the normal case
+ (note that 0x87654321 is negative when interpreted as signed 32-bit). */
+ VDUP(vector, q, int, s, 16, 8, 0x1234);
+ VDUP(vector, q, int, s, 32, 4, 0x87654321);
+ VDUP(vector, q, int, s, 64, 2, 0xDEADBEEF);
+
+ /* Shift by an arbitrary amount. */
+#undef CMT
+#define CMT ""
+ TEST_VQRSHRUN_N(int, s, 16, 8, 8, 6, expected_cumulative_sat, CMT);
+ TEST_VQRSHRUN_N(int, s, 32, 16, 4, 7, expected_cumulative_sat, CMT);
+ TEST_VQRSHRUN_N(int, s, 64, 32, 2, 8, expected_cumulative_sat, CMT);
+
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, CMT);
+}
+
+int main (void)
+{
+ exec_vqrshrun_n ();
+ return 0;
+}
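vqrshrun_n differs from vqrshrn_n only in that the narrowed result is unsigned, so any negative rounded value saturates to 0 and the QC flag is set. A minimal sketch of that model for one s16 -> u8 lane follows; the helper name is an assumption, not the intrinsic's definition.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical scalar model of one lane of vqrshrun_n_s16: rounding right
   shift of a signed input, then saturation to the unsigned uint8_t range. */
static uint8_t qrshrun_n_s16_model (int16_t a, int n, int *sat)
{
  int32_t v = ((int32_t) a + (1 << (n - 1))) >> n;
  if (v > UINT8_MAX) { v = UINT8_MAX; *sat = 1; }
  if (v < 0)         { v = 0;         *sat = 1; }
  return (uint8_t) v;
}

int main (void)
{
  int sat = 0;
  /* -2 shifted by 3 rounds to 0 without saturating, matching expected_neg
     and expected_cumulative_sat_neg above. */
  printf ("%u sat=%d\n", (unsigned) qrshrun_n_s16_model (-2, 3, &sat), sat);
  sat = 0;
  /* 0x7FFF shifted by 1 exceeds 0xff, so it saturates with the flag set,
     matching expected_max_sh1 above. */
  printf ("%#x sat=%d\n",
          (unsigned) qrshrun_n_s16_model (0x7FFF, 1, &sat), sat);
  return 0;
}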
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshl.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshl.c
new file mode 100644
index 00000000000..a1cccc2df28
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshl.c
@@ -0,0 +1,829 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected values of cumulative_saturation flag with input=0. */
+int VECT_VAR(expected_cumulative_sat_0,int,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_0,int,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_0,uint,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0,uint,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0,uint,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_0,uint,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_0,int,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_0,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0,int,64,2) = 0;
+int VECT_VAR(expected_cumulative_sat_0,uint,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_0,uint,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0,uint,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0,uint,64,2) = 0;
+
+/* Expected results with input=0. */
+VECT_VAR_DECL(expected_0,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_0,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_0,int,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,64,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,64,2) [] = { 0x0, 0x0 };
+
+/* Expected values of cumulative_saturation flag with input=0 and
+ negative shift amount. */
+int VECT_VAR(expected_cumulative_sat_0_neg,int,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,int,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,uint,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,uint,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,uint,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,uint,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,int,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,int,64,2) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,uint,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,uint,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,uint,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_0_neg,uint,64,2) = 0;
+
+/* Expected results with input=0 and negative shift amount. */
+VECT_VAR_DECL(expected_0_neg,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,int,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_0_neg,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,uint,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_0_neg,int,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,int,64,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_neg,uint,64,2) [] = { 0x0, 0x0 };
+
+/* Expected values of cumulative_saturation flag. */
+int VECT_VAR(expected_cumulative_sat,int,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat,int,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat,uint,8,8) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat,int,8,16) = 1;
+int VECT_VAR(expected_cumulative_sat,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat,int,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat,int,64,2) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,8,16) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,64,2) = 1;
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0xe0, 0xe2, 0xe4, 0xe6,
+ 0xe8, 0xea, 0xec, 0xee };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0xff80, 0xff88, 0xff90, 0xff98 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0xfffff000, 0xfffff100 };
+VECT_VAR_DECL(expected,int,64,1) [] = { 0xfffffffffffffffe };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0x1ffffffffffffffe };
+VECT_VAR_DECL(expected,int,8,16) [] = { 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80 };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0x8000, 0x8000, 0x8000, 0x8000,
+ 0x8000, 0x8000, 0x8000, 0x8000 };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0x80000000, 0x80000000,
+ 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0x8000000000000000,
+ 0x8000000000000000 };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0xffffffffffffffff,
+ 0xffffffffffffffff };
+
+/* Expected values of cumulative_saturation flag with negative shift
+   amount. */
+int VECT_VAR(expected_cumulative_sat_neg,int,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,int,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,uint,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,uint,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,uint,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,uint,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,int,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,int,64,2) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,uint,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,uint,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,uint,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_neg,uint,64,2) = 0;
+
+/* Expected results with negative shift amount. */
+VECT_VAR_DECL(expected_neg,int,8,8) [] = { 0xf8, 0xf8, 0xf9, 0xf9,
+ 0xfa, 0xfa, 0xfb, 0xfb };
+VECT_VAR_DECL(expected_neg,int,16,4) [] = { 0xfffc, 0xfffc, 0xfffc, 0xfffc };
+VECT_VAR_DECL(expected_neg,int,32,2) [] = { 0xfffffffe, 0xfffffffe };
+VECT_VAR_DECL(expected_neg,int,64,1) [] = { 0xffffffffffffffff };
+VECT_VAR_DECL(expected_neg,uint,8,8) [] = { 0x78, 0x78, 0x79, 0x79,
+ 0x7a, 0x7a, 0x7b, 0x7b };
+VECT_VAR_DECL(expected_neg,uint,16,4) [] = { 0x3ffc, 0x3ffc, 0x3ffc, 0x3ffc };
+VECT_VAR_DECL(expected_neg,uint,32,2) [] = { 0x1ffffffe, 0x1ffffffe };
+VECT_VAR_DECL(expected_neg,uint,64,1) [] = { 0xfffffffffffffff };
+VECT_VAR_DECL(expected_neg,int,8,16) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_neg,int,16,8) [] = { 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected_neg,int,32,4) [] = { 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_neg,int,64,2) [] = { 0xffffffffffffffff,
+ 0xffffffffffffffff };
+VECT_VAR_DECL(expected_neg,uint,8,16) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_neg,uint,16,8) [] = { 0x1f, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f };
+VECT_VAR_DECL(expected_neg,uint,32,4) [] = { 0x7ffff, 0x7ffff,
+ 0x7ffff, 0x7ffff };
+VECT_VAR_DECL(expected_neg,uint,64,2) [] = { 0xfffffffffff, 0xfffffffffff };
+
+/* Expected values of cumulative_saturation flag with negative
+   input and large shift amount. */
+int VECT_VAR(expected_cumulative_sat_neg_large,int,8,8) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large,int,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large,int,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large,int,64,1) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large,uint,8,8) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large,uint,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large,uint,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large,uint,64,1) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large,int,8,16) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large,int,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large,int,64,2) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large,uint,8,16) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large,uint,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large,uint,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_neg_large,uint,64,2) = 1;
+
+/* Expected results with negative input and large shift amount. */
+VECT_VAR_DECL(expected_neg_large,int,8,8) [] = { 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80 };
+VECT_VAR_DECL(expected_neg_large,int,16,4) [] = { 0x8000, 0x8000,
+ 0x8000, 0x8000 };
+VECT_VAR_DECL(expected_neg_large,int,32,2) [] = { 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected_neg_large,int,64,1) [] = { 0x8000000000000000 };
+VECT_VAR_DECL(expected_neg_large,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_neg_large,uint,16,4) [] = { 0xffff, 0xffff,
+ 0xffff, 0xffff };
+VECT_VAR_DECL(expected_neg_large,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_neg_large,uint,64,1) [] = { 0xffffffffffffffff };
+VECT_VAR_DECL(expected_neg_large,int,8,16) [] = { 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80 };
+VECT_VAR_DECL(expected_neg_large,int,16,8) [] = { 0x8000, 0x8000,
+ 0x8000, 0x8000,
+ 0x8000, 0x8000,
+ 0x8000, 0x8000 };
+VECT_VAR_DECL(expected_neg_large,int,32,4) [] = { 0x80000000, 0x80000000,
+ 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected_neg_large,int,64,2) [] = { 0x8000000000000000,
+ 0x8000000000000000 };
+VECT_VAR_DECL(expected_neg_large,uint,8,16) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_neg_large,uint,16,8) [] = { 0xffff, 0xffff,
+ 0xffff, 0xffff,
+ 0xffff, 0xffff,
+ 0xffff, 0xffff };
+VECT_VAR_DECL(expected_neg_large,uint,32,4) [] = { 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_neg_large,uint,64,2) [] = { 0xffffffffffffffff,
+ 0xffffffffffffffff };
+
+/* Expected values of cumulative_saturation flag with max input
+   and shift by -1. */
+int VECT_VAR(expected_cumulative_sat_max_minus1,int,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_max_minus1,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_max_minus1,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_max_minus1,int,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_max_minus1,uint,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_max_minus1,uint,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_max_minus1,uint,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_max_minus1,uint,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_max_minus1,int,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_max_minus1,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_max_minus1,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_max_minus1,int,64,2) = 0;
+int VECT_VAR(expected_cumulative_sat_max_minus1,uint,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_max_minus1,uint,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_max_minus1,uint,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_max_minus1,uint,64,2) = 0;
+
+/* Expected results with max input and shift by -1. */
+VECT_VAR_DECL(expected_max_minus1,int,8,8) [] = { 0x3f, 0x3f, 0x3f, 0x3f,
+ 0x3f, 0x3f, 0x3f, 0x3f };
+VECT_VAR_DECL(expected_max_minus1,int,16,4) [] = { 0x3fff, 0x3fff,
+ 0x3fff, 0x3fff };
+VECT_VAR_DECL(expected_max_minus1,int,32,2) [] = { 0x3fffffff, 0x3fffffff };
+VECT_VAR_DECL(expected_max_minus1,int,64,1) [] = { 0x3fffffffffffffff };
+VECT_VAR_DECL(expected_max_minus1,uint,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f };
+VECT_VAR_DECL(expected_max_minus1,uint,16,4) [] = { 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_max_minus1,uint,32,2) [] = { 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected_max_minus1,uint,64,1) [] = { 0x7fffffffffffffff };
+VECT_VAR_DECL(expected_max_minus1,int,8,16) [] = { 0x3f, 0x3f, 0x3f, 0x3f,
+ 0x3f, 0x3f, 0x3f, 0x3f,
+ 0x3f, 0x3f, 0x3f, 0x3f,
+ 0x3f, 0x3f, 0x3f, 0x3f };
+VECT_VAR_DECL(expected_max_minus1,int,16,8) [] = { 0x3fff, 0x3fff,
+ 0x3fff, 0x3fff,
+ 0x3fff, 0x3fff,
+ 0x3fff, 0x3fff };
+VECT_VAR_DECL(expected_max_minus1,int,32,4) [] = { 0x3fffffff, 0x3fffffff,
+ 0x3fffffff, 0x3fffffff };
+VECT_VAR_DECL(expected_max_minus1,int,64,2) [] = { 0x3fffffffffffffff,
+ 0x3fffffffffffffff };
+VECT_VAR_DECL(expected_max_minus1,uint,8,16) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f };
+VECT_VAR_DECL(expected_max_minus1,uint,16,8) [] = { 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_max_minus1,uint,32,4) [] = { 0x7fffffff, 0x7fffffff,
+ 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected_max_minus1,uint,64,2) [] = { 0x7fffffffffffffff,
+ 0x7fffffffffffffff };
+
+/* Expected values of cumulative_saturation flag with max input
+   and large shift amount. */
+int VECT_VAR(expected_cumulative_sat_max_large,int,8,8) = 1;
+int VECT_VAR(expected_cumulative_sat_max_large,int,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat_max_large,int,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat_max_large,int,64,1) = 1;
+int VECT_VAR(expected_cumulative_sat_max_large,uint,8,8) = 1;
+int VECT_VAR(expected_cumulative_sat_max_large,uint,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat_max_large,uint,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat_max_large,uint,64,1) = 1;
+int VECT_VAR(expected_cumulative_sat_max_large,int,8,16) = 1;
+int VECT_VAR(expected_cumulative_sat_max_large,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_max_large,int,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_max_large,int,64,2) = 1;
+int VECT_VAR(expected_cumulative_sat_max_large,uint,8,16) = 1;
+int VECT_VAR(expected_cumulative_sat_max_large,uint,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_max_large,uint,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_max_large,uint,64,2) = 1;
+
+/* Expected results with max input and large shift amount. */
+VECT_VAR_DECL(expected_max_large,int,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f };
+VECT_VAR_DECL(expected_max_large,int,16,4) [] = { 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_max_large,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected_max_large,int,64,1) [] = { 0x7fffffffffffffff };
+VECT_VAR_DECL(expected_max_large,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_max_large,uint,16,4) [] = { 0xffff, 0xffff,
+ 0xffff, 0xffff };
+VECT_VAR_DECL(expected_max_large,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_max_large,uint,64,1) [] = { 0xffffffffffffffff };
+VECT_VAR_DECL(expected_max_large,int,8,16) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f };
+VECT_VAR_DECL(expected_max_large,int,16,8) [] = { 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_max_large,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
+ 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected_max_large,int,64,2) [] = { 0x7fffffffffffffff,
+ 0x7fffffffffffffff };
+VECT_VAR_DECL(expected_max_large,uint,8,16) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_max_large,uint,16,8) [] = { 0xffff, 0xffff,
+ 0xffff, 0xffff,
+ 0xffff, 0xffff,
+ 0xffff, 0xffff };
+VECT_VAR_DECL(expected_max_large,uint,32,4) [] = { 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_max_large,uint,64,2) [] = { 0xffffffffffffffff,
+ 0xffffffffffffffff };
+
+/* Expected values of cumulative_saturation flag with saturation
+   on 64-bit values. */
+int VECT_VAR(expected_cumulative_sat_64,int,64,1) = 1;
+int VECT_VAR(expected_cumulative_sat_64,int,64,2) = 1;
+
+/* Expected results with saturation on 64-bit values. */
+VECT_VAR_DECL(expected_64,int,64,1) [] = { 0x8000000000000000 };
+VECT_VAR_DECL(expected_64,int,64,2) [] = { 0x7fffffffffffffff,
+ 0x7fffffffffffffff };
+
+#define INSN vqshl
+#define TEST_MSG "VQSHL/VQSHLQ"
+
+#define FNNAME1(NAME) void exec_ ## NAME (void)
+#define FNNAME(NAME) FNNAME1(NAME)
+
+FNNAME (INSN)
+{
+ /* Basic test: v3=vqshl(v1,v2), then store the result. */
+#define TEST_VQSHL2(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+ Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
+ VECT_VAR(vector_res, T1, W, N) = \
+ INSN##Q##_##T2##W(VECT_VAR(vector, T1, W, N), \
+ VECT_VAR(vector_shift, T3, W, N)); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N)); \
+ CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+ /* Two auxiliary macros are necessary to expand INSN. */
+#define TEST_VQSHL1(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQSHL2(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+#define TEST_VQSHL(T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQSHL1(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
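+ /* For illustration only (a hand expansion, not part of the test; the
+ helper macros come from arm-neon-ref.h), one instance such as
+ TEST_VQSHL(int, , int, s, 8, 8, expected_cumulative_sat_0, CMT)
+ reduces to roughly:
+
+ Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, int, 8, 8));
+ VECT_VAR(vector_res, int, 8, 8) =
+ vqshl_s8(VECT_VAR(vector, int, 8, 8),
+ VECT_VAR(vector_shift, int, 8, 8));
+ vst1_s8(VECT_VAR(result, int, 8, 8),
+ VECT_VAR(vector_res, int, 8, 8));
+ CHECK_CUMULATIVE_SAT(TEST_MSG, int, 8, 8, expected_cumulative_sat_0, CMT); */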
+
+ DECL_VARIABLE_ALL_VARIANTS(vector);
+ DECL_VARIABLE_ALL_VARIANTS(vector_res);
+
+ DECL_VARIABLE_SIGNED_VARIANTS(vector_shift);
+
+ clean_results ();
+
+ /* Fill input vector with 0, to check saturation on limits. */
+ VDUP(vector, , int, s, 8, 8, 0);
+ VDUP(vector, , int, s, 16, 4, 0);
+ VDUP(vector, , int, s, 32, 2, 0);
+ VDUP(vector, , int, s, 64, 1, 0);
+ VDUP(vector, , uint, u, 8, 8, 0);
+ VDUP(vector, , uint, u, 16, 4, 0);
+ VDUP(vector, , uint, u, 32, 2, 0);
+ VDUP(vector, , uint, u, 64, 1, 0);
+ VDUP(vector, q, int, s, 8, 16, 0);
+ VDUP(vector, q, int, s, 16, 8, 0);
+ VDUP(vector, q, int, s, 32, 4, 0);
+ VDUP(vector, q, int, s, 64, 2, 0);
+ VDUP(vector, q, uint, u, 8, 16, 0);
+ VDUP(vector, q, uint, u, 16, 8, 0);
+ VDUP(vector, q, uint, u, 32, 4, 0);
+ VDUP(vector, q, uint, u, 64, 2, 0);
+
+ /* Choose init values arbitrarily; they will be used as shift amounts. */
+ /* Use values equal to, or one less than, the type width to check
+ behaviour on limits. */
+
+ /* 64-bit vectors first. */
+ /* Shift 8-bit lanes by 7... */
+ VDUP(vector_shift, , int, s, 8, 8, 7);
+ /* ... except: lane 0 (by 6), lane 1 (by 8) and lane 2 (by 9). */
+ VSET_LANE(vector_shift, , int, s, 8, 8, 0, 6);
+ VSET_LANE(vector_shift, , int, s, 8, 8, 1, 8);
+ VSET_LANE(vector_shift, , int, s, 8, 8, 2, 9);
+
+ /* Shift 16-bit lanes by 15... */
+ VDUP(vector_shift, , int, s, 16, 4, 15);
+ /* ... except: lane 0 (by 14), lane 1 (by 16), and lane 2 (by 17). */
+ VSET_LANE(vector_shift, , int, s, 16, 4, 0, 14);
+ VSET_LANE(vector_shift, , int, s, 16, 4, 1, 16);
+ VSET_LANE(vector_shift, , int, s, 16, 4, 2, 17);
+
+ /* Shift 32-bit lanes by 31... */
+ VDUP(vector_shift, , int, s, 32, 2, 31);
+ /* ... except lane 1 (by 30). */
+ VSET_LANE(vector_shift, , int, s, 32, 2, 1, 30);
+
+ /* Shift the 64-bit lane by 63. */
+ VDUP(vector_shift, , int, s, 64, 1, 63);
+
+ /* 128-bit vectors. */
+ /* Shift 8-bit lanes by 8. */
+ VDUP(vector_shift, q, int, s, 8, 16, 8);
+ /* Shift 16-bit lanes by 16. */
+ VDUP(vector_shift, q, int, s, 16, 8, 16);
+ /* Shift 32-bit lanes by 32... */
+ VDUP(vector_shift, q, int, s, 32, 4, 32);
+ /* ... except lane 1 (by 33). */
+ VSET_LANE(vector_shift, q, int, s, 32, 4, 1, 33);
+
+ /* Shift 64-bit lanes by 64... */
+ VDUP(vector_shift, q, int, s, 64, 2, 64);
+ /* ... except lane 1 (by 62). */
+ VSET_LANE(vector_shift, q, int, s, 64, 2, 1, 62);
+
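+ /* A zero input cannot overflow whatever the shift amount, so neither
+ saturation nor a non-zero result is expected in this first round of
+ checks. */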
+#define CMT " (with input = 0)"
+ TEST_VQSHL(int, , int, s, 8, 8, expected_cumulative_sat_0, CMT);
+ TEST_VQSHL(int, , int, s, 16, 4, expected_cumulative_sat_0, CMT);
+ TEST_VQSHL(int, , int, s, 32, 2, expected_cumulative_sat_0, CMT);
+ TEST_VQSHL(int, , int, s, 64, 1, expected_cumulative_sat_0, CMT);
+ TEST_VQSHL(int, , uint, u, 8, 8, expected_cumulative_sat_0, CMT);
+ TEST_VQSHL(int, , uint, u, 16, 4, expected_cumulative_sat_0, CMT);
+ TEST_VQSHL(int, , uint, u, 32, 2, expected_cumulative_sat_0, CMT);
+ TEST_VQSHL(int, , uint, u, 64, 1, expected_cumulative_sat_0, CMT);
+ TEST_VQSHL(int, q, int, s, 8, 16, expected_cumulative_sat_0, CMT);
+ TEST_VQSHL(int, q, int, s, 16, 8, expected_cumulative_sat_0, CMT);
+ TEST_VQSHL(int, q, int, s, 32, 4, expected_cumulative_sat_0, CMT);
+ TEST_VQSHL(int, q, int, s, 64, 2, expected_cumulative_sat_0, CMT);
+ TEST_VQSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_0, CMT);
+ TEST_VQSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_0, CMT);
+ TEST_VQSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_0, CMT);
+ TEST_VQSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_0, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_0, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_0, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_0, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_0, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_0, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_0, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_0, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_0, CMT);
+
+
+ /* Use negative shift amounts. */
+ VDUP(vector_shift, , int, s, 8, 8, -1);
+ VDUP(vector_shift, , int, s, 16, 4, -2);
+ VDUP(vector_shift, , int, s, 32, 2, -3);
+ VDUP(vector_shift, , int, s, 64, 1, -4);
+ VDUP(vector_shift, q, int, s, 8, 16, -7);
+ VDUP(vector_shift, q, int, s, 16, 8, -11);
+ VDUP(vector_shift, q, int, s, 32, 4, -13);
+ VDUP(vector_shift, q, int, s, 64, 2, -20);
+
+#undef CMT
+#define CMT " (input 0 and negative shift amount)"
+ TEST_VQSHL(int, , int, s, 8, 8, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQSHL(int, , int, s, 16, 4, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQSHL(int, , int, s, 32, 2, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQSHL(int, , int, s, 64, 1, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQSHL(int, , uint, u, 8, 8, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQSHL(int, , uint, u, 16, 4, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQSHL(int, , uint, u, 32, 2, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQSHL(int, , uint, u, 64, 1, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQSHL(int, q, int, s, 8, 16, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQSHL(int, q, int, s, 16, 8, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQSHL(int, q, int, s, 32, 4, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQSHL(int, q, int, s, 64, 2, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_0_neg, CMT);
+ TEST_VQSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_0_neg, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_0_neg, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_0_neg, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_0_neg, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_0_neg, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_0_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_0_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_0_neg, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_0_neg, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_0_neg, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_0_neg, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_0_neg, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_0_neg, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_0_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_0_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_0_neg, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_0_neg, CMT);
+
+ /* Test again, with predefined input values. */
+ TEST_MACRO_ALL_VARIANTS_2_5(VLOAD, vector, buffer);
+
+ /* Choose init values arbitrarily; they will be used as shift amounts. */
+ VDUP(vector_shift, , int, s, 8, 8, 1);
+ VDUP(vector_shift, , int, s, 16, 4, 3);
+ VDUP(vector_shift, , int, s, 32, 2, 8);
+ VDUP(vector_shift, , int, s, 64, 1, -3);
+ VDUP(vector_shift, q, int, s, 8, 16, 10);
+ VDUP(vector_shift, q, int, s, 16, 8, 12);
+ VDUP(vector_shift, q, int, s, 32, 4, 32);
+ VDUP(vector_shift, q, int, s, 64, 2, 63);
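+ /* The inputs now come from the reference buffer, which for the 8-bit
+ lanes appears to ramp from 0xf0 upwards: shifted left by 1 this gives
+ 0xe0, 0xe2, ... as recorded in expected above; the variants expected
+ to overflow are flagged in expected_cumulative_sat. */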
+
+#undef CMT
+#define CMT ""
+ TEST_VQSHL(int, , int, s, 8, 8, expected_cumulative_sat, CMT);
+ TEST_VQSHL(int, , int, s, 16, 4, expected_cumulative_sat, CMT);
+ TEST_VQSHL(int, , int, s, 32, 2, expected_cumulative_sat, CMT);
+ TEST_VQSHL(int, , int, s, 64, 1, expected_cumulative_sat, CMT);
+ TEST_VQSHL(int, , uint, u, 8, 8, expected_cumulative_sat, CMT);
+ TEST_VQSHL(int, , uint, u, 16, 4, expected_cumulative_sat, CMT);
+ TEST_VQSHL(int, , uint, u, 32, 2, expected_cumulative_sat, CMT);
+ TEST_VQSHL(int, , uint, u, 64, 1, expected_cumulative_sat, CMT);
+ TEST_VQSHL(int, q, int, s, 8, 16, expected_cumulative_sat, CMT);
+ TEST_VQSHL(int, q, int, s, 16, 8, expected_cumulative_sat, CMT);
+ TEST_VQSHL(int, q, int, s, 32, 4, expected_cumulative_sat, CMT);
+ TEST_VQSHL(int, q, int, s, 64, 2, expected_cumulative_sat, CMT);
+ TEST_VQSHL(int, q, uint, u, 8, 16, expected_cumulative_sat, CMT);
+ TEST_VQSHL(int, q, uint, u, 16, 8, expected_cumulative_sat, CMT);
+ TEST_VQSHL(int, q, uint, u, 32, 4, expected_cumulative_sat, CMT);
+ TEST_VQSHL(int, q, uint, u, 64, 2, expected_cumulative_sat, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected, CMT);
+
+
+ /* Use negative shift amounts. */
+ VDUP(vector_shift, , int, s, 8, 8, -1);
+ VDUP(vector_shift, , int, s, 16, 4, -2);
+ VDUP(vector_shift, , int, s, 32, 2, -3);
+ VDUP(vector_shift, , int, s, 64, 1, -4);
+ VDUP(vector_shift, q, int, s, 8, 16, -7);
+ VDUP(vector_shift, q, int, s, 16, 8, -11);
+ VDUP(vector_shift, q, int, s, 32, 4, -13);
+ VDUP(vector_shift, q, int, s, 64, 2, -20);
+
+#undef CMT
+#define CMT " (negative shift amount)"
+ TEST_VQSHL(int, , int, s, 8, 8, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHL(int, , int, s, 16, 4, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHL(int, , int, s, 32, 2, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHL(int, , int, s, 64, 1, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHL(int, , uint, u, 8, 8, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHL(int, , uint, u, 16, 4, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHL(int, , uint, u, 32, 2, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHL(int, , uint, u, 64, 1, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHL(int, q, int, s, 8, 16, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHL(int, q, int, s, 16, 8, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHL(int, q, int, s, 32, 4, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHL(int, q, int, s, 64, 2, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_neg, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_neg, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_neg, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_neg, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_neg, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_neg, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_neg, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_neg, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_neg, CMT);
+
+
+ /* Use large shift amounts. */
+ VDUP(vector_shift, , int, s, 8, 8, 8);
+ VDUP(vector_shift, , int, s, 16, 4, 16);
+ VDUP(vector_shift, , int, s, 32, 2, 32);
+ VDUP(vector_shift, , int, s, 64, 1, 64);
+ VDUP(vector_shift, q, int, s, 8, 16, 8);
+ VDUP(vector_shift, q, int, s, 16, 8, 16);
+ VDUP(vector_shift, q, int, s, 32, 4, 32);
+ VDUP(vector_shift, q, int, s, 64, 2, 64);
+
+#undef CMT
+#define CMT " (large shift amount, negative input)"
+ TEST_VQSHL(int, , int, s, 8, 8, expected_cumulative_sat_neg_large, CMT);
+ TEST_VQSHL(int, , int, s, 16, 4, expected_cumulative_sat_neg_large, CMT);
+ TEST_VQSHL(int, , int, s, 32, 2, expected_cumulative_sat_neg_large, CMT);
+ TEST_VQSHL(int, , int, s, 64, 1, expected_cumulative_sat_neg_large, CMT);
+ TEST_VQSHL(int, , uint, u, 8, 8, expected_cumulative_sat_neg_large, CMT);
+ TEST_VQSHL(int, , uint, u, 16, 4, expected_cumulative_sat_neg_large, CMT);
+ TEST_VQSHL(int, , uint, u, 32, 2, expected_cumulative_sat_neg_large, CMT);
+ TEST_VQSHL(int, , uint, u, 64, 1, expected_cumulative_sat_neg_large, CMT);
+ TEST_VQSHL(int, q, int, s, 8, 16, expected_cumulative_sat_neg_large, CMT);
+ TEST_VQSHL(int, q, int, s, 16, 8, expected_cumulative_sat_neg_large, CMT);
+ TEST_VQSHL(int, q, int, s, 32, 4, expected_cumulative_sat_neg_large, CMT);
+ TEST_VQSHL(int, q, int, s, 64, 2, expected_cumulative_sat_neg_large, CMT);
+ TEST_VQSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_neg_large, CMT);
+ TEST_VQSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_neg_large, CMT);
+ TEST_VQSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_neg_large, CMT);
+ TEST_VQSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_neg_large, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_neg_large, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_neg_large, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_neg_large, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_neg_large, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_neg_large, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_neg_large, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_neg_large, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_neg_large, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_neg_large, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_neg_large, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_neg_large, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_neg_large, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_neg_large, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_neg_large, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_neg_large, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_neg_large, CMT);
+
+
+ /* Fill input vector with max value, to check saturation on limits. */
+ VDUP(vector, , int, s, 8, 8, 0x7F);
+ VDUP(vector, , int, s, 16, 4, 0x7FFF);
+ VDUP(vector, , int, s, 32, 2, 0x7FFFFFFF);
+ VDUP(vector, , int, s, 64, 1, 0x7FFFFFFFFFFFFFFFLL);
+ VDUP(vector, , uint, u, 8, 8, 0xFF);
+ VDUP(vector, , uint, u, 16, 4, 0xFFFF);
+ VDUP(vector, , uint, u, 32, 2, 0xFFFFFFFF);
+ VDUP(vector, , uint, u, 64, 1, 0xFFFFFFFFFFFFFFFFULL);
+ VDUP(vector, q, int, s, 8, 16, 0x7F);
+ VDUP(vector, q, int, s, 16, 8, 0x7FFF);
+ VDUP(vector, q, int, s, 32, 4, 0x7FFFFFFF);
+ VDUP(vector, q, int, s, 64, 2, 0x7FFFFFFFFFFFFFFFLL);
+ VDUP(vector, q, uint, u, 8, 16, 0xFF);
+ VDUP(vector, q, uint, u, 16, 8, 0xFFFF);
+ VDUP(vector, q, uint, u, 32, 4, 0xFFFFFFFF);
+ VDUP(vector, q, uint, u, 64, 2, 0xFFFFFFFFFFFFFFFFULL);
+
+ /* Shift by -1. */
+ VDUP(vector_shift, , int, s, 8, 8, -1);
+ VDUP(vector_shift, , int, s, 16, 4, -1);
+ VDUP(vector_shift, , int, s, 32, 2, -1);
+ VDUP(vector_shift, , int, s, 64, 1, -1);
+ VDUP(vector_shift, q, int, s, 8, 16, -1);
+ VDUP(vector_shift, q, int, s, 16, 8, -1);
+ VDUP(vector_shift, q, int, s, 32, 4, -1);
+ VDUP(vector_shift, q, int, s, 64, 2, -1);
+
+#undef CMT
+#define CMT " (max input, shift by -1)"
+ TEST_VQSHL(int, , int, s, 8, 8, expected_cumulative_sat_max_minus1, CMT);
+ TEST_VQSHL(int, , int, s, 16, 4, expected_cumulative_sat_max_minus1, CMT);
+ TEST_VQSHL(int, , int, s, 32, 2, expected_cumulative_sat_max_minus1, CMT);
+ TEST_VQSHL(int, , int, s, 64, 1, expected_cumulative_sat_max_minus1, CMT);
+ TEST_VQSHL(int, , uint, u, 8, 8, expected_cumulative_sat_max_minus1, CMT);
+ TEST_VQSHL(int, , uint, u, 16, 4, expected_cumulative_sat_max_minus1, CMT);
+ TEST_VQSHL(int, , uint, u, 32, 2, expected_cumulative_sat_max_minus1, CMT);
+ TEST_VQSHL(int, , uint, u, 64, 1, expected_cumulative_sat_max_minus1, CMT);
+ TEST_VQSHL(int, q, int, s, 8, 16, expected_cumulative_sat_max_minus1, CMT);
+ TEST_VQSHL(int, q, int, s, 16, 8, expected_cumulative_sat_max_minus1, CMT);
+ TEST_VQSHL(int, q, int, s, 32, 4, expected_cumulative_sat_max_minus1, CMT);
+ TEST_VQSHL(int, q, int, s, 64, 2, expected_cumulative_sat_max_minus1, CMT);
+ TEST_VQSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_max_minus1, CMT);
+ TEST_VQSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_max_minus1, CMT);
+ TEST_VQSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_max_minus1, CMT);
+ TEST_VQSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_max_minus1, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_minus1, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_minus1, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_max_minus1, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_max_minus1, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_minus1, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_minus1, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_minus1, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_max_minus1, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_max_minus1, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_max_minus1, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_max_minus1, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_max_minus1, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_max_minus1, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_max_minus1, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_max_minus1, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_max_minus1, CMT);
+
+
+ /* Use large shift amounts. */
+ VDUP(vector_shift, , int, s, 8, 8, 8);
+ VDUP(vector_shift, , int, s, 16, 4, 16);
+ VDUP(vector_shift, , int, s, 32, 2, 32);
+ VDUP(vector_shift, , int, s, 64, 1, 64);
+ VDUP(vector_shift, q, int, s, 8, 16, 8);
+ VDUP(vector_shift, q, int, s, 16, 8, 16);
+ VDUP(vector_shift, q, int, s, 32, 4, 32);
+ VDUP(vector_shift, q, int, s, 64, 2, 64);
+
+#undef CMT
+#define CMT " (max input, large shift amount)"
+ TEST_VQSHL(int, , int, s, 8, 8, expected_cumulative_sat_max_large, CMT);
+ TEST_VQSHL(int, , int, s, 16, 4, expected_cumulative_sat_max_large, CMT);
+ TEST_VQSHL(int, , int, s, 32, 2, expected_cumulative_sat_max_large, CMT);
+ TEST_VQSHL(int, , int, s, 64, 1, expected_cumulative_sat_max_large, CMT);
+ TEST_VQSHL(int, , uint, u, 8, 8, expected_cumulative_sat_max_large, CMT);
+ TEST_VQSHL(int, , uint, u, 16, 4, expected_cumulative_sat_max_large, CMT);
+ TEST_VQSHL(int, , uint, u, 32, 2, expected_cumulative_sat_max_large, CMT);
+ TEST_VQSHL(int, , uint, u, 64, 1, expected_cumulative_sat_max_large, CMT);
+ TEST_VQSHL(int, q, int, s, 8, 16, expected_cumulative_sat_max_large, CMT);
+ TEST_VQSHL(int, q, int, s, 16, 8, expected_cumulative_sat_max_large, CMT);
+ TEST_VQSHL(int, q, int, s, 32, 4, expected_cumulative_sat_max_large, CMT);
+ TEST_VQSHL(int, q, int, s, 64, 2, expected_cumulative_sat_max_large, CMT);
+ TEST_VQSHL(int, q, uint, u, 8, 16, expected_cumulative_sat_max_large, CMT);
+ TEST_VQSHL(int, q, uint, u, 16, 8, expected_cumulative_sat_max_large, CMT);
+ TEST_VQSHL(int, q, uint, u, 32, 4, expected_cumulative_sat_max_large, CMT);
+ TEST_VQSHL(int, q, uint, u, 64, 2, expected_cumulative_sat_max_large, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_large, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_large, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_max_large, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_max_large, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_large, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_large, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_large, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_max_large, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_max_large, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_max_large, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_max_large, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_max_large, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_max_large, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_max_large, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_max_large, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_max_large, CMT);
+
+
+ /* Check 64-bit saturation. */
+ VDUP(vector, , int, s, 64, 1, -10);
+ VDUP(vector_shift, , int, s, 64, 1, 64);
+ VDUP(vector, q, int, s, 64, 2, 10);
+ VDUP(vector_shift, q, int, s, 64, 2, 64);
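+ /* Shifting -10 left by 64 underflows the signed 64-bit range and must
+ saturate to 0x8000000000000000, while shifting 10 left by 64 overflows
+ and must saturate to 0x7fffffffffffffff, matching expected_64. */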
+
+#undef CMT
+#define CMT " (check saturation on 64 bits)"
+ TEST_VQSHL(int, , int, s, 64, 1, expected_cumulative_sat_64, CMT);
+ TEST_VQSHL(int, q, int, s, 64, 2, expected_cumulative_sat_64, CMT);
+
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_64, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_64, CMT);
+}
+
+int main (void)
+{
+ exec_vqshl ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshl_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshl_n.c
new file mode 100644
index 00000000000..cb9c4585142
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshl_n.c
@@ -0,0 +1,234 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected values of cumulative_saturation flag. */
+int VECT_VAR(expected_cumulative_sat,int,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat,int,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat,uint,8,8) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,64,1) = 1;
+int VECT_VAR(expected_cumulative_sat,int,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
+int VECT_VAR(expected_cumulative_sat,uint,8,16) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,64,2) = 1;
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0xc0, 0xc4, 0xc8, 0xcc,
+ 0xd0, 0xd4, 0xd8, 0xdc };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0xffe0, 0xffe2, 0xffe4, 0xffe6 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0xffffffe0, 0xffffffe2 };
+VECT_VAR_DECL(expected,int,64,1) [] = { 0xffffffffffffffc0 };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0xffffffffffffffff };
+VECT_VAR_DECL(expected,int,8,16) [] = { 0xc0, 0xc4, 0xc8, 0xcc,
+ 0xd0, 0xd4, 0xd8, 0xdc,
+ 0xe0, 0xe4, 0xe8, 0xec,
+ 0xf0, 0xf4, 0xf8, 0xfc };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0xffe0, 0xffe2, 0xffe4, 0xffe6,
+ 0xffe8, 0xffea, 0xffec, 0xffee };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0xffffffe0, 0xffffffe2,
+ 0xffffffe4, 0xffffffe6 };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0xffffffffffffffc0, 0xffffffffffffffc4 };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0xffffffffffffffff,
+ 0xffffffffffffffff };
+
+/* Expected values of cumulative_saturation flag with max positive input. */
+int VECT_VAR(expected_cumulative_sat_max,int,8,8) = 1;
+int VECT_VAR(expected_cumulative_sat_max,int,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat_max,int,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat_max,int,64,1) = 1;
+int VECT_VAR(expected_cumulative_sat_max,uint,8,8) = 1;
+int VECT_VAR(expected_cumulative_sat_max,uint,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat_max,uint,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat_max,uint,64,1) = 1;
+int VECT_VAR(expected_cumulative_sat_max,int,8,16) = 1;
+int VECT_VAR(expected_cumulative_sat_max,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_max,int,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_max,int,64,2) = 1;
+int VECT_VAR(expected_cumulative_sat_max,uint,8,16) = 1;
+int VECT_VAR(expected_cumulative_sat_max,uint,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_max,uint,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_max,uint,64,2) = 1;
+
+/* Expected results with max positive input. */
+VECT_VAR_DECL(expected_max,int,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f };
+VECT_VAR_DECL(expected_max,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_max,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected_max,int,64,1) [] = { 0x7fffffffffffffff };
+VECT_VAR_DECL(expected_max,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_max,uint,16,4) [] = { 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected_max,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_max,uint,64,1) [] = { 0xffffffffffffffff };
+VECT_VAR_DECL(expected_max,int,8,16) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f };
+VECT_VAR_DECL(expected_max,int,16,8) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff, 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_max,int,32,4) [] = { 0x7fffffff, 0x7fffffff,
+ 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected_max,int,64,2) [] = { 0x7fffffffffffffff,
+ 0x7fffffffffffffff };
+VECT_VAR_DECL(expected_max,uint,8,16) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_max,uint,16,8) [] = { 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected_max,uint,32,4) [] = { 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_max,uint,64,2) [] = { 0xffffffffffffffff,
+ 0xffffffffffffffff };
+
+#define INSN vqshl
+#define TEST_MSG "VQSHL_N/VQSHLQ_N"
+
+#define FNNAME1(NAME) void exec_ ## NAME ##_n (void)
+#define FNNAME(NAME) FNNAME1(NAME)
+
+FNNAME (INSN)
+{
+ /* Basic test: v2=vqshl_n(v1,v), then store the result. */
+#define TEST_VQSHL_N2(INSN, Q, T1, T2, W, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+ Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W, N)); \
+ VECT_VAR(vector_res, T1, W, N) = \
+ INSN##Q##_n_##T2##W(VECT_VAR(vector, T1, W, N), \
+ V); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N)); \
+ CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+ /* Two auxiliary macros are necessary to expand INSN. */
+#define TEST_VQSHL_N1(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQSHL_N2(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+#define TEST_VQSHL_N(T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQSHL_N1(INSN, T3, Q, T1, T2, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
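+ /* For illustration only (a hand expansion; the helper macros come from
+ arm-neon-ref.h), TEST_VQSHL_N(, int, s, 8, 8, 2, ...) reduces to roughly
+ vqshl_n_s8(VECT_VAR(vector, int, 8, 8), 2), stored with vst1_s8 before
+ the cumulative saturation check. */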
+ DECL_VARIABLE_ALL_VARIANTS(vector);
+ DECL_VARIABLE_ALL_VARIANTS(vector_res);
+
+ clean_results ();
+
+ TEST_MACRO_ALL_VARIANTS_2_5(VLOAD, vector, buffer);
+
+ /* Choose shift amount arbitrarily. */
+#define CMT ""
+ TEST_VQSHL_N(, int, s, 8, 8, 2, expected_cumulative_sat, CMT);
+ TEST_VQSHL_N(, int, s, 16, 4, 1, expected_cumulative_sat, CMT);
+ TEST_VQSHL_N(, int, s, 32, 2, 1, expected_cumulative_sat, CMT);
+ TEST_VQSHL_N(, int, s, 64, 1, 2, expected_cumulative_sat, CMT);
+ TEST_VQSHL_N(, uint, u, 8, 8, 3, expected_cumulative_sat, CMT);
+ TEST_VQSHL_N(, uint, u, 16, 4, 2, expected_cumulative_sat, CMT);
+ TEST_VQSHL_N(, uint, u, 32, 2, 3, expected_cumulative_sat, CMT);
+ TEST_VQSHL_N(, uint, u, 64, 1, 3, expected_cumulative_sat, CMT);
+
+ TEST_VQSHL_N(q, int, s, 8, 16, 2, expected_cumulative_sat, CMT);
+ TEST_VQSHL_N(q, int, s, 16, 8, 1, expected_cumulative_sat, CMT);
+ TEST_VQSHL_N(q, int, s, 32, 4, 1, expected_cumulative_sat, CMT);
+ TEST_VQSHL_N(q, int, s, 64, 2, 2, expected_cumulative_sat, CMT);
+ TEST_VQSHL_N(q, uint, u, 8, 16, 3, expected_cumulative_sat, CMT);
+ TEST_VQSHL_N(q, uint, u, 16, 8, 2, expected_cumulative_sat, CMT);
+ TEST_VQSHL_N(q, uint, u, 32, 4, 3, expected_cumulative_sat, CMT);
+ TEST_VQSHL_N(q, uint, u, 64, 2, 3, expected_cumulative_sat, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected, CMT);
+
+
+ /* Fill input vector with max value, to check saturation on limits. */
+ VDUP(vector, , int, s, 8, 8, 0x7F);
+ VDUP(vector, , int, s, 16, 4, 0x7FFF);
+ VDUP(vector, , int, s, 32, 2, 0x7FFFFFFF);
+ VDUP(vector, , int, s, 64, 1, 0x7FFFFFFFFFFFFFFFLL);
+ VDUP(vector, , uint, u, 8, 8, 0xFF);
+ VDUP(vector, , uint, u, 16, 4, 0xFFFF);
+ VDUP(vector, , uint, u, 32, 2, 0xFFFFFFFF);
+ VDUP(vector, , uint, u, 64, 1, 0xFFFFFFFFFFFFFFFFULL);
+ VDUP(vector, q, int, s, 8, 16, 0x7F);
+ VDUP(vector, q, int, s, 16, 8, 0x7FFF);
+ VDUP(vector, q, int, s, 32, 4, 0x7FFFFFFF);
+ VDUP(vector, q, int, s, 64, 2, 0x7FFFFFFFFFFFFFFFLL);
+ VDUP(vector, q, uint, u, 8, 16, 0xFF);
+ VDUP(vector, q, uint, u, 16, 8, 0xFFFF);
+ VDUP(vector, q, uint, u, 32, 4, 0xFFFFFFFF);
+ VDUP(vector, q, uint, u, 64, 2, 0xFFFFFFFFFFFFFFFFULL);
+
+#undef CMT
+#define CMT " (with max input)"
+ TEST_VQSHL_N(, int, s, 8, 8, 2, expected_cumulative_sat_max, CMT);
+ TEST_VQSHL_N(, int, s, 16, 4, 1, expected_cumulative_sat_max, CMT);
+ TEST_VQSHL_N(, int, s, 32, 2, 1, expected_cumulative_sat_max, CMT);
+ TEST_VQSHL_N(, int, s, 64, 1, 2, expected_cumulative_sat_max, CMT);
+ TEST_VQSHL_N(, uint, u, 8, 8, 3, expected_cumulative_sat_max, CMT);
+ TEST_VQSHL_N(, uint, u, 16, 4, 2, expected_cumulative_sat_max, CMT);
+ TEST_VQSHL_N(, uint, u, 32, 2, 3, expected_cumulative_sat_max, CMT);
+ TEST_VQSHL_N(, uint, u, 64, 1, 3, expected_cumulative_sat_max, CMT);
+
+ TEST_VQSHL_N(q, int, s, 8, 16, 2, expected_cumulative_sat_max, CMT);
+ TEST_VQSHL_N(q, int, s, 16, 8, 1, expected_cumulative_sat_max, CMT);
+ TEST_VQSHL_N(q, int, s, 32, 4, 1, expected_cumulative_sat_max, CMT);
+ TEST_VQSHL_N(q, int, s, 64, 2, 2, expected_cumulative_sat_max, CMT);
+ TEST_VQSHL_N(q, uint, u, 8, 16, 3, expected_cumulative_sat_max, CMT);
+ TEST_VQSHL_N(q, uint, u, 16, 8, 2, expected_cumulative_sat_max, CMT);
+ TEST_VQSHL_N(q, uint, u, 32, 4, 3, expected_cumulative_sat_max, CMT);
+ TEST_VQSHL_N(q, uint, u, 64, 2, 3, expected_cumulative_sat_max, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_max, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_max, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_max, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_max, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_max, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_max, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_max, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_max, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_max, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_max, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_max, CMT);
+}
+
+int main (void)
+{
+ exec_vqshl_n ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshlu_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshlu_n.c
new file mode 100644
index 00000000000..a357fbe3748
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshlu_n.c
@@ -0,0 +1,263 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected values of cumulative_saturation flag with negative
+ input. */
+int VECT_VAR(expected_cumulative_sat_neg,int,8,8) = 1;
+int VECT_VAR(expected_cumulative_sat_neg,int,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat_neg,int,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat_neg,int,64,1) = 1;
+int VECT_VAR(expected_cumulative_sat_neg,int,8,16) = 1;
+int VECT_VAR(expected_cumulative_sat_neg,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_neg,int,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_neg,int,64,2) = 1;
+
+/* Expected results with negative input. */
+VECT_VAR_DECL(expected_neg,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_neg,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_neg,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_neg,uint,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_neg,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_neg,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_neg,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_neg,uint,64,2) [] = { 0x0, 0x0 };
+
+/* Expected values of cumulative_saturation flag with shift by 1. */
+int VECT_VAR(expected_cumulative_sat_sh1,int,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat_sh1,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat_sh1,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat_sh1,int,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat_sh1,int,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat_sh1,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_sh1,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_sh1,int,64,2) = 0;
+
+/* Expected results with shift by 1. */
+VECT_VAR_DECL(expected_sh1,uint,8,8) [] = { 0xfe, 0xfe, 0xfe, 0xfe,
+ 0xfe, 0xfe, 0xfe, 0xfe };
+VECT_VAR_DECL(expected_sh1,uint,16,4) [] = { 0xfffe, 0xfffe, 0xfffe, 0xfffe };
+VECT_VAR_DECL(expected_sh1,uint,32,2) [] = { 0xfffffffe, 0xfffffffe };
+VECT_VAR_DECL(expected_sh1,uint,64,1) [] = { 0xfffffffffffffffe };
+VECT_VAR_DECL(expected_sh1,uint,8,16) [] = { 0xfe, 0xfe, 0xfe, 0xfe,
+ 0xfe, 0xfe, 0xfe, 0xfe,
+ 0xfe, 0xfe, 0xfe, 0xfe,
+ 0xfe, 0xfe, 0xfe, 0xfe };
+VECT_VAR_DECL(expected_sh1,uint,16,8) [] = { 0xfffe, 0xfffe, 0xfffe, 0xfffe,
+ 0xfffe, 0xfffe, 0xfffe, 0xfffe };
+VECT_VAR_DECL(expected_sh1,uint,32,4) [] = { 0xfffffffe, 0xfffffffe,
+ 0xfffffffe, 0xfffffffe };
+VECT_VAR_DECL(expected_sh1,uint,64,2) [] = { 0xfffffffffffffffe,
+ 0xfffffffffffffffe };
+
+/* Expected values of cumulative_saturation flag with shift by 2. */
+int VECT_VAR(expected_cumulative_sat_sh2,int,8,8) = 1;
+int VECT_VAR(expected_cumulative_sat_sh2,int,16,4) = 1;
+int VECT_VAR(expected_cumulative_sat_sh2,int,32,2) = 1;
+int VECT_VAR(expected_cumulative_sat_sh2,int,64,1) = 1;
+int VECT_VAR(expected_cumulative_sat_sh2,int,8,16) = 1;
+int VECT_VAR(expected_cumulative_sat_sh2,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_sh2,int,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_sh2,int,64,2) = 1;
+
+/* Expected results with shift by 2. */
+VECT_VAR_DECL(expected_sh2,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_sh2,uint,16,4) [] = { 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected_sh2,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_sh2,uint,64,1) [] = { 0xffffffffffffffff };
+VECT_VAR_DECL(expected_sh2,uint,8,16) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_sh2,uint,16,8) [] = { 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected_sh2,uint,32,4) [] = { 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_sh2,uint,64,2) [] = { 0xffffffffffffffff,
+ 0xffffffffffffffff };
+
+/* Expected values of cumulative_saturation flag. */
+int VECT_VAR(expected_cumulative_sat,int,8,8) = 0;
+int VECT_VAR(expected_cumulative_sat,int,16,4) = 0;
+int VECT_VAR(expected_cumulative_sat,int,32,2) = 0;
+int VECT_VAR(expected_cumulative_sat,int,64,1) = 0;
+int VECT_VAR(expected_cumulative_sat,int,8,16) = 0;
+int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
+
+/* Expected results. */
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2 };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0x8, 0x8, 0x8, 0x8 };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0x18, 0x18 };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0x40 };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0xa0, 0xa0, 0xa0, 0xa0,
+ 0xa0, 0xa0, 0xa0, 0xa0,
+ 0xa0, 0xa0, 0xa0, 0xa0,
+ 0xa0, 0xa0, 0xa0, 0xa0 };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0x180, 0x180, 0x180, 0x180,
+ 0x180, 0x180, 0x180, 0x180 };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0x380, 0x380, 0x380, 0x380 };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0x800, 0x800 };
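+/* These follow from the positive inputs exercised at the end of this
+   file: each lane is shifted left without saturating, e.g. 1 << 1 = 0x2,
+   5 << 5 = 0xa0 and 8 << 8 = 0x800. */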
+
+
+#define INSN vqshlu
+#define TEST_MSG "VQSHLU_N/VQSHLUQ_N"
+
+#define FNNAME1(NAME) void exec_ ## NAME ## _n(void)
+#define FNNAME(NAME) FNNAME1(NAME)
+
+FNNAME (INSN)
+{
+ /* Basic test: v2=vqshlu_n(v1,v), then store the result. */
+#define TEST_VQSHLU_N2(INSN, Q, T1, T2, T3, T4, W, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+ Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T3, W, N)); \
+ VECT_VAR(vector_res, T3, W, N) = \
+ INSN##Q##_n_##T2##W(VECT_VAR(vector, T1, W, N), \
+ V); \
+ vst1##Q##_##T4##W(VECT_VAR(result, T3, W, N), \
+ VECT_VAR(vector_res, T3, W, N)); \
+ CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+ /* Two auxiliary macros are necessary to expand INSN. */
+#define TEST_VQSHLU_N1(INSN, Q, T1, T2, T3, T4, W, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQSHLU_N2(INSN, Q, T1, T2, T3, T4, W, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+
+#define TEST_VQSHLU_N(Q, T1, T2, T3, T4, W, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQSHLU_N1(INSN, Q, T1, T2, T3, T4, W, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+
+
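+ /* For illustration only (a hand expansion; the helper macros come from
+ arm-neon-ref.h), TEST_VQSHLU_N(, int, s, uint, u, 8, 8, 2, ...) reduces
+ to roughly vqshlu_n_s8(VECT_VAR(vector, int, 8, 8), 2), whose unsigned
+ result is stored with vst1_u8 before the cumulative saturation check. */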
+ DECL_VARIABLE_ALL_VARIANTS(vector);
+ DECL_VARIABLE_ALL_VARIANTS(vector_res);
+
+ clean_results ();
+
+ /* Fill input vector with negative values, to check saturation on
+ limits. */
+ VDUP(vector, , int, s, 8, 8, -1);
+ VDUP(vector, , int, s, 16, 4, -2);
+ VDUP(vector, , int, s, 32, 2, -3);
+ VDUP(vector, , int, s, 64, 1, -4);
+ VDUP(vector, q, int, s, 8, 16, -1);
+ VDUP(vector, q, int, s, 16, 8, -2);
+ VDUP(vector, q, int, s, 32, 4, -3);
+ VDUP(vector, q, int, s, 64, 2, -4);
+
+ /* Choose shift amount arbitrarily. */
+#define CMT " (negative input)"
+ TEST_VQSHLU_N(, int, s, uint, u, 8, 8, 2, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 16, 4, 1, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 32, 2, 1, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 64, 1, 2, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 8, 16, 2, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 16, 8, 1, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 32, 4, 1, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 64, 2, 2, expected_cumulative_sat_neg, CMT);
+
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_neg, CMT);
+
+
+ /* Fill input vector with max value, to check saturation on
+ limits. */
+ VDUP(vector, , int, s, 8, 8, 0x7F);
+ VDUP(vector, , int, s, 16, 4, 0x7FFF);
+ VDUP(vector, , int, s, 32, 2, 0x7FFFFFFF);
+ VDUP(vector, , int, s, 64, 1, 0x7FFFFFFFFFFFFFFFLL);
+ VDUP(vector, q, int, s, 8, 16, 0x7F);
+ VDUP(vector, q, int, s, 16, 8, 0x7FFF);
+ VDUP(vector, q, int, s, 32, 4, 0x7FFFFFFF);
+ VDUP(vector, q, int, s, 64, 2, 0x7FFFFFFFFFFFFFFFULL);
+
+ /* Shift by 1. */
+#undef CMT
+#define CMT " (shift by 1)"
+ TEST_VQSHLU_N(, int, s, uint, u, 8, 8, 1, expected_cumulative_sat_sh1, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 16, 4, 1, expected_cumulative_sat_sh1, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 32, 2, 1, expected_cumulative_sat_sh1, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 64, 1, 1, expected_cumulative_sat_sh1, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 8, 16, 1, expected_cumulative_sat_sh1, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 16, 8, 1, expected_cumulative_sat_sh1, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 32, 4, 1, expected_cumulative_sat_sh1, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 64, 2, 1, expected_cumulative_sat_sh1, CMT);
+
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_sh1, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_sh1, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_sh1, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_sh1, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_sh1, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_sh1, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_sh1, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_sh1, CMT);
+
+ /* Shift by 2 to force saturation. */
+#undef CMT
+#define CMT " (shift by 2)"
+ TEST_VQSHLU_N(, int, s, uint, u, 8, 8, 2, expected_cumulative_sat_sh2, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 16, 4, 2, expected_cumulative_sat_sh2, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 32, 2, 2, expected_cumulative_sat_sh2, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 64, 1, 2, expected_cumulative_sat_sh2, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 8, 16, 2, expected_cumulative_sat_sh2, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 16, 8, 2, expected_cumulative_sat_sh2, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 32, 4, 2, expected_cumulative_sat_sh2, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 64, 2, 2, expected_cumulative_sat_sh2, CMT);
+
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_sh2, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_sh2, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_sh2, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_sh2, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_sh2, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_sh2, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_sh2, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_sh2, CMT);
+
+
+ /* Fill input vector with positive values, to check normal case. */
+ VDUP(vector, , int, s, 8, 8, 1);
+ VDUP(vector, , int, s, 16, 4, 2);
+ VDUP(vector, , int, s, 32, 2, 3);
+ VDUP(vector, , int, s, 64, 1, 4);
+ VDUP(vector, q, int, s, 8, 16, 5);
+ VDUP(vector, q, int, s, 16, 8, 6);
+ VDUP(vector, q, int, s, 32, 4, 7);
+ VDUP(vector, q, int, s, 64, 2, 8);
+
+ /* Arbitrary shift amount. */
+#undef CMT
+#define CMT ""
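+  /* Each input value i is shifted by i (1 << 1, 2 << 2, ... 8 << 8);
+     every result fits in its unsigned type, so no saturation is
+     expected. */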
+ TEST_VQSHLU_N(, int, s, uint, u, 8, 8, 1, expected_cumulative_sat, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 16, 4, 2, expected_cumulative_sat, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 32, 2, 3, expected_cumulative_sat, CMT);
+ TEST_VQSHLU_N(, int, s, uint, u, 64, 1, 4, expected_cumulative_sat, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 8, 16, 5, expected_cumulative_sat, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 16, 8, 6, expected_cumulative_sat, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 32, 4, 7, expected_cumulative_sat, CMT);
+ TEST_VQSHLU_N(q, int, s, uint, u, 64, 2, 8, expected_cumulative_sat, CMT);
+
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected, CMT);
+}
+
+int main (void)
+{
+ exec_vqshlu_n ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshrn_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshrn_n.c
new file mode 100644
index 00000000000..b3556f46c26
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshrn_n.c
@@ -0,0 +1,177 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
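+/* vqshrn_n shifts each input element right by an immediate and
+   narrows it to half the width, saturating to the range of the
+   narrow type; the cumulative saturation flag checked below records
+   whether any lane had to be clamped. */
+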
+/* Expected values of cumulative_saturation flag. */
+int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
+int VECT_VAR(expected_cumulative_sat,uint,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat,uint,64,2) = 1;
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0xf8, 0xf8, 0xf9, 0xf9,
+ 0xfa, 0xfa, 0xfb, 0xfb };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0xfff8, 0xfff8, 0xfff9, 0xfff9 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0xfffffffc, 0xfffffffc };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+
+/* Expected values of cumulative_saturation flag with max input value
+ shifted by 3. */
+int VECT_VAR(expected_cumulative_sat_max_sh3,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_max_sh3,int,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_max_sh3,int,64,2) = 1;
+int VECT_VAR(expected_cumulative_sat_max_sh3,uint,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_max_sh3,uint,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_max_sh3,uint,64,2) = 1;
+
+/* Expected results with max input value shifted by 3. */
+VECT_VAR_DECL(expected_max_sh3,int,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f };
+VECT_VAR_DECL(expected_max_sh3,int,16,4) [] = { 0x7fff, 0x7fff, 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_max_sh3,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected_max_sh3,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_max_sh3,uint,16,4) [] = { 0xffff, 0xffff,
+ 0xffff, 0xffff };
+VECT_VAR_DECL(expected_max_sh3,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+
+/* Expected values of cumulative_saturation flag with max input value
+ shifted by type size. */
+int VECT_VAR(expected_cumulative_sat_max_shmax,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_max_shmax,int,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_max_shmax,int,64,2) = 0;
+int VECT_VAR(expected_cumulative_sat_max_shmax,uint,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat_max_shmax,uint,32,4) = 0;
+int VECT_VAR(expected_cumulative_sat_max_shmax,uint,64,2) = 0;
+
+/* Expected results with max input value shifted by type size. */
+VECT_VAR_DECL(expected_max_shmax,int,8,8) [] = { 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f };
+VECT_VAR_DECL(expected_max_shmax,int,16,4) [] = { 0x7fff, 0x7fff,
+ 0x7fff, 0x7fff };
+VECT_VAR_DECL(expected_max_shmax,int,32,2) [] = { 0x7fffffff, 0x7fffffff };
+VECT_VAR_DECL(expected_max_shmax,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_max_shmax,uint,16,4) [] = { 0xffff, 0xffff,
+ 0xffff, 0xffff };
+VECT_VAR_DECL(expected_max_shmax,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+
+#define INSN vqshrn_n
+#define TEST_MSG "VQSHRN_N"
+
+#define FNNAME1(NAME) void exec_ ## NAME (void)
+#define FNNAME(NAME) FNNAME1(NAME)
+
+FNNAME (INSN)
+{
+ /* Basic test: y=vqshrn_n(x,v), then store the result. */
+#define TEST_VQSHRN_N2(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+ Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, T1, W2, N)); \
+ VECT_VAR(vector_res, T1, W2, N) = \
+ INSN##_##T2##W(VECT_VAR(vector, T1, W, N), \
+ V); \
+ vst1_##T2##W2(VECT_VAR(result, T1, W2, N), \
+ VECT_VAR(vector_res, T1, W2, N)); \
+ CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+  /* Two auxiliary macros are necessary to expand INSN. */
+#define TEST_VQSHRN_N1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQSHRN_N2(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+
+#define TEST_VQSHRN_N(T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQSHRN_N1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+
+
+ /* vector is twice as large as vector_res. */
+ DECL_VARIABLE(vector, int, 16, 8);
+ DECL_VARIABLE(vector, int, 32, 4);
+ DECL_VARIABLE(vector, int, 64, 2);
+ DECL_VARIABLE(vector, uint, 16, 8);
+ DECL_VARIABLE(vector, uint, 32, 4);
+ DECL_VARIABLE(vector, uint, 64, 2);
+
+ DECL_VARIABLE(vector_res, int, 8, 8);
+ DECL_VARIABLE(vector_res, int, 16, 4);
+ DECL_VARIABLE(vector_res, int, 32, 2);
+ DECL_VARIABLE(vector_res, uint, 8, 8);
+ DECL_VARIABLE(vector_res, uint, 16, 4);
+ DECL_VARIABLE(vector_res, uint, 32, 2);
+
+ clean_results ();
+
+ VLOAD(vector, buffer, q, int, s, 16, 8);
+ VLOAD(vector, buffer, q, int, s, 32, 4);
+ VLOAD(vector, buffer, q, int, s, 64, 2);
+ VLOAD(vector, buffer, q, uint, u, 16, 8);
+ VLOAD(vector, buffer, q, uint, u, 32, 4);
+ VLOAD(vector, buffer, q, uint, u, 64, 2);
+
+ /* Choose shift amount arbitrarily. */
+#define CMT ""
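+  /* With the reference buffer as input, the signed results narrow
+     without clamping, while the unsigned results saturate to all-ones
+     even for these small shifts, as the expected values above show. */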
+ TEST_VQSHRN_N(int, s, 16, 8, 8, 1, expected_cumulative_sat, CMT);
+ TEST_VQSHRN_N(int, s, 32, 16, 4, 1, expected_cumulative_sat, CMT);
+ TEST_VQSHRN_N(int, s, 64, 32, 2, 2, expected_cumulative_sat, CMT);
+ TEST_VQSHRN_N(uint, u, 16, 8, 8, 2, expected_cumulative_sat, CMT);
+ TEST_VQSHRN_N(uint, u, 32, 16, 4, 3, expected_cumulative_sat, CMT);
+ TEST_VQSHRN_N(uint, u, 64, 32, 2, 3, expected_cumulative_sat, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, CMT);
+
+
+ /* Use max possible value as input. */
+ VDUP(vector, q, int, s, 16, 8, 0x7FFF);
+ VDUP(vector, q, int, s, 32, 4, 0x7FFFFFFF);
+ VDUP(vector, q, int, s, 64, 2, 0x7FFFFFFFFFFFFFFFLL);
+ VDUP(vector, q, uint, u, 16, 8, 0xFFFF);
+ VDUP(vector, q, uint, u, 32, 4, 0xFFFFFFFF);
+ VDUP(vector, q, uint, u, 64, 2, 0xFFFFFFFFFFFFFFFFULL);
+
+#undef CMT
+#define CMT " (check saturation: shift by 3)"
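+  /* A shift by 3 leaves the maximum inputs well above the narrow
+     range (e.g. 0x7fff >> 3 = 0xfff > 0x7f), so every lane is
+     expected to saturate. */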
+ TEST_VQSHRN_N(int, s, 16, 8, 8, 3, expected_cumulative_sat_max_sh3, CMT);
+ TEST_VQSHRN_N(int, s, 32, 16, 4, 3, expected_cumulative_sat_max_sh3, CMT);
+ TEST_VQSHRN_N(int, s, 64, 32, 2, 3, expected_cumulative_sat_max_sh3, CMT);
+ TEST_VQSHRN_N(uint, u, 16, 8, 8, 3, expected_cumulative_sat_max_sh3, CMT);
+ TEST_VQSHRN_N(uint, u, 32, 16, 4, 3, expected_cumulative_sat_max_sh3, CMT);
+ TEST_VQSHRN_N(uint, u, 64, 32, 2, 3, expected_cumulative_sat_max_sh3, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_sh3, CMT);
+
+
+#undef CMT
+#define CMT " (check saturation: shift by max)"
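+  /* Shifting the maximum input right by the full width of the narrow
+     type yields exactly that type's maximum (e.g. 0x7fff >> 8 = 0x7f),
+     so no saturation is expected. */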
+ TEST_VQSHRN_N(int, s, 16, 8, 8, 8, expected_cumulative_sat_max_shmax, CMT);
+ TEST_VQSHRN_N(int, s, 32, 16, 4, 16, expected_cumulative_sat_max_shmax, CMT);
+ TEST_VQSHRN_N(int, s, 64, 32, 2, 32, expected_cumulative_sat_max_shmax, CMT);
+ TEST_VQSHRN_N(uint, u, 16, 8, 8, 8, expected_cumulative_sat_max_shmax, CMT);
+ TEST_VQSHRN_N(uint, u, 32, 16, 4, 16, expected_cumulative_sat_max_shmax, CMT);
+ TEST_VQSHRN_N(uint, u, 64, 32, 2, 32, expected_cumulative_sat_max_shmax, CMT);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_shmax, CMT);
+}
+
+int main (void)
+{
+ exec_vqshrn_n ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshrun_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshrun_n.c
new file mode 100644
index 00000000000..ce1a3ff09b2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vqshrun_n.c
@@ -0,0 +1,133 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
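+/* vqshrun_n takes signed input, shifts each element right by an
+   immediate and narrows it to an unsigned type of half the width,
+   saturating to that range; negative inputs clamp to 0 and set the
+   cumulative saturation flag checked below. */
+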
+/* Expected values of cumulative_saturation flag with negative input. */
+int VECT_VAR(expected_cumulative_sat_neg,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_neg,int,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_neg,int,64,2) = 1;
+
+/* Expected results with negative input. */
+VECT_VAR_DECL(expected_neg,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_neg,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_neg,uint,32,2) [] = { 0x0, 0x0 };
+
+/* Expected values of cumulative_saturation flag with max input value
+ shifted by 1. */
+int VECT_VAR(expected_cumulative_sat_max_sh1,int,16,8) = 1;
+int VECT_VAR(expected_cumulative_sat_max_sh1,int,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat_max_sh1,int,64,2) = 1;
+
+/* Expected results with max input value shifted by 1. */
+VECT_VAR_DECL(expected_max_sh1,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_max_sh1,uint,16,4) [] = { 0xffff, 0xffff,
+ 0xffff, 0xffff };
+VECT_VAR_DECL(expected_max_sh1,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_max_sh1,uint,64,1) [] = { 0x3333333333333333 };
+
+/* Expected values of cumulative_saturation flag. */
+int VECT_VAR(expected_cumulative_sat,int,16,8) = 0;
+int VECT_VAR(expected_cumulative_sat,int,32,4) = 1;
+int VECT_VAR(expected_cumulative_sat,int,64,2) = 0;
+
+/* Expected results. */
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0x48, 0x48, 0x48, 0x48,
+ 0x48, 0x48, 0x48, 0x48 };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xdeadbe, 0xdeadbe };
+
+
+#define INSN vqshrun_n
+#define TEST_MSG "VQSHRUN_N"
+
+#define FNNAME1(NAME) void exec_ ## NAME (void)
+#define FNNAME(NAME) FNNAME1(NAME)
+
+FNNAME (INSN)
+{
+ /* Basic test: y=vqshrun_n(x,v), then store the result. */
+#define TEST_VQSHRUN_N2(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+ Set_Neon_Cumulative_Sat(0, VECT_VAR(vector_res, uint, W2, N)); \
+ VECT_VAR(vector_res, uint, W2, N) = \
+ INSN##_##T2##W(VECT_VAR(vector, T1, W, N), \
+ V); \
+ vst1_u##W2(VECT_VAR(result, uint, W2, N), \
+ VECT_VAR(vector_res, uint, W2, N)); \
+ CHECK_CUMULATIVE_SAT(TEST_MSG, T1, W, N, EXPECTED_CUMULATIVE_SAT, CMT)
+
+  /* Two auxiliary macros are necessary to expand INSN. */
+#define TEST_VQSHRUN_N1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQSHRUN_N2(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+
+#define TEST_VQSHRUN_N(T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT) \
+ TEST_VQSHRUN_N1(INSN, T1, T2, W, W2, N, V, EXPECTED_CUMULATIVE_SAT, CMT)
+
+
+ /* vector is twice as large as vector_res. */
+ DECL_VARIABLE(vector, int, 16, 8);
+ DECL_VARIABLE(vector, int, 32, 4);
+ DECL_VARIABLE(vector, int, 64, 2);
+
+ DECL_VARIABLE(vector_res, uint, 8, 8);
+ DECL_VARIABLE(vector_res, uint, 16, 4);
+ DECL_VARIABLE(vector_res, uint, 32, 2);
+
+ clean_results ();
+
+ /* Fill input vector with negative values, to check saturation on
+ limits. */
+ VDUP(vector, q, int, s, 16, 8, -2);
+ VDUP(vector, q, int, s, 32, 4, -3);
+ VDUP(vector, q, int, s, 64, 2, -4);
+
+ /* Choose shift amount arbitrarily. */
+#define CMT " (negative input)"
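+  /* Negative inputs clamp to 0 in the unsigned result whatever the
+     shift amount, so the expected results are all zero and the
+     saturation flag is expected for every variant. */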
+ TEST_VQSHRUN_N(int, s, 16, 8, 8, 3, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHRUN_N(int, s, 32, 16, 4, 4, expected_cumulative_sat_neg, CMT);
+ TEST_VQSHRUN_N(int, s, 64, 32, 2, 2, expected_cumulative_sat_neg, CMT);
+
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_neg, CMT);
+
+
+ /* Fill input vector with max value, to check saturation on
+ limits. */
+ VDUP(vector, q, int, s, 16, 8, 0x7FFF);
+ VDUP(vector, q, int, s, 32, 4, 0x7FFFFFFF);
+ VDUP(vector, q, int, s, 64, 2, 0x7FFFFFFFFFFFFFFFLL);
+
+#undef CMT
+#define CMT " (check saturation: shift by 1)"
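+  /* A shift by 1 is not enough to bring the maximum signed inputs
+     back into the unsigned narrow range (0x7fff >> 1 = 0x3fff > 0xff),
+     so every lane is expected to saturate to all-ones. */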
+ TEST_VQSHRUN_N(int, s, 16, 8, 8, 1, expected_cumulative_sat_max_sh1, CMT);
+ TEST_VQSHRUN_N(int, s, 32, 16, 4, 1, expected_cumulative_sat_max_sh1, CMT);
+ TEST_VQSHRUN_N(int, s, 64, 32, 2, 1, expected_cumulative_sat_max_sh1, CMT);
+
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_sh1, CMT);
+
+
+  /* Fill input vector with assorted values to check the normal case
+     (note that 0x87654321 is negative when interpreted as int32). */
+ VDUP(vector, q, int, s, 16, 8, 0x1234);
+ VDUP(vector, q, int, s, 32, 4, 0x87654321);
+ VDUP(vector, q, int, s, 64, 2, 0xDEADBEEF);
+
+#undef CMT
+#define CMT ""
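+  /* 0x1234 >> 6 = 0x48 and 0xdeadbeef >> 8 = 0xdeadbe both fit their
+     unsigned result types, while the negative int32 input saturates
+     to 0 and sets the saturation flag, matching the expected values
+     above. */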
+ TEST_VQSHRUN_N(int, s, 16, 8, 8, 6, expected_cumulative_sat, CMT);
+ TEST_VQSHRUN_N(int, s, 32, 16, 4, 7, expected_cumulative_sat, CMT);
+ TEST_VQSHRUN_N(int, s, 64, 32, 2, 8, expected_cumulative_sat, CMT);
+
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, CMT);
+}
+
+int main (void)
+{
+ exec_vqshrun_n ();
+ return 0;
+}