author     Yvan Roux <yvan.roux@linaro.org>              2015-08-23 22:09:22 +0200
committer  Linaro Code Review <review@review.linaro.org> 2015-08-28 12:19:51 +0000
commit     1f92729d00f8989defaabb15c61a954400523997 (patch)
tree       f2e852cb9eb0515c07921f6f65713c3fcc6c9f2e
parent     fcfdd04431e5fff93741576a87bb60789b98463e (diff)
gcc/
	Backport from trunk r222907.
	2015-05-08  Alan Lawrence  <alan.lawrence@arm.com>

	* optabs.c (vector_compare_rtx): Handle RTL operands having VOIDmode.

gcc/
	Backport from trunk r222908.
	2015-05-08  Alan Lawrence  <alan.lawrence@arm.com>

	* config/aarch64/aarch64-simd.md (aarch64_vcond_internal<mode><mode>,
	vcond<mode><mode>, vcondu<mode><mode>): Add DImode variant.

gcc/
	Backport from trunk r222909.
	2015-05-08  Alan Lawrence  <alan.lawrence@arm.com>

	* config/aarch64/arm_neon.h (vceq_s64, vceq_u64, vceqz_s64, vceqz_u64,
	vcge_s64, vcge_u64, vcgez_s64, vcgt_s64, vcgt_u64, vcgtz_s64,
	vcle_s64, vcle_u64, vclez_s64, vclt_s64, vclt_u64, vcltz_s64,
	vtst_s64, vtst_u64): Rewrite using gcc vector extensions.

gcc/testsuite/
	Backport from trunk r222909.
	2015-05-08  Alan Lawrence  <alan.lawrence@arm.com>

	* gcc.target/aarch64/singleton_intrinsics_1.c: Generalize regex to
	allow cmlt or sshr.

Change-Id: Id2cf95a86a0bef3ada6725d77a7215009e919fe0
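
The arm_neon.h rewrite relies on GCC's vector-extension semantics, under which a lane-wise comparison of two vector operands yields a mask vector: all-ones (-1) in each lane where the comparison holds, zero elsewhere. A minimal standalone sketch of that idiom (illustrative only, not part of the patch; the typedef names are hypothetical):

    /* GCC vector extensions: comparisons produce lane masks, so the
       hand-built {-1, 0} initializers removed below become redundant.  */
    typedef long long v1di __attribute__ ((vector_size (8)));
    typedef unsigned long long v1udi __attribute__ ((vector_size (8)));

    v1udi
    eq_mask (v1di a, v1di b)
    {
      /* Each lane is -1 (all bits set) if equal, 0 otherwise.  */
      return (v1udi) (a == b);
    }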
-rw-r--r--  gcc/config/aarch64/aarch64-simd.md                          36
-rw-r--r--  gcc/config/aarch64/arm_neon.h                               36
-rw-r--r--  gcc/optabs.c                                                16
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/singleton_intrinsics_1.c    4
4 files changed, 51 insertions(+), 41 deletions(-)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 5342c3d20d2..b90f93841f8 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -2057,13 +2057,13 @@
})
(define_expand "aarch64_vcond_internal<mode><mode>"
- [(set (match_operand:VDQ_I 0 "register_operand")
- (if_then_else:VDQ_I
+ [(set (match_operand:VSDQ_I_DI 0 "register_operand")
+ (if_then_else:VSDQ_I_DI
(match_operator 3 "comparison_operator"
- [(match_operand:VDQ_I 4 "register_operand")
- (match_operand:VDQ_I 5 "nonmemory_operand")])
- (match_operand:VDQ_I 1 "nonmemory_operand")
- (match_operand:VDQ_I 2 "nonmemory_operand")))]
+ [(match_operand:VSDQ_I_DI 4 "register_operand")
+ (match_operand:VSDQ_I_DI 5 "nonmemory_operand")])
+ (match_operand:VSDQ_I_DI 1 "nonmemory_operand")
+ (match_operand:VSDQ_I_DI 2 "nonmemory_operand")))]
"TARGET_SIMD"
{
rtx op1 = operands[1];
@@ -2365,13 +2365,13 @@
})
(define_expand "vcond<mode><mode>"
- [(set (match_operand:VALL 0 "register_operand")
- (if_then_else:VALL
+ [(set (match_operand:VALLDI 0 "register_operand")
+ (if_then_else:VALLDI
(match_operator 3 "comparison_operator"
- [(match_operand:VALL 4 "register_operand")
- (match_operand:VALL 5 "nonmemory_operand")])
- (match_operand:VALL 1 "nonmemory_operand")
- (match_operand:VALL 2 "nonmemory_operand")))]
+ [(match_operand:VALLDI 4 "register_operand")
+ (match_operand:VALLDI 5 "nonmemory_operand")])
+ (match_operand:VALLDI 1 "nonmemory_operand")
+ (match_operand:VALLDI 2 "nonmemory_operand")))]
"TARGET_SIMD"
{
emit_insn (gen_aarch64_vcond_internal<mode><mode> (operands[0], operands[1],
@@ -2398,13 +2398,13 @@
})
(define_expand "vcondu<mode><mode>"
- [(set (match_operand:VDQ_I 0 "register_operand")
- (if_then_else:VDQ_I
+ [(set (match_operand:VSDQ_I_DI 0 "register_operand")
+ (if_then_else:VSDQ_I_DI
(match_operator 3 "comparison_operator"
- [(match_operand:VDQ_I 4 "register_operand")
- (match_operand:VDQ_I 5 "nonmemory_operand")])
- (match_operand:VDQ_I 1 "nonmemory_operand")
- (match_operand:VDQ_I 2 "nonmemory_operand")))]
+ [(match_operand:VSDQ_I_DI 4 "register_operand")
+ (match_operand:VSDQ_I_DI 5 "nonmemory_operand")])
+ (match_operand:VSDQ_I_DI 1 "nonmemory_operand")
+ (match_operand:VSDQ_I_DI 2 "nonmemory_operand")))]
"TARGET_SIMD"
{
emit_insn (gen_aarch64_vcond_internal<mode><mode> (operands[0], operands[1],
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index e9cc82577a9..9896e8c21af 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -11619,7 +11619,7 @@ vceq_s32 (int32x2_t __a, int32x2_t __b)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vceq_s64 (int64x1_t __a, int64x1_t __b)
{
- return (uint64x1_t) {__a[0] == __b[0] ? -1ll : 0ll};
+ return (uint64x1_t) (__a == __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
@@ -11643,7 +11643,7 @@ vceq_u32 (uint32x2_t __a, uint32x2_t __b)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vceq_u64 (uint64x1_t __a, uint64x1_t __b)
{
- return (uint64x1_t) {__a[0] == __b[0] ? -1ll : 0ll};
+ return (__a == __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
@@ -11779,7 +11779,7 @@ vceqz_s32 (int32x2_t __a)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vceqz_s64 (int64x1_t __a)
{
- return (uint64x1_t) {__a[0] == 0ll ? -1ll : 0ll};
+ return (uint64x1_t) (__a == __AARCH64_INT64_C (0));
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
@@ -11803,7 +11803,7 @@ vceqz_u32 (uint32x2_t __a)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vceqz_u64 (uint64x1_t __a)
{
- return (uint64x1_t) {__a[0] == 0ll ? -1ll : 0ll};
+ return (__a == __AARCH64_UINT64_C (0));
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
@@ -11933,7 +11933,7 @@ vcge_s32 (int32x2_t __a, int32x2_t __b)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vcge_s64 (int64x1_t __a, int64x1_t __b)
{
- return (uint64x1_t) {__a[0] >= __b[0] ? -1ll : 0ll};
+ return (uint64x1_t) (__a >= __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
@@ -11957,7 +11957,7 @@ vcge_u32 (uint32x2_t __a, uint32x2_t __b)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vcge_u64 (uint64x1_t __a, uint64x1_t __b)
{
- return (uint64x1_t) {__a[0] >= __b[0] ? -1ll : 0ll};
+ return (__a >= __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
@@ -12081,7 +12081,7 @@ vcgez_s32 (int32x2_t __a)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vcgez_s64 (int64x1_t __a)
{
- return (uint64x1_t) {__a[0] >= 0ll ? -1ll : 0ll};
+ return (uint64x1_t) (__a >= __AARCH64_INT64_C (0));
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
@@ -12175,7 +12175,7 @@ vcgt_s32 (int32x2_t __a, int32x2_t __b)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vcgt_s64 (int64x1_t __a, int64x1_t __b)
{
- return (uint64x1_t) (__a[0] > __b[0] ? -1ll : 0ll);
+ return (uint64x1_t) (__a > __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
@@ -12199,7 +12199,7 @@ vcgt_u32 (uint32x2_t __a, uint32x2_t __b)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vcgt_u64 (uint64x1_t __a, uint64x1_t __b)
{
- return (uint64x1_t) (__a[0] > __b[0] ? -1ll : 0ll);
+ return (__a > __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
@@ -12323,7 +12323,7 @@ vcgtz_s32 (int32x2_t __a)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vcgtz_s64 (int64x1_t __a)
{
- return (uint64x1_t) {__a[0] > 0ll ? -1ll : 0ll};
+ return (uint64x1_t) (__a > __AARCH64_INT64_C (0));
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
@@ -12417,7 +12417,7 @@ vcle_s32 (int32x2_t __a, int32x2_t __b)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vcle_s64 (int64x1_t __a, int64x1_t __b)
{
- return (uint64x1_t) {__a[0] <= __b[0] ? -1ll : 0ll};
+ return (uint64x1_t) (__a <= __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
@@ -12441,7 +12441,7 @@ vcle_u32 (uint32x2_t __a, uint32x2_t __b)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vcle_u64 (uint64x1_t __a, uint64x1_t __b)
{
- return (uint64x1_t) {__a[0] <= __b[0] ? -1ll : 0ll};
+ return (__a <= __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
@@ -12565,7 +12565,7 @@ vclez_s32 (int32x2_t __a)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vclez_s64 (int64x1_t __a)
{
- return (uint64x1_t) {__a[0] <= 0ll ? -1ll : 0ll};
+ return (uint64x1_t) (__a <= __AARCH64_INT64_C (0));
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
@@ -12659,7 +12659,7 @@ vclt_s32 (int32x2_t __a, int32x2_t __b)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vclt_s64 (int64x1_t __a, int64x1_t __b)
{
- return (uint64x1_t) {__a[0] < __b[0] ? -1ll : 0ll};
+ return (uint64x1_t) (__a < __b);
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
@@ -12683,7 +12683,7 @@ vclt_u32 (uint32x2_t __a, uint32x2_t __b)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vclt_u64 (uint64x1_t __a, uint64x1_t __b)
{
- return (uint64x1_t) {__a[0] < __b[0] ? -1ll : 0ll};
+ return (__a < __b);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
@@ -12807,7 +12807,7 @@ vcltz_s32 (int32x2_t __a)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vcltz_s64 (int64x1_t __a)
{
- return (uint64x1_t) {__a[0] < 0ll ? -1ll : 0ll};
+ return (uint64x1_t) (__a < __AARCH64_INT64_C (0));
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
@@ -23767,7 +23767,7 @@ vtst_s32 (int32x2_t __a, int32x2_t __b)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vtst_s64 (int64x1_t __a, int64x1_t __b)
{
- return (uint64x1_t) {(__a[0] & __b[0]) ? -1ll : 0ll};
+ return (uint64x1_t) ((__a & __b) != __AARCH64_INT64_C (0));
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
@@ -23791,7 +23791,7 @@ vtst_u32 (uint32x2_t __a, uint32x2_t __b)
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vtst_u64 (uint64x1_t __a, uint64x1_t __b)
{
- return (uint64x1_t) {(__a[0] & __b[0]) ? -1ll : 0ll};
+ return ((__a & __b) != __AARCH64_UINT64_C (0));
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
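
With the intrinsics above now expanding through generic vector comparisons, a compare against a literal can reach the middle end with a constant operand, and for single-lane types such as int64x1_t (whose vector mode is the scalar mode DImode) that constant expands to a CONST_INT, which carries VOIDmode in RTL. The optabs.c hunk below falls back to the operand's type mode in that case. A sketch of the kind of code that could exercise this path (an assumption about the trigger, not a testcase taken from the patch):

    #include <arm_neon.h>

    /* vcltz_s64 now expands to (__a < 0) on a DImode single-lane
       vector; the zero operand may expand to a VOIDmode CONST_INT
       during vcond expansion.  */
    uint64x1_t
    ltz (int64x1_t a)
    {
      return vcltz_s64 (a);
    }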
diff --git a/gcc/optabs.c b/gcc/optabs.c
index e9dc7981c63..df452a828d5 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -6544,18 +6544,28 @@ vector_compare_rtx (enum tree_code tcode, tree t_op0, tree t_op1,
{
struct expand_operand ops[2];
rtx rtx_op0, rtx_op1;
+ machine_mode m0, m1;
enum rtx_code rcode = get_rtx_code (tcode, unsignedp);
gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);
- /* Expand operands. */
+ /* Expand operands. For vector types with scalar modes, e.g. where int64x1_t
+ has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
+ cases, use the original mode. */
rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
EXPAND_STACK_PARM);
+ m0 = GET_MODE (rtx_op0);
+ if (m0 == VOIDmode)
+ m0 = TYPE_MODE (TREE_TYPE (t_op0));
+
rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
EXPAND_STACK_PARM);
+ m1 = GET_MODE (rtx_op1);
+ if (m1 == VOIDmode)
+ m1 = TYPE_MODE (TREE_TYPE (t_op1));
- create_input_operand (&ops[0], rtx_op0, GET_MODE (rtx_op0));
- create_input_operand (&ops[1], rtx_op1, GET_MODE (rtx_op1));
+ create_input_operand (&ops[0], rtx_op0, m0);
+ create_input_operand (&ops[1], rtx_op1, m1);
if (!maybe_legitimize_operands (icode, 4, 2, ops))
gcc_unreachable ();
return gen_rtx_fmt_ee (rcode, VOIDmode, ops[0].value, ops[1].value);
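
The testsuite hunk below widens a scan-assembler regex because either of two instructions can implement a signed less-than-zero mask: cmlt ... #0 compares each lane against zero, while sshr ... #63 arithmetic-shifts the sign bit across all 64 bits, producing the identical all-ones/all-zeros result. A scalar sketch of the equivalence (illustrative only; GCC defines >> on negative signed values as an arithmetic shift):

    /* An arithmetic shift right by 63 replicates the sign bit, so the
       result is -1 when a < 0 and 0 otherwise -- exactly the mask that
       cmlt ... #0 would produce.  */
    long long
    ltz_mask (long long a)
    {
      return a >> 63;
    }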
diff --git a/gcc/testsuite/gcc.target/aarch64/singleton_intrinsics_1.c b/gcc/testsuite/gcc.target/aarch64/singleton_intrinsics_1.c
index 4a0934b01f9..633a0d24ead 100644
--- a/gcc/testsuite/gcc.target/aarch64/singleton_intrinsics_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/singleton_intrinsics_1.c
@@ -235,8 +235,8 @@ test_vrshl_u64 (uint64x1_t a, int64x1_t b)
return vrshl_u64 (a, b);
}
-/* For int64x1_t, sshr...#63 is output instead of the equivalent cmlt...#0. */
-/* { dg-final { scan-assembler-times "\\tsshr\\td\[0-9\]+" 2 } } */
+/* For int64x1_t, sshr...#63 is equivalent to cmlt...#0. */
+/* { dg-final { scan-assembler-times "\\t(?:sshr|cmlt)\\td\[0-9\]+" 2 } } */
int64x1_t
test_vshr_n_s64 (int64x1_t a)