author    Matthew Gretton-Dann <matthew.gretton-dann@linaro.org>    2012-10-04 13:45:36 +0100
committer Matthew Gretton-Dann <matthew.gretton-dann@linaro.org>    2012-10-04 13:45:36 +0100
commit    2c39415cbb77437858f8de547d13be3b608316f8 (patch)
tree      8fcf983a447b3e93066a9dcb962dc371f7952fc1
parent    ed33849fdacafb363d81697f8495ff0dd2596540 (diff)
parent    fee6f6e13796551df848aef55b88a3509d15209a (diff)
Merge from fsf gcc arm/aarch64-4_7-branch
(svn branches/arm/aarch64-4_7-branch 191926).
-rw-r--r-- ChangeLog.linaro | 5
-rwxr-xr-x config.guess | 62
-rwxr-xr-x config.sub | 57
-rw-r--r-- gcc/ChangeLog.aarch64 | 413
-rw-r--r-- gcc/common/config/aarch64/aarch64-common.c | 77
-rw-r--r-- gcc/config.gcc | 123
-rw-r--r-- gcc/config/aarch64/aarch64-arches.def | 29
-rw-r--r-- gcc/config/aarch64/aarch64-builtins.c | 1320
-rw-r--r-- gcc/config/aarch64/aarch64-cores.def | 38
-rw-r--r-- gcc/config/aarch64/aarch64-elf-raw.h | 32
-rw-r--r-- gcc/config/aarch64/aarch64-elf.h | 123
-rw-r--r-- gcc/config/aarch64/aarch64-generic.md | 38
-rw-r--r-- gcc/config/aarch64/aarch64-linux.h | 44
-rw-r--r-- gcc/config/aarch64/aarch64-modes.def | 54
-rw-r--r-- gcc/config/aarch64/aarch64-option-extensions.def | 37
-rw-r--r-- gcc/config/aarch64/aarch64-opts.h | 64
-rw-r--r-- gcc/config/aarch64/aarch64-protos.h | 258
-rw-r--r-- gcc/config/aarch64/aarch64-simd.md | 3264
-rw-r--r-- gcc/config/aarch64/aarch64-tune.md | 5
-rw-r--r-- gcc/config/aarch64/aarch64.c | 6988
-rw-r--r-- gcc/config/aarch64/aarch64.h | 824
-rw-r--r-- gcc/config/aarch64/aarch64.md | 2921
-rw-r--r-- gcc/config/aarch64/aarch64.opt | 100
-rw-r--r-- gcc/config/aarch64/arm_neon.h | 25592
-rw-r--r-- gcc/config/aarch64/constraints.md | 167
-rw-r--r-- gcc/config/aarch64/gentune.sh | 32
-rw-r--r-- gcc/config/aarch64/iterators.md | 716
-rw-r--r-- gcc/config/aarch64/large.md | 312
-rw-r--r-- gcc/config/aarch64/predicates.md | 297
-rw-r--r-- gcc/config/aarch64/small.md | 287
-rw-r--r-- gcc/config/aarch64/sync.md | 467
-rw-r--r-- gcc/config/aarch64/t-aarch64 | 32
-rw-r--r-- gcc/config/aarch64/t-aarch64-linux | 22
-rwxr-xr-x gcc/configure | 13
-rw-r--r-- gcc/configure.ac | 13
-rw-r--r-- gcc/doc/invoke.texi | 129
-rw-r--r-- gcc/doc/md.texi | 137
-rw-r--r-- gcc/read-rtl.c | 200
-rw-r--r-- gcc/testsuite/ChangeLog.aarch64 | 184
-rw-r--r-- gcc/testsuite/g++.dg/abi/aarch64_guard1.C | 17
-rw-r--r-- gcc/testsuite/g++.dg/other/PR23205.C | 2
-rw-r--r-- gcc/testsuite/g++.dg/other/pr23205-2.C | 2
-rw-r--r-- gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C | 2
-rw-r--r-- gcc/testsuite/gcc.c-torture/execute/20101011-1.c | 4
-rw-r--r-- gcc/testsuite/gcc.dg/20020312-2.c | 2
-rw-r--r-- gcc/testsuite/gcc.dg/20040813-1.c | 2
-rw-r--r-- gcc/testsuite/gcc.dg/builtin-apply2.c | 2
-rw-r--r-- gcc/testsuite/gcc.dg/stack-usage-1.c | 4
-rw-r--r-- gcc/testsuite/gcc.dg/torture/pr51106-2.s | 2
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/aapcs64.exp | 67
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/abitest-2.h | 101
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/abitest-common.h | 139
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/abitest.S | 59
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/abitest.h | 159
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-1.c | 44
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-2.c | 71
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-3.c | 93
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-4.c | 27
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/ice_1.c | 21
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/ice_2.c | 13
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/ice_3.c | 16
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/ice_4.c | 9
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/ice_5.c | 20
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/macro-def.h | 286
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_1.c | 31
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_10.c | 26
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_11.c | 34
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_12.c | 44
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_13.c | 34
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_14.c | 35
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_15.c | 21
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_16.c | 32
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_17.c | 37
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_18.c | 34
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_19.c | 35
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_2.c | 16
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_20.c | 22
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_21.c | 21
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_22.c | 19
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_23.c | 42
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_24.c | 22
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_25.c | 61
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_26.c | 54
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_3.c | 18
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_4.c | 20
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_5.c | 24
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_6.c | 26
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_7.c | 30
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_8.c | 24
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_9.c | 32
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_align-1.c | 126
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_align-2.c | 42
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_align-3.c | 46
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_align-4.c | 42
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_complex.c | 18
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_int128.c | 17
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/test_quad_double.c | 26
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/type-def.h | 157
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-1.c | 50
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-10.c | 29
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-11.c | 32
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-12.c | 60
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-2.c | 59
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-3.c | 86
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-4.c | 93
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-5.c | 47
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-6.c | 40
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-7.c | 31
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-8.c | 25
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-9.c | 31
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aapcs64/validate_memory.h | 81
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/aarch64.exp | 45
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/adc-1.c | 18
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/adc-2.c | 277
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/arch-diagnostics-1.c | 7
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/arch-diagnostics-2.c | 7
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/arg-type-diagnostics-1.c | 15
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/asm-1.c | 15
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/clrsb.c | 9
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/clz.c | 9
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/cpu-diagnostics-1.c | 7
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/cpu-diagnostics-2.c | 7
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/cpu-diagnostics-3.c | 7
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/cpu-diagnostics-4.c | 7
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/csinc-1.c | 72
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/csinv-1.c | 50
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/csneg-1.c | 50
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/ctz.c | 11
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/extend.c | 170
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/fcvt.x | 55
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/fcvt_double_int.c | 15
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/fcvt_double_long.c | 13
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/fcvt_double_uint.c | 15
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/fcvt_double_ulong.c | 15
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/fcvt_float_int.c | 15
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/fcvt_float_long.c | 13
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/fcvt_float_uint.c | 15
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/fcvt_float_ulong.c | 15
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/ffs.c | 12
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/fmadd.c | 55
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/fnmadd-fastmath.c | 19
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/frint.x | 66
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/frint_double.c | 14
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/frint_float.c | 14
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/index.c | 111
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/mneg-1.c | 10
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/mneg-2.c | 10
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/mneg-3.c | 10
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/mnegl-1.c | 16
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/mnegl-2.c | 16
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c | 125
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/pic-constantpool1.c | 30
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/pic-symrefplus.c | 128
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/reload-valid-spoff.c | 66
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/scalar_intrinsics.c | 1181
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/table-intrinsics.c | 439
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/tst-1.c | 49
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect-abs-compile.c | 12
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect-abs.c | 131
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect-abs.x | 36
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect-compile.c | 20
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect-faddv-compile.c | 7
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect-faddv.c | 31
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect-faddv.x | 23
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin-compile.c | 7
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin.c | 105
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin.x | 32
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect-fmaxv-fminv-compile.c | 10
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect-fmaxv-fminv.x | 43
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect-fp-compile.c | 13
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect-fp.c | 137
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect-fp.x | 44
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect-mull-compile.c | 16
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect-mull.c | 138
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect-mull.x | 49
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect.c | 93
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vect.x | 140
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vector_intrinsics.c | 803
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vfp-1.c | 109
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vmlsq_laneq.c | 158
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/volatile-bitfields-1.c | 17
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/volatile-bitfields-2.c | 17
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/volatile-bitfields-3.c | 17
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/vsqrt.c | 66
-rw-r--r-- gcc/testsuite/gfortran.dg/debug/pr35154-stabs.f | 2
-rw-r--r-- gcc/testsuite/lib/target-supports.exp | 39
-rw-r--r-- libcpp/ChangeLog.aarch64 | 13
-rwxr-xr-x libcpp/configure | 1
-rw-r--r-- libcpp/configure.ac | 1
-rw-r--r-- libgcc/ChangeLog.aarch64 | 56
-rw-r--r-- libgcc/config.host | 13
-rw-r--r-- libgcc/config/aarch64/crti.S | 68
-rw-r--r-- libgcc/config/aarch64/crtn.S | 61
-rw-r--r-- libgcc/config/aarch64/linux-unwind.h | 143
-rw-r--r-- libgcc/config/aarch64/sfp-machine.h | 153
-rw-r--r-- libgcc/config/aarch64/sync-cache.c | 57
-rw-r--r-- libgcc/config/aarch64/t-aarch64 | 21
-rw-r--r-- libgcc/config/aarch64/t-softfp | 7
-rw-r--r-- libgomp/ChangeLog.aarch64 | 12
-rw-r--r-- libgomp/configure.tgt | 4
-rw-r--r-- libstdc++-v3/ChangeLog.aarch64 | 4
-rw-r--r-- libstdc++-v3/config/cpu/aarch64/cxxabi_tweaks.h | 60
-rw-r--r-- libstdc++-v3/configure.host | 3
203 files changed, 54832 insertions, 57 deletions
diff --git a/ChangeLog.linaro b/ChangeLog.linaro
index 8a43da7d004..11ee2416ea4 100644
--- a/ChangeLog.linaro
+++ b/ChangeLog.linaro
@@ -1,3 +1,8 @@
+2012-10-03 Matthew Gretton-Dann <matthew.gretton-dann@linaro.org>
+
+ Merge from fsf gcc arm/aarch64-4_7-branch
+ (svn branches/arm/aarch64-4_7-branch 191926).
+
2012-10-02 Matthew Gretton-Dann <matthew.gretton-dann@linaro.org>
LP: #1053348
diff --git a/config.guess b/config.guess
index b02565c7b2f..137bedf2e28 100755
--- a/config.guess
+++ b/config.guess
@@ -2,9 +2,9 @@
# Attempt to guess a canonical system name.
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
-# 2011 Free Software Foundation, Inc.
+# 2011, 2012 Free Software Foundation, Inc.
-timestamp='2011-06-03'
+timestamp='2012-08-14'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
@@ -17,9 +17,7 @@ timestamp='2011-06-03'
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
-# 02110-1301, USA.
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
@@ -57,8 +55,8 @@ GNU config.guess ($timestamp)
Originally written by Per Bothner.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
-2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free
-Software Foundation, Inc.
+2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
+Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -145,7 +143,7 @@ UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
*:NetBSD:*:*)
# NetBSD (nbsd) targets should (where applicable) match one or
- # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*,
+ # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
# *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
# switched to ELF, *-*-netbsd* would select the old
# object file format. This provides both forward
@@ -202,6 +200,10 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
echo "${machine}-${os}${release}"
exit ;;
+ *:Bitrig:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'`
+ echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE}
+ exit ;;
*:OpenBSD:*:*)
UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
@@ -792,21 +794,26 @@ EOF
echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
exit ;;
*:FreeBSD:*:*)
- case ${UNAME_MACHINE} in
- pc98)
- echo i386-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ case ${UNAME_PROCESSOR} in
amd64)
echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
*)
- echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
esac
exit ;;
i*:CYGWIN*:*)
echo ${UNAME_MACHINE}-pc-cygwin
exit ;;
+ *:MINGW64*:*)
+ echo ${UNAME_MACHINE}-pc-mingw64
+ exit ;;
*:MINGW*:*)
echo ${UNAME_MACHINE}-pc-mingw32
exit ;;
+ i*:MSYS*:*)
+ echo ${UNAME_MACHINE}-pc-msys
+ exit ;;
i*:windows32*:*)
# uname -m includes "-pc" on this system.
echo ${UNAME_MACHINE}-mingw32
@@ -861,6 +868,13 @@ EOF
i*86:Minix:*:*)
echo ${UNAME_MACHINE}-pc-minix
exit ;;
+ aarch64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ aarch64_be:Linux:*:*)
+ UNAME_MACHINE=aarch64_be
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
alpha:Linux:*:*)
case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
EV5) UNAME_MACHINE=alphaev5 ;;
@@ -895,13 +909,16 @@ EOF
echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
cris:Linux:*:*)
- echo cris-axis-linux-gnu
+ echo ${UNAME_MACHINE}-axis-linux-gnu
exit ;;
crisv32:Linux:*:*)
- echo crisv32-axis-linux-gnu
+ echo ${UNAME_MACHINE}-axis-linux-gnu
exit ;;
frv:Linux:*:*)
- echo frv-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ hexagon:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
i*86:Linux:*:*)
LIBC=gnu
@@ -943,7 +960,7 @@ EOF
test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
;;
or32:Linux:*:*)
- echo or32-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
padre:Linux:*:*)
echo sparc-unknown-linux-gnu
@@ -984,7 +1001,7 @@ EOF
echo ${UNAME_MACHINE}-dec-linux-gnu
exit ;;
x86_64:Linux:*:*)
- echo x86_64-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
xtensa*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
@@ -1191,6 +1208,9 @@ EOF
BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
echo i586-pc-haiku
exit ;;
+ x86_64:Haiku:*:*)
+ echo x86_64-unknown-haiku
+ exit ;;
SX-4:SUPER-UX:*:*)
echo sx4-nec-superux${UNAME_RELEASE}
exit ;;
@@ -1246,7 +1266,7 @@ EOF
NEO-?:NONSTOP_KERNEL:*:*)
echo neo-tandem-nsk${UNAME_RELEASE}
exit ;;
- NSE-?:NONSTOP_KERNEL:*:*)
+ NSE-*:NONSTOP_KERNEL:*:*)
echo nse-tandem-nsk${UNAME_RELEASE}
exit ;;
NSR-?:NONSTOP_KERNEL:*:*)
@@ -1315,11 +1335,11 @@ EOF
i*86:AROS:*:*)
echo ${UNAME_MACHINE}-pc-aros
exit ;;
+ x86_64:VMkernel:*:*)
+ echo ${UNAME_MACHINE}-unknown-esx
+ exit ;;
esac
-#echo '(No uname command or uname output not recognized.)' 1>&2
-#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
-
eval $set_cc_for_build
cat >$dummy.c <<EOF
#ifdef _SEQUENT_
diff --git a/config.sub b/config.sub
index 78176a44029..bdda9e4a32c 100755
--- a/config.sub
+++ b/config.sub
@@ -2,9 +2,9 @@
# Configuration validation subroutine script.
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
-# 2011 Free Software Foundation, Inc.
+# 2011, 2012 Free Software Foundation, Inc.
-timestamp='2011-10-29'
+timestamp='2012-08-18'
# This file is (in principle) common to ALL GNU software.
# The presence of a machine in this file suggests that SOME GNU software
@@ -21,9 +21,7 @@ timestamp='2011-10-29'
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
-# 02110-1301, USA.
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
@@ -76,8 +74,8 @@ version="\
GNU config.sub ($timestamp)
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
-2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free
-Software Foundation, Inc.
+2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
+Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -125,13 +123,17 @@ esac
maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
case $maybe_os in
nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
- linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
+ linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
knetbsd*-gnu* | netbsd*-gnu* | \
kopensolaris*-gnu* | \
storm-chaos* | os2-emx* | rtmk-nova*)
os=-$maybe_os
basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
;;
+ android-linux)
+ os=-linux-android
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown
+ ;;
*)
basic_machine=`echo $1 | sed 's/-[^-]*$//'`
if [ $basic_machine != $1 ]
@@ -223,6 +225,12 @@ case $os in
-isc*)
basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
;;
+ -lynx*178)
+ os=-lynxos178
+ ;;
+ -lynx*5)
+ os=-lynxos5
+ ;;
-lynx*)
os=-lynxos
;;
@@ -247,6 +255,7 @@ case $basic_machine in
# Some are omitted here because they have special meanings below.
1750a | 580 \
| a29k \
+ | aarch64 | aarch64_be \
| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
| am33_2.0 \
@@ -319,8 +328,7 @@ case $basic_machine in
c6x)
basic_machine=tic6x-unknown
;;
- m6811 | m68hc11 | m6812 | m68hc12 | picochip)
- # Motorola 68HC11/12.
+ m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip)
basic_machine=$basic_machine-unknown
os=-none
;;
@@ -333,7 +341,10 @@ case $basic_machine in
strongarm | thumb | xscale)
basic_machine=arm-unknown
;;
-
+ xgate)
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
xscaleeb)
basic_machine=armeb-unknown
;;
@@ -356,6 +367,7 @@ case $basic_machine in
# Recognize the basic CPU types with company name.
580-* \
| a29k-* \
+ | aarch64-* | aarch64_be-* \
| alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
| alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
| alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
@@ -719,7 +731,6 @@ case $basic_machine in
i370-ibm* | ibm*)
basic_machine=i370-ibm
;;
-# I'm not sure what "Sysv32" means. Should this be sysv3.2?
i*86v32)
basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
os=-sysv32
@@ -780,6 +791,10 @@ case $basic_machine in
microblaze)
basic_machine=microblaze-xilinx
;;
+ mingw64)
+ basic_machine=x86_64-pc
+ os=-mingw64
+ ;;
mingw32)
basic_machine=i386-pc
os=-mingw32
@@ -816,6 +831,10 @@ case $basic_machine in
ms1-*)
basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
;;
+ msys)
+ basic_machine=i386-pc
+ os=-msys
+ ;;
mvs)
basic_machine=i370-ibm
os=-mvs
@@ -1337,15 +1356,15 @@ case $os in
| -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
| -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
| -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
- | -openbsd* | -solidbsd* \
+ | -bitrig* | -openbsd* | -solidbsd* \
| -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
| -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
| -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
| -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
| -chorusos* | -chorusrdb* | -cegcc* \
- | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
- | -mingw32* | -linux-gnu* | -linux-android* \
- | -linux-newlib* | -linux-uclibc* \
+ | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+ | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
+ | -linux-newlib* | -linux-musl* | -linux-uclibc* \
| -uxpv* | -beos* | -mpeix* | -udk* \
| -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
| -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
@@ -1528,6 +1547,9 @@ case $basic_machine in
c4x-* | tic4x-*)
os=-coff
;;
+ hexagon-*)
+ os=-elf
+ ;;
tic54x-*)
os=-coff
;;
@@ -1555,9 +1577,6 @@ case $basic_machine in
;;
m68000-sun)
os=-sunos3
- # This also exists in the configure program, but was not the
- # default.
- # os=-sunos4
;;
m68*-cisco)
os=-aout
diff --git a/gcc/ChangeLog.aarch64 b/gcc/ChangeLog.aarch64
new file mode 100644
index 00000000000..00b93a66489
--- /dev/null
+++ b/gcc/ChangeLog.aarch64
@@ -0,0 +1,413 @@
+2012-10-01 Ian Bolton <ian.bolton@arm.com>
+ Richard Henderson <rth@redhat.com>
+
+ * config/aarch64/aarch64.c (aarch64_expand_mov_immediate): Fix a
+ functional typo and refactor code in switch statement.
+ * config/aarch64/aarch64.md (add_losym): Handle symbol + offset.
+ * config/aarch64/predicates.md (aarch64_tls_ie_symref): Match const.
+ (aarch64_tls_le_symref): Likewise.
+
+2012-09-26 Marcus Shawcroft <marcus.shawcroft@arm.com>
+
+ * config/aarch64/predicates.md (aarch64_simd_reg_or_zero): Remove
+ duplicate.
+
+2012-09-25 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/aarch64.c (aarch64_shift_truncation_mask): Define.
+ (TARGET_SHIFT_TRUNCATION_MASK): Define.
+ * config/aarch64/aarch64.h (SHIFT_COUNT_TRUNCATED): Conditionalize on
+ TARGET_SIMD.
+
+2012-09-25 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/arm_neon.h (vrshrn_high_n_s16, vrshrn_high_n_s32)
+ (vrshrn_high_n_s64, vrshrn_high_n_u16, vrshrn_high_n_u32)
+ (vrshrn_high_n_u64, vshrn_high_n_s16, vshrn_high_n_s32)
+	(vshrn_high_n_s64, vshrn_high_n_u16, vshrn_high_n_u32)
+ (vshrn_high_n_u64): Fix template to reference correct operands.
+
+2012-09-25 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/arm_neon.h (vmovq_n_f64): Add.
+
+2012-09-25 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/arm_neon.h (vfmaq_lane_f64): Fix prototype and
+ assembler template accordingly.
+
+2012-09-25 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/aarch64-protos.h (aarch64_simd_imm_scalar_p): Declare.
+ * config/aarch64/aarch64.c (aarch64_simd_imm_scalar_p): New.
+ * config/aarch64/aarch64.md (*movdi_aarch64): Add alternative for moving
+	a valid scalar immediate into an Advanced SIMD D-register.
+ * config/aarch64/constraints.md (Dd): New.
+
+2012-09-25 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/aarch64-simd.md (aarch64_cm<cmp><mode>): Tighten
+ predicate for operand 2 of the compare pattern to accept register
+ or zero.
+ * config/aarch64/predicates.md (aarch64_simd_reg_or_zero): New.
+
+2012-09-25 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/aarch64-simd.md (*aarch64_simd_mov<mode>): Split Q-reg
+ vector value move contained in general registers.
+
+2012-09-25 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/aarch64.c (aarch64_simd_expand_builtin): Expand binary
+ operations' constant operand only if the predicate allows it.
+
+2012-09-25 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/aarch64-builtins.c (aarch64_simd_builtin_data):
+ Populate intrinsic table with struct loads and store descriptors.
+ (init_aarch64_simd_builtins): Remove cruft.
+ (aarch64_simd_expand_builtin): Expand the builtins.
+ * config/aarch64/aarch64-modes.def: Define new vector modes for register
+ lists.
+ * config/aarch64/aarch64-protos.h (aarch64_simd_attr_length_move): New.
+ (aarch64_simd_mem_operand_p): New.
+ (aarch64_simd_imm_zero_p): New.
+ (aarch64_output_move_struct): New.
+ (aarch64_simd_disambiguate_copy): New.
+ * config/aarch64/aarch64-simd.md (simd_mode): Add OI, CI and XI to the
+ list.
+ (mov<mode>): Tighten predicates for simd operand.
+ (movmisalign<mode>): Likewise.
+ (*aarch64_simd_mov<mode>): Tighten predicates and constraints for simd
+ operands.
+ (*aarch64_combinez<mode>): New.
+ (vec_load_lanesoi<mode>, vec_store_lanesoi<mode>)
+ (vec_load_lanesci<mode>, vec_store_lanesci<mode>)
+ (vec_load_lanesxi<mode>)
+ (vec_store_lanesxi<mode>, mov<mode>, *aarch64_mov<mode>)
+ (aarch64_ld2<mode>_dreg, aarch64_ld3<mode>_dreg)
+ (aarch64_ld4<mode>_dreg, aarch64_ld<VSTRUCT:nregs><VDC:mode>)
+ (aarch64_ld<VSTRUCT:nregs><VQ:mode>)
+ (aarch64_get_dreg<VSTRUCT:mode><VDC:mode>)
+ (aarch64_get_qreg<VSTRUCT:mode><VQ:mode>, aarch64_st2<mode>_dreg)
+ (aarch64_st3<mode>_dreg, aarch64_st4<mode>_dreg)
+ (aarch64_st<VSTRUCT:nregs><VDC:mode>)
+ (aarch64_st<VSTRUCT:nregs><VQ:mode>)
+ (aarch64_set_qreg<VSTRUCT:mode><VQ:mode>): New expanders and patterns
+ for vector struct loads and stores.
+ * config/aarch64/aarch64.c (aarch64_vect_struct_mode_p): New.
+ (aarch64_vector_mode_p): New.
+ (aarch64_array_mode_supported_p): New.
+ (aarch64_hard_regno_mode_ok): Check that reglists don't go out of
+ range and don't allocate general regs to large int modes.
+ (aarch64_classify_address): Restrict addressing modes of large int
+ modes to same as SIMD addressing modes.
+ (aarch64_print_operand): Print specifiers for register lists.
+	(aarch64_legitimize_reload_address): Treat large int modes similarly to
+ SIMD modes.
+	(aarch64_class_max_nregs): Return the correct max number of registers
+ for a particular mode.
+ (aarch64_legitimate_constant_p): Do not allow large int modes
+ immediate values.
+ (aarch64_simd_imm_zero_p): New.
+ (aarch64_simd_mem_operand_p): Check if mem operand has a valid SIMD
+ addressing mode.
+ (aarch64_simd_disambiguate_copy): Copy values that span multiple
+	registers, with and without overlap.
+ (aarch64_simd_attr_length_move): Length of instruction sequence
+ depending on the mode.
+ * config/aarch64/aarch64.h (AARCH64_VALID_SIMD_QREG_MODE): New.
+ * config/aarch64/aarch64.md (UNSPEC_VSTRUCTDUMMY, UNSPEC_LD2)
+ (UNSPEC_LD3, UNSPEC_LD4, UNSPEC_ST2, UNSPEC_ST3, UNSPEC_ST4): New.
+ * config/aarch64/arm_neon.h: Remove assembler implementation of vector
+ struct loads and stores and add new C implementations.
+ * config/aarch64/constraints.md (Utv): New memory constraint for SIMD
+ memory operands.
+ (Dz): New.
+ * config/aarch64/iterators.md (VDIC, VSTRUCT, DX): New mode iterators.
+ (Vendreg, nregs, VRL2, VRL3, VRL4, VSTRUCT_DREG): New mode attributes.
+ * config/aarch64/predicates.md (aarch64_simd_struct_operand): New.
+ (aarch64_simd_general_operand): New.
+ (aarch64_simd_nonimmediate_operand): New.
+ (aarch64_simd_reg_or_zero): New.
+ (aarch64_simd_imm_zero): New.
+
+2012-09-20 Ramana Radhakrishnan <ramana.radhakrishnan@arm.com>
+
+ * config/aarch64/aarch64.md: Make unspec and unspecv constants
+ c_enums and split out to iterators.md and sync.md.
+ * config/aarch64/iterators.md: Add SIMD unspec c_enums.
+ * config/aarch64/sync.md: Add sync unspecv c_enums.
+
+2012-09-18 Ian Bolton <ian.bolton@arm.com>
+
+ * config/aarch64/aarch64.h: Define CTZ_DEFINED_VALUE_AT_ZERO.
+ * config/aarch64/aarch64.md (clrsb<mode>2): New pattern.
+ * config/aarch64/aarch64.md (rbit<mode>2): New pattern.
+ * config/aarch64/aarch64.md (ctz<mode>2): New pattern.
+
+2012-09-18 Marcus Shawcroft <marcus.shawcroft@arm.com>
+
+ * config/aarch64/aarch64-linux.h (MULTIARCH_TUPLE): Remove.
+ (STANDARD_STARTFILE_PREFIX_1): Likewise.
+ (STANDARD_STARTFILE_PREFIX_2): Likewise.
+
+2012-09-17 Ian Bolton <ian.bolton@arm.com>
+
+ * config/aarch64/aarch64.md (csinc3<mode>): Turn into named
+ pattern.
+ * config/aarch64/aarch64.md (ffs<mode>2): New pattern.
+
+2012-09-17 Ian Bolton <ian.bolton@arm.com>
+
+	* config/aarch64/aarch64.md (fmsub<mode>4): Rename to fnma<mode>4.
+	* config/aarch64/aarch64.md (fnmsub<mode>4): Rename to fms<mode>4.
+	* config/aarch64/aarch64.md (fnmadd<mode>4): Rename to fnms<mode>4.
+ * config/aarch64/aarch64.md (*fnmadd<mode>4): New pattern.
+
+2012-09-11 Sofiane Naci <sofiane.naci@arm.com>
+
+	* config.sub: Update to version 2012-08-18.
+	* config.guess: Update to version 2012-08-14.
+
+2012-09-10 James Greenhalgh <james.greenhalgh@arm.com>
+ Richard Earnshaw <rearnsha@arm.com>
+
+ * common/config/aarch64/aarch64-common.c
+ (aarch_option_optimization_table): New.
+ (TARGET_OPTION_OPTIMIZATION_TABLE): Define.
+ * gcc/config.gcc ([aarch64] target_has_targetm_common): Set to yes.
+ * gcc/config/aarch64/aarch64-elf.h (ASM_OUTPUT_DEF): New definition.
+ * gcc/config/aarch64/aarch64.c (TARGET_MIN_ANCHOR_OFFSET): Define.
+ (TARGET_MAX_ANCHOR_OFFSET): Likewise.
+
+2012-09-10 Marcus Shawcroft <marcus.shawcroft@arm.com>
+
+ * config/aarch64/aarch64.c (aarch64_classify_address):
+ Allow 16 byte modes in constant pool.
+
+2012-07-23 Ian Bolton <ian.bolton@arm.com>
+
+ * gcc/config/aarch64/aarch64.c (aarch64_print_operand): Use
+ aarch64_classify_symbolic_expression for classifying operands.
+
+ * gcc/config/aarch64/aarch64.c
+ (aarch64_classify_symbolic_expression): New function.
+
+ * gcc/config/aarch64/aarch64.c (aarch64_symbolic_constant_p):
+ New function.
+
+ * gcc/config/aarch64/predicates.md (aarch64_valid_symref):
+ Symbol with constant offset is a valid symbol reference.
+
+
+2012-07-17 Marcus Shawcroft <marcus.shawcroft@arm.com>
+
+ * config/aarch64/aarch64.c
+ (aarch64_regno_ok_for_index_p): Handle NULL reg_renumber.
+ (aarch64_regno_ok_for_base_p): Likewise.
+ (offset_7bit_signed_scaled_p): New.
+ (offset_9bit_signed_unscaled_p): New.
+ (offset_12bit_unsigned_scaled_p): New.
+ (aarch64_classify_address): Replace pair_p with allow_reg_index_p.
+ Conservative test for valid TImode and TFmode addresses. Use
+	offset_7bit_signed_scaled_p, offset_9bit_signed_unscaled_p and
+ offset_12bit_unsigned_scaled_p. Remove explicit TImode and TFmode
+ tests.
+ * config/aarch64/aarch64.md (movti_aarch64): Replace 'm' with 'Ump'.
+ (movtf_aarch64): Replace 'm' with 'Ump', replace 'Utf' with 'm'.
+ * config/aarch64/constraints.md (Utf): Remove.
+	(Ump): New.
+
+2012-07-17 Marcus Shawcroft <marcus.shawcroft@arm.com>
+
+ * config/aarch64/aarch64.c (aarch64_rtx_costs):
+ Move misplaced parenthesis.
+
+2012-07-17 Marcus Shawcroft <marcus.shawcroft@arm.com>
+
+ * config/aarch64/aarch64-simd.md (*aarch64_simd_mov<mode>):
+ Do not emit lsl for a shift of 0.
+	(*aarch64_simd_mov<mode>): Likewise.
+
+2012-07-04 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/aarch64-linux.h (LINUX_TARGET_LINK_SPEC): Rename
+ LINUX_DYNAMIC_LINKER to GLIBC_DYNAMIC_LINKER.
+
+2012-06-29 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/aarch64.h (aarch64_cmodel): Fix enum name.
+
+2012-06-22 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/aarch64-simd.md (aarch64_sq<r>dmulh_lane<mode>,
+ aarch64_sqdml<SBINQOPS:as>l_lane<mode>_internal,
+ aarch64_sqdmlal_lane<mode>, aarch64_sqdmlal_laneq<mode>,
+ aarch64_sqdmlsl_lane<mode>, aarch64_sqdmlsl_laneq<mode>,
+ aarch64_sqdml<SBINQOPS:as>l2_lane<mode>_internal,
+ aarch64_sqdmlal2_lane<mode>, aarch64_sqdmlal2_laneq<mode>,
+ aarch64_sqdmlsl2_lane<mode>, aarch64_sqdmlsl2_laneq<mode>,
+ aarch64_sqdmull_lane<mode>_internal, aarch64_sqdmull_lane<mode>,
+ aarch64_sqdmull_laneq<mode>, aarch64_sqdmull2_lane<mode>_internal,
+ aarch64_sqdmull2_lane<mode>, aarch64_sqdmull2_laneq<mode>): Change the
+ constraint of the indexed operand to use <vwl> instead of w.
+ * config/aarch64/aarch64.c (aarch64_hard_regno_nregs): Add case for
+ FP_LO_REGS class.
+	(aarch64_regno_regclass): Return FP_LO_REGS if register is in V0 - V15.
+ (aarch64_secondary_reload): Change condition to check for both FP reg
+ classes.
+ (aarch64_class_max_nregs): Add case for FP_LO_REGS.
+ * config/aarch64/aarch64.h (reg_class): New register class FP_LO_REGS.
+ (REG_CLASS_NAMES): Likewise.
+ (REG_CLASS_CONTENTS): Likewise.
+ (FP_LO_REGNUM_P): New.
+ * config/aarch64/aarch64.md (V15_REGNUM): New.
+ * config/aarch64/constraints.md (x): New register constraint.
+ * config/aarch64/iterators.md (vwx): New.
+
+2012-06-22 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/arm_neon.h (vpadd_f64): Remove.
+
+2012-06-22 Sofiane Naci <sofiane.naci@arm.com>
+
+ [AArch64] Update LINK_SPEC.
+
+ * config/aarch64/aarch64-linux.h (LINUX_TARGET_LINK_SPEC): Remove
+ %{version:-v}, %{b} and %{!dynamic-linker}.
+
+2012-06-22 Sofiane Naci <sofiane.naci@arm.com>
+
+ [AArch64] Replace sprintf with snprintf.
+
+ * config/aarch64/aarch64.c
+ (aarch64_elf_asm_constructor): Replace sprintf with snprintf.
+ (aarch64_elf_asm_destructor): Likewise.
+ (aarch64_output_casesi): Likewise.
+ (aarch64_output_asm_insn): Likewise.
+ * config/aarch64/aarch64-builtins.c (init_aarch64_simd_builtins):
+ Likewise.
+ * config/aarch64/aarch64-simd.md (*aarch64_simd_mov<mode>): Replace
+ sprintf with snprintf, and fix code layout.
+
+2012-06-22 Sofiane Naci <sofiane.naci@arm.com>
+
+ [AArch64] Fix documentation layout.
+
+ * doc/invoke.texi: Fix white spaces after dots.
+ Change aarch64*be-*-* to aarch64_be-*-*.
+ Add documentation for -mcmodel=tiny.
+ (-march): Fix formatting.
+ (-mcpu): Likewise.
+ (-mtune): Rephrase.
+ (-march and -mcpu feature modifiers): New subsection.
+
+2012-06-22 Sofiane Naci <sofiane.naci@arm.com>
+
+ [AArch64] Use Enums for code models option selection.
+
+ * config/aarch64/aarch64-elf-raw.h (AARCH64_DEFAULT_MEM_MODEL): Delete.
+ * config/aarch64/aarch64-linux.h (AARCH64_DEFAULT_MEM_MODEL): Delete.
+ * config/aarch64/aarch64-opts.h (enum aarch64_code_model): New.
+ * config/aarch64/aarch64-protos.h: Update comments.
+ * config/aarch64/aarch64.c: Update comments.
+ (aarch64_default_mem_model): Rename to aarch64_code_model.
+ (aarch64_expand_mov_immediate): Remove error message.
+ (aarch64_select_rtx_section): Remove assertion and update comment.
+ (aarch64_override_options): Move memory model initialization from here.
+ (struct aarch64_mem_model): Delete.
+ (aarch64_memory_models[]): Delete.
+ (initialize_aarch64_memory_model): Rename to
+ initialize_aarch64_code_model and update.
+ (aarch64_classify_symbol): Handle AARCH64_CMODEL_TINY and
+	AARCH64_CMODEL_TINY_PIC.
+ * config/aarch64/aarch64.h
+ (enum aarch64_memory_model): Delete.
+ (aarch64_default_mem_model): Rename to aarch64_cmodel.
+ (HAS_LONG_COND_BRANCH): Update.
+ (HAS_LONG_UNCOND_BRANCH): Update.
+ * config/aarch64/aarch64.opt
+ (cmodel): New.
+ (mcmodel): Update.
+
+2012-06-22 Sofiane Naci <sofiane.naci@arm.com>
+
+ [AArch64] Use Enums for TLS option selection.
+
+ * config/aarch64/aarch64-opts.h (enum aarch64_tls_type): New.
+ * config/aarch64/aarch64.c
+ (aarch64_tls_dialect): Remove.
+ (tls_symbolic_operand_type): Update comment.
+ (aarch64_override_options): Remove TLS option setup code.
+ * config/aarch64/aarch64.h
+ (TARGET_TLS_TRADITIONAL): Remove.
+ (TARGET_TLS_DESC): Update definition.
+ (enum tls_dialect): Remove.
+	(enum tls_dialect aarch64_tls_dialect): Remove.
+ * config/aarch64/aarch64.opt
+ (tls_type): New.
+ (mtls-dialect): Update.
+
+2012-05-25 Ian Bolton <ian.bolton@arm.com>
+ Jim MacArthur <jim.macarthur@arm.com>
+ Marcus Shawcroft <marcus.shawcroft@arm.com>
+ Nigel Stephens <nigel.stephens@arm.com>
+ Ramana Radhakrishnan <ramana.radhakrishnan@arm.com>
+ Richard Earnshaw <rearnsha@arm.com>
+ Sofiane Naci <sofiane.naci@arm.com>
+	    Stephen Thomas <stephen.thomas@arm.com>
+ Tejas Belagod <tejas.belagod@arm.com>
+ Yufeng Zhang <yufeng.zhang@arm.com>
+
+ * common/config/aarch64/aarch64-common.c: New file.
+ * config/aarch64/aarch64-arches.def: New file.
+ * config/aarch64/aarch64-builtins.c: New file.
+ * config/aarch64/aarch64-cores.def: New file.
+ * config/aarch64/aarch64-elf-raw.h: New file.
+ * config/aarch64/aarch64-elf.h: New file.
+ * config/aarch64/aarch64-generic.md: New file.
+ * config/aarch64/aarch64-linux.h: New file.
+ * config/aarch64/aarch64-modes.def: New file.
+ * config/aarch64/aarch64-option-extensions.def: New file.
+ * config/aarch64/aarch64-opts.h: New file.
+ * config/aarch64/aarch64-protos.h: New file.
+ * config/aarch64/aarch64-simd.md: New file.
+ * config/aarch64/aarch64-tune.md: New file.
+ * config/aarch64/aarch64.c: New file.
+ * config/aarch64/aarch64.h: New file.
+ * config/aarch64/aarch64.md: New file.
+ * config/aarch64/aarch64.opt: New file.
+ * config/aarch64/arm_neon.h: New file.
+ * config/aarch64/constraints.md: New file.
+ * config/aarch64/gentune.sh: New file.
+ * config/aarch64/iterators.md: New file.
+ * config/aarch64/large.md: New file.
+ * config/aarch64/predicates.md: New file.
+ * config/aarch64/small.md: New file.
+ * config/aarch64/sync.md: New file.
+ * config/aarch64/t-aarch64-linux: New file.
+ * config/aarch64/t-aarch64: New file.
+ * config.gcc: Add AArch64.
+ * configure.ac: Add AArch64 TLS support detection.
+ * configure: Regenerate.
+ * doc/extend.texi (Complex Numbers): Add AArch64.
+ * doc/invoke.texi (AArch64 Options): New.
+ * doc/md.texi (Machine Constraints): Add AArch64.
+
+ * read-rtl.c (rtx_list): New data structure.
+ (int_iterator_mapping): New data structure.
+ (int_iterator_data): New. List of int iterator details.
+ (num_int_iterator_data): New.
+ (ints): New group list.
+ (find_int): New. Find an int iterator in a list.
+ (dummy_uses_int_iterator): Dummy handle.
+ (dummy_apply_int_iterator): Dummy handle.
+ (uses_int_iterator_p): New.
+ (apply_iterator_to_rtx): Handle case for rtx field specifier 'i'.
+	(initialize_iterators): Initialize int iterators data structs.
+	(find_int_iterator): New.  Find an int iterator in a hash-table.
+	(add_int_iterator): Add an int iterator to the database.
+ (read_rtx): Parse and read int iterators mapping and attributes.
+ Initialize int iterators group's hash-table. Memory management.
+ (read_rtx_code): Handle case for rtl field specifier 'i'.
diff --git a/gcc/common/config/aarch64/aarch64-common.c b/gcc/common/config/aarch64/aarch64-common.c
new file mode 100644
index 00000000000..df724063d27
--- /dev/null
+++ b/gcc/common/config/aarch64/aarch64-common.c
@@ -0,0 +1,77 @@
+/* Common hooks for AArch64.
+ Copyright (C) 2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tm_p.h"
+#include "common/common-target.h"
+#include "common/common-target-def.h"
+#include "opts.h"
+#include "flags.h"
+
+#ifdef TARGET_BIG_ENDIAN_DEFAULT
+#undef TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS (MASK_BIG_END)
+#endif
+
+#undef TARGET_HANDLE_OPTION
+#define TARGET_HANDLE_OPTION aarch64_handle_option
+
+/* Implement TARGET_HANDLE_OPTION.
+ This function handles the target specific options for CPU/target selection.
+
+ march wins over mcpu, so when march is defined, mcpu takes the same value,
+   otherwise march remains undefined.  If march and mcpu are used together,
+   the rightmost option wins.  mtune can be used with either march or
+   mcpu.  */
+
+static bool
+aarch64_handle_option (struct gcc_options *opts,
+ struct gcc_options *opts_set ATTRIBUTE_UNUSED,
+ const struct cl_decoded_option *decoded,
+ location_t loc ATTRIBUTE_UNUSED)
+{
+ size_t code = decoded->opt_index;
+ const char *arg = decoded->arg;
+
+ switch (code)
+ {
+ case OPT_march_:
+ opts->x_aarch64_arch_string = arg;
+ opts->x_aarch64_cpu_string = arg;
+ return true;
+
+ case OPT_mcpu_:
+ opts->x_aarch64_cpu_string = arg;
+ opts->x_aarch64_arch_string = NULL;
+ return true;
+
+ case OPT_mtune_:
+ opts->x_aarch64_tune_string = arg;
+ return true;
+
+ default:
+ return true;
+ }
+}
+
+struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER;
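
As an aside on the handler above: -march sets both the arch and cpu strings, -mcpu sets the cpu string and clears any earlier arch, so whichever of the two appears last on the command line decides both values, while -mtune is tracked independently. A minimal standalone sketch of that precedence, using simplified stand-in types rather than GCC's real option machinery:

    /* Sketch of the march/mcpu/mtune precedence implemented by
       aarch64_handle_option above.  The struct and option codes are
       simplified stand-ins, not GCC's.  */
    #include <stdio.h>

    struct opts { const char *arch, *cpu, *tune; };
    enum opt_code { OPT_MARCH, OPT_MCPU, OPT_MTUNE };

    static void
    handle (struct opts *o, enum opt_code code, const char *arg)
    {
      switch (code)
        {
        case OPT_MARCH: o->arch = arg; o->cpu = arg; break;  /* march fixes cpu too */
        case OPT_MCPU:  o->cpu = arg; o->arch = NULL; break; /* mcpu resets arch */
        case OPT_MTUNE: o->tune = arg; break;                /* independent */
        }
    }

    int
    main (void)
    {
      struct opts o = { NULL, NULL, NULL };
      handle (&o, OPT_MARCH, "armv8-a");  /* -march=armv8-a */
      handle (&o, OPT_MCPU, "generic");   /* -mcpu=generic: rightmost wins */
      printf ("arch=%s cpu=%s\n", o.arch ? o.arch : "(undefined)", o.cpu);
      return 0;
    }

Compiled and run, the sketch prints "arch=(undefined) cpu=generic", mirroring the rightmost-wins rule described in the comment.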
diff --git a/gcc/config.gcc b/gcc/config.gcc
index 7807090d60c..088ee87aa6a 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -317,6 +317,13 @@ m32c*-*-*)
tmake_file=m32c/t-m32c
target_has_targetm_common=no
;;
+aarch64*-*-*)
+ cpu_type=aarch64
+ need_64bit_hwint=yes
+ extra_headers="arm_neon.h"
+ extra_objs="aarch64-builtins.o"
+ target_has_targetm_common=yes
+ ;;
alpha*-*-*)
cpu_type=alpha
need_64bit_hwint=yes
@@ -770,6 +777,27 @@ case ${target} in
esac
case ${target} in
+aarch64*-*-elf)
+ tm_file="${tm_file} dbxelf.h elfos.h newlib-stdint.h"
+ tm_file="${tm_file} aarch64/aarch64-elf.h aarch64/aarch64-elf-raw.h"
+ tmake_file="${tmake_file} aarch64/t-aarch64"
+ use_gcc_stdint=wrap
+ case $target in
+ aarch64_be-*)
+ tm_defines="${tm_defines} TARGET_BIG_ENDIAN_DEFAULT=1"
+ ;;
+ esac
+ ;;
+aarch64*-*-linux*)
+ tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h linux.h glibc-stdint.h"
+ tm_file="${tm_file} aarch64/aarch64-elf.h aarch64/aarch64-linux.h"
+ tmake_file="${tmake_file} aarch64/t-aarch64 aarch64/t-aarch64-linux"
+ case $target in
+ aarch64_be-*)
+ tm_defines="${tm_defines} TARGET_BIG_ENDIAN_DEFAULT=1"
+ ;;
+ esac
+ ;;
alpha*-*-linux*)
tm_file="${tm_file} alpha/elf.h alpha/linux.h alpha/linux-elf.h glibc-stdint.h"
extra_options="${extra_options} alpha/elf.opt"
@@ -3024,6 +3052,92 @@ fi
supported_defaults=
case "${target}" in
+ aarch64*-*-*)
+ supported_defaults="cpu arch"
+ for which in cpu arch; do
+
+ eval "val=\$with_$which"
+ base_val=`echo $val | sed -e 's/\+.*//'`
+ ext_val=`echo $val | sed -e 's/[a-z0-9\-]\+//'`
+
+ if [ $which = arch ]; then
+ def=aarch64-arches.def
+ pattern=AARCH64_ARCH
+ else
+ def=aarch64-cores.def
+ pattern=AARCH64_CORE
+ fi
+
+ ext_mask=AARCH64_CPU_DEFAULT_FLAGS
+
+ # Find the base CPU or ARCH id in aarch64-cores.def or
+ # aarch64-arches.def
+ if [ x"$base_val" = x ] \
+ || grep "^$pattern(\"$base_val\"," \
+ ${srcdir}/config/aarch64/$def \
+ > /dev/null; then
+
+ if [ $which = arch ]; then
+ base_id=`grep "^$pattern(\"$base_val\"," \
+ ${srcdir}/config/aarch64/$def | \
+ sed -e 's/^[^,]*,[ ]*//' | \
+ sed -e 's/,.*$//'`
+ else
+ base_id=`grep "^$pattern(\"$base_val\"," \
+ ${srcdir}/config/aarch64/$def | \
+ sed -e 's/^[^,]*,[ ]*//' | \
+ sed -e 's/,.*$//'`
+ fi
+
+ while [ x"$ext_val" != x ]
+ do
+ ext_val=`echo $ext_val | sed -e 's/\+//'`
+ ext=`echo $ext_val | sed -e 's/\+.*//'`
+ base_ext=`echo $ext | sed -e 's/^no//'`
+
+ if [ x"$base_ext" = x ] \
+ || grep "^AARCH64_OPT_EXTENSION(\"$base_ext\"," \
+ ${srcdir}/config/aarch64/aarch64-option-extensions.def \
+ > /dev/null; then
+
+ ext_on=`grep "^AARCH64_OPT_EXTENSION(\"$base_ext\"," \
+ ${srcdir}/config/aarch64/aarch64-option-extensions.def | \
+ sed -e 's/^[^,]*,[ ]*//' | \
+ sed -e 's/,.*$//'`
+ ext_off=`grep "^AARCH64_OPT_EXTENSION(\"$base_ext\"," \
+ ${srcdir}/config/aarch64/aarch64-option-extensions.def | \
+ sed -e 's/^[^,]*,[ ]*[^,]*,[ ]*//' | \
+ sed -e 's/,.*$//' | \
+ sed -e 's/).*$//'`
+
+ if [ $ext = $base_ext ]; then
+ # Adding extension
+ ext_mask="("$ext_mask") | ("$ext_on")"
+ else
+ # Removing extension
+ ext_mask="("$ext_mask") & ~("$ext_off")"
+ fi
+
+ true
+ else
+ echo "Unknown extension used in --with-$which=$val" 1>&2
+ exit 1
+ fi
+ ext_val=`echo $ext_val | sed -e 's/[a-z0-9]\+//'`
+ done
+
+ ext_mask="(("$ext_mask") << 6)"
+ if [ x"$base_id" != x ]; then
+ target_cpu_cname="TARGET_CPU_$base_id | $ext_mask"
+ fi
+ true
+ else
+ echo "Unknown $which used in --with-$which=$val" 1>&2
+ exit 1
+ fi
+ done
+ ;;
+
alpha*-*-*)
supported_defaults="cpu tune"
for which in cpu tune; do
@@ -3512,6 +3626,15 @@ esac
# Set some miscellaneous flags for particular targets.
target_cpu_default2=
case ${target} in
+ aarch64*-*-*)
+ if test x$target_cpu_cname = x
+ then
+ target_cpu_default2=TARGET_CPU_generic
+ else
+ target_cpu_default2=$target_cpu_cname
+ fi
+ ;;
+
alpha*-*-*)
if test x$gas = xyes
then
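
The --with-arch/--with-cpu handling above assembles, as a shell string, a C expression of the shape TARGET_CPU_<id> | (((AARCH64_CPU_DEFAULT_FLAGS | on-flags...) & ~off-flags...) << 6), which later becomes the target's TARGET_CPU_DEFAULT. A sketch of the equivalent arithmetic, with invented flag values (the real constants live in aarch64.h and the .def files):

    /* Sketch of the extension-mask arithmetic that config.gcc builds
       textually.  All flag values here are invented for illustration.  */
    #include <stdio.h>

    #define AARCH64_FL_FP             0x1u  /* hypothetical */
    #define AARCH64_FL_SIMD           0x2u  /* hypothetical */
    #define AARCH64_FL_CRYPTO         0x4u  /* hypothetical */
    #define AARCH64_CPU_DEFAULT_FLAGS (AARCH64_FL_FP | AARCH64_FL_SIMD)
    #define TARGET_CPU_generic        0u    /* hypothetical core id */

    int
    main (void)
    {
      /* Encoding of a hypothetical --with-arch=armv8-a+crypto+nofp:  */
      unsigned ext_mask = AARCH64_CPU_DEFAULT_FLAGS;
      unsigned target_cpu_default;
      ext_mask |= AARCH64_FL_CRYPTO;   /* "+crypto" ORs in the on-flags */
      ext_mask &= ~AARCH64_FL_FP;      /* "+nofp" ANDs out the off-flags */
      target_cpu_default = TARGET_CPU_generic | (ext_mask << 6);
      printf ("TARGET_CPU_DEFAULT = %#x\n", target_cpu_default);
      return 0;
    }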
diff --git a/gcc/config/aarch64/aarch64-arches.def b/gcc/config/aarch64/aarch64-arches.def
new file mode 100644
index 00000000000..3ac34baf17e
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-arches.def
@@ -0,0 +1,29 @@
+/* Copyright (C) 2011, 2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Before using #include to read this file, define a macro:
+
+ AARCH64_ARCH(NAME, CORE, ARCH, FLAGS)
+
+ The NAME is the name of the architecture, represented as a string
+ constant. The CORE is the identifier for a core representative of
+ this architecture. ARCH is the architecture revision. FLAGS are
+ the flags implied by the architecture. */
+
+AARCH64_ARCH("armv8-a", generic, 8, AARCH64_FL_FOR_ARCH8)
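
The header comment above describes the usual GCC .def idiom: a consumer defines AARCH64_ARCH, includes the file so each entry expands into a record, then undefines the macro. A sketch of such a consumer, with an invented struct layout and flag value (the real consumer is aarch64.c):

    /* Sketch of the X-macro consumption pattern described above.
       The struct layout and flag constant are illustrative only.  */
    struct arch_info
    {
      const char *name;     /* NAME: architecture name string */
      int revision;         /* ARCH: architecture revision */
      unsigned long flags;  /* FLAGS: features implied by the architecture */
    };

    #define AARCH64_FL_FOR_ARCH8 0x3UL  /* hypothetical flag set */

    #define AARCH64_ARCH(NAME, CORE, ARCH, FLAGS) \
      { NAME, ARCH, FLAGS },

    static const struct arch_info all_architectures[] =
    {
    #include "aarch64-arches.def"  /* expands to { "armv8-a", 8, ... }, */
      { NULL, 0, 0 }
    };
    #undef AARCH64_ARCH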
diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
new file mode 100644
index 00000000000..429a0dfdbfc
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -0,0 +1,1320 @@
+/* Builtins' description for AArch64 SIMD architecture.
+ Copyright (C) 2011, 2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "tree.h"
+#include "expr.h"
+#include "tm_p.h"
+#include "recog.h"
+#include "langhooks.h"
+#include "diagnostic-core.h"
+#include "optabs.h"
+
+enum aarch64_simd_builtin_type_bits
+{
+ T_V8QI = 0x0001,
+ T_V4HI = 0x0002,
+ T_V2SI = 0x0004,
+ T_V2SF = 0x0008,
+ T_DI = 0x0010,
+ T_DF = 0x0020,
+ T_V16QI = 0x0040,
+ T_V8HI = 0x0080,
+ T_V4SI = 0x0100,
+ T_V4SF = 0x0200,
+ T_V2DI = 0x0400,
+ T_V2DF = 0x0800,
+ T_TI = 0x1000,
+ T_EI = 0x2000,
+ T_OI = 0x4000,
+ T_XI = 0x8000,
+ T_SI = 0x10000,
+ T_HI = 0x20000,
+ T_QI = 0x40000
+};
+
+#define v8qi_UP T_V8QI
+#define v4hi_UP T_V4HI
+#define v2si_UP T_V2SI
+#define v2sf_UP T_V2SF
+#define di_UP T_DI
+#define df_UP T_DF
+#define v16qi_UP T_V16QI
+#define v8hi_UP T_V8HI
+#define v4si_UP T_V4SI
+#define v4sf_UP T_V4SF
+#define v2di_UP T_V2DI
+#define v2df_UP T_V2DF
+#define ti_UP T_TI
+#define ei_UP T_EI
+#define oi_UP T_OI
+#define xi_UP T_XI
+#define si_UP T_SI
+#define hi_UP T_HI
+#define qi_UP T_QI
+
+#define UP(X) X##_UP
+
+#define T_MAX 19
+
+typedef enum
+{
+ AARCH64_SIMD_BINOP,
+ AARCH64_SIMD_TERNOP,
+ AARCH64_SIMD_QUADOP,
+ AARCH64_SIMD_UNOP,
+ AARCH64_SIMD_GETLANE,
+ AARCH64_SIMD_SETLANE,
+ AARCH64_SIMD_CREATE,
+ AARCH64_SIMD_DUP,
+ AARCH64_SIMD_DUPLANE,
+ AARCH64_SIMD_COMBINE,
+ AARCH64_SIMD_SPLIT,
+ AARCH64_SIMD_LANEMUL,
+ AARCH64_SIMD_LANEMULL,
+ AARCH64_SIMD_LANEMULH,
+ AARCH64_SIMD_LANEMAC,
+ AARCH64_SIMD_SCALARMUL,
+ AARCH64_SIMD_SCALARMULL,
+ AARCH64_SIMD_SCALARMULH,
+ AARCH64_SIMD_SCALARMAC,
+ AARCH64_SIMD_CONVERT,
+ AARCH64_SIMD_FIXCONV,
+ AARCH64_SIMD_SELECT,
+ AARCH64_SIMD_RESULTPAIR,
+ AARCH64_SIMD_REINTERP,
+ AARCH64_SIMD_VTBL,
+ AARCH64_SIMD_VTBX,
+ AARCH64_SIMD_LOAD1,
+ AARCH64_SIMD_LOAD1LANE,
+ AARCH64_SIMD_STORE1,
+ AARCH64_SIMD_STORE1LANE,
+ AARCH64_SIMD_LOADSTRUCT,
+ AARCH64_SIMD_LOADSTRUCTLANE,
+ AARCH64_SIMD_STORESTRUCT,
+ AARCH64_SIMD_STORESTRUCTLANE,
+ AARCH64_SIMD_LOGICBINOP,
+ AARCH64_SIMD_SHIFTINSERT,
+ AARCH64_SIMD_SHIFTIMM,
+ AARCH64_SIMD_SHIFTACC
+} aarch64_simd_itype;
+
+typedef struct
+{
+ const char *name;
+ const aarch64_simd_itype itype;
+ const int bits;
+ const enum insn_code codes[T_MAX];
+ const unsigned int num_vars;
+ unsigned int base_fcode;
+} aarch64_simd_builtin_datum;
+
+#define CF(N, X) CODE_FOR_aarch64_##N##X
+
+#define VAR1(T, N, A) \
+ #N, AARCH64_SIMD_##T, UP (A), { CF (N, A) }, 1, 0
+#define VAR2(T, N, A, B) \
+ #N, AARCH64_SIMD_##T, UP (A) | UP (B), { CF (N, A), CF (N, B) }, 2, 0
+#define VAR3(T, N, A, B, C) \
+ #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C), \
+ { CF (N, A), CF (N, B), CF (N, C) }, 3, 0
+#define VAR4(T, N, A, B, C, D) \
+ #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C) | UP (D), \
+ { CF (N, A), CF (N, B), CF (N, C), CF (N, D) }, 4, 0
+#define VAR5(T, N, A, B, C, D, E) \
+ #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C) | UP (D) | UP (E), \
+ { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E) }, 5, 0
+#define VAR6(T, N, A, B, C, D, E, F) \
+ #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C) | UP (D) | UP (E) | UP (F), \
+ { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F) }, 6, 0
+#define VAR7(T, N, A, B, C, D, E, F, G) \
+ #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C) | UP (D) \
+ | UP (E) | UP (F) | UP (G), \
+ { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
+ CF (N, G) }, 7, 0
+#define VAR8(T, N, A, B, C, D, E, F, G, H) \
+ #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C) | UP (D) \
+ | UP (E) | UP (F) | UP (G) \
+ | UP (H), \
+ { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
+ CF (N, G), CF (N, H) }, 8, 0
+#define VAR9(T, N, A, B, C, D, E, F, G, H, I) \
+ #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C) | UP (D) \
+ | UP (E) | UP (F) | UP (G) \
+ | UP (H) | UP (I), \
+ { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
+ CF (N, G), CF (N, H), CF (N, I) }, 9, 0
+#define VAR10(T, N, A, B, C, D, E, F, G, H, I, J) \
+ #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C) | UP (D) \
+ | UP (E) | UP (F) | UP (G) \
+ | UP (H) | UP (I) | UP (J), \
+ { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
+ CF (N, G), CF (N, H), CF (N, I), CF (N, J) }, 10, 0
+
+#define VAR11(T, N, A, B, C, D, E, F, G, H, I, J, K) \
+ #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C) | UP (D) \
+ | UP (E) | UP (F) | UP (G) \
+ | UP (H) | UP (I) | UP (J) | UP (K), \
+ { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
+ CF (N, G), CF (N, H), CF (N, I), CF (N, J), CF (N, K) }, 11, 0
+
+#define VAR12(T, N, A, B, C, D, E, F, G, H, I, J, K, L) \
+ #N, AARCH64_SIMD_##T, UP (A) | UP (B) | UP (C) | UP (D) \
+ | UP (E) | UP (F) | UP (G) \
+ | UP (H) | UP (I) | UP (J) | UP (K) | UP (L), \
+ { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
+ CF (N, G), CF (N, H), CF (N, I), CF (N, J), CF (N, K), CF (N, L) }, 12, 0
+
+
+/* The mode entries in the following table correspond to the "key" type of the
+ instruction variant, i.e. equivalent to that which would be specified after
+ the assembler mnemonic, which usually refers to the last vector operand.
+ (Signed/unsigned/polynomial types are not differentiated between though, and
+ are all mapped onto the same mode for a given element size.) The modes
+ listed per instruction should be the same as those defined for that
+ instruction's pattern in aarch64_simd.md.
+ WARNING: Variants should be listed in the same increasing order as
+ aarch64_simd_builtin_type_bits. */
+
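+/* Illustrative aside (an editor's sketch, not part of the original
+   patch): with the VAR macros above, a table entry such as
+
+     {VAR3 (BINOP, saddl, v8qi, v4hi, v2si)}
+
+   expands to
+
+     {"saddl", AARCH64_SIMD_BINOP, T_V8QI | T_V4HI | T_V2SI,
+      {CODE_FOR_aarch64_saddlv8qi, CODE_FOR_aarch64_saddlv4hi,
+       CODE_FOR_aarch64_saddlv2si}, 3, 0},
+
+   i.e. one record naming the builtin, its shape, the set of supported
+   modes, and the insn code for each mode.  */
+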
+static aarch64_simd_builtin_datum aarch64_simd_builtin_data[] = {
+ {VAR6 (CREATE, create, v8qi, v4hi, v2si, v2sf, di, df)},
+ {VAR6 (GETLANE, get_lane_signed,
+ v8qi, v4hi, v2si, v16qi, v8hi, v4si)},
+ {VAR7 (GETLANE, get_lane_unsigned,
+ v8qi, v4hi, v2si, v16qi, v8hi, v4si, v2di)},
+ {VAR4 (GETLANE, get_lane, v2sf, di, v4sf, v2df)},
+ {VAR6 (GETLANE, get_dregoi, v8qi, v4hi, v2si, v2sf, di, df)},
+ {VAR6 (GETLANE, get_qregoi, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+ {VAR6 (GETLANE, get_dregci, v8qi, v4hi, v2si, v2sf, di, df)},
+ {VAR6 (GETLANE, get_qregci, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+ {VAR6 (GETLANE, get_dregxi, v8qi, v4hi, v2si, v2sf, di, df)},
+ {VAR6 (GETLANE, get_qregxi, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+ {VAR6 (SETLANE, set_qregoi, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+ {VAR6 (SETLANE, set_qregci, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+ {VAR6 (SETLANE, set_qregxi, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+
+ {VAR5 (REINTERP, reinterpretv8qi, v8qi, v4hi, v2si, v2sf, di)},
+ {VAR5 (REINTERP, reinterpretv4hi, v8qi, v4hi, v2si, v2sf, di)},
+ {VAR5 (REINTERP, reinterpretv2si, v8qi, v4hi, v2si, v2sf, di)},
+ {VAR5 (REINTERP, reinterpretv2sf, v8qi, v4hi, v2si, v2sf, di)},
+ {VAR5 (REINTERP, reinterpretdi, v8qi, v4hi, v2si, v2sf, di)},
+ {VAR6 (REINTERP, reinterpretv16qi, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+ {VAR6 (REINTERP, reinterpretv8hi, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+ {VAR6 (REINTERP, reinterpretv4si, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+ {VAR6 (REINTERP, reinterpretv4sf, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+ {VAR6 (REINTERP, reinterpretv2di, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+ {VAR6 (COMBINE, combine, v8qi, v4hi, v2si, v2sf, di, df)},
+
+ {VAR3 (BINOP, saddl, v8qi, v4hi, v2si)},
+ {VAR3 (BINOP, uaddl, v8qi, v4hi, v2si)},
+ {VAR3 (BINOP, saddl2, v16qi, v8hi, v4si)},
+ {VAR3 (BINOP, uaddl2, v16qi, v8hi, v4si)},
+ {VAR3 (BINOP, saddw, v8qi, v4hi, v2si)},
+ {VAR3 (BINOP, uaddw, v8qi, v4hi, v2si)},
+ {VAR3 (BINOP, saddw2, v16qi, v8hi, v4si)},
+ {VAR3 (BINOP, uaddw2, v16qi, v8hi, v4si)},
+ {VAR6 (BINOP, shadd, v8qi, v4hi, v2si, v16qi, v8hi, v4si)},
+ {VAR6 (BINOP, uhadd, v8qi, v4hi, v2si, v16qi, v8hi, v4si)},
+ {VAR6 (BINOP, srhadd, v8qi, v4hi, v2si, v16qi, v8hi, v4si)},
+ {VAR6 (BINOP, urhadd, v8qi, v4hi, v2si, v16qi, v8hi, v4si)},
+ {VAR3 (BINOP, addhn, v8hi, v4si, v2di)},
+ {VAR3 (BINOP, raddhn, v8hi, v4si, v2di)},
+ {VAR3 (TERNOP, addhn2, v8hi, v4si, v2di)},
+ {VAR3 (TERNOP, raddhn2, v8hi, v4si, v2di)},
+ {VAR3 (BINOP, ssubl, v8qi, v4hi, v2si)},
+ {VAR3 (BINOP, usubl, v8qi, v4hi, v2si)},
+ {VAR3 (BINOP, ssubl2, v16qi, v8hi, v4si) },
+ {VAR3 (BINOP, usubl2, v16qi, v8hi, v4si) },
+ {VAR3 (BINOP, ssubw, v8qi, v4hi, v2si) },
+ {VAR3 (BINOP, usubw, v8qi, v4hi, v2si) },
+ {VAR3 (BINOP, ssubw2, v16qi, v8hi, v4si) },
+ {VAR3 (BINOP, usubw2, v16qi, v8hi, v4si) },
+ {VAR11 (BINOP, sqadd, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+ si, hi, qi)},
+ {VAR11 (BINOP, uqadd, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+ si, hi, qi)},
+ {VAR11 (BINOP, sqsub, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+ si, hi, qi)},
+ {VAR11 (BINOP, uqsub, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+ si, hi, qi)},
+ {VAR11 (BINOP, suqadd, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+ si, hi, qi)},
+ {VAR11 (BINOP, usqadd, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+ si, hi, qi)},
+ {VAR6 (UNOP, sqmovun, di, v8hi, v4si, v2di, si, hi)},
+ {VAR6 (UNOP, sqmovn, di, v8hi, v4si, v2di, si, hi)},
+ {VAR6 (UNOP, uqmovn, di, v8hi, v4si, v2di, si, hi)},
+ {VAR10 (UNOP, sqabs, v8qi, v4hi, v2si, v16qi, v8hi, v4si, v2di, si, hi, qi)},
+ {VAR10 (UNOP, sqneg, v8qi, v4hi, v2si, v16qi, v8hi, v4si, v2di, si, hi, qi)},
+ {VAR2 (BINOP, pmul, v8qi, v16qi)},
+ {VAR4 (TERNOP, sqdmlal, v4hi, v2si, si, hi)},
+ {VAR4 (QUADOP, sqdmlal_lane, v4hi, v2si, si, hi) },
+ {VAR2 (QUADOP, sqdmlal_laneq, v4hi, v2si) },
+ {VAR2 (TERNOP, sqdmlal_n, v4hi, v2si) },
+ {VAR2 (TERNOP, sqdmlal2, v8hi, v4si)},
+ {VAR2 (QUADOP, sqdmlal2_lane, v8hi, v4si) },
+ {VAR2 (QUADOP, sqdmlal2_laneq, v8hi, v4si) },
+ {VAR2 (TERNOP, sqdmlal2_n, v8hi, v4si) },
+ {VAR4 (TERNOP, sqdmlsl, v4hi, v2si, si, hi)},
+ {VAR4 (QUADOP, sqdmlsl_lane, v4hi, v2si, si, hi) },
+ {VAR2 (QUADOP, sqdmlsl_laneq, v4hi, v2si) },
+ {VAR2 (TERNOP, sqdmlsl_n, v4hi, v2si) },
+ {VAR2 (TERNOP, sqdmlsl2, v8hi, v4si)},
+ {VAR2 (QUADOP, sqdmlsl2_lane, v8hi, v4si) },
+ {VAR2 (QUADOP, sqdmlsl2_laneq, v8hi, v4si) },
+ {VAR2 (TERNOP, sqdmlsl2_n, v8hi, v4si) },
+ {VAR4 (BINOP, sqdmull, v4hi, v2si, si, hi)},
+ {VAR4 (TERNOP, sqdmull_lane, v4hi, v2si, si, hi) },
+ {VAR2 (TERNOP, sqdmull_laneq, v4hi, v2si) },
+ {VAR2 (BINOP, sqdmull_n, v4hi, v2si) },
+ {VAR2 (BINOP, sqdmull2, v8hi, v4si) },
+ {VAR2 (TERNOP, sqdmull2_lane, v8hi, v4si) },
+ {VAR2 (TERNOP, sqdmull2_laneq, v8hi, v4si) },
+ {VAR2 (BINOP, sqdmull2_n, v8hi, v4si) },
+ {VAR6 (BINOP, sqdmulh, v4hi, v2si, v8hi, v4si, si, hi)},
+ {VAR6 (BINOP, sqrdmulh, v4hi, v2si, v8hi, v4si, si, hi)},
+ {VAR8 (BINOP, sshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ {VAR3 (SHIFTIMM, sshll_n, v8qi, v4hi, v2si) },
+ {VAR3 (SHIFTIMM, ushll_n, v8qi, v4hi, v2si) },
+ {VAR3 (SHIFTIMM, sshll2_n, v16qi, v8hi, v4si) },
+ {VAR3 (SHIFTIMM, ushll2_n, v16qi, v8hi, v4si) },
+ {VAR8 (BINOP, ushl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ {VAR8 (BINOP, sshl_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ {VAR8 (BINOP, ushl_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ {VAR11 (BINOP, sqshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+ si, hi, qi) },
+ {VAR11 (BINOP, uqshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+ si, hi, qi) },
+ {VAR8 (BINOP, srshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ {VAR8 (BINOP, urshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ {VAR11 (BINOP, sqrshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+ si, hi, qi) },
+ {VAR11 (BINOP, uqrshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+ si, hi, qi) },
+ {VAR8 (SHIFTIMM, sshr_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ {VAR8 (SHIFTIMM, ushr_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ {VAR8 (SHIFTIMM, srshr_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ {VAR8 (SHIFTIMM, urshr_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ {VAR8 (SHIFTACC, ssra_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ {VAR8 (SHIFTACC, usra_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ {VAR8 (SHIFTACC, srsra_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ {VAR8 (SHIFTACC, ursra_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ {VAR8 (SHIFTINSERT, ssri_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ {VAR8 (SHIFTINSERT, usri_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ {VAR8 (SHIFTINSERT, ssli_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ {VAR8 (SHIFTINSERT, usli_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ {VAR11 (SHIFTIMM, sqshlu_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+ si, hi, qi) },
+ {VAR11 (SHIFTIMM, sqshl_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+ si, hi, qi) },
+ {VAR11 (SHIFTIMM, uqshl_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+ si, hi, qi) },
+ { VAR6 (SHIFTIMM, sqshrun_n, di, v8hi, v4si, v2di, si, hi) },
+ { VAR6 (SHIFTIMM, sqrshrun_n, di, v8hi, v4si, v2di, si, hi) },
+ { VAR6 (SHIFTIMM, sqshrn_n, di, v8hi, v4si, v2di, si, hi) },
+ { VAR6 (SHIFTIMM, uqshrn_n, di, v8hi, v4si, v2di, si, hi) },
+ { VAR6 (SHIFTIMM, sqrshrn_n, di, v8hi, v4si, v2di, si, hi) },
+ { VAR6 (SHIFTIMM, uqrshrn_n, di, v8hi, v4si, v2di, si, hi) },
+ { VAR8 (BINOP, cmeq, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (BINOP, cmge, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (BINOP, cmgt, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (BINOP, cmle, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (BINOP, cmlt, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (BINOP, cmhs, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (BINOP, cmhi, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (BINOP, cmtst, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR6 (TERNOP, sqdmulh_lane, v4hi, v2si, v8hi, v4si, si, hi) },
+ { VAR6 (TERNOP, sqrdmulh_lane, v4hi, v2si, v8hi, v4si, si, hi) },
+ { VAR3 (BINOP, addp, v8qi, v4hi, v2si) },
+ { VAR1 (UNOP, addp, di) },
+ { VAR11 (BINOP, dup_lane, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di,
+ si, hi, qi) },
+ { VAR3 (BINOP, fmax, v2sf, v4sf, v2df) },
+ { VAR3 (BINOP, fmin, v2sf, v4sf, v2df) },
+ { VAR6 (BINOP, smax, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+ { VAR6 (BINOP, smin, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+ { VAR6 (BINOP, umax, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+ { VAR6 (BINOP, umin, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+ { VAR3 (UNOP, sqrt, v2sf, v4sf, v2df) },
+ {VAR12 (LOADSTRUCT, ld2,
+ v8qi, v4hi, v2si, v2sf, di, df, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+ {VAR12 (LOADSTRUCT, ld3,
+ v8qi, v4hi, v2si, v2sf, di, df, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+ {VAR12 (LOADSTRUCT, ld4,
+ v8qi, v4hi, v2si, v2sf, di, df, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+ {VAR12 (STORESTRUCT, st2,
+ v8qi, v4hi, v2si, v2sf, di, df, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+ {VAR12 (STORESTRUCT, st3,
+ v8qi, v4hi, v2si, v2sf, di, df, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+ {VAR12 (STORESTRUCT, st4,
+ v8qi, v4hi, v2si, v2sf, di, df, v16qi, v8hi, v4si, v4sf, v2di, v2df)},
+};
+
+#undef CF
+#undef VAR1
+#undef VAR2
+#undef VAR3
+#undef VAR4
+#undef VAR5
+#undef VAR6
+#undef VAR7
+#undef VAR8
+#undef VAR9
+#undef VAR10
+#undef VAR11
+#undef VAR12
+
+#define NUM_DREG_TYPES 6
+#define NUM_QREG_TYPES 6
+
+void
+init_aarch64_simd_builtins (void)
+{
+ unsigned int i, fcode = AARCH64_SIMD_BUILTIN_BASE;
+
+ /* Scalar type nodes. */
+ tree aarch64_simd_intQI_type_node;
+ tree aarch64_simd_intHI_type_node;
+ tree aarch64_simd_polyQI_type_node;
+ tree aarch64_simd_polyHI_type_node;
+ tree aarch64_simd_intSI_type_node;
+ tree aarch64_simd_intDI_type_node;
+ tree aarch64_simd_float_type_node;
+ tree aarch64_simd_double_type_node;
+
+ /* Pointer to scalar type nodes. */
+ tree intQI_pointer_node;
+ tree intHI_pointer_node;
+ tree intSI_pointer_node;
+ tree intDI_pointer_node;
+ tree float_pointer_node;
+ tree double_pointer_node;
+
+ /* Const scalar type nodes. */
+ tree const_intQI_node;
+ tree const_intHI_node;
+ tree const_intSI_node;
+ tree const_intDI_node;
+ tree const_float_node;
+ tree const_double_node;
+
+ /* Pointer to const scalar type nodes. */
+ tree const_intQI_pointer_node;
+ tree const_intHI_pointer_node;
+ tree const_intSI_pointer_node;
+ tree const_intDI_pointer_node;
+ tree const_float_pointer_node;
+ tree const_double_pointer_node;
+
+ /* Vector type nodes. */
+ tree V8QI_type_node;
+ tree V4HI_type_node;
+ tree V2SI_type_node;
+ tree V2SF_type_node;
+ tree V16QI_type_node;
+ tree V8HI_type_node;
+ tree V4SI_type_node;
+ tree V4SF_type_node;
+ tree V2DI_type_node;
+ tree V2DF_type_node;
+
+ /* Scalar unsigned type nodes. */
+ tree intUQI_type_node;
+ tree intUHI_type_node;
+ tree intUSI_type_node;
+ tree intUDI_type_node;
+
+ /* Opaque integer types for structures of vectors. */
+ tree intEI_type_node;
+ tree intOI_type_node;
+ tree intCI_type_node;
+ tree intXI_type_node;
+
+ /* Pointer to vector type nodes. */
+ tree V8QI_pointer_node;
+ tree V4HI_pointer_node;
+ tree V2SI_pointer_node;
+ tree V2SF_pointer_node;
+ tree V16QI_pointer_node;
+ tree V8HI_pointer_node;
+ tree V4SI_pointer_node;
+ tree V4SF_pointer_node;
+ tree V2DI_pointer_node;
+ tree V2DF_pointer_node;
+
+ /* Operations which return results as pairs. */
+ tree void_ftype_pv8qi_v8qi_v8qi;
+ tree void_ftype_pv4hi_v4hi_v4hi;
+ tree void_ftype_pv2si_v2si_v2si;
+ tree void_ftype_pv2sf_v2sf_v2sf;
+ tree void_ftype_pdi_di_di;
+ tree void_ftype_pv16qi_v16qi_v16qi;
+ tree void_ftype_pv8hi_v8hi_v8hi;
+ tree void_ftype_pv4si_v4si_v4si;
+ tree void_ftype_pv4sf_v4sf_v4sf;
+ tree void_ftype_pv2di_v2di_v2di;
+ tree void_ftype_pv2df_v2df_v2df;
+
+ tree reinterp_ftype_dreg[NUM_DREG_TYPES][NUM_DREG_TYPES];
+ tree reinterp_ftype_qreg[NUM_QREG_TYPES][NUM_QREG_TYPES];
+ tree dreg_types[NUM_DREG_TYPES], qreg_types[NUM_QREG_TYPES];
+
+ /* Create distinguished type nodes for AARCH64_SIMD vector element types,
+ and pointers to values of such types, so we can detect them later. */
+ aarch64_simd_intQI_type_node =
+ make_signed_type (GET_MODE_PRECISION (QImode));
+ aarch64_simd_intHI_type_node =
+ make_signed_type (GET_MODE_PRECISION (HImode));
+ aarch64_simd_polyQI_type_node =
+ make_signed_type (GET_MODE_PRECISION (QImode));
+ aarch64_simd_polyHI_type_node =
+ make_signed_type (GET_MODE_PRECISION (HImode));
+ aarch64_simd_intSI_type_node =
+ make_signed_type (GET_MODE_PRECISION (SImode));
+ aarch64_simd_intDI_type_node =
+ make_signed_type (GET_MODE_PRECISION (DImode));
+ aarch64_simd_float_type_node = make_node (REAL_TYPE);
+ aarch64_simd_double_type_node = make_node (REAL_TYPE);
+ TYPE_PRECISION (aarch64_simd_float_type_node) = FLOAT_TYPE_SIZE;
+ TYPE_PRECISION (aarch64_simd_double_type_node) = DOUBLE_TYPE_SIZE;
+ layout_type (aarch64_simd_float_type_node);
+ layout_type (aarch64_simd_double_type_node);
+
+ /* Define typedefs which exactly correspond to the modes we are basing vector
+ types on. If you change these names you'll need to change
+ the table used by aarch64_mangle_type too. */
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intQI_type_node,
+ "__builtin_aarch64_simd_qi");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intHI_type_node,
+ "__builtin_aarch64_simd_hi");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intSI_type_node,
+ "__builtin_aarch64_simd_si");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_float_type_node,
+ "__builtin_aarch64_simd_sf");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intDI_type_node,
+ "__builtin_aarch64_simd_di");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_double_type_node,
+ "__builtin_aarch64_simd_df");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyQI_type_node,
+ "__builtin_aarch64_simd_poly8");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyHI_type_node,
+ "__builtin_aarch64_simd_poly16");
+
+ intQI_pointer_node = build_pointer_type (aarch64_simd_intQI_type_node);
+ intHI_pointer_node = build_pointer_type (aarch64_simd_intHI_type_node);
+ intSI_pointer_node = build_pointer_type (aarch64_simd_intSI_type_node);
+ intDI_pointer_node = build_pointer_type (aarch64_simd_intDI_type_node);
+ float_pointer_node = build_pointer_type (aarch64_simd_float_type_node);
+ double_pointer_node = build_pointer_type (aarch64_simd_double_type_node);
+
+ /* Next create constant-qualified versions of the above types. */
+ const_intQI_node = build_qualified_type (aarch64_simd_intQI_type_node,
+ TYPE_QUAL_CONST);
+ const_intHI_node = build_qualified_type (aarch64_simd_intHI_type_node,
+ TYPE_QUAL_CONST);
+ const_intSI_node = build_qualified_type (aarch64_simd_intSI_type_node,
+ TYPE_QUAL_CONST);
+ const_intDI_node = build_qualified_type (aarch64_simd_intDI_type_node,
+ TYPE_QUAL_CONST);
+ const_float_node = build_qualified_type (aarch64_simd_float_type_node,
+ TYPE_QUAL_CONST);
+ const_double_node = build_qualified_type (aarch64_simd_double_type_node,
+ TYPE_QUAL_CONST);
+
+ const_intQI_pointer_node = build_pointer_type (const_intQI_node);
+ const_intHI_pointer_node = build_pointer_type (const_intHI_node);
+ const_intSI_pointer_node = build_pointer_type (const_intSI_node);
+ const_intDI_pointer_node = build_pointer_type (const_intDI_node);
+ const_float_pointer_node = build_pointer_type (const_float_node);
+ const_double_pointer_node = build_pointer_type (const_double_node);
+
+ /* Now create vector types based on our AARCH64 SIMD element types. */
+ /* 64-bit vectors. */
+ V8QI_type_node =
+ build_vector_type_for_mode (aarch64_simd_intQI_type_node, V8QImode);
+ V4HI_type_node =
+ build_vector_type_for_mode (aarch64_simd_intHI_type_node, V4HImode);
+ V2SI_type_node =
+ build_vector_type_for_mode (aarch64_simd_intSI_type_node, V2SImode);
+ V2SF_type_node =
+ build_vector_type_for_mode (aarch64_simd_float_type_node, V2SFmode);
+ /* 128-bit vectors. */
+ V16QI_type_node =
+ build_vector_type_for_mode (aarch64_simd_intQI_type_node, V16QImode);
+ V8HI_type_node =
+ build_vector_type_for_mode (aarch64_simd_intHI_type_node, V8HImode);
+ V4SI_type_node =
+ build_vector_type_for_mode (aarch64_simd_intSI_type_node, V4SImode);
+ V4SF_type_node =
+ build_vector_type_for_mode (aarch64_simd_float_type_node, V4SFmode);
+ V2DI_type_node =
+ build_vector_type_for_mode (aarch64_simd_intDI_type_node, V2DImode);
+ V2DF_type_node =
+ build_vector_type_for_mode (aarch64_simd_double_type_node, V2DFmode);
+
+ /* Unsigned integer types for various mode sizes. */
+ intUQI_type_node = make_unsigned_type (GET_MODE_PRECISION (QImode));
+ intUHI_type_node = make_unsigned_type (GET_MODE_PRECISION (HImode));
+ intUSI_type_node = make_unsigned_type (GET_MODE_PRECISION (SImode));
+ intUDI_type_node = make_unsigned_type (GET_MODE_PRECISION (DImode));
+
+ (*lang_hooks.types.register_builtin_type) (intUQI_type_node,
+ "__builtin_aarch64_simd_uqi");
+ (*lang_hooks.types.register_builtin_type) (intUHI_type_node,
+ "__builtin_aarch64_simd_uhi");
+ (*lang_hooks.types.register_builtin_type) (intUSI_type_node,
+ "__builtin_aarch64_simd_usi");
+ (*lang_hooks.types.register_builtin_type) (intUDI_type_node,
+ "__builtin_aarch64_simd_udi");
+
+ /* Opaque integer types for structures of vectors. */
+ intEI_type_node = make_signed_type (GET_MODE_PRECISION (EImode));
+ intOI_type_node = make_signed_type (GET_MODE_PRECISION (OImode));
+ intCI_type_node = make_signed_type (GET_MODE_PRECISION (CImode));
+ intXI_type_node = make_signed_type (GET_MODE_PRECISION (XImode));
+
+ (*lang_hooks.types.register_builtin_type) (intTI_type_node,
+ "__builtin_aarch64_simd_ti");
+ (*lang_hooks.types.register_builtin_type) (intEI_type_node,
+ "__builtin_aarch64_simd_ei");
+ (*lang_hooks.types.register_builtin_type) (intOI_type_node,
+ "__builtin_aarch64_simd_oi");
+ (*lang_hooks.types.register_builtin_type) (intCI_type_node,
+ "__builtin_aarch64_simd_ci");
+ (*lang_hooks.types.register_builtin_type) (intXI_type_node,
+ "__builtin_aarch64_simd_xi");
+
+ /* Pointers to vector types. */
+ V8QI_pointer_node = build_pointer_type (V8QI_type_node);
+ V4HI_pointer_node = build_pointer_type (V4HI_type_node);
+ V2SI_pointer_node = build_pointer_type (V2SI_type_node);
+ V2SF_pointer_node = build_pointer_type (V2SF_type_node);
+ V16QI_pointer_node = build_pointer_type (V16QI_type_node);
+ V8HI_pointer_node = build_pointer_type (V8HI_type_node);
+ V4SI_pointer_node = build_pointer_type (V4SI_type_node);
+ V4SF_pointer_node = build_pointer_type (V4SF_type_node);
+ V2DI_pointer_node = build_pointer_type (V2DI_type_node);
+ V2DF_pointer_node = build_pointer_type (V2DF_type_node);
+
+ /* Operations which return results as pairs. */
+ void_ftype_pv8qi_v8qi_v8qi =
+ build_function_type_list (void_type_node, V8QI_pointer_node,
+ V8QI_type_node, V8QI_type_node, NULL);
+ void_ftype_pv4hi_v4hi_v4hi =
+ build_function_type_list (void_type_node, V4HI_pointer_node,
+ V4HI_type_node, V4HI_type_node, NULL);
+ void_ftype_pv2si_v2si_v2si =
+ build_function_type_list (void_type_node, V2SI_pointer_node,
+ V2SI_type_node, V2SI_type_node, NULL);
+ void_ftype_pv2sf_v2sf_v2sf =
+ build_function_type_list (void_type_node, V2SF_pointer_node,
+ V2SF_type_node, V2SF_type_node, NULL);
+ void_ftype_pdi_di_di =
+ build_function_type_list (void_type_node, intDI_pointer_node,
+ aarch64_simd_intDI_type_node,
+ aarch64_simd_intDI_type_node, NULL);
+ void_ftype_pv16qi_v16qi_v16qi =
+ build_function_type_list (void_type_node, V16QI_pointer_node,
+ V16QI_type_node, V16QI_type_node, NULL);
+ void_ftype_pv8hi_v8hi_v8hi =
+ build_function_type_list (void_type_node, V8HI_pointer_node,
+ V8HI_type_node, V8HI_type_node, NULL);
+ void_ftype_pv4si_v4si_v4si =
+ build_function_type_list (void_type_node, V4SI_pointer_node,
+ V4SI_type_node, V4SI_type_node, NULL);
+ void_ftype_pv4sf_v4sf_v4sf =
+ build_function_type_list (void_type_node, V4SF_pointer_node,
+ V4SF_type_node, V4SF_type_node, NULL);
+ void_ftype_pv2di_v2di_v2di =
+ build_function_type_list (void_type_node, V2DI_pointer_node,
+ V2DI_type_node, V2DI_type_node, NULL);
+ void_ftype_pv2df_v2df_v2df =
+ build_function_type_list (void_type_node, V2DF_pointer_node,
+ V2DF_type_node, V2DF_type_node, NULL);
+
+ dreg_types[0] = V8QI_type_node;
+ dreg_types[1] = V4HI_type_node;
+ dreg_types[2] = V2SI_type_node;
+ dreg_types[3] = V2SF_type_node;
+ dreg_types[4] = aarch64_simd_intDI_type_node;
+ dreg_types[5] = aarch64_simd_double_type_node;
+
+ qreg_types[0] = V16QI_type_node;
+ qreg_types[1] = V8HI_type_node;
+ qreg_types[2] = V4SI_type_node;
+ qreg_types[3] = V4SF_type_node;
+ qreg_types[4] = V2DI_type_node;
+ qreg_types[5] = V2DF_type_node;
+
+ /* If NUM_DREG_TYPES != NUM_QREG_TYPES, we will need separate nested loops
+ for qreg and dreg reinterp inits. */
+ for (i = 0; i < NUM_DREG_TYPES; i++)
+ {
+ int j;
+ for (j = 0; j < NUM_DREG_TYPES; j++)
+ {
+ reinterp_ftype_dreg[i][j]
+ = build_function_type_list (dreg_types[i], dreg_types[j], NULL);
+ reinterp_ftype_qreg[i][j]
+ = build_function_type_list (qreg_types[i], qreg_types[j], NULL);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE (aarch64_simd_builtin_data); i++)
+ {
+ aarch64_simd_builtin_datum *d = &aarch64_simd_builtin_data[i];
+ unsigned int j, codeidx = 0;
+
+ d->base_fcode = fcode;
+
+ for (j = 0; j < T_MAX; j++)
+ {
+ const char *const modenames[] = {
+ "v8qi", "v4hi", "v2si", "v2sf", "di", "df",
+ "v16qi", "v8hi", "v4si", "v4sf", "v2di", "v2df",
+ "ti", "ei", "oi", "xi", "si", "hi", "qi"
+ };
+ char namebuf[60];
+ tree ftype = NULL;
+ enum insn_code icode;
+ int is_load = 0;
+ int is_store = 0;
+
+ /* Skip if particular mode not supported. */
+ if ((d->bits & (1 << j)) == 0)
+ continue;
+
+ icode = d->codes[codeidx++];
+
+ switch (d->itype)
+ {
+ case AARCH64_SIMD_LOAD1:
+ case AARCH64_SIMD_LOAD1LANE:
+ case AARCH64_SIMD_LOADSTRUCTLANE:
+ case AARCH64_SIMD_LOADSTRUCT:
+ is_load = 1;
+ /* Fall through. */
+ case AARCH64_SIMD_STORE1:
+ case AARCH64_SIMD_STORE1LANE:
+ case AARCH64_SIMD_STORESTRUCTLANE:
+ case AARCH64_SIMD_STORESTRUCT:
+ if (!is_load)
+ is_store = 1;
+ /* Fall through. */
+ case AARCH64_SIMD_UNOP:
+ case AARCH64_SIMD_BINOP:
+ case AARCH64_SIMD_LOGICBINOP:
+ case AARCH64_SIMD_SHIFTINSERT:
+ case AARCH64_SIMD_TERNOP:
+ case AARCH64_SIMD_QUADOP:
+ case AARCH64_SIMD_GETLANE:
+ case AARCH64_SIMD_SETLANE:
+ case AARCH64_SIMD_CREATE:
+ case AARCH64_SIMD_DUP:
+ case AARCH64_SIMD_DUPLANE:
+ case AARCH64_SIMD_SHIFTIMM:
+ case AARCH64_SIMD_SHIFTACC:
+ case AARCH64_SIMD_COMBINE:
+ case AARCH64_SIMD_SPLIT:
+ case AARCH64_SIMD_CONVERT:
+ case AARCH64_SIMD_FIXCONV:
+ case AARCH64_SIMD_LANEMUL:
+ case AARCH64_SIMD_LANEMULL:
+ case AARCH64_SIMD_LANEMULH:
+ case AARCH64_SIMD_LANEMAC:
+ case AARCH64_SIMD_SCALARMUL:
+ case AARCH64_SIMD_SCALARMULL:
+ case AARCH64_SIMD_SCALARMULH:
+ case AARCH64_SIMD_SCALARMAC:
+ case AARCH64_SIMD_SELECT:
+ case AARCH64_SIMD_VTBL:
+ case AARCH64_SIMD_VTBX:
+ {
+ int k;
+ tree return_type = void_type_node, args = void_list_node;
+
+ /* Build a function type directly from the insn_data for this
+ builtin. The build_function_type() function takes care of
+ removing duplicates for us. */
+ for (k = insn_data[icode].n_operands - 1; k >= 0; k--)
+ {
+ tree eltype;
+
+ /* Skip an internal operand for vget_{low, high}. */
+ if (k == 2 && d->itype == AARCH64_SIMD_SPLIT)
+ continue;
+
+ if (is_load && k == 1)
+ {
+ /* AdvSIMD load patterns always have the memory operand
+ (a DImode pointer) in the operand 1 position. We
+ want a const pointer to the element type in that
+ position. */
+ gcc_assert (insn_data[icode].operand[k].mode ==
+ DImode);
+
+ switch (1 << j)
+ {
+ case T_V8QI:
+ case T_V16QI:
+ eltype = const_intQI_pointer_node;
+ break;
+
+ case T_V4HI:
+ case T_V8HI:
+ eltype = const_intHI_pointer_node;
+ break;
+
+ case T_V2SI:
+ case T_V4SI:
+ eltype = const_intSI_pointer_node;
+ break;
+
+ case T_V2SF:
+ case T_V4SF:
+ eltype = const_float_pointer_node;
+ break;
+
+ case T_DI:
+ case T_V2DI:
+ eltype = const_intDI_pointer_node;
+ break;
+
+ case T_DF:
+ case T_V2DF:
+ eltype = const_double_pointer_node;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else if (is_store && k == 0)
+ {
+ /* Similarly, AdvSIMD store patterns use operand 0 as
+ the memory location to store to (a DImode pointer).
+ Use a pointer to the element type of the store in
+ that position. */
+ gcc_assert (insn_data[icode].operand[k].mode ==
+ DImode);
+
+ switch (1 << j)
+ {
+ case T_V8QI:
+ case T_V16QI:
+ eltype = intQI_pointer_node;
+ break;
+
+ case T_V4HI:
+ case T_V8HI:
+ eltype = intHI_pointer_node;
+ break;
+
+ case T_V2SI:
+ case T_V4SI:
+ eltype = intSI_pointer_node;
+ break;
+
+ case T_V2SF:
+ case T_V4SF:
+ eltype = float_pointer_node;
+ break;
+
+ case T_DI:
+ case T_V2DI:
+ eltype = intDI_pointer_node;
+ break;
+
+ case T_DF:
+ case T_V2DF:
+ eltype = double_pointer_node;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else
+ {
+ switch (insn_data[icode].operand[k].mode)
+ {
+ case VOIDmode:
+ eltype = void_type_node;
+ break;
+ /* Scalars. */
+ case QImode:
+ eltype = aarch64_simd_intQI_type_node;
+ break;
+ case HImode:
+ eltype = aarch64_simd_intHI_type_node;
+ break;
+ case SImode:
+ eltype = aarch64_simd_intSI_type_node;
+ break;
+ case SFmode:
+ eltype = aarch64_simd_float_type_node;
+ break;
+ case DFmode:
+ eltype = aarch64_simd_double_type_node;
+ break;
+ case DImode:
+ eltype = aarch64_simd_intDI_type_node;
+ break;
+ case TImode:
+ eltype = intTI_type_node;
+ break;
+ case EImode:
+ eltype = intEI_type_node;
+ break;
+ case OImode:
+ eltype = intOI_type_node;
+ break;
+ case CImode:
+ eltype = intCI_type_node;
+ break;
+ case XImode:
+ eltype = intXI_type_node;
+ break;
+ /* 64-bit vectors. */
+ case V8QImode:
+ eltype = V8QI_type_node;
+ break;
+ case V4HImode:
+ eltype = V4HI_type_node;
+ break;
+ case V2SImode:
+ eltype = V2SI_type_node;
+ break;
+ case V2SFmode:
+ eltype = V2SF_type_node;
+ break;
+ /* 128-bit vectors. */
+ case V16QImode:
+ eltype = V16QI_type_node;
+ break;
+ case V8HImode:
+ eltype = V8HI_type_node;
+ break;
+ case V4SImode:
+ eltype = V4SI_type_node;
+ break;
+ case V4SFmode:
+ eltype = V4SF_type_node;
+ break;
+ case V2DImode:
+ eltype = V2DI_type_node;
+ break;
+ case V2DFmode:
+ eltype = V2DF_type_node;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ if (k == 0 && !is_store)
+ return_type = eltype;
+ else
+ args = tree_cons (NULL_TREE, eltype, args);
+ }
+
+ ftype = build_function_type (return_type, args);
+ }
+ break;
+
+ case AARCH64_SIMD_RESULTPAIR:
+ {
+ switch (insn_data[icode].operand[1].mode)
+ {
+ case V8QImode:
+ ftype = void_ftype_pv8qi_v8qi_v8qi;
+ break;
+ case V4HImode:
+ ftype = void_ftype_pv4hi_v4hi_v4hi;
+ break;
+ case V2SImode:
+ ftype = void_ftype_pv2si_v2si_v2si;
+ break;
+ case V2SFmode:
+ ftype = void_ftype_pv2sf_v2sf_v2sf;
+ break;
+ case DImode:
+ ftype = void_ftype_pdi_di_di;
+ break;
+ case V16QImode:
+ ftype = void_ftype_pv16qi_v16qi_v16qi;
+ break;
+ case V8HImode:
+ ftype = void_ftype_pv8hi_v8hi_v8hi;
+ break;
+ case V4SImode:
+ ftype = void_ftype_pv4si_v4si_v4si;
+ break;
+ case V4SFmode:
+ ftype = void_ftype_pv4sf_v4sf_v4sf;
+ break;
+ case V2DImode:
+ ftype = void_ftype_pv2di_v2di_v2di;
+ break;
+ case V2DFmode:
+ ftype = void_ftype_pv2df_v2df_v2df;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ break;
+
+ case AARCH64_SIMD_REINTERP:
+ {
+ /* We iterate over 6 doubleword types, then 6 quadword
+ types. */
+ int rhs_d = j % NUM_DREG_TYPES;
+ int rhs_q = (j - NUM_DREG_TYPES) % NUM_QREG_TYPES;
+ switch (insn_data[icode].operand[0].mode)
+ {
+ case V8QImode:
+ ftype = reinterp_ftype_dreg[0][rhs_d];
+ break;
+ case V4HImode:
+ ftype = reinterp_ftype_dreg[1][rhs_d];
+ break;
+ case V2SImode:
+ ftype = reinterp_ftype_dreg[2][rhs_d];
+ break;
+ case V2SFmode:
+ ftype = reinterp_ftype_dreg[3][rhs_d];
+ break;
+ case DImode:
+ ftype = reinterp_ftype_dreg[4][rhs_d];
+ break;
+ case DFmode:
+ ftype = reinterp_ftype_dreg[5][rhs_d];
+ break;
+ case V16QImode:
+ ftype = reinterp_ftype_qreg[0][rhs_q];
+ break;
+ case V8HImode:
+ ftype = reinterp_ftype_qreg[1][rhs_q];
+ break;
+ case V4SImode:
+ ftype = reinterp_ftype_qreg[2][rhs_q];
+ break;
+ case V4SFmode:
+ ftype = reinterp_ftype_qreg[3][rhs_q];
+ break;
+ case V2DImode:
+ ftype = reinterp_ftype_qreg[4][rhs_q];
+ break;
+ case V2DFmode:
+ ftype = reinterp_ftype_qreg[5][rhs_q];
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ gcc_assert (ftype != NULL);
+
+ snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s%s",
+ d->name, modenames[j]);
+
+ add_builtin_function (namebuf, ftype, fcode++, BUILT_IN_MD, NULL,
+ NULL_TREE);
+ }
+ }
+}
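+
+/* For illustration: given the naming scheme in the loop above, the
+   BINOP entry "saddl" with mode v8qi is registered as
+   __builtin_aarch64_saddlv8qi, and each variant consumes one function
+   code, allocated sequentially from AARCH64_SIMD_BUILTIN_BASE.  */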
+
+static int
+aarch64_simd_builtin_compare (const void *a, const void *b)
+{
+ const aarch64_simd_builtin_datum *const key =
+ (const aarch64_simd_builtin_datum *) a;
+ const aarch64_simd_builtin_datum *const memb =
+ (const aarch64_simd_builtin_datum *) b;
+ unsigned int soughtcode = key->base_fcode;
+
+ if (soughtcode >= memb->base_fcode
+ && soughtcode < memb->base_fcode + memb->num_vars)
+ return 0;
+ else if (soughtcode < memb->base_fcode)
+ return -1;
+ else
+ return 1;
+}
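+
+/* For example, for a table entry with base_fcode 100 and num_vars 6,
+   the comparator above reports a match for any sought fcode in the
+   range [100, 106), which lets bsearch map a function code back to
+   the table entry that owns it.  */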
+
+
+static enum insn_code
+locate_simd_builtin_icode (int fcode, aarch64_simd_itype * itype)
+{
+ aarch64_simd_builtin_datum key
+ = { NULL, (aarch64_simd_itype) 0, 0, {CODE_FOR_nothing}, 0, 0};
+ aarch64_simd_builtin_datum *found;
+ int idx;
+
+ key.base_fcode = fcode;
+ found = (aarch64_simd_builtin_datum *)
+ bsearch (&key, &aarch64_simd_builtin_data[0],
+ ARRAY_SIZE (aarch64_simd_builtin_data),
+ sizeof (aarch64_simd_builtin_data[0]),
+ aarch64_simd_builtin_compare);
+ gcc_assert (found);
+ idx = fcode - (int) found->base_fcode;
+ gcc_assert (idx >= 0 && idx < T_MAX && idx < (int) found->num_vars);
+
+ if (itype)
+ *itype = found->itype;
+
+ return found->codes[idx];
+}
+
+typedef enum
+{
+ SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_CONSTANT,
+ SIMD_ARG_STOP
+} builtin_simd_arg;
+
+#define SIMD_MAX_BUILTIN_ARGS 5
+
+static rtx
+aarch64_simd_expand_args (rtx target, int icode, int have_retval,
+ tree exp, ...)
+{
+ va_list ap;
+ rtx pat;
+ tree arg[SIMD_MAX_BUILTIN_ARGS];
+ rtx op[SIMD_MAX_BUILTIN_ARGS];
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode[SIMD_MAX_BUILTIN_ARGS];
+ int argc = 0;
+
+ if (have_retval
+ && (!target
+ || GET_MODE (target) != tmode
+ || !(*insn_data[icode].operand[0].predicate) (target, tmode)))
+ target = gen_reg_rtx (tmode);
+
+ va_start (ap, exp);
+
+ for (;;)
+ {
+ builtin_simd_arg thisarg = (builtin_simd_arg) va_arg (ap, int);
+
+ if (thisarg == SIMD_ARG_STOP)
+ break;
+ else
+ {
+ arg[argc] = CALL_EXPR_ARG (exp, argc);
+ op[argc] = expand_normal (arg[argc]);
+ mode[argc] = insn_data[icode].operand[argc + have_retval].mode;
+
+ switch (thisarg)
+ {
+ case SIMD_ARG_COPY_TO_REG:
+ /*gcc_assert (GET_MODE (op[argc]) == mode[argc]); */
+ if (!(*insn_data[icode].operand[argc + have_retval].predicate)
+ (op[argc], mode[argc]))
+ op[argc] = copy_to_mode_reg (mode[argc], op[argc]);
+ break;
+
+ case SIMD_ARG_CONSTANT:
+ if (!(*insn_data[icode].operand[argc + have_retval].predicate)
+ (op[argc], mode[argc]))
+ error_at (EXPR_LOCATION (exp), "incompatible type for argument %d, "
+ "expected %<const int%>", argc + 1);
+ break;
+
+ case SIMD_ARG_STOP:
+ gcc_unreachable ();
+ }
+
+ argc++;
+ }
+ }
+
+ va_end (ap);
+
+ if (have_retval)
+ switch (argc)
+ {
+ case 1:
+ pat = GEN_FCN (icode) (target, op[0]);
+ break;
+
+ case 2:
+ pat = GEN_FCN (icode) (target, op[0], op[1]);
+ break;
+
+ case 3:
+ pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
+ break;
+
+ case 4:
+ pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
+ break;
+
+ case 5:
+ pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ else
+ switch (argc)
+ {
+ case 1:
+ pat = GEN_FCN (icode) (op[0]);
+ break;
+
+ case 2:
+ pat = GEN_FCN (icode) (op[0], op[1]);
+ break;
+
+ case 3:
+ pat = GEN_FCN (icode) (op[0], op[1], op[2]);
+ break;
+
+ case 4:
+ pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
+ break;
+
+ case 5:
+ pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (!pat)
+ return 0;
+
+ emit_insn (pat);
+
+ return target;
+}
+
+/* Expand an AArch64 AdvSIMD builtin (intrinsic).  */
+rtx
+aarch64_simd_expand_builtin (int fcode, tree exp, rtx target)
+{
+ aarch64_simd_itype itype;
+ enum insn_code icode = locate_simd_builtin_icode (fcode, &itype);
+
+ switch (itype)
+ {
+ case AARCH64_SIMD_UNOP:
+ return aarch64_simd_expand_args (target, icode, 1, exp,
+ SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_STOP);
+
+ case AARCH64_SIMD_BINOP:
+ {
+ rtx arg2 = expand_normal (CALL_EXPR_ARG (exp, 1));
+ /* Handle constants only if the predicate allows it. */
+ bool op1_const_int_p =
+ (CONST_INT_P (arg2)
+ && (*insn_data[icode].operand[2].predicate)
+ (arg2, insn_data[icode].operand[2].mode));
+ return aarch64_simd_expand_args
+ (target, icode, 1, exp,
+ SIMD_ARG_COPY_TO_REG,
+ op1_const_int_p ? SIMD_ARG_CONSTANT : SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_STOP);
+ }
+
+ case AARCH64_SIMD_TERNOP:
+ return aarch64_simd_expand_args (target, icode, 1, exp,
+ SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_STOP);
+
+ case AARCH64_SIMD_QUADOP:
+ return aarch64_simd_expand_args (target, icode, 1, exp,
+ SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_STOP);
+ case AARCH64_SIMD_LOAD1:
+ case AARCH64_SIMD_LOADSTRUCT:
+ return aarch64_simd_expand_args (target, icode, 1, exp,
+ SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
+
+ case AARCH64_SIMD_STORESTRUCT:
+ return aarch64_simd_expand_args (target, icode, 0, exp,
+ SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
+
+ case AARCH64_SIMD_REINTERP:
+ return aarch64_simd_expand_args (target, icode, 1, exp,
+ SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
+
+ case AARCH64_SIMD_CREATE:
+ return aarch64_simd_expand_args (target, icode, 1, exp,
+ SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
+
+ case AARCH64_SIMD_COMBINE:
+ return aarch64_simd_expand_args (target, icode, 1, exp,
+ SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
+
+ case AARCH64_SIMD_GETLANE:
+ return aarch64_simd_expand_args (target, icode, 1, exp,
+ SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_CONSTANT,
+ SIMD_ARG_STOP);
+
+ case AARCH64_SIMD_SETLANE:
+ return aarch64_simd_expand_args (target, icode, 1, exp,
+ SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_CONSTANT,
+ SIMD_ARG_STOP);
+
+ case AARCH64_SIMD_SHIFTIMM:
+ return aarch64_simd_expand_args (target, icode, 1, exp,
+ SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_CONSTANT,
+ SIMD_ARG_STOP);
+
+ case AARCH64_SIMD_SHIFTACC:
+ case AARCH64_SIMD_SHIFTINSERT:
+ return aarch64_simd_expand_args (target, icode, 1, exp,
+ SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_CONSTANT,
+ SIMD_ARG_STOP);
+
+ default:
+ gcc_unreachable ();
+ }
+}
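+
+/* For illustration: lane indices and shift amounts are expanded with
+   SIMD_ARG_CONSTANT above, so e.g. a GETLANE builtin invoked with a
+   non-constant lane index fails the operand predicate and is rejected
+   with the "expected const int" error rather than being silently
+   copied into a register.  */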
diff --git a/gcc/config/aarch64/aarch64-cores.def b/gcc/config/aarch64/aarch64-cores.def
new file mode 100644
index 00000000000..06cc9825d39
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-cores.def
@@ -0,0 +1,38 @@
+/* Copyright (C) 2011, 2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* This is a list of cores that implement AArch64.
+
+ Before using #include to read this file, define a macro:
+
+ AARCH64_CORE(CORE_NAME, CORE_IDENT, ARCH, FLAGS, COSTS)
+
+ The CORE_NAME is the name of the core, represented as a string constant.
+ The CORE_IDENT is the name of the core, represented as an identifier.
+ ARCH is the architecture revision implemented by the chip.
+ FLAGS are the bitwise-or of the traits that apply to that core.
+ This need not include flags implied by the architecture.
+ COSTS is the name of the rtx_costs routine to use. */
+
+/* V8 Architecture Processors.
+ This list currently contains example CPUs that implement AArch64, and
+ therefore serves as a template for adding more CPUs in the future. */
+
+AARCH64_CORE("example-1", large, 8, AARCH64_FL_FPSIMD, generic)
+AARCH64_CORE("example-2", small, 8, AARCH64_FL_FPSIMD, generic)
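+
+/* For illustration: with the definition used in aarch64-opts.h,
+     #define AARCH64_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) IDENT,
+   the two entries above contribute the enumerators 'large' and
+   'small' to enum aarch64_processor.  */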
diff --git a/gcc/config/aarch64/aarch64-elf-raw.h b/gcc/config/aarch64/aarch64-elf-raw.h
new file mode 100644
index 00000000000..d9ec53ff60e
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-elf-raw.h
@@ -0,0 +1,32 @@
+/* Machine description for AArch64 architecture.
+ Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Support for bare-metal builds. */
+#ifndef GCC_AARCH64_ELF_RAW_H
+#define GCC_AARCH64_ELF_RAW_H
+
+#define STARTFILE_SPEC " crti%O%s crtbegin%O%s crt0%O%s"
+#define ENDFILE_SPEC " crtend%O%s crtn%O%s"
+
+#ifndef LINK_SPEC
+#define LINK_SPEC "%{mbig-endian:-EB} %{mlittle-endian:-EL} -X"
+#endif
+
+#endif /* GCC_AARCH64_ELF_RAW_H */
diff --git a/gcc/config/aarch64/aarch64-elf.h b/gcc/config/aarch64/aarch64-elf.h
new file mode 100644
index 00000000000..6d8b933729a
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-elf.h
@@ -0,0 +1,123 @@
+/* Machine description for AArch64 architecture.
+ Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_AARCH64_ELF_H
+#define GCC_AARCH64_ELF_H
+
+
+#define ASM_OUTPUT_LABELREF(FILE, NAME) \
+ aarch64_asm_output_labelref (FILE, NAME)
+
+#define TEXT_SECTION_ASM_OP "\t.text"
+#define DATA_SECTION_ASM_OP "\t.data"
+#define BSS_SECTION_ASM_OP "\t.bss"
+
+#define CTORS_SECTION_ASM_OP "\t.section\t.init_array,\"aw\",%init_array"
+#define DTORS_SECTION_ASM_OP "\t.section\t.fini_array,\"aw\",%fini_array"
+
+#undef INIT_SECTION_ASM_OP
+#undef FINI_SECTION_ASM_OP
+#define INIT_ARRAY_SECTION_ASM_OP CTORS_SECTION_ASM_OP
+#define FINI_ARRAY_SECTION_ASM_OP DTORS_SECTION_ASM_OP
+
+/* Since we use .init_array/.fini_array we don't need the markers at
+ the start and end of the ctors/dtors arrays. */
+#define CTOR_LIST_BEGIN asm (CTORS_SECTION_ASM_OP)
+#define CTOR_LIST_END /* empty */
+#define DTOR_LIST_BEGIN asm (DTORS_SECTION_ASM_OP)
+#define DTOR_LIST_END /* empty */
+
+#undef TARGET_ASM_CONSTRUCTOR
+#define TARGET_ASM_CONSTRUCTOR aarch64_elf_asm_constructor
+
+#undef TARGET_ASM_DESTRUCTOR
+#define TARGET_ASM_DESTRUCTOR aarch64_elf_asm_destructor
+
+#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
+/* Support for -falign-* switches. Use .p2align to ensure that code
+ sections are padded with NOP instructions, rather than zeros. */
+#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE, LOG, MAX_SKIP) \
+ do \
+ { \
+ if ((LOG) != 0) \
+ { \
+ if ((MAX_SKIP) == 0) \
+ fprintf ((FILE), "\t.p2align %d\n", (int) (LOG)); \
+ else \
+ fprintf ((FILE), "\t.p2align %d,,%d\n", \
+ (int) (LOG), (int) (MAX_SKIP)); \
+ } \
+ } while (0)
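+
+/* For example, with LOG == 4 and MAX_SKIP == 7 this emits
+   ".p2align 4,,7": align to a 16-byte boundary, unless doing so
+   would insert more than 7 bytes of padding.  */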
+
+#endif /* HAVE_GAS_MAX_SKIP_P2ALIGN */
+
+#define JUMP_TABLES_IN_TEXT_SECTION 0
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
+ do { \
+ switch (GET_MODE (BODY)) \
+ { \
+ case QImode: \
+ asm_fprintf (STREAM, "\t.byte\t(%LL%d - %LLrtx%d) / 4\n", \
+ VALUE, REL); \
+ break; \
+ case HImode: \
+ asm_fprintf (STREAM, "\t.2byte\t(%LL%d - %LLrtx%d) / 4\n", \
+ VALUE, REL); \
+ break; \
+ case SImode: \
+ case DImode: /* See comment in aarch64_output_casesi. */ \
+ asm_fprintf (STREAM, "\t.word\t(%LL%d - %LLrtx%d) / 4\n", \
+ VALUE, REL); \
+ break; \
+ default: \
+ gcc_unreachable (); \
+ } \
+ } while (0)
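+
+/* For illustration: with LOCAL_LABEL_PREFIX "." (defined below), a
+   HImode table entry with VALUE 5 and REL 3 is emitted as
+   ".2byte (.L5 - .Lrtx3) / 4", i.e. the distance from the table's
+   anchor label to the case label, in units of 4-byte instructions.  */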
+
+#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
+ fprintf(STREAM, "\t.align\t%d\n", (int)POWER)
+
+#define ASM_COMMENT_START "//"
+
+#define REGISTER_PREFIX ""
+#define LOCAL_LABEL_PREFIX "."
+#define USER_LABEL_PREFIX ""
+
+#define GLOBAL_ASM_OP "\t.global\t"
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "\
+%{mbig-endian:-EB} \
+%{mlittle-endian:-EL} \
+%{mcpu=*:-mcpu=%*} \
+%{march=*:-march=%*}"
+#endif
+
+#undef TYPE_OPERAND_FMT
+#define TYPE_OPERAND_FMT "%%%s"
+
+#undef TARGET_ASM_NAMED_SECTION
+#define TARGET_ASM_NAMED_SECTION aarch64_elf_asm_named_section
+
+/* Stabs debug not required. */
+#undef DBX_DEBUGGING_INFO
+
+#endif /* GCC_AARCH64_ELF_H */
diff --git a/gcc/config/aarch64/aarch64-generic.md b/gcc/config/aarch64/aarch64-generic.md
new file mode 100644
index 00000000000..4c9e4555b3f
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-generic.md
@@ -0,0 +1,38 @@
+;; Machine description for AArch64 architecture.
+;; Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Generic scheduler
+
+(define_automaton "aarch64")
+
+(define_cpu_unit "core" "aarch64")
+
+(define_attr "is_load" "yes,no"
+ (if_then_else (eq_attr "v8type" "fpsimd_load,fpsimd_load2,load1,load2")
+ (const_string "yes")
+ (const_string "no")))
+
+(define_insn_reservation "load" 2
+ (eq_attr "is_load" "yes")
+ "core")
+
+(define_insn_reservation "nonload" 1
+ (eq_attr "is_load" "no")
+ "core")
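+
+;; For illustration: under this model, any instruction whose "v8type"
+;; attribute marks it as a load has a latency of 2 cycles on the single
+;; "core" unit; every other instruction has a latency of 1.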
diff --git a/gcc/config/aarch64/aarch64-linux.h b/gcc/config/aarch64/aarch64-linux.h
new file mode 100644
index 00000000000..95aaafab95a
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-linux.h
@@ -0,0 +1,44 @@
+/* Machine description for AArch64 architecture.
+ Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_AARCH64_LINUX_H
+#define GCC_AARCH64_LINUX_H
+
+#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-aarch64.so.1"
+
+#define LINUX_TARGET_LINK_SPEC "%{h*} \
+ %{static:-Bstatic} \
+ %{shared:-shared} \
+ %{symbolic:-Bsymbolic} \
+ %{rdynamic:-export-dynamic} \
+ -dynamic-linker " GNU_USER_DYNAMIC_LINKER " \
+ -X \
+ %{mbig-endian:-EB} %{mlittle-endian:-EL}"
+
+#define LINK_SPEC LINUX_TARGET_LINK_SPEC
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ GNU_USER_TARGET_OS_CPP_BUILTINS(); \
+ } \
+ while (0)
+
+#endif /* GCC_AARCH64_LINUX_H */
diff --git a/gcc/config/aarch64/aarch64-modes.def b/gcc/config/aarch64/aarch64-modes.def
new file mode 100644
index 00000000000..ac05881f9ca
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-modes.def
@@ -0,0 +1,54 @@
+/* Machine description for AArch64 architecture.
+ Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+CC_MODE (CCFP);
+CC_MODE (CCFPE);
+CC_MODE (CC_SWP);
+CC_MODE (CC_ZESWP); /* zero-extend LHS (but swap to make it RHS). */
+CC_MODE (CC_SESWP); /* sign-extend LHS (but swap to make it RHS). */
+CC_MODE (CC_NZ); /* Only N and Z bits of condition flags are valid. */
+
+/* Vector modes. */
+VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI. */
+VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI. */
+VECTOR_MODES (FLOAT, 8); /* V2SF. */
+VECTOR_MODES (FLOAT, 16); /* V4SF V2DF. */
+
+/* Oct Int: 256-bit integer mode needed for 32-byte vector arguments. */
+INT_MODE (OI, 32);
+
+/* Opaque integer modes for 3, 6 or 8 Neon double registers (2 is
+ TImode). */
+INT_MODE (EI, 24);
+INT_MODE (CI, 48);
+INT_MODE (XI, 64);
+
+/* Vector modes for register lists. */
+VECTOR_MODES (INT, 32); /* V32QI V16HI V8SI V4DI. */
+VECTOR_MODES (FLOAT, 32); /* V8SF V4DF. */
+
+VECTOR_MODES (INT, 48); /* V48QI V24HI V12SI V6DI. */
+VECTOR_MODES (FLOAT, 48); /* V12SF V6DF. */
+
+VECTOR_MODES (INT, 64); /* V64QI V32HI V16SI V8DI. */
+VECTOR_MODES (FLOAT, 64); /* V16SF V8DF. */
+
+/* Quad float: 128-bit floating mode for long doubles. */
+FLOAT_MODE (TF, 16, ieee_quad_format);
diff --git a/gcc/config/aarch64/aarch64-option-extensions.def b/gcc/config/aarch64/aarch64-option-extensions.def
new file mode 100644
index 00000000000..a5d298a6103
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-option-extensions.def
@@ -0,0 +1,37 @@
+/* Copyright (C) 2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* This is a list of ISA extensions in AArch64.
+
+ Before using #include to read this file, define a macro:
+
+ AARCH64_OPT_EXTENSION(EXT_NAME, FLAGS_ON, FLAGS_OFF)
+
+ EXT_NAME is the name of the extension, represented as a string constant.
+ FLAGS_ON are the bitwise-or of the features that the extension adds.
+ FLAGS_OFF are the bitwise-or of the features that the extension removes. */
+
+/* V8 Architecture Extensions.
+ This list currently contains example extensions for CPUs that implement
+ AArch64, and therefore serves as a template for adding more extensions
+ in the future. */
+
+AARCH64_OPT_EXTENSION("fp", AARCH64_FL_FP, AARCH64_FL_FPSIMD | AARCH64_FL_CRYPTO)
+AARCH64_OPT_EXTENSION("simd", AARCH64_FL_FPSIMD, AARCH64_FL_SIMD | AARCH64_FL_CRYPTO)
+AARCH64_OPT_EXTENSION("crypto", AARCH64_FL_CRYPTO | AARCH64_FL_FPSIMD, AARCH64_FL_CRYPTO)
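+
+/* For illustration (assuming the usual AArch64 option syntax): an
+   architecture or CPU string such as -march=<arch>+crypto enables
+   FLAGS_ON of "crypto" (AARCH64_FL_CRYPTO | AARCH64_FL_FPSIMD), while
+   +nofp disables FLAGS_OFF of "fp" (AARCH64_FL_FPSIMD |
+   AARCH64_FL_CRYPTO).  */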
diff --git a/gcc/config/aarch64/aarch64-opts.h b/gcc/config/aarch64/aarch64-opts.h
new file mode 100644
index 00000000000..6d7a2fdf969
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-opts.h
@@ -0,0 +1,64 @@
+/* Copyright (C) 2011, 2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Definitions for option handling for AArch64. */
+
+#ifndef GCC_AARCH64_OPTS_H
+#define GCC_AARCH64_OPTS_H
+
+/* The various cores that implement AArch64. */
+enum aarch64_processor
+{
+#define AARCH64_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
+ IDENT,
+#include "aarch64-cores.def"
+#undef AARCH64_CORE
+ /* Used to indicate that no processor has been specified. */
+ generic,
+ /* Used to mark the end of the processor table. */
+ aarch64_none
+};
+
+/* TLS types. */
+enum aarch64_tls_type {
+ TLS_TRADITIONAL,
+ TLS_DESCRIPTORS
+};
+
+/* The code model defines the address generation strategy.
+ Most have a PIC and non-PIC variant. */
+enum aarch64_code_model {
+ /* Static code and data fit within a 1MB region.
+ Not fully implemented, mostly treated as SMALL. */
+ AARCH64_CMODEL_TINY,
+ /* Static code, data and GOT/PLT fit within a 1MB region.
+ Not fully implemented, mostly treated as SMALL_PIC. */
+ AARCH64_CMODEL_TINY_PIC,
+ /* Static code and data fit within a 4GB region.
+ The default non-PIC code model. */
+ AARCH64_CMODEL_SMALL,
+ /* Static code, data and GOT/PLT fit within a 4GB region.
+ The default PIC code model. */
+ AARCH64_CMODEL_SMALL_PIC,
+ /* No assumptions about addresses of code and data.
+ The PIC variant is not yet implemented. */
+ AARCH64_CMODEL_LARGE
+};
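+
+/* For illustration (assuming the usual GCC option spelling): the code
+   model is selected with -mcmodel=tiny, -mcmodel=small or
+   -mcmodel=large, the PIC variants being chosen when -fpic/-fPIC is
+   also in effect.  */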
+
+#endif
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
new file mode 100644
index 00000000000..e6d35e4db37
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -0,0 +1,258 @@
+/* Machine description for AArch64 architecture.
+ Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+
+#ifndef GCC_AARCH64_PROTOS_H
+#define GCC_AARCH64_PROTOS_H
+
+ /* This generator struct and enum are used to wrap a function pointer
+ to a function that generates an RTX fragment from either 3 or 4
+ operands.
+
+ The omn flavour wraps a function that generates a synchronization
+ instruction from 3 operands: old value, memory and new value.
+
+ The omrn flavour wraps a function that generates a synchronization
+ instruction from 4 operands: old value, memory, required value and
+ new value. */
+
+enum aarch64_sync_generator_tag
+{
+ aarch64_sync_generator_omn,
+ aarch64_sync_generator_omrn
+};
+
+ /* Wrapper to pass around a polymorphic pointer to a sync instruction
+ generator together with the tag identifying its flavour. */
+struct aarch64_sync_generator
+{
+ enum aarch64_sync_generator_tag op;
+ union
+ {
+ rtx (*omn) (rtx, rtx, rtx);
+ rtx (*omrn) (rtx, rtx, rtx, rtx);
+ } u;
+};
+
+/*
+ SYMBOL_CONTEXT_ADR
+ The symbol is used in a load-address operation.
+ SYMBOL_CONTEXT_MEM
+ The symbol is used as the address in a MEM.
+ */
+enum aarch64_symbol_context
+{
+ SYMBOL_CONTEXT_MEM,
+ SYMBOL_CONTEXT_ADR
+};
+
+/* SYMBOL_SMALL_ABSOLUTE: Generate symbol accesses through
+ hi and lo relocs, with the base address calculated using a
+ PC-relative reloc.
+ So to get the address of foo, we generate
+ adrp x0, foo
+ add x0, x0, :lo12:foo
+
+ To load from or store to foo, we can use the corresponding
+ load/store variants, which generate
+ ldr x0, [x0,:lo12:foo]
+ or
+ str x1, [x0, :lo12:foo]
+
+ This corresponds to the small code model of the compiler.
+
+ SYMBOL_SMALL_GOT: Similar to the one above but this
+ gives us the GOT entry of the symbol being referred to.
+ The GOT entry for foo is calculated using the following
+ sequence of instructions: the ADRP instruction gets us to
+ the page containing the GOT entry of the symbol, and the
+ gotoff_lo12 relocation gets us its offset within that page.
+
+ adrp x0, :got:foo
+ ldr x0, [x0, :gotoff_lo12:foo]
+
+ This corresponds to the small PIC model of the compiler.
+
+ SYMBOL_SMALL_TLSGD
+ SYMBOL_SMALL_TLSDESC
+ SYMBOL_SMALL_GOTTPREL
+ SYMBOL_SMALL_TPREL
+ Each of these represents a thread-local symbol, and corresponds to the
+ thread local storage relocation operator for the symbol being referred to.
+
+ SYMBOL_FORCE_TO_MEM: Global variables are addressed via the
+ constant pool; all variable addresses are spilled into constant
+ pools. The constant pools themselves are addressed using
+ PC-relative accesses. This only works for the large code model.
+ */
+enum aarch64_symbol_type
+{
+ SYMBOL_SMALL_ABSOLUTE,
+ SYMBOL_SMALL_GOT,
+ SYMBOL_SMALL_TLSGD,
+ SYMBOL_SMALL_TLSDESC,
+ SYMBOL_SMALL_GOTTPREL,
+ SYMBOL_SMALL_TPREL,
+ SYMBOL_FORCE_TO_MEM
+};
+
+/* A set of tuning parameters contains references to size and time
+ cost models and vectors for address cost calculations, register
+ move costs and memory move costs. */
+
+/* Extra costs for specific insns. Only records the cost above a
+ single insn. */
+
+struct cpu_rtx_cost_table
+{
+ const int memory_load;
+ const int memory_store;
+ const int register_shift;
+ const int int_divide;
+ const int float_divide;
+ const int double_divide;
+ const int int_multiply;
+ const int int_multiply_extend;
+ const int int_multiply_add;
+ const int int_multiply_extend_add;
+ const int float_multiply;
+ const int double_multiply;
+};
+
+/* Additional cost for addresses. */
+struct cpu_addrcost_table
+{
+ const int pre_modify;
+ const int post_modify;
+ const int register_offset;
+ const int register_extend;
+ const int imm_offset;
+};
+
+/* Additional costs for register copies. Cost is for one register. */
+struct cpu_regmove_cost
+{
+ const int GP2GP;
+ const int GP2FP;
+ const int FP2GP;
+ const int FP2FP;
+};
+
+struct tune_params
+{
+ const struct cpu_rtx_cost_table *const insn_extra_cost;
+ const struct cpu_addrcost_table *const addr_cost;
+ const struct cpu_regmove_cost *const regmove_cost;
+ const int memmov_cost;
+};
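+
+/* For illustration only (the names below are hypothetical, not part of
+   this patch): a CPU is described by pointing a tune_params at its
+   cost tables, e.g.
+     static const struct tune_params generic_tunings =
+       { &generic_rtx_cost_table, &generic_addrcost_table,
+         &generic_regmove_cost, 4 };
+   where 4 is the memmov_cost.  */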
+
+HOST_WIDE_INT aarch64_initial_elimination_offset (unsigned, unsigned);
+bool aarch64_bitmask_imm (HOST_WIDE_INT val, enum machine_mode);
+bool aarch64_const_double_zero_rtx_p (rtx);
+bool aarch64_constant_address_p (rtx);
+bool aarch64_function_arg_regno_p (unsigned);
+bool aarch64_gen_movmemqi (rtx *);
+bool aarch64_is_extend_from_extract (enum machine_mode, rtx, rtx);
+bool aarch64_is_long_call_p (rtx);
+bool aarch64_label_mentioned_p (rtx);
+bool aarch64_legitimate_pic_operand_p (rtx);
+bool aarch64_move_imm (HOST_WIDE_INT, enum machine_mode);
+bool aarch64_pad_arg_upward (enum machine_mode, const_tree);
+bool aarch64_pad_reg_upward (enum machine_mode, const_tree, bool);
+bool aarch64_regno_ok_for_base_p (int, bool);
+bool aarch64_regno_ok_for_index_p (int, bool);
+bool aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode);
+bool aarch64_simd_imm_zero_p (rtx, enum machine_mode);
+bool aarch64_simd_shift_imm_p (rtx, enum machine_mode, bool);
+bool aarch64_symbolic_address_p (rtx);
+bool aarch64_symbolic_constant_p (rtx, enum aarch64_symbol_context,
+ enum aarch64_symbol_type *);
+bool aarch64_uimm12_shift (HOST_WIDE_INT);
+const char *aarch64_output_casesi (rtx *);
+const char *aarch64_output_sync_insn (rtx, rtx *);
+const char *aarch64_output_sync_lock_release (rtx, rtx);
+enum aarch64_symbol_type aarch64_classify_symbol (rtx,
+ enum aarch64_symbol_context);
+enum aarch64_symbol_type aarch64_classify_tls_symbol (rtx);
+int aarch64_asm_preferred_eh_data_format (int, int);
+int aarch64_hard_regno_mode_ok (unsigned, enum machine_mode);
+int aarch64_hard_regno_nregs (unsigned, enum machine_mode);
+int aarch64_simd_attr_length_move (rtx);
+int aarch64_simd_immediate_valid_for_move (rtx, enum machine_mode, rtx *,
+ int *, unsigned char *, int *,
+ int *);
+int aarch64_uxt_size (int, HOST_WIDE_INT);
+rtx aarch64_final_eh_return_addr (void);
+rtx aarch64_legitimize_reload_address (rtx *, enum machine_mode, int, int, int);
+const char *aarch64_output_move_struct (rtx *operands);
+rtx aarch64_return_addr (int, rtx);
+rtx aarch64_simd_gen_const_vector_dup (enum machine_mode, int);
+bool aarch64_simd_mem_operand_p (rtx);
+rtx aarch64_simd_vect_par_cnst_half (enum machine_mode, bool);
+rtx aarch64_tls_get_addr (void);
+unsigned aarch64_dbx_register_number (unsigned);
+unsigned aarch64_regno_regclass (unsigned);
+unsigned aarch64_trampoline_size (void);
+unsigned aarch64_sync_loop_insns (rtx, rtx *);
+void aarch64_asm_output_labelref (FILE *, const char *);
+void aarch64_elf_asm_named_section (const char *, unsigned, tree);
+void aarch64_expand_epilogue (bool);
+void aarch64_expand_mov_immediate (rtx, rtx);
+void aarch64_expand_prologue (void);
+void aarch64_expand_sync (enum machine_mode, struct aarch64_sync_generator *,
+ rtx, rtx, rtx, rtx);
+void aarch64_function_profiler (FILE *, int);
+void aarch64_init_cumulative_args (CUMULATIVE_ARGS *, const_tree, rtx,
+ const_tree, unsigned);
+void aarch64_init_expanders (void);
+void aarch64_print_operand (FILE *, rtx, char);
+void aarch64_print_operand_address (FILE *, rtx);
+
+/* Initialize builtins for SIMD intrinsics. */
+void init_aarch64_simd_builtins (void);
+
+void aarch64_simd_const_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
+void aarch64_simd_disambiguate_copy (rtx *, rtx *, rtx *, unsigned int);
+
+/* Emit code to place an AdvSIMD pair result in memory locations (with equal
+   registers).  */
+void aarch64_simd_emit_pair_result_insn (enum machine_mode,
+ rtx (*intfn) (rtx, rtx, rtx), rtx,
+ rtx);
+
+/* Expand builtins for SIMD intrinsics. */
+rtx aarch64_simd_expand_builtin (int, tree, rtx);
+
+void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
+
+/* Emit code for reinterprets. */
+void aarch64_simd_reinterpret (rtx, rtx);
+
+void aarch64_split_doubleword_move (rtx, rtx);
+
+#if defined (RTX_CODE)
+
+bool aarch64_legitimate_address_p (enum machine_mode, rtx, RTX_CODE, bool);
+enum machine_mode aarch64_select_cc_mode (RTX_CODE, rtx, rtx);
+rtx aarch64_gen_compare_reg (RTX_CODE, rtx, rtx);
+
+#endif /* RTX_CODE */
+
+#endif /* GCC_AARCH64_PROTOS_H */
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
new file mode 100644
index 00000000000..a7ddfb1c1d3
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -0,0 +1,3264 @@
+;; Machine description for AArch64 AdvSIMD architecture.
+;; Copyright (C) 2011, 2012 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+; Main data types used by the instructions.
+
+(define_attr "simd_mode" "unknown,none,V8QI,V16QI,V4HI,V8HI,V2SI,V4SI,V2DI,V2SF,V4SF,V2DF,OI,CI,XI,DI,DF,SI,HI,QI"
+ (const_string "unknown"))
+
+
+; Classification of AdvSIMD instructions for scheduling purposes.
+; Do not set this attribute and the "v8type" attribute together in
+; any instruction pattern.
+
+; simd_abd integer absolute difference and accumulate.
+; simd_abdl integer absolute difference and accumulate (long).
+; simd_adal integer add and accumulate (long).
+; simd_add integer addition/subtraction.
+; simd_addl integer addition/subtraction (long).
+; simd_addlv across lanes integer sum (long).
+; simd_addn integer addition/subtraction (narrow).
+; simd_addn2 integer addition/subtraction (narrow, high).
+; simd_addv across lanes integer sum.
+; simd_cls count leading sign/zero bits.
+; simd_cmp compare / create mask.
+; simd_cnt population count.
+; simd_dup duplicate element.
+; simd_dupgp duplicate general purpose register.
+; simd_ext bitwise extract from pair.
+; simd_fadd floating point add/sub.
+; simd_fcmp floating point compare.
+; simd_fcvti floating point convert to integer.
+; simd_fcvtl floating-point convert upsize.
+; simd_fcvtn floating-point convert downsize (narrow).
+; simd_fcvtn2 floating-point convert downsize (narrow, high).
+; simd_fdiv floating point division.
+; simd_fminmax floating point min/max.
+; simd_fminmaxv across lanes floating point min/max.
+; simd_fmla floating point multiply-add.
+; simd_fmla_elt floating point multiply-add (by element).
+; simd_fmul floating point multiply.
+; simd_fmul_elt floating point multiply (by element).
+; simd_fnegabs floating point neg/abs.
+; simd_frcpe floating point reciprocal estimate.
+; simd_frcps floating point reciprocal step.
+; simd_frecx floating point reciprocal exponent.
+; simd_frint floating point round to integer.
+; simd_fsqrt floating point square root.
+; simd_icvtf integer convert to floating point.
+; simd_ins insert element.
+; simd_insgp insert general purpose register.
+; simd_load1 load multiple structures to one register (LD1).
+; simd_load1r load single structure to all lanes of one register (LD1R).
+; simd_load1s load single structure to one lane of one register (LD1 [index]).
+; simd_load2 load multiple structures to two registers (LD1, LD2).
+; simd_load2r load single structure to all lanes of two registers (LD1R, LD2R).
+; simd_load2s load single structure to one lane of two registers (LD2 [index]).
+; simd_load3 load multiple structures to three registers (LD1, LD3).
+; simd_load3r load single structure to all lanes of three registers (LD3R).
+; simd_load3s load single structure to one lane of three registers (LD3 [index]).
+; simd_load4 load multiple structures to four registers (LD1, LD2, LD4).
+; simd_load4r load single structure to all lanes of four registers (LD4R).
+; simd_load4s load single structure to one lane of four registers (LD4 [index]).
+; simd_logic logical operation.
+; simd_logic_imm logical operation (immediate).
+; simd_minmax integer min/max.
+; simd_minmaxv across lanes integer min/max.
+; simd_mla integer multiply-accumulate.
+; simd_mla_elt integer multiply-accumulate (by element).
+; simd_mlal integer multiply-accumulate (long).
+; simd_mlal_elt integer multiply-accumulate (by element, long).
+; simd_move move register.
+; simd_move_imm move immediate.
+; simd_movgp move element to general purpose register.
+; simd_mul integer multiply.
+; simd_mul_elt integer multiply (by element).
+; simd_mull integer multiply (long).
+; simd_mull_elt integer multiply (by element, long).
+; simd_negabs integer negate/absolute.
+; simd_rbit bitwise reverse.
+; simd_rcpe integer reciprocal estimate.
+; simd_rcps integer reciprocal square root.
+; simd_rev element reverse.
+; simd_sat_add integer saturating addition/subtraction.
+; simd_sat_mlal integer saturating multiply-accumulate (long).
+; simd_sat_mlal_elt integer saturating multiply-accumulate (by element, long).
+; simd_sat_mul integer saturating multiply.
+; simd_sat_mul_elt integer saturating multiply (by element).
+; simd_sat_mull integer saturating multiply (long).
+; simd_sat_mull_elt integer saturating multiply (by element, long).
+; simd_sat_negabs integer saturating negate/absolute.
+; simd_sat_shift integer saturating shift.
+; simd_sat_shift_imm integer saturating shift (immediate).
+; simd_sat_shiftn_imm integer saturating shift (narrow, immediate).
+; simd_sat_shiftn2_imm integer saturating shift (narrow, high, immediate).
+; simd_shift shift register/vector.
+; simd_shift_acc shift accumulate.
+; simd_shift_imm shift immediate.
+; simd_shift_imm_acc shift immediate and accumulate.
+; simd_shiftl shift register/vector (long).
+; simd_shiftl_imm shift register/vector (long, immediate).
+; simd_shiftn_imm shift register/vector (narrow, immediate).
+; simd_shiftn2_imm shift register/vector (narrow, high, immediate).
+; simd_store1 store multiple structures from one register (ST1).
+; simd_store1s store single structure from one lane of one register (ST1 [index]).
+; simd_store2 store multiple structures from two registers (ST1, ST2).
+; simd_store2s store single structure from one lane of two registers (ST2 [index]).
+; simd_store3 store multiple structures from three registers (ST1, ST3).
+; simd_store3s store single structure from one lane of three registers (ST3 [index]).
+; simd_store4 store multiple structures from four registers (ST1, ST2, ST4).
+; simd_store4s store single structure from one lane of four registers (ST4 [index]).
+; simd_tbl table lookup.
+; simd_trn transpose.
+; simd_zip zip/unzip.
+
+(define_attr "simd_type"
+ "simd_abd,\
+ simd_abdl,\
+ simd_adal,\
+ simd_add,\
+ simd_addl,\
+ simd_addlv,\
+ simd_addn,\
+ simd_addn2,\
+ simd_addv,\
+ simd_cls,\
+ simd_cmp,\
+ simd_cnt,\
+ simd_dup,\
+ simd_dupgp,\
+ simd_ext,\
+ simd_fadd,\
+ simd_fcmp,\
+ simd_fcvti,\
+ simd_fcvtl,\
+ simd_fcvtn,\
+ simd_fcvtn2,\
+ simd_fdiv,\
+ simd_fminmax,\
+ simd_fminmaxv,\
+ simd_fmla,\
+ simd_fmla_elt,\
+ simd_fmul,\
+ simd_fmul_elt,\
+ simd_fnegabs,\
+ simd_frcpe,\
+ simd_frcps,\
+ simd_frecx,\
+ simd_frint,\
+ simd_fsqrt,\
+ simd_icvtf,\
+ simd_ins,\
+ simd_insgp,\
+ simd_load1,\
+ simd_load1r,\
+ simd_load1s,\
+ simd_load2,\
+ simd_load2r,\
+ simd_load2s,\
+ simd_load3,\
+ simd_load3r,\
+ simd_load3s,\
+ simd_load4,\
+ simd_load4r,\
+ simd_load4s,\
+ simd_logic,\
+ simd_logic_imm,\
+ simd_minmax,\
+ simd_minmaxv,\
+ simd_mla,\
+ simd_mla_elt,\
+ simd_mlal,\
+ simd_mlal_elt,\
+ simd_movgp,\
+ simd_move,\
+ simd_move_imm,\
+ simd_mul,\
+ simd_mul_elt,\
+ simd_mull,\
+ simd_mull_elt,\
+ simd_negabs,\
+ simd_rbit,\
+ simd_rcpe,\
+ simd_rcps,\
+ simd_rev,\
+ simd_sat_add,\
+ simd_sat_mlal,\
+ simd_sat_mlal_elt,\
+ simd_sat_mul,\
+ simd_sat_mul_elt,\
+ simd_sat_mull,\
+ simd_sat_mull_elt,\
+ simd_sat_negabs,\
+ simd_sat_shift,\
+ simd_sat_shift_imm,\
+ simd_sat_shiftn_imm,\
+ simd_sat_shiftn2_imm,\
+ simd_shift,\
+ simd_shift_acc,\
+ simd_shift_imm,\
+ simd_shift_imm_acc,\
+ simd_shiftl,\
+ simd_shiftl_imm,\
+ simd_shiftn_imm,\
+ simd_shiftn2_imm,\
+ simd_store1,\
+ simd_store1s,\
+ simd_store2,\
+ simd_store2s,\
+ simd_store3,\
+ simd_store3s,\
+ simd_store4,\
+ simd_store4s,\
+ simd_tbl,\
+ simd_trn,\
+ simd_zip,\
+ none"
+ (const_string "none"))
+
+
+; The "neon_type" attribute is used by the AArch32 backend. Below is a mapping
+; from "simd_type" to "neon_type".
+
+(define_attr "neon_type"
+ "neon_int_1,neon_int_2,neon_int_3,neon_int_4,neon_int_5,neon_vqneg_vqabs,
+ neon_vmov,neon_vaba,neon_vsma,neon_vaba_qqq,
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,neon_mul_qqq_8_16_32_ddd_32,
+ neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar,
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,neon_mla_qqq_8_16,
+ neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long,
+ neon_mla_qqq_32_qqd_32_scalar,neon_mul_ddd_16_scalar_32_16_long_scalar,
+ neon_mul_qqd_32_scalar,neon_mla_ddd_16_scalar_qdd_32_16_long_scalar,
+ neon_shift_1,neon_shift_2,neon_shift_3,neon_vshl_ddd,
+ neon_vqshl_vrshl_vqrshl_qqq,neon_vsra_vrsra,neon_fp_vadd_ddd_vabs_dd,
+ neon_fp_vadd_qqq_vabs_qq,neon_fp_vsum,neon_fp_vmul_ddd,neon_fp_vmul_qqd,
+ neon_fp_vmla_ddd,neon_fp_vmla_qqq,neon_fp_vmla_ddd_scalar,
+ neon_fp_vmla_qqq_scalar,neon_fp_vrecps_vrsqrts_ddd,
+ neon_fp_vrecps_vrsqrts_qqq,neon_bp_simple,neon_bp_2cycle,neon_bp_3cycle,
+ neon_ldr,neon_str,neon_vld1_1_2_regs,neon_vld1_3_4_regs,
+ neon_vld2_2_regs_vld1_vld2_all_lanes,neon_vld2_4_regs,neon_vld3_vld4,
+ neon_vst1_1_2_regs_vst2_2_regs,neon_vst1_3_4_regs,
+ neon_vst2_4_regs_vst3_vst4,neon_vst3_vst4,neon_vld1_vld2_lane,
+ neon_vld3_vld4_lane,neon_vst1_vst2_lane,neon_vst3_vst4_lane,
+ neon_vld3_vld4_all_lanes,neon_mcr,neon_mcr_2_mcrr,neon_mrc,neon_mrrc,
+ neon_ldm_2,neon_stm_2,none,unknown"
+ (cond [
+ (eq_attr "simd_type" "simd_dup") (const_string "neon_bp_simple")
+ (eq_attr "simd_type" "simd_movgp") (const_string "neon_bp_simple")
+ (eq_attr "simd_type" "simd_add,simd_logic,simd_logic_imm") (const_string "neon_int_1")
+ (eq_attr "simd_type" "simd_negabs,simd_addlv") (const_string "neon_int_3")
+ (eq_attr "simd_type" "simd_addn,simd_addn2,simd_addl,simd_sat_add,simd_sat_negabs") (const_string "neon_int_4")
+ (eq_attr "simd_type" "simd_move") (const_string "neon_vmov")
+ (eq_attr "simd_type" "simd_ins") (const_string "neon_mcr")
+ (and (eq_attr "simd_type" "simd_mul,simd_sat_mul") (eq_attr "simd_mode" "V8QI,V4HI")) (const_string "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long")
+ (and (eq_attr "simd_type" "simd_mul,simd_sat_mul") (eq_attr "simd_mode" "V2SI,V8QI,V16QI,V2SI")) (const_string "neon_mul_qqq_8_16_32_ddd_32")
+ (and (eq_attr "simd_type" "simd_mull,simd_sat_mull") (eq_attr "simd_mode" "V8QI,V16QI,V4HI,V8HI")) (const_string "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long")
+ (and (eq_attr "simd_type" "simd_mull,simd_sat_mull") (eq_attr "simd_mode" "V2SI,V4SI,V2DI")) (const_string "neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar")
+ (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V8QI,V4HI")) (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long")
+ (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V2SI")) (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")
+ (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V16QI,V8HI")) (const_string "neon_mla_qqq_8_16")
+ (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V4SI")) (const_string "neon_mla_qqq_32_qqd_32_scalar")
+ (and (eq_attr "simd_type" "simd_mlal") (eq_attr "simd_mode" "V8QI,V16QI,V4HI,V8HI")) (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long")
+ (and (eq_attr "simd_type" "simd_mlal") (eq_attr "simd_mode" "V2SI,V4SI,V2DI")) (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")
+ (and (eq_attr "simd_type" "simd_fmla") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vmla_ddd")
+ (and (eq_attr "simd_type" "simd_fmla") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vmla_qqq")
+ (and (eq_attr "simd_type" "simd_fmla_elt") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vmla_ddd_scalar")
+ (and (eq_attr "simd_type" "simd_fmla_elt") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vmla_qqq_scalar")
+ (and (eq_attr "simd_type" "simd_fmul,simd_fmul_elt,simd_fdiv,simd_fsqrt") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vmul_ddd")
+ (and (eq_attr "simd_type" "simd_fmul,simd_fmul_elt,simd_fdiv,simd_fsqrt") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vmul_qqd")
+ (and (eq_attr "simd_type" "simd_fadd") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (and (eq_attr "simd_type" "simd_fadd") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vadd_qqq_vabs_qq")
+ (and (eq_attr "simd_type" "simd_fnegabs,simd_fminmax,simd_fminmaxv") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (and (eq_attr "simd_type" "simd_fnegabs,simd_fminmax,simd_fminmaxv") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vadd_qqq_vabs_qq")
+ (and (eq_attr "simd_type" "simd_shift,simd_shift_acc") (eq_attr "simd_mode" "V8QI,V4HI,V2SI")) (const_string "neon_vshl_ddd")
+ (and (eq_attr "simd_type" "simd_shift,simd_shift_acc") (eq_attr "simd_mode" "V16QI,V8HI,V4SI,V2DI")) (const_string "neon_shift_3")
+ (eq_attr "simd_type" "simd_minmax,simd_minmaxv") (const_string "neon_int_5")
+ (eq_attr "simd_type" "simd_shiftn_imm,simd_shiftn2_imm,simd_shiftl_imm,") (const_string "neon_shift_1")
+ (eq_attr "simd_type" "simd_load1,simd_load2") (const_string "neon_vld1_1_2_regs")
+ (eq_attr "simd_type" "simd_load3,simd_load3") (const_string "neon_vld1_3_4_regs")
+ (eq_attr "simd_type" "simd_load1r,simd_load2r,simd_load3r,simd_load4r") (const_string "neon_vld2_2_regs_vld1_vld2_all_lanes")
+ (eq_attr "simd_type" "simd_load1s,simd_load2s") (const_string "neon_vld1_vld2_lane")
+ (eq_attr "simd_type" "simd_load3s,simd_load4s") (const_string "neon_vld3_vld4_lane")
+ (eq_attr "simd_type" "simd_store1,simd_store2") (const_string "neon_vst1_1_2_regs_vst2_2_regs")
+ (eq_attr "simd_type" "simd_store3,simd_store4") (const_string "neon_vst1_3_4_regs")
+ (eq_attr "simd_type" "simd_store1s,simd_store2s") (const_string "neon_vst1_vst2_lane")
+ (eq_attr "simd_type" "simd_store3s,simd_store4s") (const_string "neon_vst3_vst4_lane")
+ (and (eq_attr "simd_type" "simd_frcpe,simd_frcps") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vrecps_vrsqrts_ddd")
+ (and (eq_attr "simd_type" "simd_frcpe,simd_frcps") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vrecps_vrsqrts_qqq")
+ (eq_attr "simd_type" "none") (const_string "none")
+ ]
+ (const_string "unknown")))
+
+
+(define_expand "mov<mode>"
+ [(set (match_operand:VALL 0 "aarch64_simd_nonimmediate_operand" "")
+ (match_operand:VALL 1 "aarch64_simd_general_operand" ""))]
+ "TARGET_SIMD"
+ "
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+ "
+)
+
+(define_expand "movmisalign<mode>"
+ [(set (match_operand:VALL 0 "aarch64_simd_nonimmediate_operand" "")
+ (match_operand:VALL 1 "aarch64_simd_general_operand" ""))]
+ "TARGET_SIMD"
+{
+ /* This pattern is not permitted to fail during expansion: if both arguments
+ are non-registers (e.g. memory := constant, which can be created by the
+ auto-vectorizer), force operand 1 into a register. */
+ if (!register_operand (operands[0], <MODE>mode)
+ && !register_operand (operands[1], <MODE>mode))
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+})
+
+(define_insn "aarch64_simd_dup<mode>"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (vec_duplicate:VDQ (match_operand:<VEL> 1 "register_operand" "r")))]
+ "TARGET_SIMD"
+ "dup\\t%0.<Vtype>, %<vw>1"
+ [(set_attr "simd_type" "simd_dupgp")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_dup_lane<mode>"
+ [(set (match_operand:VDQ_I 0 "register_operand" "=w")
+ (vec_duplicate:VDQ_I
+ (vec_select:<VEL>
+ (match_operand:<VCON> 1 "register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand" "i")])
+ )))]
+ "TARGET_SIMD"
+ "dup\\t%<v>0<Vmtype>, %1.<Vetype>[%2]"
+ [(set_attr "simd_type" "simd_dup")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_dup_lane<mode>"
+ [(set (match_operand:SDQ_I 0 "register_operand" "=w")
+ (vec_select:<VEL>
+ (match_operand:<VCON> 1 "register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand" "i")])
+ ))]
+ "TARGET_SIMD"
+ "dup\\t%<v>0<Vmtype>, %1.<Vetype>[%2]"
+ [(set_attr "simd_type" "simd_dup")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_simd_dup<mode>"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (vec_duplicate:VDQF (match_operand:<VEL> 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "dup\\t%0.<Vtype>, %1.<Vetype>[0]"
+ [(set_attr "simd_type" "simd_dup")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "*aarch64_simd_mov<mode>"
+ [(set (match_operand:VD 0 "aarch64_simd_nonimmediate_operand"
+ "=w, Utv, w, ?r, ?w, ?r, w")
+ (match_operand:VD 1 "aarch64_simd_general_operand"
+ "Utv, w, w, w, r, r, Dn"))]
+ "TARGET_SIMD
+ && (register_operand (operands[0], <MODE>mode)
+ || register_operand (operands[1], <MODE>mode))"
+{
+ switch (which_alternative)
+ {
+ case 0: return "ld1\t{%0.<Vtype>}, %1";
+ case 1: return "st1\t{%1.<Vtype>}, %0";
+ case 2: return "orr\t%0.<Vbtype>, %1.<Vbtype>, %1.<Vbtype>";
+ case 3: return "umov\t%0, %1.d[0]";
+ case 4: return "ins\t%0.d[0], %1";
+ case 5: return "mov\t%0, %1";
+ case 6:
+ {
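+      /* Build a MOVI/MVNI template for the validated immediate,
+	 appending an LSL amount when the immediate requires a shift.  */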
+ int is_valid;
+ unsigned char widthc;
+ int width;
+ static char templ[40];
+ int shift = 0, mvn = 0;
+ const char *mnemonic;
+ int length = 0;
+
+ is_valid =
+ aarch64_simd_immediate_valid_for_move (operands[1], <MODE>mode,
+ &operands[1], &width, &widthc,
+ &mvn, &shift);
+ gcc_assert (is_valid != 0);
+
+ mnemonic = mvn ? "mvni" : "movi";
+ if (widthc != 'd')
+ length += snprintf (templ, sizeof (templ),
+ "%s\t%%0.%d%c, %%1",
+ mnemonic, 64 / width, widthc);
+ else
+ length += snprintf (templ, sizeof (templ), "%s\t%%d0, %%1", mnemonic);
+
+ if (shift != 0)
+ length += snprintf (templ + length, sizeof (templ) - length,
+ ", lsl %d", shift);
+ return templ;
+ }
+ default: gcc_unreachable ();
+ }
+}
+ [(set_attr "simd_type" "simd_load1,simd_store1,simd_move,simd_movgp,simd_insgp,simd_move,simd_move_imm")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "*aarch64_simd_mov<mode>"
+ [(set (match_operand:VQ 0 "aarch64_simd_nonimmediate_operand"
+ "=w, Utv, w, ?r, ?w, ?r, w")
+ (match_operand:VQ 1 "aarch64_simd_general_operand"
+ "Utv, w, w, w, r, r, Dn"))]
+ "TARGET_SIMD
+ && (register_operand (operands[0], <MODE>mode)
+ || register_operand (operands[1], <MODE>mode))"
+{
+ switch (which_alternative)
+ {
+ case 0: return "ld1\t{%0.<Vtype>}, %1";
+ case 1: return "st1\t{%1.<Vtype>}, %0";
+ case 2: return "orr\t%0.<Vbtype>, %1.<Vbtype>, %1.<Vbtype>";
+ case 3: return "umov\t%0, %1.d[0]\;umov\t%H0, %1.d[1]";
+ case 4: return "ins\t%0.d[0], %1\;ins\t%0.d[1], %H1";
+ case 5: return "#";
+ case 6:
+ {
+ int is_valid;
+ unsigned char widthc;
+ int width;
+ static char templ[40];
+ int shift = 0, mvn = 0;
+
+ is_valid =
+ aarch64_simd_immediate_valid_for_move (operands[1], <MODE>mode,
+ &operands[1], &width, &widthc,
+ &mvn, &shift);
+ gcc_assert (is_valid != 0);
+ if (shift)
+ snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, %%1, lsl %d",
+ mvn ? "mvni" : "movi",
+ 128 / width, widthc, shift);
+ else
+ snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, %%1",
+ mvn ? "mvni" : "movi",
+ 128 / width, widthc);
+ return templ;
+ }
+ default: gcc_unreachable ();
+ }
+}
+ [(set_attr "simd_type" "simd_load1,simd_store1,simd_move,simd_movgp,simd_insgp,simd_move,simd_move_imm")
+ (set_attr "simd_mode" "<MODE>")
+ (set_attr "length" "4,4,4,8,8,8,4")]
+)
+
+(define_split
+ [(set (match_operand:VQ 0 "register_operand" "")
+ (match_operand:VQ 1 "register_operand" ""))]
+ "TARGET_SIMD && reload_completed
+ && GP_REGNUM_P (REGNO (operands[0]))
+ && GP_REGNUM_P (REGNO (operands[1]))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))]
+{
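+  /* Split a 128-bit copy between general registers into two DImode
+     moves, ordered so that a destination register is not clobbered
+     before it is read as a source.  */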
+ int rdest = REGNO (operands[0]);
+ int rsrc = REGNO (operands[1]);
+ rtx dest[2], src[2];
+
+ dest[0] = gen_rtx_REG (DImode, rdest);
+ src[0] = gen_rtx_REG (DImode, rsrc);
+ dest[1] = gen_rtx_REG (DImode, rdest + 1);
+ src[1] = gen_rtx_REG (DImode, rsrc + 1);
+
+ aarch64_simd_disambiguate_copy (operands, dest, src, 2);
+})
+
+(define_insn "orn<mode>3"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (ior:VDQ (not:VDQ (match_operand:VDQ 1 "register_operand" "w"))
+ (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "orn\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>"
+ [(set_attr "simd_type" "simd_logic")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "bic<mode>3"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (and:VDQ (not:VDQ (match_operand:VDQ 1 "register_operand" "w"))
+ (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "bic\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>"
+ [(set_attr "simd_type" "simd_logic")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "add<mode>3"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (plus:VDQ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "add\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_add")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "sub<mode>3"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (minus:VDQ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "sub\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_add")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "mul<mode>3"
+ [(set (match_operand:VDQM 0 "register_operand" "=w")
+ (mult:VDQM (match_operand:VDQM 1 "register_operand" "w")
+ (match_operand:VDQM 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "mul\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_mul")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "neg<mode>2"
+ [(set (match_operand:VDQM 0 "register_operand" "=w")
+ (neg:VDQM (match_operand:VDQM 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "neg\t%0.<Vtype>, %1.<Vtype>"
+ [(set_attr "simd_type" "simd_negabs")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "abs<mode>2"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (abs:VDQ (match_operand:VDQ 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "abs\t%0.<Vtype>, %1.<Vtype>"
+ [(set_attr "simd_type" "simd_negabs")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "and<mode>3"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (and:VDQ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "and\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
+ [(set_attr "simd_type" "simd_logic")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "ior<mode>3"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (ior:VDQ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
+ [(set_attr "simd_type" "simd_logic")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "xor<mode>3"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (xor:VDQ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "eor\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
+ [(set_attr "simd_type" "simd_logic")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "one_cmpl<mode>2"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (not:VDQ (match_operand:VDQ 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "not\t%0.<Vbtype>, %1.<Vbtype>"
+ [(set_attr "simd_type" "simd_logic")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_simd_vec_set<mode>"
+ [(set (match_operand:VQ_S 0 "register_operand" "=w")
+ (vec_merge:VQ_S
+ (vec_duplicate:VQ_S
+ (match_operand:<VEL> 1 "register_operand" "r"))
+ (match_operand:VQ_S 3 "register_operand" "0")
+ (match_operand:SI 2 "immediate_operand" "i")))]
+ "TARGET_SIMD"
+ "ins\t%0.<Vetype>[%p2], %w1";
+ [(set_attr "simd_type" "simd_insgp")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_simd_lshr<mode>"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (lshiftrt:VDQ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "aarch64_simd_rshift_imm" "Dr")))]
+ "TARGET_SIMD"
+ "ushr\t%0.<Vtype>, %1.<Vtype>, %2"
+ [(set_attr "simd_type" "simd_shift_imm")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_simd_ashr<mode>"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (ashiftrt:VDQ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "aarch64_simd_rshift_imm" "Dr")))]
+ "TARGET_SIMD"
+ "sshr\t%0.<Vtype>, %1.<Vtype>, %2"
+ [(set_attr "simd_type" "simd_shift_imm")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_simd_imm_shl<mode>"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (ashift:VDQ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "aarch64_simd_lshift_imm" "Dl")))]
+ "TARGET_SIMD"
+ "shl\t%0.<Vtype>, %1.<Vtype>, %2"
+ [(set_attr "simd_type" "simd_shift_imm")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_simd_reg_sshl<mode>"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (ashift:VDQ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "sshl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_shift")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_simd_reg_shl<mode>_unsigned"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (unspec:VDQ [(match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "register_operand" "w")]
+ UNSPEC_ASHIFT_UNSIGNED))]
+ "TARGET_SIMD"
+ "ushl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_shift")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_simd_reg_shl<mode>_signed"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (unspec:VDQ [(match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "register_operand" "w")]
+ UNSPEC_ASHIFT_SIGNED))]
+ "TARGET_SIMD"
+ "sshl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_shift")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "ashl<mode>3"
+ [(match_operand:VDQ 0 "register_operand" "")
+ (match_operand:VDQ 1 "register_operand" "")
+ (match_operand:SI 2 "general_operand" "")]
+ "TARGET_SIMD"
+{
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ int shift_amount;
+
+ if (CONST_INT_P (operands[2]))
+ {
+ shift_amount = INTVAL (operands[2]);
+ if (shift_amount >= 0 && shift_amount < bit_width)
+ {
+ rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+ shift_amount);
+ emit_insn (gen_aarch64_simd_imm_shl<mode> (operands[0],
+ operands[1],
+ tmp));
+ DONE;
+ }
+ else
+ {
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+ }
+ else if (MEM_P (operands[2]))
+ {
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+
+ if (REG_P (operands[2]))
+ {
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_aarch64_simd_dup<mode> (tmp,
+ convert_to_mode (<VEL>mode,
+ operands[2],
+ 0)));
+ emit_insn (gen_aarch64_simd_reg_sshl<mode> (operands[0], operands[1],
+ tmp));
+ DONE;
+ }
+ else
+ FAIL;
+}
+)
+
+(define_expand "lshr<mode>3"
+ [(match_operand:VDQ 0 "register_operand" "")
+ (match_operand:VDQ 1 "register_operand" "")
+ (match_operand:SI 2 "general_operand" "")]
+ "TARGET_SIMD"
+{
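+  /* AdvSIMD has no right-shift-by-register form: non-constant (or
+     out-of-range constant) shift amounts are negated, duplicated
+     across the vector and implemented as USHL by a negative amount.  */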
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ int shift_amount;
+
+ if (CONST_INT_P (operands[2]))
+ {
+ shift_amount = INTVAL (operands[2]);
+ if (shift_amount > 0 && shift_amount <= bit_width)
+ {
+ rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+ shift_amount);
+ emit_insn (gen_aarch64_simd_lshr<mode> (operands[0],
+ operands[1],
+ tmp));
+ DONE;
+ }
+ else
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+ else if (MEM_P (operands[2]))
+ {
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+
+ if (REG_P (operands[2]))
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+ rtx tmp1 = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_negsi2 (tmp, operands[2]));
+ emit_insn (gen_aarch64_simd_dup<mode> (tmp1,
+ convert_to_mode (<VEL>mode,
+ tmp, 0)));
+ emit_insn (gen_aarch64_simd_reg_shl<mode>_unsigned (operands[0],
+ operands[1],
+ tmp1));
+ DONE;
+ }
+ else
+ FAIL;
+}
+)
+
+(define_expand "ashr<mode>3"
+ [(match_operand:VDQ 0 "register_operand" "")
+ (match_operand:VDQ 1 "register_operand" "")
+ (match_operand:SI 2 "general_operand" "")]
+ "TARGET_SIMD"
+{
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ int shift_amount;
+
+ if (CONST_INT_P (operands[2]))
+ {
+ shift_amount = INTVAL (operands[2]);
+ if (shift_amount > 0 && shift_amount <= bit_width)
+ {
+ rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+ shift_amount);
+ emit_insn (gen_aarch64_simd_ashr<mode> (operands[0],
+ operands[1],
+ tmp));
+ DONE;
+ }
+ else
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+ else if (MEM_P (operands[2]))
+ {
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+
+ if (REG_P (operands[2]))
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+ rtx tmp1 = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_negsi2 (tmp, operands[2]));
+ emit_insn (gen_aarch64_simd_dup<mode> (tmp1,
+ convert_to_mode (<VEL>mode,
+ tmp, 0)));
+ emit_insn (gen_aarch64_simd_reg_shl<mode>_signed (operands[0],
+ operands[1],
+ tmp1));
+ DONE;
+ }
+ else
+ FAIL;
+}
+)
+
+(define_expand "vashl<mode>3"
+ [(match_operand:VDQ 0 "register_operand" "")
+ (match_operand:VDQ 1 "register_operand" "")
+ (match_operand:VDQ 2 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ emit_insn (gen_aarch64_simd_reg_sshl<mode> (operands[0], operands[1],
+ operands[2]));
+ DONE;
+})
+
+;; Using mode VQ_S as there is no V2DImode neg!
+;; Negating individual lanes would almost certainly offset
+;; any gain from vectorization.
+(define_expand "vashr<mode>3"
+ [(match_operand:VQ_S 0 "register_operand" "")
+ (match_operand:VQ_S 1 "register_operand" "")
+ (match_operand:VQ_S 2 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ rtx neg = gen_reg_rtx (<MODE>mode);
+ emit (gen_neg<mode>2 (neg, operands[2]));
+ emit_insn (gen_aarch64_simd_reg_shl<mode>_signed (operands[0], operands[1],
+ neg));
+ DONE;
+})
+
+(define_expand "vlshr<mode>3"
+ [(match_operand:VQ_S 0 "register_operand" "")
+ (match_operand:VQ_S 1 "register_operand" "")
+ (match_operand:VQ_S 2 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ rtx neg = gen_reg_rtx (<MODE>mode);
+ emit (gen_neg<mode>2 (neg, operands[2]));
+ emit_insn (gen_aarch64_simd_reg_shl<mode>_unsigned (operands[0], operands[1],
+ neg));
+ DONE;
+})
+
+(define_expand "vec_set<mode>"
+ [(match_operand:VQ_S 0 "register_operand" "+w")
+ (match_operand:<VEL> 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "")]
+ "TARGET_SIMD"
+ {
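+    /* The vec_merge mask is a bitmask selecting which lanes of the
+       destination are written: here, only the single lane indexed by
+       operand 2.  */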
+ HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
+ emit_insn (gen_aarch64_simd_vec_set<mode> (operands[0], operands[1],
+ GEN_INT (elem), operands[0]));
+ DONE;
+ }
+)
+
+(define_insn "aarch64_simd_vec_setv2di"
+ [(set (match_operand:V2DI 0 "register_operand" "=w")
+ (vec_merge:V2DI
+ (vec_duplicate:V2DI
+ (match_operand:DI 1 "register_operand" "r"))
+ (match_operand:V2DI 3 "register_operand" "0")
+ (match_operand:SI 2 "immediate_operand" "i")))]
+ "TARGET_SIMD"
+ "ins\t%0.d[%p2], %1";
+ [(set_attr "simd_type" "simd_insgp")
+ (set_attr "simd_mode" "V2DI")]
+)
+
+(define_expand "vec_setv2di"
+ [(match_operand:V2DI 0 "register_operand" "+w")
+ (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "")]
+ "TARGET_SIMD"
+ {
+ HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
+ emit_insn (gen_aarch64_simd_vec_setv2di (operands[0], operands[1],
+ GEN_INT (elem), operands[0]));
+ DONE;
+ }
+)
+
+(define_insn "aarch64_simd_vec_set<mode>"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (vec_merge:VDQF
+ (vec_duplicate:VDQF
+ (match_operand:<VEL> 1 "register_operand" "w"))
+ (match_operand:VDQF 3 "register_operand" "0")
+ (match_operand:SI 2 "immediate_operand" "i")))]
+ "TARGET_SIMD"
+ "ins\t%0.<Vetype>[%p2], %1.<Vetype>[0]";
+ [(set_attr "simd_type" "simd_ins")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "vec_set<mode>"
+ [(match_operand:VDQF 0 "register_operand" "+w")
+ (match_operand:<VEL> 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "")]
+ "TARGET_SIMD"
+ {
+ HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
+ emit_insn (gen_aarch64_simd_vec_set<mode> (operands[0], operands[1],
+ GEN_INT (elem), operands[0]));
+ DONE;
+ }
+)
+
+
+(define_insn "aarch64_mla<mode>"
+ [(set (match_operand:VQ_S 0 "register_operand" "=w")
+ (plus:VQ_S (mult:VQ_S (match_operand:VQ_S 2 "register_operand" "w")
+ (match_operand:VQ_S 3 "register_operand" "w"))
+ (match_operand:VQ_S 1 "register_operand" "0")))]
+ "TARGET_SIMD"
+ "mla\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>"
+ [(set_attr "simd_type" "simd_mla")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_mls<mode>"
+ [(set (match_operand:VQ_S 0 "register_operand" "=w")
+ (minus:VQ_S (match_operand:VQ_S 1 "register_operand" "0")
+ (mult:VQ_S (match_operand:VQ_S 2 "register_operand" "w")
+ (match_operand:VQ_S 3 "register_operand" "w"))))]
+ "TARGET_SIMD"
+ "mls\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>"
+ [(set_attr "simd_type" "simd_mla")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; Max/Min operations.
+(define_insn "<maxmin><mode>3"
+ [(set (match_operand:VQ_S 0 "register_operand" "=w")
+ (MAXMIN:VQ_S (match_operand:VQ_S 1 "register_operand" "w")
+ (match_operand:VQ_S 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "<maxmin>\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_minmax")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; Move into low-half clearing high half to 0.
+
+(define_insn "move_lo_quad_<mode>"
+ [(set (match_operand:VQ 0 "register_operand" "=w")
+ (vec_concat:VQ
+ (match_operand:<VHALF> 1 "register_operand" "w")
+ (vec_duplicate:<VHALF> (const_int 0))))]
+ "TARGET_SIMD"
+ "mov\\t%d0, %d1";
+ [(set_attr "simd_type" "simd_dup")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; Move into high-half.
+
+(define_insn "aarch64_simd_move_hi_quad_<mode>"
+ [(set (match_operand:VQ 0 "register_operand" "+w")
+ (vec_concat:VQ
+ (vec_select:<VHALF>
+ (match_dup 0)
+ (match_operand:VQ 2 "vect_par_cnst_lo_half" ""))
+ (match_operand:<VHALF> 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "ins\\t%0.d[1], %1.d[0]";
+ [(set_attr "simd_type" "simd_ins")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "move_hi_quad_<mode>"
+ [(match_operand:VQ 0 "register_operand" "")
+ (match_operand:<VHALF> 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
+ emit_insn (gen_aarch64_simd_move_hi_quad_<mode> (operands[0],
+ operands[1], p));
+ DONE;
+})
+
+;; Narrowing operations.
+
+;; For doubles.
+(define_insn "aarch64_simd_vec_pack_trunc_<mode>"
+ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+ (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "xtn\\t%0.<Vntype>, %1.<Vtype>"
+ [(set_attr "simd_type" "simd_shiftn_imm")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "vec_pack_trunc_<mode>"
+ [(match_operand:<VNARROWD> 0 "register_operand" "")
+ (match_operand:VDN 1 "register_operand" "")
+ (match_operand:VDN 2 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ rtx tempreg = gen_reg_rtx (<VDBL>mode);
+
+ emit_insn (gen_move_lo_quad_<Vdbl> (tempreg, operands[1]));
+ emit_insn (gen_move_hi_quad_<Vdbl> (tempreg, operands[2]));
+ emit_insn (gen_aarch64_simd_vec_pack_trunc_<Vdbl> (operands[0], tempreg));
+ DONE;
+})
+
+;; For quads.
+
+(define_insn "vec_pack_trunc_<mode>"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "+&w")
+ (vec_concat:<VNARROWQ2>
+ (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w"))
+ (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))))]
+ "TARGET_SIMD"
+ "xtn\\t%0.<Vntype>, %1.<Vtype>\;xtn2\\t%0.<V2ntype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_shiftn2_imm")
+ (set_attr "simd_mode" "<MODE>")
+ (set_attr "length" "8")]
+)
+
+;; Widening operations.
+
+(define_insn "aarch64_simd_vec_unpack<su>_lo_<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 2 "vect_par_cnst_lo_half" "")
+ )))]
+ "TARGET_SIMD"
+ "<su>shll %0.<Vwtype>, %1.<Vhalftype>, 0"
+ [(set_attr "simd_type" "simd_shiftl_imm")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_simd_vec_unpack<su>_hi_<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 2 "vect_par_cnst_hi_half" "")
+ )))]
+ "TARGET_SIMD"
+ "<su>shll2 %0.<Vwtype>, %1.<Vtype>, 0"
+ [(set_attr "simd_type" "simd_shiftl_imm")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "vec_unpack<su>_hi_<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "")
+ (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand"))]
+ "TARGET_SIMD"
+ {
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_simd_vec_unpack<su>_hi_<mode> (operands[0],
+ operands[1], p));
+ DONE;
+ }
+)
+
+(define_expand "vec_unpack<su>_lo_<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "")
+ (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))]
+ "TARGET_SIMD"
+ {
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
+ emit_insn (gen_aarch64_simd_vec_unpack<su>_lo_<mode> (operands[0],
+ operands[1], p));
+ DONE;
+ }
+)
+
+;; Widening arithmetic.
+
+(define_insn "aarch64_simd_vec_<su>mult_lo_<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (mult:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 3 "vect_par_cnst_lo_half" "")))
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 2 "register_operand" "w")
+ (match_dup 3)))))]
+ "TARGET_SIMD"
+ "<su>mull %0.<Vwtype>, %1.<Vhalftype>, %2.<Vhalftype>"
+ [(set_attr "simd_type" "simd_mull")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "vec_widen_<su>mult_lo_<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "")
+ (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))
+ (ANY_EXTEND:<VWIDE> (match_operand:VQW 2 "register_operand" ""))]
+ "TARGET_SIMD"
+ {
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
+ emit_insn (gen_aarch64_simd_vec_<su>mult_lo_<mode> (operands[0],
+ operands[1],
+ operands[2], p));
+ DONE;
+ }
+)
+
+(define_insn "aarch64_simd_vec_<su>mult_hi_<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (mult:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 2 "register_operand" "w")
+ (match_dup 3)))))]
+ "TARGET_SIMD"
+ "<su>mull2 %0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_mull")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "vec_widen_<su>mult_hi_<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "")
+ (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))
+ (ANY_EXTEND:<VWIDE> (match_operand:VQW 2 "register_operand" ""))]
+ "TARGET_SIMD"
+ {
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_simd_vec_<su>mult_hi_<mode> (operands[0],
+ operands[1],
+ operands[2], p));
+ DONE;
+ }
+)
+
+;; FP vector operations.
+;; AArch64 AdvSIMD supports single-precision (32-bit) and
+;; double-precision (64-bit) floating-point data types and arithmetic as
+;; defined by the IEEE 754-2008 standard. This makes them vectorizable
+;; without the need for -ffast-math or -funsafe-math-optimizations.
+;;
+;; Floating-point operations can raise an exception. Vectorising such
+;; operations is safe for the reasons explained below.
+;;
+;; ARMv8 permits an extension to enable trapped floating-point
+;; exception handling; however, this is an optional feature. In the
+;; event of a floating-point exception being raised by vectorised
+;; code then:
+;; 1. If trapped floating-point exceptions are available, then a trap
+;; will be taken when any lane raises an enabled exception. A trap
+;; handler may determine which lane raised the exception.
+;; 2. Alternatively a sticky exception flag is set in the
+;; floating-point status register (FPSR). Software may explicitly
+;; test the exception flags; such tests either prevent
+;; vectorisation, allowing precise identification of the failing
+;; operation, or are performed outside of vectorisable regions, in
+;; which case the specific operation and lane are not of interest.
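+;;
+;; As an illustrative sketch only (hypothetical user code, not part of
+;; this port), a program using the sticky flags would look like:
+;;
+;;   #include <fenv.h>
+;;   feclearexcept (FE_ALL_EXCEPT);
+;;   /* ... vectorisable floating-point loop ... */
+;;   if (fetestexcept (FE_INVALID | FE_OVERFLOW))
+;;     handle_fp_error ();  /* hypothetical handler */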
+
+;; FP arithmetic operations.
+
+(define_insn "add<mode>3"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (plus:VDQF (match_operand:VDQF 1 "register_operand" "w")
+ (match_operand:VDQF 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fadd\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_fadd")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "sub<mode>3"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (minus:VDQF (match_operand:VDQF 1 "register_operand" "w")
+ (match_operand:VDQF 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fsub\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_fadd")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "mul<mode>3"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (mult:VDQF (match_operand:VDQF 1 "register_operand" "w")
+ (match_operand:VDQF 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_fmul")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "div<mode>3"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (div:VDQF (match_operand:VDQF 1 "register_operand" "w")
+ (match_operand:VDQF 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fdiv\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_fdiv")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "neg<mode>2"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (neg:VDQF (match_operand:VDQF 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fneg\\t%0.<Vtype>, %1.<Vtype>"
+ [(set_attr "simd_type" "simd_fnegabs")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "abs<mode>2"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (abs:VDQF (match_operand:VDQF 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fabs\\t%0.<Vtype>, %1.<Vtype>"
+ [(set_attr "simd_type" "simd_fnegabs")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "fma<mode>4"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (fma:VDQF (match_operand:VDQF 1 "register_operand" "w")
+ (match_operand:VDQF 2 "register_operand" "w")
+ (match_operand:VDQF 3 "register_operand" "0")))]
+ "TARGET_SIMD"
+ "fmla\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_fmla")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_vmls<mode>"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (minus:VDQF (match_operand:VDQF 1 "register_operand" "0")
+ (mult:VDQF (match_operand:VDQF 2 "register_operand" "w")
+ (match_operand:VDQF 3 "register_operand" "w"))))]
+ "TARGET_SIMD"
+ "fmls\\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>"
+ [(set_attr "simd_type" "simd_fmla")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; FP Max/Min
+;; Max/Min are introduced by idiom recognition by GCC's mid-end. An
+;; expression like:
+;; a = (b < c) ? b : c;
+;; is idiom-matched as MIN_EXPR<b,c> only if -ffinite-math-only is enabled
+;; either explicitly or indirectly via -ffast-math.
+;;
+;; MIN_EXPR and MAX_EXPR eventually map to 'smin' and 'smax' in RTL.
+;; The 'smax' and 'smin' RTL standard pattern names do not specify which
+;; operand will be returned when both operands are zero (i.e. they may not
+;; honour signed zeroes), or when either operand is NaN. Therefore GCC
+;; only introduces MIN_EXPR/MAX_EXPR in fast math mode or when not honouring
+;; NaNs.
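+;;
+;; For illustration (hypothetical source, not part of this port), a loop
+;; such as
+;;
+;;   void vmin (float *restrict r, const float *a, const float *b, int n)
+;;   {
+;;     for (int i = 0; i < n; i++)
+;;       r[i] = a[i] < b[i] ? a[i] : b[i];
+;;   }
+;;
+;; is idiom-matched to MIN_EXPR and, when compiled with -ffast-math,
+;; vectorised using the fminnm pattern below.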
+
+(define_insn "smax<mode>3"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (smax:VDQF (match_operand:VDQF 1 "register_operand" "w")
+ (match_operand:VDQF 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fmaxnm\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_fminmax")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "smin<mode>3"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (smin:VDQF (match_operand:VDQF 1 "register_operand" "w")
+ (match_operand:VDQF 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fminnm\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_fminmax")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; FP 'across lanes' max and min ops.
+
+(define_insn "reduc_s<fmaxminv>_v4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=w")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "w")]
+ FMAXMINV))]
+ "TARGET_SIMD"
+ "f<fmaxminv>nmv\\t%s0, %1.4s";
+ [(set_attr "simd_type" "simd_fminmaxv")
+ (set_attr "simd_mode" "V4SF")]
+)
+
+(define_insn "reduc_s<fmaxminv>_<mode>"
+ [(set (match_operand:V2F 0 "register_operand" "=w")
+ (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
+ FMAXMINV))]
+ "TARGET_SIMD"
+ "f<fmaxminv>nmp\\t%0.<Vtype>, %1.<Vtype>, %1.<Vtype>";
+ [(set_attr "simd_type" "simd_fminmax")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; FP 'across lanes' add.
+
+(define_insn "aarch64_addvv4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=w")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "w")]
+ UNSPEC_FADDV))]
+ "TARGET_SIMD"
+ "faddp\\t%0.4s, %1.4s, %1.4s"
+ [(set_attr "simd_type" "simd_fadd")
+ (set_attr "simd_mode" "V4SF")]
+)
+
+(define_expand "reduc_uplus_v4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=w")
+ (match_operand:V4SF 1 "register_operand" "w"))]
+ "TARGET_SIMD"
+{
+ rtx tmp = gen_reg_rtx (V4SFmode);
+ emit_insn (gen_aarch64_addvv4sf (tmp, operands[1]));
+ emit_insn (gen_aarch64_addvv4sf (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "reduc_splus_v4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=w")
+ (match_operand:V4SF 1 "register_operand" "w"))]
+ "TARGET_SIMD"
+{
+ rtx tmp = gen_reg_rtx (V4SFmode);
+ emit_insn (gen_aarch64_addvv4sf (tmp, operands[1]));
+ emit_insn (gen_aarch64_addvv4sf (operands[0], tmp));
+ DONE;
+})
+
+(define_insn "aarch64_addv<mode>"
+ [(set (match_operand:V2F 0 "register_operand" "=w")
+ (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
+ UNSPEC_FADDV))]
+ "TARGET_SIMD"
+ "faddp\\t%<Vetype>0, %1.<Vtype>"
+ [(set_attr "simd_type" "simd_fadd")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "reduc_uplus_<mode>"
+ [(set (match_operand:V2F 0 "register_operand" "=w")
+ (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
+ UNSPEC_FADDV))]
+ "TARGET_SIMD"
+ ""
+)
+
+(define_expand "reduc_splus_<mode>"
+ [(set (match_operand:V2F 0 "register_operand" "=w")
+ (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
+ UNSPEC_FADDV))]
+ "TARGET_SIMD"
+ ""
+)
+
+;; Reduction across lanes.
+
+(define_insn "aarch64_addv<mode>"
+ [(set (match_operand:VDQV 0 "register_operand" "=w")
+ (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
+ UNSPEC_ADDV))]
+ "TARGET_SIMD"
+ "addv\\t%<Vetype>0, %1.<Vtype>"
+ [(set_attr "simd_type" "simd_addv")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "reduc_splus_<mode>"
+ [(set (match_operand:VDQV 0 "register_operand" "=w")
+ (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
+ UNSPEC_ADDV))]
+ "TARGET_SIMD"
+ ""
+)
+
+(define_expand "reduc_uplus_<mode>"
+ [(set (match_operand:VDQV 0 "register_operand" "=w")
+ (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
+ UNSPEC_ADDV))]
+ "TARGET_SIMD"
+ ""
+)
+
+(define_insn "aarch64_addvv2di"
+ [(set (match_operand:V2DI 0 "register_operand" "=w")
+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "w")]
+ UNSPEC_ADDV))]
+ "TARGET_SIMD"
+ "addp\\t%d0, %1.2d"
+ [(set_attr "simd_type" "simd_add")
+ (set_attr "simd_mode" "V2DI")]
+)
+
+(define_expand "reduc_uplus_v2di"
+ [(set (match_operand:V2DI 0 "register_operand" "=w")
+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "w")]
+ UNSPEC_ADDV))]
+ "TARGET_SIMD"
+ ""
+)
+
+(define_expand "reduc_splus_v2di"
+ [(set (match_operand:V2DI 0 "register_operand" "=w")
+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "w")]
+ UNSPEC_ADDV))]
+ "TARGET_SIMD"
+ ""
+)
+
+(define_insn "aarch64_addvv2si"
+ [(set (match_operand:V2SI 0 "register_operand" "=w")
+ (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
+ UNSPEC_ADDV))]
+ "TARGET_SIMD"
+ "addp\\t%0.2s, %1.2s, %1.2s"
+ [(set_attr "simd_type" "simd_add")
+ (set_attr "simd_mode" "V2SI")]
+)
+
+(define_expand "reduc_uplus_v2si"
+ [(set (match_operand:V2SI 0 "register_operand" "=w")
+ (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
+ UNSPEC_ADDV))]
+ "TARGET_SIMD"
+ ""
+)
+
+(define_expand "reduc_splus_v2si"
+ [(set (match_operand:V2SI 0 "register_operand" "=w")
+ (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
+ UNSPEC_ADDV))]
+ "TARGET_SIMD"
+ ""
+)
+
+(define_insn "reduc_<maxminv>_<mode>"
+ [(set (match_operand:VDQV 0 "register_operand" "=w")
+ (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
+ MAXMINV))]
+ "TARGET_SIMD"
+ "<maxminv>v\\t%<Vetype>0, %1.<Vtype>"
+ [(set_attr "simd_type" "simd_minmaxv")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "reduc_<maxminv>_v2si"
+ [(set (match_operand:V2SI 0 "register_operand" "=w")
+ (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
+ MAXMINV))]
+ "TARGET_SIMD"
+ "<maxminv>p\\t%0.2s, %1.2s, %1.2s"
+ [(set_attr "simd_type" "simd_minmax")
+ (set_attr "simd_mode" "V2SI")]
+)
+
+;; Patterns for AArch64 SIMD Intrinsics.
+
+(define_expand "aarch64_create<mode>"
+ [(match_operand:VD_RE 0 "register_operand" "")
+ (match_operand:DI 1 "general_operand" "")]
+ "TARGET_SIMD"
+{
+ rtx src = gen_lowpart (<MODE>mode, operands[1]);
+ emit_move_insn (operands[0], src);
+ DONE;
+})
+
+(define_insn "aarch64_get_lane_signed<mode>"
+ [(set (match_operand:<VEL> 0 "register_operand" "=r")
+ (sign_extend:<VEL>
+ (vec_select:<VEL>
+ (match_operand:VQ_S 1 "register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
+ "TARGET_SIMD"
+ "smov\\t%0, %1.<Vetype>[%2]"
+ [(set_attr "simd_type" "simd_movgp")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_get_lane_unsigned<mode>"
+ [(set (match_operand:<VEL> 0 "register_operand" "=r")
+ (zero_extend:<VEL>
+ (vec_select:<VEL>
+ (match_operand:VDQ 1 "register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
+ "TARGET_SIMD"
+ "umov\\t%<vw>0, %1.<Vetype>[%2]"
+ [(set_attr "simd_type" "simd_movgp")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_get_lane<mode>"
+ [(set (match_operand:<VEL> 0 "register_operand" "=w")
+ (vec_select:<VEL>
+ (match_operand:VDQF 1 "register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
+ "TARGET_SIMD"
+ "mov\\t%0.<Vetype>[0], %1.<Vetype>[%2]"
+ [(set_attr "simd_type" "simd_ins")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_get_lanedi"
+ [(match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_lane_bounds (operands[2], 0, 1);
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv8qi<mode>"
+ [(match_operand:V8QI 0 "register_operand" "")
+ (match_operand:VDC 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv4hi<mode>"
+ [(match_operand:V4HI 0 "register_operand" "")
+ (match_operand:VDC 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv2si<mode>"
+ [(match_operand:V2SI 0 "register_operand" "")
+ (match_operand:VDC 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv2sf<mode>"
+ [(match_operand:V2SF 0 "register_operand" "")
+ (match_operand:VDC 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretdi<mode>"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:VD_RE 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv16qi<mode>"
+ [(match_operand:V16QI 0 "register_operand" "")
+ (match_operand:VQ 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv8hi<mode>"
+ [(match_operand:V8HI 0 "register_operand" "")
+ (match_operand:VQ 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv4si<mode>"
+ [(match_operand:V4SI 0 "register_operand" "")
+ (match_operand:VQ 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv4sf<mode>"
+ [(match_operand:V4SF 0 "register_operand" "")
+ (match_operand:VQ 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv2di<mode>"
+ [(match_operand:V2DI 0 "register_operand" "")
+ (match_operand:VQ 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv2df<mode>"
+ [(match_operand:V2DF 0 "register_operand" "")
+ (match_operand:VQ 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+;; In this insn, operand 1 should be the low part and operand 2 the high
+;; part of the destination vector.
+
+(define_insn "*aarch64_combinez<mode>"
+ [(set (match_operand:<VDBL> 0 "register_operand" "=&w")
+ (vec_concat:<VDBL>
+ (match_operand:VDIC 1 "register_operand" "w")
+ (match_operand:VDIC 2 "aarch64_simd_imm_zero" "Dz")))]
+ "TARGET_SIMD"
+ "mov\\t%0.8b, %1.8b"
+ [(set_attr "simd_type" "simd_move")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_combine<mode>"
+ [(set (match_operand:<VDBL> 0 "register_operand" "=&w")
+ (vec_concat:<VDBL> (match_operand:VDC 1 "register_operand" "w")
+ (match_operand:VDC 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "mov\\t%0.d[0], %1.d[0]\;ins\\t%0.d[1], %2.d[0]"
+ [(set_attr "simd_type" "simd_ins")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; <su><addsub>l<q>.
+
+(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>l2<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ADDSUB:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 2 "register_operand" "w")
+ (match_dup 3)))))]
+ "TARGET_SIMD"
+ "<ANY_EXTEND:su><ADDSUB:optab>l2 %0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_addl")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_saddl2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_saddl2<mode>_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+(define_expand "aarch64_uaddl2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_uaddl2<mode>_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+(define_expand "aarch64_ssubl2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_ssubl2<mode>_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+(define_expand "aarch64_usubl2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_usubl2<mode>_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>l<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ADDSUB:<VWIDE> (ANY_EXTEND:<VWIDE>
+ (match_operand:VDW 1 "register_operand" "w"))
+ (ANY_EXTEND:<VWIDE>
+ (match_operand:VDW 2 "register_operand" "w"))))]
+ "TARGET_SIMD"
+ "<ANY_EXTEND:su><ADDSUB:optab>l %0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_addl")
+ (set_attr "simd_mode" "<MODE>")]
+)
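+
+;; The "l" forms widen two D-register sources; the "l2" forms do the same
+;; for the high halves of Q registers, selected through the hi-half
+;; parallel passed to the _internal pattern.  A sketch, assuming
+;; arm_neon.h:
+;;   int16x8_t a, b;
+;;   int32x4_t lo = vaddl_s16 (vget_low_s16 (a), vget_low_s16 (b)); /* saddl  */
+;;   int32x4_t hi = vaddl_high_s16 (a, b);                          /* saddl2 */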
+
+;; <su><addsub>w<q>.
+
+(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>w<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ADDSUB:<VWIDE> (match_operand:<VWIDE> 1 "register_operand" "w")
+ (ANY_EXTEND:<VWIDE>
+ (match_operand:VDW 2 "register_operand" "w"))))]
+ "TARGET_SIMD"
+ "<ANY_EXTEND:su><ADDSUB:optab>w\\t%0.<Vwtype>, %1.<Vwtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_addl")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>w2<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ADDSUB:<VWIDE> (match_operand:<VWIDE> 1 "register_operand" "w")
+ (ANY_EXTEND:<VWIDE>
+ (vec_select:<VHALF>
+ (match_operand:VQW 2 "register_operand" "w")
+ (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))))]
+ "TARGET_SIMD"
+ "<ANY_EXTEND:su><ADDSUB:optab>w2\\t%0.<Vwtype>, %1.<Vwtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_addl")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_saddw2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQW 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_saddw2<mode>_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+(define_expand "aarch64_uaddw2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQW 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_uaddw2<mode>_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+(define_expand "aarch64_ssubw2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQW 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_ssubw2<mode>_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+(define_expand "aarch64_usubw2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQW 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_usubw2<mode>_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
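+
+;; The "w" forms widen only the second source; the accumulator is already
+;; wide.  A sketch, assuming arm_neon.h:
+;;   int32x4_t acc;  int16x4_t d;  int16x8_t q;
+;;   acc = vaddw_s16 (acc, d);       /* saddw  v.4s, v.4s, v.4h */
+;;   acc = vaddw_high_s16 (acc, q);  /* saddw2 v.4s, v.4s, v.8h */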
+
+;; <su><r>h<addsub>.
+
+(define_insn "aarch64_<sur>h<addsub><mode>"
+ [(set (match_operand:VQ_S 0 "register_operand" "=w")
+ (unspec:VQ_S [(match_operand:VQ_S 1 "register_operand" "w")
+ (match_operand:VQ_S 2 "register_operand" "w")]
+ HADDSUB))]
+ "TARGET_SIMD"
+ "<sur>h<addsub>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_add")
+ (set_attr "simd_mode" "<MODE>")]
+)
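+
+;; Halving add/sub keeps the carry out of the top bit by computing
+;; (a op b) >> 1 at double internal precision; the <r> variants round.
+;; A sketch, assuming arm_neon.h:
+;;   uint8x8_t avg = vrhadd_u8 (a, b);  /* urhadd: rounding halving add */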
+
+;; <r><addsub>hn<q>.
+
+(define_insn "aarch64_<sur><addsub>hn<mode>"
+ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+ (unspec:<VNARROWQ> [(match_operand:VQN 1 "register_operand" "w")
+ (match_operand:VQN 2 "register_operand" "w")]
+ ADDSUBHN))]
+ "TARGET_SIMD"
+ "<sur><addsub>hn\\t%0.<Vntype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_addn")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_<sur><addsub>hn2<mode>"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+ (unspec:<VNARROWQ2> [(match_operand:<VNARROWQ> 1 "register_operand" "0")
+ (match_operand:VQN 2 "register_operand" "w")
+ (match_operand:VQN 3 "register_operand" "w")]
+ ADDSUBHN2))]
+ "TARGET_SIMD"
+ "<sur><addsub>hn2\\t%0.<V2ntype>, %2.<Vtype>, %3.<Vtype>"
+ [(set_attr "simd_type" "simd_addn2")
+ (set_attr "simd_mode" "<MODE>")]
+)
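+
+;; The narrowing-high forms compute the full-width sum/difference and keep
+;; the high half of each element; "hn2" deposits its result in the upper
+;; half of the destination.  A sketch, assuming arm_neon.h:
+;;   int32x4_t a, b;
+;;   int16x4_t n  = vaddhn_s32 (a, b);          /* addhn  */
+;;   int16x8_t n2 = vaddhn_high_s32 (n, a, b);  /* addhn2 */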
+
+;; pmul.
+
+(define_insn "aarch64_pmul<mode>"
+ [(set (match_operand:VB 0 "register_operand" "=w")
+ (unspec:VB [(match_operand:VB 1 "register_operand" "w")
+ (match_operand:VB 2 "register_operand" "w")]
+ UNSPEC_PMUL))]
+ "TARGET_SIMD"
+ "pmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_mul")
+ (set_attr "simd_mode" "<MODE>")]
+)
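+
+;; Polynomial multiply treats each byte as a polynomial over GF(2), i.e.
+;; the partial products are combined with XOR rather than addition.  A
+;; sketch, assuming arm_neon.h:
+;;   poly8x8_t r = vmul_p8 (a, b);  /* pmul v0.8b, v1.8b, v2.8b */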
+
+;; <su>q<addsub>
+
+(define_insn "aarch64_<su_optab><optab><mode>"
+ [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
+ (BINQOPS:VSDQ_I (match_operand:VSDQ_I 1 "register_operand" "w")
+ (match_operand:VSDQ_I 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "<su_optab><optab>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+ [(set_attr "simd_type" "simd_add")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; suqadd and usqadd
+
+(define_insn "aarch64_<sur>qadd<mode>"
+ [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
+ (unspec:VSDQ_I [(match_operand:VSDQ_I 1 "register_operand" "0")
+ (match_operand:VSDQ_I 2 "register_operand" "w")]
+ USSUQADD))]
+ "TARGET_SIMD"
+ "<sur>qadd\\t%<v>0<Vmtype>, %<v>2<Vmtype>"
+ [(set_attr "simd_type" "simd_sat_add")
+ (set_attr "simd_mode" "<MODE>")]
+)
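+
+;; Saturating arithmetic clamps at the type bounds instead of wrapping.
+;; A sketch, assuming arm_neon.h (values hypothetical):
+;;   int8x8_t a = vdup_n_s8 (100);
+;;   int8x8_t r = vqadd_s8 (a, a);  /* sqadd: r = 127, not -56 */
+;; suqadd/usqadd accumulate a value of the opposite signedness into the
+;; destination, which is why operand 1 is tied to operand 0 with "0".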
+
+;; sqmovun
+
+(define_insn "aarch64_sqmovun<mode>"
+ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+ (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")]
+ UNSPEC_SQXTUN))]
+ "TARGET_SIMD"
+ "sqxtun\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>"
+ [(set_attr "simd_type" "simd_sat_shiftn_imm")
+ (set_attr "simd_mode" "<MODE>")]
+ )
+
+;; sqmovn and uqmovn
+
+(define_insn "aarch64_<sur>qmovn<mode>"
+ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+ (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")]
+ SUQMOVN))]
+ "TARGET_SIMD"
+ "<sur>qxtn\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>"
+ [(set_attr "simd_type" "simd_sat_shiftn_imm")
+ (set_attr "simd_mode" "<MODE>")]
+ )
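+
+;; sqxtun narrows a signed source to an unsigned result with saturation,
+;; while sqxtn/uqxtn keep the source signedness.  A sketch, assuming
+;; arm_neon.h:
+;;   int16x8_t s;
+;;   uint8x8_t u = vqmovun_s16 (s);  /* sqxtun */
+;;   int8x8_t  n = vqmovn_s16 (s);   /* sqxtn  */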
+
+;; <su>q<absneg>
+
+(define_insn "aarch64_s<optab><mode>"
+ [(set (match_operand:VSDQ_I_BHSI 0 "register_operand" "=w")
+ (UNQOPS:VSDQ_I_BHSI
+ (match_operand:VSDQ_I_BHSI 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "s<optab>\\t%<v>0<Vmtype>, %<v>1<Vmtype>"
+ [(set_attr "simd_type" "simd_sat_negabs")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; sq<r>dmulh.
+
+(define_insn "aarch64_sq<r>dmulh<mode>"
+ [(set (match_operand:VSDQ_HSI 0 "register_operand" "=w")
+ (unspec:VSDQ_HSI
+ [(match_operand:VSDQ_HSI 1 "register_operand" "w")
+ (match_operand:VSDQ_HSI 2 "register_operand" "w")]
+ VQDMULH))]
+ "TARGET_SIMD"
+ "sq<r>dmulh\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+ [(set_attr "simd_type" "simd_sat_mul")
+ (set_attr "simd_mode" "<MODE>")]
+)
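+
+;; sqdmulh returns the high half of the doubled saturating product,
+;; roughly (2*a*b) >> element-bits, which keeps a Q15/Q31 fixed-point
+;; multiply in range.  A sketch, assuming arm_neon.h:
+;;   int16x4_t r = vqdmulh_s16 (a, b);  /* sqdmulh v0.4h, v1.4h, v2.4h */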
+
+;; sq<r>dmulh_lane
+
+(define_insn "aarch64_sq<r>dmulh_lane<mode>"
+ [(set (match_operand:VSDQ_HSI 0 "register_operand" "=w")
+ (unspec:VSDQ_HSI
+ [(match_operand:VSDQ_HSI 1 "register_operand" "w")
+ (vec_select:<VEL>
+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
+ (parallel [(match_operand:SI 3 "immediate_operand" "i")]))]
+ VQDMULH))]
+ "TARGET_SIMD"
+ "*
+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCON>mode));
+ return \"sq<r>dmulh\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]\";"
+ [(set_attr "simd_type" "simd_sat_mul")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; vqdml[sa]l
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (SBINQOPS:<VWIDE>
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (match_operand:VSD_HSI 2 "register_operand" "w"))
+ (sign_extend:<VWIDE>
+ (match_operand:VSD_HSI 3 "register_operand" "w")))
+ (const_int 1))))]
+ "TARGET_SIMD"
+ "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %<v>3<Vmtype>"
+ [(set_attr "simd_type" "simd_sat_mlal")
+ (set_attr "simd_mode" "<MODE>")]
+)
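+
+;; The RTL spells the semantics out: sign-extend both sources, multiply,
+;; double via the ss_ashift by 1, then saturating-accumulate (or subtract)
+;; into operand 1.  A sketch, assuming arm_neon.h:
+;;   int32x4_t acc;  int16x4_t a, b;
+;;   acc = vqdmlal_s16 (acc, a, b);  /* sqdmlal v.4s, v.4h, v.4h */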
+
+;; vqdml[sa]l_lane
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l_lane<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (SBINQOPS:<VWIDE>
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (match_operand:VD_HSI 2 "register_operand" "w"))
+ (sign_extend:<VWIDE>
+ (vec_duplicate:VD_HSI
+ (vec_select:<VEL>
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (parallel [(match_operand:SI 4 "immediate_operand" "i")])))
+ ))
+ (const_int 1))))]
+ "TARGET_SIMD"
+ "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]"
+ [(set_attr "simd_type" "simd_sat_mlal")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l_lane<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (SBINQOPS:<VWIDE>
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (match_operand:SD_HSI 2 "register_operand" "w"))
+ (sign_extend:<VWIDE>
+ (vec_select:<VEL>
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (parallel [(match_operand:SI 4 "immediate_operand" "i")])))
+ )
+ (const_int 1))))]
+ "TARGET_SIMD"
+ "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]"
+ [(set_attr "simd_type" "simd_sat_mlal")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_sqdmlal_lane<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (match_operand:VSD_HSI 2 "register_operand" "w")
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode) / 2);
+ emit_insn (gen_aarch64_sqdmlal_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4]));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmlal_laneq<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (match_operand:VSD_HSI 2 "register_operand" "w")
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode));
+ emit_insn (gen_aarch64_sqdmlal_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4]));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmlsl_lane<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (match_operand:VSD_HSI 2 "register_operand" "w")
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode) / 2);
+ emit_insn (gen_aarch64_sqdmlsl_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4]));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmlsl_laneq<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (match_operand:VSD_HSI 2 "register_operand" "w")
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode));
+ emit_insn (gen_aarch64_sqdmlsl_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4]));
+ DONE;
+})
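+
+;; Note the bounds: the _lane expanders allow indices into half of
+;; <VCON>'s elements (a D-register's worth), while the _laneq forms allow
+;; the full Q-register range; both then share the _internal patterns.
+;; A sketch, assuming arm_neon.h:
+;;   int32x4_t acc;  int16x4_t a;  int16x8_t v;
+;;   acc = vqdmlal_laneq_s16 (acc, a, v, 7);  /* lane index 0..7 */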
+
+;; vqdml[sa]l_n
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l_n<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (SBINQOPS:<VWIDE>
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (match_operand:VD_HSI 2 "register_operand" "w"))
+ (sign_extend:<VWIDE>
+ (vec_duplicate:VD_HSI
+ (match_operand:<VEL> 3 "register_operand" "w"))))
+ (const_int 1))))]
+ "TARGET_SIMD"
+ "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[0]"
+ [(set_attr "simd_type" "simd_sat_mlal")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; sqdml[as]l2
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l2<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (SBINQOPS:<VWIDE>
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (vec_select:<VHALF>
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" "")))
+ (sign_extend:<VWIDE>
+ (vec_select:<VHALF>
+ (match_operand:VQ_HSI 3 "register_operand" "w")
+ (match_dup 4))))
+ (const_int 1))))]
+ "TARGET_SIMD"
+ "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %<v>3<Vmtype>"
+ [(set_attr "simd_type" "simd_sat_mlal")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_sqdmlal2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:VQ_HSI 3 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_sqdmlal2<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3], p));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmlsl2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:VQ_HSI 3 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_sqdmlsl2<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3], p));
+ DONE;
+})
+
+;; vqdml[sa]l2_lane
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l2_lane<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (SBINQOPS:<VWIDE>
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (vec_select:<VHALF>
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:VQ_HSI 5 "vect_par_cnst_hi_half" "")))
+ (sign_extend:<VWIDE>
+ (vec_duplicate:<VHALF>
+ (vec_select:<VEL>
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (parallel [(match_operand:SI 4 "immediate_operand" "i")])
+ ))))
+ (const_int 1))))]
+ "TARGET_SIMD"
+ "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]"
+ [(set_attr "simd_type" "simd_sat_mlal")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_sqdmlal2_lane<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode) / 2);
+ emit_insn (gen_aarch64_sqdmlal2_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4], p));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmlal2_laneq<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode));
+ emit_insn (gen_aarch64_sqdmlal2_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4], p));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmlsl2_lane<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode) / 2);
+ emit_insn (gen_aarch64_sqdmlsl2_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4], p));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmlsl2_laneq<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode));
+ emit_insn (gen_aarch64_sqdmlsl2_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4], p));
+ DONE;
+})
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l2_n<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (SBINQOPS:<VWIDE>
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (vec_select:<VHALF>
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" "")))
+ (sign_extend:<VWIDE>
+ (vec_duplicate:<VHALF>
+ (match_operand:<VEL> 3 "register_operand" "w"))))
+ (const_int 1))))]
+ "TARGET_SIMD"
+ "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[0]"
+ [(set_attr "simd_type" "simd_sat_mlal")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_sqdmlal2_n<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:<VEL> 3 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_sqdmlal2_n<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ p));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmlsl2_n<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:<VEL> 3 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_sqdmlsl2_n<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ p));
+ DONE;
+})
+
+;; vqdmull
+
+(define_insn "aarch64_sqdmull<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (match_operand:VSD_HSI 1 "register_operand" "w"))
+ (sign_extend:<VWIDE>
+ (match_operand:VSD_HSI 2 "register_operand" "w")))
+ (const_int 1)))]
+ "TARGET_SIMD"
+ "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+ [(set_attr "simd_type" "simd_sat_mul")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; vqdmull_lane
+
+(define_insn "aarch64_sqdmull_lane<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (match_operand:VD_HSI 1 "register_operand" "w"))
+ (sign_extend:<VWIDE>
+ (vec_duplicate:VD_HSI
+ (vec_select:<VEL>
+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
+ (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
+ ))
+ (const_int 1)))]
+ "TARGET_SIMD"
+ "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]"
+ [(set_attr "simd_type" "simd_sat_mul")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_sqdmull_lane<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (match_operand:SD_HSI 1 "register_operand" "w"))
+ (sign_extend:<VWIDE>
+ (vec_select:<VEL>
+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
+ (parallel [(match_operand:SI 3 "immediate_operand" "i")]))
+ ))
+ (const_int 1)))]
+ "TARGET_SIMD"
+ "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]"
+ [(set_attr "simd_type" "simd_sat_mul")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_sqdmull_lane<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VSD_HSI 1 "register_operand" "w")
+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCON>mode) / 2);
+ emit_insn (gen_aarch64_sqdmull_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmull_laneq<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VD_HSI 1 "register_operand" "w")
+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCON>mode));
+ emit_insn (gen_aarch64_sqdmull_lane<mode>_internal
+ (operands[0], operands[1], operands[2], operands[3]));
+ DONE;
+})
+
+;; vqdmull_n
+
+(define_insn "aarch64_sqdmull_n<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (match_operand:VD_HSI 1 "register_operand" "w"))
+ (sign_extend:<VWIDE>
+ (vec_duplicate:VD_HSI
+ (match_operand:<VEL> 2 "register_operand" "w")))
+ )
+ (const_int 1)))]
+ "TARGET_SIMD"
+ "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[0]"
+ [(set_attr "simd_type" "simd_sat_mul")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; vqdmull2
+
+(define_insn "aarch64_sqdmull2<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (vec_select:<VHALF>
+ (match_operand:VQ_HSI 1 "register_operand" "w")
+ (match_operand:VQ_HSI 3 "vect_par_cnst_hi_half" "")))
+ (sign_extend:<VWIDE>
+ (vec_select:<VHALF>
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_dup 3)))
+ )
+ (const_int 1)))]
+ "TARGET_SIMD"
+ "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+ [(set_attr "simd_type" "simd_sat_mul")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_sqdmull2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VQ_HSI 1 "register_operand" "w")
+ (match_operand:<VCON> 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_sqdmull2<mode>_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+;; vqdmull2_lane
+
+(define_insn "aarch64_sqdmull2_lane<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (vec_select:<VHALF>
+ (match_operand:VQ_HSI 1 "register_operand" "w")
+ (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" "")))
+ (sign_extend:<VWIDE>
+ (vec_duplicate:<VHALF>
+ (vec_select:<VEL>
+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
+ (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
+ ))
+ (const_int 1)))]
+ "TARGET_SIMD"
+ "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]"
+ [(set_attr "simd_type" "simd_sat_mul")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_sqdmull2_lane<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VQ_HSI 1 "register_operand" "w")
+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode) / 2);
+ emit_insn (gen_aarch64_sqdmull2_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ p));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmull2_laneq<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VQ_HSI 1 "register_operand" "w")
+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode));
+ emit_insn (gen_aarch64_sqdmull2_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ p));
+ DONE;
+})
+
+;; vqdmull2_n
+
+(define_insn "aarch64_sqdmull2_n<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (vec_select:<VHALF>
+ (match_operand:VQ_HSI 1 "register_operand" "w")
+ (match_operand:VQ_HSI 3 "vect_par_cnst_hi_half" "")))
+ (sign_extend:<VWIDE>
+ (vec_duplicate:<VHALF>
+ (match_operand:<VEL> 2 "register_operand" "w")))
+ )
+ (const_int 1)))]
+ "TARGET_SIMD"
+ "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[0]"
+ [(set_attr "simd_type" "simd_sat_mul")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_sqdmull2_n<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VQ_HSI 1 "register_operand" "w")
+ (match_operand:<VEL> 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_sqdmull2_n<mode>_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+;; vshl
+
+(define_insn "aarch64_<sur>shl<mode>"
+ [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+ (unspec:VSDQ_I_DI
+ [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
+ (match_operand:VSDQ_I_DI 2 "register_operand" "w")]
+ VSHL))]
+ "TARGET_SIMD"
+ "<sur>shl\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>";
+ [(set_attr "simd_type" "simd_shift")
+ (set_attr "simd_mode" "<MODE>")]
+)
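+
+;; Unlike the immediate shifts below, sshl/ushl take a per-element shift
+;; count from a register, and a negative count shifts right.  A sketch,
+;; assuming arm_neon.h:
+;;   int8x8_t r = vshl_s8 (a, vdup_n_s8 (-2));  /* element-wise a >> 2 */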
+
+;; vqshl
+
+(define_insn "aarch64_<sur>q<r>shl<mode>"
+ [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
+ (unspec:VSDQ_I
+ [(match_operand:VSDQ_I 1 "register_operand" "w")
+ (match_operand:VSDQ_I 2 "register_operand" "w")]
+ VQSHL))]
+ "TARGET_SIMD"
+ "<sur>q<r>shl\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>";
+ [(set_attr "simd_type" "simd_sat_shift")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; vshl_n
+
+(define_expand "aarch64_sshl_n<mode>"
+ [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+ (match_operand:VSDQ_I_DI 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ emit_insn (gen_ashl<mode>3 (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "aarch64_ushl_n<mode>"
+ [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+ (match_operand:VSDQ_I_DI 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ emit_insn (gen_ashl<mode>3 (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+;; vshll_n
+
+(define_insn "aarch64_<sur>shll_n<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (unspec:<VWIDE> [(match_operand:VDW 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VSHLL))]
+ "TARGET_SIMD"
+ "*
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ aarch64_simd_const_bounds (operands[2], 0, bit_width + 1);
+ if (INTVAL (operands[2]) == bit_width)
+ {
+ return \"shll\\t%0.<Vwtype>, %1.<Vtype>, %2\";
+ }
+ else
+ {
+ return \"<sur>shll\\t%0.<Vwtype>, %1.<Vtype>, %2\";
+ }"
+ [(set_attr "simd_type" "simd_shift_imm")
+ (set_attr "simd_mode" "<MODE>")]
+)
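+
+;; A shift count equal to the element width is only encodable through the
+;; shll alias, hence the two-way return above.  A sketch, assuming
+;; arm_neon.h:
+;;   uint16x8_t w = vshll_n_u8 (a, 8);  /* shll  v.8h, v.8b, #8 */
+;;   uint16x8_t v = vshll_n_u8 (a, 3);  /* ushll v.8h, v.8b, #3 */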
+
+;; vshll_high_n
+
+(define_insn "aarch64_<sur>shll2_n<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (unspec:<VWIDE> [(match_operand:VQW 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VSHLL))]
+ "TARGET_SIMD"
+ "*
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ aarch64_simd_const_bounds (operands[2], 0, bit_width + 1);
+ if (INTVAL (operands[2]) == bit_width)
+ {
+ return \"shll2\\t%0.<Vwtype>, %1.<Vtype>, %2\";
+ }
+ else
+ {
+ return \"<sur>shll2\\t%0.<Vwtype>, %1.<Vtype>, %2\";
+ }"
+ [(set_attr "simd_type" "simd_shift_imm")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; vshr_n
+
+(define_expand "aarch64_sshr_n<mode>"
+ [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+ (match_operand:VSDQ_I_DI 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ emit_insn (gen_ashr<mode>3 (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "aarch64_ushr_n<mode>"
+ [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+ (match_operand:VSDQ_I_DI 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ emit_insn (gen_lshr<mode>3 (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+;; vrshr_n
+
+(define_insn "aarch64_<sur>shr_n<mode>"
+ [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+ (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VRSHR_N))]
+ "TARGET_SIMD"
+ "*
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ aarch64_simd_const_bounds (operands[2], 1, bit_width + 1);
+ return \"<sur>shr\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";"
+ [(set_attr "simd_type" "simd_shift_imm")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; v(r)sra_n
+
+(define_insn "aarch64_<sur>sra_n<mode>"
+ [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+ (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0")
+ (match_operand:VSDQ_I_DI 2 "register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ VSRA))]
+ "TARGET_SIMD"
+ "*
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ aarch64_simd_const_bounds (operands[3], 1, bit_width + 1);
+ return \"<sur>sra\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";"
+ [(set_attr "simd_type" "simd_shift_imm_acc")
+ (set_attr "simd_mode" "<MODE>")]
+)
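+
+;; Shift-right-accumulate adds the shifted operand 2 into operand 1,
+;; which is tied to the destination via "0".  A sketch, assuming
+;; arm_neon.h:
+;;   uint32x2_t acc;
+;;   acc = vsra_n_u32 (acc, x, 7);  /* usra v.2s, v.2s, #7 */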
+
+;; vs<lr>i_n
+
+(define_insn "aarch64_<sur>s<lr>i_n<mode>"
+ [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+ (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0")
+ (match_operand:VSDQ_I_DI 2 "register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ VSLRI))]
+ "TARGET_SIMD"
+ "*
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ aarch64_simd_const_bounds (operands[3], 1 - <VSLRI:offsetlr>,
+ bit_width - <VSLRI:offsetlr> + 1);
+ return \"s<lr>i\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";"
+ [(set_attr "simd_type" "simd_shift_imm")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; vqshl(u)
+
+(define_insn "aarch64_<sur>qshl<u>_n<mode>"
+ [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
+ (unspec:VSDQ_I [(match_operand:VSDQ_I 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VQSHL_N))]
+ "TARGET_SIMD"
+ "*
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ aarch64_simd_const_bounds (operands[2], 0, bit_width);
+ return \"<sur>qshl<u>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";"
+ [(set_attr "simd_type" "simd_sat_shift_imm")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; vq(r)shr(u)n_n
+
+(define_insn "aarch64_<sur>q<r>shr<u>n_n<mode>"
+ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+ (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VQSHRN_N))]
+ "TARGET_SIMD"
+ "*
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ aarch64_simd_const_bounds (operands[2], 1, bit_width + 1);
+ return \"<sur>q<r>shr<u>n\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>, %2\";"
+ [(set_attr "simd_type" "simd_sat_shiftn_imm")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+
+;; cm(eq|ge|le|lt|gt)
+
+(define_insn "aarch64_cm<cmp><mode>"
+ [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w")
+ (unspec:<V_cmp_result>
+ [(match_operand:VSDQ_I_DI 1 "register_operand" "w,w")
+ (match_operand:VSDQ_I_DI 2 "aarch64_simd_reg_or_zero" "w,Z")]
+ VCMP_S))]
+ "TARGET_SIMD"
+ "@
+ cm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>
+ cm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, #0"
+ [(set_attr "simd_type" "simd_cmp")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; cm(hs|hi|tst)
+
+(define_insn "aarch64_cm<cmp><mode>"
+ [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w")
+ (unspec:<V_cmp_result>
+ [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
+ (match_operand:VSDQ_I_DI 2 "register_operand" "w")]
+ VCMP_U))]
+ "TARGET_SIMD"
+ "cm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+ [(set_attr "simd_type" "simd_cmp")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; addp
+
+(define_insn "aarch64_addp<mode>"
+ [(set (match_operand:VD_BHSI 0 "register_operand" "=w")
+ (unspec:VD_BHSI
+ [(match_operand:VD_BHSI 1 "register_operand" "w")
+ (match_operand:VD_BHSI 2 "register_operand" "w")]
+ UNSPEC_ADDP))]
+ "TARGET_SIMD"
+ "addp\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+ [(set_attr "simd_type" "simd_add")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_insn "aarch64_addpdi"
+ [(set (match_operand:DI 0 "register_operand" "=w")
+ (unspec:DI
+ [(match_operand:V2DI 1 "register_operand" "w")]
+ UNSPEC_ADDP))]
+ "TARGET_SIMD"
+ "addp\t%d0, %1.2d"
+ [(set_attr "simd_type" "simd_add")
+ (set_attr "simd_mode" "DI")]
+)
+
+;; v(max|min)
+
+(define_expand "aarch64_<maxmin><mode>"
+ [(set (match_operand:VDQ_BHSI 0 "register_operand" "=w")
+ (MAXMIN:VDQ_BHSI (match_operand:VDQ_BHSI 1 "register_operand" "w")
+ (match_operand:VDQ_BHSI 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+{
+ emit_insn (gen_<maxmin><mode>3 (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_insn "aarch64_<fmaxmin><mode>"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (unspec:VDQF [(match_operand:VDQF 1 "register_operand" "w")
+ (match_operand:VDQF 2 "register_operand" "w")]
+ FMAXMIN))]
+ "TARGET_SIMD"
+ "<fmaxmin>\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "simd_type" "simd_fminmax")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+;; sqrt
+
+(define_insn "sqrt<mode>2"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (sqrt:VDQF (match_operand:VDQF 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fsqrt\\t%0.<Vtype>, %1.<Vtype>"
+ [(set_attr "simd_type" "simd_fsqrt")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "aarch64_sqrt<mode>"
+ [(match_operand:VDQF 0 "register_operand" "=w")
+ (match_operand:VDQF 1 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ emit_insn (gen_sqrt<mode>2 (operands[0], operands[1]));
+ DONE;
+})
+
+;; Patterns for vector struct loads and stores.
+
+(define_insn "vec_load_lanesoi<mode>"
+ [(set (match_operand:OI 0 "register_operand" "=w")
+ (unspec:OI [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_LD2))]
+ "TARGET_SIMD"
+ "ld2\\t{%S0.<Vtype> - %T0.<Vtype>}, %1"
+ [(set_attr "simd_type" "simd_load2")
+ (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "vec_store_lanesoi<mode>"
+ [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:OI [(match_operand:OI 1 "register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST2))]
+ "TARGET_SIMD"
+ "st2\\t{%S1.<Vtype> - %T1.<Vtype>}, %0"
+ [(set_attr "simd_type" "simd_store2")
+ (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "vec_load_lanesci<mode>"
+ [(set (match_operand:CI 0 "register_operand" "=w")
+ (unspec:CI [(match_operand:CI 1 "aarch64_simd_struct_operand" "Utv")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_LD3))]
+ "TARGET_SIMD"
+ "ld3\\t{%S0.<Vtype> - %U0.<Vtype>}, %1"
+ [(set_attr "simd_type" "simd_load3")
+ (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "vec_store_lanesci<mode>"
+ [(set (match_operand:CI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:CI [(match_operand:CI 1 "register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST3))]
+ "TARGET_SIMD"
+ "st3\\t{%S1.<Vtype> - %U1.<Vtype>}, %0"
+ [(set_attr "simd_type" "simd_store3")
+ (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "vec_load_lanesxi<mode>"
+ [(set (match_operand:XI 0 "register_operand" "=w")
+ (unspec:XI [(match_operand:XI 1 "aarch64_simd_struct_operand" "Utv")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_LD4))]
+ "TARGET_SIMD"
+ "ld4\\t{%S0.<Vtype> - %V0.<Vtype>}, %1"
+ [(set_attr "simd_type" "simd_load4")
+ (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "vec_store_lanesxi<mode>"
+ [(set (match_operand:XI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:XI [(match_operand:XI 1 "register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST4))]
+ "TARGET_SIMD"
+ "st4\\t{%S1.<Vtype> - %V1.<Vtype>}, %0"
+ [(set_attr "simd_type" "simd_store4")
+ (set_attr "simd_mode" "<MODE>")])
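+
+;; These correspond to the de-interleaving vld2..vld4/vst2..vst4
+;; intrinsics; e.g. ld2 splits alternating elements across two registers.
+;; A sketch, assuming arm_neon.h:
+;;   int32x4x2_t p = vld2q_s32 (ptr);  /* ld2 {v0.4s - v1.4s}, [x0] */
+;;   vst2q_s32 (ptr, p);               /* st2 {v0.4s - v1.4s}, [x0] */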
+
+;; Reload patterns for AdvSIMD register list operands.
+
+(define_expand "mov<mode>"
+ [(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand" "")
+ (match_operand:VSTRUCT 1 "aarch64_simd_general_operand" ""))]
+ "TARGET_SIMD"
+{
+ if (can_create_pseudo_p ())
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+ }
+})
+
+(define_insn "*aarch64_mov<mode>"
+ [(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand" "=w,Utv,w")
+ (match_operand:VSTRUCT 1 "aarch64_simd_general_operand" " w,w,Utv"))]
+ "TARGET_SIMD
+ && (register_operand (operands[0], <MODE>mode)
+ || register_operand (operands[1], <MODE>mode))"
+{
+ switch (which_alternative)
+ {
+ case 0: return "#";
+ case 1: return "st1\\t{%S1.16b - %<Vendreg>1.16b}, %0";
+ case 2: return "ld1\\t{%S0.16b - %<Vendreg>0.16b}, %1";
+ default: gcc_unreachable ();
+ }
+}
+ [(set_attr "simd_type" "simd_move,simd_store<nregs>,simd_load<nregs>")
+ (set (attr "length") (symbol_ref "aarch64_simd_attr_length_move (insn)"))
+ (set_attr "simd_mode" "<MODE>")])
+
+(define_split
+ [(set (match_operand:OI 0 "register_operand" "")
+ (match_operand:OI 1 "register_operand" ""))]
+ "TARGET_SIMD && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))]
+{
+ int rdest = REGNO (operands[0]);
+ int rsrc = REGNO (operands[1]);
+ rtx dest[2], src[2];
+
+ dest[0] = gen_rtx_REG (TFmode, rdest);
+ src[0] = gen_rtx_REG (TFmode, rsrc);
+ dest[1] = gen_rtx_REG (TFmode, rdest + 1);
+ src[1] = gen_rtx_REG (TFmode, rsrc + 1);
+
+ aarch64_simd_disambiguate_copy (operands, dest, src, 2);
+})
+
+(define_split
+ [(set (match_operand:CI 0 "register_operand" "")
+ (match_operand:CI 1 "register_operand" ""))]
+ "TARGET_SIMD && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+{
+ int rdest = REGNO (operands[0]);
+ int rsrc = REGNO (operands[1]);
+ rtx dest[3], src[3];
+
+ dest[0] = gen_rtx_REG (TFmode, rdest);
+ src[0] = gen_rtx_REG (TFmode, rsrc);
+ dest[1] = gen_rtx_REG (TFmode, rdest + 1);
+ src[1] = gen_rtx_REG (TFmode, rsrc + 1);
+ dest[2] = gen_rtx_REG (TFmode, rdest + 2);
+ src[2] = gen_rtx_REG (TFmode, rsrc + 2);
+
+ aarch64_simd_disambiguate_copy (operands, dest, src, 3);
+})
+
+(define_split
+ [(set (match_operand:XI 0 "register_operand" "")
+ (match_operand:XI 1 "register_operand" ""))]
+ "TARGET_SIMD && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))
+ (set (match_dup 6) (match_dup 7))]
+{
+ int rdest = REGNO (operands[0]);
+ int rsrc = REGNO (operands[1]);
+ rtx dest[4], src[4];
+
+ dest[0] = gen_rtx_REG (TFmode, rdest);
+ src[0] = gen_rtx_REG (TFmode, rsrc);
+ dest[1] = gen_rtx_REG (TFmode, rdest + 1);
+ src[1] = gen_rtx_REG (TFmode, rsrc + 1);
+ dest[2] = gen_rtx_REG (TFmode, rdest + 2);
+ src[2] = gen_rtx_REG (TFmode, rsrc + 2);
+ dest[3] = gen_rtx_REG (TFmode, rdest + 3);
+ src[3] = gen_rtx_REG (TFmode, rsrc + 3);
+
+ aarch64_simd_disambiguate_copy (operands, dest, src, 4);
+})
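+
+;; The splitters above decompose a register-to-register copy of a 2-, 3-
+;; or 4-register list into TFmode (128-bit) moves after reload;
+;; aarch64_simd_disambiguate_copy orders the component moves so that an
+;; overlapping source list is not clobbered.  E.g. copying {v1,v2} to
+;; {v2,v3} must emit "mov v3, v2" before "mov v2, v1".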
+
+(define_insn "aarch64_ld2<mode>_dreg"
+ [(set (match_operand:OI 0 "register_operand" "=w")
+ (subreg:OI
+ (vec_concat:<VRL2>
+ (vec_concat:<VDBL>
+ (unspec:VD [(match_operand:TI 1 "aarch64_simd_struct_operand" "Utv")]
+ UNSPEC_LD2)
+ (vec_duplicate:VD (const_int 0)))
+ (vec_concat:<VDBL>
+ (unspec:VD [(match_dup 1)]
+ UNSPEC_LD2)
+ (vec_duplicate:VD (const_int 0)))) 0))]
+ "TARGET_SIMD"
+ "ld2\\t{%S0.<Vtype> - %T0.<Vtype>}, %1"
+ [(set_attr "simd_type" "simd_load2")
+ (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_ld2<mode>_dreg"
+ [(set (match_operand:OI 0 "register_operand" "=w")
+ (subreg:OI
+ (vec_concat:<VRL2>
+ (vec_concat:<VDBL>
+ (unspec:DX [(match_operand:TI 1 "aarch64_simd_struct_operand" "Utv")]
+ UNSPEC_LD2)
+ (const_int 0))
+ (vec_concat:<VDBL>
+ (unspec:DX [(match_dup 1)]
+ UNSPEC_LD2)
+ (const_int 0))) 0))]
+ "TARGET_SIMD"
+ "ld1\\t{%S0.1d - %T0.1d}, %1"
+ [(set_attr "simd_type" "simd_load2")
+ (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_ld3<mode>_dreg"
+ [(set (match_operand:CI 0 "register_operand" "=w")
+ (subreg:CI
+ (vec_concat:<VRL3>
+ (vec_concat:<VRL2>
+ (vec_concat:<VDBL>
+ (unspec:VD [(match_operand:EI 1 "aarch64_simd_struct_operand" "Utv")]
+ UNSPEC_LD3)
+ (vec_duplicate:VD (const_int 0)))
+ (vec_concat:<VDBL>
+ (unspec:VD [(match_dup 1)]
+ UNSPEC_LD3)
+ (vec_duplicate:VD (const_int 0))))
+ (vec_concat:<VDBL>
+ (unspec:VD [(match_dup 1)]
+ UNSPEC_LD3)
+ (vec_duplicate:VD (const_int 0)))) 0))]
+ "TARGET_SIMD"
+ "ld3\\t{%S0.<Vtype> - %U0.<Vtype>}, %1"
+ [(set_attr "simd_type" "simd_load3")
+ (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_ld3<mode>_dreg"
+ [(set (match_operand:CI 0 "register_operand" "=w")
+ (subreg:CI
+ (vec_concat:<VRL3>
+ (vec_concat:<VRL2>
+ (vec_concat:<VDBL>
+ (unspec:DX [(match_operand:EI 1 "aarch64_simd_struct_operand" "Utv")]
+ UNSPEC_LD3)
+ (const_int 0))
+ (vec_concat:<VDBL>
+ (unspec:DX [(match_dup 1)]
+ UNSPEC_LD3)
+ (const_int 0)))
+ (vec_concat:<VDBL>
+ (unspec:DX [(match_dup 1)]
+ UNSPEC_LD3)
+ (const_int 0))) 0))]
+ "TARGET_SIMD"
+ "ld1\\t{%S0.1d - %U0.1d}, %1"
+ [(set_attr "simd_type" "simd_load3")
+ (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_ld4<mode>_dreg"
+ [(set (match_operand:XI 0 "register_operand" "=w")
+ (subreg:XI
+ (vec_concat:<VRL4>
+ (vec_concat:<VRL2>
+ (vec_concat:<VDBL>
+ (unspec:VD [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")]
+ UNSPEC_LD4)
+ (vec_duplicate:VD (const_int 0)))
+ (vec_concat:<VDBL>
+ (unspec:VD [(match_dup 1)]
+ UNSPEC_LD4)
+ (vec_duplicate:VD (const_int 0))))
+ (vec_concat:<VRL2>
+ (vec_concat:<VDBL>
+ (unspec:VD [(match_dup 1)]
+ UNSPEC_LD4)
+ (vec_duplicate:VD (const_int 0)))
+ (vec_concat:<VDBL>
+ (unspec:VD [(match_dup 1)]
+ UNSPEC_LD4)
+ (vec_duplicate:VD (const_int 0))))) 0))]
+ "TARGET_SIMD"
+ "ld4\\t{%S0.<Vtype> - %V0.<Vtype>}, %1"
+ [(set_attr "simd_type" "simd_load4")
+ (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_ld4<mode>_dreg"
+ [(set (match_operand:XI 0 "register_operand" "=w")
+ (subreg:XI
+ (vec_concat:<VRL4>
+ (vec_concat:<VRL2>
+ (vec_concat:<VDBL>
+ (unspec:DX [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")]
+ UNSPEC_LD4)
+ (const_int 0))
+ (vec_concat:<VDBL>
+ (unspec:DX [(match_dup 1)]
+ UNSPEC_LD4)
+ (const_int 0)))
+ (vec_concat:<VRL2>
+ (vec_concat:<VDBL>
+ (unspec:DX [(match_dup 1)]
+ UNSPEC_LD4)
+ (const_int 0))
+ (vec_concat:<VDBL>
+ (unspec:DX [(match_dup 1)]
+ UNSPEC_LD4)
+ (const_int 0)))) 0))]
+ "TARGET_SIMD"
+ "ld1\\t{%S0.1d - %V0.1d}, %1"
+ [(set_attr "simd_type" "simd_load4")
+ (set_attr "simd_mode" "<MODE>")])
+
+(define_expand "aarch64_ld<VSTRUCT:nregs><VDC:mode>"
+ [(match_operand:VSTRUCT 0 "register_operand" "=w")
+ (match_operand:DI 1 "register_operand" "r")
+ (unspec:VDC [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ "TARGET_SIMD"
+{
+ enum machine_mode mode = <VSTRUCT:VSTRUCT_DREG>mode;
+ rtx mem = gen_rtx_MEM (mode, operands[1]);
+
+ emit_insn (gen_aarch64_ld<VSTRUCT:nregs><VDC:mode>_dreg (operands[0], mem));
+ DONE;
+})
+
+(define_expand "aarch64_ld<VSTRUCT:nregs><VQ:mode>"
+ [(match_operand:VSTRUCT 0 "register_operand" "=w")
+ (match_operand:DI 1 "register_operand" "r")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ "TARGET_SIMD"
+{
+ enum machine_mode mode = <VSTRUCT:MODE>mode;
+ rtx mem = gen_rtx_MEM (mode, operands[1]);
+
+ emit_insn (gen_vec_load_lanes<VSTRUCT:mode><VQ:mode> (operands[0], mem));
+ DONE;
+})
+
+;; Expanders for builtins to extract vector registers from large
+;; opaque integer modes.
+
+;; D-register list.
+
+(define_expand "aarch64_get_dreg<VSTRUCT:mode><VDC:mode>"
+ [(match_operand:VDC 0 "register_operand" "=w")
+ (match_operand:VSTRUCT 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ int part = INTVAL (operands[2]);
+ rtx temp = gen_reg_rtx (<VDC:VDBL>mode);
+ int offset = part * 16;
+
+ emit_move_insn (temp, gen_rtx_SUBREG (<VDC:VDBL>mode, operands[1], offset));
+ emit_move_insn (operands[0], gen_lowpart (<VDC:MODE>mode, temp));
+ DONE;
+})
+
+;; Q-register list.
+
+(define_expand "aarch64_get_qreg<VSTRUCT:mode><VQ:mode>"
+ [(match_operand:VQ 0 "register_operand" "=w")
+ (match_operand:VSTRUCT 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ int part = INTVAL (operands[2]);
+ int offset = part * 16;
+
+ emit_move_insn (operands[0],
+ gen_rtx_SUBREG (<VQ:MODE>mode, operands[1], offset));
+ DONE;
+})
+
+;; Permuted-store expanders for Neon intrinsics.
+
+(define_insn "aarch64_st2<mode>_dreg"
+ [(set (match_operand:TI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:TI [(match_operand:OI 1 "register_operand" "w")
+ (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST2))]
+ "TARGET_SIMD"
+ "st2\\t{%S1.<Vtype> - %T1.<Vtype>}, %0"
+ [(set_attr "simd_type" "simd_store2")
+ (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_st2<mode>_dreg"
+ [(set (match_operand:TI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:TI [(match_operand:OI 1 "register_operand" "w")
+ (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST2))]
+ "TARGET_SIMD"
+ "st1\\t{%S1.1d - %T1.1d}, %0"
+ [(set_attr "simd_type" "simd_store2")
+ (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_st3<mode>_dreg"
+ [(set (match_operand:EI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:EI [(match_operand:CI 1 "register_operand" "w")
+ (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST3))]
+ "TARGET_SIMD"
+ "st3\\t{%S1.<Vtype> - %U1.<Vtype>}, %0"
+ [(set_attr "simd_type" "simd_store3")
+ (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_st3<mode>_dreg"
+ [(set (match_operand:EI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:EI [(match_operand:CI 1 "register_operand" "w")
+ (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST3))]
+ "TARGET_SIMD"
+ "st1\\t{%S1.1d - %U1.1d}, %0"
+ [(set_attr "simd_type" "simd_store3")
+ (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_st4<mode>_dreg"
+ [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:OI [(match_operand:XI 1 "register_operand" "w")
+ (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST4))]
+ "TARGET_SIMD"
+ "st4\\t{%S1.<Vtype> - %V1.<Vtype>}, %0"
+ [(set_attr "simd_type" "simd_store4")
+ (set_attr "simd_mode" "<MODE>")])
+
+(define_insn "aarch64_st4<mode>_dreg"
+ [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:OI [(match_operand:XI 1 "register_operand" "w")
+ (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST4))]
+ "TARGET_SIMD"
+ "st1\\t{%S1.1d - %V1.1d}, %0"
+ [(set_attr "simd_type" "simd_store4")
+ (set_attr "simd_mode" "<MODE>")])
+
+(define_expand "aarch64_st<VSTRUCT:nregs><VDC:mode>"
+ [(match_operand:DI 0 "register_operand" "r")
+ (match_operand:VSTRUCT 1 "register_operand" "w")
+ (unspec:VDC [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ "TARGET_SIMD"
+{
+ enum machine_mode mode = <VSTRUCT:VSTRUCT_DREG>mode;
+ rtx mem = gen_rtx_MEM (mode, operands[0]);
+
+ emit_insn (gen_aarch64_st<VSTRUCT:nregs><VDC:mode>_dreg (mem, operands[1]));
+ DONE;
+})
+
+(define_expand "aarch64_st<VSTRUCT:nregs><VQ:mode>"
+ [(match_operand:DI 0 "register_operand" "r")
+ (match_operand:VSTRUCT 1 "register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ "TARGET_SIMD"
+{
+ enum machine_mode mode = <VSTRUCT:MODE>mode;
+ rtx mem = gen_rtx_MEM (mode, operands[0]);
+
+ emit_insn (gen_vec_store_lanes<VSTRUCT:mode><VQ:mode> (mem, operands[1]));
+ DONE;
+})
+
+;; Expander for builtins to insert vector registers into large
+;; opaque integer modes.
+
+;; Q-register list.  We don't need a D-reg inserter as we zero-extend
+;; them in arm_neon.h and insert the resulting Q-regs.
+
+(define_expand "aarch64_set_qreg<VSTRUCT:mode><VQ:mode>"
+ [(match_operand:VSTRUCT 0 "register_operand" "+w")
+ (match_operand:VSTRUCT 1 "register_operand" "0")
+ (match_operand:VQ 2 "register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ int part = INTVAL (operands[3]);
+ int offset = part * 16;
+
+ emit_move_insn (operands[0], operands[1]);
+ emit_move_insn (gen_rtx_SUBREG (<VQ:MODE>mode, operands[0], offset),
+ operands[2]);
+ DONE;
+})
+
diff --git a/gcc/config/aarch64/aarch64-tune.md b/gcc/config/aarch64/aarch64-tune.md
new file mode 100644
index 00000000000..a654a91b43b
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-tune.md
@@ -0,0 +1,5 @@
+;; -*- buffer-read-only: t -*-
+;; Generated automatically by gentune.sh from aarch64-cores.def
+(define_attr "tune"
+ "large,small"
+ (const (symbol_ref "((enum attr_tune) aarch64_tune)")))
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
new file mode 100644
index 00000000000..a148ee569b5
--- /dev/null
+++ b/gcc/config/aarch64/aarch64.c
@@ -0,0 +1,6988 @@
+/* Machine description for AArch64 architecture.
+ Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "insn-codes.h"
+#include "rtl.h"
+#include "insn-attr.h"
+#include "tree.h"
+#include "regs.h"
+#include "df.h"
+#include "hard-reg-set.h"
+#include "output.h"
+#include "expr.h"
+#include "reload.h"
+#include "toplev.h"
+#include "target.h"
+#include "target-def.h"
+#include "targhooks.h"
+#include "ggc.h"
+#include "integrate.h"
+#include "tm_p.h"
+#include "recog.h"
+#include "langhooks.h"
+#include "diagnostic-core.h"
+#include "gimple.h"
+#include "optabs.h"
+#include "dwarf2.h"
+
+/* Classifies an address.
+
+ ADDRESS_REG_IMM
+ A simple base register plus immediate offset.
+
+ ADDRESS_REG_WB
+ A base register indexed by immediate offset with writeback.
+
+ ADDRESS_REG_REG
+ A base register indexed by (optionally scaled) register.
+
+ ADDRESS_REG_UXTW
+ A base register indexed by (optionally scaled) zero-extended register.
+
+ ADDRESS_REG_SXTW
+ A base register indexed by (optionally scaled) sign-extended register.
+
+ ADDRESS_LO_SUM
+ A LO_SUM rtx with a base register and "LO12" symbol relocation.
+
+ ADDRESS_SYMBOLIC
+ A constant symbolic address, in the pc-relative literal pool.  */
+
+enum aarch64_address_type {
+ ADDRESS_REG_IMM,
+ ADDRESS_REG_WB,
+ ADDRESS_REG_REG,
+ ADDRESS_REG_UXTW,
+ ADDRESS_REG_SXTW,
+ ADDRESS_LO_SUM,
+ ADDRESS_SYMBOLIC
+};
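+
+/* Illustrative assembly forms for the classes above (a sketch, not an
+   exhaustive list):
+     ADDRESS_REG_IMM    ldr x0, [x1, #16]
+     ADDRESS_REG_WB     ldr x0, [x1, #16]!   or   ldr x0, [x1], #16
+     ADDRESS_REG_REG    ldr x0, [x1, x2, lsl #3]
+     ADDRESS_REG_UXTW   ldr x0, [x1, w2, uxtw #3]
+     ADDRESS_REG_SXTW   ldr x0, [x1, w2, sxtw #3]
+     ADDRESS_LO_SUM     ldr x0, [x1, #:lo12:sym]
+     ADDRESS_SYMBOLIC   ldr x0, lit_pool_label  */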
+
+struct aarch64_address_info {
+ enum aarch64_address_type type;
+ rtx base;
+ rtx offset;
+ int shift;
+ enum aarch64_symbol_type symbol_type;
+};
+
+/* The current code model. */
+enum aarch64_code_model aarch64_cmodel;
+
+#ifdef HAVE_AS_TLS
+#undef TARGET_HAVE_TLS
+#define TARGET_HAVE_TLS 1
+#endif
+
+static bool aarch64_composite_type_p (const_tree, enum machine_mode);
+static bool aarch64_vfp_is_call_or_return_candidate (enum machine_mode,
+ const_tree,
+ enum machine_mode *, int *,
+ bool *);
+static void aarch64_elf_asm_constructor (rtx, int) ATTRIBUTE_UNUSED;
+static void aarch64_elf_asm_destructor (rtx, int) ATTRIBUTE_UNUSED;
+static rtx aarch64_load_tp (rtx);
+static void aarch64_override_options_after_change (void);
+static int aarch64_simd_valid_immediate (rtx, enum machine_mode, int, rtx *,
+ int *, unsigned char *, int *, int *);
+static bool aarch64_vector_mode_supported_p (enum machine_mode);
+static unsigned bit_count (unsigned HOST_WIDE_INT);
+static bool aarch64_const_vec_all_same_int_p (rtx,
+ HOST_WIDE_INT, HOST_WIDE_INT);
+
+/* The processor for which instructions should be scheduled. */
+enum aarch64_processor aarch64_tune = generic;
+
+/* The current tuning set. */
+const struct tune_params *aarch64_tune_params;
+
+/* Mask to specify which instructions we are allowed to generate. */
+unsigned long aarch64_isa_flags = 0;
+
+/* Mask to specify which instruction scheduling options should be used. */
+unsigned long aarch64_tune_flags = 0;
+
+/* Tuning models. */
+static const struct tune_params generic_tunings;
+
+/* A processor implementing AArch64. */
+struct processor
+{
+ const char *const name;
+ enum aarch64_processor core;
+ const char *arch;
+ const unsigned long flags;
+ const struct tune_params *const tune;
+};
+
+/* Processor cores implementing AArch64. */
+static const struct processor all_cores[] =
+{
+#define AARCH64_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
+ {NAME, IDENT, #ARCH, FLAGS | AARCH64_FL_FOR_ARCH##ARCH, &COSTS##_tunings},
+#include "aarch64-cores.def"
+#undef AARCH64_CORE
+ {"generic", generic, "8", AARCH64_FL_FPSIMD | AARCH64_FL_FOR_ARCH8, &generic_tunings},
+ {NULL, aarch64_none, NULL, 0, NULL}
+};
+
+/* Architectures implementing AArch64. */
+static const struct processor all_architectures[] =
+{
+#define AARCH64_ARCH(NAME, CORE, ARCH, FLAGS) \
+ {NAME, CORE, #ARCH, FLAGS, NULL},
+#include "aarch64-arches.def"
+#undef AARCH64_ARCH
+ {"generic", generic, "8", AARCH64_FL_FOR_ARCH8, NULL},
+ {NULL, aarch64_none, NULL, 0, NULL}
+};
+
+/* Target specification. These are populated as commandline arguments
+ are processed, or NULL if not specified. */
+static const struct processor *selected_arch;
+static const struct processor *selected_cpu;
+static const struct processor *selected_tune;
+
+#define AARCH64_CPU_DEFAULT_FLAGS ((selected_cpu) ? selected_cpu->flags : 0)
+
+/* An ISA extension in the co-processor and main instruction set space. */
+struct aarch64_option_extension
+{
+ const char *const name;
+ const unsigned long flags_on;
+ const unsigned long flags_off;
+};
+
+/* ISA extensions in AArch64. */
+static const struct aarch64_option_extension all_extensions[] =
+{
+#define AARCH64_OPT_EXTENSION(NAME, FLAGS_ON, FLAGS_OFF) \
+ {NAME, FLAGS_ON, FLAGS_OFF},
+#include "aarch64-option-extensions.def"
+#undef AARCH64_OPT_EXTENSION
+ {NULL, 0, 0}
+};
+
+/* Used to track the size of an address when generating a pre/post
+ increment address. */
+static enum machine_mode aarch64_memory_reference_mode;
+
+/* Used to force GTY into this file. */
+static GTY(()) int gty_dummy;
+
+/* A table of valid AArch64 "bitmask immediate" values for
+ logical instructions. */
+
+#define AARCH64_NUM_BITMASKS 5334
+static unsigned HOST_WIDE_INT aarch64_bitmasks[AARCH64_NUM_BITMASKS];
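+
+/* The count above follows from the encoding: for each element size E in
+ {2, 4, 8, 16, 32, 64} there are E - 1 run lengths and E rotations,
+ i.e. E * (E - 1) values, and 2 + 12 + 56 + 240 + 992 + 4032 = 5334
+ (see aarch64_build_bitmask_table below). */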
+
+/* Did we set flag_omit_frame_pointer just so
+ aarch64_frame_pointer_required would be called? */
+static bool faked_omit_frame_pointer;
+
+typedef enum aarch64_cond_code
+{
+ AARCH64_EQ = 0, AARCH64_NE, AARCH64_CS, AARCH64_CC, AARCH64_MI, AARCH64_PL,
+ AARCH64_VS, AARCH64_VC, AARCH64_HI, AARCH64_LS, AARCH64_GE, AARCH64_LT,
+ AARCH64_GT, AARCH64_LE, AARCH64_AL, AARCH64_NV
+}
+aarch64_cc;
+
+#define AARCH64_INVERSE_CONDITION_CODE(X) ((aarch64_cc) (((int) X) ^ 1))
+
+/* The condition codes of the processor, and the inverse function. */
+static const char * const aarch64_condition_codes[] =
+{
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
+};
+
+/* Provide a mapping from gcc register numbers to dwarf register numbers. */
+unsigned
+aarch64_dbx_register_number (unsigned regno)
+{
+ if (GP_REGNUM_P (regno))
+ return AARCH64_DWARF_R0 + regno - R0_REGNUM;
+ else if (regno == SP_REGNUM)
+ return AARCH64_DWARF_SP;
+ else if (FP_REGNUM_P (regno))
+ return AARCH64_DWARF_V0 + regno - V0_REGNUM;
+
+ /* Return values >= DWARF_FRAME_REGISTERS indicate that there is no
+ equivalent DWARF register. */
+ return DWARF_FRAME_REGISTERS;
+}
+
+/* Return TRUE if MODE is any of the large INT modes. */
+static bool
+aarch64_vect_struct_mode_p (enum machine_mode mode)
+{
+ return mode == OImode || mode == CImode || mode == XImode;
+}
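+
+/* (OImode, CImode and XImode are the 256-, 384- and 512-bit integer modes
+ used for the 2-, 3- and 4-vector SIMD structure types such as
+ int32x4x2_t; see aarch64-modes.def.) */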
+
+/* Return TRUE if MODE is any of the vector modes. */
+static bool
+aarch64_vector_mode_p (enum machine_mode mode)
+{
+ return aarch64_vector_mode_supported_p (mode)
+ || aarch64_vect_struct_mode_p (mode);
+}
+
+/* Implement target hook TARGET_ARRAY_MODE_SUPPORTED_P. */
+static bool
+aarch64_array_mode_supported_p (enum machine_mode mode,
+ unsigned HOST_WIDE_INT nelems)
+{
+ if (TARGET_SIMD
+ && AARCH64_VALID_SIMD_QREG_MODE (mode)
+ && (nelems >= 2 && nelems <= 4))
+ return true;
+
+ return false;
+}
+
+/* Implement HARD_REGNO_NREGS. */
+
+int
+aarch64_hard_regno_nregs (unsigned regno, enum machine_mode mode)
+{
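+ /* For example TImode occupies two X-registers, while OImode (a pair
+ of vectors) occupies two V-registers, since UNITS_PER_VREG is 16. */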
+ switch (aarch64_regno_regclass (regno))
+ {
+ case FP_REGS:
+ case FP_LO_REGS:
+ return (GET_MODE_SIZE (mode) + UNITS_PER_VREG - 1) / UNITS_PER_VREG;
+ default:
+ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+ }
+ gcc_unreachable ();
+}
+
+/* Implement HARD_REGNO_MODE_OK. */
+
+int
+aarch64_hard_regno_mode_ok (unsigned regno, enum machine_mode mode)
+{
+ if (GET_MODE_CLASS (mode) == MODE_CC)
+ return regno == CC_REGNUM;
+
+ if (regno == SP_REGNUM || regno == FRAME_POINTER_REGNUM
+ || regno == ARG_POINTER_REGNUM)
+ return mode == Pmode;
+
+ if (GP_REGNUM_P (regno) && ! aarch64_vect_struct_mode_p (mode))
+ return 1;
+
+ if (FP_REGNUM_P (regno))
+ {
+ if (aarch64_vect_struct_mode_p (mode))
+ return
+ (regno + aarch64_hard_regno_nregs (regno, mode) - 1) <= V31_REGNUM;
+ else
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return true if calls to DECL should be treated as
+ long-calls (i.e. called via a register). */
+static bool
+aarch64_decl_is_long_call_p (const_tree decl ATTRIBUTE_UNUSED)
+{
+ return false;
+}
+
+/* Return true if calls to symbol-ref SYM should be treated as
+ long-calls (i.e. called via a register). */
+bool
+aarch64_is_long_call_p (rtx sym)
+{
+ return aarch64_decl_is_long_call_p (SYMBOL_REF_DECL (sym));
+}
+
+/* Return true if the offsets to a zero/sign-extract operation
+ represent an expression that matches an extend operation. The
+ operands represent the parameters from
+
+ (extract (mult (reg) (mult_imm)) (extract_imm) (const_int 0)). */
+bool
+aarch64_is_extend_from_extract (enum machine_mode mode, rtx mult_imm,
+ rtx extract_imm)
+{
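+ /* For example EXTRACT_IMM == 34 with MULT_IMM == 4 matches
+ (extend of the low 32 bits of the register) << 2: the low three
+ bits of EXTRACT_IMM give the shift amount and the remainder the
+ width of the value being extended. */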
+ HOST_WIDE_INT mult_val, extract_val;
+
+ if (! CONST_INT_P (mult_imm) || ! CONST_INT_P (extract_imm))
+ return false;
+
+ mult_val = INTVAL (mult_imm);
+ extract_val = INTVAL (extract_imm);
+
+ if (extract_val > 8
+ && extract_val < GET_MODE_BITSIZE (mode)
+ && exact_log2 (extract_val & ~7) > 0
+ && (extract_val & 7) <= 4
+ && mult_val == (1 << (extract_val & 7)))
+ return true;
+
+ return false;
+}
+
+/* Emit an insn that's a simple single-set. Both operands must be
+ known to be valid. */
+inline static rtx
+emit_set_insn (rtx x, rtx y)
+{
+ return emit_insn (gen_rtx_SET (VOIDmode, x, y));
+}
+
+/* X and Y are two things to compare using CODE. Emit the compare insn and
+ return the rtx for the CC register in the appropriate mode. */
+rtx
+aarch64_gen_compare_reg (RTX_CODE code, rtx x, rtx y)
+{
+ enum machine_mode mode = SELECT_CC_MODE (code, x, y);
+ rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
+
+ emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
+ return cc_reg;
+}
+
+/* Build the SYMBOL_REF for __tls_get_addr. */
+
+static GTY(()) rtx tls_get_addr_libfunc;
+
+rtx
+aarch64_tls_get_addr (void)
+{
+ if (!tls_get_addr_libfunc)
+ tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
+ return tls_get_addr_libfunc;
+}
+
+/* Return the TLS model to use for ADDR. */
+
+static enum tls_model
+tls_symbolic_operand_type (rtx addr)
+{
+ enum tls_model tls_kind = TLS_MODEL_NONE;
+ rtx sym, addend;
+
+ if (GET_CODE (addr) == CONST)
+ {
+ split_const (addr, &sym, &addend);
+ if (GET_CODE (sym) == SYMBOL_REF)
+ tls_kind = SYMBOL_REF_TLS_MODEL (sym);
+ }
+ else if (GET_CODE (addr) == SYMBOL_REF)
+ tls_kind = SYMBOL_REF_TLS_MODEL (addr);
+
+ return tls_kind;
+}
+
+/* We allow LO_SUMs in legitimate addresses so that combine can
+ merge address computations where necessary; for generation
+ purposes, however, we emit the address as:
+ RTL Absolute
+ tmp = hi (symbol_ref); adrp x1, foo
+ dest = lo_sum (tmp, symbol_ref); add dest, x1, :lo_12:foo
+ nop
+
+ PIC TLS
+ adrp x1, :got:foo adrp tmp, :tlsgd:foo
+ ldr x1, [:got_lo12:foo] add dest, tmp, :tlsgd_lo12:foo
+ bl __tls_get_addr
+ nop
+
+ Load TLS symbol, depending on TLS mechanism and TLS access model.
+
+ Global Dynamic - Traditional TLS:
+ adrp tmp, :tlsgd:imm
+ add dest, tmp, #:tlsgd_lo12:imm
+ bl __tls_get_addr
+
+ Global Dynamic - TLS Descriptors:
+ adrp dest, :tlsdesc:imm
+ ldr tmp, [dest, #:tlsdesc_lo12:imm]
+ add dest, dest, #:tlsdesc_lo12:imm
+ blr tmp
+ mrs tp, tpidr_el0
+ add dest, dest, tp
+
+ Initial Exec:
+ mrs tp, tpidr_el0
+ adrp tmp, :gottprel:imm
+ ldr dest, [tmp, #:gottprel_lo12:imm]
+ add dest, dest, tp
+
+ Local Exec:
+ mrs tp, tpidr_el0
+ add t0, tp, #:tprel_hi12:imm
+ add t0, #:tprel_lo12_nc:imm
+*/
+
+static void
+aarch64_load_symref_appropriately (rtx dest, rtx imm,
+ enum aarch64_symbol_type type)
+{
+ switch (type)
+ {
+ case SYMBOL_SMALL_ABSOLUTE:
+ {
+ rtx tmp_reg = dest;
+ if (can_create_pseudo_p ())
+ {
+ tmp_reg = gen_reg_rtx (Pmode);
+ }
+
+ emit_move_insn (tmp_reg, gen_rtx_HIGH (Pmode, imm));
+ emit_insn (gen_add_losym (dest, tmp_reg, imm));
+ return;
+ }
+
+ case SYMBOL_SMALL_GOT:
+ {
+ rtx tmp_reg = dest;
+ if (can_create_pseudo_p ())
+ {
+ tmp_reg = gen_reg_rtx (Pmode);
+ }
+ emit_move_insn (tmp_reg, gen_rtx_HIGH (Pmode, imm));
+ emit_insn (gen_ldr_got_small (dest, tmp_reg, imm));
+ return;
+ }
+
+ case SYMBOL_SMALL_TLSGD:
+ {
+ rtx insns;
+ rtx result = gen_rtx_REG (Pmode, R0_REGNUM);
+
+ start_sequence ();
+ emit_call_insn (gen_tlsgd_small (result, imm));
+ insns = get_insns ();
+ end_sequence ();
+
+ RTL_CONST_CALL_P (insns) = 1;
+ emit_libcall_block (insns, dest, result, imm);
+ return;
+ }
+
+ case SYMBOL_SMALL_TLSDESC:
+ {
+ rtx x0 = gen_rtx_REG (Pmode, R0_REGNUM);
+ rtx tp;
+
+ emit_insn (gen_tlsdesc_small (imm));
+ tp = aarch64_load_tp (NULL);
+ emit_insn (gen_rtx_SET (Pmode, dest, gen_rtx_PLUS (Pmode, tp, x0)));
+ set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
+ return;
+ }
+
+ case SYMBOL_SMALL_GOTTPREL:
+ {
+ rtx tmp_reg = gen_reg_rtx (Pmode);
+ rtx tp = aarch64_load_tp (NULL);
+ emit_insn (gen_tlsie_small (tmp_reg, imm));
+ emit_insn (gen_rtx_SET (Pmode, dest, gen_rtx_PLUS (Pmode, tp, tmp_reg)));
+ set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
+ return;
+ }
+
+ case SYMBOL_SMALL_TPREL:
+ {
+ rtx tp = aarch64_load_tp (NULL);
+ emit_insn (gen_tlsle_small (dest, tp, imm));
+ set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
+ return;
+ }
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Emit a move from SRC to DEST. Assume that the move expanders can
+ handle all moves if !can_create_pseudo_p (). The distinction is
+ important because, unlike emit_move_insn, the move expanders know
+ how to force Pmode objects into the constant pool even when the
+ constant pool address is not itself legitimate. */
+static rtx
+aarch64_emit_move (rtx dest, rtx src)
+{
+ return (can_create_pseudo_p ()
+ ? emit_move_insn (dest, src)
+ : emit_move_insn_1 (dest, src));
+}
+
+void
+aarch64_split_doubleword_move (rtx dst, rtx src)
+{
+ rtx low_dst;
+
+ gcc_assert (GET_MODE (dst) == TImode);
+
+ if (REG_P (dst) && REG_P (src))
+ {
+ int src_regno = REGNO (src);
+ int dst_regno = REGNO (dst);
+
+ gcc_assert (GET_MODE (src) == TImode);
+
+ /* Handle r -> w, w -> r. */
+ if (FP_REGNUM_P (dst_regno) && GP_REGNUM_P (src_regno))
+ {
+ emit_insn (gen_aarch64_movtilow_di (dst,
+ gen_lowpart (word_mode, src)));
+ emit_insn (gen_aarch64_movtihigh_di (dst,
+ gen_highpart (word_mode, src)));
+ return;
+ }
+ else if (GP_REGNUM_P (dst_regno) && FP_REGNUM_P (src_regno))
+ {
+ emit_insn (gen_aarch64_movdi_tilow (gen_lowpart (word_mode, dst),
+ src));
+ emit_insn (gen_aarch64_movdi_tihigh (gen_highpart (word_mode, dst),
+ src));
+ return;
+ }
+ /* Fall through to r -> r cases. */
+ }
+
+ low_dst = gen_lowpart (word_mode, dst);
+ if (REG_P (low_dst)
+ && reg_overlap_mentioned_p (low_dst, src))
+ {
+ aarch64_emit_move (gen_highpart (word_mode, dst),
+ gen_highpart_mode (word_mode, TImode, src));
+ aarch64_emit_move (low_dst, gen_lowpart (word_mode, src));
+ }
+ else
+ {
+ aarch64_emit_move (low_dst, gen_lowpart (word_mode, src));
+ aarch64_emit_move (gen_highpart (word_mode, dst),
+ gen_highpart_mode (word_mode, TImode, src));
+ }
+}
+
+static rtx
+aarch64_force_temporary (rtx x, rtx value)
+{
+ if (can_create_pseudo_p ())
+ return force_reg (Pmode, value);
+ else
+ {
+ aarch64_emit_move (x, value);
+ return x;
+ }
+}
+
+
+static rtx
+aarch64_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
+{
+ if (!aarch64_plus_immediate (GEN_INT (offset), DImode))
+ {
+ rtx high;
+ /* Load the full offset into a register. This
+ might be improvable in the future. */
+ high = GEN_INT (offset);
+ offset = 0;
+ high = aarch64_force_temporary (temp, high);
+ reg = aarch64_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
+ }
+ return plus_constant (reg, offset);
+}
+
+void
+aarch64_expand_mov_immediate (rtx dest, rtx imm)
+{
+ enum machine_mode mode = GET_MODE (dest);
+ unsigned HOST_WIDE_INT mask;
+ int i;
+ bool first;
+ unsigned HOST_WIDE_INT val;
+ bool subtargets;
+ rtx subtarget;
+ int one_match, zero_match;
+
+ gcc_assert (mode == SImode || mode == DImode);
+
+ /* Check what kind of symbol it is. */
+ if (GET_CODE (imm) == SYMBOL_REF
+ || GET_CODE (imm) == LABEL_REF
+ || GET_CODE (imm) == CONST)
+ {
+ rtx mem, base, offset;
+ enum aarch64_symbol_type sty;
+
+ /* If we have (const (plus symbol offset)), separate out the offset
+ before we start classifying the symbol. */
+ split_const (imm, &base, &offset);
+
+ sty = aarch64_classify_symbol (base, SYMBOL_CONTEXT_ADR);
+ switch (sty)
+ {
+ case SYMBOL_FORCE_TO_MEM:
+
+ if (offset != const0_rtx
+ && targetm.cannot_force_const_mem (mode, imm))
+ {
+ gcc_assert (can_create_pseudo_p ());
+ base = aarch64_force_temporary (dest, base);
+ base = aarch64_add_offset (NULL, base, INTVAL (offset));
+ aarch64_emit_move (dest, base);
+ return;
+ }
+ mem = force_const_mem (mode, imm);
+ gcc_assert (mem);
+ emit_insn (gen_rtx_SET (VOIDmode, dest, mem));
+ return;
+
+ case SYMBOL_SMALL_TLSGD:
+ case SYMBOL_SMALL_TLSDESC:
+ case SYMBOL_SMALL_GOTTPREL:
+ case SYMBOL_SMALL_GOT:
+ if (offset != const0_rtx)
+ {
+ gcc_assert (can_create_pseudo_p ());
+ base = aarch64_force_temporary (dest, base);
+ base = aarch64_add_offset (NULL, base, INTVAL (offset));
+ aarch64_emit_move (dest, base);
+ return;
+ }
+ /* FALLTHRU */
+
+ case SYMBOL_SMALL_TPREL:
+ case SYMBOL_SMALL_ABSOLUTE:
+ aarch64_load_symref_appropriately (dest, imm, sty);
+ return;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ if (CONST_INT_P (imm) && aarch64_move_imm (INTVAL (imm), mode))
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, dest, imm));
+ return;
+ }
+
+ if (!CONST_INT_P (imm))
+ {
+ if (GET_CODE (imm) == HIGH)
+ emit_insn (gen_rtx_SET (VOIDmode, dest, imm));
+ else
+ {
+ rtx mem = force_const_mem (mode, imm);
+ gcc_assert (mem);
+ emit_insn (gen_rtx_SET (VOIDmode, dest, mem));
+ }
+
+ return;
+ }
+
+ if (mode == SImode)
+ {
+ /* We know we can't do this in 1 insn, and we must be able to do it
+ in two; so don't mess around looking for sequences that don't buy
+ us anything. */
+ emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (INTVAL (imm) & 0xffff)));
+ emit_insn (gen_insv_immsi (dest, GEN_INT (16),
+ GEN_INT ((INTVAL (imm) >> 16) & 0xffff)));
+ return;
+ }
+
+ /* Remaining cases are all for DImode. */
+
+ val = INTVAL (imm);
+ subtargets = optimize && can_create_pseudo_p ();
+
+ one_match = 0;
+ zero_match = 0;
+ mask = 0xffff;
+
+ for (i = 0; i < 64; i += 16, mask <<= 16)
+ {
+ if ((val & mask) == 0)
+ zero_match++;
+ else if ((val & mask) == mask)
+ one_match++;
+ }
+
+ if (one_match == 2)
+ {
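+ /* Forcing the lowest non-0xffff chunk to all-ones leaves a value
+ that a single MOVN can load; one MOVK then restores that chunk.
+ For example 0xffff1234ffff5678 is loaded as 0xffff1234ffffffff
+ followed by a MOVK of 0x5678. */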
+ mask = 0xffff;
+ for (i = 0; i < 64; i += 16, mask <<= 16)
+ {
+ if ((val & mask) != mask)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (val | mask)));
+ emit_insn (gen_insv_immdi (dest, GEN_INT (i),
+ GEN_INT ((val >> i) & 0xffff)));
+ return;
+ }
+ }
+ gcc_unreachable ();
+ }
+
+ if (zero_match == 2)
+ goto simple_sequence;
+
+ mask = 0x0ffff0000UL;
+ for (i = 16; i < 64; i += 16, mask <<= 16)
+ {
+ HOST_WIDE_INT comp = mask & ~(mask - 1);
+
+ if (aarch64_uimm12_shift (val - (val & mask)))
+ {
+ subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
+
+ emit_insn (gen_rtx_SET (VOIDmode, subtarget, GEN_INT (val & mask)));
+ emit_insn (gen_adddi3 (dest, subtarget,
+ GEN_INT (val - (val & mask))));
+ return;
+ }
+ else if (aarch64_uimm12_shift (-(val - ((val + comp) & mask))))
+ {
+ subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
+
+ emit_insn (gen_rtx_SET (VOIDmode, subtarget,
+ GEN_INT ((val + comp) & mask)));
+ emit_insn (gen_adddi3 (dest, subtarget,
+ GEN_INT (val - ((val + comp) & mask))));
+ return;
+ }
+ else if (aarch64_uimm12_shift (val - ((val - comp) | ~mask)))
+ {
+ subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
+
+ emit_insn (gen_rtx_SET (VOIDmode, subtarget,
+ GEN_INT ((val - comp) | ~mask)));
+ emit_insn (gen_adddi3 (dest, subtarget,
+ GEN_INT (val - ((val - comp) | ~mask))));
+ return;
+ }
+ else if (aarch64_uimm12_shift (-(val - (val | ~mask))))
+ {
+ subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
+
+ emit_insn (gen_rtx_SET (VOIDmode, subtarget,
+ GEN_INT (val | ~mask)));
+ emit_insn (gen_adddi3 (dest, subtarget,
+ GEN_INT (val - (val | ~mask))));
+ return;
+ }
+ }
+
+ /* See if we can do it by arithmetically combining two
+ immediates. */
+ for (i = 0; i < AARCH64_NUM_BITMASKS; i++)
+ {
+ int j;
+ mask = 0xffff;
+
+ if (aarch64_uimm12_shift (val - aarch64_bitmasks[i])
+ || aarch64_uimm12_shift (-val + aarch64_bitmasks[i]))
+ {
+ subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
+ emit_insn (gen_rtx_SET (VOIDmode, subtarget,
+ GEN_INT (aarch64_bitmasks[i])));
+ emit_insn (gen_adddi3 (dest, subtarget,
+ GEN_INT (val - aarch64_bitmasks[i])));
+ return;
+ }
+
+ for (j = 0; j < 64; j += 16, mask <<= 16)
+ {
+ if ((aarch64_bitmasks[i] & ~mask) == (val & ~mask))
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, dest,
+ GEN_INT (aarch64_bitmasks[i])));
+ emit_insn (gen_insv_immdi (dest, GEN_INT (j),
+ GEN_INT ((val >> j) & 0xffff)));
+ return;
+ }
+ }
+ }
+
+ /* See if we can do it by logically combining two immediates. */
+ for (i = 0; i < AARCH64_NUM_BITMASKS; i++)
+ {
+ if ((aarch64_bitmasks[i] & val) == aarch64_bitmasks[i])
+ {
+ int j;
+
+ for (j = i + 1; j < AARCH64_NUM_BITMASKS; j++)
+ if (val == (aarch64_bitmasks[i] | aarch64_bitmasks[j]))
+ {
+ subtarget = subtargets ? gen_reg_rtx (mode) : dest;
+ emit_insn (gen_rtx_SET (VOIDmode, subtarget,
+ GEN_INT (aarch64_bitmasks[i])));
+ emit_insn (gen_iordi3 (dest, subtarget,
+ GEN_INT (aarch64_bitmasks[j])));
+ return;
+ }
+ }
+ else if ((val & aarch64_bitmasks[i]) == val)
+ {
+ int j;
+
+ for (j = i + 1; j < AARCH64_NUM_BITMASKS; j++)
+ if (val == (aarch64_bitmasks[j] & aarch64_bitmasks[i]))
+ {
+ subtarget = subtargets ? gen_reg_rtx (mode) : dest;
+ emit_insn (gen_rtx_SET (VOIDmode, subtarget,
+ GEN_INT (aarch64_bitmasks[j])));
+ emit_insn (gen_anddi3 (dest, subtarget,
+ GEN_INT (aarch64_bitmasks[i])));
+ return;
+ }
+ }
+ }
+
+ simple_sequence:
+ first = true;
+ mask = 0xffff;
+ for (i = 0; i < 64; i += 16, mask <<= 16)
+ {
+ if ((val & mask) != 0)
+ {
+ if (first)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, dest,
+ GEN_INT (val & mask)));
+ first = false;
+ }
+ else
+ emit_insn (gen_insv_immdi (dest, GEN_INT (i),
+ GEN_INT ((val >> i) & 0xffff)));
+ }
+ }
+}
+
+static bool
+aarch64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
+{
+ /* Indirect calls are not currently supported. */
+ if (decl == NULL)
+ return false;
+
+ /* Cannot tail-call to long calls, since these are outside the
+ range of a branch instruction (we could handle this if we added
+ support for indirect tail-calls). */
+ if (aarch64_decl_is_long_call_p (decl))
+ return false;
+
+ return true;
+}
+
+/* Implement TARGET_PASS_BY_REFERENCE. */
+
+static bool
+aarch64_pass_by_reference (cumulative_args_t pcum ATTRIBUTE_UNUSED,
+ enum machine_mode mode,
+ const_tree type,
+ bool named ATTRIBUTE_UNUSED)
+{
+ HOST_WIDE_INT size;
+ enum machine_mode dummymode;
+ int nregs;
+
+ /* GET_MODE_SIZE (BLKmode) is useless since it is 0. */
+ size = (mode == BLKmode && type)
+ ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
+
+ if (type)
+ {
+ /* Arrays always passed by reference. */
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ return true;
+ /* For other aggregates, use their size in bytes. */
+ if (AGGREGATE_TYPE_P (type))
+ size = int_size_in_bytes (type);
+ }
+
+ /* Variable-sized arguments are always passed by reference. */
+ if (size < 0)
+ return true;
+
+ /* Can this be a candidate to be passed in fp/simd register(s)? */
+ if (aarch64_vfp_is_call_or_return_candidate (mode, type,
+ &dummymode, &nregs,
+ NULL))
+ return false;
+
+ /* Arguments which are variable-sized or larger than 2 registers are
+ passed by reference unless they are a homogeneous floating-point
+ aggregate. */
+ return size > 2 * UNITS_PER_WORD;
+}
+
+/* Return TRUE if VALTYPE is returned in the most significant bits of a
+ register, i.e. padded in its least significant bits. */
+static bool
+aarch64_return_in_msb (const_tree valtype)
+{
+ enum machine_mode dummy_mode;
+ int dummy_int;
+
+ /* Never happens in little-endian mode. */
+ if (!BYTES_BIG_ENDIAN)
+ return false;
+
+ /* Only composite types smaller than or equal to 16 bytes can
+ potentially be returned in registers. */
+ if (!aarch64_composite_type_p (valtype, TYPE_MODE (valtype))
+ || int_size_in_bytes (valtype) <= 0
+ || int_size_in_bytes (valtype) > 16)
+ return false;
+
+ /* But not a composite that is an HFA (Homogeneous Floating-point Aggregate)
+ or an HVA (Homogeneous Short-Vector Aggregate); such a special composite
+ is always passed/returned in the least significant bits of fp/simd
+ register(s). */
+ if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (valtype), valtype,
+ &dummy_mode, &dummy_int, NULL))
+ return false;
+
+ return true;
+}
+
+/* Implement TARGET_FUNCTION_VALUE.
+ Define how to find the value returned by a function. */
+
+static rtx
+aarch64_function_value (const_tree type, const_tree func,
+ bool outgoing ATTRIBUTE_UNUSED)
+{
+ enum machine_mode mode;
+ int unsignedp;
+ int count;
+ enum machine_mode ag_mode;
+
+ mode = TYPE_MODE (type);
+ if (INTEGRAL_TYPE_P (type))
+ mode = promote_function_mode (type, mode, &unsignedp, func, 1);
+
+ if (aarch64_return_in_msb (type))
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (type);
+
+ if (size % UNITS_PER_WORD != 0)
+ {
+ size += UNITS_PER_WORD - size % UNITS_PER_WORD;
+ mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
+ }
+ }
+
+ if (aarch64_vfp_is_call_or_return_candidate (mode, type,
+ &ag_mode, &count, NULL))
+ {
+ if (!aarch64_composite_type_p (type, mode))
+ {
+ gcc_assert (count == 1 && mode == ag_mode);
+ return gen_rtx_REG (mode, V0_REGNUM);
+ }
+ else
+ {
+ int i;
+ rtx par;
+
+ par = gen_rtx_PARALLEL (mode, rtvec_alloc (count));
+ for (i = 0; i < count; i++)
+ {
+ rtx tmp = gen_rtx_REG (ag_mode, V0_REGNUM + i);
+ tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
+ GEN_INT (i * GET_MODE_SIZE (ag_mode)));
+ XVECEXP (par, 0, i) = tmp;
+ }
+ return par;
+ }
+ }
+ else
+ return gen_rtx_REG (mode, R0_REGNUM);
+}
+
+/* Implement TARGET_FUNCTION_VALUE_REGNO_P.
+ Return true if REGNO is the number of a hard register in which the value
+ of a called function may come back. */
+
+static bool
+aarch64_function_value_regno_p (const unsigned int regno)
+{
+ /* Maximum of 16 bytes can be returned in the general registers. Examples
+ of 16-byte return values are: 128-bit integers and 16-byte small
+ structures (excluding homogeneous floating-point aggregates). */
+ if (regno == R0_REGNUM || regno == R1_REGNUM)
+ return true;
+
+ /* Up to four fp/simd registers can return a function value, e.g. a
+ homogeneous floating-point aggregate having four members. */
+ if (regno >= V0_REGNUM && regno < V0_REGNUM + HA_MAX_NUM_FLDS)
+ return !TARGET_GENERAL_REGS_ONLY;
+
+ return false;
+}
+
+/* Implement TARGET_RETURN_IN_MEMORY.
+
+ If the type T of the result of a function is such that
+ void func (T arg)
+ would require that arg be passed as a value in a register (or set of
+ registers) according to the parameter passing rules, then the result
+ is returned in the same registers as would be used for such an
+ argument. */
+
+static bool
+aarch64_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
+{
+ HOST_WIDE_INT size;
+ enum machine_mode ag_mode;
+ int count;
+
+ if (!AGGREGATE_TYPE_P (type)
+ && TREE_CODE (type) != COMPLEX_TYPE
+ && TREE_CODE (type) != VECTOR_TYPE)
+ /* Simple scalar types always returned in registers. */
+ return false;
+
+ if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (type),
+ type,
+ &ag_mode,
+ &count,
+ NULL))
+ return false;
+
+ /* Types larger than 2 registers returned in memory. */
+ size = int_size_in_bytes (type);
+ return (size < 0 || size > 2 * UNITS_PER_WORD);
+}
+
+static bool
+aarch64_vfp_is_call_candidate (cumulative_args_t pcum_v, enum machine_mode mode,
+ const_tree type, int *nregs)
+{
+ CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
+ return aarch64_vfp_is_call_or_return_candidate (mode,
+ type,
+ &pcum->aapcs_vfp_rmode,
+ nregs,
+ NULL);
+}
+
+/* Given MODE and TYPE of a function argument, return the alignment in
+ bits. The idea is to suppress any stronger alignment requested by
+ the user and opt for the natural alignment (specified in AAPCS64 \S 4.1).
+ This is a helper function for local use only. */
+
+static unsigned int
+aarch64_function_arg_alignment (enum machine_mode mode, const_tree type)
+{
+ unsigned int alignment;
+
+ if (type)
+ {
+ if (!integer_zerop (TYPE_SIZE (type)))
+ {
+ if (TYPE_MODE (type) == mode)
+ alignment = TYPE_ALIGN (type);
+ else
+ alignment = GET_MODE_ALIGNMENT (mode);
+ }
+ else
+ alignment = 0;
+ }
+ else
+ alignment = GET_MODE_ALIGNMENT (mode);
+
+ return alignment;
+}
+
+/* Layout a function argument according to the AAPCS64 rules. The rule
+ numbers refer to the rule numbers in the AAPCS64. */
+
+static void
+aarch64_layout_arg (cumulative_args_t pcum_v, enum machine_mode mode,
+ const_tree type,
+ bool named ATTRIBUTE_UNUSED)
+{
+ CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
+ int ncrn, nvrn, nregs;
+ bool allocate_ncrn, allocate_nvrn;
+
+ /* We need to do this once per argument. */
+ if (pcum->aapcs_arg_processed)
+ return;
+
+ pcum->aapcs_arg_processed = true;
+
+ allocate_ncrn = (type) ? !(FLOAT_TYPE_P (type)) : !FLOAT_MODE_P (mode);
+ allocate_nvrn = aarch64_vfp_is_call_candidate (pcum_v,
+ mode,
+ type,
+ &nregs);
+
+ /* allocate_ncrn may be a false positive, but allocate_nvrn is quite reliable.
+ The following code thus handles passing by SIMD/FP registers first. */
+
+ nvrn = pcum->aapcs_nvrn;
+
+ /* C1 - C5 for floating point, homogeneous floating-point aggregates (HFA)
+ and homogeneous short-vector aggregates (HVA). */
+ if (allocate_nvrn)
+ {
+ if (nvrn + nregs <= NUM_FP_ARG_REGS)
+ {
+ pcum->aapcs_nextnvrn = nvrn + nregs;
+ if (!aarch64_composite_type_p (type, mode))
+ {
+ gcc_assert (nregs == 1);
+ pcum->aapcs_reg = gen_rtx_REG (mode, V0_REGNUM + nvrn);
+ }
+ else
+ {
+ rtx par;
+ int i;
+ par = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs));
+ for (i = 0; i < nregs; i++)
+ {
+ rtx tmp = gen_rtx_REG (pcum->aapcs_vfp_rmode,
+ V0_REGNUM + nvrn + i);
+ tmp = gen_rtx_EXPR_LIST
+ (VOIDmode, tmp,
+ GEN_INT (i * GET_MODE_SIZE (pcum->aapcs_vfp_rmode)));
+ XVECEXP (par, 0, i) = tmp;
+ }
+ pcum->aapcs_reg = par;
+ }
+ return;
+ }
+ else
+ {
+ /* C.3 NSRN is set to 8. */
+ pcum->aapcs_nextnvrn = NUM_FP_ARG_REGS;
+ goto on_stack;
+ }
+ }
+
+ ncrn = pcum->aapcs_ncrn;
+ nregs = ((type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode))
+ + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+
+ /* C6 - C9, though the sign- and zero-extension semantics are
+ handled elsewhere. This is the case where the argument fits
+ entirely in general registers. */
+ if (allocate_ncrn && (ncrn + nregs <= NUM_ARG_REGS))
+ {
+ unsigned int alignment = aarch64_function_arg_alignment (mode, type);
+
+ gcc_assert (nregs == 0 || nregs == 1 || nregs == 2);
+
+ /* C.8: if the argument has an alignment of 16 bytes then the NGRN is
+ rounded up to the next even number. */
+ if (nregs == 2 && alignment == 16 * BITS_PER_UNIT && ncrn % 2)
+ {
+ ++ncrn;
+ gcc_assert (ncrn + nregs <= NUM_ARG_REGS);
+ }
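+ /* For example a 16-byte-aligned __int128 arriving when NGRN == 1
+ is passed in x2/x3, leaving x1 unused. */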
+ /* NREGS can be 0 when e.g. an empty structure is to be passed.
+ A reg is still generated for it, but the caller should be smart
+ enough not to use it. */
+ if (nregs == 0 || nregs == 1 || GET_MODE_CLASS (mode) == MODE_INT)
+ {
+ pcum->aapcs_reg = gen_rtx_REG (mode, R0_REGNUM + ncrn);
+ }
+ else
+ {
+ rtx par;
+ int i;
+
+ par = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs));
+ for (i = 0; i < nregs; i++)
+ {
+ rtx tmp = gen_rtx_REG (word_mode, R0_REGNUM + ncrn + i);
+ tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
+ GEN_INT (i * UNITS_PER_WORD));
+ XVECEXP (par, 0, i) = tmp;
+ }
+ pcum->aapcs_reg = par;
+ }
+
+ pcum->aapcs_nextncrn = ncrn + nregs;
+ return;
+ }
+
+ /* C.11 */
+ pcum->aapcs_nextncrn = NUM_ARG_REGS;
+
+ /* The argument is passed on stack; record the needed number of words for
+ this argument (we can re-use NREGS) and align the total size if
+ necessary. */
+on_stack:
+ pcum->aapcs_stack_words = nregs;
+ if (aarch64_function_arg_alignment (mode, type) == 16 * BITS_PER_UNIT)
+ pcum->aapcs_stack_size = AARCH64_ROUND_UP (pcum->aapcs_stack_size,
+ 16 / UNITS_PER_WORD) + 1;
+ return;
+}
+
+/* Implement TARGET_FUNCTION_ARG. */
+
+static rtx
+aarch64_function_arg (cumulative_args_t pcum_v, enum machine_mode mode,
+ const_tree type, bool named)
+{
+ CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
+ gcc_assert (pcum->pcs_variant == ARM_PCS_AAPCS64);
+
+ if (mode == VOIDmode)
+ return NULL_RTX;
+
+ aarch64_layout_arg (pcum_v, mode, type, named);
+ return pcum->aapcs_reg;
+}
+
+void
+aarch64_init_cumulative_args (CUMULATIVE_ARGS *pcum,
+ const_tree fntype ATTRIBUTE_UNUSED,
+ rtx libname ATTRIBUTE_UNUSED,
+ const_tree fndecl ATTRIBUTE_UNUSED,
+ unsigned n_named ATTRIBUTE_UNUSED)
+{
+ pcum->aapcs_ncrn = 0;
+ pcum->aapcs_nvrn = 0;
+ pcum->aapcs_nextncrn = 0;
+ pcum->aapcs_nextnvrn = 0;
+ pcum->pcs_variant = ARM_PCS_AAPCS64;
+ pcum->aapcs_reg = NULL_RTX;
+ pcum->aapcs_arg_processed = false;
+ pcum->aapcs_stack_words = 0;
+ pcum->aapcs_stack_size = 0;
+
+ return;
+}
+
+static void
+aarch64_function_arg_advance (cumulative_args_t pcum_v,
+ enum machine_mode mode,
+ const_tree type,
+ bool named)
+{
+ CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
+ if (pcum->pcs_variant == ARM_PCS_AAPCS64)
+ {
+ aarch64_layout_arg (pcum_v, mode, type, named);
+ gcc_assert ((pcum->aapcs_reg != NULL_RTX)
+ != (pcum->aapcs_stack_words != 0));
+ pcum->aapcs_arg_processed = false;
+ pcum->aapcs_ncrn = pcum->aapcs_nextncrn;
+ pcum->aapcs_nvrn = pcum->aapcs_nextnvrn;
+ pcum->aapcs_stack_size += pcum->aapcs_stack_words;
+ pcum->aapcs_stack_words = 0;
+ pcum->aapcs_reg = NULL_RTX;
+ }
+}
+
+bool
+aarch64_function_arg_regno_p (unsigned regno)
+{
+ return ((GP_REGNUM_P (regno) && regno < R0_REGNUM + NUM_ARG_REGS)
+ || (FP_REGNUM_P (regno) && regno < V0_REGNUM + NUM_FP_ARG_REGS));
+}
+
+/* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
+ PARM_BOUNDARY bits of alignment, but will be given anything up
+ to STACK_BOUNDARY bits if the type requires it. This makes sure
+ that both before and after the layout of each argument, the Next
+ Stacked Argument Address (NSAA) will have a minimum alignment of
+ 8 bytes. */
+
+static unsigned int
+aarch64_function_arg_boundary (enum machine_mode mode, const_tree type)
+{
+ unsigned int alignment = aarch64_function_arg_alignment (mode, type);
+
+ if (alignment < PARM_BOUNDARY)
+ alignment = PARM_BOUNDARY;
+ if (alignment > STACK_BOUNDARY)
+ alignment = STACK_BOUNDARY;
+ return alignment;
+}
+
+/* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
+
+ Return true if an argument passed on the stack should be padded upwards,
+ i.e. if the least-significant byte of the stack slot has useful data.
+
+ Small aggregate types are placed in the lowest memory address.
+
+ The related parameter passing rules are B.4, C.3, C.5 and C.14. */
+
+bool
+aarch64_pad_arg_upward (enum machine_mode mode, const_tree type)
+{
+ /* On little-endian targets, the least significant byte of every stack
+ argument is passed at the lowest byte address of the stack slot. */
+ if (!BYTES_BIG_ENDIAN)
+ return true;
+
+ /* Otherwise, integral types and floating point types are padded downward:
+ the least significant byte of a stack argument is passed at the highest
+ byte address of the stack slot. */
+ if (type
+ ? (INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type))
+ : (SCALAR_INT_MODE_P (mode) || SCALAR_FLOAT_MODE_P (mode)))
+ return false;
+
+ /* Everything else padded upward, i.e. data in first byte of stack slot. */
+ return true;
+}
+
+/* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
+
+ It specifies padding for the last (and possibly the only)
+ element of a block move between registers and memory. Viewing
+ the block as it sits in memory, padding upward means that the
+ last element is padded after its most significant byte, while
+ with downward padding the last element is padded on its least
+ significant byte side.
+
+ Small aggregates and small complex types are always padded
+ upwards.
+
+ We don't need to worry about homogeneous floating-point or
+ short-vector aggregates; their move is not affected by the
+ padding direction determined here. Regardless of endianness,
+ each element of such an aggregate is put in the least
+ significant bits of a fp/simd register.
+
+ Return !BYTES_BIG_ENDIAN if the least significant byte of the
+ register has useful data, and return the opposite if the most
+ significant byte does. */
+
+bool
+aarch64_pad_reg_upward (enum machine_mode mode, const_tree type,
+ bool first ATTRIBUTE_UNUSED)
+{
+
+ /* Small composite types are always padded upward. */
+ if (BYTES_BIG_ENDIAN && aarch64_composite_type_p (type, mode))
+ {
+ HOST_WIDE_INT size = (type ? int_size_in_bytes (type)
+ : GET_MODE_SIZE (mode));
+ if (size < 2 * UNITS_PER_WORD)
+ return true;
+ }
+
+ /* Otherwise, use the default padding. */
+ return !BYTES_BIG_ENDIAN;
+}
+
+static enum machine_mode
+aarch64_libgcc_cmp_return_mode (void)
+{
+ return SImode;
+}
+
+static bool
+aarch64_frame_pointer_required (void)
+{
+ /* If the function contains dynamic stack allocations, we need to
+ use the frame pointer to access the static parts of the frame. */
+ if (cfun->calls_alloca)
+ return true;
+
+ /* We may have turned flag_omit_frame_pointer on in order to have this
+ function called; if we did, we also set the 'faked_omit_frame_pointer'
+ flag, which we check here. If flag_omit_frame_pointer was set
+ normally, then we return false (no frame pointer required) in all
+ cases. */
+
+ if (flag_omit_frame_pointer && !faked_omit_frame_pointer)
+ return false;
+ else if (flag_omit_leaf_frame_pointer)
+ return !current_function_is_leaf;
+ return true;
+}
+
+/* Mark the registers that need to be saved by the callee and calculate
+ the size of the callee-saved registers area and frame record (both FP
+ and LR may be omitted). */
+static void
+aarch64_layout_frame (void)
+{
+ HOST_WIDE_INT offset = 0;
+ int regno;
+
+ if (reload_completed && cfun->machine->frame.laid_out)
+ return;
+
+ cfun->machine->frame.fp_lr_offset = 0;
+
+ /* First mark all the registers that really need to be saved... */
+ for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
+ cfun->machine->frame.reg_offset[regno] = -1;
+
+ for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
+ cfun->machine->frame.reg_offset[regno] = -1;
+
+ /* ... that includes the eh data registers (if needed)... */
+ if (crtl->calls_eh_return)
+ for (regno = 0; EH_RETURN_DATA_REGNO (regno) != INVALID_REGNUM; regno++)
+ cfun->machine->frame.reg_offset[EH_RETURN_DATA_REGNO (regno)] = 0;
+
+ /* ... and any callee saved register that dataflow says is live. */
+ for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
+ if (df_regs_ever_live_p (regno)
+ && !call_used_regs[regno])
+ cfun->machine->frame.reg_offset[regno] = 0;
+
+ for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
+ if (df_regs_ever_live_p (regno)
+ && !call_used_regs[regno])
+ cfun->machine->frame.reg_offset[regno] = 0;
+
+ if (frame_pointer_needed)
+ {
+ cfun->machine->frame.reg_offset[R30_REGNUM] = 0;
+ cfun->machine->frame.reg_offset[R29_REGNUM] = 0;
+ cfun->machine->frame.hardfp_offset = 2 * UNITS_PER_WORD;
+ }
+
+ /* Now assign stack slots for them. */
+ for (regno = R0_REGNUM; regno <= R28_REGNUM; regno++)
+ if (cfun->machine->frame.reg_offset[regno] != -1)
+ {
+ cfun->machine->frame.reg_offset[regno] = offset;
+ offset += UNITS_PER_WORD;
+ }
+
+ for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
+ if (cfun->machine->frame.reg_offset[regno] != -1)
+ {
+ cfun->machine->frame.reg_offset[regno] = offset;
+ offset += UNITS_PER_WORD;
+ }
+
+ if (frame_pointer_needed)
+ {
+ cfun->machine->frame.reg_offset[R29_REGNUM] = offset;
+ offset += UNITS_PER_WORD;
+ cfun->machine->frame.fp_lr_offset = UNITS_PER_WORD;
+ }
+
+ if (cfun->machine->frame.reg_offset[R30_REGNUM] != -1)
+ {
+ cfun->machine->frame.reg_offset[R30_REGNUM] = offset;
+ offset += UNITS_PER_WORD;
+ cfun->machine->frame.fp_lr_offset += UNITS_PER_WORD;
+ }
+
+ cfun->machine->frame.padding0 =
+ (AARCH64_ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT) - offset);
+ offset = AARCH64_ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT);
+
+ cfun->machine->frame.saved_regs_size = offset;
+ cfun->machine->frame.laid_out = true;
+}
+
+/* Make the last instruction frame-related and note that it performs
+ the operation described by FRAME_PATTERN. */
+
+static void
+aarch64_set_frame_expr (rtx frame_pattern)
+{
+ rtx insn;
+
+ insn = get_last_insn ();
+ RTX_FRAME_RELATED_P (insn) = 1;
+ RTX_FRAME_RELATED_P (frame_pattern) = 1;
+ REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ frame_pattern,
+ REG_NOTES (insn));
+}
+
+static bool
+aarch64_register_saved_on_entry (int regno)
+{
+ return cfun->machine->frame.reg_offset[regno] != -1;
+}
+
+
+static void
+aarch64_save_or_restore_fprs (int start_offset, int increment,
+ bool restore, rtx base_rtx)
+{
+ unsigned regno;
+ unsigned regno2;
+ rtx insn;
+ rtx (*gen_mem_ref) (enum machine_mode, rtx)
+ = frame_pointer_needed ? gen_frame_mem : gen_rtx_MEM;
+
+
+ for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
+ {
+ if (aarch64_register_saved_on_entry (regno))
+ {
+ rtx mem;
+ mem = gen_mem_ref (DFmode,
+ plus_constant (base_rtx,
+ start_offset));
+
+ for (regno2 = regno + 1;
+ regno2 <= V31_REGNUM
+ && !aarch64_register_saved_on_entry (regno2);
+ regno2++)
+ {
+ /* Empty loop. */
+ }
+ if (regno2 <= V31_REGNUM
+ && aarch64_register_saved_on_entry (regno2))
+ {
+ rtx mem2;
+ /* Next highest register to be saved. */
+ mem2 = gen_mem_ref (DFmode,
+ plus_constant
+ (base_rtx,
+ start_offset + increment));
+ if (restore == false)
+ {
+ insn = emit_insn
+ (gen_store_pairdf (mem, gen_rtx_REG (DFmode, regno),
+ mem2, gen_rtx_REG (DFmode, regno2)));
+
+ }
+ else
+ {
+ insn = emit_insn
+ (gen_load_pairdf (gen_rtx_REG (DFmode, regno), mem,
+ gen_rtx_REG (DFmode, regno2), mem2));
+
+ add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DFmode, regno));
+ add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DFmode, regno2));
+ }
+
+ /* The first part of a frame-related parallel insn
+ is always assumed to be relevant to the frame
+ calculations; subsequent parts are only
+ frame-related if explicitly marked. */
+ RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0,
+ 1)) = 1;
+ regno = regno2;
+ start_offset += increment * 2;
+ }
+ else
+ {
+ if (restore == false)
+ insn = emit_move_insn (mem, gen_rtx_REG (DFmode, regno));
+ else
+ {
+ insn = emit_move_insn (gen_rtx_REG (DFmode, regno), mem);
+ add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno));
+ }
+ start_offset += increment;
+ }
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+}
+
+
+/* OFFSET is the offset from the stack pointer at which the callee
+ saves and restores have to happen. */
+static void
+aarch64_save_or_restore_callee_save_registers (HOST_WIDE_INT offset,
+ bool restore)
+{
+ rtx insn;
+ rtx base_rtx = stack_pointer_rtx;
+ HOST_WIDE_INT start_offset = offset;
+ HOST_WIDE_INT increment = UNITS_PER_WORD;
+ rtx (*gen_mem_ref) (enum machine_mode, rtx)
+ = frame_pointer_needed ? gen_frame_mem : gen_rtx_MEM;
+ unsigned limit = frame_pointer_needed ? R28_REGNUM : R30_REGNUM;
+ unsigned regno;
+ unsigned regno2;
+
+ for (regno = R0_REGNUM; regno <= limit; regno++)
+ {
+ if (aarch64_register_saved_on_entry (regno))
+ {
+ rtx mem;
+ mem = gen_mem_ref (Pmode,
+ plus_constant (base_rtx,
+ start_offset));
+
+ for (regno2 = regno + 1;
+ regno2 <= limit
+ && !aarch64_register_saved_on_entry (regno2);
+ regno2++)
+ {
+ /* Empty loop. */
+ }
+ if (regno2 <= limit
+ && aarch64_register_saved_on_entry (regno2))
+ {
+ rtx mem2;
+ /* Next highest register to be saved. */
+ mem2 = gen_mem_ref (Pmode,
+ plus_constant
+ (base_rtx,
+ start_offset + increment));
+ if (restore == false)
+ {
+ insn = emit_insn
+ (gen_store_pairdi (mem, gen_rtx_REG (DImode, regno),
+ mem2, gen_rtx_REG (DImode, regno2)));
+
+ }
+ else
+ {
+ insn = emit_insn
+ (gen_load_pairdi (gen_rtx_REG (DImode, regno), mem,
+ gen_rtx_REG (DImode, regno2), mem2));
+
+ add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno));
+ add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno2));
+ }
+
+ /* The first part of a frame-related parallel insn
+ is always assumed to be relevant to the frame
+ calculations; subsequent parts are only
+ frame-related if explicitly marked. */
+ RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0,
+ 1)) = 1;
+ regno = regno2;
+ start_offset += increment * 2;
+ }
+ else
+ {
+ if (restore == false)
+ insn = emit_move_insn (mem, gen_rtx_REG (DImode, regno));
+ else
+ {
+ insn = emit_move_insn (gen_rtx_REG (DImode, regno), mem);
+ add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno));
+ }
+ start_offset += increment;
+ }
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+
+ aarch64_save_or_restore_fprs (start_offset, increment, restore, base_rtx);
+}
+
+/* AArch64 stack frames generated by this compiler look like:
+
+ +-------------------------------+
+ | |
+ | incoming stack arguments |
+ | |
+ +-------------------------------+ <-- arg_pointer_rtx
+ | |
+ | callee-allocated save area |
+ | for register varargs |
+ | |
+ +-------------------------------+
+ | |
+ | local variables |
+ | |
+ +-------------------------------+ <-- frame_pointer_rtx
+ | |
+ | callee-saved registers |
+ | |
+ +-------------------------------+
+ | LR' |
+ +-------------------------------+
+ | FP' |
+ P +-------------------------------+ <-- hard_frame_pointer_rtx
+ | dynamic allocation |
+ +-------------------------------+
+ | |
+ | outgoing stack arguments |
+ | |
+ +-------------------------------+ <-- stack_pointer_rtx
+
+ Dynamic stack allocations such as alloca insert data at point P.
+ They decrease stack_pointer_rtx but leave frame_pointer_rtx and
+ hard_frame_pointer_rtx unchanged. */
+
+/* Generate the prologue instructions for entry into a function.
+ Establish the stack frame by decreasing the stack pointer with a
+ properly calculated size and, if necessary, create a frame record
+ filled with the values of LR and previous frame pointer. The
+ current FP is also set up if it is in use. */
+
+void
+aarch64_expand_prologue (void)
+{
+ /* sub sp, sp, #<frame_size>
+ stp {fp, lr}, [sp, #<frame_size> - 16]
+ add fp, sp, #<frame_size> - hardfp_offset
+ stp {cs_reg}, [fp, #-16] etc.
+
+ sub sp, sp, <final_adjustment_if_any>
+ */
+ HOST_WIDE_INT original_frame_size; /* local variables + vararg save */
+ HOST_WIDE_INT frame_size, offset;
+ HOST_WIDE_INT fp_offset; /* FP offset from SP */
+ rtx insn;
+
+ aarch64_layout_frame ();
+ original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size;
+ gcc_assert ((!cfun->machine->saved_varargs_size || cfun->stdarg)
+ && (cfun->stdarg || !cfun->machine->saved_varargs_size));
+ frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size
+ + crtl->outgoing_args_size);
+ offset = frame_size = AARCH64_ROUND_UP (frame_size,
+ STACK_BOUNDARY / BITS_PER_UNIT);
+
+ if (flag_stack_usage_info)
+ current_function_static_stack_size = frame_size;
+
+ fp_offset = (offset
+ - original_frame_size
+ - cfun->machine->frame.saved_regs_size);
+
+ /* Store-pair and load-pair instructions have an immediate offset range
+ of only -512 to +504 (a 7-bit signed offset, scaled by 8). */
+ if (offset >= 512)
+ {
+ /* When the frame has a large size, the stack pointer is first
+ decreased to skip over the callee-allocated save area for
+ register varargs, the local variable area and/or the callee-saved
+ register area. This allows the pre-index write-back store pair
+ instructions to be used for setting up the stack frame
+ efficiently. */
+ offset = original_frame_size + cfun->machine->frame.saved_regs_size;
+ if (offset >= 512)
+ offset = cfun->machine->frame.saved_regs_size;
+
+ frame_size -= (offset + crtl->outgoing_args_size);
+ fp_offset = 0;
+
+ if (frame_size >= 0x1000000)
+ {
+ rtx op0 = gen_rtx_REG (Pmode, IP0_REGNUM);
+ emit_move_insn (op0, GEN_INT (-frame_size));
+ emit_insn (gen_add2_insn (stack_pointer_rtx, op0));
+ aarch64_set_frame_expr (gen_rtx_SET
+ (Pmode, stack_pointer_rtx,
+ gen_rtx_PLUS (Pmode,
+ stack_pointer_rtx,
+ GEN_INT (-frame_size))));
+ }
+ else if (frame_size > 0)
+ {
+ if ((frame_size & 0xfff) != frame_size)
+ {
+ insn = emit_insn (gen_add2_insn
+ (stack_pointer_rtx,
+ GEN_INT (-(frame_size
+ & ~(HOST_WIDE_INT)0xfff))));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ if ((frame_size & 0xfff) != 0)
+ {
+ insn = emit_insn (gen_add2_insn
+ (stack_pointer_rtx,
+ GEN_INT (-(frame_size
+ & (HOST_WIDE_INT)0xfff))));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+ }
+ else
+ frame_size = -1;
+
+ if (offset > 0)
+ {
+ /* If the frame pointer is needed, save it and LR first, then
+ make the frame pointer point to the location of the old frame
+ pointer on the stack. */
+ if (frame_pointer_needed)
+ {
+ rtx mem_fp, mem_lr;
+
+ if (fp_offset)
+ {
+ insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
+ GEN_INT (-offset)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ aarch64_set_frame_expr (gen_rtx_SET
+ (Pmode, stack_pointer_rtx,
+ gen_rtx_MINUS (Pmode,
+ stack_pointer_rtx,
+ GEN_INT (offset))));
+ mem_fp = gen_frame_mem (DImode,
+ plus_constant (stack_pointer_rtx,
+ fp_offset));
+ mem_lr = gen_frame_mem (DImode,
+ plus_constant (stack_pointer_rtx,
+ fp_offset
+ + UNITS_PER_WORD));
+ insn = emit_insn (gen_store_pairdi (mem_fp,
+ hard_frame_pointer_rtx,
+ mem_lr,
+ gen_rtx_REG (DImode,
+ LR_REGNUM)));
+ }
+ else
+ {
+ insn = emit_insn (gen_storewb_pairdi_di
+ (stack_pointer_rtx, stack_pointer_rtx,
+ hard_frame_pointer_rtx,
+ gen_rtx_REG (DImode, LR_REGNUM),
+ GEN_INT (-offset),
+ GEN_INT (GET_MODE_SIZE (DImode) - offset)));
+ RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 2)) = 1;
+ }
+
+ /* The first part of a frame-related parallel insn is always
+ assumed to be relevant to the frame calculations;
+ subsequent parts are only frame-related if explicitly
+ marked. */
+ RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ /* Set up frame pointer to point to the location of the
+ previous frame pointer on the stack. */
+ insn = emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (fp_offset)));
+ aarch64_set_frame_expr (gen_rtx_SET
+ (Pmode, hard_frame_pointer_rtx,
+ gen_rtx_PLUS (Pmode,
+ stack_pointer_rtx,
+ GEN_INT (fp_offset))));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
+ hard_frame_pointer_rtx));
+ }
+ else
+ {
+ insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
+ GEN_INT (-offset)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ aarch64_save_or_restore_callee_save_registers
+ (fp_offset + cfun->machine->frame.hardfp_offset, 0);
+ }
+
+ /* When offset >= 512,
+ sub sp, sp, #<outgoing_args_size> */
+ if (frame_size > -1)
+ {
+ if (crtl->outgoing_args_size > 0)
+ {
+ insn = emit_insn (gen_add2_insn
+ (stack_pointer_rtx,
+ GEN_INT (- crtl->outgoing_args_size)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+}
+
+/* Generate the epilogue instructions for returning from a function. */
+void
+aarch64_expand_epilogue (bool for_sibcall)
+{
+ HOST_WIDE_INT original_frame_size, frame_size, offset;
+ HOST_WIDE_INT fp_offset;
+ rtx insn;
+
+ aarch64_layout_frame ();
+ original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size;
+ frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size
+ + crtl->outgoing_args_size);
+ offset = frame_size = AARCH64_ROUND_UP (frame_size,
+ STACK_BOUNDARY / BITS_PER_UNIT);
+
+ fp_offset = (offset
+ - original_frame_size
+ - cfun->machine->frame.saved_regs_size);
+
+ /* Store-pair and load-pair instructions have an immediate offset range
+ of only -512 to +504 (a 7-bit signed offset, scaled by 8). */
+ if (offset >= 512)
+ {
+ offset = original_frame_size + cfun->machine->frame.saved_regs_size;
+ if (offset >= 512)
+ offset = cfun->machine->frame.saved_regs_size;
+
+ frame_size -= (offset + crtl->outgoing_args_size);
+ fp_offset = 0;
+ if (!frame_pointer_needed && crtl->outgoing_args_size > 0)
+ {
+ insn = emit_insn (gen_add2_insn
+ (stack_pointer_rtx,
+ GEN_INT (crtl->outgoing_args_size)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+ else
+ frame_size = -1;
+
+ /* If there were outgoing arguments or we've done dynamic stack
+ allocation, then restore the stack pointer from the frame
+ pointer. This is at most one insn and more efficient than using
+ GCC's internal mechanism. */
+ if (frame_pointer_needed
+ && (crtl->outgoing_args_size || cfun->calls_alloca))
+ {
+ insn = emit_insn (gen_add3_insn (stack_pointer_rtx,
+ hard_frame_pointer_rtx,
+ GEN_INT (- fp_offset)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ aarch64_save_or_restore_callee_save_registers
+ (fp_offset + cfun->machine->frame.hardfp_offset, 1);
+
+ /* Restore the frame pointer and lr if the frame pointer is needed. */
+ if (offset > 0)
+ {
+ if (frame_pointer_needed)
+ {
+ rtx mem_fp, mem_lr;
+
+ if (fp_offset)
+ {
+ mem_fp = gen_frame_mem (DImode,
+ plus_constant (stack_pointer_rtx,
+ fp_offset));
+ mem_lr = gen_frame_mem (DImode,
+ plus_constant (stack_pointer_rtx,
+ fp_offset
+ + UNITS_PER_WORD));
+ insn = emit_insn (gen_load_pairdi (hard_frame_pointer_rtx,
+ mem_fp,
+ gen_rtx_REG (DImode,
+ LR_REGNUM),
+ mem_lr));
+ }
+ else
+ {
+ insn = emit_insn (gen_loadwb_pairdi_di
+ (stack_pointer_rtx,
+ stack_pointer_rtx,
+ hard_frame_pointer_rtx,
+ gen_rtx_REG (DImode, LR_REGNUM),
+ GEN_INT (offset),
+ GEN_INT (GET_MODE_SIZE (DImode) + offset)));
+ RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 2)) = 1;
+ aarch64_set_frame_expr (gen_rtx_SET
+ (Pmode,
+ stack_pointer_rtx,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+ GEN_INT (offset))));
+ }
+
+ /* The first part of a frame-related parallel insn
+ is always assumed to be relevant to the frame
+ calculations; subsequent parts are only
+ frame-related if explicitly marked. */
+ RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
+ RTX_FRAME_RELATED_P (insn) = 1;
+ add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
+ add_reg_note (insn, REG_CFA_RESTORE,
+ gen_rtx_REG (DImode, LR_REGNUM));
+
+ if (fp_offset)
+ {
+ insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
+ GEN_INT (offset)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+
+ else
+ {
+ insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
+ GEN_INT (offset)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+
+ /* Stack adjustment for exception handler. */
+ if (crtl->calls_eh_return)
+ {
+ /* We need to unwind the stack by the offset computed by
+ EH_RETURN_STACKADJ_RTX. However, at this point the CFA is
+ based on SP. Ideally we would update the SP and define the
+ CFA along the lines of:
+
+ SP = SP + EH_RETURN_STACKADJ_RTX
+ (regnote CFA = SP - EH_RETURN_STACKADJ_RTX)
+
+ However the dwarf emitter only understands a constant
+ register offset.
+
+ The solution chosen here is to use the otherwise unused IP0
+ as a temporary register to hold the current SP value. The
+ CFA is described using IP0 then SP is modified. */
+
+ rtx ip0 = gen_rtx_REG (DImode, IP0_REGNUM);
+
+ insn = emit_move_insn (ip0, stack_pointer_rtx);
+ add_reg_note (insn, REG_CFA_DEF_CFA, ip0);
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ emit_insn (gen_add2_insn (stack_pointer_rtx, EH_RETURN_STACKADJ_RTX));
+
+ /* Ensure the assignment to IP0 does not get optimized away. */
+ emit_use (ip0);
+ }
+
+ if (frame_size > -1)
+ {
+ if (frame_size >= 0x1000000)
+ {
+ rtx op0 = gen_rtx_REG (Pmode, IP0_REGNUM);
+ emit_move_insn (op0, GEN_INT (frame_size));
+ emit_insn (gen_add2_insn (stack_pointer_rtx, op0));
+ aarch64_set_frame_expr (gen_rtx_SET
+ (Pmode, stack_pointer_rtx,
+ gen_rtx_PLUS (Pmode,
+ stack_pointer_rtx,
+ GEN_INT (frame_size))));
+ }
+ else if (frame_size > 0)
+ {
+ if ((frame_size & 0xfff) != 0)
+ {
+ insn = emit_insn (gen_add2_insn
+ (stack_pointer_rtx,
+ GEN_INT ((frame_size
+ & (HOST_WIDE_INT) 0xfff))));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ if ((frame_size & 0xfff) != frame_size)
+ {
+ insn = emit_insn (gen_add2_insn
+ (stack_pointer_rtx,
+ GEN_INT ((frame_size
+ & ~ (HOST_WIDE_INT) 0xfff))));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+
+ aarch64_set_frame_expr (gen_rtx_SET (Pmode, stack_pointer_rtx,
+ gen_rtx_PLUS (Pmode,
+ stack_pointer_rtx,
+ GEN_INT (offset))));
+ }
+
+ emit_use (gen_rtx_REG (DImode, LR_REGNUM));
+ if (!for_sibcall)
+ emit_jump_insn (ret_rtx);
+}
+
+/* Return the place to copy the exception unwinding return address to.
+ This will probably be a stack slot, but could (in theory) be the
+ return register. */
+rtx
+aarch64_final_eh_return_addr (void)
+{
+ HOST_WIDE_INT original_frame_size, frame_size, offset, fp_offset;
+ aarch64_layout_frame ();
+ original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size;
+ frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size
+ + crtl->outgoing_args_size);
+ offset = frame_size = AARCH64_ROUND_UP (frame_size,
+ STACK_BOUNDARY / BITS_PER_UNIT);
+ fp_offset = offset
+ - original_frame_size
+ - cfun->machine->frame.saved_regs_size;
+
+ if (cfun->machine->frame.reg_offset[LR_REGNUM] < 0)
+ return gen_rtx_REG (DImode, LR_REGNUM);
+
+ /* DSE and CSELIB do not detect an alias between sp+k1 and fp+k2. This can
+ result in a store to save LR introduced by builtin_eh_return () being
+ incorrectly deleted because the alias is not detected.
+ So in the calculation of the address to copy the exception unwinding
+ return address to, we consider two cases.
+ If FP is needed and the fp_offset is 0, it means that SP = FP and hence
+ we return a SP-relative location since all the addresses are SP-relative
+ in this case. This prevents the store from being optimized away.
+ If the fp_offset is not 0, then the addresses will be FP-relative and
+ therefore we return a FP-relative location. */
+
+ if (frame_pointer_needed)
+ {
+ if (fp_offset)
+ return gen_frame_mem (DImode,
+ plus_constant (hard_frame_pointer_rtx, UNITS_PER_WORD));
+ else
+ return gen_frame_mem (DImode,
+ plus_constant (stack_pointer_rtx, UNITS_PER_WORD));
+ }
+
+ /* If FP is not needed, we calculate the location of LR, which would be
+ at the top of the saved registers block. */
+
+ return gen_frame_mem (DImode,
+ plus_constant (stack_pointer_rtx,
+ fp_offset
+ + cfun->machine->frame.saved_regs_size
+ - 2 * UNITS_PER_WORD));
+}
+
+/* Output code to build up a constant in a register. */
+static void
+aarch64_build_constant (FILE *file,
+ int regnum,
+ HOST_WIDE_INT val)
+{
+ if (aarch64_bitmask_imm (val, DImode))
+ asm_fprintf (file, "\tmovi\t%r, %wd\n", regnum, val);
+ else
+ {
+ int i;
+ int ncount = 0;
+ int zcount = 0;
+ HOST_WIDE_INT valp = val >> 16;
+ HOST_WIDE_INT valm;
+ HOST_WIDE_INT tval;
+
+ for (i = 16; i < 64; i += 16)
+ {
+ valm = (valp & 0xffff);
+
+ if (valm != 0)
+ ++ zcount;
+
+ if (valm != 0xffff)
+ ++ ncount;
+
+ valp >>= 16;
+ }
+
+ /* zcount contains the number of additional MOVK instructions
+ required if the constant is built up with an initial MOVZ instruction,
+ while ncount is the number of MOVK instructions required if starting
+ with a MOVN instruction. Choose the sequence that yields the fewer
+ instructions, preferring MOVZ when the two counts are equal. */
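+ /* For example 0x123456789abc (zcount == 2, ncount == 3) becomes
+ movz <reg>, 0x9abc
+ movk <reg>, 0x5678, lsl 16
+ movk <reg>, 0x1234, lsl 32. */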
+ if (ncount < zcount)
+ {
+ asm_fprintf (file, "\tmovn\t%r, %wd\n", regnum, (~val) & 0xffff);
+ tval = 0xffff;
+ }
+ else
+ {
+ asm_fprintf (file, "\tmovz\t%r, %wd\n", regnum, val & 0xffff);
+ tval = 0;
+ }
+
+ val >>= 16;
+
+ for (i = 16; i < 64; i += 16)
+ {
+ if ((val & 0xffff) != tval)
+ asm_fprintf (file, "\tmovk\t%r, %wd, lsl %d\n",
+ regnum, val & 0xffff, i);
+ val >>= 16;
+ }
+ }
+}
+
+static void
+aarch64_add_constant (FILE *file, int regnum, int scratchreg,
+ HOST_WIDE_INT delta)
+{
+ HOST_WIDE_INT mdelta = delta;
+
+ if (mdelta < 0)
+ mdelta = -mdelta;
+
+ if (mdelta >= 4096 * 4096)
+ {
+ aarch64_build_constant (file, scratchreg, delta);
+ asm_fprintf (file, "\tadd\t%r, %r, %r\n", regnum, regnum,
+ scratchreg);
+ }
+ else if (mdelta > 0)
+ {
+ const char *const mi_op = delta < 0 ? "sub" : "add";
+
+ if (mdelta >= 4096)
+ asm_fprintf (file, "\t%s\t%r, %r, %wd, lsl 12\n", mi_op, regnum, regnum,
+ mdelta / 4096);
+
+ if (mdelta % 4096 != 0)
+ asm_fprintf (file, "\t%s\t%r, %r, %wd\n", mi_op, regnum, regnum,
+ mdelta % 4096);
+ }
+}
+
+/* Output code to add DELTA to the first argument, and then jump
+ to FUNCTION. Used for C++ multiple inheritance. */
+static void
+aarch64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT delta,
+ HOST_WIDE_INT vcall_offset,
+ tree function)
+{
+ /* The this pointer is always in x0. Note that this differs from
+ ARM, where the this pointer may be bumped to r1 if r0 is required
+ to return a pointer to an aggregate. On AArch64 a result value
+ pointer will be in x8. */
+ int this_regno = R0_REGNUM;
+
+ /* Make sure unwind info is emitted for the thunk if needed. */
+ final_start_function (emit_barrier (), file, 1);
+
+ if (vcall_offset == 0)
+ aarch64_add_constant (file, this_regno, IP1_REGNUM, delta);
+ else
+ {
+ gcc_assert ((vcall_offset & 0x7) == 0);
+
+ if (delta == 0)
+ asm_fprintf (file, "\tldr\t%r, [%r]\n", IP0_REGNUM, this_regno);
+ else if (delta >= -256 && delta < 256)
+ asm_fprintf (file, "\tldr\t%r, [%r,%wd]!\n", IP0_REGNUM, this_regno,
+ delta);
+ else
+ {
+ aarch64_add_constant (file, this_regno, IP1_REGNUM, delta);
+
+ asm_fprintf (file, "\tldr\t%r, [%r]\n", IP0_REGNUM, this_regno);
+ }
+
+ if (vcall_offset >= -256 && vcall_offset < 32768)
+ asm_fprintf (file, "\tldr\t%r, [%r,%wd]\n", IP1_REGNUM, IP0_REGNUM,
+ vcall_offset);
+ else
+ {
+ aarch64_build_constant (file, IP1_REGNUM, vcall_offset);
+ asm_fprintf (file, "\tldr\t%r, [%r,%r]\n", IP1_REGNUM, IP0_REGNUM,
+ IP1_REGNUM);
+ }
+
+ asm_fprintf (file, "\tadd\t%r, %r, %r\n", this_regno, this_regno,
+ IP1_REGNUM);
+ }
+
+ output_asm_insn ("b\t%a0", &XEXP (DECL_RTL (function), 0));
+ final_end_function ();
+}
+
+
+static int
+aarch64_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
+{
+ if (GET_CODE (*x) == SYMBOL_REF)
+ return SYMBOL_REF_TLS_MODEL (*x) != 0;
+
+ /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
+ TLS offsets, not real symbol references. */
+ if (GET_CODE (*x) == UNSPEC
+ && XINT (*x, 1) == UNSPEC_TLS)
+ return -1;
+
+ return 0;
+}
+
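+/* Return true if X contains a reference to a thread-local symbol. */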
+static bool
+aarch64_tls_referenced_p (rtx x)
+{
+ if (!TARGET_HAVE_TLS)
+ return false;
+
+ return for_each_rtx (&x, aarch64_tls_operand_p_1, NULL);
+}
+
+
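+/* Comparison function for qsort and bsearch over the bitmask
+ immediate table. */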
+static int
+aarch64_bitmasks_cmp (const void *i1, const void *i2)
+{
+ const unsigned HOST_WIDE_INT *imm1 = (const unsigned HOST_WIDE_INT *) i1;
+ const unsigned HOST_WIDE_INT *imm2 = (const unsigned HOST_WIDE_INT *) i2;
+
+ if (*imm1 < *imm2)
+ return -1;
+ if (*imm1 > *imm2)
+ return +1;
+ return 0;
+}
+
+
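+/* Populate aarch64_bitmasks with every value representable as a logical
+ (bitmask) immediate: for each element size E in {2, 4, 8, 16, 32, 64},
+ take S consecutive set bits (1 <= S < E), rotate them right by R
+ (0 <= R < E) and replicate the element to 64 bits, giving E * (E - 1)
+ entries per size. For example, E == 8, S == 3, R == 1 turns 0x07 into
+ the byte 0x83, replicated to 0x8383838383838383. The table is then
+ sorted so that aarch64_bitmask_imm can look values up with bsearch. */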
+static void
+aarch64_build_bitmask_table (void)
+{
+ unsigned HOST_WIDE_INT mask, imm;
+ unsigned int log_e, e, s, r;
+ unsigned int nimms = 0;
+
+ for (log_e = 1; log_e <= 6; log_e++)
+ {
+ e = 1 << log_e;
+ if (e == 64)
+ mask = ~(HOST_WIDE_INT) 0;
+ else
+ mask = ((HOST_WIDE_INT) 1 << e) - 1;
+ for (s = 1; s < e; s++)
+ {
+ for (r = 0; r < e; r++)
+ {
+ /* Set S consecutive bits to 1 (S < 64). */
+ imm = ((unsigned HOST_WIDE_INT)1 << s) - 1;
+ /* Rotate right by R. */
+ if (r != 0)
+ imm = ((imm >> r) | (imm << (e - r))) & mask;
+ /* Replicate the constant depending on SIMD size. */
+ switch (log_e) {
+ case 1: imm |= (imm << 2);
+ case 2: imm |= (imm << 4);
+ case 3: imm |= (imm << 8);
+ case 4: imm |= (imm << 16);
+ case 5: imm |= (imm << 32);
+ case 6:
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ gcc_assert (nimms < AARCH64_NUM_BITMASKS);
+ aarch64_bitmasks[nimms++] = imm;
+ }
+ }
+ }
+
+ gcc_assert (nimms == AARCH64_NUM_BITMASKS);
+ qsort (aarch64_bitmasks, nimms, sizeof (aarch64_bitmasks[0]),
+ aarch64_bitmasks_cmp);
+}
+
+
+/* Return true if val can be encoded as a 12-bit unsigned immediate with
+ a left shift of 0 or 12 bits. */
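+/* For example, 0xabc and 0xabc000 are accepted, but 0xabc00 is not,
+ since its set bits straddle the two shift positions. */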
+bool
+aarch64_uimm12_shift (HOST_WIDE_INT val)
+{
+ return ((val & (((HOST_WIDE_INT) 0xfff) << 0)) == val
+ || (val & (((HOST_WIDE_INT) 0xfff) << 12)) == val);
+}
+
+
+/* Return true if VAL is an immediate that can be loaded into a
+ register by a single (possibly shifted) MOVZ instruction. */
+static bool
+aarch64_movw_imm (HOST_WIDE_INT val, enum machine_mode mode)
+{
+ if (GET_MODE_SIZE (mode) > 4)
+ {
+ if ((val & (((HOST_WIDE_INT) 0xffff) << 32)) == val
+ || (val & (((HOST_WIDE_INT) 0xffff) << 48)) == val)
+ return 1;
+ }
+ else
+ {
+ /* Ignore sign extension. */
+ val &= (HOST_WIDE_INT) 0xffffffff;
+ }
+ return ((val & (((HOST_WIDE_INT) 0xffff) << 0)) == val
+ || (val & (((HOST_WIDE_INT) 0xffff) << 16)) == val);
+}
+
+
+/* Return true if VAL is a valid bitmask immediate for mode MODE. */
+bool
+aarch64_bitmask_imm (HOST_WIDE_INT val, enum machine_mode mode)
+{
+ if (GET_MODE_SIZE (mode) < 8)
+ {
+ /* Replicate bit pattern. */
+ val &= (HOST_WIDE_INT) 0xffffffff;
+ val |= val << 32;
+ }
+ return bsearch (&val, aarch64_bitmasks, AARCH64_NUM_BITMASKS,
+ sizeof (aarch64_bitmasks[0]), aarch64_bitmasks_cmp) != NULL;
+}
+
+
+/* Return true if val is an immediate that can be loaded into a
+ register in a single instruction. */
+bool
+aarch64_move_imm (HOST_WIDE_INT val, enum machine_mode mode)
+{
+ if (aarch64_movw_imm (val, mode) || aarch64_movw_imm (~val, mode))
+ return 1;
+ return aarch64_bitmask_imm (val, mode);
+}
+
+static bool
+aarch64_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
+{
+ rtx base, offset;
+ if (GET_CODE (x) == HIGH)
+ return true;
+
+ split_const (x, &base, &offset);
+ if (GET_CODE (base) == SYMBOL_REF || GET_CODE (base) == LABEL_REF)
+ return (aarch64_classify_symbol (base, SYMBOL_CONTEXT_ADR) != SYMBOL_FORCE_TO_MEM);
+
+ return aarch64_tls_referenced_p (x);
+}
+
+/* Return true if register REGNO is a valid index register.
+ STRICT_P is true if REG_OK_STRICT is in effect. */
+
+bool
+aarch64_regno_ok_for_index_p (int regno, bool strict_p)
+{
+ if (!HARD_REGISTER_NUM_P (regno))
+ {
+ if (!strict_p)
+ return true;
+
+ if (!reg_renumber)
+ return false;
+
+ regno = reg_renumber[regno];
+ }
+ return GP_REGNUM_P (regno);
+}
+
+/* Return true if register REGNO is a valid base register.
+ STRICT_P is true if REG_OK_STRICT is in effect. */
+
+bool
+aarch64_regno_ok_for_base_p (int regno, bool strict_p)
+{
+ if (!HARD_REGISTER_NUM_P (regno))
+ {
+ if (!strict_p)
+ return true;
+
+ if (!reg_renumber)
+ return false;
+
+ regno = reg_renumber[regno];
+ }
+
+ /* The fake registers will be eliminated to either the stack or
+ hard frame pointer, both of which are usually valid base registers.
+ Reload deals with the cases where the eliminated form isn't valid. */
+ return (GP_REGNUM_P (regno)
+ || regno == SP_REGNUM
+ || regno == FRAME_POINTER_REGNUM
+ || regno == ARG_POINTER_REGNUM);
+}
+
+/* Return true if X is a valid base register rtx.
+ STRICT_P is true if REG_OK_STRICT is in effect. */
+
+static bool
+aarch64_base_register_rtx_p (rtx x, bool strict_p)
+{
+ if (!strict_p && GET_CODE (x) == SUBREG)
+ x = SUBREG_REG (x);
+
+ return (REG_P (x) && aarch64_regno_ok_for_base_p (REGNO (x), strict_p));
+}
+
+/* Return true if X is a valid index operand for an address of mode
+ MODE. If it is, fill in INFO appropriately. STRICT_P is true if
+ REG_OK_STRICT is in effect. */
+
+static bool
+aarch64_classify_index (struct aarch64_address_info *info, rtx x,
+ enum machine_mode mode, bool strict_p)
+{
+ enum aarch64_address_type type;
+ rtx index;
+ int shift;
+
+ /* (reg:P) */
+ if ((REG_P (x) || GET_CODE (x) == SUBREG)
+ && GET_MODE (x) == Pmode)
+ {
+ type = ADDRESS_REG_REG;
+ index = x;
+ shift = 0;
+ }
+ /* (sign_extend:DI (reg:SI)) */
+ else if ((GET_CODE (x) == SIGN_EXTEND
+ || GET_CODE (x) == ZERO_EXTEND)
+ && GET_MODE (x) == DImode
+ && GET_MODE (XEXP (x, 0)) == SImode)
+ {
+ type = (GET_CODE (x) == SIGN_EXTEND)
+ ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
+ index = XEXP (x, 0);
+ shift = 0;
+ }
+ /* (mult:DI (sign_extend:DI (reg:SI)) (const_int scale)) */
+ else if (GET_CODE (x) == MULT
+ && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
+ || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
+ && GET_MODE (XEXP (x, 0)) == DImode
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ type = (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
+ ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
+ index = XEXP (XEXP (x, 0), 0);
+ shift = exact_log2 (INTVAL (XEXP (x, 1)));
+ }
+ /* (ashift:DI (sign_extend:DI (reg:SI)) (const_int shift)) */
+ else if (GET_CODE (x) == ASHIFT
+ && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
+ || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
+ && GET_MODE (XEXP (x, 0)) == DImode
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ type = (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
+ ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
+ index = XEXP (XEXP (x, 0), 0);
+ shift = INTVAL (XEXP (x, 1));
+ }
+ /* (sign_extract:DI (mult:DI (reg:DI) (const_int scale)) 32+shift 0) */
+ else if ((GET_CODE (x) == SIGN_EXTRACT
+ || GET_CODE (x) == ZERO_EXTRACT)
+ && GET_MODE (x) == DImode
+ && GET_CODE (XEXP (x, 0)) == MULT
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
+ {
+ type = (GET_CODE (x) == SIGN_EXTRACT)
+ ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
+ index = XEXP (XEXP (x, 0), 0);
+ shift = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)));
+ if (INTVAL (XEXP (x, 1)) != 32 + shift
+ || INTVAL (XEXP (x, 2)) != 0)
+ shift = -1;
+ }
+ /* (and:DI (mult:DI (reg:DI) (const_int scale))
+ (const_int 0xffffffff<<shift)) */
+ else if (GET_CODE (x) == AND
+ && GET_MODE (x) == DImode
+ && GET_CODE (XEXP (x, 0)) == MULT
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1))
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ type = ADDRESS_REG_UXTW;
+ index = XEXP (XEXP (x, 0), 0);
+ shift = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)));
+ if (INTVAL (XEXP (x, 1)) != (HOST_WIDE_INT)0xffffffff << shift)
+ shift = -1;
+ }
+ /* (sign_extract:DI (ashift:DI (reg:DI) (const_int shift)) 32+shift 0) */
+ else if ((GET_CODE (x) == SIGN_EXTRACT
+ || GET_CODE (x) == ZERO_EXTRACT)
+ && GET_MODE (x) == DImode
+ && GET_CODE (XEXP (x, 0)) == ASHIFT
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
+ {
+ type = (GET_CODE (x) == SIGN_EXTRACT)
+ ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
+ index = XEXP (XEXP (x, 0), 0);
+ shift = INTVAL (XEXP (XEXP (x, 0), 1));
+ if (INTVAL (XEXP (x, 1)) != 32 + shift
+ || INTVAL (XEXP (x, 2)) != 0)
+ shift = -1;
+ }
+ /* (and:DI (ashift:DI (reg:DI) (const_int shift))
+ (const_int 0xffffffff<<shift)) */
+ else if (GET_CODE (x) == AND
+ && GET_MODE (x) == DImode
+ && GET_CODE (XEXP (x, 0)) == ASHIFT
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1))
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ type = ADDRESS_REG_UXTW;
+ index = XEXP (XEXP (x, 0), 0);
+ shift = INTVAL (XEXP (XEXP (x, 0), 1));
+ if (INTVAL (XEXP (x, 1)) != (HOST_WIDE_INT)0xffffffff << shift)
+ shift = -1;
+ }
+ /* (mult:P (reg:P) (const_int scale)) */
+ else if (GET_CODE (x) == MULT
+ && GET_MODE (x) == Pmode
+ && GET_MODE (XEXP (x, 0)) == Pmode
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ type = ADDRESS_REG_REG;
+ index = XEXP (x, 0);
+ shift = exact_log2 (INTVAL (XEXP (x, 1)));
+ }
+ /* (ashift:P (reg:P) (const_int shift)) */
+ else if (GET_CODE (x) == ASHIFT
+ && GET_MODE (x) == Pmode
+ && GET_MODE (XEXP (x, 0)) == Pmode
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ type = ADDRESS_REG_REG;
+ index = XEXP (x, 0);
+ shift = INTVAL (XEXP (x, 1));
+ }
+ else
+ return false;
+
+ if (GET_CODE (index) == SUBREG)
+ index = SUBREG_REG (index);
+
+ if ((shift == 0 ||
+ (shift > 0 && shift <= 3
+ && (1 << shift) == GET_MODE_SIZE (mode)))
+ && REG_P (index)
+ && aarch64_regno_ok_for_index_p (REGNO (index), strict_p))
+ {
+ info->type = type;
+ info->offset = index;
+ info->shift = shift;
+ return true;
+ }
+
+ return false;
+}
+
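+/* Return true if OFFSET is a signed 7-bit value scaled by the size of
+ MODE, as used by the load/store pair instructions. */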
+static inline bool
+offset_7bit_signed_scaled_p (enum machine_mode mode, HOST_WIDE_INT offset)
+{
+ return (offset >= -64 * GET_MODE_SIZE (mode)
+ && offset < 64 * GET_MODE_SIZE (mode)
+ && offset % GET_MODE_SIZE (mode) == 0);
+}
+
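+/* Return true if OFFSET fits in the unscaled signed 9-bit immediate
+ range. */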
+static inline bool
+offset_9bit_signed_unscaled_p (enum machine_mode mode ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT offset)
+{
+ return offset >= -256 && offset < 256;
+}
+
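+/* Return true if OFFSET is an unsigned 12-bit value scaled by the size
+ of MODE. */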
+static inline bool
+offset_12bit_unsigned_scaled_p (enum machine_mode mode, HOST_WIDE_INT offset)
+{
+ return (offset >= 0
+ && offset < 4096 * GET_MODE_SIZE (mode)
+ && offset % GET_MODE_SIZE (mode) == 0);
+}
+
+/* Return true if X is a valid address for machine mode MODE. If it is,
+ fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
+ effect. OUTER_CODE is PARALLEL for a load/store pair. */
+
+static bool
+aarch64_classify_address (struct aarch64_address_info *info,
+ rtx x, enum machine_mode mode,
+ RTX_CODE outer_code, bool strict_p)
+{
+ enum rtx_code code = GET_CODE (x);
+ rtx op0, op1;
+ bool allow_reg_index_p =
+ outer_code != PARALLEL && GET_MODE_SIZE (mode) != 16;
+
+ /* Don't support anything other than POST_INC or REG addressing for
+ AdvSIMD. */
+ if (aarch64_vector_mode_p (mode)
+ && (code != POST_INC && code != REG))
+ return false;
+
+ switch (code)
+ {
+ case REG:
+ case SUBREG:
+ info->type = ADDRESS_REG_IMM;
+ info->base = x;
+ info->offset = const0_rtx;
+ return aarch64_base_register_rtx_p (x, strict_p);
+
+ case PLUS:
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+ if (GET_MODE_SIZE (mode) != 0
+ && CONST_INT_P (op1)
+ && aarch64_base_register_rtx_p (op0, strict_p))
+ {
+ HOST_WIDE_INT offset = INTVAL (op1);
+
+ info->type = ADDRESS_REG_IMM;
+ info->base = op0;
+ info->offset = op1;
+
+ /* TImode and TFmode values are allowed in both pairs of X
+ registers and individual Q registers. The available
+ address modes are:
+ X,X: 7-bit signed scaled offset
+ Q: 9-bit signed offset
+ We conservatively require an offset representable in both forms. */
+ if (mode == TImode || mode == TFmode)
+ return (offset_7bit_signed_scaled_p (mode, offset)
+ && offset_9bit_signed_unscaled_p (mode, offset));
+
+ if (outer_code == PARALLEL)
+ return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
+ && offset_7bit_signed_scaled_p (mode, offset));
+ else
+ return (offset_9bit_signed_unscaled_p (mode, offset)
+ || offset_12bit_unsigned_scaled_p (mode, offset));
+ }
+
+ if (allow_reg_index_p)
+ {
+ /* Look for base + (scaled/extended) index register. */
+ if (aarch64_base_register_rtx_p (op0, strict_p)
+ && aarch64_classify_index (info, op1, mode, strict_p))
+ {
+ info->base = op0;
+ return true;
+ }
+ if (aarch64_base_register_rtx_p (op1, strict_p)
+ && aarch64_classify_index (info, op0, mode, strict_p))
+ {
+ info->base = op1;
+ return true;
+ }
+ }
+
+ return false;
+
+ case POST_INC:
+ case POST_DEC:
+ case PRE_INC:
+ case PRE_DEC:
+ info->type = ADDRESS_REG_WB;
+ info->base = XEXP (x, 0);
+ info->offset = NULL_RTX;
+ return aarch64_base_register_rtx_p (info->base, strict_p);
+
+ case POST_MODIFY:
+ case PRE_MODIFY:
+ info->type = ADDRESS_REG_WB;
+ info->base = XEXP (x, 0);
+ if (GET_CODE (XEXP (x, 1)) == PLUS
+ && CONST_INT_P (XEXP (XEXP (x, 1), 1))
+ && rtx_equal_p (XEXP (XEXP (x, 1), 0), info->base)
+ && aarch64_base_register_rtx_p (info->base, strict_p))
+ {
+ HOST_WIDE_INT offset;
+ info->offset = XEXP (XEXP (x, 1), 1);
+ offset = INTVAL (info->offset);
+
+ /* TImode and TFmode values are allowed in both pairs of X
+ registers and individual Q registers. The available
+ address modes are:
+ X,X: 7-bit signed scaled offset
+ Q: 9-bit signed offset
+ We conservatively require an offset representable in both forms. */
+ if (mode == TImode || mode == TFmode)
+ return (offset_7bit_signed_scaled_p (mode, offset)
+ && offset_9bit_signed_unscaled_p (mode, offset));
+
+ if (outer_code == PARALLEL)
+ return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
+ && offset_7bit_signed_scaled_p (mode, offset));
+ else
+ return offset_9bit_signed_unscaled_p (mode, offset);
+ }
+ return false;
+
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ /* load literal: pc-relative constant pool entry. */
+ info->type = ADDRESS_SYMBOLIC;
+ if (outer_code != PARALLEL)
+ {
+ rtx sym, addend;
+
+ split_const (x, &sym, &addend);
+ return (GET_CODE (sym) == LABEL_REF
+ || (GET_CODE (sym) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (sym)));
+ }
+ return false;
+
+ case LO_SUM:
+ info->type = ADDRESS_LO_SUM;
+ info->base = XEXP (x, 0);
+ info->offset = XEXP (x, 1);
+ if (allow_reg_index_p
+ && aarch64_base_register_rtx_p (info->base, strict_p))
+ {
+ rtx sym, offs;
+ split_const (info->offset, &sym, &offs);
+ if (GET_CODE (sym) == SYMBOL_REF
+ && (aarch64_classify_symbol (sym, SYMBOL_CONTEXT_MEM)
+ == SYMBOL_SMALL_ABSOLUTE))
+ {
+ /* The symbol and offset must be aligned to the access size. */
+ unsigned int align;
+ unsigned int ref_size;
+
+ if (CONSTANT_POOL_ADDRESS_P (sym))
+ align = GET_MODE_ALIGNMENT (get_pool_mode (sym));
+ else if (TREE_CONSTANT_POOL_ADDRESS_P (sym))
+ {
+ tree exp = SYMBOL_REF_DECL (sym);
+ align = TYPE_ALIGN (TREE_TYPE (exp));
+ align = CONSTANT_ALIGNMENT (exp, align);
+ }
+ else if (SYMBOL_REF_DECL (sym))
+ align = DECL_ALIGN (SYMBOL_REF_DECL (sym));
+ else
+ align = BITS_PER_UNIT;
+
+ ref_size = GET_MODE_SIZE (mode);
+ if (ref_size == 0)
+ ref_size = GET_MODE_SIZE (DImode);
+
+ return ((INTVAL (offs) & (ref_size - 1)) == 0
+ && ((align / BITS_PER_UNIT) & (ref_size - 1)) == 0);
+ }
+ }
+ return false;
+
+ default:
+ return false;
+ }
+}
+
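+/* Return true if X is a symbolic address: a SYMBOL_REF or LABEL_REF,
+ possibly plus a constant offset. */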
+bool
+aarch64_symbolic_address_p (rtx x)
+{
+ rtx offset;
+
+ split_const (x, &x, &offset);
+ return GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF;
+}
+
+/* Classify the base of symbolic expression X, given that X appears in
+ context CONTEXT. */
+static enum aarch64_symbol_type
+aarch64_classify_symbolic_expression (rtx x, enum aarch64_symbol_context context)
+{
+ rtx offset;
+ split_const (x, &x, &offset);
+ return aarch64_classify_symbol (x, context);
+}
+
+
+/* Return TRUE if X is a legitimate address for accessing memory in
+ mode MODE. */
+static bool
+aarch64_legitimate_address_hook_p (enum machine_mode mode, rtx x, bool strict_p)
+{
+ struct aarch64_address_info addr;
+
+ return aarch64_classify_address (&addr, x, mode, MEM, strict_p);
+}
+
+/* Return TRUE if X is a legitimate address for accessing memory in
+ mode MODE. OUTER_CODE will be PARALLEL if this is a load/store
+ pair operation. */
+bool
+aarch64_legitimate_address_p (enum machine_mode mode, rtx x,
+ RTX_CODE outer_code, bool strict_p)
+{
+ struct aarch64_address_info addr;
+
+ return aarch64_classify_address (&addr, x, mode, outer_code, strict_p);
+}
+
+/* Return TRUE if rtx X is the immediate constant 0.0; -0.0 counts
+ only when signed zeros need not be honored. */
+bool
+aarch64_const_double_zero_rtx_p (rtx x)
+{
+ REAL_VALUE_TYPE r;
+
+ if (GET_MODE (x) == VOIDmode)
+ return false;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ if (REAL_VALUE_MINUS_ZERO (r))
+ return !HONOR_SIGNED_ZEROS (GET_MODE (x));
+ return REAL_VALUES_EQUAL (r, dconst0);
+}
+
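+/* Implement SELECT_CC_MODE: return the CC mode to use for comparison
+ CODE on operands X and Y. */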
+enum machine_mode
+aarch64_select_cc_mode (RTX_CODE code, rtx x, rtx y)
+{
+ /* Floating-point compares return CCFP for equality and unordered
+ comparisons, and CCFPE for the ordered inequalities, which must
+ raise an invalid-operation exception on a NaN operand. */
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ {
+ switch (code)
+ {
+ case EQ:
+ case NE:
+ case UNORDERED:
+ case ORDERED:
+ case UNLT:
+ case UNLE:
+ case UNGT:
+ case UNGE:
+ case UNEQ:
+ case LTGT:
+ return CCFPmode;
+
+ case LT:
+ case LE:
+ case GT:
+ case GE:
+ return CCFPEmode;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
+ && y == const0_rtx
+ && (code == EQ || code == NE || code == LT || code == GE)
+ && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS))
+ return CC_NZmode;
+
+ /* A compare with a shifted operand. Because of canonicalization,
+ the comparison will have to be swapped when we emit the assembly
+ code. */
+ if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
+ && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG)
+ && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
+ || GET_CODE (x) == LSHIFTRT
+ || GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND))
+ return CC_SWPmode;
+
+ /* A compare of a mode narrower than SI mode against zero can be done
+ by extending the value in the comparison. */
+ if ((GET_MODE (x) == QImode || GET_MODE (x) == HImode)
+ && y == const0_rtx)
+ /* Only use sign-extension if we really need it. */
+ return ((code == GT || code == GE || code == LE || code == LT)
+ ? CC_SESWPmode : CC_ZESWPmode);
+
+ /* For everything else, return CCmode. */
+ return CCmode;
+}
+
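+/* Return the AArch64 condition code that corresponds to comparison X,
+ taking into account CC modes that imply swapped operands. */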
+static unsigned
+aarch64_get_condition_code (rtx x)
+{
+ enum machine_mode mode = GET_MODE (XEXP (x, 0));
+ enum rtx_code comp_code = GET_CODE (x);
+
+ if (GET_MODE_CLASS (mode) != MODE_CC)
+ mode = SELECT_CC_MODE (comp_code, XEXP (x, 0), XEXP (x, 1));
+
+ switch (mode)
+ {
+ case CCFPmode:
+ case CCFPEmode:
+ switch (comp_code)
+ {
+ case GE: return AARCH64_GE;
+ case GT: return AARCH64_GT;
+ case LE: return AARCH64_LS;
+ case LT: return AARCH64_MI;
+ case NE: return AARCH64_NE;
+ case EQ: return AARCH64_EQ;
+ case ORDERED: return AARCH64_VC;
+ case UNORDERED: return AARCH64_VS;
+ case UNLT: return AARCH64_LT;
+ case UNLE: return AARCH64_LE;
+ case UNGT: return AARCH64_HI;
+ case UNGE: return AARCH64_PL;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case CCmode:
+ switch (comp_code)
+ {
+ case NE: return AARCH64_NE;
+ case EQ: return AARCH64_EQ;
+ case GE: return AARCH64_GE;
+ case GT: return AARCH64_GT;
+ case LE: return AARCH64_LE;
+ case LT: return AARCH64_LT;
+ case GEU: return AARCH64_CS;
+ case GTU: return AARCH64_HI;
+ case LEU: return AARCH64_LS;
+ case LTU: return AARCH64_CC;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case CC_SWPmode:
+ case CC_ZESWPmode:
+ case CC_SESWPmode:
+ switch (comp_code)
+ {
+ case NE: return AARCH64_NE;
+ case EQ: return AARCH64_EQ;
+ case GE: return AARCH64_LE;
+ case GT: return AARCH64_LT;
+ case LE: return AARCH64_GE;
+ case LT: return AARCH64_GT;
+ case GEU: return AARCH64_LS;
+ case GTU: return AARCH64_CC;
+ case LEU: return AARCH64_CS;
+ case LTU: return AARCH64_HI;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case CC_NZmode:
+ switch (comp_code)
+ {
+ case NE: return AARCH64_NE;
+ case EQ: return AARCH64_EQ;
+ case GE: return AARCH64_PL;
+ case LT: return AARCH64_MI;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ break;
+ }
+}
+
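+/* Return the number of set bits in VALUE. Each loop iteration clears
+ the lowest set bit, so the iteration count equals the population
+ count (Kernighan's method). */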
+static unsigned
+bit_count (unsigned HOST_WIDE_INT value)
+{
+ unsigned count = 0;
+
+ while (value)
+ {
+ count++;
+ value &= value - 1;
+ }
+
+ return count;
+}
+
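+/* Print operand X to file F, modified by the operand code CODE
+ (zero for a plain operand). */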
+void
+aarch64_print_operand (FILE *f, rtx x, char code)
+{
+ switch (code)
+ {
+ case 'e':
+ /* Print the sign/zero-extend size as a character 8->b, 16->h, 32->w. */
+ {
+ int n;
+
+ if (GET_CODE (x) != CONST_INT
+ || (n = exact_log2 (INTVAL (x) & ~7)) <= 0)
+ {
+ output_operand_lossage ("invalid operand for '%%%c'", code);
+ return;
+ }
+
+ switch (n)
+ {
+ case 3:
+ fputc ('b', f);
+ break;
+ case 4:
+ fputc ('h', f);
+ break;
+ case 5:
+ fputc ('w', f);
+ break;
+ default:
+ output_operand_lossage ("invalid operand for '%%%c'", code);
+ return;
+ }
+ }
+ break;
+
+ case 'p':
+ {
+ int n;
+
+ /* Print N such that 2^N == X. */
+ if (GET_CODE (x) != CONST_INT || (n = exact_log2 (INTVAL (x))) < 0)
+ {
+ output_operand_lossage ("invalid operand for '%%%c'", code);
+ return;
+ }
+
+ asm_fprintf (f, "%d", n);
+ }
+ break;
+
+ case 'P':
+ /* Print the number of non-zero bits in X (a const_int). */
+ if (GET_CODE (x) != CONST_INT)
+ {
+ output_operand_lossage ("invalid operand for '%%%c'", code);
+ return;
+ }
+
+ asm_fprintf (f, "%u", bit_count (INTVAL (x)));
+ break;
+
+ case 'H':
+ /* Print the higher numbered register of a pair (TImode) of regs. */
+ if (GET_CODE (x) != REG || !GP_REGNUM_P (REGNO (x) + 1))
+ {
+ output_operand_lossage ("invalid operand for '%%%c'", code);
+ return;
+ }
+
+ asm_fprintf (f, "%r", REGNO (x) + 1);
+ break;
+
+ case 'Q':
+ /* Print the least significant register of a pair (TImode) of regs. */
+ if (GET_CODE (x) != REG || !GP_REGNUM_P (REGNO (x) + 1))
+ {
+ output_operand_lossage ("invalid operand for '%%%c'", code);
+ return;
+ }
+ asm_fprintf (f, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
+ break;
+
+ case 'R':
+ /* Print the most significant register of a pair (TImode) of regs. */
+ if (GET_CODE (x) != REG || !GP_REGNUM_P (REGNO (x) + 1))
+ {
+ output_operand_lossage ("invalid operand for '%%%c'", code);
+ return;
+ }
+ asm_fprintf (f, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
+ break;
+
+ case 'm':
+ /* Print a condition (eq, ne, etc). */
+
+ /* CONST_TRUE_RTX means always -- that's the default. */
+ if (x == const_true_rtx)
+ return;
+
+ if (!COMPARISON_P (x))
+ {
+ output_operand_lossage ("invalid operand for '%%%c'", code);
+ return;
+ }
+
+ fputs (aarch64_condition_codes[aarch64_get_condition_code (x)], f);
+ break;
+
+ case 'M':
+ /* Print the inverse of a condition (eq <-> ne, etc). */
+
+ /* CONST_TRUE_RTX means never -- that's the default. */
+ if (x == const_true_rtx)
+ {
+ fputs ("nv", f);
+ return;
+ }
+
+ if (!COMPARISON_P (x))
+ {
+ output_operand_lossage ("invalid operand for '%%%c'", code);
+ return;
+ }
+
+ fputs (aarch64_condition_codes[AARCH64_INVERSE_CONDITION_CODE
+ (aarch64_get_condition_code (x))], f);
+ break;
+
+ case 'b':
+ case 'h':
+ case 's':
+ case 'd':
+ case 'q':
+ /* Print a scalar FP/SIMD register name. */
+ if (!REG_P (x) || !FP_REGNUM_P (REGNO (x)))
+ {
+ output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code);
+ return;
+ }
+ asm_fprintf (f, "%s%c%d", REGISTER_PREFIX, code, REGNO (x) - V0_REGNUM);
+ break;
+
+ case 'S':
+ case 'T':
+ case 'U':
+ case 'V':
+ /* Print the first FP/SIMD register name in a list. */
+ if (!REG_P (x) || !FP_REGNUM_P (REGNO (x)))
+ {
+ output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code);
+ return;
+ }
+ asm_fprintf (f, "%sv%d", REGISTER_PREFIX,
+ REGNO (x) - V0_REGNUM + (code - 'S'));
+ break;
+
+ case 'w':
+ case 'x':
+ /* Print a general register name or the zero register (32-bit or
+ 64-bit). */
+ if (x == const0_rtx)
+ {
+ asm_fprintf (f, "%s%czr", REGISTER_PREFIX, code);
+ break;
+ }
+
+ if (REG_P (x) && GP_REGNUM_P (REGNO (x)))
+ {
+ asm_fprintf (f, "%s%c%d", REGISTER_PREFIX, code,
+ REGNO (x) - R0_REGNUM);
+ break;
+ }
+
+ if (REG_P (x) && REGNO (x) == SP_REGNUM)
+ {
+ asm_fprintf (f, "%s%ssp", REGISTER_PREFIX, code == 'w' ? "w" : "");
+ break;
+ }
+
+ /* Fall through */
+
+ case 0:
+ /* Print a normal operand. If it's a general register, we
+ assume DImode. */
+ if (x == NULL)
+ {
+ output_operand_lossage ("missing operand");
+ return;
+ }
+
+ switch (GET_CODE (x))
+ {
+ case REG:
+ asm_fprintf (f, "%r", REGNO (x));
+ break;
+
+ case MEM:
+ aarch64_memory_reference_mode = GET_MODE (x);
+ output_address (XEXP (x, 0));
+ break;
+
+ case LABEL_REF:
+ case SYMBOL_REF:
+ output_addr_const (asm_out_file, x);
+ break;
+
+ case CONST_INT:
+ asm_fprintf (f, "%wd", INTVAL (x));
+ break;
+
+ case CONST_VECTOR:
+ gcc_assert (aarch64_const_vec_all_same_int_p (x, HOST_WIDE_INT_MIN,
+ HOST_WIDE_INT_MAX));
+ asm_fprintf (f, "%wd", INTVAL (CONST_VECTOR_ELT (x, 0)));
+ break;
+
+ default:
+ output_operand_lossage ("invalid operand");
+ return;
+ }
+ break;
+
+ case 'A':
+ if (GET_CODE (x) == HIGH)
+ x = XEXP (x, 0);
+
+ switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR))
+ {
+ case SYMBOL_SMALL_GOT:
+ asm_fprintf (asm_out_file, ":got:");
+ break;
+
+ case SYMBOL_SMALL_TLSGD:
+ asm_fprintf (asm_out_file, ":tlsgd:");
+ break;
+
+ case SYMBOL_SMALL_TLSDESC:
+ asm_fprintf (asm_out_file, ":tlsdesc:");
+ break;
+
+ case SYMBOL_SMALL_GOTTPREL:
+ asm_fprintf (asm_out_file, ":gottprel:");
+ break;
+
+ case SYMBOL_SMALL_TPREL:
+ asm_fprintf (asm_out_file, ":tprel:");
+ break;
+
+ default:
+ break;
+ }
+ output_addr_const (asm_out_file, x);
+ break;
+
+ case 'L':
+ switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR))
+ {
+ case SYMBOL_SMALL_GOT:
+ asm_fprintf (asm_out_file, ":lo12:");
+ break;
+
+ case SYMBOL_SMALL_TLSGD:
+ asm_fprintf (asm_out_file, ":tlsgd_lo12:");
+ break;
+
+ case SYMBOL_SMALL_TLSDESC:
+ asm_fprintf (asm_out_file, ":tlsdesc_lo12:");
+ break;
+
+ case SYMBOL_SMALL_GOTTPREL:
+ asm_fprintf (asm_out_file, ":gottprel_lo12:");
+ break;
+
+ case SYMBOL_SMALL_TPREL:
+ asm_fprintf (asm_out_file, ":tprel_lo12_nc:");
+ break;
+
+ default:
+ break;
+ }
+ output_addr_const (asm_out_file, x);
+ break;
+
+ case 'G':
+
+ switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR))
+ {
+ case SYMBOL_SMALL_TPREL:
+ asm_fprintf (asm_out_file, ":tprel_hi12:");
+ break;
+ default:
+ break;
+ }
+ output_addr_const (asm_out_file, x);
+ break;
+
+ default:
+ output_operand_lossage ("invalid operand prefix '%%%c'", code);
+ return;
+ }
+}
+
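+/* Print the address expression X of a memory operand to file F, using
+ the classification computed by aarch64_classify_address. */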
+void
+aarch64_print_operand_address (FILE *f, rtx x)
+{
+ struct aarch64_address_info addr;
+
+ if (aarch64_classify_address (&addr, x, aarch64_memory_reference_mode,
+ MEM, true))
+ switch (addr.type)
+ {
+ case ADDRESS_REG_IMM:
+ if (addr.offset == const0_rtx)
+ asm_fprintf (f, "[%r]", REGNO (addr.base));
+ else
+ asm_fprintf (f, "[%r,%wd]", REGNO (addr.base),
+ INTVAL (addr.offset));
+ return;
+
+ case ADDRESS_REG_REG:
+ if (addr.shift == 0)
+ asm_fprintf (f, "[%r,%r]", REGNO (addr.base),
+ REGNO (addr.offset));
+ else
+ asm_fprintf (f, "[%r,%r,lsl %u]", REGNO (addr.base),
+ REGNO (addr.offset), addr.shift);
+ return;
+
+ case ADDRESS_REG_UXTW:
+ if (addr.shift == 0)
+ asm_fprintf (f, "[%r,w%d,uxtw]", REGNO (addr.base),
+ REGNO (addr.offset) - R0_REGNUM);
+ else
+ asm_fprintf (f, "[%r,w%d,uxtw %u]", REGNO (addr.base),
+ REGNO (addr.offset) - R0_REGNUM, addr.shift);
+ return;
+
+ case ADDRESS_REG_SXTW:
+ if (addr.shift == 0)
+ asm_fprintf (f, "[%r,w%d,sxtw]", REGNO (addr.base),
+ REGNO (addr.offset) - R0_REGNUM);
+ else
+ asm_fprintf (f, "[%r,w%d,sxtw %u]", REGNO (addr.base),
+ REGNO (addr.offset) - R0_REGNUM, addr.shift);
+ return;
+
+ case ADDRESS_REG_WB:
+ switch (GET_CODE (x))
+ {
+ case PRE_INC:
+ asm_fprintf (f, "[%r,%d]!", REGNO (addr.base),
+ GET_MODE_SIZE (aarch64_memory_reference_mode));
+ return;
+ case POST_INC:
+ asm_fprintf (f, "[%r],%d", REGNO (addr.base),
+ GET_MODE_SIZE (aarch64_memory_reference_mode));
+ return;
+ case PRE_DEC:
+ asm_fprintf (f, "[%r,-%d]!", REGNO (addr.base),
+ GET_MODE_SIZE (aarch64_memory_reference_mode));
+ return;
+ case POST_DEC:
+ asm_fprintf (f, "[%r],-%d", REGNO (addr.base),
+ GET_MODE_SIZE (aarch64_memory_reference_mode));
+ return;
+ case PRE_MODIFY:
+ asm_fprintf (f, "[%r,%wd]!", REGNO (addr.base),
+ INTVAL (addr.offset));
+ return;
+ case POST_MODIFY:
+ asm_fprintf (f, "[%r],%wd", REGNO (addr.base),
+ INTVAL (addr.offset));
+ return;
+ default:
+ break;
+ }
+ break;
+
+ case ADDRESS_LO_SUM:
+ asm_fprintf (f, "[%r,#:lo12:", REGNO (addr.base));
+ output_addr_const (f, addr.offset);
+ asm_fprintf (f, "]");
+ return;
+
+ case ADDRESS_SYMBOLIC:
+ break;
+ }
+
+ output_addr_const (f, x);
+}
+
+void
+aarch64_function_profiler (FILE *f ATTRIBUTE_UNUSED,
+ int labelno ATTRIBUTE_UNUSED)
+{
+ sorry ("function profiling");
+}
+
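+/* Return true if X mentions a label, ignoring the LABEL_REFs inside
+ UNSPEC_TLS. */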
+bool
+aarch64_label_mentioned_p (rtx x)
+{
+ const char *fmt;
+ int i;
+
+ if (GET_CODE (x) == LABEL_REF)
+ return true;
+
+ /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the
+ referencing instruction, but they are constant offsets, not
+ symbols. */
+ if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
+ return false;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ int j;
+
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (aarch64_label_mentioned_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ else if (fmt[i] == 'e' && aarch64_label_mentioned_p (XEXP (x, i)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Implement REGNO_REG_CLASS. */
+
+unsigned
+aarch64_regno_regclass (unsigned regno)
+{
+ if (GP_REGNUM_P (regno))
+ return CORE_REGS;
+
+ if (regno == SP_REGNUM)
+ return STACK_REG;
+
+ if (regno == FRAME_POINTER_REGNUM
+ || regno == ARG_POINTER_REGNUM)
+ return CORE_REGS;
+
+ if (FP_REGNUM_P (regno))
+ return FP_LO_REGNUM_P (regno) ? FP_LO_REGS : FP_REGS;
+
+ return NO_REGS;
+}
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and return the new rtx. */
+
+rtx
+aarch64_legitimize_reload_address (rtx *x_p,
+ enum machine_mode mode,
+ int opnum, int type,
+ int ind_levels ATTRIBUTE_UNUSED)
+{
+ rtx x = *x_p;
+
+ /* Do not allow (mem (plus (reg) (const))) addresses for vector modes. */
+ if (aarch64_vector_mode_p (mode)
+ && GET_CODE (x) == PLUS
+ && REG_P (XEXP (x, 0))
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ rtx orig_rtx = x;
+ x = copy_rtx (x);
+ push_reload (orig_rtx, NULL_RTX, x_p, NULL,
+ BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+ opnum, type);
+ return x;
+ }
+
+ /* We must recognize output that we have already generated ourselves. */
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && REG_P (XEXP (XEXP (x, 0), 0))
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1))
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+ opnum, type);
+ return x;
+ }
+
+ /* We wish to handle large displacements off a base register by splitting
+ the addend across an ADD and the MEM insn. This can cut the number of
+ extra insns needed from 3 to 1. It is only useful for load/store of a
+ single register with a 12-bit offset field. */
+ if (GET_CODE (x) == PLUS
+ && REG_P (XEXP (x, 0))
+ && CONST_INT_P (XEXP (x, 1))
+ && HARD_REGISTER_P (XEXP (x, 0))
+ && mode != TImode
+ && mode != TFmode
+ && aarch64_regno_ok_for_base_p (REGNO (XEXP (x, 0)), true))
+ {
+ HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
+ HOST_WIDE_INT low = val & 0xfff;
+ HOST_WIDE_INT high = val - low;
+ HOST_WIDE_INT offs;
+ rtx cst;
+
+ /* Punt on BLKmode (size 0) offsets: we cannot ascertain BLKmode
+ alignment, so let the generic reload machinery handle them. */
+ if (GET_MODE_SIZE (mode) == 0)
+ return NULL_RTX;
+
+ offs = low % GET_MODE_SIZE (mode);
+
+ /* Align misaligned offset by adjusting high part to compensate. */
+ if (offs != 0)
+ {
+ if (aarch64_uimm12_shift (high + offs))
+ {
+ /* Align down. */
+ low = low - offs;
+ high = high + offs;
+ }
+ else
+ {
+ /* Align up. */
+ offs = GET_MODE_SIZE (mode) - offs;
+ low = low + offs;
+ high = high + (low & 0x1000) - offs;
+ low &= 0xfff;
+ }
+ }
+
+ /* Check for overflow. */
+ if (high + low != val)
+ return NULL_RTX;
+
+ cst = GEN_INT (high);
+ if (!aarch64_uimm12_shift (high))
+ cst = force_const_mem (Pmode, cst);
+
+ /* Reload high part into base reg, leaving the low part
+ in the mem instruction. */
+ x = gen_rtx_PLUS (Pmode,
+ gen_rtx_PLUS (Pmode, XEXP (x, 0), cst),
+ GEN_INT (low));
+
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
+ opnum, type);
+ return x;
+ }
+
+ return NULL_RTX;
+}
+
+
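+/* Implement TARGET_SECONDARY_RELOAD. */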
+static reg_class_t
+aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
+ reg_class_t rclass,
+ enum machine_mode mode,
+ secondary_reload_info *sri)
+{
+ /* Address expressions of the form PLUS (SP, large_offset) need two
+ scratch registers, one for the constant, and one for holding a
+ copy of SP, since SP cannot be used on the RHS of an add-reg
+ instruction. */
+ if (mode == DImode
+ && GET_CODE (x) == PLUS
+ && XEXP (x, 0) == stack_pointer_rtx
+ && CONST_INT_P (XEXP (x, 1))
+ && !aarch64_uimm12_shift (INTVAL (XEXP (x, 1))))
+ {
+ sri->icode = CODE_FOR_reload_sp_immediate;
+ return NO_REGS;
+ }
+
+ /* Without the TARGET_SIMD instructions we cannot move a Q register
+ to a Q register directly. We need a scratch. */
+ if (REG_P (x) && (mode == TFmode || mode == TImode) && mode == GET_MODE (x)
+ && FP_REGNUM_P (REGNO (x)) && !TARGET_SIMD
+ && reg_class_subset_p (rclass, FP_REGS))
+ {
+ if (mode == TFmode)
+ sri->icode = CODE_FOR_aarch64_reload_movtf;
+ else if (mode == TImode)
+ sri->icode = CODE_FOR_aarch64_reload_movti;
+ return NO_REGS;
+ }
+
+ /* A TFmode or TImode memory access should be handled via an FP_REGS
+ because AArch64 has richer addressing modes for LDR/STR instructions
+ than LDP/STP instructions. */
+ if (!TARGET_GENERAL_REGS_ONLY && rclass == CORE_REGS
+ && GET_MODE_SIZE (mode) == 16 && MEM_P (x))
+ return FP_REGS;
+
+ if ((mode == TImode || mode == TFmode) && CONSTANT_P(x)
+ && reg_class_subset_p (rclass, FP_REGS))
+ return CORE_REGS;
+
+ return NO_REGS;
+}
+
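+/* Implement TARGET_CAN_ELIMINATE. Return true if register FROM may
+ be eliminated in favour of register TO. */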
+static bool
+aarch64_can_eliminate (const int from, const int to)
+{
+ /* If we need a frame pointer, we must eliminate FRAME_POINTER_REGNUM into
+ HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
+
+ if (frame_pointer_needed)
+ {
+ if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
+ return true;
+ if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+ return false;
+ if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
+ && !cfun->calls_alloca)
+ return true;
+ if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
+ return true;
+ return false;
+ }
+ else
+ {
+ /* If we decided that we didn't need a frame pointer but then used
+ LR in the function, then we do need a frame pointer after all, so
+ prevent this elimination to ensure a frame pointer is used. */
+
+ if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
+ && df_regs_ever_live_p (LR_REGNUM))
+ return false;
+ }
+ return true;
+}
+
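+/* Implement INITIAL_ELIMINATION_OFFSET. Return the offset between
+ registers FROM and TO once the frame has been laid out. */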
+HOST_WIDE_INT
+aarch64_initial_elimination_offset (unsigned from, unsigned to)
+{
+ HOST_WIDE_INT frame_size;
+ HOST_WIDE_INT offset;
+
+ aarch64_layout_frame ();
+ frame_size = (get_frame_size () + cfun->machine->frame.saved_regs_size
+ + crtl->outgoing_args_size
+ + cfun->machine->saved_varargs_size);
+
+ frame_size = AARCH64_ROUND_UP (frame_size, STACK_BOUNDARY / BITS_PER_UNIT);
+ offset = frame_size;
+
+ if (to == HARD_FRAME_POINTER_REGNUM)
+ {
+ if (from == ARG_POINTER_REGNUM)
+ return offset - crtl->outgoing_args_size;
+
+ if (from == FRAME_POINTER_REGNUM)
+ return cfun->machine->frame.saved_regs_size;
+ }
+
+ if (to == STACK_POINTER_REGNUM)
+ {
+ if (from == FRAME_POINTER_REGNUM)
+ {
+ HOST_WIDE_INT elim = crtl->outgoing_args_size
+ + cfun->machine->frame.saved_regs_size
+ - cfun->machine->frame.fp_lr_offset;
+ elim = AARCH64_ROUND_UP (elim, STACK_BOUNDARY / BITS_PER_UNIT);
+ return elim;
+ }
+ }
+
+ return offset;
+}
+
+
+/* Implement RETURN_ADDR_RTX. We do not support moving back to a
+ previous frame. */
+
+rtx
+aarch64_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
+{
+ if (count != 0)
+ return const0_rtx;
+ return get_hard_reg_initial_val (Pmode, LR_REGNUM);
+}
+
+
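+/* Emit the trampoline template. The layout, byte offsets on the
+ left, is:
+ 0: ldr ip1, .+16 ; load the target address from offset 16
+ 4: ldr <chain reg>, .+20 ; load the static chain from offset 24
+ 8: br ip1
+ 12: .word 0 ; padding to an 8-byte boundary
+ 16: .dword 0 ; overwritten with the target address
+ 24: .dword 0 ; overwritten with the static chain
+ by aarch64_trampoline_init. */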
+static void
+aarch64_asm_trampoline_template (FILE *f)
+{
+ asm_fprintf (f, "\tldr\t%r, .+16\n", IP1_REGNUM);
+ asm_fprintf (f, "\tldr\t%r, .+20\n", STATIC_CHAIN_REGNUM);
+ asm_fprintf (f, "\tbr\t%r\n", IP1_REGNUM);
+ assemble_aligned_integer (4, const0_rtx);
+ assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
+ assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
+}
+
+unsigned
+aarch64_trampoline_size (void)
+{
+ return 32; /* 3 insns + padding + 2 dwords. */
+}
+
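+/* Implement TARGET_TRAMPOLINE_INIT: copy the trampoline template into
+ M_TRAMP, fill in the address of FNDECL and CHAIN_VALUE, and flush
+ the instruction cache. */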
+static void
+aarch64_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
+{
+ rtx fnaddr, mem, a_tramp;
+
+ /* We don't need to copy the trailing D-words; we fill those in below. */
+ emit_block_move (m_tramp, assemble_trampoline_template (),
+ GEN_INT (TRAMPOLINE_SIZE - 16), BLOCK_OP_NORMAL);
+ mem = adjust_address (m_tramp, DImode, 16);
+ fnaddr = XEXP (DECL_RTL (fndecl), 0);
+ emit_move_insn (mem, fnaddr);
+
+ mem = adjust_address (m_tramp, DImode, 24);
+ emit_move_insn (mem, chain_value);
+
+ /* XXX We should really define a "clear_cache" pattern and use
+ gen_clear_cache(). */
+ a_tramp = XEXP (m_tramp, 0);
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"),
+ 0, VOIDmode, 2, a_tramp, Pmode,
+ plus_constant (a_tramp, TRAMPOLINE_SIZE), Pmode);
+}
+
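+/* Implement TARGET_CLASS_MAX_NREGS: return the number of consecutive
+ registers of class REGCLASS needed to hold a value of mode MODE. */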
+static unsigned char
+aarch64_class_max_nregs (reg_class_t regclass, enum machine_mode mode)
+{
+ switch (regclass)
+ {
+ case CORE_REGS:
+ case POINTER_REGS:
+ case GENERAL_REGS:
+ case ALL_REGS:
+ case FP_REGS:
+ case FP_LO_REGS:
+ return (aarch64_vector_mode_p (mode)
+ ? (GET_MODE_SIZE (mode) + 15) / 16
+ : (GET_MODE_SIZE (mode) + 7) / 8);
+ case STACK_REG:
+ return 1;
+
+ case NO_REGS:
+ return 0;
+
+ default:
+ break;
+ }
+ gcc_unreachable ();
+}
+
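+/* Implement TARGET_PREFERRED_RELOAD_CLASS. */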
+static reg_class_t
+aarch64_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t regclass)
+{
+ return ((regclass == POINTER_REGS || regclass == STACK_REG)
+ ? GENERAL_REGS : regclass);
+}
+
+void
+aarch64_asm_output_labelref (FILE* f, const char *name)
+{
+ asm_fprintf (f, "%U%s", name);
+}
+
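+/* Emit the assembly directive for a constructor. Default-priority
+ constructors go through the default .init_array handling; others
+ get a priority-suffixed .init_array.NNNNN section. */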
+static void
+aarch64_elf_asm_constructor (rtx symbol, int priority)
+{
+ if (priority == DEFAULT_INIT_PRIORITY)
+ default_ctor_section_asm_out_constructor (symbol, priority);
+ else
+ {
+ section *s;
+ char buf[18];
+ snprintf (buf, sizeof (buf), ".init_array.%.5u", priority);
+ s = get_section (buf, SECTION_WRITE, NULL);
+ switch_to_section (s);
+ assemble_align (POINTER_SIZE);
+ fputs ("\t.dword\t", asm_out_file);
+ output_addr_const (asm_out_file, symbol);
+ fputc ('\n', asm_out_file);
+ }
+}
+
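+/* As above, but for destructors and .fini_array. */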
+static void
+aarch64_elf_asm_destructor (rtx symbol, int priority)
+{
+ if (priority == DEFAULT_INIT_PRIORITY)
+ default_dtor_section_asm_out_destructor (symbol, priority);
+ else
+ {
+ section *s;
+ char buf[18];
+ snprintf (buf, sizeof (buf), ".fini_array.%.5u", priority);
+ s = get_section (buf, SECTION_WRITE, NULL);
+ switch_to_section (s);
+ assemble_align (POINTER_SIZE);
+ fputs ("\t.dword\t", asm_out_file);
+ output_addr_const (asm_out_file, symbol);
+ fputc ('\n', asm_out_file);
+ }
+}
+
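+/* Output the assembly for a casesi dispatch sequence. OPERANDS[0] is
+ the address of the jump table, OPERANDS[1] the index into it,
+ OPERANDS[2] the label of the table itself, and OPERANDS[3] and
+ OPERANDS[4] scratch registers for the loaded entry and the table
+ base address. */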
+const char*
+aarch64_output_casesi (rtx *operands)
+{
+ char buf[100];
+ char label[100];
+ rtx diff_vec = PATTERN (next_real_insn (operands[2]));
+ int index;
+ static const char *const patterns[4][2] =
+ {
+ {
+ "ldrb\t%w3, [%0,%w1,uxtw]",
+ "add\t%3, %4, %w3, sxtb #2"
+ },
+ {
+ "ldrh\t%w3, [%0,%w1,uxtw #1]",
+ "add\t%3, %4, %w3, sxth #2"
+ },
+ {
+ "ldr\t%w3, [%0,%w1,uxtw #2]",
+ "add\t%3, %4, %w3, sxtw #2"
+ },
+ /* We assume that DImode is only generated when not optimizing and
+ that we don't really need 64-bit address offsets. That would
+ imply an object file with 8GB of code in a single function! */
+ {
+ "ldr\t%w3, [%0,%w1,uxtw #2]",
+ "add\t%3, %4, %w3, sxtw #2"
+ }
+ };
+
+ gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
+
+ index = exact_log2 (GET_MODE_SIZE (GET_MODE (diff_vec)));
+
+ gcc_assert (index >= 0 && index <= 3);
+
+ /* Need to implement table size reduction, by changing the code below. */
+ output_asm_insn (patterns[index][0], operands);
+ ASM_GENERATE_INTERNAL_LABEL (label, "Lrtx", CODE_LABEL_NUMBER (operands[2]));
+ snprintf (buf, sizeof (buf),
+ "adr\t%%4, %s", targetm.strip_name_encoding (label));
+ output_asm_insn (buf, operands);
+ output_asm_insn (patterns[index][1], operands);
+ output_asm_insn ("br\t%3", operands);
+ assemble_label (asm_out_file, label);
+ return "";
+}
+
+
+/* Return size in bits of an arithmetic operand which is shifted/scaled and
+ masked such that it is suitable for a UXTB, UXTH, or UXTW extend
+ operator. */
+
+int
+aarch64_uxt_size (int shift, HOST_WIDE_INT mask)
+{
+ if (shift >= 0 && shift <= 3)
+ {
+ int size;
+ for (size = 8; size <= 32; size *= 2)
+ {
+ HOST_WIDE_INT bits = ((HOST_WIDE_INT)1U << size) - 1;
+ if (mask == bits << shift)
+ return size;
+ }
+ }
+ return 0;
+}
+
+static bool
+aarch64_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
+ const_rtx x ATTRIBUTE_UNUSED)
+{
+ /* We can't use blocks for constants when we're using a per-function
+ constant pool. */
+ return false;
+}
+
+static section *
+aarch64_select_rtx_section (enum machine_mode mode ATTRIBUTE_UNUSED,
+ rtx x ATTRIBUTE_UNUSED,
+ unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
+{
+ /* Force all constant pool entries into the current function section. */
+ return function_section (current_function_decl);
+}
+
+
+/* Costs. */
+
+/* Helper function for rtx cost calculation. Strip a shift expression
+ from X. Returns the inner operand if successful, or the original
+ expression on failure. */
+static rtx
+aarch64_strip_shift (rtx x)
+{
+ rtx op = x;
+
+ if ((GET_CODE (op) == ASHIFT
+ || GET_CODE (op) == ASHIFTRT
+ || GET_CODE (op) == LSHIFTRT)
+ && CONST_INT_P (XEXP (op, 1)))
+ return XEXP (op, 0);
+
+ if (GET_CODE (op) == MULT
+ && CONST_INT_P (XEXP (op, 1))
+ && ((unsigned) exact_log2 (INTVAL (XEXP (op, 1)))) < 64)
+ return XEXP (op, 0);
+
+ return x;
+}
+
+/* Helper function for rtx cost calculation. Strip a shift or extend
+ expression from X. Returns the inner operand if successful, or the
+ original expression on failure. We deal with a number of possible
+ canonicalization variations here. */
+static rtx
+aarch64_strip_shift_or_extend (rtx x)
+{
+ rtx op = x;
+
+ /* Zero and sign extraction of a widened value. */
+ if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
+ && XEXP (op, 2) == const0_rtx
+ && aarch64_is_extend_from_extract (GET_MODE (op), XEXP (XEXP (op, 0), 1),
+ XEXP (op, 1)))
+ return XEXP (XEXP (op, 0), 0);
+
+ /* It can also be represented (for zero-extend) as an AND with an
+ immediate. */
+ if (GET_CODE (op) == AND
+ && GET_CODE (XEXP (op, 0)) == MULT
+ && CONST_INT_P (XEXP (XEXP (op, 0), 1))
+ && CONST_INT_P (XEXP (op, 1))
+ && aarch64_uxt_size (exact_log2 (INTVAL (XEXP (XEXP (op, 0), 1))),
+ INTVAL (XEXP (op, 1))) != 0)
+ return XEXP (XEXP (op, 0), 0);
+
+ /* Now handle extended register, as this may also have an optional
+ left shift by 1..4. */
+ if (GET_CODE (op) == ASHIFT
+ && CONST_INT_P (XEXP (op, 1))
+ && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (op, 1))) <= 4)
+ op = XEXP (op, 0);
+
+ if (GET_CODE (op) == ZERO_EXTEND
+ || GET_CODE (op) == SIGN_EXTEND)
+ op = XEXP (op, 0);
+
+ if (op != x)
+ return op;
+
+ return aarch64_strip_shift (x);
+}
+
+/* Calculate the cost of calculating X, storing it in *COST. Result
+ is true if the total cost of the operation has now been calculated. */
+static bool
+aarch64_rtx_costs (rtx x, int code, int outer ATTRIBUTE_UNUSED,
+ int param ATTRIBUTE_UNUSED, int *cost, bool speed)
+{
+ rtx op0, op1;
+ const struct cpu_rtx_cost_table *extra_cost
+ = aarch64_tune_params->insn_extra_cost;
+
+ switch (code)
+ {
+ case SET:
+ op0 = SET_DEST (x);
+ op1 = SET_SRC (x);
+
+ switch (GET_CODE (op0))
+ {
+ case MEM:
+ if (speed)
+ *cost += extra_cost->memory_store;
+
+ if (op1 != const0_rtx)
+ *cost += rtx_cost (op1, SET, 1, speed);
+ return true;
+
+ case SUBREG:
+ if (! REG_P (SUBREG_REG (op0)))
+ *cost += rtx_cost (SUBREG_REG (op0), SET, 0, speed);
+ /* Fall through. */
+ case REG:
+ /* Cost is just the cost of the RHS of the set. */
+ *cost += rtx_cost (op1, SET, 1, true);
+ return true;
+
+ case ZERO_EXTRACT: /* Bit-field insertion. */
+ case SIGN_EXTRACT:
+ /* Strip any redundant widening of the RHS to meet the width of
+ the target. */
+ if (GET_CODE (op1) == SUBREG)
+ op1 = SUBREG_REG (op1);
+ if ((GET_CODE (op1) == ZERO_EXTEND
+ || GET_CODE (op1) == SIGN_EXTEND)
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && (GET_MODE_BITSIZE (GET_MODE (XEXP (op1, 0)))
+ >= INTVAL (XEXP (op0, 1))))
+ op1 = XEXP (op1, 0);
+ *cost += rtx_cost (op1, SET, 1, speed);
+ return true;
+
+ default:
+ break;
+ }
+ return false;
+
+ case MEM:
+ if (speed)
+ *cost += extra_cost->memory_load;
+
+ return true;
+
+ case NEG:
+ op0 = CONST0_RTX (GET_MODE (x));
+ op1 = XEXP (x, 0);
+ goto cost_minus;
+
+ case COMPARE:
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+
+ if (op1 == const0_rtx
+ && GET_CODE (op0) == AND)
+ {
+ x = op0;
+ goto cost_logic;
+ }
+
+ /* Comparisons can work if the order is swapped.
+ Canonicalization puts the more complex operation first, but
+ we want it in op1. */
+ if (! (REG_P (op0)
+ || (GET_CODE (op0) == SUBREG && REG_P (SUBREG_REG (op0)))))
+ {
+ op0 = XEXP (x, 1);
+ op1 = XEXP (x, 0);
+ }
+ goto cost_minus;
+
+ case MINUS:
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+
+ cost_minus:
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
+ || (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT))
+ {
+ if (op0 != const0_rtx)
+ *cost += rtx_cost (op0, MINUS, 0, speed);
+
+ if (CONST_INT_P (op1))
+ {
+ if (!aarch64_uimm12_shift (INTVAL (op1)))
+ *cost += rtx_cost (op1, MINUS, 1, speed);
+ }
+ else
+ {
+ op1 = aarch64_strip_shift_or_extend (op1);
+ *cost += rtx_cost (op1, MINUS, 1, speed);
+ }
+ return true;
+ }
+
+ return false;
+
+ case PLUS:
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
+ {
+ if (CONST_INT_P (op1) && aarch64_uimm12_shift (INTVAL (op1)))
+ {
+ *cost += rtx_cost (op0, PLUS, 0, speed);
+ }
+ else
+ {
+ rtx new_op0 = aarch64_strip_shift_or_extend (op0);
+
+ if (new_op0 == op0
+ && GET_CODE (op0) == MULT)
+ {
+ if ((GET_CODE (XEXP (op0, 0)) == ZERO_EXTEND
+ && GET_CODE (XEXP (op0, 1)) == ZERO_EXTEND)
+ || (GET_CODE (XEXP (op0, 0)) == SIGN_EXTEND
+ && GET_CODE (XEXP (op0, 1)) == SIGN_EXTEND))
+ {
+ *cost += (rtx_cost (XEXP (XEXP (op0, 0), 0), MULT, 0,
+ speed)
+ + rtx_cost (XEXP (XEXP (op0, 1), 0), MULT, 1,
+ speed)
+ + rtx_cost (op1, PLUS, 1, speed));
+ if (speed)
+ *cost += extra_cost->int_multiply_extend_add;
+ return true;
+ }
+ *cost += (rtx_cost (XEXP (op0, 0), MULT, 0, speed)
+ + rtx_cost (XEXP (op0, 1), MULT, 1, speed)
+ + rtx_cost (op1, PLUS, 1, speed));
+
+ if (speed)
+ *cost += extra_cost->int_multiply_add;
+
+ return true;
+ }
+
+ *cost += (rtx_cost (new_op0, PLUS, 0, speed)
+ + rtx_cost (op1, PLUS, 1, speed));
+ }
+ return true;
+ }
+
+ return false;
+
+ case IOR:
+ case XOR:
+ case AND:
+ cost_logic:
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
+ {
+ if (CONST_INT_P (op1)
+ && aarch64_bitmask_imm (INTVAL (op1), GET_MODE (x)))
+ {
+ *cost += rtx_cost (op0, AND, 0, speed);
+ }
+ else
+ {
+ if (GET_CODE (op0) == NOT)
+ op0 = XEXP (op0, 0);
+ op0 = aarch64_strip_shift (op0);
+ *cost += (rtx_cost (op0, AND, 0, speed)
+ + rtx_cost (op1, AND, 1, speed));
+ }
+ return true;
+ }
+ return false;
+
+ case ZERO_EXTEND:
+ if ((GET_MODE (x) == DImode
+ && GET_MODE (XEXP (x, 0)) == SImode)
+ || GET_CODE (XEXP (x, 0)) == MEM)
+ {
+ *cost += rtx_cost (XEXP (x, 0), ZERO_EXTEND, 0, speed);
+ return true;
+ }
+ return false;
+
+ case SIGN_EXTEND:
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+ {
+ *cost += rtx_cost (XEXP (x, 0), SIGN_EXTEND, 0, speed);
+ return true;
+ }
+ return false;
+
+ case ROTATE:
+ if (!CONST_INT_P (XEXP (x, 1)))
+ *cost += COSTS_N_INSNS (2);
+ /* Fall through. */
+ case ROTATERT:
+ case LSHIFTRT:
+ case ASHIFT:
+ case ASHIFTRT:
+
+ /* Shifting by a register often takes an extra cycle. */
+ if (speed && !CONST_INT_P (XEXP (x, 1)))
+ *cost += extra_cost->register_shift;
+
+ *cost += rtx_cost (XEXP (x, 0), ASHIFT, 0, speed);
+ return true;
+
+ case HIGH:
+ if (!CONSTANT_P (XEXP (x, 0)))
+ *cost += rtx_cost (XEXP (x, 0), HIGH, 0, speed);
+ return true;
+
+ case LO_SUM:
+ if (!CONSTANT_P (XEXP (x, 1)))
+ *cost += rtx_cost (XEXP (x, 1), LO_SUM, 1, speed);
+ *cost += rtx_cost (XEXP (x, 0), LO_SUM, 0, speed);
+ return true;
+
+ case ZERO_EXTRACT:
+ case SIGN_EXTRACT:
+ *cost += rtx_cost (XEXP (x, 0), ZERO_EXTRACT, 0, speed);
+ return true;
+
+ case MULT:
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+
+ *cost = COSTS_N_INSNS (1);
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
+ {
+ if (CONST_INT_P (op1)
+ && exact_log2 (INTVAL (op1)) > 0)
+ {
+ *cost += rtx_cost (op0, ASHIFT, 0, speed);
+ return true;
+ }
+
+ if ((GET_CODE (op0) == ZERO_EXTEND
+ && GET_CODE (op1) == ZERO_EXTEND)
+ || (GET_CODE (op0) == SIGN_EXTEND
+ && GET_CODE (op1) == SIGN_EXTEND))
+ {
+ *cost += (rtx_cost (XEXP (op0, 0), MULT, 0, speed)
+ + rtx_cost (XEXP (op1, 0), MULT, 1, speed));
+ if (speed)
+ *cost += extra_cost->int_multiply_extend;
+ return true;
+ }
+
+ if (speed)
+ *cost += extra_cost->int_multiply;
+ }
+ else if (speed)
+ {
+ if (GET_MODE (x) == DFmode)
+ *cost += extra_cost->double_multiply;
+ else if (GET_MODE (x) == SFmode)
+ *cost += extra_cost->float_multiply;
+ }
+
+ return false; /* All arguments need to be in registers. */
+
+ case MOD:
+ case UMOD:
+ *cost = COSTS_N_INSNS (2);
+ if (speed)
+ {
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
+ *cost += (extra_cost->int_multiply_add
+ + extra_cost->int_divide);
+ else if (GET_MODE (x) == DFmode)
+ *cost += (extra_cost->double_multiply
+ + extra_cost->double_divide);
+ else if (GET_MODE (x) == SFmode)
+ *cost += (extra_cost->float_multiply
+ + extra_cost->float_divide);
+ }
+ return false; /* All arguments need to be in registers. */
+
+ case DIV:
+ case UDIV:
+ *cost = COSTS_N_INSNS (1);
+ if (speed)
+ {
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
+ *cost += extra_cost->int_divide;
+ else if (GET_MODE (x) == DFmode)
+ *cost += extra_cost->double_divide;
+ else if (GET_MODE (x) == SFmode)
+ *cost += extra_cost->float_divide;
+ }
+ return false; /* All arguments need to be in registers. */
+
+ default:
+ break;
+ }
+ return false;
+}
+
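+/* Implement TARGET_ADDRESS_COST: return the tuning-specific cost of
+ the addressing expression X. */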
+static int
+aarch64_address_cost (rtx x ATTRIBUTE_UNUSED, bool speed ATTRIBUTE_UNUSED)
+{
+ enum rtx_code c = GET_CODE (x);
+ const struct cpu_addrcost_table *addr_cost = aarch64_tune_params->addr_cost;
+
+ if (c == PRE_INC || c == PRE_DEC || c == PRE_MODIFY)
+ return addr_cost->pre_modify;
+
+ if (c == POST_INC || c == POST_DEC || c == POST_MODIFY)
+ return addr_cost->post_modify;
+
+ if (c == PLUS)
+ {
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return addr_cost->imm_offset;
+ else if (GET_CODE (XEXP (x, 0)) == MULT
+ || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
+ || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
+ return addr_cost->register_extend;
+
+ return addr_cost->register_offset;
+ }
+ else if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
+ return addr_cost->imm_offset;
+
+ return 0;
+}
+
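+/* Implement TARGET_REGISTER_MOVE_COST. */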
+static int
+aarch64_register_move_cost (enum machine_mode mode,
+ reg_class_t from, reg_class_t to)
+{
+ const struct cpu_regmove_cost *regmove_cost
+ = aarch64_tune_params->regmove_cost;
+
+ if (from == GENERAL_REGS && to == GENERAL_REGS)
+ return regmove_cost->GP2GP;
+ else if (from == GENERAL_REGS)
+ return regmove_cost->GP2FP;
+ else if (to == GENERAL_REGS)
+ return regmove_cost->FP2GP;
+
+ /* When AdvSIMD instructions are disabled it is not possible to move
+ a 128-bit value directly between Q registers. This is handled in
+ secondary reload. A general register is used as a scratch to move
+ the upper DI value and the lower DI value is moved directly,
+ hence the cost is the sum of three moves. */
+
+ if (! TARGET_SIMD && GET_MODE_SIZE (mode) == 16)
+ return regmove_cost->GP2FP + regmove_cost->FP2GP + regmove_cost->FP2FP;
+
+ return regmove_cost->FP2FP;
+}
+
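+/* Implement TARGET_MEMORY_MOVE_COST. */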
+static int
+aarch64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
+ reg_class_t rclass ATTRIBUTE_UNUSED,
+ bool in ATTRIBUTE_UNUSED)
+{
+ return aarch64_tune_params->memmov_cost;
+}
+
+static void initialize_aarch64_code_model (void);
+
+/* Tuning parameters. */
+
+#if HAVE_DESIGNATED_INITIALIZERS
+#define NAMED_PARAM(NAME, VAL) .NAME = (VAL)
+#else
+#define NAMED_PARAM(NAME, VAL) (VAL)
+#endif
+
+#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
+__extension__
+#endif
+static const struct cpu_rtx_cost_table generic_rtx_cost_table =
+{
+ NAMED_PARAM (memory_load, COSTS_N_INSNS (1)),
+ NAMED_PARAM (memory_store, COSTS_N_INSNS (0)),
+ NAMED_PARAM (register_shift, COSTS_N_INSNS (1)),
+ NAMED_PARAM (int_divide, COSTS_N_INSNS (6)),
+ NAMED_PARAM (float_divide, COSTS_N_INSNS (2)),
+ NAMED_PARAM (double_divide, COSTS_N_INSNS (6)),
+ NAMED_PARAM (int_multiply, COSTS_N_INSNS (1)),
+ NAMED_PARAM (int_multiply_extend, COSTS_N_INSNS (1)),
+ NAMED_PARAM (int_multiply_add, COSTS_N_INSNS (1)),
+ NAMED_PARAM (int_multiply_extend_add, COSTS_N_INSNS (1)),
+ NAMED_PARAM (float_multiply, COSTS_N_INSNS (0)),
+ NAMED_PARAM (double_multiply, COSTS_N_INSNS (1))
+};
+
+#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
+__extension__
+#endif
+static const struct cpu_addrcost_table generic_addrcost_table =
+{
+ NAMED_PARAM (pre_modify, 0),
+ NAMED_PARAM (post_modify, 0),
+ NAMED_PARAM (register_offset, 0),
+ NAMED_PARAM (register_extend, 0),
+ NAMED_PARAM (imm_offset, 0)
+};
+
+#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
+__extension__
+#endif
+static const struct cpu_regmove_cost generic_regmove_cost =
+{
+ NAMED_PARAM (GP2GP, 1),
+ NAMED_PARAM (GP2FP, 2),
+ NAMED_PARAM (FP2GP, 2),
+ /* We currently do not provide direct support for TFmode Q->Q move.
+ Therefore we need to raise the cost above 2 in order to have
+ reload handle the situation. */
+ NAMED_PARAM (FP2FP, 4)
+};
+
+#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
+__extension__
+#endif
+static const struct tune_params generic_tunings =
+{
+ &generic_rtx_cost_table,
+ &generic_addrcost_table,
+ &generic_regmove_cost,
+ NAMED_PARAM (memmov_cost, 4)
+};
+
+
+/* Parse the architecture extension string STR, a sequence of feature
+ modifiers such as "+fp" or "+nosimd", and update aarch64_isa_flags
+ accordingly. */
+
+static void
+aarch64_parse_extension (char *str)
+{
+ /* The extension string is parsed left to right. */
+ const struct aarch64_option_extension *opt = NULL;
+
+ /* Flag to say whether we are adding or removing an extension. */
+ int adding_ext = -1;
+
+ while (str != NULL && *str != 0)
+ {
+ char *ext;
+ size_t len;
+
+ str++;
+ ext = strchr (str, '+');
+
+ if (ext != NULL)
+ len = ext - str;
+ else
+ len = strlen (str);
+
+ if (len >= 2 && strncmp (str, "no", 2) == 0)
+ {
+ adding_ext = 0;
+ len -= 2;
+ str += 2;
+ }
+ else if (len > 0)
+ adding_ext = 1;
+
+ if (len == 0)
+ {
+ error ("missing feature modifier after %qs", "+no");
+ return;
+ }
+
+ /* Scan over the extensions table trying to find an exact match. */
+ for (opt = all_extensions; opt->name != NULL; opt++)
+ {
+ if (strlen (opt->name) == len && strncmp (opt->name, str, len) == 0)
+ {
+ /* Add or remove the extension. */
+ if (adding_ext)
+ aarch64_isa_flags |= opt->flags_on;
+ else
+ aarch64_isa_flags &= ~(opt->flags_off);
+ break;
+ }
+ }
+
+ if (opt->name == NULL)
+ {
+ /* Extension not found in list. */
+ error ("unknown feature modifier %qs", str);
+ return;
+ }
+
+ str = ext;
+ }
+
+ return;
+}
+
+/* Parse the ARCH string. */
+
+static void
+aarch64_parse_arch (void)
+{
+ char *ext;
+ const struct processor *arch;
+ char *str = (char *) alloca (strlen (aarch64_arch_string) + 1);
+ size_t len;
+
+ strcpy (str, aarch64_arch_string);
+
+ ext = strchr (str, '+');
+
+ if (ext != NULL)
+ len = ext - str;
+ else
+ len = strlen (str);
+
+ if (len == 0)
+ {
+ error ("missing arch name in -march=%qs", str);
+ return;
+ }
+
+ /* Loop through the list of supported ARCHs to find a match. */
+ for (arch = all_architectures; arch->name != NULL; arch++)
+ {
+ if (strlen (arch->name) == len && strncmp (arch->name, str, len) == 0)
+ {
+ selected_arch = arch;
+ aarch64_isa_flags = selected_arch->flags;
+ selected_cpu = &all_cores[selected_arch->core];
+
+ if (ext != NULL)
+ {
+ /* ARCH string contains at least one extension. */
+ aarch64_parse_extension (ext);
+ }
+
+ return;
+ }
+ }
+
+ /* ARCH name not found in list. */
+ error ("unknown value %qs for -march", str);
+ return;
+}
+
+/* Parse the CPU string. */
+
+static void
+aarch64_parse_cpu (void)
+{
+ char *ext;
+ const struct processor *cpu;
+ char *str = (char *) alloca (strlen (aarch64_cpu_string) + 1);
+ size_t len;
+
+ strcpy (str, aarch64_cpu_string);
+
+ ext = strchr (str, '+');
+
+ if (ext != NULL)
+ len = ext - str;
+ else
+ len = strlen (str);
+
+ if (len == 0)
+ {
+ error ("missing cpu name in -mcpu=%qs", str);
+ return;
+ }
+
+ /* Loop through the list of supported CPUs to find a match. */
+ for (cpu = all_cores; cpu->name != NULL; cpu++)
+ {
+ if (strlen (cpu->name) == len && strncmp (cpu->name, str, len) == 0)
+ {
+ selected_cpu = cpu;
+ aarch64_isa_flags = selected_cpu->flags;
+
+ if (ext != NULL)
+ {
+ /* CPU string contains at least one extension. */
+ aarch64_parse_extension (ext);
+ }
+
+ return;
+ }
+ }
+
+ /* CPU name not found in list. */
+ error ("unknown value %qs for -mcpu", str);
+ return;
+}
+
+/* Parse the TUNE string. */
+
+static void
+aarch64_parse_tune (void)
+{
+ const struct processor *cpu;
+ char *str = (char *) alloca (strlen (aarch64_tune_string) + 1);
+ strcpy (str, aarch64_tune_string);
+
+ /* Loop through the list of supported CPUs to find a match. */
+ for (cpu = all_cores; cpu->name != NULL; cpu++)
+ {
+ if (strcmp (cpu->name, str) == 0)
+ {
+ selected_tune = cpu;
+ return;
+ }
+ }
+
+ /* CPU name not found in list. */
+ error ("unknown value %qs for -mtune", str);
+ return;
+}
+
+
+/* Implement TARGET_OPTION_OVERRIDE. */
+
+static void
+aarch64_override_options (void)
+{
+ /* march wins over mcpu, so when march is specified, mcpu is ignored
+ and the CPU is taken from the architecture; when only mcpu is
+ given, the architecture is implied by the CPU. mtune can be used
+ with either march or mcpu. */
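+ /* For instance (option values illustrative): with
+ "-march=armv8-a -mcpu=foo" the -mcpu value is discarded, while
+ "-mcpu=foo -mtune=bar" takes the ISA flags from foo and the
+ tuning from bar. */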
+
+ if (aarch64_arch_string)
+ {
+ aarch64_parse_arch ();
+ aarch64_cpu_string = NULL;
+ }
+
+ if (aarch64_cpu_string)
+ {
+ aarch64_parse_cpu ();
+ selected_arch = NULL;
+ }
+
+ if (aarch64_tune_string)
+ {
+ aarch64_parse_tune ();
+ }
+
+ initialize_aarch64_code_model ();
+
+ aarch64_build_bitmask_table ();
+
+ /* This target defaults to strict volatile bitfields. */
+ if (flag_strict_volatile_bitfields < 0 && abi_version_at_least (2))
+ flag_strict_volatile_bitfields = 1;
+
+ /* If the user did not specify a processor, choose the default
+ one for them. This will be the CPU set during configuration using
+ --with-cpu, otherwise it is "generic". */
+ if (!selected_cpu)
+ {
+ selected_cpu = &all_cores[TARGET_CPU_DEFAULT & 0x3f];
+ aarch64_isa_flags = TARGET_CPU_DEFAULT >> 6;
+ }
+
+ gcc_assert (selected_cpu);
+
+ /* The selected cpu may be an architecture, so look up tuning by core ID. */
+ if (!selected_tune)
+ selected_tune = &all_cores[selected_cpu->core];
+
+ aarch64_tune_flags = selected_tune->flags;
+ aarch64_tune = selected_tune->core;
+ aarch64_tune_params = selected_tune->tune;
+
+ aarch64_override_options_after_change ();
+}
+
+/* Implement targetm.override_options_after_change. */
+
+static void
+aarch64_override_options_after_change (void)
+{
+ faked_omit_frame_pointer = false;
+
+ /* To omit leaf frame pointers, we need to turn flag_omit_frame_pointer on so
+ that aarch64_frame_pointer_required will be called. We need to remember
+ whether flag_omit_frame_pointer was turned on normally or just faked. */
+
+ if (flag_omit_leaf_frame_pointer && !flag_omit_frame_pointer)
+ {
+ flag_omit_frame_pointer = true;
+ faked_omit_frame_pointer = true;
+ }
+}
+
+static struct machine_function *
+aarch64_init_machine_status (void)
+{
+ struct machine_function *machine;
+ machine = ggc_alloc_cleared_machine_function ();
+ return machine;
+}
+
+void
+aarch64_init_expanders (void)
+{
+ init_machine_status = aarch64_init_machine_status;
+}
+
+/* Validate the combination of code model and PIC level, and set
+ aarch64_cmodel to the resulting code model. */
+static void
+initialize_aarch64_code_model (void)
+{
+ if (flag_pic)
+ {
+ switch (aarch64_cmodel_var)
+ {
+ case AARCH64_CMODEL_TINY:
+ aarch64_cmodel = AARCH64_CMODEL_TINY_PIC;
+ break;
+ case AARCH64_CMODEL_SMALL:
+ aarch64_cmodel = AARCH64_CMODEL_SMALL_PIC;
+ break;
+ case AARCH64_CMODEL_LARGE:
+ sorry ("code model %qs with -f%s", "large",
+ flag_pic > 1 ? "PIC" : "pic");
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else
+ aarch64_cmodel = aarch64_cmodel_var;
+}
+
+/* Return true if SYMBOL_REF X binds locally. */
+
+static bool
+aarch64_symbol_binds_local_p (const_rtx x)
+{
+ return (SYMBOL_REF_DECL (x)
+ ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
+ : SYMBOL_REF_LOCAL_P (x));
+}
+
+/* Return true if SYMBOL_REF X is thread local. */
+static bool
+aarch64_tls_symbol_p (rtx x)
+{
+ if (! TARGET_HAVE_TLS)
+ return false;
+
+ if (GET_CODE (x) != SYMBOL_REF)
+ return false;
+
+ return SYMBOL_REF_TLS_MODEL (x) != 0;
+}
+
+/* Classify a TLS symbol into one of the TLS kinds. */
+enum aarch64_symbol_type
+aarch64_classify_tls_symbol (rtx x)
+{
+ enum tls_model tls_kind = tls_symbolic_operand_type (x);
+
+ switch (tls_kind)
+ {
+ case TLS_MODEL_GLOBAL_DYNAMIC:
+ case TLS_MODEL_LOCAL_DYNAMIC:
+ return TARGET_TLS_DESC ? SYMBOL_SMALL_TLSDESC : SYMBOL_SMALL_TLSGD;
+
+ case TLS_MODEL_INITIAL_EXEC:
+ return SYMBOL_SMALL_GOTTPREL;
+
+ case TLS_MODEL_LOCAL_EXEC:
+ return SYMBOL_SMALL_TPREL;
+
+ case TLS_MODEL_EMULATED:
+ case TLS_MODEL_NONE:
+ return SYMBOL_FORCE_TO_MEM;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Return the method that should be used to access SYMBOL_REF or
+ LABEL_REF X in context CONTEXT. */
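+/* (Roughly: SYMBOL_SMALL_ABSOLUTE is materialized with an adrp+add
+ pair, SYMBOL_SMALL_GOT with an adrp plus a load from the GOT, and
+ SYMBOL_FORCE_TO_MEM via a literal-pool load; the exact sequences are
+ given by the move patterns.) */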
+enum aarch64_symbol_type
+aarch64_classify_symbol (rtx x,
+ enum aarch64_symbol_context context ATTRIBUTE_UNUSED)
+{
+ if (GET_CODE (x) == LABEL_REF)
+ {
+ switch (aarch64_cmodel)
+ {
+ case AARCH64_CMODEL_LARGE:
+ return SYMBOL_FORCE_TO_MEM;
+
+ case AARCH64_CMODEL_TINY_PIC:
+ case AARCH64_CMODEL_TINY:
+ case AARCH64_CMODEL_SMALL_PIC:
+ case AARCH64_CMODEL_SMALL:
+ return SYMBOL_SMALL_ABSOLUTE;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ gcc_assert (GET_CODE (x) == SYMBOL_REF);
+
+ switch (aarch64_cmodel)
+ {
+ case AARCH64_CMODEL_LARGE:
+ return SYMBOL_FORCE_TO_MEM;
+
+ case AARCH64_CMODEL_TINY:
+ case AARCH64_CMODEL_SMALL:
+
+ /* This is needed so that DFmode and TImode constants are loaded
+ from the constant pool. TImode values must currently be dumped
+ into the constant pool, because we do not yet handle TImode
+ constant loads properly. */
+ if (CONSTANT_POOL_ADDRESS_P (x))
+ return SYMBOL_FORCE_TO_MEM;
+
+ if (aarch64_tls_symbol_p (x))
+ return aarch64_classify_tls_symbol (x);
+
+ if (SYMBOL_REF_WEAK (x))
+ return SYMBOL_FORCE_TO_MEM;
+
+ return SYMBOL_SMALL_ABSOLUTE;
+
+ case AARCH64_CMODEL_TINY_PIC:
+ case AARCH64_CMODEL_SMALL_PIC:
+
+ if (CONSTANT_POOL_ADDRESS_P (x))
+ return SYMBOL_FORCE_TO_MEM;
+
+ if (aarch64_tls_symbol_p (x))
+ return aarch64_classify_tls_symbol (x);
+
+ if (!aarch64_symbol_binds_local_p (x))
+ return SYMBOL_SMALL_GOT;
+
+ return SYMBOL_SMALL_ABSOLUTE;
+
+ default:
+ gcc_unreachable ();
+ }
+ /* By default push everything into the constant pool. */
+ return SYMBOL_FORCE_TO_MEM;
+}
+
+/* Return true if X is a symbolic constant that can be used in context
+ CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
+
+bool
+aarch64_symbolic_constant_p (rtx x, enum aarch64_symbol_context context,
+ enum aarch64_symbol_type *symbol_type)
+{
+ rtx offset;
+ split_const (x, &x, &offset);
+ if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
+ *symbol_type = aarch64_classify_symbol (x, context);
+ else
+ return false;
+
+ /* No checking of offset at this point. */
+ return true;
+}
+
+bool
+aarch64_constant_address_p (rtx x)
+{
+ return (CONSTANT_P (x) && memory_address_p (DImode, x));
+}
+
+bool
+aarch64_legitimate_pic_operand_p (rtx x)
+{
+ if (GET_CODE (x) == SYMBOL_REF
+ || (GET_CODE (x) == CONST
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
+ return false;
+
+ return true;
+}
+
+static bool
+aarch64_legitimate_constant_p (enum machine_mode mode, rtx x)
+{
+ /* Do not allow vector struct mode constants. We could support
+ 0 and -1 easily, but they need support in aarch64-simd.md. */
+ if (TARGET_SIMD && aarch64_vect_struct_mode_p (mode))
+ return false;
+
+ /* This could probably go away because
+ we now decompose CONST_INTs according to expand_mov_immediate. */
+ if ((GET_CODE (x) == CONST_VECTOR
+ && aarch64_simd_valid_immediate (x, mode, false,
+ NULL, NULL, NULL, NULL, NULL) != -1)
+ || CONST_INT_P (x))
+ return !targetm.cannot_force_const_mem (mode, x);
+
+ if (GET_CODE (x) == HIGH
+ && aarch64_valid_symref (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
+ return true;
+
+ return aarch64_constant_address_p (x);
+}
+
+static void
+aarch64_init_builtins (void)
+{
+ tree ftype, decl = NULL;
+
+ ftype = build_function_type (ptr_type_node, void_list_node);
+ decl = add_builtin_function ("__builtin_thread_pointer", ftype,
+ AARCH64_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
+ NULL, NULL_TREE);
+ TREE_NOTHROW (decl) = 1;
+ TREE_READONLY (decl) = 1;
+
+ if (TARGET_SIMD)
+ init_aarch64_simd_builtins ();
+}
+
+static rtx
+aarch64_load_tp (rtx target)
+{
+ if (!target
+ || GET_MODE (target) != Pmode
+ || !register_operand (target, Pmode))
+ target = gen_reg_rtx (Pmode);
+
+ /* Can return in any reg. */
+ emit_insn (gen_aarch64_load_tp_hard (target));
+ return target;
+}
+
+/* Expand an expression EXP that calls a built-in function,
+ with result going to TARGET if that's convenient. */
+static rtx
+aarch64_expand_builtin (tree exp,
+ rtx target,
+ rtx subtarget ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ int ignore ATTRIBUTE_UNUSED)
+{
+ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+ int fcode = DECL_FUNCTION_CODE (fndecl);
+
+ if (fcode == AARCH64_BUILTIN_THREAD_POINTER)
+ return aarch64_load_tp (target);
+
+ if (fcode >= AARCH64_SIMD_BUILTIN_BASE)
+ return aarch64_simd_expand_builtin (fcode, exp, target);
+
+ return NULL_RTX;
+}
+
+/* On AAPCS systems, this is the "struct __va_list". */
+static GTY(()) tree va_list_type;
+
+/* Implement TARGET_BUILD_BUILTIN_VA_LIST.
+ Return the type to use as __builtin_va_list.
+
+ AAPCS64 \S 7.1.4 requires that va_list be a typedef for a type defined as:
+
+ struct __va_list
+ {
+ void *__stack;
+ void *__gr_top;
+ void *__vr_top;
+ int __gr_offs;
+ int __vr_offs;
+ }; */
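+/* The __gr_offs/__vr_offs fields start at minus the size of the
+ corresponding register save area and count up towards zero; once an
+ offset is non-negative, va_arg reads the argument from __stack
+ instead (see aarch64_expand_builtin_va_start and
+ aarch64_gimplify_va_arg_expr below). */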
+
+static tree
+aarch64_build_builtin_va_list (void)
+{
+ tree va_list_name;
+ tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
+
+ /* Create the type. */
+ va_list_type = lang_hooks.types.make_type (RECORD_TYPE);
+ /* Give it the required name. */
+ va_list_name = build_decl (BUILTINS_LOCATION,
+ TYPE_DECL,
+ get_identifier ("__va_list"),
+ va_list_type);
+ DECL_ARTIFICIAL (va_list_name) = 1;
+ TYPE_NAME (va_list_type) = va_list_name;
+
+ /* Create the fields. */
+ f_stack = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__stack"),
+ ptr_type_node);
+ f_grtop = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__gr_top"),
+ ptr_type_node);
+ f_vrtop = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__vr_top"),
+ ptr_type_node);
+ f_groff = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__gr_offs"),
+ integer_type_node);
+ f_vroff = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__vr_offs"),
+ integer_type_node);
+
+ DECL_ARTIFICIAL (f_stack) = 1;
+ DECL_ARTIFICIAL (f_grtop) = 1;
+ DECL_ARTIFICIAL (f_vrtop) = 1;
+ DECL_ARTIFICIAL (f_groff) = 1;
+ DECL_ARTIFICIAL (f_vroff) = 1;
+
+ DECL_FIELD_CONTEXT (f_stack) = va_list_type;
+ DECL_FIELD_CONTEXT (f_grtop) = va_list_type;
+ DECL_FIELD_CONTEXT (f_vrtop) = va_list_type;
+ DECL_FIELD_CONTEXT (f_groff) = va_list_type;
+ DECL_FIELD_CONTEXT (f_vroff) = va_list_type;
+
+ TYPE_FIELDS (va_list_type) = f_stack;
+ DECL_CHAIN (f_stack) = f_grtop;
+ DECL_CHAIN (f_grtop) = f_vrtop;
+ DECL_CHAIN (f_vrtop) = f_groff;
+ DECL_CHAIN (f_groff) = f_vroff;
+
+ /* Compute its layout. */
+ layout_type (va_list_type);
+
+ return va_list_type;
+}
+
+/* Implement TARGET_EXPAND_BUILTIN_VA_START. */
+static void
+aarch64_expand_builtin_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
+{
+ const CUMULATIVE_ARGS *cum;
+ tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
+ tree stack, grtop, vrtop, groff, vroff;
+ tree t;
+ int gr_save_area_size;
+ int vr_save_area_size;
+ int vr_offset;
+
+ cum = &crtl->args.info;
+ gr_save_area_size
+ = (NUM_ARG_REGS - cum->aapcs_ncrn) * UNITS_PER_WORD;
+ vr_save_area_size
+ = (NUM_FP_ARG_REGS - cum->aapcs_nvrn) * UNITS_PER_VREG;
+
+ if (TARGET_GENERAL_REGS_ONLY)
+ {
+ if (cum->aapcs_nvrn > 0)
+ sorry ("%qs and floating point or vector arguments",
+ "-mgeneral-regs-only");
+ vr_save_area_size = 0;
+ }
+
+ f_stack = TYPE_FIELDS (va_list_type_node);
+ f_grtop = DECL_CHAIN (f_stack);
+ f_vrtop = DECL_CHAIN (f_grtop);
+ f_groff = DECL_CHAIN (f_vrtop);
+ f_vroff = DECL_CHAIN (f_groff);
+
+ stack = build3 (COMPONENT_REF, TREE_TYPE (f_stack), valist, f_stack,
+ NULL_TREE);
+ grtop = build3 (COMPONENT_REF, TREE_TYPE (f_grtop), valist, f_grtop,
+ NULL_TREE);
+ vrtop = build3 (COMPONENT_REF, TREE_TYPE (f_vrtop), valist, f_vrtop,
+ NULL_TREE);
+ groff = build3 (COMPONENT_REF, TREE_TYPE (f_groff), valist, f_groff,
+ NULL_TREE);
+ vroff = build3 (COMPONENT_REF, TREE_TYPE (f_vroff), valist, f_vroff,
+ NULL_TREE);
+
+ /* Emit code to initialize STACK, which points to the next varargs stack
+ argument. CUM->AAPCS_STACK_SIZE gives the number of stack words used
+ by named arguments. STACK is 8-byte aligned. */
+ t = make_tree (TREE_TYPE (stack), virtual_incoming_args_rtx);
+ if (cum->aapcs_stack_size > 0)
+ t = fold_build_pointer_plus_hwi (t, cum->aapcs_stack_size * UNITS_PER_WORD);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (stack), stack, t);
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ /* Emit code to initialize GRTOP, the top of the GR save area.
+ virtual_incoming_args_rtx should have been 16-byte aligned. */
+ t = make_tree (TREE_TYPE (grtop), virtual_incoming_args_rtx);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (grtop), grtop, t);
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ /* Emit code to initialize VRTOP, the top of the VR save area.
+ This address is gr_save_area_size bytes below GRTOP, rounded
+ down to the next 16-byte boundary. */
+ t = make_tree (TREE_TYPE (vrtop), virtual_incoming_args_rtx);
+ vr_offset = AARCH64_ROUND_UP (gr_save_area_size,
+ STACK_BOUNDARY / BITS_PER_UNIT);
+
+ if (vr_offset)
+ t = fold_build_pointer_plus_hwi (t, -vr_offset);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (vrtop), vrtop, t);
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ /* Emit code to initialize GROFF, the offset from GRTOP of the
+ next GPR argument. */
+ t = build2 (MODIFY_EXPR, TREE_TYPE (groff), groff,
+ build_int_cst (TREE_TYPE (groff), -gr_save_area_size));
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ /* Likewise emit code to initialize VROFF, the offset from VRTOP
+ of the next VR argument. */
+ t = build2 (MODIFY_EXPR, TREE_TYPE (vroff), vroff,
+ build_int_cst (TREE_TYPE (vroff), -vr_save_area_size));
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+}
+
+/* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
+
+static tree
+aarch64_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
+ gimple_seq *post_p ATTRIBUTE_UNUSED)
+{
+ tree addr;
+ bool indirect_p;
+ bool is_ha; /* is HFA or HVA. */
+ bool dw_align; /* double-word align. */
+ enum machine_mode ag_mode = VOIDmode;
+ int nregs;
+ enum machine_mode mode;
+
+ tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
+ tree stack, f_top, f_off, off, arg, roundup, on_stack;
+ HOST_WIDE_INT size, rsize, adjust, align;
+ tree t, u, cond1, cond2;
+
+ indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
+ if (indirect_p)
+ type = build_pointer_type (type);
+
+ mode = TYPE_MODE (type);
+
+ f_stack = TYPE_FIELDS (va_list_type_node);
+ f_grtop = DECL_CHAIN (f_stack);
+ f_vrtop = DECL_CHAIN (f_grtop);
+ f_groff = DECL_CHAIN (f_vrtop);
+ f_vroff = DECL_CHAIN (f_groff);
+
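+ /* In outline, the expression built below computes:
+
+ if (off >= 0)
+ addr = next stack slot;
+ else if ((off += rsize) > 0)
+ addr = next stack slot;
+ else
+ addr = top of register save area + old off
+ (plus any big-endian adjustment);
+
+ with the stack paths also advancing __stack past the argument. */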
+ stack = build3 (COMPONENT_REF, TREE_TYPE (f_stack), unshare_expr (valist),
+ f_stack, NULL_TREE);
+ size = int_size_in_bytes (type);
+ align = aarch64_function_arg_alignment (mode, type) / BITS_PER_UNIT;
+
+ dw_align = false;
+ adjust = 0;
+ if (aarch64_vfp_is_call_or_return_candidate (mode,
+ type,
+ &ag_mode,
+ &nregs,
+ &is_ha))
+ {
+ /* TYPE passed in fp/simd registers. */
+ if (TARGET_GENERAL_REGS_ONLY)
+ sorry ("%qs and floating point or vector arguments",
+ "-mgeneral-regs-only");
+
+ f_top = build3 (COMPONENT_REF, TREE_TYPE (f_vrtop),
+ unshare_expr (valist), f_vrtop, NULL_TREE);
+ f_off = build3 (COMPONENT_REF, TREE_TYPE (f_vroff),
+ unshare_expr (valist), f_vroff, NULL_TREE);
+
+ rsize = nregs * UNITS_PER_VREG;
+
+ if (is_ha)
+ {
+ if (BYTES_BIG_ENDIAN && GET_MODE_SIZE (ag_mode) < UNITS_PER_VREG)
+ adjust = UNITS_PER_VREG - GET_MODE_SIZE (ag_mode);
+ }
+ else if (BLOCK_REG_PADDING (mode, type, 1) == downward
+ && size < UNITS_PER_VREG)
+ {
+ adjust = UNITS_PER_VREG - size;
+ }
+ }
+ else
+ {
+ /* TYPE passed in general registers. */
+ f_top = build3 (COMPONENT_REF, TREE_TYPE (f_grtop),
+ unshare_expr (valist), f_grtop, NULL_TREE);
+ f_off = build3 (COMPONENT_REF, TREE_TYPE (f_groff),
+ unshare_expr (valist), f_groff, NULL_TREE);
+ rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
+ nregs = rsize / UNITS_PER_WORD;
+
+ if (align > 8)
+ dw_align = true;
+
+ if (BLOCK_REG_PADDING (mode, type, 1) == downward
+ && size < UNITS_PER_WORD)
+ {
+ adjust = UNITS_PER_WORD - size;
+ }
+ }
+
+ /* Get a local temporary for the field value. */
+ off = get_initialized_tmp_var (f_off, pre_p, NULL);
+
+ /* Emit code to branch if off >= 0. */
+ t = build2 (GE_EXPR, boolean_type_node, off,
+ build_int_cst (TREE_TYPE (off), 0));
+ cond1 = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
+
+ if (dw_align)
+ {
+ /* Emit: offs = (offs + 15) & -16. */
+ t = build2 (PLUS_EXPR, TREE_TYPE (off), off,
+ build_int_cst (TREE_TYPE (off), 15));
+ t = build2 (BIT_AND_EXPR, TREE_TYPE (off), t,
+ build_int_cst (TREE_TYPE (off), -16));
+ roundup = build2 (MODIFY_EXPR, TREE_TYPE (off), off, t);
+ }
+ else
+ roundup = NULL;
+
+ /* Update ap.__[g|v]r_offs */
+ t = build2 (PLUS_EXPR, TREE_TYPE (off), off,
+ build_int_cst (TREE_TYPE (off), rsize));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (f_off), unshare_expr (f_off), t);
+
+ /* String up. */
+ if (roundup)
+ t = build2 (COMPOUND_EXPR, TREE_TYPE (t), roundup, t);
+
+ /* [cond2] if (ap.__[g|v]r_offs > 0) */
+ u = build2 (GT_EXPR, boolean_type_node, unshare_expr (f_off),
+ build_int_cst (TREE_TYPE (f_off), 0));
+ cond2 = build3 (COND_EXPR, ptr_type_node, u, NULL_TREE, NULL_TREE);
+
+ /* String up: make sure the assignment happens before the use. */
+ t = build2 (COMPOUND_EXPR, TREE_TYPE (cond2), t, cond2);
+ COND_EXPR_ELSE (cond1) = t;
+
+ /* Prepare the trees handling the argument that is passed on the stack;
+ the top-level node is stored in ON_STACK. */
+ arg = get_initialized_tmp_var (stack, pre_p, NULL);
+ if (align > 8)
+ {
+ /* if (alignof(type) > 8) (arg = arg + 15) & -16; */
+ t = fold_convert (intDI_type_node, arg);
+ t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
+ build_int_cst (TREE_TYPE (t), 15));
+ t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
+ build_int_cst (TREE_TYPE (t), -16));
+ t = fold_convert (TREE_TYPE (arg), t);
+ roundup = build2 (MODIFY_EXPR, TREE_TYPE (arg), arg, t);
+ }
+ else
+ roundup = NULL;
+ /* Advance ap.__stack. */
+ t = fold_convert (intDI_type_node, arg);
+ t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
+ build_int_cst (TREE_TYPE (t), size + 7));
+ t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
+ build_int_cst (TREE_TYPE (t), -8));
+ t = fold_convert (TREE_TYPE (arg), t);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (stack), unshare_expr (stack), t);
+ /* String up roundup and advance. */
+ if (roundup)
+ t = build2 (COMPOUND_EXPR, TREE_TYPE (t), roundup, t);
+ /* String up with ARG so that the value of the whole expression is ARG. */
+ on_stack = build2 (COMPOUND_EXPR, TREE_TYPE (arg), t, arg);
+ /* Big-endianness related address adjustment. */
+ if (BLOCK_REG_PADDING (mode, type, 1) == downward
+ && size < UNITS_PER_WORD)
+ {
+ t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (arg), arg,
+ size_int (UNITS_PER_WORD - size));
+ on_stack = build2 (COMPOUND_EXPR, TREE_TYPE (arg), on_stack, t);
+ }
+
+ COND_EXPR_THEN (cond1) = unshare_expr (on_stack);
+ COND_EXPR_THEN (cond2) = unshare_expr (on_stack);
+
+ /* Adjustment to OFFSET in the case of BIG_ENDIAN. */
+ t = off;
+ if (adjust)
+ t = build2 (PREINCREMENT_EXPR, TREE_TYPE (off), off,
+ build_int_cst (TREE_TYPE (off), adjust));
+
+ t = fold_convert (sizetype, t);
+ t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (f_top), f_top, t);
+
+ if (is_ha)
+ {
+ /* type ha; // treat as "struct {ftype field[n];}"
+ ... [computing offs]
+ for (i = 0; i < nregs; ++i, offs += 16)
+ ha.field[i] = *((ftype *)(ap.__vr_top + offs));
+ return ha; */
+ int i;
+ tree tmp_ha, field_t, field_ptr_t;
+
+ /* Declare a local variable. */
+ tmp_ha = create_tmp_var_raw (type, "ha");
+ gimple_add_tmp_var (tmp_ha);
+
+ /* Establish the base type. */
+ switch (ag_mode)
+ {
+ case SFmode:
+ field_t = float_type_node;
+ field_ptr_t = float_ptr_type_node;
+ break;
+ case DFmode:
+ field_t = double_type_node;
+ field_ptr_t = double_ptr_type_node;
+ break;
+ case TFmode:
+ field_t = long_double_type_node;
+ field_ptr_t = long_double_ptr_type_node;
+ break;
+/* Half-precision and quad-precision floats are not fully supported yet.
+ Enable the following code once support is complete; the correct type
+ node for __fp16 * still needs to be found. */
+#if 0
+ case HFmode:
+ field_t = float_type_node;
+ field_ptr_t = float_ptr_type_node;
+ break;
+#endif
+ case V2SImode:
+ case V4SImode:
+ {
+ tree innertype = make_signed_type (GET_MODE_PRECISION (SImode));
+ field_t = build_vector_type_for_mode (innertype, ag_mode);
+ field_ptr_t = build_pointer_type (field_t);
+ }
+ break;
+ default:
+ gcc_assert (0);
+ }
+
+ /* *(field_ptr_t)&ha = *((field_ptr_t)vr_saved_area). */
+ tmp_ha = build1 (ADDR_EXPR, field_ptr_t, tmp_ha);
+ addr = t;
+ t = fold_convert (field_ptr_t, addr);
+ t = build2 (MODIFY_EXPR, field_t,
+ build1 (INDIRECT_REF, field_t, tmp_ha),
+ build1 (INDIRECT_REF, field_t, t));
+
+ /* ha.field[i] = *((field_ptr_t)vr_saved_area + i) */
+ for (i = 1; i < nregs; ++i)
+ {
+ addr = fold_build_pointer_plus_hwi (addr, UNITS_PER_VREG);
+ u = fold_convert (field_ptr_t, addr);
+ u = build2 (MODIFY_EXPR, field_t,
+ build2 (MEM_REF, field_t, tmp_ha,
+ build_int_cst (field_ptr_t,
+ (i *
+ int_size_in_bytes (field_t)))),
+ build1 (INDIRECT_REF, field_t, u));
+ t = build2 (COMPOUND_EXPR, TREE_TYPE (t), t, u);
+ }
+
+ u = fold_convert (TREE_TYPE (f_top), tmp_ha);
+ t = build2 (COMPOUND_EXPR, TREE_TYPE (f_top), t, u);
+ }
+
+ COND_EXPR_ELSE (cond2) = t;
+ addr = fold_convert (build_pointer_type (type), cond1);
+ addr = build_va_arg_indirect_ref (addr);
+
+ if (indirect_p)
+ addr = build_va_arg_indirect_ref (addr);
+
+ return addr;
+}
+
+/* Implement TARGET_SETUP_INCOMING_VARARGS. */
+
+static void
+aarch64_setup_incoming_varargs (cumulative_args_t cum_v, enum machine_mode mode,
+ tree type, int *pretend_size ATTRIBUTE_UNUSED,
+ int no_rtl)
+{
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+ CUMULATIVE_ARGS local_cum;
+ int gr_saved, vr_saved;
+
+ /* The caller has advanced CUM up to, but not beyond, the last named
+ argument. Advance a local copy of CUM past the last "real" named
+ argument, to find out how many registers are left over. */
+ local_cum = *cum;
+ aarch64_function_arg_advance (pack_cumulative_args (&local_cum),
+ mode, type, true);
+
+ /* Find out how many registers we need to save. */
+ gr_saved = NUM_ARG_REGS - local_cum.aapcs_ncrn;
+ vr_saved = NUM_FP_ARG_REGS - local_cum.aapcs_nvrn;
+
+ if (TARGET_GENERAL_REGS_ONLY)
+ {
+ if (local_cum.aapcs_nvrn > 0)
+ sorry ("%qs and floating point or vector arguments",
+ "-mgeneral-regs-only");
+ vr_saved = 0;
+ }
+
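+ /* The register save area laid out below grows downwards from
+ virtual_incoming_args_rtx:
+
+ GR save area: gr_saved * UNITS_PER_WORD bytes
+ VR save area: vr_saved * UNITS_PER_VREG bytes, placed below the
+ GR area once that is rounded up to STACK_BOUNDARY
+
+ matching the __gr_top/__vr_top values set up in va_start above. */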
+ if (!no_rtl)
+ {
+ if (gr_saved > 0)
+ {
+ rtx ptr, mem;
+
+ /* virtual_incoming_args_rtx should have been 16-byte aligned. */
+ ptr = plus_constant (virtual_incoming_args_rtx,
+ - gr_saved * UNITS_PER_WORD);
+ mem = gen_frame_mem (BLKmode, ptr);
+ set_mem_alias_set (mem, get_varargs_alias_set ());
+
+ move_block_from_reg (local_cum.aapcs_ncrn + R0_REGNUM,
+ mem, gr_saved);
+ }
+ if (vr_saved > 0)
+ {
+ /* We can't use move_block_from_reg, because it will use
+ the wrong mode, storing D regs only. */
+ enum machine_mode mode = TImode;
+ int off, i;
+
+ /* Set OFF to the offset from virtual_incoming_args_rtx of
+ the first vector register. The VR save area lies below
+ the GR one, and is aligned to 16 bytes. */
+ off = -AARCH64_ROUND_UP (gr_saved * UNITS_PER_WORD,
+ STACK_BOUNDARY / BITS_PER_UNIT);
+ off -= vr_saved * UNITS_PER_VREG;
+
+ for (i = local_cum.aapcs_nvrn; i < NUM_FP_ARG_REGS; ++i)
+ {
+ rtx ptr, mem;
+
+ ptr = plus_constant (virtual_incoming_args_rtx, off);
+ mem = gen_frame_mem (mode, ptr);
+ set_mem_alias_set (mem, get_varargs_alias_set ());
+ aarch64_emit_move (mem, gen_rtx_REG (mode, V0_REGNUM + i));
+ off += UNITS_PER_VREG;
+ }
+ }
+ }
+
+ /* We don't save the size into *PRETEND_SIZE because we want to avoid
+ any complication of having crtl->args.pretend_args_size changed. */
+ cfun->machine->saved_varargs_size
+ = (AARCH64_ROUND_UP (gr_saved * UNITS_PER_WORD,
+ STACK_BOUNDARY / BITS_PER_UNIT)
+ + vr_saved * UNITS_PER_VREG);
+}
+
+static void
+aarch64_conditional_register_usage (void)
+{
+ int i;
+ if (!TARGET_FLOAT)
+ {
+ for (i = V0_REGNUM; i <= V31_REGNUM; i++)
+ {
+ fixed_regs[i] = 1;
+ call_used_regs[i] = 1;
+ }
+ }
+}
+
+/* Walk down the type tree of TYPE counting consecutive base elements.
+ If *MODEP is VOIDmode, then set it to the first valid floating point
+ type. If a non-floating point type is found, or if a floating point
+ type that doesn't match a non-VOIDmode *MODEP is found, then return -1,
+ otherwise return the count in the sub-tree. */
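+/* For example, "struct { double x, y; }" gives a count of 2 with
+ *MODEP set to DFmode, while "struct { double x; float y; }" gives
+ -1 because the base types differ. */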
+static int
+aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
+{
+ enum machine_mode mode;
+ HOST_WIDE_INT size;
+
+ switch (TREE_CODE (type))
+ {
+ case REAL_TYPE:
+ mode = TYPE_MODE (type);
+ if (mode != DFmode && mode != SFmode && mode != TFmode)
+ return -1;
+
+ if (*modep == VOIDmode)
+ *modep = mode;
+
+ if (*modep == mode)
+ return 1;
+
+ break;
+
+ case COMPLEX_TYPE:
+ mode = TYPE_MODE (TREE_TYPE (type));
+ if (mode != DFmode && mode != SFmode && mode != TFmode)
+ return -1;
+
+ if (*modep == VOIDmode)
+ *modep = mode;
+
+ if (*modep == mode)
+ return 2;
+
+ break;
+
+ case VECTOR_TYPE:
+ /* Use V2SImode and V4SImode as representatives of all 64-bit
+ and 128-bit vector types. */
+ size = int_size_in_bytes (type);
+ switch (size)
+ {
+ case 8:
+ mode = V2SImode;
+ break;
+ case 16:
+ mode = V4SImode;
+ break;
+ default:
+ return -1;
+ }
+
+ if (*modep == VOIDmode)
+ *modep = mode;
+
+ /* Vector modes are considered to be opaque: two vectors are
+ equivalent for the purposes of being homogeneous aggregates
+ if they are the same size. */
+ if (*modep == mode)
+ return 1;
+
+ break;
+
+ case ARRAY_TYPE:
+ {
+ int count;
+ tree index = TYPE_DOMAIN (type);
+
+ /* Can't handle incomplete types. */
+ if (!COMPLETE_TYPE_P (type))
+ return -1;
+
+ count = aapcs_vfp_sub_candidate (TREE_TYPE (type), modep);
+ if (count == -1
+ || !index
+ || !TYPE_MAX_VALUE (index)
+ || !host_integerp (TYPE_MAX_VALUE (index), 1)
+ || !TYPE_MIN_VALUE (index)
+ || !host_integerp (TYPE_MIN_VALUE (index), 1)
+ || count < 0)
+ return -1;
+
+ count *= (1 + tree_low_cst (TYPE_MAX_VALUE (index), 1)
+ - tree_low_cst (TYPE_MIN_VALUE (index), 1));
+
+ /* There must be no padding. */
+ if (!host_integerp (TYPE_SIZE (type), 1)
+ || (tree_low_cst (TYPE_SIZE (type), 1)
+ != count * GET_MODE_BITSIZE (*modep)))
+ return -1;
+
+ return count;
+ }
+
+ case RECORD_TYPE:
+ {
+ int count = 0;
+ int sub_count;
+ tree field;
+
+ /* Can't handle incomplete types. */
+ if (!COMPLETE_TYPE_P (type))
+ return -1;
+
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
+ if (sub_count < 0)
+ return -1;
+ count += sub_count;
+ }
+
+ /* There must be no padding. */
+ if (!host_integerp (TYPE_SIZE (type), 1)
+ || (tree_low_cst (TYPE_SIZE (type), 1)
+ != count * GET_MODE_BITSIZE (*modep)))
+ return -1;
+
+ return count;
+ }
+
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ {
+ /* These aren't very interesting except in a degenerate case. */
+ int count = 0;
+ int sub_count;
+ tree field;
+
+ /* Can't handle incomplete types. */
+ if (!COMPLETE_TYPE_P (type))
+ return -1;
+
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
+ if (sub_count < 0)
+ return -1;
+ count = count > sub_count ? count : sub_count;
+ }
+
+ /* There must be no padding. */
+ if (!host_integerp (TYPE_SIZE (type), 1)
+ || (tree_low_cst (TYPE_SIZE (type), 1)
+ != count * GET_MODE_BITSIZE (*modep)))
+ return -1;
+
+ return count;
+ }
+
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+/* Return TRUE if the type, as described by TYPE and MODE, is a composite
+ type as described in AAPCS64 \S 4.3. This includes aggregate, union and
+ array types. The C99 floating-point complex types are also considered
+ as composite types, according to AAPCS64 \S 7.1.1. The complex integer
+ types, which are GCC extensions and out of the scope of AAPCS64, are
+ treated as composite types here as well.
+
+ Note that MODE itself is not sufficient in determining whether a type
+ is such a composite type or not. This is because
+ stor-layout.c:compute_record_mode may have already changed the MODE
+ (BLKmode) of a RECORD_TYPE TYPE to some other mode. For example, a
+ structure with only one field may have its MODE set to the mode of the
+ field. Also an integer mode whose size matches the size of the
+ RECORD_TYPE type may be used to substitute the original mode
+ (i.e. BLKmode) in certain circumstances. In other words, MODE cannot be
+ solely relied on. */
+
+static bool
+aarch64_composite_type_p (const_tree type,
+ enum machine_mode mode)
+{
+ if (type && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE))
+ return true;
+
+ if (mode == BLKmode
+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
+ return true;
+
+ return false;
+}
+
+/* Return TRUE if the type, as described by TYPE and MODE, is a short vector
+ type as described in AAPCS64 \S 4.1.2.
+
+ See the comment above aarch64_composite_type_p for the notes on MODE. */
+
+static bool
+aarch64_short_vector_p (const_tree type,
+ enum machine_mode mode)
+{
+ HOST_WIDE_INT size = -1;
+
+ if (type && TREE_CODE (type) == VECTOR_TYPE)
+ size = int_size_in_bytes (type);
+ else if (!aarch64_composite_type_p (type, mode)
+ && (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
+ || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT))
+ size = GET_MODE_SIZE (mode);
+
+ return size == 8 || size == 16;
+}
+
+/* Return TRUE if an argument, whose type is described by TYPE and MODE,
+ shall be passed or returned in simd/fp register(s) (providing these
+ parameter passing registers are available).
+
+ Upon successful return, *COUNT returns the number of needed registers,
+ *BASE_MODE returns the mode of the individual register and when IS_HA
+ is not NULL, *IS_HA indicates whether or not the argument is a homogeneous
+ floating-point aggregate or a homogeneous short-vector aggregate. */
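+/* For example, "struct { double x, y, z; }" is a homogeneous
+ floating-point aggregate: *COUNT is 3, *BASE_MODE is DFmode and
+ *IS_HA is true; aggregates with more than HA_MAX_NUM_FLDS base
+ elements are rejected. */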
+
+static bool
+aarch64_vfp_is_call_or_return_candidate (enum machine_mode mode,
+ const_tree type,
+ enum machine_mode *base_mode,
+ int *count,
+ bool *is_ha)
+{
+ enum machine_mode new_mode = VOIDmode;
+ bool composite_p = aarch64_composite_type_p (type, mode);
+
+ if (is_ha != NULL) *is_ha = false;
+
+ if ((!composite_p && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ || aarch64_short_vector_p (type, mode))
+ {
+ *count = 1;
+ new_mode = mode;
+ }
+ else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+ {
+ if (is_ha != NULL) *is_ha = true;
+ *count = 2;
+ new_mode = GET_MODE_INNER (mode);
+ }
+ else if (type && composite_p)
+ {
+ int ag_count = aapcs_vfp_sub_candidate (type, &new_mode);
+
+ if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
+ {
+ if (is_ha != NULL) *is_ha = true;
+ *count = ag_count;
+ }
+ else
+ return false;
+ }
+ else
+ return false;
+
+ *base_mode = new_mode;
+ return true;
+}
+
+/* Implement TARGET_STRUCT_VALUE_RTX. */
+
+static rtx
+aarch64_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
+ int incoming ATTRIBUTE_UNUSED)
+{
+ return gen_rtx_REG (Pmode, AARCH64_STRUCT_VALUE_REGNUM);
+}
+
+/* Implement TARGET_VECTOR_MODE_SUPPORTED_P. */
+static bool
+aarch64_vector_mode_supported_p (enum machine_mode mode)
+{
+ if (TARGET_SIMD
+ && (mode == V4SImode || mode == V8HImode
+ || mode == V16QImode || mode == V2DImode
+ || mode == V2SImode || mode == V4HImode
+ || mode == V8QImode || mode == V2SFmode
+ || mode == V4SFmode || mode == V2DFmode))
+ return true;
+
+ return false;
+}
+
+/* Return quad mode as the preferred SIMD mode. */
+static enum machine_mode
+aarch64_preferred_simd_mode (enum machine_mode mode)
+{
+ if (TARGET_SIMD)
+ switch (mode)
+ {
+ case DFmode:
+ return V2DFmode;
+ case SFmode:
+ return V4SFmode;
+ case SImode:
+ return V4SImode;
+ case HImode:
+ return V8HImode;
+ case QImode:
+ return V16QImode;
+ case DImode:
+ return V2DImode;
+
+ default:;
+ }
+ return word_mode;
+}
+
+/* Legitimize a memory reference for sync primitive implemented using
+ LDXR/STXR instructions. We currently force the form of the reference
+ to be indirect without offset. */
+static rtx
+aarch64_legitimize_sync_memory (rtx memory)
+{
+ rtx addr = force_reg (Pmode, XEXP (memory, 0));
+ rtx legitimate_memory = gen_rtx_MEM (GET_MODE (memory), addr);
+
+ set_mem_alias_set (legitimate_memory, ALIAS_SET_MEMORY_BARRIER);
+ MEM_VOLATILE_P (legitimate_memory) = MEM_VOLATILE_P (memory);
+ return legitimate_memory;
+}
+
+/* An instruction emitter. */
+typedef void (* emit_f) (int label, const char *, rtx *);
+
+/* An instruction emitter that emits via the conventional
+ output_asm_insn. */
+static void
+aarch64_emit (int label ATTRIBUTE_UNUSED, const char *pattern, rtx *operands)
+{
+ output_asm_insn (pattern, operands);
+}
+
+/* Count the number of emitted synchronization instructions. */
+static unsigned aarch64_insn_count;
+
+/* An emitter that counts emitted instructions but does not actually
+ emit instructions into the instruction stream. */
+static void
+aarch64_count (int label,
+ const char *pattern ATTRIBUTE_UNUSED,
+ rtx *operands ATTRIBUTE_UNUSED)
+{
+ if (! label)
+ ++ aarch64_insn_count;
+}
+
+static void
+aarch64_output_asm_insn (emit_f, int, rtx *,
+ const char *, ...) ATTRIBUTE_PRINTF_4;
+
+/* Construct a pattern using conventional output formatting and feed
+ it to output_asm_insn. Provides a mechanism to construct the
+ output pattern on the fly. Note the hard limit on the pattern
+ buffer size. */
+static void
+aarch64_output_asm_insn (emit_f emit, int label, rtx *operands,
+ const char *pattern, ...)
+{
+ va_list ap;
+ char buffer[256];
+
+ va_start (ap, pattern);
+ vsnprintf (buffer, sizeof (buffer), pattern, ap);
+ va_end (ap);
+ emit (label, buffer, operands);
+}
+
+/* Helper to figure out the instruction suffix required on LDXR/STXR
+ instructions for operations on an object of the specified mode. */
+static const char *
+aarch64_load_store_suffix (enum machine_mode mode)
+{
+ switch (mode)
+ {
+ case QImode: return "b";
+ case HImode: return "h";
+ case SImode: return "";
+ case DImode: return "";
+ default:
+ gcc_unreachable ();
+ }
+ return "";
+}
+
+/* Emit an exclusive load instruction appropriate for the specified
+ mode. */
+static void
+aarch64_output_sync_load (emit_f emit,
+ enum machine_mode mode,
+ rtx target,
+ rtx memory,
+ bool with_barrier)
+{
+ const char *suffix = aarch64_load_store_suffix (mode);
+ rtx operands[2];
+
+ operands[0] = target;
+ operands[1] = memory;
+ aarch64_output_asm_insn (emit, 0, operands, "ld%sxr%s\t%%%s0, %%1",
+ with_barrier ? "a" : "", suffix,
+ mode == DImode ? "x" : "w");
+}
+
+/* Emit an exclusive store instruction appropriate for the specified
+ mode. */
+static void
+aarch64_output_sync_store (emit_f emit,
+ enum machine_mode mode,
+ rtx result,
+ rtx value,
+ rtx memory,
+ bool with_barrier)
+{
+ const char *suffix = aarch64_load_store_suffix (mode);
+ rtx operands[3];
+
+ operands[0] = result;
+ operands[1] = value;
+ operands[2] = memory;
+ aarch64_output_asm_insn (emit, 0, operands,
+ "st%sxr%s\t%%w0, %%%s1, %%2",
+ with_barrier ? "l" : "",
+ suffix,
+ mode == DImode ? "x" : "w");
+}
+
+/* Helper to emit a two operand instruction. */
+static void
+aarch64_output_op2 (emit_f emit, const char *mnemonic, rtx d, rtx s)
+{
+ rtx operands[2];
+ enum machine_mode mode;
+ const char *constraint;
+
+ mode = GET_MODE (d);
+ operands[0] = d;
+ operands[1] = s;
+ constraint = mode == DImode ? "" : "w";
+ aarch64_output_asm_insn (emit, 0, operands, "%s\t%%%s0, %%%s1", mnemonic,
+ constraint, constraint);
+}
+
+/* Helper to emit a three operand instruction. */
+static void
+aarch64_output_op3 (emit_f emit, const char *mnemonic, rtx d, rtx a, rtx b)
+{
+ rtx operands[3];
+ enum machine_mode mode;
+ const char *constraint;
+
+ mode = GET_MODE (d);
+ operands[0] = d;
+ operands[1] = a;
+ operands[2] = b;
+
+ constraint = mode == DImode ? "" : "w";
+ aarch64_output_asm_insn (emit, 0, operands, "%s\t%%%s0, %%%s1, %%%s2",
+ mnemonic, constraint, constraint, constraint);
+}
+
+/* Emit a load store exclusive synchronization loop.
+
+ do
+ old_value = [mem]
+ if old_value != required_value
+ break;
+ t1 = sync_op (old_value, new_value)
+ [mem] = t1, t2 = [0|1]
+ while ! t2
+
+ Note:
+ t1 == t2 is not permitted
+ t1 == old_value is permitted
+
+ required_value:
+
+ RTX register or const_int representing the required old_value for
+ the modify to continue; if NULL, no comparison is performed. */
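+/* For example, for a SImode add with both barriers the loop emitted
+ is roughly (register choices illustrative):
+
+ .LSYT0: ldaxr w1, [x0]
+ add w2, w1, w3
+ stlxr w4, w2, [x0]
+ cbnz w4, .LSYT0
+ .LSYB0: */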
+static void
+aarch64_output_sync_loop (emit_f emit,
+ enum machine_mode mode,
+ rtx old_value,
+ rtx memory,
+ rtx required_value,
+ rtx new_value,
+ rtx t1,
+ rtx t2,
+ enum attr_sync_op sync_op,
+ int acquire_barrier,
+ int release_barrier)
+{
+ rtx operands[1];
+
+ gcc_assert (t1 != t2);
+
+ aarch64_output_asm_insn (emit, 1, operands, "%sLSYT%%=:", LOCAL_LABEL_PREFIX);
+
+ aarch64_output_sync_load (emit, mode, old_value, memory, acquire_barrier);
+
+ if (required_value)
+ {
+ rtx operands[2];
+
+ operands[0] = old_value;
+ operands[1] = required_value;
+ aarch64_output_asm_insn (emit, 0, operands, "cmp\t%%0, %%1");
+ aarch64_output_asm_insn (emit, 0, operands, "bne\t%sLSYB%%=",
+ LOCAL_LABEL_PREFIX);
+ }
+
+ switch (sync_op)
+ {
+ case SYNC_OP_ADD:
+ aarch64_output_op3 (emit, "add", t1, old_value, new_value);
+ break;
+
+ case SYNC_OP_SUB:
+ aarch64_output_op3 (emit, "sub", t1, old_value, new_value);
+ break;
+
+ case SYNC_OP_IOR:
+ aarch64_output_op3 (emit, "orr", t1, old_value, new_value);
+ break;
+
+ case SYNC_OP_XOR:
+ aarch64_output_op3 (emit, "eor", t1, old_value, new_value);
+ break;
+
+ case SYNC_OP_AND:
+ aarch64_output_op3 (emit, "and", t1, old_value, new_value);
+ break;
+
+ case SYNC_OP_NAND:
+ aarch64_output_op3 (emit, "and", t1, old_value, new_value);
+ aarch64_output_op2 (emit, "mvn", t1, t1);
+ break;
+
+ case SYNC_OP_NONE:
+ t1 = new_value;
+ break;
+ }
+
+ aarch64_output_sync_store (emit, mode, t2, t1, memory, release_barrier);
+ operands[0] = t2;
+ aarch64_output_asm_insn (emit, 0, operands, "cbnz\t%%w0, %sLSYT%%=",
+ LOCAL_LABEL_PREFIX);
+
+ aarch64_output_asm_insn (emit, 1, operands, "%sLSYB%%=:", LOCAL_LABEL_PREFIX);
+}
+
+static rtx
+aarch64_get_sync_operand (rtx *operands, int index, rtx default_value)
+{
+ if (index > 0)
+ default_value = operands[index - 1];
+
+ return default_value;
+}
+
+#define FETCH_SYNC_OPERAND(NAME, DEFAULT) \
+ aarch64_get_sync_operand (operands, (int) get_attr_sync_##NAME (insn), \
+ DEFAULT);
+
+/* Extract the operands for a synchronization instruction from the
+ instruction's attributes and emit the instruction. */
+static void
+aarch64_process_output_sync_insn (emit_f emit, rtx insn, rtx *operands)
+{
+ rtx result, memory, required_value, new_value, t1, t2;
+ int release_barrier;
+ int acquire_barrier = 1;
+ enum machine_mode mode;
+ enum attr_sync_op sync_op;
+
+ result = FETCH_SYNC_OPERAND (result, 0);
+ memory = FETCH_SYNC_OPERAND (memory, 0);
+ required_value = FETCH_SYNC_OPERAND (required_value, 0);
+ new_value = FETCH_SYNC_OPERAND (new_value, 0);
+ t1 = FETCH_SYNC_OPERAND (t1, 0);
+ t2 = FETCH_SYNC_OPERAND (t2, 0);
+ release_barrier =
+ get_attr_sync_release_barrier (insn) == SYNC_RELEASE_BARRIER_YES;
+ sync_op = get_attr_sync_op (insn);
+ mode = GET_MODE (memory);
+
+ aarch64_output_sync_loop (emit, mode, result, memory, required_value,
+ new_value, t1, t2, sync_op, acquire_barrier,
+ release_barrier);
+}
+
+/* Emit a synchronization instruction loop. */
+const char *
+aarch64_output_sync_insn (rtx insn, rtx *operands)
+{
+ aarch64_process_output_sync_insn (aarch64_emit, insn, operands);
+ return "";
+}
+
+/* Emit a store release instruction appropriate for the specified
+ mode. */
+const char *
+aarch64_output_sync_lock_release (rtx value, rtx memory)
+{
+ const char *suffix;
+ enum machine_mode mode;
+ rtx operands[2];
+ operands[0] = value;
+ operands[1] = memory;
+ mode = GET_MODE (memory);
+ suffix = aarch64_load_store_suffix (mode);
+ aarch64_output_asm_insn (aarch64_emit, 0, operands,
+ "stlr%s\t%%%s0, %%1",
+ suffix,
+ mode == DImode ? "x" : "w");
+ return "";
+}
+
+/* Count the number of machine instructions that will be emitted for a
+ synchronization instruction. Note that the emitter used does not
+ emit instructions; it just counts them, being careful not to count
+ labels. */
+unsigned int
+aarch64_sync_loop_insns (rtx insn, rtx *operands)
+{
+ aarch64_insn_count = 0;
+ aarch64_process_output_sync_insn (aarch64_count, insn, operands);
+ return aarch64_insn_count;
+}
+
+/* Helper to call a target sync instruction generator, dealing with
+ the variation in operands required by the different generators. */
+static rtx
+aarch64_call_generator (struct aarch64_sync_generator *generator, rtx old_value,
+ rtx memory, rtx required_value, rtx new_value)
+{
+ switch (generator->op)
+ {
+ case aarch64_sync_generator_omn:
+ gcc_assert (! required_value);
+ return generator->u.omn (old_value, memory, new_value);
+
+ case aarch64_sync_generator_omrn:
+ gcc_assert (required_value);
+ return generator->u.omrn (old_value, memory, required_value, new_value);
+ }
+
+ return NULL;
+}
+
+/* Expand a synchronization loop. The synchronization loop is
+ expanded as an opaque block of instructions in order to ensure that
+ we do not subsequently get extraneous memory accesses inserted
+ within the critical region. The exclusive access property of
+ LDXR/STXR instructions is only guaranteed if there are no intervening
+ memory accesses. */
+void
+aarch64_expand_sync (enum machine_mode mode,
+ struct aarch64_sync_generator *generator,
+ rtx target, rtx memory, rtx required_value, rtx new_value)
+{
+ if (target == NULL)
+ target = gen_reg_rtx (mode);
+
+ memory = aarch64_legitimize_sync_memory (memory);
+ if (mode != SImode && mode != DImode)
+ {
+ rtx load_temp = gen_reg_rtx (SImode);
+
+ if (required_value)
+ required_value = convert_modes (SImode, mode, required_value, true);
+
+ new_value = convert_modes (SImode, mode, new_value, true);
+ emit_insn (aarch64_call_generator (generator, load_temp, memory,
+ required_value, new_value));
+ emit_move_insn (target, gen_lowpart (mode, load_temp));
+ }
+ else
+ {
+ emit_insn (aarch64_call_generator (generator, target, memory,
+ required_value, new_value));
+ }
+}
+
+/* Return the equivalent letter for size. */
+static unsigned char
+sizetochar (int size)
+{
+ switch (size)
+ {
+ case 64: return 'd';
+ case 32: return 's';
+ case 16: return 'h';
+ case 8 : return 'b';
+ default: gcc_unreachable ();
+ }
+}
+
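+/* Classify CONST_VECTOR OP as an AdvSIMD MOVI/MVNI-style immediate.
+ Return the matching immediate "type" (the CLASS argument of the
+ CHECK uses below), or -1 if OP cannot be encoded; on success,
+ optionally return the element width, its size letter, whether MVN
+ is required and the shift amount through the pointer arguments. */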
+static int
+aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, int inverse,
+ rtx *modconst, int *elementwidth,
+ unsigned char *elementchar,
+ int *mvn, int *shift)
+{
+#define CHECK(STRIDE, ELSIZE, CLASS, TEST, SHIFT, NEG) \
+ matches = 1; \
+ for (i = 0; i < idx; i += (STRIDE)) \
+ if (!(TEST)) \
+ matches = 0; \
+ if (matches) \
+ { \
+ immtype = (CLASS); \
+ elsize = (ELSIZE); \
+ elchar = sizetochar (elsize); \
+ eshift = (SHIFT); \
+ emvn = (NEG); \
+ break; \
+ }
+
+ unsigned int i, elsize = 0, idx = 0, n_elts = CONST_VECTOR_NUNITS (op);
+ unsigned int innersize = GET_MODE_SIZE (GET_MODE_INNER (mode));
+ unsigned char bytes[16];
+ unsigned char elchar = 0;
+ int immtype = -1, matches;
+ unsigned int invmask = inverse ? 0xff : 0;
+ int eshift, emvn;
+
+ /* TODO: Vectors of float constants. */
+ if (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
+ return -1;
+
+ /* Splat vector constant out into a byte vector. */
+ for (i = 0; i < n_elts; i++)
+ {
+ rtx el = CONST_VECTOR_ELT (op, i);
+ unsigned HOST_WIDE_INT elpart;
+ unsigned int part, parts;
+
+ if (GET_CODE (el) == CONST_INT)
+ {
+ elpart = INTVAL (el);
+ parts = 1;
+ }
+ else if (GET_CODE (el) == CONST_DOUBLE)
+ {
+ elpart = CONST_DOUBLE_LOW (el);
+ parts = 2;
+ }
+ else
+ gcc_unreachable ();
+
+ for (part = 0; part < parts; part++)
+ {
+ unsigned int byte;
+ for (byte = 0; byte < innersize; byte++)
+ {
+ bytes[idx++] = (elpart & 0xff) ^ invmask;
+ elpart >>= BITS_PER_UNIT;
+ }
+ if (GET_CODE (el) == CONST_DOUBLE)
+ elpart = CONST_DOUBLE_HIGH (el);
+ }
+ }
+
+ /* Sanity check. */
+ gcc_assert (idx == GET_MODE_SIZE (mode));
+
+ do
+ {
+ CHECK (4, 32, 0, bytes[i] == bytes[0] && bytes[i + 1] == 0
+ && bytes[i + 2] == 0 && bytes[i + 3] == 0, 0, 0);
+
+ CHECK (4, 32, 1, bytes[i] == 0 && bytes[i + 1] == bytes[1]
+ && bytes[i + 2] == 0 && bytes[i + 3] == 0, 8, 0);
+
+ CHECK (4, 32, 2, bytes[i] == 0 && bytes[i + 1] == 0
+ && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0, 16, 0);
+
+ CHECK (4, 32, 3, bytes[i] == 0 && bytes[i + 1] == 0
+ && bytes[i + 2] == 0 && bytes[i + 3] == bytes[3], 24, 0);
+
+ CHECK (2, 16, 4, bytes[i] == bytes[0] && bytes[i + 1] == 0, 0, 0);
+
+ CHECK (2, 16, 5, bytes[i] == 0 && bytes[i + 1] == bytes[1], 8, 0);
+
+ CHECK (4, 32, 6, bytes[i] == bytes[0] && bytes[i + 1] == 0xff
+ && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 0, 1);
+
+ CHECK (4, 32, 7, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
+ && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 8, 1);
+
+ CHECK (4, 32, 8, bytes[i] == 0xff && bytes[i + 1] == 0xff
+ && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff, 16, 1);
+
+ CHECK (4, 32, 9, bytes[i] == 0xff && bytes[i + 1] == 0xff
+ && bytes[i + 2] == 0xff && bytes[i + 3] == bytes[3], 24, 1);
+
+ CHECK (2, 16, 10, bytes[i] == bytes[0] && bytes[i + 1] == 0xff, 0, 1);
+
+ CHECK (2, 16, 11, bytes[i] == 0xff && bytes[i + 1] == bytes[1], 8, 1);
+
+ CHECK (4, 32, 12, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
+ && bytes[i + 2] == 0 && bytes[i + 3] == 0, 0, 0);
+
+ CHECK (4, 32, 13, bytes[i] == 0 && bytes[i + 1] == bytes[1]
+ && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 0, 1);
+
+ CHECK (4, 32, 14, bytes[i] == 0xff && bytes[i + 1] == 0xff
+ && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0, 0, 0);
+
+ CHECK (4, 32, 15, bytes[i] == 0 && bytes[i + 1] == 0
+ && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff, 0, 1);
+
+ CHECK (1, 8, 16, bytes[i] == bytes[0], 0, 0);
+
+ CHECK (1, 64, 17, (bytes[i] == 0 || bytes[i] == 0xff)
+ && bytes[i] == bytes[(i + 8) % idx], 0, 0);
+ }
+ while (0);
+
+ /* TODO: Currently the assembler cannot handle types 12 to 15, and
+ there is no way to specify the cmode through the compiler, so
+ disable them until the assembler gains support. */
+ if (immtype == -1
+ || (immtype >= 12 && immtype <= 15)
+ || immtype == 18)
+ return -1;
+
+
+ if (elementwidth)
+ *elementwidth = elsize;
+
+ if (elementchar)
+ *elementchar = elchar;
+
+ if (mvn)
+ *mvn = emvn;
+
+ if (shift)
+ *shift = eshift;
+
+ if (modconst)
+ {
+ unsigned HOST_WIDE_INT imm = 0;
+
+ /* Un-invert bytes of recognized vector, if necessary. */
+ if (invmask != 0)
+ for (i = 0; i < idx; i++)
+ bytes[i] ^= invmask;
+
+ if (immtype == 17)
+ {
+ /* FIXME: Broken on 32-bit H_W_I hosts. */
+ gcc_assert (sizeof (HOST_WIDE_INT) == 8);
+
+ for (i = 0; i < 8; i++)
+ imm |= (unsigned HOST_WIDE_INT) (bytes[i] ? 0xff : 0)
+ << (i * BITS_PER_UNIT);
+
+ *modconst = GEN_INT (imm);
+ }
+ else
+ {
+ unsigned HOST_WIDE_INT imm = 0;
+
+ for (i = 0; i < elsize / BITS_PER_UNIT; i++)
+ imm |= (unsigned HOST_WIDE_INT) bytes[i] << (i * BITS_PER_UNIT);
+
+ /* Construct 'abcdefgh' because the assembler cannot handle
+ generic constants. */
+ gcc_assert (shift != NULL && mvn != NULL);
+ if (*mvn)
+ imm = ~imm;
+ imm = (imm >> *shift) & 0xff;
+ *modconst = GEN_INT (imm);
+ }
+ }
+
+ return immtype;
+#undef CHECK
+}
+
+/* Return TRUE if rtx OP is legal for use as either an AdvSIMD MOVI instruction
+ (or, implicitly, MVNI) immediate. Write back width per element
+ to *ELEMENTWIDTH (or zero for float elements), and a modified constant
+ (whatever should be output for a MOVI instruction) in *MODCONST. */
+int
+aarch64_simd_immediate_valid_for_move (rtx op, enum machine_mode mode,
+ rtx *modconst, int *elementwidth,
+ unsigned char *elementchar,
+ int *mvn, int *shift)
+{
+ rtx tmpconst;
+ int tmpwidth;
+ unsigned char tmpwidthc;
+ int tmpmvn = 0, tmpshift = 0;
+ int retval = aarch64_simd_valid_immediate (op, mode, 0, &tmpconst,
+ &tmpwidth, &tmpwidthc,
+ &tmpmvn, &tmpshift);
+
+ if (retval == -1)
+ return 0;
+
+ if (modconst)
+ *modconst = tmpconst;
+
+ if (elementwidth)
+ *elementwidth = tmpwidth;
+
+ if (elementchar)
+ *elementchar = tmpwidthc;
+
+ if (mvn)
+ *mvn = tmpmvn;
+
+ if (shift)
+ *shift = tmpshift;
+
+ return 1;
+}
+
+static bool
+aarch64_const_vec_all_same_int_p (rtx x,
+ HOST_WIDE_INT minval,
+ HOST_WIDE_INT maxval)
+{
+ HOST_WIDE_INT firstval;
+ int count, i;
+
+ if (GET_CODE (x) != CONST_VECTOR
+ || GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_INT)
+ return false;
+
+ firstval = INTVAL (CONST_VECTOR_ELT (x, 0));
+ if (firstval < minval || firstval > maxval)
+ return false;
+
+ count = CONST_VECTOR_NUNITS (x);
+ for (i = 1; i < count; i++)
+ if (INTVAL (CONST_VECTOR_ELT (x, i)) != firstval)
+ return false;
+
+ return true;
+}
+
+/* Check that immediate shift constants are within range. */
+bool
+aarch64_simd_shift_imm_p (rtx x, enum machine_mode mode, bool left)
+{
+ int bit_width = GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT;
+ if (left)
+ return aarch64_const_vec_all_same_int_p (x, 0, bit_width - 1);
+ else
+ return aarch64_const_vec_all_same_int_p (x, 1, bit_width);
+}
+
+bool
+aarch64_simd_imm_zero_p (rtx x, enum machine_mode mode)
+{
+ int nunits;
+ int i;
+
+ if (GET_CODE (x) != CONST_VECTOR)
+ return false;
+
+ nunits = GET_MODE_NUNITS (mode);
+
+ for (i = 0; i < nunits; i++)
+ if (INTVAL (CONST_VECTOR_ELT (x, i)) != 0)
+ return false;
+
+ return true;
+}
+
+bool
+aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ HOST_WIDE_INT imm = INTVAL (x);
+ int i;
+
+ for (i = 0; i < 8; i++)
+ {
+ unsigned int byte = imm & 0xff;
+ if (byte != 0xff && byte != 0)
+ return false;
+ imm >>= 8;
+ }
+
+ return true;
+}
+
+/* Return a const_int vector of VAL. */
+rtx
+aarch64_simd_gen_const_vector_dup (enum machine_mode mode, int val)
+{
+ int nunits = GET_MODE_NUNITS (mode);
+ rtvec v = rtvec_alloc (nunits);
+ int i;
+
+ for (i = 0; i < nunits; i++)
+ RTVEC_ELT (v, i) = GEN_INT (val);
+
+ return gen_rtx_CONST_VECTOR (mode, v);
+}
+
+/* Construct and return a PARALLEL RTX vector selecting either the low
+ or the high half of the lanes of MODE, according to HIGH. */
+rtx
+aarch64_simd_vect_par_cnst_half (enum machine_mode mode, bool high)
+{
+ int nunits = GET_MODE_NUNITS (mode);
+ rtvec v = rtvec_alloc (nunits / 2);
+ int base = high ? nunits / 2 : 0;
+ rtx t1;
+ int i;
+
+ for (i = 0; i < nunits / 2; i++)
+ RTVEC_ELT (v, i) = GEN_INT (base + i);
+
+ t1 = gen_rtx_PARALLEL (mode, v);
+ return t1;
+}
+
+/* Bounds-check lanes. Ensure OPERAND lies between LOW (inclusive) and
+ HIGH (exclusive). */
+void
+aarch64_simd_lane_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
+{
+ HOST_WIDE_INT lane;
+ gcc_assert (GET_CODE (operand) == CONST_INT);
+ lane = INTVAL (operand);
+
+ if (lane < low || lane >= high)
+ error ("lane out of range");
+}
+
+void
+aarch64_simd_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
+{
+ gcc_assert (GET_CODE (operand) == CONST_INT);
+ HOST_WIDE_INT lane = INTVAL (operand);
+
+ if (lane < low || lane >= high)
+ error ("constant out of range");
+}
+
+/* Emit code to reinterpret one AdvSIMD type as another,
+ without altering bits. */
+void
+aarch64_simd_reinterpret (rtx dest, rtx src)
+{
+ emit_move_insn (dest, gen_lowpart (GET_MODE (dest), src));
+}
+
+/* Emit code to place an AdvSIMD pair result in memory locations (with
+ equal registers). */
+void
+aarch64_simd_emit_pair_result_insn (enum machine_mode mode,
+ rtx (*intfn) (rtx, rtx, rtx), rtx destaddr,
+ rtx op1)
+{
+ rtx mem = gen_rtx_MEM (mode, destaddr);
+ rtx tmp1 = gen_reg_rtx (mode);
+ rtx tmp2 = gen_reg_rtx (mode);
+
+ emit_insn (intfn (tmp1, op1, tmp2));
+
+ emit_move_insn (mem, tmp1);
+ mem = adjust_address (mem, mode, GET_MODE_SIZE (mode));
+ emit_move_insn (mem, tmp2);
+}
+
+/* Return TRUE if OP is a valid vector addressing mode. */
+bool
+aarch64_simd_mem_operand_p (rtx op)
+{
+ return MEM_P (op) && (GET_CODE (XEXP (op, 0)) == POST_INC
+ || GET_CODE (XEXP (op, 0)) == REG);
+}
+
+/* Set up OPERANDS for a register copy from SRC to DEST, taking care
+ not to early-clobber SRC registers in the process.
+
+ We assume that the operands described by SRC and DEST represent a
+ decomposed copy of OPERANDS[1] into OPERANDS[0]. COUNT is the
+ number of components into which the copy has been decomposed. */
+void
+aarch64_simd_disambiguate_copy (rtx *operands, rtx *dest,
+ rtx *src, unsigned int count)
+{
+ unsigned int i;
+
+ if (!reg_overlap_mentioned_p (operands[0], operands[1])
+ || REGNO (operands[0]) < REGNO (operands[1]))
+ {
+ for (i = 0; i < count; i++)
+ {
+ operands[2 * i] = dest[i];
+ operands[2 * i + 1] = src[i];
+ }
+ }
+ else
+ {
+ for (i = 0; i < count; i++)
+ {
+ operands[2 * i] = dest[count - i - 1];
+ operands[2 * i + 1] = src[count - i - 1];
+ }
+ }
+}
+
+/* Compute and return the length of aarch64_simd_mov<mode>, where <mode> is
+ one of VSTRUCT modes: OI, CI or XI. */
+int
+aarch64_simd_attr_length_move (rtx insn)
+{
+ rtx reg, mem, addr;
+ int load;
+ enum machine_mode mode;
+
+ extract_insn_cached (insn);
+
+ if (REG_P (recog_data.operand[0]) && REG_P (recog_data.operand[1]))
+ {
+ mode = GET_MODE (recog_data.operand[0]);
+ switch (mode)
+ {
+ case OImode:
+ return 8;
+ case CImode:
+ return 12;
+ case XImode:
+ return 16;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ return 4;
+}
+
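+/* Implement TARGET_SHIFT_TRUNCATION_MASK. Scalar shift instructions
+ truncate the shift amount to the operand width, so report a mask of
+ width - 1; AdvSIMD shifts do not truncate, so return 0 for vector
+ modes. */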
+static unsigned HOST_WIDE_INT
+aarch64_shift_truncation_mask (enum machine_mode mode)
+{
+ return
+ (aarch64_vector_mode_supported_p (mode)
+ || aarch64_vect_struct_mode_p (mode)) ? 0 : (GET_MODE_BITSIZE (mode) - 1);
+}
+
+#ifndef TLS_SECTION_ASM_FLAG
+#define TLS_SECTION_ASM_FLAG 'T'
+#endif
+
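+/* Emit the directive for switching to section NAME with attributes
+ FLAGS, for example: .section .text,"ax",@progbits
+ (output illustrative; the exact form depends on FLAGS and the
+ target macros used below). */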
+void
+aarch64_elf_asm_named_section (const char *name, unsigned int flags,
+ tree decl ATTRIBUTE_UNUSED)
+{
+ char flagchars[10], *f = flagchars;
+
+ /* If we have already declared this section, we can use an
+ abbreviated form to switch back to it -- unless this section is
+ part of a COMDAT group, in which case GAS requires the full
+ declaration every time. */
+ if (!(HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
+ && (flags & SECTION_DECLARED))
+ {
+ fprintf (asm_out_file, "\t.section\t%s\n", name);
+ return;
+ }
+
+ if (!(flags & SECTION_DEBUG))
+ *f++ = 'a';
+ if (flags & SECTION_WRITE)
+ *f++ = 'w';
+ if (flags & SECTION_CODE)
+ *f++ = 'x';
+ if (flags & SECTION_SMALL)
+ *f++ = 's';
+ if (flags & SECTION_MERGE)
+ *f++ = 'M';
+ if (flags & SECTION_STRINGS)
+ *f++ = 'S';
+ if (flags & SECTION_TLS)
+ *f++ = TLS_SECTION_ASM_FLAG;
+ if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
+ *f++ = 'G';
+ *f = '\0';
+
+ fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars);
+
+ if (!(flags & SECTION_NOTYPE))
+ {
+ const char *type;
+ const char *format;
+
+ if (flags & SECTION_BSS)
+ type = "nobits";
+ else
+ type = "progbits";
+
+#ifdef TYPE_OPERAND_FMT
+ format = "," TYPE_OPERAND_FMT;
+#else
+ format = ",@%s";
+#endif
+
+ fprintf (asm_out_file, format, type);
+
+ if (flags & SECTION_ENTSIZE)
+ fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE);
+ if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
+ {
+ if (TREE_CODE (decl) == IDENTIFIER_NODE)
+ fprintf (asm_out_file, ",%s,comdat", IDENTIFIER_POINTER (decl));
+ else
+ fprintf (asm_out_file, ",%s,comdat",
+ IDENTIFIER_POINTER (DECL_COMDAT_GROUP (decl)));
+ }
+ }
+
+ putc ('\n', asm_out_file);
+}
+
+/* Select a format to encode pointers in exception handling data. */
+int
+aarch64_asm_preferred_eh_data_format (int code ATTRIBUTE_UNUSED, int global)
+{
+ int type;
+ switch (aarch64_cmodel)
+ {
+ case AARCH64_CMODEL_TINY:
+ case AARCH64_CMODEL_TINY_PIC:
+ case AARCH64_CMODEL_SMALL:
+ case AARCH64_CMODEL_SMALL_PIC:
+ /* text+got+data < 4Gb. 4-byte signed relocs are sufficient
+ for everything. */
+ type = DW_EH_PE_sdata4;
+ break;
+ default:
+ /* No assumptions here. 8-byte relocs required. */
+ type = DW_EH_PE_sdata8;
+ break;
+ }
+ return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
+}
+
+static void
+aarch64_start_file (void)
+{
+ if (selected_arch)
+ asm_fprintf (asm_out_file, "\t.arch %s\n", selected_arch->name);
+ else if (selected_cpu)
+ asm_fprintf (asm_out_file, "\t.cpu %s\n", selected_cpu->name);
+  default_file_start ();
+}
+
+/* Target hook for c_mode_for_suffix. */
+static enum machine_mode
+aarch64_c_mode_for_suffix (char suffix)
+{
+ if (suffix == 'q')
+ return TFmode;
+
+ return VOIDmode;
+}
+
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST aarch64_address_cost
+
+/* This hook determines whether unnamed bitfields affect the alignment
+ of the containing structure. The hook returns true if the structure
+ should inherit the alignment requirements of an unnamed bitfield's
+ type. */
+#undef TARGET_ALIGN_ANON_BITFIELD
+#define TARGET_ALIGN_ANON_BITFIELD hook_bool_void_true
+
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_DI_OP "\t.xword\t"
+
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
+
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
+
+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \
+ hook_bool_const_tree_hwi_hwi_const_tree_true
+
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START aarch64_start_file
+
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+#define TARGET_ASM_OUTPUT_MI_THUNK aarch64_output_mi_thunk
+
+#undef TARGET_ASM_SELECT_RTX_SECTION
+#define TARGET_ASM_SELECT_RTX_SECTION aarch64_select_rtx_section
+
+#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
+#define TARGET_ASM_TRAMPOLINE_TEMPLATE aarch64_asm_trampoline_template
+
+#undef TARGET_BUILD_BUILTIN_VA_LIST
+#define TARGET_BUILD_BUILTIN_VA_LIST aarch64_build_builtin_va_list
+
+#undef TARGET_CALLEE_COPIES
+#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_false
+
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE aarch64_can_eliminate
+
+#undef TARGET_CANNOT_FORCE_CONST_MEM
+#define TARGET_CANNOT_FORCE_CONST_MEM aarch64_cannot_force_const_mem
+
+#undef TARGET_CONDITIONAL_REGISTER_USAGE
+#define TARGET_CONDITIONAL_REGISTER_USAGE aarch64_conditional_register_usage
+
+/* Only the least significant bit is used for initialization guard
+ variables. */
+#undef TARGET_CXX_GUARD_MASK_BIT
+#define TARGET_CXX_GUARD_MASK_BIT hook_bool_void_true
+
+#undef TARGET_C_MODE_FOR_SUFFIX
+#define TARGET_C_MODE_FOR_SUFFIX aarch64_c_mode_for_suffix
+
+#ifdef TARGET_BIG_ENDIAN_DEFAULT
+#undef TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS (MASK_BIG_END)
+#endif
+
+#undef TARGET_CLASS_MAX_NREGS
+#define TARGET_CLASS_MAX_NREGS aarch64_class_max_nregs
+
+#undef TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN aarch64_expand_builtin
+
+#undef TARGET_EXPAND_BUILTIN_VA_START
+#define TARGET_EXPAND_BUILTIN_VA_START aarch64_expand_builtin_va_start
+
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG aarch64_function_arg
+
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE aarch64_function_arg_advance
+
+#undef TARGET_FUNCTION_ARG_BOUNDARY
+#define TARGET_FUNCTION_ARG_BOUNDARY aarch64_function_arg_boundary
+
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+#define TARGET_FUNCTION_OK_FOR_SIBCALL aarch64_function_ok_for_sibcall
+
+#undef TARGET_FUNCTION_VALUE
+#define TARGET_FUNCTION_VALUE aarch64_function_value
+
+#undef TARGET_FUNCTION_VALUE_REGNO_P
+#define TARGET_FUNCTION_VALUE_REGNO_P aarch64_function_value_regno_p
+
+#undef TARGET_FRAME_POINTER_REQUIRED
+#define TARGET_FRAME_POINTER_REQUIRED aarch64_frame_pointer_required
+
+#undef TARGET_GIMPLIFY_VA_ARG_EXPR
+#define TARGET_GIMPLIFY_VA_ARG_EXPR aarch64_gimplify_va_arg_expr
+
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS aarch64_init_builtins
+
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P aarch64_legitimate_address_hook_p
+
+#undef TARGET_LEGITIMATE_CONSTANT_P
+#define TARGET_LEGITIMATE_CONSTANT_P aarch64_legitimate_constant_p
+
+#undef TARGET_LIBGCC_CMP_RETURN_MODE
+#define TARGET_LIBGCC_CMP_RETURN_MODE aarch64_libgcc_cmp_return_mode
+
+#undef TARGET_MEMORY_MOVE_COST
+#define TARGET_MEMORY_MOVE_COST aarch64_memory_move_cost
+
+#undef TARGET_MUST_PASS_IN_STACK
+#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
+
+/* This target hook should return true if accesses to volatile bitfields
+ should use the narrowest mode possible. It should return false if these
+ accesses should use the bitfield container type. */
+#undef TARGET_NARROW_VOLATILE_BITFIELD
+#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
+
+#undef TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE aarch64_override_options
+
+#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
+#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \
+ aarch64_override_options_after_change
+
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE aarch64_pass_by_reference
+
+#undef TARGET_PREFERRED_RELOAD_CLASS
+#define TARGET_PREFERRED_RELOAD_CLASS aarch64_preferred_reload_class
+
+#undef TARGET_SECONDARY_RELOAD
+#define TARGET_SECONDARY_RELOAD aarch64_secondary_reload
+
+#undef TARGET_SHIFT_TRUNCATION_MASK
+#define TARGET_SHIFT_TRUNCATION_MASK aarch64_shift_truncation_mask
+
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS aarch64_setup_incoming_varargs
+
+#undef TARGET_STRUCT_VALUE_RTX
+#define TARGET_STRUCT_VALUE_RTX aarch64_struct_value_rtx
+
+#undef TARGET_REGISTER_MOVE_COST
+#define TARGET_REGISTER_MOVE_COST aarch64_register_move_cost
+
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY aarch64_return_in_memory
+
+#undef TARGET_RETURN_IN_MSB
+#define TARGET_RETURN_IN_MSB aarch64_return_in_msb
+
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS aarch64_rtx_costs
+
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT aarch64_trampoline_init
+
+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P aarch64_use_blocks_for_constant_p
+
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P aarch64_vector_mode_supported_p
+
+#undef TARGET_ARRAY_MODE_SUPPORTED_P
+#define TARGET_ARRAY_MODE_SUPPORTED_P aarch64_array_mode_supported_p
+
+#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
+#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE aarch64_preferred_simd_mode
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+#include "gt-aarch64.h"
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
new file mode 100644
index 00000000000..dac11049866
--- /dev/null
+++ b/gcc/config/aarch64/aarch64.h
@@ -0,0 +1,824 @@
+/* Machine description for AArch64 architecture.
+ Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+
+#ifndef GCC_AARCH64_H
+#define GCC_AARCH64_H
+
+/* Target CPU builtins. */
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__aarch64__"); \
+ if (TARGET_BIG_END) \
+ builtin_define ("__AARCH64EB__"); \
+ else \
+ builtin_define ("__AARCH64EL__"); \
+ } while (0)
+
+
+
+/* Target machine storage layout. */
+
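+/* Promote QImode and HImode integer values to SImode; wider integer
+   modes and all floating-point modes are left untouched.  */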
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ if (MODE == QImode || MODE == HImode) \
+ { \
+ MODE = SImode; \
+ } \
+ }
+
+/* Bits are always numbered from the LSBit. */
+#define BITS_BIG_ENDIAN 0
+
+/* Big/little-endian flavour. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
+
+/* AdvSIMD and floating point are supported in the default
+   configuration, unless disabled by -mgeneral-regs-only.  */
+#define TARGET_SIMD (!TARGET_GENERAL_REGS_ONLY)
+#define TARGET_FLOAT (!TARGET_GENERAL_REGS_ONLY)
+
+#define UNITS_PER_WORD 8
+
+#define UNITS_PER_VREG 16
+
+#define PARM_BOUNDARY 64
+
+#define STACK_BOUNDARY 128
+
+#define FUNCTION_BOUNDARY 32
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define BIGGEST_ALIGNMENT 128
+
+#define SHORT_TYPE_SIZE 16
+
+#define INT_TYPE_SIZE 32
+
+#define LONG_TYPE_SIZE 64 /* XXX This should be an option */
+
+#define LONG_LONG_TYPE_SIZE 64
+
+#define FLOAT_TYPE_SIZE 32
+
+#define DOUBLE_TYPE_SIZE 64
+
+#define LONG_DOUBLE_TYPE_SIZE 128
+
+/* The architecture reserves all bits of the address for hardware use,
+ so the vbit must go into the delta field of pointers to member
+ functions. This is the same config as that in the AArch32
+ port. */
+#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_delta
+
+/* Make strings word-aligned so that strcpy from constants will be
+ faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ ((TREE_CODE (EXP) == STRING_CST \
+ && !optimize_size \
+ && (ALIGN) < BITS_PER_WORD) \
+ ? BITS_PER_WORD : ALIGN)
+
+#define DATA_ALIGNMENT(EXP, ALIGN) \
+ ((((ALIGN) < BITS_PER_WORD) \
+ && (TREE_CODE (EXP) == ARRAY_TYPE \
+ || TREE_CODE (EXP) == UNION_TYPE \
+ || TREE_CODE (EXP) == RECORD_TYPE)) \
+ ? BITS_PER_WORD : (ALIGN))
+
+#define LOCAL_ALIGNMENT(EXP, ALIGN) DATA_ALIGNMENT(EXP, ALIGN)
+
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* Defined by the ABI */
+#define WCHAR_TYPE "unsigned int"
+#define WCHAR_TYPE_SIZE 32
+
+/* Using long long breaks -ansi and -std=c90, so these will need to be
+ made conditional for an LLP64 ABI. */
+
+#define SIZE_TYPE "long unsigned int"
+
+#define PTRDIFF_TYPE "long int"
+
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+
+/* Instruction tuning/selection flags. */
+
+/* Bit values used to identify processor capabilities. */
+#define AARCH64_FL_SIMD (1 << 0) /* Has SIMD instructions. */
+#define AARCH64_FL_FP (1 << 1) /* Has FP. */
+#define AARCH64_FL_CRYPTO (1 << 2) /* Has crypto. */
+#define AARCH64_FL_SLOWMUL (1 << 3) /* A slow multiply core. */
+
+/* Has FP and SIMD. */
+#define AARCH64_FL_FPSIMD (AARCH64_FL_FP | AARCH64_FL_SIMD)
+
+/* Has FP without SIMD. */
+#define AARCH64_FL_FPQ16 (AARCH64_FL_FP & ~AARCH64_FL_SIMD)
+
+/* Architecture flags that affect instruction selection.  */
+#define AARCH64_FL_FOR_ARCH8 (AARCH64_FL_FPSIMD)
+
+/* Macros to test ISA flags. */
+extern unsigned long aarch64_isa_flags;
+#define AARCH64_ISA_CRYPTO (aarch64_isa_flags & AARCH64_FL_CRYPTO)
+#define AARCH64_ISA_FP (aarch64_isa_flags & AARCH64_FL_FP)
+#define AARCH64_ISA_SIMD (aarch64_isa_flags & AARCH64_FL_SIMD)
+
+/* Macros to test tuning flags. */
+extern unsigned long aarch64_tune_flags;
+#define AARCH64_TUNE_SLOWMUL (aarch64_tune_flags & AARCH64_FL_SLOWMUL)
+
+
+/* Standard register usage. */
+
+/* 31 64-bit general purpose registers R0-R30:
+ R30 LR (link register)
+ R29 FP (frame pointer)
+ R19-R28 Callee-saved registers
+ R18 The platform register; use as temporary register.
+ R17 IP1 The second intra-procedure-call temporary register
+ (can be used by call veneers and PLT code); otherwise use
+ as a temporary register
+ R16 IP0 The first intra-procedure-call temporary register (can
+ be used by call veneers and PLT code); otherwise use as a
+ temporary register
+ R9-R15 Temporary registers
+ R8 Structure value parameter / temporary register
+ R0-R7 Parameter/result registers
+
+ SP stack pointer, encoded as X/R31 where permitted.
+ ZR zero register, encoded as X/R31 elsewhere
+
+ 32 x 128-bit floating-point/vector registers
+ V16-V31 Caller-saved (temporary) registers
+ V8-V15 Callee-saved registers
+ V0-V7 Parameter/result registers
+
+ The vector register V0 holds scalar B0, H0, S0 and D0 in its least
+   significant bits.  Unlike AArch32, S1 is not packed into D0,
+ etc. */
+
+/* Note that we don't mark X30 as a call-clobbered register. The idea is
+ that it's really the call instructions themselves which clobber X30.
+ We don't care what the called function does with it afterwards.
+
+ This approach makes it easier to implement sibcalls. Unlike normal
+ calls, sibcalls don't clobber X30, so the register reaches the
+ called function intact. EPILOGUE_USES says that X30 is useful
+ to the called function. */
+
+#define FIXED_REGISTERS \
+ { \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* R0 - R7 */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* R8 - R15 */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* R16 - R23 */ \
+ 0, 0, 0, 0, 0, 1, 0, 1, /* R24 - R30, SP */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* V0 - V7 */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* V8 - V15 */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* V16 - V23 */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* V24 - V31 */ \
+ 1, 1, 1, /* SFP, AP, CC */ \
+ }
+
+#define CALL_USED_REGISTERS \
+ { \
+ 1, 1, 1, 1, 1, 1, 1, 1, /* R0 - R7 */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, /* R8 - R15 */ \
+ 1, 1, 1, 0, 0, 0, 0, 0, /* R16 - R23 */ \
+ 0, 0, 0, 0, 0, 1, 0, 1, /* R24 - R30, SP */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, /* V0 - V7 */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* V8 - V15 */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, /* V16 - V23 */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, /* V24 - V31 */ \
+ 1, 1, 1, /* SFP, AP, CC */ \
+ }
+
+#define REGISTER_NAMES \
+ { \
+ "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", \
+ "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", \
+ "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", \
+ "x24", "x25", "x26", "x27", "x28", "x29", "x30", "sp", \
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", \
+ "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", \
+ "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", \
+ "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", \
+ "sfp", "ap", "cc", \
+ }
+
+/* Generate the register aliases for core register N */
+#define R_ALIASES(N) {"r" # N, R0_REGNUM + (N)}, \
+ {"w" # N, R0_REGNUM + (N)}
+
+#define V_ALIASES(N) {"q" # N, V0_REGNUM + (N)}, \
+ {"d" # N, V0_REGNUM + (N)}, \
+ {"s" # N, V0_REGNUM + (N)}, \
+ {"h" # N, V0_REGNUM + (N)}, \
+ {"b" # N, V0_REGNUM + (N)}
+
+/* Provide aliases for all of the ISA defined register name forms.
+ These aliases are convenient for use in the clobber lists of inline
+ asm statements. */
+
+#define ADDITIONAL_REGISTER_NAMES \
+ { R_ALIASES(0), R_ALIASES(1), R_ALIASES(2), R_ALIASES(3), \
+ R_ALIASES(4), R_ALIASES(5), R_ALIASES(6), R_ALIASES(7), \
+ R_ALIASES(8), R_ALIASES(9), R_ALIASES(10), R_ALIASES(11), \
+ R_ALIASES(12), R_ALIASES(13), R_ALIASES(14), R_ALIASES(15), \
+ R_ALIASES(16), R_ALIASES(17), R_ALIASES(18), R_ALIASES(19), \
+ R_ALIASES(20), R_ALIASES(21), R_ALIASES(22), R_ALIASES(23), \
+ R_ALIASES(24), R_ALIASES(25), R_ALIASES(26), R_ALIASES(27), \
+ R_ALIASES(28), R_ALIASES(29), R_ALIASES(30), /* 31 omitted */ \
+ V_ALIASES(0), V_ALIASES(1), V_ALIASES(2), V_ALIASES(3), \
+ V_ALIASES(4), V_ALIASES(5), V_ALIASES(6), V_ALIASES(7), \
+ V_ALIASES(8), V_ALIASES(9), V_ALIASES(10), V_ALIASES(11), \
+ V_ALIASES(12), V_ALIASES(13), V_ALIASES(14), V_ALIASES(15), \
+ V_ALIASES(16), V_ALIASES(17), V_ALIASES(18), V_ALIASES(19), \
+ V_ALIASES(20), V_ALIASES(21), V_ALIASES(22), V_ALIASES(23), \
+ V_ALIASES(24), V_ALIASES(25), V_ALIASES(26), V_ALIASES(27), \
+ V_ALIASES(28), V_ALIASES(29), V_ALIASES(30), V_ALIASES(31) \
+ }
+
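+/* As an illustration (a hypothetical user fragment, not part of this
+   port), these aliases allow
+
+     asm volatile ("" : : : "w5", "d7");
+
+   to name the same hard registers that "x5" and "v7" would.  */
+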
+/* Say that the epilogue uses the return address register. Note that
+ in the case of sibcalls, the values "used by the epilogue" are
+ considered live at the start of the called function. */
+
+#define EPILOGUE_USES(REGNO) \
+ ((REGNO) == LR_REGNUM)
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers. */
+#define EXIT_IGNORE_STACK 1
+
+#define STATIC_CHAIN_REGNUM R18_REGNUM
+#define HARD_FRAME_POINTER_REGNUM R29_REGNUM
+#define FRAME_POINTER_REGNUM SFP_REGNUM
+#define STACK_POINTER_REGNUM SP_REGNUM
+#define ARG_POINTER_REGNUM AP_REGNUM
+#define FIRST_PSEUDO_REGISTER 67
+
+/* The number of (integer) argument registers available.  */
+#define NUM_ARG_REGS 8
+#define NUM_FP_ARG_REGS 8
+
+/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
+ four members. */
+#define HA_MAX_NUM_FLDS 4
+
+/* External dwarf register number scheme.  These numbers are used to
+   identify registers in dwarf debug information; the values are
+ defined by the AArch64 ABI. The numbering scheme is independent of
+ GCC's internal register numbering scheme. */
+
+#define AARCH64_DWARF_R0 0
+
+/* The number of R registers; note 31, not 32.  */
+#define AARCH64_DWARF_NUMBER_R 31
+
+#define AARCH64_DWARF_SP 31
+#define AARCH64_DWARF_V0 64
+
+/* The number of V registers. */
+#define AARCH64_DWARF_NUMBER_V 32
+
+/* For signal frames we need to use an alternative return column. This
+ value must not correspond to a hard register and must be out of the
+ range of DWARF_FRAME_REGNUM(). */
+#define DWARF_ALT_FRAME_RETURN_COLUMN \
+ (AARCH64_DWARF_V0 + AARCH64_DWARF_NUMBER_V)
+
+/* We add 1 extra frame register for use as the
+ DWARF_ALT_FRAME_RETURN_COLUMN. */
+#define DWARF_FRAME_REGISTERS (DWARF_ALT_FRAME_RETURN_COLUMN + 1)
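+/* With AARCH64_DWARF_V0 == 64 and AARCH64_DWARF_NUMBER_V == 32 the
+   alternative return column works out to 96, giving 97 DWARF frame
+   registers in total.  */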
+
+
+#define DBX_REGISTER_NUMBER(REGNO) aarch64_dbx_register_number (REGNO)
+/* Provide a definition of DWARF_FRAME_REGNUM here so that fallback unwinders
+   can use DWARF_ALT_FRAME_RETURN_COLUMN defined above.  This is just the same
+   as the default definition in dwarf2out.c.  */
+#undef DWARF_FRAME_REGNUM
+#define DWARF_FRAME_REGNUM(REGNO) DBX_REGISTER_NUMBER (REGNO)
+
+#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (LR_REGNUM)
+
+#define HARD_REGNO_NREGS(REGNO, MODE) aarch64_hard_regno_nregs (REGNO, MODE)
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE) aarch64_hard_regno_mode_ok (REGNO, MODE)
+
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
+
+#define DWARF2_UNWIND_INFO 1
+
+/* Use R0 through R3 to pass exception handling information. */
+#define EH_RETURN_DATA_REGNO(N) \
+ ((N) < 4 ? ((unsigned int) R0_REGNUM + (N)) : INVALID_REGNUM)
+
+/* Select a format to encode pointers in exception handling data. */
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
+ aarch64_asm_preferred_eh_data_format ((CODE), (GLOBAL))
+
+/* The register that holds the stack adjustment in exception handlers.  */
+#define AARCH64_EH_STACKADJ_REGNUM (R0_REGNUM + 4)
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, AARCH64_EH_STACKADJ_REGNUM)
+
+/* Don't use __builtin_setjmp until we've defined it. */
+#undef DONT_USE_BUILTIN_SETJMP
+#define DONT_USE_BUILTIN_SETJMP 1
+
+/* Register in which the structure value is to be returned. */
+#define AARCH64_STRUCT_VALUE_REGNUM R8_REGNUM
+
+/* Non-zero if REGNO is part of the Core register set.
+
+ The rather unusual way of expressing this check is to avoid
+ warnings when building the compiler when R0_REGNUM is 0 and REGNO
+ is unsigned. */
+#define GP_REGNUM_P(REGNO) \
+ (((unsigned) (REGNO - R0_REGNUM)) <= (R30_REGNUM - R0_REGNUM))
+
+#define FP_REGNUM_P(REGNO) \
+ (((unsigned) (REGNO - V0_REGNUM)) <= (V31_REGNUM - V0_REGNUM))
+
+#define FP_LO_REGNUM_P(REGNO) \
+ (((unsigned) (REGNO - V0_REGNUM)) <= (V15_REGNUM - V0_REGNUM))
+
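+/* For example, with R0_REGNUM == 0, a REGNO below the range wraps to a
+   huge unsigned value under the subtraction, so the single <=
+   comparison rejects it without the "comparison is always true"
+   warning that an explicit REGNO >= R0_REGNUM test would trigger.  */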
+
+/* Register and constant classes. */
+
+enum reg_class
+{
+ NO_REGS,
+ CORE_REGS,
+ GENERAL_REGS,
+ STACK_REG,
+ POINTER_REGS,
+ FP_LO_REGS,
+ FP_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES /* Last */
+};
+
+#define N_REG_CLASSES ((int) LIM_REG_CLASSES)
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "CORE_REGS", \
+ "GENERAL_REGS", \
+ "STACK_REG", \
+ "POINTER_REGS", \
+ "FP_LO_REGS", \
+ "FP_REGS", \
+ "ALL_REGS" \
+}
+
+#define REG_CLASS_CONTENTS \
+{ \
+ { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
+ { 0x7fffffff, 0x00000000, 0x00000003 }, /* CORE_REGS */ \
+ { 0x7fffffff, 0x00000000, 0x00000003 }, /* GENERAL_REGS */ \
+ { 0x80000000, 0x00000000, 0x00000000 }, /* STACK_REG */ \
+ { 0xffffffff, 0x00000000, 0x00000003 }, /* POINTER_REGS */ \
+ { 0x00000000, 0x0000ffff, 0x00000000 }, /* FP_LO_REGS */ \
+ { 0x00000000, 0xffffffff, 0x00000000 }, /* FP_REGS */ \
+ { 0xffffffff, 0xffffffff, 0x00000007 } /* ALL_REGS */ \
+}
+
+#define REGNO_REG_CLASS(REGNO) aarch64_regno_regclass (REGNO)
+
+#define INDEX_REG_CLASS CORE_REGS
+#define BASE_REG_CLASS POINTER_REGS
+
+/* Register pairs used to eliminate unneeded registers that point into
+   the stack frame.  */
+#define ELIMINABLE_REGS \
+{ \
+ { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM }, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM }, \
+}
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ (OFFSET) = aarch64_initial_elimination_offset (FROM, TO)
+
+/* CPU/ARCH option handling. */
+#include "config/aarch64/aarch64-opts.h"
+
+enum target_cpus
+{
+#define AARCH64_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
+ TARGET_CPU_##IDENT,
+#include "aarch64-cores.def"
+#undef AARCH64_CORE
+ TARGET_CPU_generic
+};
+
+/* If there is no CPU defined at configure time, use "generic" as default.  */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT \
+ (TARGET_CPU_generic | (AARCH64_CPU_DEFAULT_FLAGS << 6))
+#endif
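+
+/* The low 6 bits of TARGET_CPU_DEFAULT select the core; the default
+   ISA flags are packed above them, hence the shift by 6.  */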
+
+/* The processor for which instructions should be scheduled. */
+extern enum aarch64_processor aarch64_tune;
+
+/* RTL generation support. */
+#define INIT_EXPANDERS aarch64_init_expanders ()
+
+
+/* Stack layout; function entry, exit and calling. */
+#define STACK_GROWS_DOWNWARD 1
+
+#define FRAME_GROWS_DOWNWARD 0
+
+#define STARTING_FRAME_OFFSET 0
+
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Fix for VFP */
+#define LIBCALL_VALUE(MODE) \
+ gen_rtx_REG (MODE, FLOAT_MODE_P (MODE) ? V0_REGNUM : R0_REGNUM)
+
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+#define AARCH64_ROUND_UP(X, ALIGNMENT) \
+ (((X) + ((ALIGNMENT) - 1)) & ~((ALIGNMENT) - 1))
+
+#define AARCH64_ROUND_DOWN(X, ALIGNMENT) \
+ ((X) & ~((ALIGNMENT) - 1))
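+
+/* For example, AARCH64_ROUND_UP (13, 8) is 16 and
+   AARCH64_ROUND_DOWN (13, 8) is 8; ALIGNMENT must be a power of two
+   for the mask arithmetic to be valid.  */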
+
+#ifdef HOST_WIDE_INT
+struct GTY (()) aarch64_frame
+{
+ HOST_WIDE_INT reg_offset[FIRST_PSEUDO_REGISTER];
+ HOST_WIDE_INT saved_regs_size;
+  /* Padding, if needed, after all the callee-saved registers have
+     been saved.  */
+ HOST_WIDE_INT padding0;
+ HOST_WIDE_INT hardfp_offset; /* HARD_FRAME_POINTER_REGNUM */
+ HOST_WIDE_INT fp_lr_offset; /* Space needed for saving fp and/or lr */
+
+ bool laid_out;
+};
+
+typedef struct GTY (()) machine_function
+{
+ struct aarch64_frame frame;
+
+ /* The number of extra stack bytes taken up by register varargs.
+ This area is allocated by the callee at the very top of the frame. */
+ HOST_WIDE_INT saved_varargs_size;
+
+} machine_function;
+#endif
+
+
+/* Which ABI to use. */
+enum arm_abi_type
+{
+ ARM_ABI_AAPCS64
+};
+
+enum arm_pcs
+{
+ ARM_PCS_AAPCS64, /* Base standard AAPCS for 64 bit. */
+ ARM_PCS_UNKNOWN
+};
+
+
+extern enum arm_abi_type arm_abi;
+extern enum arm_pcs arm_pcs_variant;
+#ifndef ARM_DEFAULT_ABI
+#define ARM_DEFAULT_ABI ARM_ABI_AAPCS64
+#endif
+
+#ifndef ARM_DEFAULT_PCS
+#define ARM_DEFAULT_PCS ARM_PCS_AAPCS64
+#endif
+
+/* We can't use enum machine_mode inside a generator file because it
+ hasn't been created yet; we shouldn't be using any code that
+ needs the real definition though, so this ought to be safe. */
+#ifdef GENERATOR_FILE
+#define MACHMODE int
+#else
+#include "insn-modes.h"
+#define MACHMODE enum machine_mode
+#endif
+
+
+/* AAPCS related state tracking. */
+typedef struct
+{
+ enum arm_pcs pcs_variant;
+ int aapcs_arg_processed; /* No need to lay out this argument again. */
+ int aapcs_ncrn; /* Next Core register number. */
+ int aapcs_nextncrn; /* Next next core register number. */
+ int aapcs_nvrn; /* Next Vector register number. */
+ int aapcs_nextnvrn; /* Next Next Vector register number. */
+ rtx aapcs_reg; /* Register assigned to this argument. This
+ is NULL_RTX if this parameter goes on
+ the stack. */
+ MACHMODE aapcs_vfp_rmode;
+ int aapcs_stack_words; /* If the argument is passed on the stack, this
+ is the number of words needed, after rounding
+ up. Only meaningful when
+ aapcs_reg == NULL_RTX. */
+  int aapcs_stack_size;	/* The total size (in 8-byte words) of the
+			   stack arg area so far.  */
+} CUMULATIVE_ARGS;
+
+#define FUNCTION_ARG_PADDING(MODE, TYPE) \
+ (aarch64_pad_arg_upward (MODE, TYPE) ? upward : downward)
+
+#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
+ (aarch64_pad_reg_upward (MODE, TYPE, FIRST) ? upward : downward)
+
+#define PAD_VARARGS_DOWN 0
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
+ aarch64_init_cumulative_args (&(CUM), FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS)
+
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+  aarch64_function_arg_regno_p (REGNO)
+
+
+/* ISA Features. */
+
+/* Addressing modes, etc. */
+#define HAVE_POST_INCREMENT 1
+#define HAVE_PRE_INCREMENT 1
+#define HAVE_POST_DECREMENT 1
+#define HAVE_PRE_DECREMENT 1
+#define HAVE_POST_MODIFY_DISP 1
+#define HAVE_PRE_MODIFY_DISP 1
+
+#define MAX_REGS_PER_ADDRESS 2
+
+#define CONSTANT_ADDRESS_P(X) aarch64_constant_address_p (X)
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and jump to WIN. This
+ macro is used in only one place: `find_reloads_address' in reload.c. */
+
+#define LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, IND_L, WIN) \
+do { \
+ rtx new_x = aarch64_legitimize_reload_address (&(X), MODE, OPNUM, TYPE, \
+ IND_L); \
+ if (new_x) \
+ { \
+ X = new_x; \
+ goto WIN; \
+ } \
+} while (0)
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ aarch64_regno_ok_for_base_p (REGNO, true)
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ aarch64_regno_ok_for_index_p (REGNO, true)
+
+#define LEGITIMATE_PIC_OPERAND_P(X) \
+ aarch64_legitimate_pic_operand_p (X)
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for.
+ Post-inc/dec are now explicitly handled by recog.c. */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL)
+
+#define CASE_VECTOR_MODE Pmode
+
+#define DEFAULT_SIGNED_CHAR 0
+
+/* An integer expression for the size in bits of the largest integer machine
+ mode that should actually be used. We allow pairs of registers. */
+#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TImode)
+
+/* Maximum bytes moved by a single instruction (load/store pair). */
+#define MOVE_MAX (UNITS_PER_WORD * 2)
+
+/* The base cost overhead of a memcpy call, for MOVE_RATIO and friends. */
+#define AARCH64_CALL_RATIO 8
+
+/* When optimizing for size, give a better estimate of the length of a memcpy
+ call, but use the default otherwise. But move_by_pieces_ninsns() counts
+ memory-to-memory moves, and we'll have to generate a load & store for each,
+ so halve the value to take that into account. */
+#define MOVE_RATIO(speed) \
+ (((speed) ? 15 : AARCH64_CALL_RATIO) / 2)
+
+/* For CLEAR_RATIO, when optimizing for size, give a better estimate
+ of the length of a memset call, but use the default otherwise. */
+#define CLEAR_RATIO(speed) \
+ ((speed) ? 15 : AARCH64_CALL_RATIO)
+
+/* SET_RATIO is similar to CLEAR_RATIO, but for a non-zero constant, so when
+ optimizing for size adjust the ratio to account for the overhead of loading
+ the constant. */
+#define SET_RATIO(speed) \
+ ((speed) ? 15 : AARCH64_CALL_RATIO - 2)
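+
+/* With AARCH64_CALL_RATIO at 8, these work out to a MOVE_RATIO of 7
+   when optimizing for speed and 4 for size, a CLEAR_RATIO of 15 or 8,
+   and a SET_RATIO of 15 or 6.  */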
+
+/* STORE_BY_PIECES_P can be used when copying a constant string, but
+ in that case each 64-bit chunk takes 5 insns instead of 2 (LDR/STR).
+ For now we always fail this and let the move_by_pieces code copy
+ the string from read-only memory. */
+#define STORE_BY_PIECES_P(SIZE, ALIGN) 0
+
+/* Disable auto-increment in move_by_pieces et al. Use of auto-increment is
+ rarely a good idea in straight-line code since it adds an extra address
+ dependency between each instruction. Better to use incrementing offsets. */
+#define USE_LOAD_POST_INCREMENT(MODE) 0
+#define USE_LOAD_POST_DECREMENT(MODE) 0
+#define USE_LOAD_PRE_INCREMENT(MODE) 0
+#define USE_LOAD_PRE_DECREMENT(MODE) 0
+#define USE_STORE_POST_INCREMENT(MODE) 0
+#define USE_STORE_POST_DECREMENT(MODE) 0
+#define USE_STORE_PRE_INCREMENT(MODE) 0
+#define USE_STORE_PRE_DECREMENT(MODE) 0
+
+/* ?? #define WORD_REGISTER_OPERATIONS */
+
+/* Define if loading from memory in MODE, an integral mode narrower than
+ BITS_PER_WORD will either zero-extend or sign-extend. The value of this
+ macro should be the code that says which one of the two operations is
+ implicitly done, or UNKNOWN if none. */
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
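+/* (This matches the hardware: LDRB and LDRH zero-extend into the full
+   register, so narrow integer loads are known to be zero-extended.)  */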
+
+/* Define this macro to be non-zero if instructions will fail to work
+ if given data not on the nominal alignment. */
+#define STRICT_ALIGNMENT TARGET_STRICT_ALIGN
+
+/* Define this macro to be non-zero if accessing less than a word of
+ memory is no faster than accessing a word of memory, i.e., if such
+ accesses require more than one instruction or if there is no
+ difference in cost.
+   Although there's no difference in instruction count or cycles,
+   on AArch64 we don't want to widen a sub-word access to a 64-bit
+   access if we don't have to, for power-saving reasons.  */
+#define SLOW_BYTE_ACCESS 0
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+#define NO_FUNCTION_CSE 1
+
+#define Pmode DImode
+#define FUNCTION_MODE Pmode
+
+#define SELECT_CC_MODE(OP, X, Y) aarch64_select_cc_mode (OP, X, Y)
+
+#define REVERSE_CONDITION(CODE, MODE) \
+ (((MODE) == CCFPmode || (MODE) == CCFPEmode) \
+ ? reverse_condition_maybe_unordered (CODE) \
+ : reverse_condition (CODE))
+
+#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
+ ((VALUE) = ((MODE) == SImode ? 32 : 64), 2)
+#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
+ ((VALUE) = ((MODE) == SImode ? 32 : 64), 2)
+
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM)
+
+#define RETURN_ADDR_RTX aarch64_return_addr
+
+#define TRAMPOLINE_SIZE aarch64_trampoline_size ()
+
+/* Trampolines contain dwords, so must be dword aligned. */
+#define TRAMPOLINE_ALIGNMENT 64
+
+/* Put trampolines in the text section so that mapping symbols work
+ correctly. */
+#define TRAMPOLINE_SECTION text_section
+
+/* Costs, etc. */
+#define MEMORY_MOVE_COST(M, CLASS, IN) \
+ (GET_MODE_SIZE (M) < 8 ? 8 : GET_MODE_SIZE (M))
+
+/* To start with. */
+#define BRANCH_COST(SPEED_P, PREDICTABLE_P) 2
+
+
+/* Assembly output. */
+
+/* For now we'll make all jump tables pc-relative. */
+#define CASE_VECTOR_PC_RELATIVE 1
+
+#define CASE_VECTOR_SHORTEN_MODE(min, max, body) \
+ ((min < -0x1fff0 || max > 0x1fff0) ? SImode \
+ : (min < -0x1f0 || max > 0x1f0) ? HImode \
+ : QImode)
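+
+/* For example, offsets all within [-0x1f0, 0x1f0] allow QImode table
+   entries, offsets within [-0x1fff0, 0x1fff0] allow HImode, and
+   anything larger requires SImode.  */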
+
+/* Jump table alignment is explicit in ASM_OUTPUT_CASE_LABEL. */
+#define ADDR_VEC_ALIGN(JUMPTABLE) 0
+
+#define PRINT_OPERAND(STREAM, X, CODE) aarch64_print_operand (STREAM, X, CODE)
+
+#define PRINT_OPERAND_ADDRESS(STREAM, X) \
+ aarch64_print_operand_address (STREAM, X)
+
+#define FUNCTION_PROFILER(STREAM, LABELNO) \
+ aarch64_function_profiler (STREAM, LABELNO)
+
+/* For some reason, the Linux headers think they know how to define
+ these macros. They don't!!! */
+#undef ASM_APP_ON
+#undef ASM_APP_OFF
+#define ASM_APP_ON "\t" ASM_COMMENT_START " Start of user assembly\n"
+#define ASM_APP_OFF "\t" ASM_COMMENT_START " End of user assembly\n"
+
+#define ASM_FPRINTF_EXTENSIONS(FILE, ARGS, P) \
+ case '@': \
+ fputs (ASM_COMMENT_START, FILE); \
+ break; \
+ \
+ case 'r': \
+ fputs (REGISTER_PREFIX, FILE); \
+ fputs (reg_names[va_arg (ARGS, int)], FILE); \
+ break;
+
+#define CONSTANT_POOL_BEFORE_FUNCTION 0
+
+/* This definition should be relocated to aarch64-elf-raw.h. This macro
+ should be undefined in aarch64-linux.h and a clear_cache pattern
+   implemented to emit either the call to __aarch64_sync_cache_range ()
+   directly or, preferably, the appropriate syscall or cache-clear
+ instructions inline. */
+#define CLEAR_INSN_CACHE(beg, end) \
+ extern void __aarch64_sync_cache_range (void *, void *); \
+ __aarch64_sync_cache_range (beg, end)
+
+/* This should be integrated with the equivalent in the 32 bit
+ world. */
+enum aarch64_builtins
+{
+ AARCH64_BUILTIN_MIN,
+ AARCH64_BUILTIN_THREAD_POINTER,
+ AARCH64_SIMD_BUILTIN_BASE
+};
+
+/* VFP registers may only be accessed in the mode they
+ were set. */
+#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
+ (GET_MODE_SIZE (FROM) != GET_MODE_SIZE (TO) \
+ ? reg_classes_intersect_p (FP_REGS, (CLASS)) \
+ : 0)
+
+
+#define SHIFT_COUNT_TRUNCATED (!TARGET_SIMD)
+
+/* The callee only saves the lower 64 bits of a 128-bit register.
+   Tell the compiler that the callee clobbers the top 64 bits when
+   restoring the bottom 64 bits.  */
+#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) \
+ (FP_REGNUM_P (REGNO) && GET_MODE_SIZE (MODE) > 8)
+
+/* Check whether the TLS descriptor mechanism is selected.  */
+#define TARGET_TLS_DESC (aarch64_tls_dialect == TLS_DESCRIPTORS)
+
+extern enum aarch64_code_model aarch64_cmodel;
+
+/* When using the tiny addressing model, conditional and unconditional
+   branches can span the whole of the available address space (1MB).  */
+#define HAS_LONG_COND_BRANCH \
+ (aarch64_cmodel == AARCH64_CMODEL_TINY \
+ || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)
+
+#define HAS_LONG_UNCOND_BRANCH \
+ (aarch64_cmodel == AARCH64_CMODEL_TINY \
+ || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)
+
+/* Modes valid for AdvSIMD Q registers. */
+#define AARCH64_VALID_SIMD_QREG_MODE(MODE) \
+ ((MODE) == V4SImode || (MODE) == V8HImode || (MODE) == V16QImode \
+   || (MODE) == V4SFmode || (MODE) == V2DImode || (MODE) == V2DFmode)
+
+#endif /* GCC_AARCH64_H */
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
new file mode 100644
index 00000000000..07aab96e116
--- /dev/null
+++ b/gcc/config/aarch64/aarch64.md
@@ -0,0 +1,2921 @@
+;; Machine description for AArch64 architecture.
+;; Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Register numbers
+(define_constants
+ [
+ (R0_REGNUM 0)
+ (R1_REGNUM 1)
+ (R2_REGNUM 2)
+ (R3_REGNUM 3)
+ (R4_REGNUM 4)
+ (R5_REGNUM 5)
+ (R6_REGNUM 6)
+ (R7_REGNUM 7)
+ (R8_REGNUM 8)
+ (R9_REGNUM 9)
+ (R10_REGNUM 10)
+ (R11_REGNUM 11)
+ (R12_REGNUM 12)
+ (R13_REGNUM 13)
+ (R14_REGNUM 14)
+ (R15_REGNUM 15)
+ (R16_REGNUM 16)
+ (IP0_REGNUM 16)
+ (R17_REGNUM 17)
+ (IP1_REGNUM 17)
+ (R18_REGNUM 18)
+ (R19_REGNUM 19)
+ (R20_REGNUM 20)
+ (R21_REGNUM 21)
+ (R22_REGNUM 22)
+ (R23_REGNUM 23)
+ (R24_REGNUM 24)
+ (R25_REGNUM 25)
+ (R26_REGNUM 26)
+ (R27_REGNUM 27)
+ (R28_REGNUM 28)
+ (R29_REGNUM 29)
+ (R30_REGNUM 30)
+ (LR_REGNUM 30)
+ (SP_REGNUM 31)
+ (V0_REGNUM 32)
+ (V15_REGNUM 47)
+ (V31_REGNUM 63)
+ (SFP_REGNUM 64)
+ (AP_REGNUM 65)
+ (CC_REGNUM 66)
+ ]
+)
+
+(define_c_enum "unspec" [
+ UNSPEC_CASESI
+ UNSPEC_CLS
+ UNSPEC_FRINTA
+ UNSPEC_FRINTI
+ UNSPEC_FRINTM
+ UNSPEC_FRINTP
+ UNSPEC_FRINTX
+ UNSPEC_FRINTZ
+ UNSPEC_GOTSMALLPIC
+ UNSPEC_GOTSMALLTLS
+ UNSPEC_LD2
+ UNSPEC_LD3
+ UNSPEC_LD4
+ UNSPEC_MB
+ UNSPEC_NOP
+ UNSPEC_PRLG_STK
+ UNSPEC_RBIT
+ UNSPEC_ST2
+ UNSPEC_ST3
+ UNSPEC_ST4
+ UNSPEC_TLS
+ UNSPEC_TLSDESC
+ UNSPEC_VSTRUCTDUMMY
+])
+
+(define_c_enum "unspecv" [
+ UNSPECV_EH_RETURN ; Represent EH_RETURN
+ ]
+)
+
+;; If further include files are added, the definition of MD_INCLUDES
+;; must be updated.
+
+(include "constraints.md")
+(include "predicates.md")
+(include "iterators.md")
+
+;; -------------------------------------------------------------------
+;; Synchronization Builtins
+;; -------------------------------------------------------------------
+
+;; The following sync_* attributes are applied to synchronization
+;; instruction patterns to control the way in which the
+;; synchronization loop is expanded.
+;; All instruction patterns that call aarch64_output_sync_insn ()
+;; should define these attributes. Refer to the comment above
+;; aarch64.c:aarch64_output_sync_loop () for more detail on the use of
+;; these attributes.
+
+;; Attribute specifies the operand number which contains the
+;; result of a synchronization operation. The result is the old value
+;; loaded from SYNC_MEMORY.
+(define_attr "sync_result" "none,0,1,2,3,4,5" (const_string "none"))
+
+;; Attribute specifies the operand number which contains the memory
+;; address to which the synchronization operation is being applied.
+(define_attr "sync_memory" "none,0,1,2,3,4,5" (const_string "none"))
+
+;; Attribute specifies the operand number which contains the required
+;; old value expected in the memory location. This attribute may be
+;; none if no required value test should be performed in the expanded
+;; code.
+(define_attr "sync_required_value" "none,0,1,2,3,4,5" (const_string "none"))
+
+;; Attribute specifies the operand number of the new value to be stored
+;; into the memory location identified by the sync_memory attribute.
+(define_attr "sync_new_value" "none,0,1,2,3,4,5" (const_string "none"))
+
+;; Attribute specifies the operand number of a temporary register
+;; which can be clobbered by the synchronization instruction sequence.
+;; The register provided by SYNC_T1 may be the same as SYNC_RESULT, in
+;; which case the result value will be clobbered and not available
+;; after the synchronization loop exits.
+(define_attr "sync_t1" "none,0,1,2,3,4,5" (const_string "none"))
+
+;; Attribute specifies the operand number of a temporary register
+;; which can be clobbered by the synchronization instruction sequence.
+;; This register is used to collect the result of a store exclusive
+;; instruction.
+(define_attr "sync_t2" "none,0,1,2,3,4,5" (const_string "none"))
+
+;; Attribute that specifies whether or not the emitted synchronization
+;; loop must contain a release barrier.
+(define_attr "sync_release_barrier" "yes,no" (const_string "yes"))
+
+;; Attribute that specifies the operation that the synchronization
+;; loop should apply to the old and new values to generate the value
+;; written back to memory.
+(define_attr "sync_op" "none,add,sub,ior,xor,and,nand"
+ (const_string "none"))
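+
+;; As a purely illustrative (hypothetical) sketch, a pattern whose loop
+;; computes  old = *mem; *mem = old + new;  might carry
+;;   (set_attr "sync_result" "0")
+;;   (set_attr "sync_memory" "1")
+;;   (set_attr "sync_new_value" "2")
+;;   (set_attr "sync_op" "add")
+;; so that aarch64_output_sync_insn () can locate each operand's role.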
+
+;; -------------------------------------------------------------------
+;; Instruction types and attributes
+;; -------------------------------------------------------------------
+
+;; Main data types used by the instructions.
+
+(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF"
+ (const_string "unknown"))
+
+(define_attr "mode2" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF"
+ (const_string "unknown"))
+
+; The "v8type" attribute is used to for fine grained classification of
+; AArch64 instructions. This table briefly explains the meaning of each type.
+
+; adc add/subtract with carry.
+; adcs add/subtract with carry (setting condition flags).
+; adr calculate address.
+; alu simple alu instruction (no memory or fp regs access).
+; alu_ext simple alu instruction (sign/zero-extended register).
+; alu_shift simple alu instruction, with a source operand shifted by a constant.
+; alus simple alu instruction (setting condition flags).
+; alus_ext simple alu instruction (sign/zero-extended register, setting condition flags).
+; alus_shift simple alu instruction, with a source operand shifted by a constant (setting condition flags).
+; bfm bitfield move operation.
+; branch branch.
+; call subroutine call.
+; ccmp conditional compare.
+; clz count leading zeros/sign bits.
+; csel conditional select.
+; dmb data memory barrier.
+; extend sign/zero-extend (specialised bitfield move).
+; extr extract register-sized bitfield encoding.
+; fpsimd_load load single floating point / simd scalar register from memory.
+; fpsimd_load2 load pair of floating point / simd scalar registers from memory.
+; fpsimd_store store single floating point / simd scalar register to memory.
+; fpsimd_store2 store pair floating point / simd scalar registers to memory.
+; fadd floating point add/sub.
+; fccmp floating point conditional compare.
+; fcmp floating point comparison.
+; fconst floating point load immediate.
+; fcsel floating point conditional select.
+; fcvt floating point convert (float to float).
+; fcvtf2i floating point convert (float to integer).
+; fcvti2f floating point convert (integer to float).
+; fdiv floating point division operation.
+; ffarith floating point abs, neg or cpy.
+; fmadd floating point multiply-add/sub.
+; fminmax floating point min/max.
+; fmov floating point move (float to float).
+; fmovf2i floating point move (float to integer).
+; fmovi2f floating point move (integer to float).
+; fmul floating point multiply.
+; frint floating point round to integral.
+; fsqrt floating point square root.
+; load_acq load-acquire.
+; load load single general register from memory
+; load2 load pair of general registers from memory
+; logic logical operation (register).
+; logic_imm and/or/xor operation (immediate).
+; logic_shift logical operation with shift.
+; logics logical operation (register, setting condition flags).
+; logics_imm and/or/xor operation (immediate, setting condition flags).
+; logics_shift logical operation with shift (setting condition flags).
+; madd integer multiply-add/sub.
+; maddl widening integer multiply-add/sub.
+; misc miscellaneous - any type that doesn't fit into the rest.
+; move integer move operation.
+; move2 double integer move operation.
+; movk move 16-bit immediate with keep.
+; movz move 16-bit immmediate with zero/one.
+; mrs system/special register move.
+; mulh 64x64 to 128-bit multiply (high part).
+; mull widening multiply.
+; mult integer multiply instruction.
+; prefetch memory prefetch.
+; rbit reverse bits.
+; rev reverse bytes.
+; sdiv integer division operation (signed).
+; shift variable shift operation.
+; shift_imm immediate shift operation (specialised bitfield move).
+; store_rel store-release.
+; store store single general register to memory.
+; store2 store pair of general registers to memory.
+; udiv integer division operation (unsigned).
+
+(define_attr "v8type"
+ "adc,\
+ adcs,\
+ adr,\
+ alu,\
+ alu_ext,\
+ alu_shift,\
+ alus,\
+ alus_ext,\
+ alus_shift,\
+ bfm,\
+ branch,\
+ call,\
+ ccmp,\
+ clz,\
+ csel,\
+ dmb,\
+ div,\
+ div64,\
+ extend,\
+ extr,\
+ fpsimd_load,\
+ fpsimd_load2,\
+ fpsimd_store2,\
+ fpsimd_store,\
+ fadd,\
+ fccmp,\
+ fcvt,\
+ fcvtf2i,\
+ fcvti2f,\
+ fcmp,\
+ fconst,\
+ fcsel,\
+ fdiv,\
+ ffarith,\
+ fmadd,\
+ fminmax,\
+ fmov,\
+ fmovf2i,\
+ fmovi2f,\
+ fmul,\
+ frint,\
+ fsqrt,\
+ load_acq,\
+ load1,\
+ load2,\
+ logic,\
+ logic_imm,\
+ logic_shift,\
+ logics,\
+ logics_imm,\
+ logics_shift,\
+ madd,\
+ maddl,\
+ misc,\
+ move,\
+ move2,\
+ movk,\
+ movz,\
+ mrs,\
+ mulh,\
+ mull,\
+ mult,\
+ prefetch,\
+ rbit,\
+ rev,\
+ sdiv,\
+ shift,\
+ shift_imm,\
+ store_rel,\
+ store1,\
+ store2,\
+ udiv"
+ (const_string "alu"))
+
+
+; The "type" attribute is used by the AArch32 backend. Below is a mapping
+; from "v8type" to "type".
+
+(define_attr "type"
+ "alu,alu_shift,block,branch,call,f_2_r,f_cvt,f_flag,f_loads,
+ f_loadd,f_stored,f_stores,faddd,fadds,fcmpd,fcmps,fconstd,fconsts,
+ fcpys,fdivd,fdivs,ffarithd,ffariths,fmacd,fmacs,fmuld,fmuls,load_byte,
+ load1,load2,mult,r_2_f,store1,store2"
+ (cond [
+ (eq_attr "v8type" "alu_shift,alus_shift,logic_shift,logics_shift") (const_string "alu_shift")
+ (eq_attr "v8type" "branch") (const_string "branch")
+ (eq_attr "v8type" "call") (const_string "call")
+ (eq_attr "v8type" "fmovf2i") (const_string "f_2_r")
+ (eq_attr "v8type" "fcvt,fcvtf2i,fcvti2f") (const_string "f_cvt")
+ (and (eq_attr "v8type" "fpsimd_load") (eq_attr "mode" "SF")) (const_string "f_loads")
+ (and (eq_attr "v8type" "fpsimd_load") (eq_attr "mode" "DF")) (const_string "f_loadd")
+ (and (eq_attr "v8type" "fpsimd_store") (eq_attr "mode" "SF")) (const_string "f_stores")
+ (and (eq_attr "v8type" "fpsimd_store") (eq_attr "mode" "DF")) (const_string "f_stored")
+ (and (eq_attr "v8type" "fadd,fminmax") (eq_attr "mode" "DF")) (const_string "faddd")
+ (and (eq_attr "v8type" "fadd,fminmax") (eq_attr "mode" "SF")) (const_string "fadds")
+ (and (eq_attr "v8type" "fcmp,fccmp") (eq_attr "mode" "DF")) (const_string "fcmpd")
+ (and (eq_attr "v8type" "fcmp,fccmp") (eq_attr "mode" "SF")) (const_string "fcmps")
+ (and (eq_attr "v8type" "fconst") (eq_attr "mode" "DF")) (const_string "fconstd")
+ (and (eq_attr "v8type" "fconst") (eq_attr "mode" "SF")) (const_string "fconsts")
+ (and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "DF")) (const_string "fdivd")
+ (and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "SF")) (const_string "fdivs")
+ (and (eq_attr "v8type" "ffarith") (eq_attr "mode" "DF")) (const_string "ffarithd")
+ (and (eq_attr "v8type" "ffarith") (eq_attr "mode" "SF")) (const_string "ffariths")
+ (and (eq_attr "v8type" "fmadd") (eq_attr "mode" "DF")) (const_string "fmacd")
+ (and (eq_attr "v8type" "fmadd") (eq_attr "mode" "SF")) (const_string "fmacs")
+ (and (eq_attr "v8type" "fmul") (eq_attr "mode" "DF")) (const_string "fmuld")
+ (and (eq_attr "v8type" "fmul") (eq_attr "mode" "SF")) (const_string "fmuls")
+ (and (eq_attr "v8type" "load1") (eq_attr "mode" "QI,HI")) (const_string "load_byte")
+ (and (eq_attr "v8type" "load1") (eq_attr "mode" "SI,DI,TI")) (const_string "load1")
+ (eq_attr "v8type" "load2") (const_string "load2")
+ (and (eq_attr "v8type" "mulh,mult,mull,madd,sdiv,udiv") (eq_attr "mode" "SI")) (const_string "mult")
+ (eq_attr "v8type" "fmovi2f") (const_string "r_2_f")
+ (eq_attr "v8type" "store1") (const_string "store1")
+ (eq_attr "v8type" "store2") (const_string "store2")
+ ]
+ (const_string "alu")))
+
+;; Attribute that specifies whether or not the instruction touches fp
+;; registers.
+(define_attr "fp" "no,yes" (const_string "no"))
+
+;; Attribute that specifies whether or not the instruction touches simd
+;; registers.
+(define_attr "simd" "no,yes" (const_string "no"))
+
+(define_attr "length" ""
+ (cond [(not (eq_attr "sync_memory" "none"))
+ (symbol_ref "aarch64_sync_loop_insns (insn, operands) * 4")
+ ] (const_int 4)))
+
+;; Attribute that controls whether an alternative is enabled or not.
+;; Currently it is only used to disable alternatives which touch fp or simd
+;; registers when -mgeneral-regs-only is specified.
+(define_attr "enabled" "no,yes"
+ (cond [(ior
+ (and (eq_attr "fp" "yes")
+ (eq (symbol_ref "TARGET_FLOAT") (const_int 0)))
+ (and (eq_attr "simd" "yes")
+ (eq (symbol_ref "TARGET_SIMD") (const_int 0))))
+ (const_string "no")
+ ] (const_string "yes")))
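+
+;; For example, the fmov alternatives of the *movsi_aarch64 pattern
+;; below carry (set_attr "fp" "yes") and are therefore disabled when
+;; -mgeneral-regs-only turns off TARGET_FLOAT.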
+
+;; -------------------------------------------------------------------
+;; Pipeline descriptions and scheduling
+;; -------------------------------------------------------------------
+
+;; Processor types.
+(include "aarch64-tune.md")
+
+;; Scheduling
+(include "aarch64-generic.md")
+(include "large.md")
+(include "small.md")
+
+;; -------------------------------------------------------------------
+;; Jumps and other miscellaneous insns
+;; -------------------------------------------------------------------
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:DI 0 "register_operand" "r"))]
+ ""
+ "br\\t%0"
+ [(set_attr "v8type" "branch")]
+)
+
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "b\\t%l0"
+ [(set_attr "v8type" "branch")]
+)
+
+(define_expand "cbranch<mode>4"
+ [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator"
+ [(match_operand:GPI 1 "register_operand" "")
+ (match_operand:GPI 2 "aarch64_plus_operand" "")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ "
+ operands[1] = aarch64_gen_compare_reg (GET_CODE (operands[0]), operands[1],
+ operands[2]);
+ operands[2] = const0_rtx;
+ "
+)
+
+(define_expand "cbranch<mode>4"
+ [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator"
+ [(match_operand:GPF 1 "register_operand" "")
+ (match_operand:GPF 2 "aarch64_reg_or_zero" "")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ "
+ operands[1] = aarch64_gen_compare_reg (GET_CODE (operands[0]), operands[1],
+ operands[2]);
+ operands[2] = const0_rtx;
+ "
+)
+
+(define_insn "*condjump"
+ [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator"
+ [(match_operand 1 "cc_register" "") (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "b%m0\\t%l2"
+ [(set_attr "v8type" "branch")]
+)
+
+(define_expand "casesi"
+ [(match_operand:SI 0 "register_operand" "") ; Index
+ (match_operand:SI 1 "const_int_operand" "") ; Lower bound
+ (match_operand:SI 2 "const_int_operand" "") ; Total range
+ (match_operand:DI 3 "" "") ; Table label
+ (match_operand:DI 4 "" "")] ; Out of range label
+ ""
+ {
+ if (operands[1] != const0_rtx)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+
+ /* Canonical RTL says that if you have:
+
+ (minus (X) (CONST))
+
+ then this should be emitted as:
+
+ (plus (X) (-CONST))
+
+ The use of trunc_int_for_mode ensures that the resulting
+	 constant can be represented in SImode; this is important
+	 for the corner case where operands[1] is INT_MIN.  */
+
+ operands[1] = GEN_INT (trunc_int_for_mode (-INTVAL (operands[1]), SImode));
+
+ if (!(*insn_data[CODE_FOR_addsi3].operand[2].predicate)
+ (operands[1], SImode))
+ operands[1] = force_reg (SImode, operands[1]);
+ emit_insn (gen_addsi3 (reg, operands[0], operands[1]));
+ operands[0] = reg;
+ }
+
+ if (!aarch64_plus_operand (operands[2], SImode))
+ operands[2] = force_reg (SImode, operands[2]);
+ emit_jump_insn (gen_cbranchsi4 (gen_rtx_GTU (SImode, const0_rtx,
+ const0_rtx),
+ operands[0], operands[2], operands[4]));
+
+ operands[2] = force_reg (DImode, gen_rtx_LABEL_REF (VOIDmode, operands[3]));
+ emit_jump_insn (gen_casesi_dispatch (operands[2], operands[0],
+ operands[3]));
+ DONE;
+ }
+)
+
+(define_insn "casesi_dispatch"
+ [(parallel
+ [(set (pc)
+ (mem:DI (unspec [(match_operand:DI 0 "register_operand" "r")
+ (match_operand:SI 1 "register_operand" "r")]
+ UNSPEC_CASESI)))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:DI 3 "=r"))
+ (clobber (match_scratch:DI 4 "=r"))
+ (use (label_ref (match_operand 2 "" "")))])]
+ ""
+ "*
+ return aarch64_output_casesi (operands);
+ "
+ [(set_attr "length" "16")
+ (set_attr "v8type" "branch")]
+)
+
+(define_insn "nop"
+  [(unspec [(const_int 0)] UNSPEC_NOP)]
+ ""
+ "nop"
+ [(set_attr "v8type" "misc")]
+)
+
+(define_expand "prologue"
+ [(clobber (const_int 0))]
+ ""
+ "
+ aarch64_expand_prologue ();
+ DONE;
+ "
+)
+
+(define_expand "epilogue"
+ [(clobber (const_int 0))]
+ ""
+ "
+ aarch64_expand_epilogue (false);
+ DONE;
+ "
+)
+
+(define_expand "sibcall_epilogue"
+ [(clobber (const_int 0))]
+ ""
+ "
+ aarch64_expand_epilogue (true);
+ DONE;
+ "
+)
+
+(define_insn "*do_return"
+ [(return)]
+ ""
+ "ret"
+ [(set_attr "v8type" "branch")]
+)
+
+(define_insn "eh_return"
+ [(unspec_volatile [(match_operand:DI 0 "register_operand" "r")]
+ UNSPECV_EH_RETURN)]
+ ""
+ "#"
+ [(set_attr "v8type" "branch")]
+)
+
+(define_split
+ [(unspec_volatile [(match_operand:DI 0 "register_operand" "")]
+ UNSPECV_EH_RETURN)]
+ "reload_completed"
+ [(set (match_dup 1) (match_dup 0))]
+ {
+ operands[1] = aarch64_final_eh_return_addr ();
+ }
+)
+
+(define_insn "*cb<optab><mode>1"
+ [(set (pc) (if_then_else (EQL (match_operand:GPI 0 "register_operand" "r")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ "<cbz>\\t%<w>0, %l1"
+ [(set_attr "v8type" "branch")]
+)
+
+(define_insn "*tb<optab><mode>1"
+ [(set (pc) (if_then_else
+ (EQL (zero_extract:DI (match_operand:GPI 0 "register_operand" "r")
+ (const_int 1)
+ (match_operand 1 "const_int_operand" "n"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (clobber (match_scratch:DI 3 "=r"))]
+ ""
+ "*
+ if (get_attr_length (insn) == 8)
+ return \"ubfx\\t%<w>3, %<w>0, %1, #1\;<cbz>\\t%<w>3, %l2\";
+ return \"<tbz>\\t%<w>0, %1, %l2\";
+ "
+ [(set_attr "v8type" "branch")
+ (set_attr "mode" "<MODE>")
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 2) (pc)) (const_int -32768))
+ (lt (minus (match_dup 2) (pc)) (const_int 32764)))
+ (const_int 4)
+ (const_int 8)))]
+)
+
+(define_insn "*cb<optab><mode>1"
+ [(set (pc) (if_then_else (LTGE (match_operand:ALLI 0 "register_operand" "r")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (clobber (match_scratch:DI 2 "=r"))]
+ ""
+ "*
+ if (get_attr_length (insn) == 8)
+ return \"ubfx\\t%<w>2, %<w>0, <sizem1>, #1\;<cbz>\\t%<w>2, %l1\";
+ return \"<tbz>\\t%<w>0, <sizem1>, %l1\";
+ "
+ [(set_attr "v8type" "branch")
+ (set_attr "mode" "<MODE>")
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 1) (pc)) (const_int -32768))
+ (lt (minus (match_dup 1) (pc)) (const_int 32764)))
+ (const_int 4)
+ (const_int 8)))]
+)
+
+;; -------------------------------------------------------------------
+;; Subroutine calls and sibcalls
+;; -------------------------------------------------------------------
+
+(define_expand "call"
+ [(parallel [(call (match_operand 0 "memory_operand" "")
+ (match_operand 1 "general_operand" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:DI LR_REGNUM))])]
+ ""
+ "
+ {
+ rtx callee;
+
+ /* In an untyped call, we can get NULL for operand 2. */
+ if (operands[2] == NULL)
+ operands[2] = const0_rtx;
+
+ /* Decide if we should generate indirect calls by loading the
+ 64-bit address of the callee into a register before performing
+ the branch-and-link. */
+ callee = XEXP (operands[0], 0);
+ if (GET_CODE (callee) == SYMBOL_REF
+ ? aarch64_is_long_call_p (callee)
+ : !REG_P (callee))
+ XEXP (operands[0], 0) = force_reg (Pmode, callee);
+ }"
+)
+
+(define_insn "*call_reg"
+ [(call (mem:DI (match_operand:DI 0 "register_operand" "r"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:DI LR_REGNUM))]
+ ""
+ "blr\\t%0"
+ [(set_attr "v8type" "call")]
+)
+
+(define_insn "*call_symbol"
+ [(call (mem:DI (match_operand:DI 0 "" ""))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:DI LR_REGNUM))]
+ "GET_CODE (operands[0]) == SYMBOL_REF
+ && !aarch64_is_long_call_p (operands[0])"
+ "bl\\t%a0"
+ [(set_attr "v8type" "call")]
+)
+
+(define_expand "call_value"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "general_operand" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:DI LR_REGNUM))])]
+ ""
+ "
+ {
+ rtx callee;
+
+ /* In an untyped call, we can get NULL for operand 3. */
+ if (operands[3] == NULL)
+ operands[3] = const0_rtx;
+
+ /* Decide if we should generate indirect calls by loading the
+ 64-bit address of the callee into a register before performing
+ the branch-and-link. */
+ callee = XEXP (operands[1], 0);
+ if (GET_CODE (callee) == SYMBOL_REF
+ ? aarch64_is_long_call_p (callee)
+ : !REG_P (callee))
+ XEXP (operands[1], 0) = force_reg (Pmode, callee);
+ }"
+)
+
+(define_insn "*call_value_reg"
+ [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "register_operand" "r"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:DI LR_REGNUM))]
+ ""
+ "blr\\t%1"
+ [(set_attr "v8type" "call")]
+)
+
+(define_insn "*call_value_symbol"
+ [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "" ""))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:DI LR_REGNUM))]
+ "GET_CODE (operands[1]) == SYMBOL_REF
+ && !aarch64_is_long_call_p (operands[1])"
+ "bl\\t%a1"
+ [(set_attr "v8type" "call")]
+)
+
+(define_expand "sibcall"
+ [(parallel [(call (match_operand 0 "memory_operand" "")
+ (match_operand 1 "general_operand" ""))
+ (return)
+ (use (match_operand 2 "" ""))])]
+ ""
+ {
+ if (operands[2] == NULL_RTX)
+ operands[2] = const0_rtx;
+ }
+)
+
+(define_expand "sibcall_value"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "general_operand" "")))
+ (return)
+ (use (match_operand 3 "" ""))])]
+ ""
+ {
+ if (operands[3] == NULL_RTX)
+ operands[3] = const0_rtx;
+ }
+)
+
+(define_insn "*sibcall_insn"
+ [(call (mem:DI (match_operand:DI 0 "" "X"))
+ (match_operand 1 "" ""))
+ (return)
+ (use (match_operand 2 "" ""))]
+ "GET_CODE (operands[0]) == SYMBOL_REF"
+ "b\\t%a0"
+ [(set_attr "v8type" "branch")]
+)
+
+(define_insn "*sibcall_value_insn"
+ [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand 1 "" "X"))
+ (match_operand 2 "" "")))
+ (return)
+ (use (match_operand 3 "" ""))]
+ "GET_CODE (operands[1]) == SYMBOL_REF"
+ "b\\t%a1"
+ [(set_attr "v8type" "branch")]
+)
+
+;; Call subroutine returning any type.
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "")
+ (const_int 0))
+ (match_operand 1 "")
+ (match_operand 2 "")])]
+ ""
+{
+ int i;
+
+ emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+ DONE;
+})
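+
+;; This is the standard untyped_call pattern used when expanding
+;; __builtin_apply: the result block named by operand 2 is copied out of
+;; the value registers immediately after the call, before the blockage.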
+
+;; -------------------------------------------------------------------
+;; Moves
+;; -------------------------------------------------------------------
+
+(define_expand "mov<mode>"
+ [(set (match_operand:SHORT 0 "nonimmediate_operand" "")
+ (match_operand:SHORT 1 "general_operand" ""))]
+ ""
+ "
+ if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx)
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+ "
+)
+
+(define_insn "*mov<mode>_aarch64"
+ [(set (match_operand:SHORT 0 "nonimmediate_operand" "=r,r,r,m, r,*w")
+ (match_operand:SHORT 1 "general_operand" " r,M,m,rZ,*w,r"))]
+ "(register_operand (operands[0], <MODE>mode)
+ || aarch64_reg_or_zero (operands[1], <MODE>mode))"
+ "@
+ mov\\t%w0, %w1
+ mov\\t%w0, %1
+ ldr<size>\\t%w0, %1
+ str<size>\\t%w1, %0
+ umov\\t%w0, %1.<v>[0]
+ dup\\t%0.<Vallxd>, %w1"
+ [(set_attr "v8type" "move,alu,load1,store1,*,*")
+ (set_attr "simd_type" "*,*,*,*,simd_movgp,simd_dupgp")
+ (set_attr "mode" "<MODE>")
+ (set_attr "simd_mode" "<MODE>")]
+)
+
+(define_expand "mov<mode>"
+ [(set (match_operand:GPI 0 "nonimmediate_operand" "")
+ (match_operand:GPI 1 "general_operand" ""))]
+ ""
+ "
+ if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx)
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+
+ if (CONSTANT_P (operands[1]))
+ {
+ aarch64_expand_mov_immediate (operands[0], operands[1]);
+ DONE;
+ }
+ "
+)
+
+(define_insn "*movsi_aarch64"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,m, *w, r,*w")
+ (match_operand:SI 1 "aarch64_mov_operand" " r,M,m,rZ,rZ,*w,*w"))]
+ "(register_operand (operands[0], SImode)
+ || aarch64_reg_or_zero (operands[1], SImode))"
+ "@
+ mov\\t%w0, %w1
+ mov\\t%w0, %1
+ ldr\\t%w0, %1
+ str\\t%w1, %0
+ fmov\\t%s0, %w1
+ fmov\\t%w0, %s1
+ fmov\\t%s0, %s1"
+ [(set_attr "v8type" "move,alu,load1,store1,fmov,fmov,fmov")
+ (set_attr "mode" "SI")
+ (set_attr "fp" "*,*,*,*,yes,yes,yes")]
+)
+
+(define_insn "*movdi_aarch64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,k,r,r,r,m, r, r, *w, r,*w,w")
+ (match_operand:DI 1 "aarch64_mov_operand" " r,r,k,N,m,rZ,Usa,Ush,rZ,*w,*w,Dd"))]
+ "(register_operand (operands[0], DImode)
+ || aarch64_reg_or_zero (operands[1], DImode))"
+ "@
+ mov\\t%x0, %x1
+ mov\\t%0, %x1
+ mov\\t%x0, %1
+ mov\\t%x0, %1
+ ldr\\t%x0, %1
+ str\\t%x1, %0
+ adr\\t%x0, %a1
+ adrp\\t%x0, %A1
+ fmov\\t%d0, %x1
+ fmov\\t%x0, %d1
+ fmov\\t%d0, %d1
+ movi\\t%d0, %1"
+ [(set_attr "v8type" "move,move,move,alu,load1,store1,adr,adr,fmov,fmov,fmov,fmov")
+ (set_attr "mode" "DI")
+ (set_attr "fp" "*,*,*,*,*,*,*,*,yes,yes,yes,yes")]
+)
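+
+;; The Usa and Ush alternatives materialise symbolic addresses: adr forms
+;; a PC-relative address directly, while adrp produces the 4KB page base
+;; that a following lo12 offset completes.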
+
+(define_insn "insv_imm<mode>"
+ [(set (zero_extract:GPI (match_operand:GPI 0 "register_operand" "+r")
+ (const_int 16)
+ (match_operand 1 "const_int_operand" "n"))
+ (match_operand 2 "const_int_operand" "n"))]
+ "INTVAL (operands[1]) < GET_MODE_BITSIZE (<MODE>mode)
+ && INTVAL (operands[1]) % 16 == 0
+ && INTVAL (operands[2]) <= 0xffff"
+ "movk\\t%<w>0, %2, lsl %1"
+ [(set_attr "v8type" "movk")
+ (set_attr "mode" "<MODE>")]
+)
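+
+;; movk replaces one aligned 16-bit slice of the register and leaves the
+;; rest intact, which is how aarch64_expand_mov_immediate above builds
+;; constants that need more than one instruction.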
+
+(define_expand "movti"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "")
+ (match_operand:TI 1 "general_operand" ""))]
+ ""
+ "
+ if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx)
+ operands[1] = force_reg (TImode, operands[1]);
+ "
+)
+
+(define_insn "*movti_aarch64"
+ [(set (match_operand:TI 0
+ "nonimmediate_operand" "=r, *w,r ,*w,r ,Ump,Ump,*w,m")
+ (match_operand:TI 1
+ "aarch64_movti_operand" " rn,r ,*w,*w,Ump,r ,Z , m,*w"))]
+ "(register_operand (operands[0], TImode)
+ || aarch64_reg_or_zero (operands[1], TImode))"
+ "@
+ #
+ #
+ #
+ orr\\t%0.16b, %1.16b, %1.16b
+ ldp\\t%0, %H0, %1
+ stp\\t%1, %H1, %0
+ stp\\txzr, xzr, %0
+ ldr\\t%q0, %1
+ str\\t%q1, %0"
+ [(set_attr "v8type" "move2,fmovi2f,fmovf2i,*, \
+ load2,store2,store2,fpsimd_load,fpsimd_store")
+ (set_attr "simd_type" "*,*,*,simd_move,*,*,*,*,*")
+ (set_attr "mode" "DI,DI,DI,TI,DI,DI,DI,TI,TI")
+ (set_attr "length" "8,8,8,4,4,4,4,4,4")
+ (set_attr "fp" "*,*,*,*,*,*,*,yes,yes")
+ (set_attr "simd" "*,*,*,yes,*,*,*,*,*")])
+
+;; Split a TImode register-register or register-immediate move into
+;; its component DImode pieces, taking care to handle overlapping
+;; source and dest registers.
+(define_split
+ [(set (match_operand:TI 0 "register_operand" "")
+ (match_operand:TI 1 "aarch64_reg_or_imm" ""))]
+ "reload_completed"
+ [(const_int 0)]
+{
+ aarch64_split_doubleword_move (operands[0], operands[1]);
+ DONE;
+})
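+
+;; For example, a TImode copy of x3:x2 into x1:x0 becomes two DImode
+;; moves; the overlap handling matters for a pair such as x2:x1 into
+;; x1:x0, where x1 must be read before it is overwritten.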
+
+(define_expand "mov<mode>"
+ [(set (match_operand:GPF 0 "nonimmediate_operand" "")
+ (match_operand:GPF 1 "general_operand" ""))]
+ ""
+ "
+ if (!TARGET_FLOAT)
+ {
+ sorry (\"%qs and floating point code\", \"-mgeneral-regs-only\");
+ FAIL;
+ }
+
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+ "
+)
+
+(define_insn "*movsf_aarch64"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "= w,?r,w,w,m,r,m ,r")
+ (match_operand:SF 1 "general_operand" "?rY, w,w,m,w,m,rY,r"))]
+ "TARGET_FLOAT && (register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode))"
+ "@
+ fmov\\t%s0, %w1
+ fmov\\t%w0, %s1
+ fmov\\t%s0, %s1
+ ldr\\t%s0, %1
+ str\\t%s1, %0
+ ldr\\t%w0, %1
+ str\\t%w1, %0
+ mov\\t%w0, %w1"
+ [(set_attr "v8type" "fmovi2f,fmovf2i,fmov,fpsimd_load,fpsimd_store,fpsimd_load,fpsimd_store,fmov")
+ (set_attr "mode" "SF")]
+)
+
+(define_insn "*movdf_aarch64"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "= w,?r,w,w,m,r,m ,r")
+ (match_operand:DF 1 "general_operand" "?rY, w,w,m,w,m,rY,r"))]
+ "TARGET_FLOAT && (register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode))"
+ "@
+ fmov\\t%d0, %x1
+ fmov\\t%x0, %d1
+ fmov\\t%d0, %d1
+ ldr\\t%d0, %1
+ str\\t%d1, %0
+ ldr\\t%x0, %1
+ str\\t%x1, %0
+ mov\\t%x0, %x1"
+ [(set_attr "v8type" "fmovi2f,fmovf2i,fmov,fpsimd_load,fpsimd_store,fpsimd_load,fpsimd_store,move")
+ (set_attr "mode" "DF")]
+)
+
+(define_expand "movtf"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (match_operand:TF 1 "general_operand" ""))]
+ ""
+ "
+ if (!TARGET_FLOAT)
+ {
+ sorry (\"%qs and floating point code\", \"-mgeneral-regs-only\");
+ FAIL;
+ }
+
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (TFmode, operands[1]);
+ "
+)
+
+(define_insn "*movtf_aarch64"
+ [(set (match_operand:TF 0
+ "nonimmediate_operand" "=w,?&r,w ,?r,w,?w,w,m,?r ,Ump")
+ (match_operand:TF 1
+ "general_operand" " w,?r, ?r,w ,Y,Y ,m,w,Ump,?rY"))]
+ "TARGET_FLOAT && (register_operand (operands[0], TFmode)
+ || register_operand (operands[1], TFmode))"
+ "@
+ orr\\t%0.16b, %1.16b, %1.16b
+ mov\\t%0, %1\;mov\\t%H0, %H1
+ fmov\\t%d0, %Q1\;fmov\\t%0.d[1], %R1
+ fmov\\t%Q0, %d1\;fmov\\t%R0, %1.d[1]
+ movi\\t%0.2d, #0
+ fmov\\t%s0, wzr
+ ldr\\t%q0, %1
+ str\\t%q1, %0
+ ldp\\t%0, %H0, %1
+ stp\\t%1, %H1, %0"
+ [(set_attr "v8type" "logic,move2,fmovi2f,fmovf2i,fconst,fconst,fpsimd_load,fpsimd_store,fpsimd_load2,fpsimd_store2")
+ (set_attr "mode" "DF,DF,DF,DF,DF,DF,TF,TF,DF,DF")
+ (set_attr "length" "4,8,8,8,4,4,4,4,4,4")
+ (set_attr "fp" "*,*,yes,yes,*,yes,yes,yes,*,*")
+ (set_attr "simd" "yes,*,*,*,yes,*,*,*,*,*")]
+)
+
+
+;; Operands 1 and 3 are tied together by the final condition; so we allow
+;; fairly lax checking on the second memory operation.
+(define_insn "load_pair<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (match_operand:GPI 1 "aarch64_mem_pair_operand" "Ump"))
+ (set (match_operand:GPI 2 "register_operand" "=r")
+ (match_operand:GPI 3 "memory_operand" "m"))]
+ "rtx_equal_p (XEXP (operands[3], 0),
+ plus_constant (XEXP (operands[1], 0),
+ GET_MODE_SIZE (<MODE>mode)))"
+ "ldp\\t%<w>0, %<w>2, %1"
+ [(set_attr "v8type" "load2")
+ (set_attr "mode" "<MODE>")]
+)
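+
+;; For example, the RTL pair
+;;   (set (reg:DI x0) (mem:DI (reg sp)))
+;;   (set (reg:DI x1) (mem:DI (plus (reg sp) (const_int 8))))
+;; satisfies the condition above and is output as "ldp x0, x1, [sp]".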
+
+;; Operands 0 and 2 are tied together by the final condition; so we allow
+;; fairly lax checking on the second memory operation.
+(define_insn "store_pair<mode>"
+ [(set (match_operand:GPI 0 "aarch64_mem_pair_operand" "=Ump")
+ (match_operand:GPI 1 "register_operand" "r"))
+ (set (match_operand:GPI 2 "memory_operand" "=m")
+ (match_operand:GPI 3 "register_operand" "r"))]
+ "rtx_equal_p (XEXP (operands[2], 0),
+ plus_constant (XEXP (operands[0], 0),
+ GET_MODE_SIZE (<MODE>mode)))"
+ "stp\\t%<w>1, %<w>3, %0"
+ [(set_attr "v8type" "store2")
+ (set_attr "mode" "<MODE>")]
+)
+
+;; Operands 1 and 3 are tied together by the final condition; so we allow
+;; fairly lax checking on the second memory operation.
+(define_insn "load_pair<mode>"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (match_operand:GPF 1 "aarch64_mem_pair_operand" "Ump"))
+ (set (match_operand:GPF 2 "register_operand" "=w")
+ (match_operand:GPF 3 "memory_operand" "m"))]
+ "rtx_equal_p (XEXP (operands[3], 0),
+ plus_constant (XEXP (operands[1], 0),
+ GET_MODE_SIZE (<MODE>mode)))"
+ "ldp\\t%<w>0, %<w>2, %1"
+ [(set_attr "v8type" "fpsimd_load2")
+ (set_attr "mode" "<MODE>")]
+)
+
+;; Operands 0 and 2 are tied together by the final condition; so we allow
+;; fairly lax checking on the second memory operation.
+(define_insn "store_pair<mode>"
+ [(set (match_operand:GPF 0 "aarch64_mem_pair_operand" "=Ump")
+ (match_operand:GPF 1 "register_operand" "w"))
+ (set (match_operand:GPF 2 "memory_operand" "=m")
+ (match_operand:GPF 3 "register_operand" "w"))]
+ "rtx_equal_p (XEXP (operands[2], 0),
+ plus_constant (XEXP (operands[0], 0),
+ GET_MODE_SIZE (<MODE>mode)))"
+ "stp\\t%<w>1, %<w>3, %0"
+ [(set_attr "v8type" "fpsimd_store2")
+ (set_attr "mode" "<MODE>")]
+)
+
+;; Load pair with writeback. This is primarily used in function epilogues
+;; when restoring [fp,lr].
+(define_insn "loadwb_pair<GPI:mode>_<PTR:mode>"
+ [(parallel
+ [(set (match_operand:PTR 0 "register_operand" "=k")
+ (plus:PTR (match_operand:PTR 1 "register_operand" "0")
+ (match_operand:PTR 4 "const_int_operand" "n")))
+ (set (match_operand:GPI 2 "register_operand" "=r")
+ (mem:GPI (plus:PTR (match_dup 1)
+ (match_dup 4))))
+ (set (match_operand:GPI 3 "register_operand" "=r")
+ (mem:GPI (plus:PTR (match_dup 1)
+ (match_operand:PTR 5 "const_int_operand" "n"))))])]
+ "INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (<GPI:MODE>mode)"
+ "ldp\\t%<w>2, %<w>3, [%1], %4"
+ [(set_attr "v8type" "load2")
+ (set_attr "mode" "<GPI:MODE>")]
+)
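+
+;; A typical epilogue instance is "ldp x29, x30, [sp], 16", which
+;; restores the frame record and deallocates 16 bytes in one step.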
+
+;; Store pair with writeback. This is primarily used in function prologues
+;; when saving [fp,lr].
+(define_insn "storewb_pair<GPI:mode>_<PTR:mode>"
+ [(parallel
+ [(set (match_operand:PTR 0 "register_operand" "=&k")
+ (plus:PTR (match_operand:PTR 1 "register_operand" "0")
+ (match_operand:PTR 4 "const_int_operand" "n")))
+ (set (mem:GPI (plus:PTR (match_dup 0)
+ (match_dup 4)))
+ (match_operand:GPI 2 "register_operand" "r"))
+ (set (mem:GPI (plus:PTR (match_dup 0)
+ (match_operand:PTR 5 "const_int_operand" "n")))
+ (match_operand:GPI 3 "register_operand" "r"))])]
+ "INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (<GPI:MODE>mode)"
+ "stp\\t%<w>2, %<w>3, [%0, %4]!"
+ [(set_attr "v8type" "store2")
+ (set_attr "mode" "<GPI:MODE>")]
+)
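+
+;; The prologue counterpart is "stp x29, x30, [sp, -16]!", which
+;; allocates 16 bytes and saves the frame record at the new sp.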
+
+;; -------------------------------------------------------------------
+;; Sign/Zero extension
+;; -------------------------------------------------------------------
+
+(define_expand "<optab>sidi2"
+ [(set (match_operand:DI 0 "register_operand")
+ (ANY_EXTEND:DI (match_operand:SI 1 "nonimmediate_operand")))]
+ ""
+)
+
+(define_insn "*extendsidi2_aarch64"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ sxtw\t%0, %w1
+ ldrsw\t%0, %1"
+ [(set_attr "v8type" "extend,load1")
+ (set_attr "mode" "DI")]
+)
+
+(define_insn "*zero_extendsidi2_aarch64"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ uxtw\t%0, %w1
+ ldr\t%w0, %1"
+ [(set_attr "v8type" "extend,load1")
+ (set_attr "mode" "DI")]
+)
+
+(define_expand "<ANY_EXTEND:optab><SHORT:mode><GPI:mode>2"
+ [(set (match_operand:GPI 0 "register_operand")
+ (ANY_EXTEND:GPI (match_operand:SHORT 1 "nonimmediate_operand")))]
+ ""
+)
+
+(define_insn "*extend<SHORT:mode><GPI:mode>2_aarch64"
+ [(set (match_operand:GPI 0 "register_operand" "=r,r")
+ (sign_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ sxt<SHORT:size>\t%<GPI:w>0, %w1
+ ldrs<SHORT:size>\t%<GPI:w>0, %1"
+ [(set_attr "v8type" "extend,load1")
+ (set_attr "mode" "<GPI:MODE>")]
+)
+
+(define_insn "*zero_extend<SHORT:mode><GPI:mode>2_aarch64"
+ [(set (match_operand:GPI 0 "register_operand" "=r,r")
+ (zero_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ uxt<SHORT:size>\t%<GPI:w>0, %w1
+ ldr<SHORT:size>\t%w0, %1"
+ [(set_attr "v8type" "extend,load1")
+ (set_attr "mode" "<GPI:MODE>")]
+)
+
+(define_expand "<optab>qihi2"
+ [(set (match_operand:HI 0 "register_operand")
+ (ANY_EXTEND:HI (match_operand:QI 1 "nonimmediate_operand")))]
+ ""
+)
+
+(define_insn "*<optab>qihi2_aarch64"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (ANY_EXTEND:HI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ <su>xtb\t%w0, %w1
+ <ldrxt>b\t%w0, %1"
+ [(set_attr "v8type" "extend,load1")
+ (set_attr "mode" "HI")]
+)
+
+;; -------------------------------------------------------------------
+;; Simple arithmetic
+;; -------------------------------------------------------------------
+
+(define_expand "add<mode>3"
+ [(set
+ (match_operand:GPI 0 "register_operand" "")
+ (plus:GPI (match_operand:GPI 1 "register_operand" "")
+ (match_operand:GPI 2 "aarch64_pluslong_operand" "")))]
+ ""
+ "
+ if (! aarch64_plus_operand (operands[2], VOIDmode))
+ {
+ rtx subtarget = ((optimize && can_create_pseudo_p ())
+ ? gen_reg_rtx (<MODE>mode) : operands[0]);
+ HOST_WIDE_INT imm = INTVAL (operands[2]);
+
+ if (imm < 0)
+ imm = -(-imm & ~0xfff);
+ else
+ imm &= ~0xfff;
+
+ emit_insn (gen_add<mode>3 (subtarget, operands[1], GEN_INT (imm)));
+ operands[1] = subtarget;
+ operands[2] = GEN_INT (INTVAL (operands[2]) - imm);
+ }
+ "
+)
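+
+;; For example, x + 0x123456 cannot be encoded in one instruction, so
+;; the code above splits it into
+;;   add tmp, x, #0x123000   (0x123 lsl 12)
+;;   add dst, tmp, #0x456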
+
+(define_insn "*addsi3_aarch64"
+ [(set
+ (match_operand:SI 0 "register_operand" "=rk,rk,rk")
+ (plus:SI
+ (match_operand:SI 1 "register_operand" "%rk,rk,rk")
+ (match_operand:SI 2 "aarch64_plus_operand" "I,r,J")))]
+ ""
+ "@
+ add\\t%w0, %w1, %2
+ add\\t%w0, %w1, %w2
+ sub\\t%w0, %w1, #%n2"
+ [(set_attr "v8type" "alu")
+ (set_attr "mode" "SI")]
+)
+
+(define_insn "*adddi3_aarch64"
+ [(set
+ (match_operand:DI 0 "register_operand" "=rk,rk,rk,!w")
+ (plus:DI
+ (match_operand:DI 1 "register_operand" "%rk,rk,rk,!w")
+ (match_operand:DI 2 "aarch64_plus_operand" "I,r,J,!w")))]
+ ""
+ "@
+ add\\t%x0, %x1, %2
+ add\\t%x0, %x1, %x2
+ sub\\t%x0, %x1, #%n2
+ add\\t%d0, %d1, %d2"
+ [(set_attr "v8type" "alu")
+ (set_attr "mode" "DI")
+ (set_attr "simd" "*,*,*,yes")]
+)
+
+(define_insn "*add<mode>3_compare0"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (plus:GPI (match_operand:GPI 1 "register_operand" "%r,r")
+ (match_operand:GPI 2 "aarch64_plus_operand" "rI,J"))
+ (const_int 0)))
+ (set (match_operand:GPI 0 "register_operand" "=r,r")
+ (plus:GPI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ adds\\t%<w>0, %<w>1, %<w>2
+ subs\\t%<w>0, %<w>1, #%n2"
+ [(set_attr "v8type" "alus")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*add<mode>3nr_compare0"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (plus:GPI (match_operand:GPI 0 "register_operand" "%r,r")
+ (match_operand:GPI 1 "aarch64_plus_operand" "rI,J"))
+ (const_int 0)))]
+ ""
+ "@
+ cmn\\t%<w>0, %<w>1
+ cmp\\t%<w>0, #%n1"
+ [(set_attr "v8type" "alus")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*add_<shift>_<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (plus:GPI (ASHIFT:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))
+ (match_operand:GPI 3 "register_operand" "r")))]
+ ""
+ "add\\t%<w>0, %<w>3, %<w>1, <shift> %2"
+ [(set_attr "v8type" "alu_shift")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*add_mul_imm_<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (plus:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))
+ (match_operand:GPI 3 "register_operand" "r")))]
+ ""
+ "add\\t%<w>0, %<w>3, %<w>1, lsl %p2"
+ [(set_attr "v8type" "alu_shift")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*add_<optab><ALLX:mode>_<GPI:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (plus:GPI (ANY_EXTEND:GPI (match_operand:ALLX 1 "register_operand" "r"))
+ (match_operand:GPI 2 "register_operand" "r")))]
+ ""
+ "add\\t%<GPI:w>0, %<GPI:w>2, %<GPI:w>1, <su>xt<ALLX:size>"
+ [(set_attr "v8type" "alu_ext")
+ (set_attr "mode" "<GPI:MODE>")]
+)
+
+(define_insn "*add_<optab><ALLX:mode>_shft_<GPI:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (plus:GPI (ashift:GPI (ANY_EXTEND:GPI
+ (match_operand:ALLX 1 "register_operand" "r"))
+ (match_operand 2 "aarch64_imm3" "Ui3"))
+ (match_operand:GPI 3 "register_operand" "r")))]
+ ""
+ "add\\t%<GPI:w>0, %<GPI:w>3, %<GPI:w>1, <su>xt<ALLX:size> %2"
+ [(set_attr "v8type" "alu_ext")
+ (set_attr "mode" "<GPI:MODE>")]
+)
+
+(define_insn "*add_<optab><ALLX:mode>_mult_<GPI:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (plus:GPI (mult:GPI (ANY_EXTEND:GPI
+ (match_operand:ALLX 1 "register_operand" "r"))
+ (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+ (match_operand:GPI 3 "register_operand" "r")))]
+ ""
+ "add\\t%<GPI:w>0, %<GPI:w>3, %<GPI:w>1, <su>xt<ALLX:size> %p2"
+ [(set_attr "v8type" "alu_ext")
+ (set_attr "mode" "<GPI:MODE>")]
+)
+
+(define_insn "*add_<optab><mode>_multp2"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (plus:GPI (ANY_EXTRACT:GPI
+ (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+ (match_operand 3 "const_int_operand" "n")
+ (const_int 0))
+ (match_operand:GPI 4 "register_operand" "r")))]
+ "aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])"
+ "add\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2"
+ [(set_attr "v8type" "alu_ext")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*add<mode>3_carryin"
+ [(set
+ (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI (geu:GPI (reg:CC CC_REGNUM) (const_int 0))
+ (plus:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:GPI 2 "register_operand" "r"))))]
+ ""
+ "adc\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "v8type" "adc")
+ (set_attr "mode" "<MODE>")]
+)
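+
+;; The _alt variants below match the same add-with-carry with the
+;; commutative operands in their other canonical orders, so whichever
+;; form combine builds still maps onto adc.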
+
+(define_insn "*add<mode>3_carryin_alt1"
+ [(set
+ (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI (plus:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:GPI 2 "register_operand" "r"))
+ (geu:GPI (reg:CC CC_REGNUM) (const_int 0))))]
+ ""
+ "adc\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "v8type" "adc")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*add<mode>3_carryin_alt2"
+ [(set
+ (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI (plus:GPI
+ (geu:GPI (reg:CC CC_REGNUM) (const_int 0))
+ (match_operand:GPI 1 "register_operand" "r"))
+ (match_operand:GPI 2 "register_operand" "r")))]
+ ""
+ "adc\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "v8type" "adc")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*add<mode>3_carryin_alt3"
+ [(set
+ (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI (plus:GPI
+ (geu:GPI (reg:CC CC_REGNUM) (const_int 0))
+ (match_operand:GPI 2 "register_operand" "r"))
+ (match_operand:GPI 1 "register_operand" "r")))]
+ ""
+ "adc\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "v8type" "adc")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*add_uxt<mode>_multp2"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (plus:GPI (and:GPI
+ (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+ (match_operand 3 "const_int_operand" "n"))
+ (match_operand:GPI 4 "register_operand" "r")))]
+ "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])), INTVAL (operands[3])) != 0"
+ "*
+ operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),
+ INTVAL (operands[3])));
+ return \"add\t%<w>0, %<w>4, %<w>1, uxt%e3 %p2\";"
+ [(set_attr "v8type" "alu_ext")
+ (set_attr "mode" "<MODE>")]
+)
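+
+;; The and-of-mult form above arises from e.g. (x * 4) & 0x3fc, which
+;; zero-extends the low byte of x and scales it by 4, so it is emitted
+;; as an add with a uxtb extend and a left shift of 2.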
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=rk")
+ (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "sub\\t%w0, %w1, %w2"
+ [(set_attr "v8type" "alu")
+ (set_attr "mode" "SI")]
+)
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "register_operand" "=rk,!w")
+ (minus:DI (match_operand:DI 1 "register_operand" "r,!w")
+ (match_operand:DI 2 "register_operand" "r,!w")))]
+ ""
+ "@
+ sub\\t%x0, %x1, %x2
+ sub\\t%d0, %d1, %d2"
+ [(set_attr "v8type" "alu")
+ (set_attr "mode" "DI")
+ (set_attr "simd" "*,yes")]
+)
+
+
+(define_insn "*sub<mode>3_compare0"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ (minus:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:GPI 2 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (minus:GPI (match_dup 1) (match_dup 2)))]
+ ""
+ "subs\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "v8type" "alus")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*sub_<shift>_<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (minus:GPI (match_operand:GPI 3 "register_operand" "r")
+ (ASHIFT:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))))]
+ ""
+ "sub\\t%<w>0, %<w>3, %<w>1, <shift> %2"
+ [(set_attr "v8type" "alu_shift")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*sub_mul_imm_<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (minus:GPI (match_operand:GPI 3 "register_operand" "r")
+ (mult:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))))]
+ ""
+ "sub\\t%<w>0, %<w>3, %<w>1, lsl %p2"
+ [(set_attr "v8type" "alu_shift")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*sub_<optab><ALLX:mode>_<GPI:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (minus:GPI (match_operand:GPI 1 "register_operand" "r")
+ (ANY_EXTEND:GPI
+ (match_operand:ALLX 2 "register_operand" "r"))))]
+ ""
+ "sub\\t%<GPI:w>0, %<GPI:w>1, %<GPI:w>2, <su>xt<ALLX:size>"
+ [(set_attr "v8type" "alu_ext")
+ (set_attr "mode" "<GPI:MODE>")]
+)
+
+(define_insn "*sub_<optab><ALLX:mode>_shft_<GPI:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (minus:GPI (match_operand:GPI 1 "register_operand" "r")
+ (ashift:GPI (ANY_EXTEND:GPI
+ (match_operand:ALLX 2 "register_operand" "r"))
+ (match_operand 3 "aarch64_imm3" "Ui3"))))]
+ ""
+ "sub\\t%<GPI:w>0, %<GPI:w>1, %<GPI:w>2, <su>xt<ALLX:size> %3"
+ [(set_attr "v8type" "alu_ext")
+ (set_attr "mode" "<GPI:MODE>")]
+)
+
+(define_insn "*sub_<optab><mode>_multp2"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (minus:GPI (match_operand:GPI 4 "register_operand" "r")
+ (ANY_EXTRACT:GPI
+ (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+ (match_operand 3 "const_int_operand" "n")
+ (const_int 0))))]
+ "aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])"
+ "sub\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2"
+ [(set_attr "v8type" "alu_ext")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*sub_uxt<mode>_multp2"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (minus:GPI (match_operand:GPI 4 "register_operand" "r")
+ (and:GPI
+ (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+ (match_operand 3 "const_int_operand" "n"))))]
+ "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),INTVAL (operands[3])) != 0"
+ "*
+ operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),
+ INTVAL (operands[3])));
+ return \"sub\t%<w>0, %<w>4, %<w>1, uxt%e3 %p2\";"
+ [(set_attr "v8type" "alu_ext")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "neg<mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (neg:GPI (match_operand:GPI 1 "register_operand" "r")))]
+ ""
+ "neg\\t%<w>0, %<w>1"
+ [(set_attr "v8type" "alu")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*neg<mode>2_compare0"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ (neg:GPI (match_operand:GPI 1 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (neg:GPI (match_dup 1)))]
+ ""
+ "negs\\t%<w>0, %<w>1"
+ [(set_attr "v8type" "alus")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*neg_<shift>_<mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (neg:GPI (ASHIFT:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))))]
+ ""
+ "neg\\t%<w>0, %<w>1, <shift> %2"
+ [(set_attr "v8type" "alu_shift")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*neg_mul_imm_<mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (neg:GPI (mult:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))))]
+ ""
+ "neg\\t%<w>0, %<w>1, lsl %p2"
+ [(set_attr "v8type" "alu_shift")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "mul<mode>3"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:GPI 2 "register_operand" "r")))]
+ ""
+ "mul\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "v8type" "mult")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*madd<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:GPI 2 "register_operand" "r"))
+ (match_operand:GPI 3 "register_operand" "r")))]
+ ""
+ "madd\\t%<w>0, %<w>1, %<w>2, %<w>3"
+ [(set_attr "v8type" "madd")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*msub<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (minus:GPI (match_operand:GPI 3 "register_operand" "r")
+ (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:GPI 2 "register_operand" "r"))))]
+ ""
+ "msub\\t%<w>0, %<w>1, %<w>2, %<w>3"
+ [(set_attr "v8type" "madd")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*mul<mode>_neg"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (mult:GPI (neg:GPI (match_operand:GPI 1 "register_operand" "r"))
+ (match_operand:GPI 2 "register_operand" "r")))]
+ ""
+ "mneg\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "v8type" "mult")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "<su_optab>mulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r"))
+ (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r"))))]
+ ""
+ "<su>mull\\t%0, %w1, %w2"
+ [(set_attr "v8type" "mull")
+ (set_attr "mode" "DI")]
+)
+
+(define_insn "<su_optab>maddsidi4"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (mult:DI
+ (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r"))
+ (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r")))
+ (match_operand:DI 3 "register_operand" "r")))]
+ ""
+ "<su>maddl\\t%0, %w1, %w2, %3"
+ [(set_attr "v8type" "maddl")
+ (set_attr "mode" "DI")]
+)
+
+(define_insn "<su_optab>msubsidi4"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI
+ (match_operand:DI 3 "register_operand" "r")
+ (mult:DI (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r"))
+ (ANY_EXTEND:DI
+ (match_operand:SI 2 "register_operand" "r")))))]
+ ""
+ "<su>msubl\\t%0, %w1, %w2, %3"
+ [(set_attr "v8type" "maddl")
+ (set_attr "mode" "DI")]
+)
+
+(define_insn "*<su_optab>mulsidi_neg"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (neg:DI
+ (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r")))
+ (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r"))))]
+ ""
+ "<su>mnegl\\t%0, %w1, %w2"
+ [(set_attr "v8type" "mull")
+ (set_attr "mode" "DI")]
+)
+
+(define_insn "<su>muldi3_highpart"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI
+ (ANY_EXTEND:TI (match_operand:DI 1 "register_operand" "r"))
+ (ANY_EXTEND:TI (match_operand:DI 2 "register_operand" "r")))
+ (const_int 64))))]
+ ""
+ "<su>mulh\\t%0, %1, %2"
+ [(set_attr "v8type" "mulh")
+ (set_attr "mode" "DI")]
+)
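+
+;; smulh/umulh return bits [127:64] of the full 128-bit product, which
+;; is what the truncate of the 64-bit right shift above expresses.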
+
+(define_insn "<su_optab>div<mode>3"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (ANY_DIV:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:GPI 2 "register_operand" "r")))]
+ ""
+ "<su>div\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "v8type" "<su>div")
+ (set_attr "mode" "<MODE>")]
+)
+
+;; -------------------------------------------------------------------
+;; Comparison insns
+;; -------------------------------------------------------------------
+
+(define_insn "*cmp<mode>"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC (match_operand:GPI 0 "register_operand" "r,r")
+ (match_operand:GPI 1 "aarch64_plus_operand" "rI,J")))]
+ ""
+ "@
+ cmp\\t%<w>0, %<w>1
+ cmn\\t%<w>0, #%n1"
+ [(set_attr "v8type" "alus")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*cmp<mode>"
+ [(set (reg:CCFP CC_REGNUM)
+ (compare:CCFP (match_operand:GPF 0 "register_operand" "w,w")
+ (match_operand:GPF 1 "aarch64_fp_compare_operand" "Y,w")))]
+ "TARGET_FLOAT"
+ "@
+ fcmp\\t%<s>0, #0.0
+ fcmp\\t%<s>0, %<s>1"
+ [(set_attr "v8type" "fcmp")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*cmpe<mode>"
+ [(set (reg:CCFPE CC_REGNUM)
+ (compare:CCFPE (match_operand:GPF 0 "register_operand" "w,w")
+ (match_operand:GPF 1 "aarch64_fp_compare_operand" "Y,w")))]
+ "TARGET_FLOAT"
+ "@
+ fcmpe\\t%<s>0, #0.0
+ fcmpe\\t%<s>0, %<s>1"
+ [(set_attr "v8type" "fcmp")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*cmp_swp_<shift>_reg<mode>"
+ [(set (reg:CC_SWP CC_REGNUM)
+ (compare:CC_SWP (ASHIFT:GPI
+ (match_operand:GPI 0 "register_operand" "r")
+ (match_operand:QI 1 "aarch64_shift_imm_<mode>" "n"))
+ (match_operand:GPI 2 "aarch64_reg_or_zero" "rZ")))]
+ ""
+ "cmp\\t%<w>2, %<w>0, <shift> %1"
+ [(set_attr "v8type" "alus_shift")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*cmp_swp_<optab><ALLX:mode>_reg<GPI:mode>"
+ [(set (reg:CC_SWP CC_REGNUM)
+ (compare:CC_SWP (ANY_EXTEND:GPI
+ (match_operand:ALLX 0 "register_operand" "r"))
+ (match_operand:GPI 1 "register_operand" "r")))]
+ ""
+ "cmp\\t%<GPI:w>1, %<GPI:w>0, <su>xt<ALLX:size>"
+ [(set_attr "v8type" "alus_ext")
+ (set_attr "mode" "<GPI:MODE>")]
+)
+
+
+;; -------------------------------------------------------------------
+;; Store-flag and conditional select insns
+;; -------------------------------------------------------------------
+
+(define_expand "cstore<mode>4"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operator:SI 1 "aarch64_comparison_operator"
+ [(match_operand:GPI 2 "register_operand" "")
+ (match_operand:GPI 3 "aarch64_plus_operand" "")]))]
+ ""
+ "
+ operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+ operands[3] = const0_rtx;
+ "
+)
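+
+;; For example, "r = (a < b)" expands into a compare against the CC
+;; register followed by "cset r, lt" via *cstore<mode>_insn below.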
+
+(define_expand "cstore<mode>4"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operator:SI 1 "aarch64_comparison_operator"
+ [(match_operand:GPF 2 "register_operand" "")
+ (match_operand:GPF 3 "register_operand" "")]))]
+ ""
+ "
+ operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+ operands[3] = const0_rtx;
+ "
+)
+
+(define_insn "*cstore<mode>_insn"
+ [(set (match_operand:ALLI 0 "register_operand" "=r")
+ (match_operator:ALLI 1 "aarch64_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)]))]
+ ""
+ "cset\\t%<w>0, %m1"
+ [(set_attr "v8type" "csel")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*cstore<mode>_neg"
+ [(set (match_operand:ALLI 0 "register_operand" "=r")
+ (neg:ALLI (match_operator:ALLI 1 "aarch64_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ ""
+ "csetm\\t%<w>0, %m1"
+ [(set_attr "v8type" "csel")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_expand "cmov<mode>6"
+ [(set (match_operand:GPI 0 "register_operand" "")
+ (if_then_else:GPI
+ (match_operator 1 "aarch64_comparison_operator"
+ [(match_operand:GPI 2 "register_operand" "")
+ (match_operand:GPI 3 "aarch64_plus_operand" "")])
+ (match_operand:GPI 4 "register_operand" "")
+ (match_operand:GPI 5 "register_operand" "")))]
+ ""
+ "
+ operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+ operands[3] = const0_rtx;
+ "
+)
+
+(define_expand "cmov<mode>6"
+ [(set (match_operand:GPF 0 "register_operand" "")
+ (if_then_else:GPF
+ (match_operator 1 "aarch64_comparison_operator"
+ [(match_operand:GPF 2 "register_operand" "")
+ (match_operand:GPF 3 "register_operand" "")])
+ (match_operand:GPF 4 "register_operand" "")
+ (match_operand:GPF 5 "register_operand" "")))]
+ ""
+ "
+ operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+ operands[3] = const0_rtx;
+ "
+)
+
+(define_insn "*cmov<mode>_insn"
+ [(set (match_operand:ALLI 0 "register_operand" "=r,r,r,r")
+ (if_then_else:ALLI
+ (match_operator 1 "aarch64_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (match_operand:ALLI 3 "aarch64_reg_zero_or_m1" "rZ,rZ,UsM,UsM")
+ (match_operand:ALLI 4 "aarch64_reg_zero_or_m1" "rZ,UsM,rZ,UsM")))]
+ ""
+ ;; Final alternative should be unreachable, but included for completeness.
+ "@
+ csel\\t%<w>0, %<w>3, %<w>4, %m1
+ csinv\\t%<w>0, %<w>3, <w>zr, %m1
+ csinv\\t%<w>0, %<w>4, <w>zr, %M1
+ mov\\t%<w>0, -1"
+ [(set_attr "v8type" "csel")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*cmov<mode>_insn"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (if_then_else:GPF
+ (match_operator 1 "aarch64_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (match_operand:GPF 3 "register_operand" "w")
+ (match_operand:GPF 4 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fcsel\\t%<s>0, %<s>3, %<s>4, %m1"
+ [(set_attr "v8type" "fcsel")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_expand "mov<mode>cc"
+ [(set (match_operand:ALLI 0 "register_operand" "")
+ (if_then_else:ALLI (match_operand 1 "aarch64_comparison_operator" "")
+ (match_operand:ALLI 2 "register_operand" "")
+ (match_operand:ALLI 3 "register_operand" "")))]
+ ""
+ {
+ rtx ccreg;
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ if (code == UNEQ || code == LTGT)
+ FAIL;
+
+ ccreg = aarch64_gen_compare_reg (code, XEXP (operands[1], 0),
+ XEXP (operands[1], 1));
+ operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx);
+ }
+)
+
+(define_expand "mov<GPF:mode><GPI:mode>cc"
+ [(set (match_operand:GPI 0 "register_operand" "")
+ (if_then_else:GPI (match_operand 1 "aarch64_comparison_operator" "")
+ (match_operand:GPF 2 "register_operand" "")
+ (match_operand:GPF 3 "register_operand" "")))]
+ ""
+ {
+ rtx ccreg;
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ if (code == UNEQ || code == LTGT)
+ FAIL;
+
+ ccreg = aarch64_gen_compare_reg (code, XEXP (operands[1], 0),
+ XEXP (operands[1], 1));
+ operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx);
+ }
+)
+
+(define_insn "*csinc2<mode>_insn"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI (match_operator:GPI 2 "aarch64_comparison_operator"
+ [(match_operand:CC 3 "cc_register" "") (const_int 0)])
+ (match_operand:GPI 1 "register_operand" "r")))]
+ ""
+ "csinc\\t%<w>0, %<w>1, %<w>1, %M2"
+ [(set_attr "v8type" "csel")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "csinc3<mode>_insn"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (if_then_else:GPI
+ (match_operator:GPI 1 "aarch64_comparison_operator"
+ [(match_operand:CC 2 "cc_register" "") (const_int 0)])
+ (plus:GPI (match_operand:GPI 3 "register_operand" "r")
+ (const_int 1))
+ (match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))]
+ ""
+ "csinc\\t%<w>0, %<w>4, %<w>3, %M1"
+ [(set_attr "v8type" "csel")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*csinv3<mode>_insn"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (if_then_else:GPI
+ (match_operator:GPI 1 "aarch64_comparison_operator"
+ [(match_operand:CC 2 "cc_register" "") (const_int 0)])
+ (not:GPI (match_operand:GPI 3 "register_operand" "r"))
+ (match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))]
+ ""
+ "csinv\\t%<w>0, %<w>4, %<w>3, %M1"
+ [(set_attr "v8type" "csel")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*csneg3<mode>_insn"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (if_then_else:GPI
+ (match_operator:GPI 1 "aarch64_comparison_operator"
+ [(match_operand:CC 2 "cc_register" "") (const_int 0)])
+ (neg:GPI (match_operand:GPI 3 "register_operand" "r"))
+ (match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))]
+ ""
+ "csneg\\t%<w>0, %<w>4, %<w>3, %M1"
+ [(set_attr "v8type" "csel")
+ (set_attr "mode" "<MODE>")])
+
+;; -------------------------------------------------------------------
+;; Logical operations
+;; -------------------------------------------------------------------
+
+(define_insn "<optab><mode>3"
+ [(set (match_operand:GPI 0 "register_operand" "=r,r")
+ (LOGICAL:GPI (match_operand:GPI 1 "register_operand" "%r,r")
+ (match_operand:GPI 2 "aarch64_logical_operand" "r,<lconst>")))]
+ ""
+ "<logical>\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "v8type" "logic")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*<LOGICAL:optab>_<SHIFT:optab><mode>3"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (LOGICAL:GPI (SHIFT:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))
+ (match_operand:GPI 3 "register_operand" "r")))]
+ ""
+ "<LOGICAL:logical>\\t%<w>0, %<w>3, %<w>1, <SHIFT:shift> %2"
+ [(set_attr "v8type" "logic_shift")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "one_cmpl<mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (not:GPI (match_operand:GPI 1 "register_operand" "r")))]
+ ""
+ "mvn\\t%<w>0, %<w>1"
+ [(set_attr "v8type" "logic")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*one_cmpl_<optab><mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (not:GPI (SHIFT:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))))]
+ ""
+ "mvn\\t%<w>0, %<w>1, <shift> %2"
+ [(set_attr "v8type" "logic_shift")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*<LOGICAL:optab>_one_cmpl<mode>3"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (LOGICAL:GPI (not:GPI
+ (match_operand:GPI 1 "register_operand" "r"))
+ (match_operand:GPI 2 "register_operand" "r")))]
+ ""
+ "<LOGICAL:nlogical>\\t%<w>0, %<w>2, %<w>1"
+ [(set_attr "v8type" "logic")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*<LOGICAL:optab>_one_cmpl_<SHIFT:optab><mode>3"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (LOGICAL:GPI (not:GPI
+ (SHIFT:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n")))
+ (match_operand:GPI 3 "register_operand" "r")))]
+ ""
+ "<LOGICAL:nlogical>\\t%<w>0, %<w>3, %<w>1, <SHIFT:shift> %2"
+ [(set_attr "v8type" "logic_shift")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "clz<mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (clz:GPI (match_operand:GPI 1 "register_operand" "r")))]
+ ""
+ "clz\\t%<w>0, %<w>1"
+ [(set_attr "v8type" "clz")
+ (set_attr "mode" "<MODE>")])
+
+(define_expand "ffs<mode>2"
+ [(match_operand:GPI 0 "register_operand")
+ (match_operand:GPI 1 "register_operand")]
+ ""
+ {
+ rtx ccreg = aarch64_gen_compare_reg (EQ, operands[1], const0_rtx);
+ rtx x = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
+
+ emit_insn (gen_rbit<mode>2 (operands[0], operands[1]));
+ emit_insn (gen_clz<mode>2 (operands[0], operands[0]));
+ emit_insn (gen_csinc3<mode>_insn (operands[0], x, ccreg, operands[0], const0_rtx));
+ DONE;
+ }
+)
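+
+;; This computes ffs (x) = ctz (x) + 1 for x != 0 and 0 otherwise:
+;; rbit/clz produce ctz (x), and the csinc selects zero when the EQ
+;; comparison holds, or increments the count when it does not.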
+
+(define_insn "clrsb<mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (unspec:GPI [(match_operand:GPI 1 "register_operand" "r")] UNSPEC_CLS))]
+ ""
+ "cls\\t%<w>0, %<w>1"
+ [(set_attr "v8type" "clz")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "rbit<mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (unspec:GPI [(match_operand:GPI 1 "register_operand" "r")] UNSPEC_RBIT))]
+ ""
+ "rbit\\t%<w>0, %<w>1"
+ [(set_attr "v8type" "rbit")
+ (set_attr "mode" "<MODE>")])
+
+(define_expand "ctz<mode>2"
+ [(match_operand:GPI 0 "register_operand")
+ (match_operand:GPI 1 "register_operand")]
+ ""
+ {
+ emit_insn (gen_rbit<mode>2 (operands[0], operands[1]));
+ emit_insn (gen_clz<mode>2 (operands[0], operands[0]));
+ DONE;
+ }
+)
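+
+;; ctz (x) == clz (rbit (x)): reversing the bits turns trailing zeros
+;; into leading zeros, and AArch64 has no direct ctz instruction.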
+
+(define_insn "*and<mode>3nr_compare0"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC
+ (and:GPI (match_operand:GPI 0 "register_operand" "%r,r")
+ (match_operand:GPI 1 "aarch64_logical_operand" "r,<lconst>"))
+ (const_int 0)))]
+ ""
+ "tst\\t%<w>0, %<w>1"
+ [(set_attr "v8type" "logics")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*and_<SHIFT:optab><mode>3nr_compare0"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC
+ (and:GPI (SHIFT:GPI
+ (match_operand:GPI 0 "register_operand" "r")
+ (match_operand:QI 1 "aarch64_shift_imm_<mode>" "n"))
+ (match_operand:GPI 2 "register_operand" "r"))
+ (const_int 0)))]
+ ""
+ "tst\\t%<w>2, %<w>0, <SHIFT:shift> %1"
+ [(set_attr "v8type" "logics_shift")
+ (set_attr "mode" "<MODE>")])
+
+;; -------------------------------------------------------------------
+;; Shifts
+;; -------------------------------------------------------------------
+
+(define_expand "<optab><mode>3"
+ [(set (match_operand:GPI 0 "register_operand")
+ (ASHIFT:GPI (match_operand:GPI 1 "register_operand")
+ (match_operand:QI 2 "nonmemory_operand")))]
+ ""
+ {
+ if (CONST_INT_P (operands[2]))
+ {
+ operands[2] = GEN_INT (INTVAL (operands[2])
+ & (GET_MODE_BITSIZE (<MODE>mode) - 1));
+
+ if (operands[2] == const0_rtx)
+ {
+ emit_insn (gen_mov<mode> (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+)
+
+(define_expand "ashl<mode>3"
+ [(set (match_operand:SHORT 0 "register_operand")
+ (ashift:SHORT (match_operand:SHORT 1 "register_operand")
+ (match_operand:QI 2 "nonmemory_operand")))]
+ ""
+ {
+ if (CONST_INT_P (operands[2]))
+ {
+ operands[2] = GEN_INT (INTVAL (operands[2])
+ & (GET_MODE_BITSIZE (<MODE>mode) - 1));
+
+ if (operands[2] == const0_rtx)
+ {
+ emit_insn (gen_mov<mode> (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+)
+
+(define_expand "rotr<mode>3"
+ [(set (match_operand:GPI 0 "register_operand")
+ (rotatert:GPI (match_operand:GPI 1 "register_operand")
+ (match_operand:QI 2 "nonmemory_operand")))]
+ ""
+ {
+ if (CONST_INT_P (operands[2]))
+ {
+ operands[2] = GEN_INT (INTVAL (operands[2])
+ & (GET_MODE_BITSIZE (<MODE>mode) - 1));
+
+ if (operands[2] == const0_rtx)
+ {
+ emit_insn (gen_mov<mode> (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+)
+
+(define_expand "rotl<mode>3"
+ [(set (match_operand:GPI 0 "register_operand")
+ (rotatert:GPI (match_operand:GPI 1 "register_operand")
+ (match_operand:QI 2 "nonmemory_operand")))]
+ ""
+ {
+ /* (SZ - cnt) % SZ == -cnt % SZ */
+ if (CONST_INT_P (operands[2]))
+ {
+ operands[2] = GEN_INT ((-INTVAL (operands[2]))
+ & (GET_MODE_BITSIZE (<MODE>mode) - 1));
+ if (operands[2] == const0_rtx)
+ {
+ emit_insn (gen_mov<mode> (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ else
+ operands[2] = expand_simple_unop (QImode, NEG, operands[2],
+ NULL_RTX, 1);
+ }
+)
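+
+;; For example, an SImode rotate-left by 10 is rewritten as a
+;; rotate-right by 22; a register count is simply negated, since ror
+;; only uses the low bits of the count anyway.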
+
+(define_insn "*<optab><mode>3_insn"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (SHIFT:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>" "rUs<cmode>")))]
+ ""
+ "<shift>\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "v8type" "shift")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*ashl<mode>3_insn"
+ [(set (match_operand:SHORT 0 "register_operand" "=r")
+ (ashift:SHORT (match_operand:SHORT 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_reg_or_shift_imm_si" "rUss")))]
+ ""
+ "lsl\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "v8type" "shift")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*<optab><mode>3_insn"
+ [(set (match_operand:SHORT 0 "register_operand" "=r")
+ (ASHIFT:SHORT (match_operand:SHORT 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "n")))]
+ "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<MODE>mode)"
+{
+ operands[3] = GEN_INT (<sizen> - UINTVAL (operands[2]));
+ return "<bfshift>\t%w0, %w1, %2, %3";
+}
+ [(set_attr "v8type" "bfm")
+ (set_attr "mode" "<MODE>")]
+)
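+
+;; Shifting a short value is done as a bitfield operation on the wider
+;; register: e.g. an HImode left shift by 3 becomes "ubfiz w0, w1, 3, 13",
+;; inserting the 13 surviving bits at position 3.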
+
+(define_insn "*<ANY_EXTEND:optab><GPI:mode>_ashl<SHORT:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (ANY_EXTEND:GPI
+ (ashift:SHORT (match_operand:SHORT 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "n"))))]
+ "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
+{
+ operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
+ return "<su>bfiz\t%<GPI:w>0, %<GPI:w>1, %2, %3";
+}
+ [(set_attr "v8type" "bfm")
+ (set_attr "mode" "<GPI:MODE>")]
+)
+
+(define_insn "*zero_extend<GPI:mode>_lshr<SHORT:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (zero_extend:GPI
+ (lshiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "n"))))]
+ "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
+{
+ operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
+ return "ubfx\t%<GPI:w>0, %<GPI:w>1, %2, %3";
+}
+ [(set_attr "v8type" "bfm")
+ (set_attr "mode" "<GPI:MODE>")]
+)
+
+(define_insn "*extend<GPI:mode>_ashr<SHORT:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (sign_extend:GPI
+ (ashiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "n"))))]
+ "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
+{
+ operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
+ return "sbfx\\t%<GPI:w>0, %<GPI:w>1, %2, %3";
+}
+ [(set_attr "v8type" "bfm")
+ (set_attr "mode" "<GPI:MODE>")]
+)
+
+;; -------------------------------------------------------------------
+;; Bitfields
+;; -------------------------------------------------------------------
+
+(define_expand "<optab>"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ANY_EXTRACT:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "n")
+ (match_operand 3 "const_int_operand" "n")))]
+ ""
+ ""
+)
+
+(define_insn "*<optab><mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (ANY_EXTRACT:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "n")
+ (match_operand 3 "const_int_operand" "n")))]
+ ""
+ "<su>bfx\\t%<w>0, %<w>1, %3, %2"
+ [(set_attr "v8type" "bfm")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*<optab><ALLX:mode>_shft_<GPI:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (ashift:GPI (ANY_EXTEND:GPI
+ (match_operand:ALLX 1 "register_operand" "r"))
+ (match_operand 2 "const_int_operand" "n")))]
+ ""
+ "<su>bfiz\\t%<GPI:w>0, %<GPI:w>1, %2, #<ALLX:sizen>"
+ [(set_attr "v8type" "bfm")
+ (set_attr "mode" "<GPI:MODE>")]
+)
+
+;; XXX We should match (any_extend (ashift)) here, like (and (ashift)) below
+
+(define_insn "*andim_ashift<mode>_bfiz"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (and:GPI (ashift:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "n"))
+ (match_operand 3 "const_int_operand" "n")))]
+ "exact_log2 ((INTVAL (operands[3]) >> INTVAL (operands[2])) + 1) >= 0
+ && (INTVAL (operands[3]) & ((1 << INTVAL (operands[2])) - 1)) == 0"
+ "ubfiz\\t%<w>0, %<w>1, %2, %P3"
+ [(set_attr "v8type" "bfm")
+ (set_attr "mode" "<MODE>")]
+)
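+
+;; For example, (x << 3) & 0x1f8 keeps six bits of x at bit position 3,
+;; so it satisfies the condition above and becomes "ubfiz w0, w1, 3, 6".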
+
+(define_insn "bswap<mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (bswap:GPI (match_operand:GPI 1 "register_operand" "r")))]
+ ""
+ "rev\\t%<w>0, %<w>1"
+ [(set_attr "v8type" "rev")
+ (set_attr "mode" "<MODE>")]
+)
+
+;; -------------------------------------------------------------------
+;; Floating-point intrinsics
+;; -------------------------------------------------------------------
+
+;; trunc - nothrow
+
+(define_insn "btrunc<mode>2"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+ UNSPEC_FRINTZ))]
+ "TARGET_FLOAT"
+ "frintz\\t%<s>0, %<s>1"
+ [(set_attr "v8type" "frint")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*lbtrunc<su_optab><GPF:mode><GPI:mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (FIXUORS:GPI (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+ UNSPEC_FRINTZ)))]
+ "TARGET_FLOAT"
+ "fcvtz<su>\\t%<GPI:w>0, %<GPF:s>1"
+ [(set_attr "v8type" "fcvtf2i")
+ (set_attr "mode" "<GPF:MODE>")
+ (set_attr "mode2" "<GPI:MODE>")]
+)
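+
+;; Folding the frintz into the conversion lets e.g. "(long) trunc (d)"
+;; be emitted as a single fcvtzs (or fcvtzu) rather than frintz followed
+;; by a separate convert.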
+
+;; ceil - nothrow
+
+(define_insn "ceil<mode>2"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+ UNSPEC_FRINTP))]
+ "TARGET_FLOAT"
+ "frintp\\t%<s>0, %<s>1"
+ [(set_attr "v8type" "frint")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "lceil<su_optab><GPF:mode><GPI:mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (FIXUORS:GPI (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+ UNSPEC_FRINTP)))]
+ "TARGET_FLOAT"
+ "fcvtp<su>\\t%<GPI:w>0, %<GPF:s>1"
+ [(set_attr "v8type" "fcvtf2i")
+ (set_attr "mode" "<GPF:MODE>")
+ (set_attr "mode2" "<GPI:MODE>")]
+)
+
+;; floor - nothrow
+
+(define_insn "floor<mode>2"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+ UNSPEC_FRINTM))]
+ "TARGET_FLOAT"
+ "frintm\\t%<s>0, %<s>1"
+ [(set_attr "v8type" "frint")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "lfloor<su_optab><GPF:mode><GPI:mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (FIXUORS:GPI (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+ UNSPEC_FRINTM)))]
+ "TARGET_FLOAT"
+ "fcvtm<su>\\t%<GPI:w>0, %<GPF:s>1"
+ [(set_attr "v8type" "fcvtf2i")
+ (set_attr "mode" "<GPF:MODE>")
+ (set_attr "mode2" "<GPI:MODE>")]
+)
+
+;; nearbyint - nothrow
+
+(define_insn "nearbyint<mode>2"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+ UNSPEC_FRINTI))]
+ "TARGET_FLOAT"
+ "frinti\\t%<s>0, %<s>1"
+ [(set_attr "v8type" "frint")
+ (set_attr "mode" "<MODE>")]
+)
+
+;; rint
+
+(define_insn "rint<mode>2"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+ UNSPEC_FRINTX))]
+ "TARGET_FLOAT"
+ "frintx\\t%<s>0, %<s>1"
+ [(set_attr "v8type" "frint")
+ (set_attr "mode" "<MODE>")]
+)
+
+;; round - nothrow
+
+(define_insn "round<mode>2"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+ UNSPEC_FRINTA))]
+ "TARGET_FLOAT"
+ "frinta\\t%<s>0, %<s>1"
+ [(set_attr "v8type" "frint")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "lround<su_optab><GPF:mode><GPI:mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (FIXUORS:GPI (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+ UNSPEC_FRINTA)))]
+ "TARGET_FLOAT"
+ "fcvta<su>\\t%<GPI:w>0, %<GPF:s>1"
+ [(set_attr "v8type" "fcvtf2i")
+ (set_attr "mode" "<GPF:MODE>")
+ (set_attr "mode2" "<GPI:MODE>")]
+)
+
+;; fma - no throw
+
+(define_insn "fma<mode>4"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (fma:GPF (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w")
+ (match_operand:GPF 3 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fmadd\\t%<s>0, %<s>1, %<s>2, %<s>3"
+ [(set_attr "v8type" "fmadd")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "fnma<mode>4"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (fma:GPF (neg:GPF (match_operand:GPF 1 "register_operand" "w"))
+ (match_operand:GPF 2 "register_operand" "w")
+ (match_operand:GPF 3 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fmsub\\t%<s>0, %<s>1, %<s>2, %<s>3"
+ [(set_attr "v8type" "fmadd")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "fms<mode>4"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (fma:GPF (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w")
+ (neg:GPF (match_operand:GPF 3 "register_operand" "w"))))]
+ "TARGET_FLOAT"
+ "fnmsub\\t%<s>0, %<s>1, %<s>2, %<s>3"
+ [(set_attr "v8type" "fmadd")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "fnms<mode>4"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (fma:GPF (neg:GPF (match_operand:GPF 1 "register_operand" "w"))
+ (match_operand:GPF 2 "register_operand" "w")
+ (neg:GPF (match_operand:GPF 3 "register_operand" "w"))))]
+ "TARGET_FLOAT"
+ "fnmadd\\t%<s>0, %<s>1, %<s>2, %<s>3"
+ [(set_attr "v8type" "fmadd")
+ (set_attr "mode" "<MODE>")]
+)
+
+;; If signed zeros are ignored, -(a * b + c) = -a * b - c.
+(define_insn "*fnmadd<mode>4"
+ [(set (match_operand:GPF 0 "register_operand")
+ (neg:GPF (fma:GPF (match_operand:GPF 1 "register_operand")
+ (match_operand:GPF 2 "register_operand")
+ (match_operand:GPF 3 "register_operand"))))]
+ "!HONOR_SIGNED_ZEROS (<MODE>mode) && TARGET_FLOAT"
+ "fnmadd\\t%<s>0, %<s>1, %<s>2, %<s>3"
+ [(set_attr "v8type" "fmadd")
+ (set_attr "mode" "<MODE>")]
+)
+
+;; -------------------------------------------------------------------
+;; Floating-point conversions
+;; -------------------------------------------------------------------
+
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=w")
+ (float_extend:DF (match_operand:SF 1 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fcvt\\t%d0, %s1"
+ [(set_attr "v8type" "fcvt")
+ (set_attr "mode" "DF")
+ (set_attr "mode2" "SF")]
+)
+
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=w")
+ (float_truncate:SF (match_operand:DF 1 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fcvt\\t%s0, %d1"
+ [(set_attr "v8type" "fcvt")
+ (set_attr "mode" "SF")
+ (set_attr "mode2" "DF")]
+)
+
+(define_insn "fix_trunc<GPF:mode><GPI:mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (fix:GPI (match_operand:GPF 1 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fcvtzs\\t%<GPI:w>0, %<GPF:s>1"
+ [(set_attr "v8type" "fcvtf2i")
+ (set_attr "mode" "<GPF:MODE>")
+ (set_attr "mode2" "<GPI:MODE>")]
+)
+
+(define_insn "fixuns_trunc<GPF:mode><GPI:mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (unsigned_fix:GPI (match_operand:GPF 1 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fcvtzu\\t%<GPI:w>0, %<GPF:s>1"
+ [(set_attr "v8type" "fcvtf2i")
+ (set_attr "mode" "<GPF:MODE>")
+ (set_attr "mode2" "<GPI:MODE>")]
+)
+
+(define_insn "float<GPI:mode><GPF:mode>2"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (float:GPF (match_operand:GPI 1 "register_operand" "r")))]
+ "TARGET_FLOAT"
+ "scvtf\\t%<GPF:s>0, %<GPI:w>1"
+ [(set_attr "v8type" "fcvti2f")
+ (set_attr "mode" "<GPF:MODE>")
+ (set_attr "mode2" "<GPI:MODE>")]
+)
+
+(define_insn "floatuns<GPI:mode><GPF:mode>2"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (unsigned_float:GPF (match_operand:GPI 1 "register_operand" "r")))]
+ "TARGET_FLOAT"
+ "ucvtf\\t%<GPF:s>0, %<GPI:w>1"
+ [(set_attr "v8type" "fcvti2f")
+ (set_attr "mode" "<GPF:MODE>")
+ (set_attr "mode2" "<GPI:MODE>")]
+)
+
+;; -------------------------------------------------------------------
+;; Floating-point arithmetic
+;; -------------------------------------------------------------------
+
+(define_insn "add<mode>3"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (plus:GPF
+ (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fadd\\t%<s>0, %<s>1, %<s>2"
+ [(set_attr "v8type" "fadd")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "sub<mode>3"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (minus:GPF
+ (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fsub\\t%<s>0, %<s>1, %<s>2"
+ [(set_attr "v8type" "fadd")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "mul<mode>3"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (mult:GPF
+ (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fmul\\t%<s>0, %<s>1, %<s>2"
+ [(set_attr "v8type" "fmul")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "*fnmul<mode>3"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (mult:GPF
+ (neg:GPF (match_operand:GPF 1 "register_operand" "w"))
+ (match_operand:GPF 2 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fnmul\\t%<s>0, %<s>1, %<s>2"
+ [(set_attr "v8type" "fmul")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "div<mode>3"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (div:GPF
+ (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fdiv\\t%<s>0, %<s>1, %<s>2"
+ [(set_attr "v8type" "fdiv")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "neg<mode>2"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (neg:GPF (match_operand:GPF 1 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fneg\\t%<s>0, %<s>1"
+ [(set_attr "v8type" "ffarith")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "sqrt<mode>2"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (sqrt:GPF (match_operand:GPF 1 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fsqrt\\t%<s>0, %<s>1"
+ [(set_attr "v8type" "fsqrt")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "abs<mode>2"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (abs:GPF (match_operand:GPF 1 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fabs\\t%<s>0, %<s>1"
+ [(set_attr "v8type" "ffarith")
+ (set_attr "mode" "<MODE>")]
+)
+
+;; Given that smax/smin do not specify the result when either input is NaN,
+;; we could use either FMAXNM or FMAX for smax, and either FMINNM or FMIN
+;; for smin.
+
+(define_insn "smax<mode>3"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (smax:GPF (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fmaxnm\\t%<s>0, %<s>1, %<s>2"
+ [(set_attr "v8type" "fminmax")
+ (set_attr "mode" "<MODE>")]
+)
+
+(define_insn "smin<mode>3"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (smin:GPF (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fminnm\\t%<s>0, %<s>1, %<s>2"
+ [(set_attr "v8type" "fminmax")
+ (set_attr "mode" "<MODE>")]
+)
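+
+;; A minimal C sketch of the ambiguity (illustrative only): with a NaN
+;; input a comparison-based maximum is not even symmetric, while C99
+;; fmax behaves like FMAXNM and returns the non-NaN operand, so either
+;; instruction is a legitimate implementation of smax.
+;;
+;;   #include <math.h>
+;;   #include <stdio.h>
+;;   int main (void)
+;;   {
+;;     double a = NAN, b = 1.0;
+;;     printf ("%g %g %g\n",
+;;             a > b ? a : b,   /* 1 (comparison is false)    */
+;;             b > a ? b : a,   /* nan (comparison is false)  */
+;;             fmax (a, b));    /* 1, as FMAXNM would give    */
+;;     return 0;
+;;   }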
+
+;; -------------------------------------------------------------------
+;; Reload support
+;; -------------------------------------------------------------------
+
+;; Reload SP+imm where imm cannot be handled by a single ADD instruction.
+;; Must load imm into a scratch register and copy SP to the dest reg before
+;; adding, since SP cannot be used as a source register in an ADD
+;; instruction.
+(define_expand "reload_sp_immediate"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "" ""))
+ (clobber (match_operand:TI 2 "register_operand" "=&r"))])]
+ ""
+ {
+ rtx sp, val, scratch;
+ unsigned regno = REGNO (operands[2]);
+
+ gcc_assert (GET_CODE (operands[1]) == PLUS);
+ sp = XEXP (operands[1], 0);
+ val = XEXP (operands[1], 1);
+ gcc_assert (sp == stack_pointer_rtx);
+ gcc_assert (CONST_INT_P (val));
+
+ /* It is possible that one of the registers we got for operands[2]
+ might coincide with that of operands[0] (which is why we made
+ it TImode). Pick the other one to use as our scratch. */
+ if (regno == REGNO (operands[0]))
+ regno++;
+ scratch = gen_rtx_REG (DImode, regno);
+
+ emit_move_insn (scratch, val);
+ emit_move_insn (operands[0], sp);
+ emit_insn (gen_adddi3 (operands[0], operands[0], scratch));
+ DONE;
+ }
+)
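+
+;; For illustration (the register assignment is assumed, not fixed by the
+;; pattern): reloading sp + 74565 into x0 with x2/x3 as the TImode scratch
+;; pair would expand to something like
+;;
+;;   mov  x2, #74565     // possibly several MOVZ/MOVK instructions
+;;   mov  x0, sp
+;;   add  x0, x0, x2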
+
+(define_expand "aarch64_reload_mov<mode>"
+ [(set (match_operand:TX 0 "register_operand" "=w")
+ (match_operand:TX 1 "register_operand" "w"))
+ (clobber (match_operand:DI 2 "register_operand" "=&r"))
+ ]
+ ""
+ {
+ rtx op0 = simplify_gen_subreg (TImode, operands[0], <MODE>mode, 0);
+ rtx op1 = simplify_gen_subreg (TImode, operands[1], <MODE>mode, 0);
+ emit_insn (gen_aarch64_movtilow_tilow (op0, op1));
+ emit_insn (gen_aarch64_movdi_tihigh (operands[2], op1));
+ emit_insn (gen_aarch64_movtihigh_di (op0, operands[2]));
+ DONE;
+ }
+)
+
+;; The following secondary reload helper patterns are only valid during
+;; and after reload, as we don't want them to be matched by the combiner.
+
+(define_insn "aarch64_movdi_tilow"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (truncate:DI (match_operand:TI 1 "register_operand" "w")))]
+ "reload_completed || reload_in_progress"
+ "fmov\\t%x0, %d1"
+ [(set_attr "v8type" "fmovf2i")
+ (set_attr "mode" "DI")
+ (set_attr "length" "4")
+ ])
+
+(define_insn "aarch64_movdi_tihigh"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (truncate:DI
+ (lshiftrt:TI (match_operand:TI 1 "register_operand" "w")
+ (const_int 64))))]
+ "reload_completed || reload_in_progress"
+ "fmov\\t%x0, %1.d[1]"
+ [(set_attr "v8type" "fmovf2i")
+ (set_attr "mode" "DI")
+ (set_attr "length" "4")
+ ])
+
+(define_insn "aarch64_movtihigh_di"
+ [(set (zero_extract:TI (match_operand:TI 0 "register_operand" "+w")
+ (const_int 64) (const_int 64))
+ (zero_extend:TI (match_operand:DI 1 "register_operand" "r")))]
+ "reload_completed || reload_in_progress"
+ "fmov\\t%0.d[1], %x1"
+ [(set_attr "v8type" "fmovi2f")
+ (set_attr "mode" "DI")
+ (set_attr "length" "4")
+ ])
+
+(define_insn "aarch64_movtilow_di"
+ [(set (match_operand:TI 0 "register_operand" "=w")
+ (zero_extend:TI (match_operand:DI 1 "register_operand" "r")))]
+ "reload_completed || reload_in_progress"
+ "fmov\\t%d0, %x1"
+ [(set_attr "v8type" "fmovi2f")
+ (set_attr "mode" "DI")
+ (set_attr "length" "4")
+ ])
+
+(define_insn "aarch64_movtilow_tilow"
+ [(set (match_operand:TI 0 "register_operand" "=w")
+ (zero_extend:TI
+ (truncate:DI (match_operand:TI 1 "register_operand" "w"))))]
+ "reload_completed || reload_in_progress"
+ "fmov\\t%d0, %d1"
+ [(set_attr "v8type" "fmovi2f")
+ (set_attr "mode" "DI")
+ (set_attr "length" "4")
+ ])
+
+;; The operands of the high and lo_sum constructs are deliberately given
+;; no modes for the ADRP and ADD instructions.  This allows high and
+;; lo_sum to be used with the labels that define jump tables in the
+;; rodata section.
+
+(define_insn "add_losym"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand 2 "aarch64_valid_symref" "S")))]
+ ""
+ "add\\t%0, %1, :lo12:%a2"
+ [(set_attr "v8type" "alu")
+ (set_attr "mode" "DI")]
+)
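+
+;; Illustrative pairing (the symbol name is assumed): materialising the
+;; address of a symbol "sym" combines an ADRP with this lo_sum ADD:
+;;
+;;   adrp x0, sym            // page base of sym
+;;   add  x0, x0, :lo12:sym  // plus the low 12 bits of its offset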
+
+(define_insn "ldr_got_small"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(mem:DI (lo_sum:DI
+ (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "aarch64_valid_symref" "S")))]
+ UNSPEC_GOTSMALLPIC))]
+ ""
+ "ldr\\t%0, [%1, #:got_lo12:%a2]"
+ [(set_attr "v8type" "load1")
+ (set_attr "mode" "DI")]
+)
+
+(define_insn "aarch64_load_tp_hard"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(const_int 0)] UNSPEC_TLS))]
+ ""
+ "mrs\\t%0, tpidr_el0"
+ [(set_attr "v8type" "mrs")
+ (set_attr "mode" "DI")]
+)
+
+;; The TLS ABI specifically requires that the compiler does not schedule
+;; instructions in the TLS stubs, in order to enable linker relaxation.
+;; Therefore we treat the stubs as an atomic sequence.
+(define_expand "tlsgd_small"
+ [(parallel [(set (match_operand 0 "register_operand" "")
+ (call (mem:DI (match_dup 2)) (const_int 1)))
+ (unspec:DI [(match_operand:DI 1 "aarch64_valid_symref" "")] UNSPEC_GOTSMALLTLS)
+ (clobber (reg:DI LR_REGNUM))])]
+ ""
+{
+ operands[2] = aarch64_tls_get_addr ();
+})
+
+(define_insn "*tlsgd_small"
+ [(set (match_operand 0 "register_operand" "")
+ (call (mem:DI (match_operand:DI 2 "" "")) (const_int 1)))
+ (unspec:DI [(match_operand:DI 1 "aarch64_valid_symref" "S")] UNSPEC_GOTSMALLTLS)
+ (clobber (reg:DI LR_REGNUM))
+ ]
+ ""
+ "adrp\\tx0, %A1\;add\\tx0, x0, %L1\;bl\\t%2\;nop"
+ [(set_attr "v8type" "call")
+ (set_attr "length" "16")])
+
+(define_insn "tlsie_small"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "aarch64_tls_ie_symref" "S")]
+ UNSPEC_GOTSMALLTLS))]
+ ""
+ "adrp\\t%0, %A1\;ldr\\t%0, [%0, #%L1]"
+ [(set_attr "v8type" "load1")
+ (set_attr "mode" "DI")
+ (set_attr "length" "8")]
+)
+
+(define_insn "tlsle_small"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "aarch64_tls_le_symref" "S")]
+ UNSPEC_GOTSMALLTLS))]
+ ""
+ "add\\t%0, %1, #%G2\;add\\t%0, %0, #%L2"
+ [(set_attr "v8type" "alu")
+ (set_attr "mode" "DI")
+ (set_attr "length" "8")]
+)
+
+(define_insn "tlsdesc_small"
+ [(set (reg:DI R0_REGNUM)
+ (unspec:DI [(match_operand:DI 0 "aarch64_valid_symref" "S")]
+ UNSPEC_TLSDESC))
+ (clobber (reg:DI LR_REGNUM))
+ (clobber (match_scratch:DI 1 "=r"))]
+ "TARGET_TLS_DESC"
+ "adrp\\tx0, %A0\;ldr\\t%1, [x0, #%L0]\;add\\tx0, x0, %L0\;.tlsdesccall\\t%0\;blr\\t%1"
+ [(set_attr "v8type" "call")
+ (set_attr "length" "16")])
+
+(define_insn "stack_tie"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK [(match_operand:DI 0 "register_operand" "rk")
+ (match_operand:DI 1 "register_operand" "rk")]
+ UNSPEC_PRLG_STK))]
+ ""
+ ""
+ [(set_attr "length" "0")]
+)
+
+;; AdvSIMD patterns
+(include "aarch64-simd.md")
+
+;; Synchronization Builtins
+(include "sync.md")
diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt
new file mode 100644
index 00000000000..6e62c6eae14
--- /dev/null
+++ b/gcc/config/aarch64/aarch64.opt
@@ -0,0 +1,100 @@
+; Machine description for AArch64 architecture.
+; Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+; Contributed by ARM Ltd.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it
+; under the terms of the GNU General Public License as published by
+; the Free Software Foundation; either version 3, or (at your option)
+; any later version.
+;
+; GCC is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+; General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+HeaderInclude
+config/aarch64/aarch64-opts.h
+
+; The cpu/arch option names to use in cpu/arch selection.
+
+Variable
+const char *aarch64_arch_string
+
+Variable
+const char *aarch64_cpu_string
+
+Variable
+const char *aarch64_tune_string
+
+; The TLS dialect names to use with -mtls-dialect.
+
+Enum
+Name(tls_type) Type(enum aarch64_tls_type)
+The possible TLS dialects:
+
+EnumValue
+Enum(tls_type) String(trad) Value(TLS_TRADITIONAL)
+
+EnumValue
+Enum(tls_type) String(desc) Value(TLS_DESCRIPTORS)
+
+; The code model option names for -mcmodel.
+
+Enum
+Name(cmodel) Type(enum aarch64_code_model)
+The code model option names for -mcmodel:
+
+EnumValue
+Enum(cmodel) String(tiny) Value(AARCH64_CMODEL_TINY)
+
+EnumValue
+Enum(cmodel) String(small) Value(AARCH64_CMODEL_SMALL)
+
+EnumValue
+Enum(cmodel) String(large) Value(AARCH64_CMODEL_LARGE)
+
+mbig-endian
+Target Report RejectNegative Mask(BIG_END)
+Assume target CPU is configured as big endian
+
+mgeneral-regs-only
+Target Report RejectNegative Mask(GENERAL_REGS_ONLY)
+Generate code which uses only the general registers
+
+mlittle-endian
+Target Report RejectNegative InverseMask(BIG_END)
+Assume target CPU is configured as little endian
+
+mcmodel=
+Target RejectNegative Joined Enum(cmodel) Var(aarch64_cmodel_var) Init(AARCH64_CMODEL_SMALL)
+Specify the code model
+
+mstrict-align
+Target Report RejectNegative Mask(STRICT_ALIGN)
+Don't assume that unaligned accesses are handled by the system
+
+momit-leaf-frame-pointer
+Target Report Save Var(flag_omit_leaf_frame_pointer) Init(1)
+Omit the frame pointer in leaf functions
+
+mtls-dialect=
+Target RejectNegative Joined Enum(tls_type) Var(aarch64_tls_dialect) Init(TLS_DESCRIPTORS)
+Specify TLS dialect
+
+march=
+Target RejectNegative Joined Var(aarch64_arch_string)
+-march=ARCH Use features of architecture ARCH
+
+mcpu=
+Target RejectNegative Joined Var(aarch64_cpu_string)
+-mcpu=CPU Use features of and optimize for CPU
+
+mtune=
+Target RejectNegative Joined Var(aarch64_tune_string)
+-mtune=CPU Optimize for CPU
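+
+; As an illustrative example of how these options combine (the arch and
+; cpu names are assumed to be valid entries in the arch/core tables, not
+; quoted from them), a typical invocation might look like:
+;
+;   gcc -march=armv8-a -mtune=generic -mcmodel=small -mtls-dialect=desc t.c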
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
new file mode 100644
index 00000000000..a092dfff94e
--- /dev/null
+++ b/gcc/config/aarch64/arm_neon.h
@@ -0,0 +1,25592 @@
+/* ARM NEON intrinsics include file.
+
+ Copyright (C) 2011, 2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _AARCH64_NEON_H_
+#define _AARCH64_NEON_H_
+
+#include <stdint.h>
+
+typedef __builtin_aarch64_simd_qi int8x8_t
+ __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_hi int16x4_t
+ __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_si int32x2_t
+ __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_di int64x1_t;
+typedef __builtin_aarch64_simd_si int32x1_t;
+typedef __builtin_aarch64_simd_hi int16x1_t;
+typedef __builtin_aarch64_simd_qi int8x1_t;
+typedef __builtin_aarch64_simd_df float64x1_t;
+typedef __builtin_aarch64_simd_sf float32x2_t
+ __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_poly8 poly8x8_t
+ __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_poly16 poly16x4_t
+ __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_uqi uint8x8_t
+ __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_uhi uint16x4_t
+ __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_usi uint32x2_t
+ __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_udi uint64x1_t;
+typedef __builtin_aarch64_simd_usi uint32x1_t;
+typedef __builtin_aarch64_simd_uhi uint16x1_t;
+typedef __builtin_aarch64_simd_uqi uint8x1_t;
+typedef __builtin_aarch64_simd_qi int8x16_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_hi int16x8_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_si int32x4_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_di int64x2_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_sf float32x4_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_df float64x2_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_poly8 poly8x16_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_poly16 poly16x8_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_uqi uint8x16_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_uhi uint16x8_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_usi uint32x4_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_udi uint64x2_t
+ __attribute__ ((__vector_size__ (16)));
+
+typedef float float32_t;
+typedef double float64_t;
+typedef __builtin_aarch64_simd_poly8 poly8_t;
+typedef __builtin_aarch64_simd_poly16 poly16_t;
+
+typedef struct int8x8x2_t
+{
+ int8x8_t val[2];
+} int8x8x2_t;
+
+typedef struct int8x16x2_t
+{
+ int8x16_t val[2];
+} int8x16x2_t;
+
+typedef struct int16x4x2_t
+{
+ int16x4_t val[2];
+} int16x4x2_t;
+
+typedef struct int16x8x2_t
+{
+ int16x8_t val[2];
+} int16x8x2_t;
+
+typedef struct int32x2x2_t
+{
+ int32x2_t val[2];
+} int32x2x2_t;
+
+typedef struct int32x4x2_t
+{
+ int32x4_t val[2];
+} int32x4x2_t;
+
+typedef struct int64x1x2_t
+{
+ int64x1_t val[2];
+} int64x1x2_t;
+
+typedef struct int64x2x2_t
+{
+ int64x2_t val[2];
+} int64x2x2_t;
+
+typedef struct uint8x8x2_t
+{
+ uint8x8_t val[2];
+} uint8x8x2_t;
+
+typedef struct uint8x16x2_t
+{
+ uint8x16_t val[2];
+} uint8x16x2_t;
+
+typedef struct uint16x4x2_t
+{
+ uint16x4_t val[2];
+} uint16x4x2_t;
+
+typedef struct uint16x8x2_t
+{
+ uint16x8_t val[2];
+} uint16x8x2_t;
+
+typedef struct uint32x2x2_t
+{
+ uint32x2_t val[2];
+} uint32x2x2_t;
+
+typedef struct uint32x4x2_t
+{
+ uint32x4_t val[2];
+} uint32x4x2_t;
+
+typedef struct uint64x1x2_t
+{
+ uint64x1_t val[2];
+} uint64x1x2_t;
+
+typedef struct uint64x2x2_t
+{
+ uint64x2_t val[2];
+} uint64x2x2_t;
+
+typedef struct float32x2x2_t
+{
+ float32x2_t val[2];
+} float32x2x2_t;
+
+typedef struct float32x4x2_t
+{
+ float32x4_t val[2];
+} float32x4x2_t;
+
+typedef struct float64x2x2_t
+{
+ float64x2_t val[2];
+} float64x2x2_t;
+
+typedef struct float64x1x2_t
+{
+ float64x1_t val[2];
+} float64x1x2_t;
+
+typedef struct poly8x8x2_t
+{
+ poly8x8_t val[2];
+} poly8x8x2_t;
+
+typedef struct poly8x16x2_t
+{
+ poly8x16_t val[2];
+} poly8x16x2_t;
+
+typedef struct poly16x4x2_t
+{
+ poly16x4_t val[2];
+} poly16x4x2_t;
+
+typedef struct poly16x8x2_t
+{
+ poly16x8_t val[2];
+} poly16x8x2_t;
+
+typedef struct int8x8x3_t
+{
+ int8x8_t val[3];
+} int8x8x3_t;
+
+typedef struct int8x16x3_t
+{
+ int8x16_t val[3];
+} int8x16x3_t;
+
+typedef struct int16x4x3_t
+{
+ int16x4_t val[3];
+} int16x4x3_t;
+
+typedef struct int16x8x3_t
+{
+ int16x8_t val[3];
+} int16x8x3_t;
+
+typedef struct int32x2x3_t
+{
+ int32x2_t val[3];
+} int32x2x3_t;
+
+typedef struct int32x4x3_t
+{
+ int32x4_t val[3];
+} int32x4x3_t;
+
+typedef struct int64x1x3_t
+{
+ int64x1_t val[3];
+} int64x1x3_t;
+
+typedef struct int64x2x3_t
+{
+ int64x2_t val[3];
+} int64x2x3_t;
+
+typedef struct uint8x8x3_t
+{
+ uint8x8_t val[3];
+} uint8x8x3_t;
+
+typedef struct uint8x16x3_t
+{
+ uint8x16_t val[3];
+} uint8x16x3_t;
+
+typedef struct uint16x4x3_t
+{
+ uint16x4_t val[3];
+} uint16x4x3_t;
+
+typedef struct uint16x8x3_t
+{
+ uint16x8_t val[3];
+} uint16x8x3_t;
+
+typedef struct uint32x2x3_t
+{
+ uint32x2_t val[3];
+} uint32x2x3_t;
+
+typedef struct uint32x4x3_t
+{
+ uint32x4_t val[3];
+} uint32x4x3_t;
+
+typedef struct uint64x1x3_t
+{
+ uint64x1_t val[3];
+} uint64x1x3_t;
+
+typedef struct uint64x2x3_t
+{
+ uint64x2_t val[3];
+} uint64x2x3_t;
+
+typedef struct float32x2x3_t
+{
+ float32x2_t val[3];
+} float32x2x3_t;
+
+typedef struct float32x4x3_t
+{
+ float32x4_t val[3];
+} float32x4x3_t;
+
+typedef struct float64x2x3_t
+{
+ float64x2_t val[3];
+} float64x2x3_t;
+
+typedef struct float64x1x3_t
+{
+ float64x1_t val[3];
+} float64x1x3_t;
+
+typedef struct poly8x8x3_t
+{
+ poly8x8_t val[3];
+} poly8x8x3_t;
+
+typedef struct poly8x16x3_t
+{
+ poly8x16_t val[3];
+} poly8x16x3_t;
+
+typedef struct poly16x4x3_t
+{
+ poly16x4_t val[3];
+} poly16x4x3_t;
+
+typedef struct poly16x8x3_t
+{
+ poly16x8_t val[3];
+} poly16x8x3_t;
+
+typedef struct int8x8x4_t
+{
+ int8x8_t val[4];
+} int8x8x4_t;
+
+typedef struct int8x16x4_t
+{
+ int8x16_t val[4];
+} int8x16x4_t;
+
+typedef struct int16x4x4_t
+{
+ int16x4_t val[4];
+} int16x4x4_t;
+
+typedef struct int16x8x4_t
+{
+ int16x8_t val[4];
+} int16x8x4_t;
+
+typedef struct int32x2x4_t
+{
+ int32x2_t val[4];
+} int32x2x4_t;
+
+typedef struct int32x4x4_t
+{
+ int32x4_t val[4];
+} int32x4x4_t;
+
+typedef struct int64x1x4_t
+{
+ int64x1_t val[4];
+} int64x1x4_t;
+
+typedef struct int64x2x4_t
+{
+ int64x2_t val[4];
+} int64x2x4_t;
+
+typedef struct uint8x8x4_t
+{
+ uint8x8_t val[4];
+} uint8x8x4_t;
+
+typedef struct uint8x16x4_t
+{
+ uint8x16_t val[4];
+} uint8x16x4_t;
+
+typedef struct uint16x4x4_t
+{
+ uint16x4_t val[4];
+} uint16x4x4_t;
+
+typedef struct uint16x8x4_t
+{
+ uint16x8_t val[4];
+} uint16x8x4_t;
+
+typedef struct uint32x2x4_t
+{
+ uint32x2_t val[4];
+} uint32x2x4_t;
+
+typedef struct uint32x4x4_t
+{
+ uint32x4_t val[4];
+} uint32x4x4_t;
+
+typedef struct uint64x1x4_t
+{
+ uint64x1_t val[4];
+} uint64x1x4_t;
+
+typedef struct uint64x2x4_t
+{
+ uint64x2_t val[4];
+} uint64x2x4_t;
+
+typedef struct float32x2x4_t
+{
+ float32x2_t val[4];
+} float32x2x4_t;
+
+typedef struct float32x4x4_t
+{
+ float32x4_t val[4];
+} float32x4x4_t;
+
+typedef struct float64x2x4_t
+{
+ float64x2_t val[4];
+} float64x2x4_t;
+
+typedef struct float64x1x4_t
+{
+ float64x1_t val[4];
+} float64x1x4_t;
+
+typedef struct poly8x8x4_t
+{
+ poly8x8_t val[4];
+} poly8x8x4_t;
+
+typedef struct poly8x16x4_t
+{
+ poly8x16_t val[4];
+} poly8x16x4_t;
+
+typedef struct poly16x4x4_t
+{
+ poly16x4_t val[4];
+} poly16x4x4_t;
+
+typedef struct poly16x8x4_t
+{
+ poly16x8_t val[4];
+} poly16x8x4_t;
+
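+/* A minimal usage sketch (illustrative only; a and b are assumed to be
+   int8x8_t values): the intNxMxK_t structs above are plain arrays of K
+   vectors, reached through the val member, e.g.
+
+     int8x8x2_t pair;
+     pair.val[0] = vadd_s8 (a, b);
+     pair.val[1] = vsub_s8 (a, b);
+
+   They are typically used as the operand and result types of the
+   multi-register load and store intrinsics.  */
+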
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vadd_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vadd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vadd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vaddq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vaddq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a + __b;
+}
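+
+/* A short usage sketch (illustrative only): because these intrinsics are
+   defined in terms of GCC's generic vector arithmetic, vadd_s8 is
+   equivalent to writing __a + __b directly and should compile to a
+   single SIMD ADD:
+
+     int8x8_t a = { 1, 2, 3, 4, 5, 6, 7, 8 };
+     int8x8_t b = { 8, 7, 6, 5, 4, 3, 2, 1 };
+     int8x8_t c = vadd_s8 (a, b);   // every lane of c is 9
+   */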
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_saddlv8qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_saddlv4hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_saddlv2si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddl_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uaddlv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddl_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uaddlv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddl_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_uaddlv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddl_high_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_saddl2v16qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddl_high_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_saddl2v8hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddl_high_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_saddl2v4si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddl_high_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uaddl2v16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddl_high_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uaddl2v8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddl_high_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_uaddl2v4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddw_s8 (int16x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_saddwv8qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddw_s16 (int32x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_saddwv4hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddw_s32 (int64x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_saddwv2si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddw_u8 (uint16x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uaddwv8qi ((int16x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddw_u16 (uint32x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uaddwv4hi ((int32x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddw_u32 (uint64x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_uaddwv2si ((int64x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddw_high_s8 (int16x8_t __a, int8x16_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_saddw2v16qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddw_high_s16 (int32x4_t __a, int16x8_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_saddw2v8hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddw_high_s32 (int64x2_t __a, int32x4_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_saddw2v4si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddw_high_u8 (uint16x8_t __a, uint8x16_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uaddw2v16qi ((int16x8_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddw_high_u16 (uint32x4_t __a, uint16x8_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uaddw2v8hi ((int32x4_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddw_high_u32 (uint64x2_t __a, uint32x4_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_uaddw2v4si ((int64x2_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vhadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t) __builtin_aarch64_shaddv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vhadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_shaddv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vhadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_shaddv2si (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vhadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_uhaddv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vhadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_uhaddv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vhadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_uhaddv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vhaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t) __builtin_aarch64_shaddv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vhaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_shaddv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vhaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_shaddv4si (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_uhaddv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uhaddv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uhaddv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrhadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t) __builtin_aarch64_srhaddv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrhadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_srhaddv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrhadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_srhaddv2si (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrhadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_urhaddv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrhadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_urhaddv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrhadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_urhaddv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t) __builtin_aarch64_srhaddv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_srhaddv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_srhaddv4si (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_urhaddv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_urhaddv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_urhaddv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vaddhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t) __builtin_aarch64_addhnv8hi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vaddhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_addhnv4si (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vaddhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_addhnv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vaddhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_addhnv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vaddhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_addhnv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vaddhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_addhnv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vraddhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t) __builtin_aarch64_raddhnv8hi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vraddhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_raddhnv4si (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vraddhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_raddhnv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vraddhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_raddhnv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vraddhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_raddhnv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vraddhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_raddhnv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vaddhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int8x16_t) __builtin_aarch64_addhn2v8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int16x8_t) __builtin_aarch64_addhn2v4si (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
+{
+ return (int32x4_t) __builtin_aarch64_addhn2v2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaddhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint8x16_t) __builtin_aarch64_addhn2v8hi ((int8x8_t) __a,
+ (int16x8_t) __b,
+ (int16x8_t) __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint16x8_t) __builtin_aarch64_addhn2v4si ((int16x4_t) __a,
+ (int32x4_t) __b,
+ (int32x4_t) __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
+{
+ return (uint32x4_t) __builtin_aarch64_addhn2v2di ((int32x2_t) __a,
+ (int64x2_t) __b,
+ (int64x2_t) __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vraddhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int8x16_t) __builtin_aarch64_raddhn2v8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vraddhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int16x8_t) __builtin_aarch64_raddhn2v4si (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vraddhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
+{
+ return (int32x4_t) __builtin_aarch64_raddhn2v2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vraddhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint8x16_t) __builtin_aarch64_raddhn2v8hi ((int8x8_t) __a,
+ (int16x8_t) __b,
+ (int16x8_t) __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vraddhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint16x8_t) __builtin_aarch64_raddhn2v4si ((int16x4_t) __a,
+ (int32x4_t) __b,
+ (int32x4_t) __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vraddhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
+{
+ return (uint32x4_t) __builtin_aarch64_raddhn2v2di ((int32x2_t) __a,
+ (int64x2_t) __b,
+ (int64x2_t) __c);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vdiv_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __a / __b;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vdivq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __a / __b;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vdivq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return __a / __b;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmul_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmul_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmul_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (poly8x8_t) __builtin_aarch64_pmulv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmulq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmulq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmulq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmulq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (poly8x16_t) __builtin_aarch64_pmulv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vand_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vand_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vand_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vand_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vand_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vand_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vand_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vand_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vandq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vandq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vandq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vandq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vandq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vandq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vandq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vandq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vorr_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vorr_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vorr_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vorr_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vorr_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vorr_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vorr_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vorr_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vorrq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vorrq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vorrq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vorrq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vorrq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vorrq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vorrq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vorrq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+veor_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+veor_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+veor_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+veor_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+veor_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+veor_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+veor_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+veor_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+veorq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+veorq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+veorq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+veorq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+veorq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+veorq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+veorq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+veorq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vbic_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vbic_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vbic_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vbic_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vbic_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vbic_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vbic_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vbic_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vbicq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vbicq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vbicq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vbicq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vbicq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vbicq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vbicq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vorn_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vorn_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vorn_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vorn_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vorn_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vorn_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vorn_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vorn_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vornq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vornq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vornq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vornq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vornq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vornq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vornq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vornq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsub_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsub_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsub_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vsub_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsub_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsub_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsub_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsub_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsub_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vsubq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vsubq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_ssublv8qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_ssublv4hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_ssublv2si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubl_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_usublv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubl_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_usublv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubl_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_usublv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
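+/* vsubl_high_<t>: as vsubl, but operating on the upper halves of the
+   two 128-bit inputs (the "2" forms of the ssubl/usubl builtins).  */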
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubl_high_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_ssubl2v16qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubl_high_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_ssubl2v8hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubl_high_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_ssubl2v4si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubl_high_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_usubl2v16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubl_high_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_usubl2v8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubl_high_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_usubl2v4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
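+/* vsubw_<t>: wide subtract.  The narrow elements of __b are widened to
+   the element width of __a before the subtraction.  */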
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubw_s8 (int16x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_ssubwv8qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubw_s16 (int32x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_ssubwv4hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubw_s32 (int64x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_ssubwv2si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubw_u8 (uint16x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_usubwv8qi ((int16x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubw_u16 (uint32x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_usubwv4hi ((int32x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubw_u32 (uint64x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_usubwv2si ((int64x2_t) __a,
+ (int32x2_t) __b);
+}
+
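+/* vsubw_high_<t>: as vsubw, but only the upper half of the 128-bit
+   vector __b is widened and subtracted.  */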
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubw_high_s8 (int16x8_t __a, int8x16_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_ssubw2v16qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubw_high_s16 (int32x4_t __a, int16x8_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_ssubw2v8hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubw_high_s32 (int64x2_t __a, int32x4_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_ssubw2v4si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubw_high_u8 (uint16x8_t __a, uint8x16_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_usubw2v16qi ((int16x8_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubw_high_u16 (uint32x4_t __a, uint16x8_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_usubw2v8hi ((int32x4_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubw_high_u32 (uint64x2_t __a, uint32x4_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_usubw2v4si ((int64x2_t) __a,
+ (int32x4_t) __b);
+}
+
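+/* vqadd_<t>/vqaddq_<t>: saturating addition.  Sums that would overflow
+   are clamped to the limits of the element type; e.g. vqadd_s8 on two
+   lanes holding 100 yields 127 rather than wrapping to -56.  */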
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t) __builtin_aarch64_sqaddv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_sqaddv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_sqaddv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqadd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_sqadddi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_uqaddv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_uqaddv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_uqaddv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqadd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqadddi ((int64x1_t) __a,
+ (int64x1_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t) __builtin_aarch64_sqaddv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_sqaddv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_sqaddv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqaddq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_sqaddv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_uqaddv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uqaddv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uqaddv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqaddq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_uqaddv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
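+/* vqsub_<t>/vqsubq_<t>: saturating subtraction, clamping on overflow
+   just as vqadd does; an unsigned lane never wraps below zero.  */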
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqsub_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t) __builtin_aarch64_sqsubv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqsub_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_sqsubv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqsub_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_sqsubv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqsub_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_sqsubdi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqsub_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_uqsubv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqsub_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_uqsubv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqsub_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_uqsubv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqsub_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqsubdi ((int64x1_t) __a,
+ (int64x1_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t) __builtin_aarch64_sqsubv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_sqsubv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_sqsubv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqsubq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_sqsubv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_uqsubv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uqsubv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uqsubv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqsubq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_uqsubv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
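+/* vqneg_<t>/vqnegq_<t>: saturating negation.  The only overflowing
+   case is negating the most negative value, e.g. vqneg_s8 maps -128
+   to 127 instead of back to -128.  */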
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqneg_s8 (int8x8_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_sqnegv8qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqneg_s16 (int16x4_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_sqnegv4hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqneg_s32 (int32x2_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_sqnegv2si (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqnegq_s8 (int8x16_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_sqnegv16qi (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqnegq_s16 (int16x8_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_sqnegv8hi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqnegq_s32 (int32x4_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_sqnegv4si (__a);
+}
+
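+/* vqabs_<t>/vqabsq_<t>: saturating absolute value; as with vqneg, the
+   most negative input saturates to the maximum positive value.  */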
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqabs_s8 (int8x8_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_sqabsv8qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqabs_s16 (int16x4_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_sqabsv4hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqabs_s32 (int32x2_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_sqabsv2si (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqabsq_s8 (int8x16_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_sqabsv16qi (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqabsq_s16 (int16x8_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_sqabsv8hi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqabsq_s32 (int32x4_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_sqabsv4si (__a);
+}
+
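+/* vqdmulh_<t>/vqdmulhq_<t>: saturating doubling multiply returning the
+   high half, i.e. for 16-bit lanes sat (2 * __a * __b) >> 16.  In Q15
+   terms vqdmulh_s16 (16384, 16384) is 0.5 * 0.5 = 0.25, i.e. 8192;
+   only INT16_MIN * INT16_MIN saturates (to 32767).  */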
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_sqdmulhv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_sqdmulhv2si (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_sqdmulhv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_sqdmulhv4si (__a, __b);
+}
+
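+/* vqrdmulh_<t>/vqrdmulhq_<t>: as vqdmulh, but rounding: a bias of half
+   the discarded bits is added before the high half is taken.  */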
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_sqrdmulhv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_sqrdmulhv2si (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_sqrdmulhv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_sqrdmulhv4si (__a, __b);
+}
+
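+/* vcreate_<t>: reinterpret a 64-bit scalar bit pattern as a 64-bit
+   vector, lane 0 coming from the least significant bits of __a.  All
+   but the f64 variant are plain casts; float64x1_t goes through a
+   builtin.  */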
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vcreate_s8 (uint64_t __a)
+{
+ return (int8x8_t) __a;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vcreate_s16 (uint64_t __a)
+{
+ return (int16x4_t) __a;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcreate_s32 (uint64_t __a)
+{
+ return (int32x2_t) __a;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vcreate_s64 (uint64_t __a)
+{
+ return (int64x1_t) __a;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcreate_f32 (uint64_t __a)
+{
+ return (float32x2_t) __a;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcreate_u8 (uint64_t __a)
+{
+ return (uint8x8_t) __a;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcreate_u16 (uint64_t __a)
+{
+ return (uint16x4_t) __a;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcreate_u32 (uint64_t __a)
+{
+ return (uint32x2_t) __a;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcreate_u64 (uint64_t __a)
+{
+ return (uint64x1_t) __a;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vcreate_f64 (uint64_t __a)
+{
+ return (float64x1_t) __builtin_aarch64_createdf (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vcreate_p8 (uint64_t __a)
+{
+ return (poly8x8_t) __a;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vcreate_p16 (uint64_t __a)
+{
+ return (poly16x4_t) __a;
+}
+
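+/* vget_lane_<t>/vgetq_lane_<t>: extract the scalar in lane __b, which
+   must be a constant index in range for the vector type.  Note that
+   vgetq_lane_s64 reuses the unsigned builtin; at the full 64-bit
+   element width the choice between sign and zero extension is
+   immaterial.  */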
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vget_lane_s8 (int8x8_t __a, const int __b)
+{
+ return (int8_t) __builtin_aarch64_get_lane_signedv8qi (__a, __b);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vget_lane_s16 (int16x4_t __a, const int __b)
+{
+ return (int16_t) __builtin_aarch64_get_lane_signedv4hi (__a, __b);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vget_lane_s32 (int32x2_t __a, const int __b)
+{
+ return (int32_t) __builtin_aarch64_get_lane_signedv2si (__a, __b);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vget_lane_f32 (float32x2_t __a, const int __b)
+{
+ return (float32_t) __builtin_aarch64_get_lanev2sf (__a, __b);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vget_lane_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8_t) __builtin_aarch64_get_lane_unsignedv8qi ((int8x8_t) __a,
+ __b);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vget_lane_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16_t) __builtin_aarch64_get_lane_unsignedv4hi ((int16x4_t) __a,
+ __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vget_lane_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32_t) __builtin_aarch64_get_lane_unsignedv2si ((int32x2_t) __a,
+ __b);
+}
+
+__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
+vget_lane_p8 (poly8x8_t __a, const int __b)
+{
+ return (poly8_t) __builtin_aarch64_get_lane_unsignedv8qi ((int8x8_t) __a,
+ __b);
+}
+
+__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
+vget_lane_p16 (poly16x4_t __a, const int __b)
+{
+ return (poly16_t) __builtin_aarch64_get_lane_unsignedv4hi ((int16x4_t) __a,
+ __b);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vget_lane_s64 (int64x1_t __a, const int __b)
+{
+ return (int64_t) __builtin_aarch64_get_lanedi (__a, __b);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vget_lane_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64_t) __builtin_aarch64_get_lanedi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vgetq_lane_s8 (int8x16_t __a, const int __b)
+{
+ return (int8_t) __builtin_aarch64_get_lane_signedv16qi (__a, __b);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vgetq_lane_s16 (int16x8_t __a, const int __b)
+{
+ return (int16_t) __builtin_aarch64_get_lane_signedv8hi (__a, __b);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vgetq_lane_s32 (int32x4_t __a, const int __b)
+{
+ return (int32_t) __builtin_aarch64_get_lane_signedv4si (__a, __b);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vgetq_lane_f32 (float32x4_t __a, const int __b)
+{
+ return (float32_t) __builtin_aarch64_get_lanev4sf (__a, __b);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vgetq_lane_f64 (float64x2_t __a, const int __b)
+{
+ return (float64_t) __builtin_aarch64_get_lanev2df (__a, __b);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vgetq_lane_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8_t) __builtin_aarch64_get_lane_unsignedv16qi ((int8x16_t) __a,
+ __b);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vgetq_lane_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16_t) __builtin_aarch64_get_lane_unsignedv8hi ((int16x8_t) __a,
+ __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vgetq_lane_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32_t) __builtin_aarch64_get_lane_unsignedv4si ((int32x4_t) __a,
+ __b);
+}
+
+__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
+vgetq_lane_p8 (poly8x16_t __a, const int __b)
+{
+ return (poly8_t) __builtin_aarch64_get_lane_unsignedv16qi ((int8x16_t) __a,
+ __b);
+}
+
+__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
+vgetq_lane_p16 (poly16x8_t __a, const int __b)
+{
+ return (poly16_t) __builtin_aarch64_get_lane_unsignedv8hi ((int16x8_t) __a,
+ __b);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vgetq_lane_s64 (int64x2_t __a, const int __b)
+{
+ return (int64_t) __builtin_aarch64_get_lane_unsignedv2di (__a, __b);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vgetq_lane_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64_t) __builtin_aarch64_get_lane_unsignedv2di ((int64x2_t) __a,
+ __b);
+}
+
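+/* vreinterpret_<t>/vreinterpretq_<t>: bitcasts between vector types of
+   the same overall size.  No bits change; only the element type seen
+   by the compiler does.  */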
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s8 (int8x8_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv8qi (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s16 (int16x4_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv4hi (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s32 (int32x2_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv2si (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s64 (int64x1_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qidi (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_f32 (float32x2_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv2sf (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u8 (uint8x8_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u16 (uint16x4_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u32 (uint32x2_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u64 (uint64x1_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_p16 (poly16x4_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s8 (int8x16_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv16qi (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s16 (int16x8_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s32 (int32x4_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s64 (int64x2_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_f32 (float32x4_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv4sf (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u8 (uint8x16_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u16 (uint16x8_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t)
+ __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u32 (uint32x4_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv4si ((int32x4_t)
+ __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u64 (uint64x2_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv2di ((int64x2_t)
+ __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_p16 (poly16x8_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t)
+ __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s8 (int8x8_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv8qi (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s16 (int16x4_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv4hi (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s32 (int32x2_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv2si (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s64 (int64x1_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hidi (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_f32 (float32x2_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv2sf (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u8 (uint8x8_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u16 (uint16x4_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u32 (uint32x2_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u64 (uint64x1_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_p8 (poly8x8_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s8 (int8x16_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s16 (int16x8_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv8hi (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s32 (int32x4_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s64 (int64x2_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_f32 (float32x4_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv4sf (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u8 (uint8x16_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u16 (uint16x8_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u32 (uint32x4_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u64 (uint64x2_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_p8 (poly8x16_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s8 (int8x8_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfv8qi (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s16 (int16x4_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfv4hi (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s32 (int32x2_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfv2si (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s64 (int64x1_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfdi (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u8 (uint8x8_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u16 (uint16x4_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfv4hi ((int16x4_t)
+ __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u32 (uint32x2_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfv2si ((int32x2_t)
+ __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u64 (uint64x1_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfdi ((int64x1_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p8 (poly8x8_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p16 (poly16x4_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfv4hi ((int16x4_t)
+ __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s8 (int8x16_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv16qi (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s16 (int16x8_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv8hi (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s32 (int32x4_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv4si (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s64 (int64x2_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv2di (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u8 (uint8x16_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u16 (uint16x8_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv8hi ((int16x8_t)
+ __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u32 (uint32x4_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv4si ((int32x4_t)
+ __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u64 (uint64x2_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv2di ((int64x2_t)
+ __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p8 (poly8x16_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p16 (poly16x8_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv8hi ((int16x8_t)
+ __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_s8 (int8x8_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdiv8qi (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_s16 (int16x4_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdiv4hi (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_s32 (int32x2_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdiv2si (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_f32 (float32x2_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdiv2sf (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u8 (uint8x8_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u16 (uint16x4_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u32 (uint32x2_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u64 (uint64x1_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p8 (poly8x8_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p16 (poly16x4_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s8 (int8x16_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div16qi (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s16 (int16x8_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div8hi (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s32 (int32x4_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div4si (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_f32 (float32x4_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div4sf (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u8 (uint8x16_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u16 (uint16x8_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u32 (uint32x4_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u64 (uint64x2_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p8 (poly8x16_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p16 (poly16x8_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s8 (int8x8_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdiv8qi (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s16 (int16x4_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdiv4hi (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s32 (int32x2_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdiv2si (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s64 (int64x1_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdidi (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_f32 (float32x2_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdiv2sf (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u8 (uint8x8_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u16 (uint16x4_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u32 (uint32x2_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p8 (poly8x8_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p16 (poly16x4_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s8 (int8x16_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div16qi (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s16 (int16x8_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div8hi (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s32 (int32x4_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div4si (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s64 (int64x2_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div2di (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_f32 (float32x4_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div4sf (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u8 (uint8x16_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u16 (uint16x8_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u32 (uint32x4_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p8 (poly8x16_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p16 (poly16x8_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s16 (int16x4_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qiv4hi (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s32 (int32x2_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qiv2si (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s64 (int64x1_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qidi (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_f32 (float32x2_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qiv2sf (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u8 (uint8x8_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u16 (uint16x4_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u32 (uint32x2_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u64 (uint64x1_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p8 (poly8x8_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p16 (poly16x4_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s16 (int16x8_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s32 (int32x4_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s64 (int64x2_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_f32 (float32x4_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv4sf (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u8 (uint8x16_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u16 (uint16x8_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u32 (uint32x4_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u64 (uint64x2_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p8 (poly8x16_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p16 (poly16x8_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s8 (int8x8_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hiv8qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s32 (int32x2_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hiv2si (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s64 (int64x1_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hidi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_f32 (float32x2_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hiv2sf (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u8 (uint8x8_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u16 (uint16x4_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u32 (uint32x2_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u64 (uint64x1_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p8 (poly8x8_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p16 (poly16x4_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s8 (int8x16_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s32 (int32x4_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s64 (int64x2_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_f32 (float32x4_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv4sf (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u8 (uint8x16_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u16 (uint16x8_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u32 (uint32x4_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u64 (uint64x2_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p8 (poly8x16_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p16 (poly16x8_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s8 (int8x8_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2siv8qi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s16 (int16x4_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2siv4hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s64 (int64x1_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2sidi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_f32 (float32x2_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2siv2sf (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u8 (uint8x8_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u16 (uint16x4_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u32 (uint32x2_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2siv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u64 (uint64x1_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2sidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p8 (poly8x8_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p16 (poly16x4_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s8 (int8x16_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv16qi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s16 (int16x8_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv8hi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s64 (int64x2_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv2di (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_f32 (float32x4_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv4sf (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u8 (uint8x16_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u16 (uint16x8_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u32 (uint32x4_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u64 (uint64x2_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_p8 (poly8x16_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_p16 (poly16x8_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s8 (int8x8_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv8qi (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s16 (int16x4_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv4hi (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s32 (int32x2_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv2si (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s64 (int64x1_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qidi (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_f32 (float32x2_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv2sf (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u16 (uint16x4_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u32 (uint32x2_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u64 (uint64x1_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p8 (poly8x8_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p16 (poly16x4_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s8 (int8x16_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv16qi (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s16 (int16x8_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s32 (int32x4_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s64 (int64x2_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_f32 (float32x4_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv4sf (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u16 (uint16x8_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t)
+ __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u32 (uint32x4_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv4si ((int32x4_t)
+ __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u64 (uint64x2_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv2di ((int64x2_t)
+ __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_p8 (poly8x16_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_p16 (poly16x8_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t)
+ __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s8 (int8x8_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv8qi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s16 (int16x4_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv4hi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s32 (int32x2_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv2si (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s64 (int64x1_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hidi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_f32 (float32x2_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv2sf (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u8 (uint8x8_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u32 (uint32x2_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u64 (uint64x1_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p8 (poly8x8_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p16 (poly16x4_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s8 (int8x16_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s16 (int16x8_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv8hi (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s32 (int32x4_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s64 (int64x2_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_f32 (float32x4_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv4sf (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u8 (uint8x16_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u32 (uint32x4_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u64 (uint64x2_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p8 (poly8x16_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p16 (poly16x8_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s8 (int8x8_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2siv8qi (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s16 (int16x4_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2siv4hi (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s32 (int32x2_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2siv2si (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s64 (int64x1_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2sidi (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_f32 (float32x2_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2siv2sf (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u8 (uint8x8_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u16 (uint16x4_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u64 (uint64x1_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2sidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p8 (poly8x8_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p16 (poly16x4_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s8 (int8x16_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv16qi (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s16 (int16x8_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv8hi (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s32 (int32x4_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv4si (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s64 (int64x2_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv2di (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_f32 (float32x4_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv4sf (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u8 (uint8x16_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u16 (uint16x8_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u64 (uint64x2_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p8 (poly8x16_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p16 (poly16x8_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv8hi ((int16x8_t) __a);
+}
+
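+/* Note: the vreinterpret_* functions above are pure bit-pattern casts;
+   source and destination always have the same total width (64 or 128
+   bits) and no lane is value-converted.  A minimal usage sketch, with a
+   hypothetical helper name:
+
+     uint8x8_t bytes_of_u32x2 (uint32x2_t __v)
+     {
+       return vreinterpret_u8_u32 (__v); // same 64 bits, viewed as bytes
+     }
+*/
+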
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vcombine_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x16_t) __builtin_aarch64_combinev8qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vcombine_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_combinev4hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcombine_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_combinev2si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcombine_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_combinedi (__a, __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcombine_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x4_t) __builtin_aarch64_combinev2sf (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcombine_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_combinev8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcombine_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_combinev4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcombine_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_combinev2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcombine_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_combinedi ((int64x1_t) __a,
+ (int64x1_t) __b);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vcombine_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return (float64x2_t) __builtin_aarch64_combinedf (__a, __b);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vcombine_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (poly8x16_t) __builtin_aarch64_combinev8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vcombine_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ return (poly16x8_t) __builtin_aarch64_combinev4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
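+/* Note: vcombine_* concatenates two 64-bit vectors into one 128-bit
+   vector, with the first operand in the low half.  A minimal usage
+   sketch, with a hypothetical helper name:
+
+     int16x8_t concat_s16 (int16x4_t __lo, int16x4_t __hi)
+     {
+       return vcombine_s16 (__lo, __hi); // __lo -> lanes 0-3, __hi -> 4-7
+     }
+*/
+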
+/* Start of temporary inline asm implementations. */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vaba_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
+{
+ int8x8_t result;
+ __asm__ ("saba %0.8b,%2.8b,%3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vaba_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
+{
+ int16x4_t result;
+ __asm__ ("saba %0.4h,%2.4h,%3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vaba_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
+{
+ int32x2_t result;
+ __asm__ ("saba %0.2s,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vaba_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
+{
+ uint8x8_t result;
+ __asm__ ("uaba %0.8b,%2.8b,%3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vaba_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
+{
+ uint16x4_t result;
+ __asm__ ("uaba %0.4h,%2.4h,%3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vaba_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
+{
+ uint32x2_t result;
+ __asm__ ("uaba %0.2s,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
+{
+ int16x8_t result;
+ __asm__ ("sabal2 %0.8h,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
+{
+ int32x4_t result;
+ __asm__ ("sabal2 %0.4s,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
+{
+ int64x2_t result;
+ __asm__ ("sabal2 %0.2d,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
+{
+ uint16x8_t result;
+ __asm__ ("uabal2 %0.8h,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
+{
+ uint32x4_t result;
+ __asm__ ("uabal2 %0.4s,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vabal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
+{
+ uint64x2_t result;
+ __asm__ ("uabal2 %0.2d,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabal_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
+{
+ int16x8_t result;
+ __asm__ ("sabal %0.8h,%2.8b,%3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabal_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
+{
+ int32x4_t result;
+ __asm__ ("sabal %0.4s,%2.4h,%3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabal_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
+{
+ int64x2_t result;
+ __asm__ ("sabal %0.2d,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
+{
+ uint16x8_t result;
+ __asm__ ("uabal %0.8h,%2.8b,%3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
+{
+ uint32x4_t result;
+ __asm__ ("uabal %0.4s,%2.4h,%3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vabal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
+{
+ uint64x2_t result;
+ __asm__ ("uabal %0.2d,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vabaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
+{
+ int8x16_t result;
+ __asm__ ("saba %0.16b,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
+{
+ int16x8_t result;
+ __asm__ ("saba %0.8h,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
+{
+ int32x4_t result;
+ __asm__ ("saba %0.4s,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vabaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
+{
+ uint8x16_t result;
+ __asm__ ("uaba %0.16b,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
+{
+ uint16x8_t result;
+ __asm__ ("uaba %0.8h,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
+{
+ uint32x4_t result;
+ __asm__ ("uaba %0.4s,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
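+/* Note: the vaba_*, vabal_* and vabal_high_* intrinsics compute an
+   absolute difference and accumulate it into the first operand; the
+   "l" and "l_high" forms also widen each result element:
+
+     // per lane (modulo arithmetic on the element width):
+     //   r[i] = a[i] + |b[i] - c[i]|
+*/
+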
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vabd_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("fabd %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vabd_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("sabd %0.8b, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vabd_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("sabd %0.4h, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vabd_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("sabd %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vabd_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("uabd %0.8b, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vabd_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("uabd %0.4h, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vabd_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("uabd %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vabdd_f64 (float64_t a, float64_t b)
+{
+ float64_t result;
+ __asm__ ("fabd %d0, %d1, %d2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabdl_high_s8 (int8x16_t a, int8x16_t b)
+{
+ int16x8_t result;
+ __asm__ ("sabdl2 %0.8h,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabdl_high_s16 (int16x8_t a, int16x8_t b)
+{
+ int32x4_t result;
+ __asm__ ("sabdl2 %0.4s,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabdl_high_s32 (int32x4_t a, int32x4_t b)
+{
+ int64x2_t result;
+ __asm__ ("sabdl2 %0.2d,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabdl_high_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint16x8_t result;
+ __asm__ ("uabdl2 %0.8h,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabdl_high_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint32x4_t result;
+ __asm__ ("uabdl2 %0.4s,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vabdl_high_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint64x2_t result;
+ __asm__ ("uabdl2 %0.2d,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabdl_s8 (int8x8_t a, int8x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("sabdl %0.8h, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabdl_s16 (int16x4_t a, int16x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("sabdl %0.4s, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabdl_s32 (int32x2_t a, int32x2_t b)
+{
+ int64x2_t result;
+ __asm__ ("sabdl %0.2d, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabdl_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("uabdl %0.8h, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabdl_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("uabdl %0.4s, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vabdl_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("uabdl %0.2d, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vabdq_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("fabd %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vabdq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("fabd %0.2d, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vabdq_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("sabd %0.16b, %1.16b, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabdq_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("sabd %0.8h, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabdq_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("sabd %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vabdq_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("uabd %0.16b, %1.16b, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabdq_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("uabd %0.8h, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabdq_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("uabd %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vabds_f32 (float32_t a, float32_t b)
+{
+ float32_t result;
+ __asm__ ("fabd %s0, %s1, %s2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
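+/* Note: vabd_* is the per-lane absolute difference |a[i] - b[i]|, and
+   vabdl_* and vabdl_high_* additionally widen each result element.  A
+   minimal usage sketch, with a hypothetical helper name:
+
+     uint8x8_t abs_diff_u8 (uint8x8_t __a, uint8x8_t __b)
+     {
+       return vabd_u8 (__a, __b); // |a[i] - b[i]| always fits in a byte
+     }
+*/
+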
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vabs_f32 (float32x2_t a)
+{
+ float32x2_t result;
+ __asm__ ("fabs %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vabs_s8 (int8x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("abs %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vabs_s16 (int16x4_t a)
+{
+ int16x4_t result;
+ __asm__ ("abs %0.4h,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vabs_s32 (int32x2_t a)
+{
+ int32x2_t result;
+ __asm__ ("abs %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vabsq_f32 (float32x4_t a)
+{
+ float32x4_t result;
+ __asm__ ("fabs %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vabsq_f64 (float64x2_t a)
+{
+ float64x2_t result;
+ __asm__ ("fabs %0.2d,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vabsq_s8 (int8x16_t a)
+{
+ int8x16_t result;
+ __asm__ ("abs %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabsq_s16 (int16x8_t a)
+{
+ int16x8_t result;
+ __asm__ ("abs %0.8h,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabsq_s32 (int32x4_t a)
+{
+ int32x4_t result;
+ __asm__ ("abs %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabsq_s64 (int64x2_t a)
+{
+ int64x2_t result;
+ __asm__ ("abs %0.2d,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vacged_f64 (float64_t a, float64_t b)
+{
+ uint64_t result;
+ __asm__ ("facge %d0,%d1,%d2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vacges_f32 (float32_t a, float32_t b)
+{
+ uint32_t result;
+ __asm__ ("facge %s0,%s1,%s2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vacgtd_f64 (float64_t a, float64_t b)
+{
+ uint64_t result;
+ __asm__ ("facgt %d0,%d1,%d2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vacgts_f32 (float32_t a, float32_t b)
+{
+ uint32_t result;
+ __asm__ ("facgt %s0,%s1,%s2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
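+/* Note: facge/facgt compare absolute values, so these scalar forms
+   return an all-ones mask when |a| >= |b| (facge) or |a| > |b| (facgt)
+   and zero otherwise.  A minimal usage sketch, with a hypothetical
+   helper name:
+
+     int abs_ge_f64 (float64_t __a, float64_t __b)
+     {
+       return vacged_f64 (__a, __b) != 0; // mask is all-ones or zero
+     }
+*/
+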
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vaddlv_s8 (int8x8_t a)
+{
+ int16_t result;
+ __asm__ ("saddlv %h0,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vaddlv_s16 (int16x4_t a)
+{
+ int32_t result;
+ __asm__ ("saddlv %s0,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vaddlv_u8 (uint8x8_t a)
+{
+ uint16_t result;
+ __asm__ ("uaddlv %h0,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vaddlv_u16 (uint16x4_t a)
+{
+ uint32_t result;
+ __asm__ ("uaddlv %s0,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vaddlvq_s8 (int8x16_t a)
+{
+ int16_t result;
+ __asm__ ("saddlv %h0,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vaddlvq_s16 (int16x8_t a)
+{
+ int32_t result;
+ __asm__ ("saddlv %s0,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vaddlvq_s32 (int32x4_t a)
+{
+ int64_t result;
+ __asm__ ("saddlv %d0,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vaddlvq_u8 (uint8x16_t a)
+{
+ uint16_t result;
+ __asm__ ("uaddlv %h0,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vaddlvq_u16 (uint16x8_t a)
+{
+ uint32_t result;
+ __asm__ ("uaddlv %s0,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vaddlvq_u32 (uint32x4_t a)
+{
+ uint64_t result;
+ __asm__ ("uaddlv %d0,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vaddv_s8 (int8x8_t a)
+{
+ int8_t result;
+ __asm__ ("addv %b0,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vaddv_s16 (int16x4_t a)
+{
+ int16_t result;
+ __asm__ ("addv %h0,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vaddv_u8 (uint8x8_t a)
+{
+ uint8_t result;
+ __asm__ ("addv %b0,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vaddv_u16 (uint16x4_t a)
+{
+ uint16_t result;
+ __asm__ ("addv %h0,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vaddvq_s8 (int8x16_t a)
+{
+ int8_t result;
+ __asm__ ("addv %b0,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vaddvq_s16 (int16x8_t a)
+{
+ int16_t result;
+ __asm__ ("addv %h0,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vaddvq_s32 (int32x4_t a)
+{
+ int32_t result;
+ __asm__ ("addv %s0,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vaddvq_u8 (uint8x16_t a)
+{
+ uint8_t result;
+ __asm__ ("addv %b0,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vaddvq_u16 (uint16x8_t a)
+{
+ uint16_t result;
+ __asm__ ("addv %h0,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vaddvq_u32 (uint32x4_t a)
+{
+ uint32_t result;
+ __asm__ ("addv %s0,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
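+/* Note: addv and saddlv/uaddlv reduce across all lanes of one vector;
+   the vaddlv_* forms widen before summing, so the total cannot wrap.
+   A minimal usage sketch, with a hypothetical helper name:
+
+     uint16_t sum_bytes (uint8x8_t __v)
+     {
+       return vaddlv_u8 (__v); // at most 8 * 255 = 2040, fits in 16 bits
+     }
+*/
+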
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vbsl_f32 (uint32x2_t a, float32x2_t b, float32x2_t c)
+{
+ float32x2_t result;
+ __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vbsl_p8 (uint8x8_t a, poly8x8_t b, poly8x8_t c)
+{
+ poly8x8_t result;
+ __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vbsl_p16 (uint16x4_t a, poly16x4_t b, poly16x4_t c)
+{
+ poly16x4_t result;
+ __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vbsl_s8 (uint8x8_t a, int8x8_t b, int8x8_t c)
+{
+ int8x8_t result;
+ __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vbsl_s16 (uint16x4_t a, int16x4_t b, int16x4_t c)
+{
+ int16x4_t result;
+ __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vbsl_s32 (uint32x2_t a, int32x2_t b, int32x2_t c)
+{
+ int32x2_t result;
+ __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vbsl_s64 (uint64x1_t a, int64x1_t b, int64x1_t c)
+{
+ int64x1_t result;
+ __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vbsl_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
+{
+ uint8x8_t result;
+ __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vbsl_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
+{
+ uint16x4_t result;
+ __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vbsl_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
+{
+ uint32x2_t result;
+ __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vbsl_u64 (uint64x1_t a, uint64x1_t b, uint64x1_t c)
+{
+ uint64x1_t result;
+ __asm__ ("bsl %0.8b, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vbslq_f32 (uint32x4_t a, float32x4_t b, float32x4_t c)
+{
+ float32x4_t result;
+ __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vbslq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
+{
+ float64x2_t result;
+ __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vbslq_p8 (uint8x16_t a, poly8x16_t b, poly8x16_t c)
+{
+ poly8x16_t result;
+ __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vbslq_p16 (uint16x8_t a, poly16x8_t b, poly16x8_t c)
+{
+ poly16x8_t result;
+ __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vbslq_s8 (uint8x16_t a, int8x16_t b, int8x16_t c)
+{
+ int8x16_t result;
+ __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vbslq_s16 (uint16x8_t a, int16x8_t b, int16x8_t c)
+{
+ int16x8_t result;
+ __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vbslq_s32 (uint32x4_t a, int32x4_t b, int32x4_t c)
+{
+ int32x4_t result;
+ __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vbslq_s64 (uint64x2_t a, int64x2_t b, int64x2_t c)
+{
+ int64x2_t result;
+ __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vbslq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
+{
+ uint8x16_t result;
+ __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vbslq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
+{
+ uint16x8_t result;
+ __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vbslq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
+{
+ uint32x4_t result;
+ __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vbslq_u64 (uint64x2_t a, uint64x2_t b, uint64x2_t c)
+{
+ uint64x2_t result;
+ __asm__ ("bsl %0.16b, %2.16b, %3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
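+/* Note: vbsl_* is a bitwise select, r = (a & b) | (~a & c); the mask in
+   the first operand is typically the result of a comparison.  A minimal
+   sketch with a hypothetical helper name (NaN handling ignored):
+
+     float32x2_t select_max (float32x2_t __x, float32x2_t __y)
+     {
+       uint32x2_t __ge = vcge_f32 (__x, __y); // all-ones where x >= y
+       return vbsl_f32 (__ge, __x, __y);      // x where x >= y, else y
+     }
+*/
+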
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcage_f32 (float32x2_t a, float32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("facge %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcageq_f32 (float32x4_t a, float32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("facge %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcageq_f64 (float64x2_t a, float64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("facge %0.2d, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcagt_f32 (float32x2_t a, float32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("facgt %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcagtq_f32 (float32x4_t a, float32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("facgt %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcagtq_f64 (float64x2_t a, float64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("facgt %0.2d, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcale_f32 (float32x2_t a, float32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("facge %0.2s, %2.2s, %1.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcaleq_f32 (float32x4_t a, float32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("facge %0.4s, %2.4s, %1.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcaleq_f64 (float64x2_t a, float64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("facge %0.2d, %2.2d, %1.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcalt_f32 (float32x2_t a, float32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("facgt %0.2s, %2.2s, %1.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcaltq_f32 (float32x4_t a, float32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("facgt %0.4s, %2.4s, %1.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcaltq_f64 (float64x2_t a, float64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("facgt %0.2d, %2.2d, %1.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
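+/* Note: there is no dedicated "absolute less-than" instruction; the
+   vcale_* and vcalt_* forms above simply swap the source operands of
+   facge/facgt, since |a| <= |b| is the same test as |b| >= |a|:
+
+     // equivalent formulations:
+     //   vcalt_f32 (a, b) == vcagt_f32 (b, a)
+*/
+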
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceq_f32 (float32x2_t a, float32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("fcmeq %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceq_f64 (float64x1_t a, float64x1_t b)
+{
+ uint64x1_t result;
+ __asm__ ("fcmeq %d0, %d1, %d2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vceqd_f64 (float64_t a, float64_t b)
+{
+ uint64_t result;
+ __asm__ ("fcmeq %d0,%d1,%d2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqq_f32 (float32x4_t a, float32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("fcmeq %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vceqq_f64 (float64x2_t a, float64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("fcmeq %0.2d, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vceqs_f32 (float32_t a, float32_t b)
+{
+ uint32_t result;
+ __asm__ ("fcmeq %s0,%s1,%s2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vceqzd_f64 (float64_t a)
+{
+ uint64_t result;
+ __asm__ ("fcmeq %d0,%d1,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vceqzs_f32 (float32_t a)
+{
+ uint32_t result;
+ __asm__ ("fcmeq %s0,%s1,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcge_f32 (float32x2_t a, float32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("fcmge %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcge_f64 (float64x1_t a, float64x1_t b)
+{
+ uint64x1_t result;
+ __asm__ ("fcmge %d0, %d1, %d2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgeq_f32 (float32x4_t a, float32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("fcmge %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgeq_f64 (float64x2_t a, float64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("fcmge %0.2d, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgt_f32 (float32x2_t a, float32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("fcmgt %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgt_f64 (float64x1_t a, float64x1_t b)
+{
+ uint64x1_t result;
+ __asm__ ("fcmgt %d0, %d1, %d2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtq_f32 (float32x4_t a, float32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("fcmgt %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgtq_f64 (float64x2_t a, float64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("fcmgt %0.2d, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcle_f32 (float32x2_t a, float32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("fcmge %0.2s, %2.2s, %1.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcle_f64 (float64x1_t a, float64x1_t b)
+{
+ uint64x1_t result;
+ __asm__ ("fcmge %d0, %d2, %d1"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcleq_f32 (float32x4_t a, float32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("fcmge %0.4s, %2.4s, %1.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcleq_f64 (float64x2_t a, float64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("fcmge %0.2d, %2.2d, %1.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vcls_s8 (int8x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("cls %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vcls_s16 (int16x4_t a)
+{
+ int16x4_t result;
+ __asm__ ("cls %0.4h,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcls_s32 (int32x2_t a)
+{
+ int32x2_t result;
+ __asm__ ("cls %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vclsq_s8 (int8x16_t a)
+{
+ int8x16_t result;
+ __asm__ ("cls %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vclsq_s16 (int16x8_t a)
+{
+ int16x8_t result;
+ __asm__ ("cls %0.8h,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vclsq_s32 (int32x4_t a)
+{
+ int32x4_t result;
+ __asm__ ("cls %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
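+/* Note: cls counts leading sign bits, i.e. how many consecutive bits
+   immediately below the sign bit equal it.  For example, vcls_s8 maps
+   0x03 (0b00000011) to 5 and 0xFC (0b11111100) to 5.  */
+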
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclt_f32 (float32x2_t a, float32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("fcmgt %0.2s, %2.2s, %1.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vclt_f64 (float64x1_t a, float64x1_t b)
+{
+ uint64x1_t result;
+ __asm__ ("fcmgt %d0, %d2, %d1"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltq_f32 (float32x4_t a, float32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("fcmgt %0.4s, %2.4s, %1.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcltq_f64 (float64x2_t a, float64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("fcmgt %0.2d, %2.2d, %1.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
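+/* Note: as with the absolute comparisons, vcle_* and vclt_* reverse the
+   operands of fcmge/fcmgt, so a <= b is evaluated as b >= a:
+
+     // equivalent formulations:
+     //   vclt_f32 (a, b) == vcgt_f32 (b, a)
+*/
+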
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vclz_s8 (int8x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("clz %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vclz_s16 (int16x4_t a)
+{
+ int16x4_t result;
+ __asm__ ("clz %0.4h,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vclz_s32 (int32x2_t a)
+{
+ int32x2_t result;
+ __asm__ ("clz %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclz_u8 (uint8x8_t a)
+{
+ uint8x8_t result;
+ __asm__ ("clz %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclz_u16 (uint16x4_t a)
+{
+ uint16x4_t result;
+ __asm__ ("clz %0.4h,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclz_u32 (uint32x2_t a)
+{
+ uint32x2_t result;
+ __asm__ ("clz %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vclzq_s8 (int8x16_t a)
+{
+ int8x16_t result;
+ __asm__ ("clz %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vclzq_s16 (int16x8_t a)
+{
+ int16x8_t result;
+ __asm__ ("clz %0.8h,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vclzq_s32 (int32x4_t a)
+{
+ int32x4_t result;
+ __asm__ ("clz %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vclzq_u8 (uint8x16_t a)
+{
+ uint8x16_t result;
+ __asm__ ("clz %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vclzq_u16 (uint16x8_t a)
+{
+ uint16x8_t result;
+ __asm__ ("clz %0.8h,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vclzq_u32 (uint32x4_t a)
+{
+ uint32x4_t result;
+ __asm__ ("clz %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vcnt_p8 (poly8x8_t a)
+{
+ poly8x8_t result;
+ __asm__ ("cnt %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vcnt_s8 (int8x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("cnt %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcnt_u8 (uint8x8_t a)
+{
+ uint8x8_t result;
+ __asm__ ("cnt %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vcntq_p8 (poly8x16_t a)
+{
+ poly8x16_t result;
+ __asm__ ("cnt %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vcntq_s8 (int8x16_t a)
+{
+ int8x16_t result;
+ __asm__ ("cnt %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcntq_u8 (uint8x16_t a)
+{
+ uint8x16_t result;
+ __asm__ ("cnt %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
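+/* Note: cnt is a per-byte population count; counts for wider elements
+   can be built by following it with pairwise additions.  A minimal
+   usage sketch, with a hypothetical helper name:
+
+     uint8x8_t popcount_u8 (uint8x8_t __v)
+     {
+       return vcnt_u8 (__v); // lane i: number of set bits in v[i]
+     }
+*/
+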
+#define vcopyq_lane_f32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ float32x4_t c_ = (c); \
+ float32x4_t a_ = (a); \
+ float32x4_t result; \
+ __asm__ ("ins %0.s[%2], %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_f64(a, b, c, d) \
+ __extension__ \
+ ({ \
+ float64x2_t c_ = (c); \
+ float64x2_t a_ = (a); \
+ float64x2_t result; \
+ __asm__ ("ins %0.d[%2], %3.d[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_p8(a, b, c, d) \
+ __extension__ \
+ ({ \
+ poly8x16_t c_ = (c); \
+ poly8x16_t a_ = (a); \
+ poly8x16_t result; \
+ __asm__ ("ins %0.b[%2], %3.b[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_p16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ poly16x8_t c_ = (c); \
+ poly16x8_t a_ = (a); \
+ poly16x8_t result; \
+ __asm__ ("ins %0.h[%2], %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_s8(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int8x16_t c_ = (c); \
+ int8x16_t a_ = (a); \
+ int8x16_t result; \
+ __asm__ ("ins %0.b[%2], %3.b[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x8_t a_ = (a); \
+ int16x8_t result; \
+ __asm__ ("ins %0.h[%2], %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("ins %0.s[%2], %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_s64(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int64x2_t c_ = (c); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("ins %0.d[%2], %3.d[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_u8(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint8x16_t c_ = (c); \
+ uint8x16_t a_ = (a); \
+ uint8x16_t result; \
+ __asm__ ("ins %0.b[%2], %3.b[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x8_t a_ = (a); \
+ uint16x8_t result; \
+ __asm__ ("ins %0.h[%2], %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("ins %0.s[%2], %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_u64(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint64x2_t c_ = (c); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("ins %0.d[%2], %3.d[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+/* vcvt_f16_f32 not supported */
+
+/* vcvt_f32_f16 not supported */
+
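+/* Conversions between float64x2_t and float32x2_t use the narrowing
+   FCVTN and widening FCVTL instructions; integer <-> float conversions
+   use SCVTF/UCVTF and the FCVTZ* family below.  */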
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_f32_f64 (float64x2_t a)
+{
+ float32x2_t result;
+ __asm__ ("fcvtn %0.2s,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_f32_s32 (int32x2_t a)
+{
+ float32x2_t result;
+ __asm__ ("scvtf %0.2s, %1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_f32_u32 (uint32x2_t a)
+{
+ float32x2_t result;
+ __asm__ ("ucvtf %0.2s, %1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vcvt_f64_f32 (float32x2_t a)
+{
+ float64x2_t result;
+ __asm__ ("fcvtl %0.2d,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vcvt_f64_s64 (int64x1_t a)
+{
+ float64x1_t result;
+ __asm__ ("scvtf %d0, %d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vcvt_f64_u64 (uint64x1_t a)
+{
+ float64x1_t result;
+ __asm__ ("ucvtf %d0, %d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+/* vcvt_high_f16_f32 not supported */
+
+/* vcvt_high_f32_f16 not supported */
+
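+/* Forward declaration: vcvt_high_f32_f64 below uses vdup_n_f32, which
+   is defined later in this file.  */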
+static float32x2_t vdup_n_f32 (float32_t);
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvt_high_f32_f64 (float32x2_t a, float64x2_t b)
+{
+ float32x4_t result = vcombine_f32 (a, vdup_n_f32 (0.0f));
+ __asm__ ("fcvtn2 %0.4s,%2.2d"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vcvt_high_f64_f32 (float32x4_t a)
+{
+ float64x2_t result;
+ __asm__ ("fcvtl2 %0.2d,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
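+/* Fixed-point conversions: the immediate operand b is the number of
+   fractional bits and must be a compile-time constant in range for
+   the element width.  */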
+#define vcvt_n_f32_s32(a, b) \
+ __extension__ \
+ ({ \
+ int32x2_t a_ = (a); \
+ float32x2_t result; \
+ __asm__ ("scvtf %0.2s, %1.2s, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvt_n_f32_u32(a, b) \
+ __extension__ \
+ ({ \
+ uint32x2_t a_ = (a); \
+ float32x2_t result; \
+ __asm__ ("ucvtf %0.2s, %1.2s, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvt_n_s32_f32(a, b) \
+ __extension__ \
+ ({ \
+ float32x2_t a_ = (a); \
+ int32x2_t result; \
+ __asm__ ("fcvtzs %0.2s, %1.2s, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvt_n_u32_f32(a, b) \
+ __extension__ \
+ ({ \
+ float32x2_t a_ = (a); \
+ uint32x2_t result; \
+ __asm__ ("fcvtzu %0.2s, %1.2s, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvt_s32_f32 (float32x2_t a)
+{
+ int32x2_t result;
+ __asm__ ("fcvtzs %0.2s, %1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvt_u32_f32 (float32x2_t a)
+{
+ uint32x2_t result;
+ __asm__ ("fcvtzu %0.2s, %1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
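+/* Float-to-integer conversions with explicit rounding modes:
+   FCVTA* rounds to nearest with ties away from zero, FCVTM* toward
+   minus infinity, FCVTN* to nearest even, FCVTP* toward plus infinity;
+   the plain vcvt* forms above use FCVTZ* (toward zero).  */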
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvta_s32_f32 (float32x2_t a)
+{
+ int32x2_t result;
+ __asm__ ("fcvtas %0.2s, %1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvta_u32_f32 (float32x2_t a)
+{
+ uint32x2_t result;
+ __asm__ ("fcvtau %0.2s, %1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtad_s64_f64 (float64_t a)
+{
+ int64_t result;
+ __asm__ ("fcvtas %d0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtad_u64_f64 (float64_t a)
+{
+ uint64_t result;
+ __asm__ ("fcvtau %d0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtaq_s32_f32 (float32x4_t a)
+{
+ int32x4_t result;
+ __asm__ ("fcvtas %0.4s, %1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcvtaq_s64_f64 (float64x2_t a)
+{
+ int64x2_t result;
+ __asm__ ("fcvtas %0.2d, %1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtaq_u32_f32 (float32x4_t a)
+{
+ uint32x4_t result;
+ __asm__ ("fcvtau %0.4s, %1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcvtaq_u64_f64 (float64x2_t a)
+{
+ uint64x2_t result;
+ __asm__ ("fcvtau %0.2d, %1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtas_s32_f32 (float32_t a)
+{
+ int32_t result;
+ __asm__ ("fcvtas %s0,%s1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtas_u32_f32 (float32_t a)
+{
+ uint32_t result;
+ __asm__ ("fcvtau %s0,%s1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vcvtd_f64_s64 (int64_t a)
+{
+ float64_t result;
+ __asm__ ("scvtf %d0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vcvtd_f64_u64 (uint64_t a)
+{
+ float64_t result;
+ __asm__ ("ucvtf %d0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vcvtd_n_f64_s64(a, b) \
+ __extension__ \
+ ({ \
+ int64_t a_ = (a); \
+ float64_t result; \
+ __asm__ ("scvtf %d0,%d1,#%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtd_n_f64_u64(a, b) \
+ __extension__ \
+ ({ \
+ uint64_t a_ = (a); \
+ float64_t result; \
+ __asm__ ("ucvtf %d0,%d1,#%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtd_n_s64_f64(a, b) \
+ __extension__ \
+ ({ \
+ float64_t a_ = (a); \
+ int64_t result; \
+ __asm__ ("fcvtzs %d0,%d1,#%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtd_n_u64_f64(a, b) \
+ __extension__ \
+ ({ \
+ float64_t a_ = (a); \
+ uint64_t result; \
+ __asm__ ("fcvtzu %d0,%d1,#%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtd_s64_f64 (float64_t a)
+{
+ int64_t result;
+ __asm__ ("fcvtzs %d0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtd_u64_f64 (float64_t a)
+{
+ uint64_t result;
+ __asm__ ("fcvtzu %d0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvtm_s32_f32 (float32x2_t a)
+{
+ int32x2_t result;
+ __asm__ ("fcvtms %0.2s, %1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvtm_u32_f32 (float32x2_t a)
+{
+ uint32x2_t result;
+ __asm__ ("fcvtmu %0.2s, %1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtmd_s64_f64 (float64_t a)
+{
+ int64_t result;
+ __asm__ ("fcvtms %d0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtmd_u64_f64 (float64_t a)
+{
+ uint64_t result;
+ __asm__ ("fcvtmu %d0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtmq_s32_f32 (float32x4_t a)
+{
+ int32x4_t result;
+ __asm__ ("fcvtms %0.4s, %1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcvtmq_s64_f64 (float64x2_t a)
+{
+ int64x2_t result;
+ __asm__ ("fcvtms %0.2d, %1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtmq_u32_f32 (float32x4_t a)
+{
+ uint32x4_t result;
+ __asm__ ("fcvtmu %0.4s, %1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcvtmq_u64_f64 (float64x2_t a)
+{
+ uint64x2_t result;
+ __asm__ ("fcvtmu %0.2d, %1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtms_s32_f32 (float32_t a)
+{
+ int32_t result;
+ __asm__ ("fcvtms %s0,%s1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtms_u32_f32 (float32_t a)
+{
+ uint32_t result;
+ __asm__ ("fcvtmu %s0,%s1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvtn_s32_f32 (float32x2_t a)
+{
+ int32x2_t result;
+ __asm__ ("fcvtns %0.2s, %1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvtn_u32_f32 (float32x2_t a)
+{
+ uint32x2_t result;
+ __asm__ ("fcvtnu %0.2s, %1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtnd_s64_f64 (float64_t a)
+{
+ int64_t result;
+ __asm__ ("fcvtns %d0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtnd_u64_f64 (float64_t a)
+{
+ uint64_t result;
+ __asm__ ("fcvtnu %d0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtnq_s32_f32 (float32x4_t a)
+{
+ int32x4_t result;
+ __asm__ ("fcvtns %0.4s, %1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcvtnq_s64_f64 (float64x2_t a)
+{
+ int64x2_t result;
+ __asm__ ("fcvtns %0.2d, %1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtnq_u32_f32 (float32x4_t a)
+{
+ uint32x4_t result;
+ __asm__ ("fcvtnu %0.4s, %1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcvtnq_u64_f64 (float64x2_t a)
+{
+ uint64x2_t result;
+ __asm__ ("fcvtnu %0.2d, %1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtns_s32_f32 (float32_t a)
+{
+ int32_t result;
+ __asm__ ("fcvtns %s0,%s1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtns_u32_f32 (float32_t a)
+{
+ uint32_t result;
+ __asm__ ("fcvtnu %s0,%s1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvtp_s32_f32 (float32x2_t a)
+{
+ int32x2_t result;
+ __asm__ ("fcvtps %0.2s, %1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvtp_u32_f32 (float32x2_t a)
+{
+ uint32x2_t result;
+ __asm__ ("fcvtpu %0.2s, %1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtpd_s64_f64 (float64_t a)
+{
+ int64_t result;
+ __asm__ ("fcvtps %d0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtpd_u64_f64 (float64_t a)
+{
+ uint64_t result;
+ __asm__ ("fcvtpu %d0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtpq_s32_f32 (float32x4_t a)
+{
+ int32x4_t result;
+ __asm__ ("fcvtps %0.4s, %1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcvtpq_s64_f64 (float64x2_t a)
+{
+ int64x2_t result;
+ __asm__ ("fcvtps %0.2d, %1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtpq_u32_f32 (float32x4_t a)
+{
+ uint32x4_t result;
+ __asm__ ("fcvtpu %0.4s, %1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcvtpq_u64_f64 (float64x2_t a)
+{
+ uint64x2_t result;
+ __asm__ ("fcvtpu %0.2d, %1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtps_s32_f32 (float32_t a)
+{
+ int32_t result;
+ __asm__ ("fcvtps %s0,%s1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtps_u32_f32 (float32_t a)
+{
+ uint32_t result;
+ __asm__ ("fcvtpu %s0,%s1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtq_f32_s32 (int32x4_t a)
+{
+ float32x4_t result;
+ __asm__ ("scvtf %0.4s, %1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtq_f32_u32 (uint32x4_t a)
+{
+ float32x4_t result;
+ __asm__ ("ucvtf %0.4s, %1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vcvtq_f64_s64 (int64x2_t a)
+{
+ float64x2_t result;
+ __asm__ ("scvtf %0.2d, %1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vcvtq_f64_u64 (uint64x2_t a)
+{
+ float64x2_t result;
+ __asm__ ("ucvtf %0.2d, %1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vcvtq_n_f32_s32(a, b) \
+ __extension__ \
+ ({ \
+ int32x4_t a_ = (a); \
+ float32x4_t result; \
+ __asm__ ("scvtf %0.4s, %1.4s, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtq_n_f32_u32(a, b) \
+ __extension__ \
+ ({ \
+ uint32x4_t a_ = (a); \
+ float32x4_t result; \
+ __asm__ ("ucvtf %0.4s, %1.4s, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtq_n_f64_s64(a, b) \
+ __extension__ \
+ ({ \
+ int64x2_t a_ = (a); \
+ float64x2_t result; \
+ __asm__ ("scvtf %0.2d, %1.2d, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtq_n_f64_u64(a, b) \
+ __extension__ \
+ ({ \
+ uint64x2_t a_ = (a); \
+ float64x2_t result; \
+ __asm__ ("ucvtf %0.2d, %1.2d, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtq_n_s32_f32(a, b) \
+ __extension__ \
+ ({ \
+ float32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("fcvtzs %0.4s, %1.4s, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtq_n_s64_f64(a, b) \
+ __extension__ \
+ ({ \
+ float64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("fcvtzs %0.2d, %1.2d, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtq_n_u32_f32(a, b) \
+ __extension__ \
+ ({ \
+ float32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("fcvtzu %0.4s, %1.4s, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtq_n_u64_f64(a, b) \
+ __extension__ \
+ ({ \
+ float64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("fcvtzu %0.2d, %1.2d, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtq_s32_f32 (float32x4_t a)
+{
+ int32x4_t result;
+ __asm__ ("fcvtzs %0.4s, %1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcvtq_s64_f64 (float64x2_t a)
+{
+ int64x2_t result;
+ __asm__ ("fcvtzs %0.2d, %1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtq_u32_f32 (float32x4_t a)
+{
+ uint32x4_t result;
+ __asm__ ("fcvtzu %0.4s, %1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcvtq_u64_f64 (float64x2_t a)
+{
+ uint64x2_t result;
+ __asm__ ("fcvtzu %0.2d, %1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vcvts_f32_s32 (int32_t a)
+{
+ float32_t result;
+ __asm__ ("scvtf %s0,%s1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vcvts_f32_u32 (uint32_t a)
+{
+ float32_t result;
+ __asm__ ("ucvtf %s0,%s1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vcvts_n_f32_s32(a, b) \
+ __extension__ \
+ ({ \
+ int32_t a_ = (a); \
+ float32_t result; \
+ __asm__ ("scvtf %s0,%s1,#%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvts_n_f32_u32(a, b) \
+ __extension__ \
+ ({ \
+ uint32_t a_ = (a); \
+ float32_t result; \
+ __asm__ ("ucvtf %s0,%s1,#%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvts_n_s32_f32(a, b) \
+ __extension__ \
+ ({ \
+ float32_t a_ = (a); \
+ int32_t result; \
+ __asm__ ("fcvtzs %s0,%s1,#%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvts_n_u32_f32(a, b) \
+ __extension__ \
+ ({ \
+ float32_t a_ = (a); \
+ uint32_t result; \
+ __asm__ ("fcvtzu %s0,%s1,#%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvts_s32_f32 (float32_t a)
+{
+ int32_t result;
+ __asm__ ("fcvtzs %s0,%s1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvts_u32_f32 (float32_t a)
+{
+ uint32_t result;
+ __asm__ ("fcvtzu %s0,%s1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvtx_f32_f64 (float64x2_t a)
+{
+ float32x2_t result;
+ __asm__ ("fcvtxn %0.2s,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtx_high_f32_f64 (float64x2_t a)
+{
+ float32x4_t result;
+ __asm__ ("fcvtxn2 %0.4s,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vcvtxd_f32_f64 (float64_t a)
+{
+ float32_t result;
+ __asm__ ("fcvtxn %s0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
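+/* The vdup intrinsics broadcast either a vector lane (vdup*_lane_*) or
+   a scalar held in a general register (vdup*_n_*) to every element of
+   the result.  */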
+#define vdup_lane_f32(a, b) \
+ __extension__ \
+ ({ \
+ float32x2_t a_ = (a); \
+ float32x2_t result; \
+ __asm__ ("dup %0.2s,%1.s[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdup_lane_p8(a, b) \
+ __extension__ \
+ ({ \
+ poly8x8_t a_ = (a); \
+ poly8x8_t result; \
+ __asm__ ("dup %0.8b,%1.b[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdup_lane_p16(a, b) \
+ __extension__ \
+ ({ \
+ poly16x4_t a_ = (a); \
+ poly16x4_t result; \
+ __asm__ ("dup %0.4h,%1.h[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdup_lane_s8(a, b) \
+ __extension__ \
+ ({ \
+ int8x8_t a_ = (a); \
+ int8x8_t result; \
+ __asm__ ("dup %0.8b,%1.b[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdup_lane_s16(a, b) \
+ __extension__ \
+ ({ \
+ int16x4_t a_ = (a); \
+ int16x4_t result; \
+ __asm__ ("dup %0.4h,%1.h[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdup_lane_s32(a, b) \
+ __extension__ \
+ ({ \
+ int32x2_t a_ = (a); \
+ int32x2_t result; \
+ __asm__ ("dup %0.2s,%1.s[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdup_lane_s64(a, b) \
+ __extension__ \
+ ({ \
+ int64x1_t a_ = (a); \
+ int64x1_t result; \
+ __asm__ ("ins %0.d[0],%1.d[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdup_lane_u8(a, b) \
+ __extension__ \
+ ({ \
+ uint8x8_t a_ = (a); \
+ uint8x8_t result; \
+ __asm__ ("dup %0.8b,%1.b[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdup_lane_u16(a, b) \
+ __extension__ \
+ ({ \
+ uint16x4_t a_ = (a); \
+ uint16x4_t result; \
+ __asm__ ("dup %0.4h,%1.h[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdup_lane_u32(a, b) \
+ __extension__ \
+ ({ \
+ uint32x2_t a_ = (a); \
+ uint32x2_t result; \
+ __asm__ ("dup %0.2s,%1.s[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdup_lane_u64(a, b) \
+ __extension__ \
+ ({ \
+ uint64x1_t a_ = (a); \
+ uint64x1_t result; \
+ __asm__ ("ins %0.d[0],%1.d[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vdup_n_f32 (float32_t a)
+{
+ float32x2_t result;
+ __asm__ ("dup %0.2s, %w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vdup_n_p8 (uint32_t a)
+{
+ poly8x8_t result;
+ __asm__ ("dup %0.8b,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vdup_n_p16 (uint32_t a)
+{
+ poly16x4_t result;
+ __asm__ ("dup %0.4h,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vdup_n_s8 (int32_t a)
+{
+ int8x8_t result;
+ __asm__ ("dup %0.8b,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vdup_n_s16 (int32_t a)
+{
+ int16x4_t result;
+ __asm__ ("dup %0.4h,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vdup_n_s32 (int32_t a)
+{
+ int32x2_t result;
+ __asm__ ("dup %0.2s,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vdup_n_s64 (int64_t a)
+{
+ int64x1_t result;
+ __asm__ ("ins %0.d[0],%x1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vdup_n_u8 (uint32_t a)
+{
+ uint8x8_t result;
+ __asm__ ("dup %0.8b,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vdup_n_u16 (uint32_t a)
+{
+ uint16x4_t result;
+ __asm__ ("dup %0.4h,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vdup_n_u32 (uint32_t a)
+{
+ uint32x2_t result;
+ __asm__ ("dup %0.2s,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vdup_n_u64 (uint64_t a)
+{
+ uint64x1_t result;
+ __asm__ ("ins %0.d[0],%x1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vdupd_lane_f64(a, b) \
+ __extension__ \
+ ({ \
+ float64x2_t a_ = (a); \
+ float64_t result; \
+ __asm__ ("dup %d0, %1.d[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdupq_lane_f32(a, b) \
+ __extension__ \
+ ({ \
+ float32x2_t a_ = (a); \
+ float32x4_t result; \
+ __asm__ ("dup %0.4s,%1.s[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdupq_lane_f64(a, b) \
+ __extension__ \
+ ({ \
+ float64x1_t a_ = (a); \
+ float64x2_t result; \
+ __asm__ ("dup %0.2d,%1.d[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdupq_lane_p8(a, b) \
+ __extension__ \
+ ({ \
+ poly8x8_t a_ = (a); \
+ poly8x16_t result; \
+ __asm__ ("dup %0.16b,%1.b[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdupq_lane_p16(a, b) \
+ __extension__ \
+ ({ \
+ poly16x4_t a_ = (a); \
+ poly16x8_t result; \
+ __asm__ ("dup %0.8h,%1.h[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdupq_lane_s8(a, b) \
+ __extension__ \
+ ({ \
+ int8x8_t a_ = (a); \
+ int8x16_t result; \
+ __asm__ ("dup %0.16b,%1.b[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdupq_lane_s16(a, b) \
+ __extension__ \
+ ({ \
+ int16x4_t a_ = (a); \
+ int16x8_t result; \
+ __asm__ ("dup %0.8h,%1.h[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdupq_lane_s32(a, b) \
+ __extension__ \
+ ({ \
+ int32x2_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("dup %0.4s,%1.s[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdupq_lane_s64(a, b) \
+ __extension__ \
+ ({ \
+ int64x1_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("dup %0.2d,%1.d[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdupq_lane_u8(a, b) \
+ __extension__ \
+ ({ \
+ uint8x8_t a_ = (a); \
+ uint8x16_t result; \
+ __asm__ ("dup %0.16b,%1.b[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdupq_lane_u16(a, b) \
+ __extension__ \
+ ({ \
+ uint16x4_t a_ = (a); \
+ uint16x8_t result; \
+ __asm__ ("dup %0.8h,%1.h[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdupq_lane_u32(a, b) \
+ __extension__ \
+ ({ \
+ uint32x2_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("dup %0.4s,%1.s[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vdupq_lane_u64(a, b) \
+ __extension__ \
+ ({ \
+ uint64x1_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("dup %0.2d,%1.d[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vdupq_n_f32 (float32_t a)
+{
+ float32x4_t result;
+ __asm__ ("dup %0.4s, %w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vdupq_n_f64 (float64_t a)
+{
+ float64x2_t result;
+ __asm__ ("dup %0.2d, %x1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vdupq_n_p8 (uint32_t a)
+{
+ poly8x16_t result;
+ __asm__ ("dup %0.16b,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vdupq_n_p16 (uint32_t a)
+{
+ poly16x8_t result;
+ __asm__ ("dup %0.8h,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vdupq_n_s8 (int32_t a)
+{
+ int8x16_t result;
+ __asm__ ("dup %0.16b,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vdupq_n_s16 (int32_t a)
+{
+ int16x8_t result;
+ __asm__ ("dup %0.8h,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vdupq_n_s32 (int32_t a)
+{
+ int32x4_t result;
+ __asm__ ("dup %0.4s,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vdupq_n_s64 (int64_t a)
+{
+ int64x2_t result;
+ __asm__ ("dup %0.2d,%x1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vdupq_n_u8 (uint32_t a)
+{
+ uint8x16_t result;
+ __asm__ ("dup %0.16b,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vdupq_n_u16 (uint32_t a)
+{
+ uint16x8_t result;
+ __asm__ ("dup %0.8h,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vdupq_n_u32 (uint32_t a)
+{
+ uint32x4_t result;
+ __asm__ ("dup %0.4s,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vdupq_n_u64 (uint64_t a)
+{
+ uint64x2_t result;
+ __asm__ ("dup %0.2d,%x1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vdups_lane_f32(a, b) \
+ __extension__ \
+ ({ \
+ float32x4_t a_ = (a); \
+ float32_t result; \
+ __asm__ ("dup %s0, %1.s[%2]" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
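+/* vext_<type> (a, b, c) extracts a vector from the pair (a, b): the
+   result starts at element c of a and continues into the low elements
+   of b.  EXT takes a byte offset, hence the #c*<element-size> scaling
+   in the immediate.  */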
+#define vext_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x2_t b_ = (b); \
+ float32x2_t a_ = (a); \
+ float32x2_t result; \
+ __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*4" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x1_t b_ = (b); \
+ float64x1_t a_ = (a); \
+ float64x1_t result; \
+ __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*8" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x8_t b_ = (b); \
+ poly8x8_t a_ = (a); \
+ poly8x8_t result; \
+ __asm__ ("ext %0.8b,%1.8b,%2.8b,%3" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x4_t b_ = (b); \
+ poly16x4_t a_ = (a); \
+ poly16x4_t result; \
+ __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*2" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_s8(a, b, c) \
+ __extension__ \
+ ({ \
+ int8x8_t b_ = (b); \
+ int8x8_t a_ = (a); \
+ int8x8_t result; \
+ __asm__ ("ext %0.8b,%1.8b,%2.8b,%3" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x4_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int16x4_t result; \
+ __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*2" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x2_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int32x2_t result; \
+ __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*4" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x1_t b_ = (b); \
+ int64x1_t a_ = (a); \
+ int64x1_t result; \
+ __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*8" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_u8(a, b, c) \
+ __extension__ \
+ ({ \
+ uint8x8_t b_ = (b); \
+ uint8x8_t a_ = (a); \
+ uint8x8_t result; \
+ __asm__ ("ext %0.8b,%1.8b,%2.8b,%3" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x4_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x4_t result; \
+ __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*2" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x2_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x2_t result; \
+ __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*4" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x1_t b_ = (b); \
+ uint64x1_t a_ = (a); \
+ uint64x1_t result; \
+ __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*8" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32x4_t a_ = (a); \
+ float32x4_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*4" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x2_t b_ = (b); \
+ float64x2_t a_ = (a); \
+ float64x2_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*8" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x16_t b_ = (b); \
+ poly8x16_t a_ = (a); \
+ poly8x16_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x8_t b_ = (b); \
+ poly16x8_t a_ = (a); \
+ poly16x8_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*2" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_s8(a, b, c) \
+ __extension__ \
+ ({ \
+ int8x16_t b_ = (b); \
+ int8x16_t a_ = (a); \
+ int8x16_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16x8_t a_ = (a); \
+ int16x8_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*2" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*4" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*8" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_u8(a, b, c) \
+ __extension__ \
+ ({ \
+ uint8x16_t b_ = (b); \
+ uint8x16_t a_ = (a); \
+ uint8x16_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint16x8_t a_ = (a); \
+ uint16x8_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*2" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*4" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x2_t b_ = (b); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*8" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
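+/* Fused multiply-add: vfma* computes a + b * c and vfms* computes
+   a - b * c with a single rounding; in the vector forms the
+   accumulator is tied to the output register via the "0" constraint.  */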
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vfma_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
+{
+ float32x2_t result;
+ __asm__ ("fmla %0.2s,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vfma_lane_f32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ float32x2_t c_ = (c); \
+ float32x2_t b_ = (b); \
+ float32x2_t a_ = (a); \
+ float32x2_t result; \
+ __asm__ ("fmla %0.2s,%2.2s,%3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vfmad_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x2_t b_ = (b); \
+ float64_t a_ = (a); \
+ float64_t result; \
+ __asm__ ("fmla %d0,%d1,%2.d[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vfmaq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
+{
+ float32x4_t result;
+ __asm__ ("fmla %0.4s,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vfmaq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
+{
+ float64x2_t result;
+ __asm__ ("fmla %0.2d,%2.2d,%3.2d"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vfmaq_lane_f32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ float32x4_t c_ = (c); \
+ float32x4_t b_ = (b); \
+ float32x4_t a_ = (a); \
+ float32x4_t result; \
+ __asm__ ("fmla %0.4s,%2.4s,%3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vfmaq_lane_f64(a, b, c, d) \
+ __extension__ \
+ ({ \
+ float64x2_t c_ = (c); \
+ float64x2_t b_ = (b); \
+ float64x2_t a_ = (a); \
+ float64x2_t result; \
+ __asm__ ("fmla %0.2d,%2.2d,%3.d[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vfmas_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32_t a_ = (a); \
+ float32_t result; \
+ __asm__ ("fmla %s0,%s1,%2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vfms_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
+{
+ float32x2_t result;
+ __asm__ ("fmls %0.2s,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vfmsd_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x2_t b_ = (b); \
+ float64_t a_ = (a); \
+ float64_t result; \
+ __asm__ ("fmls %d0,%d1,%2.d[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vfmsq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
+{
+ float32x4_t result;
+ __asm__ ("fmls %0.4s,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vfmsq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
+{
+ float64x2_t result;
+ __asm__ ("fmls %0.2d,%2.2d,%3.2d"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vfmss_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32_t a_ = (a); \
+ float32_t result; \
+ __asm__ ("fmls %s0,%s1,%2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
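+/* vget_high_<type>: return the upper 64 bits of a 128-bit vector by
+   moving d[1] of the source into d[0] of the result.  */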
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vget_high_f32 (float32x4_t a)
+{
+ float32x2_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vget_high_f64 (float64x2_t a)
+{
+ float64x1_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vget_high_p8 (poly8x16_t a)
+{
+ poly8x8_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vget_high_p16 (poly16x8_t a)
+{
+ poly16x4_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vget_high_s8 (int8x16_t a)
+{
+ int8x8_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vget_high_s16 (int16x8_t a)
+{
+ int16x4_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vget_high_s32 (int32x4_t a)
+{
+ int32x2_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vget_high_s64 (int64x2_t a)
+{
+ int64x1_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vget_high_u8 (uint8x16_t a)
+{
+ uint8x8_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vget_high_u16 (uint16x8_t a)
+{
+ uint16x4_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vget_high_u32 (uint32x4_t a)
+{
+ uint32x2_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vget_high_u64 (uint64x2_t a)
+{
+ uint64x1_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vget_lane_f64(a, b) \
+ __extension__ \
+ ({ \
+ float64x1_t a_ = (a); \
+ float64_t result; \
+ __asm__ ("umov %x0, %1.d[%2]" \
+ : "=r"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
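+/* vget_low_<type>: return the lower 64 bits of a 128-bit vector.  */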
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vget_low_f32 (float32x4_t a)
+{
+ float32x2_t result;
+ __asm__ ("ins %0.d[0], %1.d[0]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vget_low_f64 (float64x2_t a)
+{
+ float64x1_t result;
+ __asm__ ("ins %0.d[0], %1.d[0]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vget_low_p8 (poly8x16_t a)
+{
+ poly8x8_t result;
+ __asm__ ("ins %0.d[0], %1.d[0]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vget_low_p16 (poly16x8_t a)
+{
+ poly16x4_t result;
+ __asm__ ("ins %0.d[0], %1.d[0]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vget_low_s8 (int8x16_t a)
+{
+ int8x8_t result;
+ __asm__ ("ins %0.d[0], %1.d[0]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vget_low_s16 (int16x8_t a)
+{
+ int16x4_t result;
+ __asm__ ("ins %0.d[0], %1.d[0]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vget_low_s32 (int32x4_t a)
+{
+ int32x2_t result;
+ __asm__ ("ins %0.d[0], %1.d[0]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vget_low_s64 (int64x2_t a)
+{
+ int64x1_t result;
+ __asm__ ("ins %0.d[0], %1.d[0]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vget_low_u8 (uint8x16_t a)
+{
+ uint8x8_t result;
+ __asm__ ("ins %0.d[0], %1.d[0]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vget_low_u16 (uint16x8_t a)
+{
+ uint16x4_t result;
+ __asm__ ("ins %0.d[0], %1.d[0]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vget_low_u32 (uint32x4_t a)
+{
+ uint32x2_t result;
+ __asm__ ("ins %0.d[0], %1.d[0]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vget_low_u64 (uint64x2_t a)
+{
+ uint64x1_t result;
+ __asm__ ("ins %0.d[0], %1.d[0]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
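+/* Halving subtract: each element of the result is (a - b) >> 1, with
+   the subtraction performed at wider precision so it cannot overflow
+   (SHSUB for signed, UHSUB for unsigned element types).  */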
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vhsub_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("shsub %0.8b, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vhsub_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("shsub %0.4h, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vhsub_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("shsub %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vhsub_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("uhsub %0.8b, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vhsub_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("uhsub %0.4h, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vhsub_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("uhsub %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vhsubq_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("shsub %0.16b, %1.16b, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vhsubq_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("shsub %0.8h, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vhsubq_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("shsub %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vhsubq_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("uhsub %0.16b, %1.16b, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vhsubq_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("uhsub %0.8h, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vhsubq_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("uhsub %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
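+/* vld1_dup_<type>: load one element and replicate it to all lanes
+   (LD1R); the "memory" clobber orders the load with respect to
+   surrounding stores.  */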
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vld1_dup_f32 (float32_t * a)
+{
+ float32x2_t result;
+ __asm__ ("ld1r {%0.2s},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vld1_dup_f64 (float64_t * a)
+{
+ float64x1_t result;
+ __asm__ ("ld1 {%0.1d},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vld1_dup_p8 (poly8_t * a)
+{
+ poly8x8_t result;
+ __asm__ ("ld1r {%0.8b},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vld1_dup_p16 (poly16_t * a)
+{
+ poly16x4_t result;
+ __asm__ ("ld1r {%0.4h},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vld1_dup_s8 (int8_t * a)
+{
+ int8x8_t result;
+ __asm__ ("ld1r {%0.8b},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vld1_dup_s16 (int16_t * a)
+{
+ int16x4_t result;
+ __asm__ ("ld1r {%0.4h},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vld1_dup_s32 (int32_t * a)
+{
+ int32x2_t result;
+ __asm__ ("ld1r {%0.2s},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vld1_dup_s64 (int64_t * a)
+{
+ int64x1_t result;
+ __asm__ ("ld1 {%0.1d},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vld1_dup_u8 (uint8_t * a)
+{
+ uint8x8_t result;
+ __asm__ ("ld1r {%0.8b},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vld1_dup_u16 (uint16_t * a)
+{
+ uint16x4_t result;
+ __asm__ ("ld1r {%0.4h},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vld1_dup_u32 (uint32_t * a)
+{
+ uint32x2_t result;
+ __asm__ ("ld1r {%0.2s},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vld1_dup_u64 (uint64_t * a)
+{
+ uint64x1_t result;
+ __asm__ ("ld1 {%0.1d},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vld1_f32 (float32_t * a)
+{
+ float32x2_t result;
+ __asm__ ("ld1 {%0.2s},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vld1_f64 (float64_t * a)
+{
+ float64x1_t result;
+ __asm__ ("ld1 {%0.1d},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
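+/* vld1_lane_<type> (a, b, c): load a single element from address a
+   into lane c of vector b, leaving the remaining lanes unchanged.  */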
+#define vld1_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x2_t b_ = (b); \
+ float32_t * a_ = (a); \
+ float32x2_t result; \
+ __asm__ ("ld1 {%0.s}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : "memory"); \
+ result; \
+ })
+
+#define vld1_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x1_t b_ = (b); \
+ float64_t * a_ = (a); \
+ float64x1_t result; \
+ __asm__ ("ld1 {%0.d}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : "memory"); \
+ result; \
+ })
+
+#define vld1_lane_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x8_t b_ = (b); \
+ poly8_t * a_ = (a); \
+ poly8x8_t result; \
+ __asm__ ("ld1 {%0.b}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : "memory"); \
+ result; \
+ })
+
+#define vld1_lane_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x4_t b_ = (b); \
+ poly16_t * a_ = (a); \
+ poly16x4_t result; \
+ __asm__ ("ld1 {%0.h}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : "memory"); \
+ result; \
+ })
+
+#define vld1_lane_s8(a, b, c) \
+ __extension__ \
+ ({ \
+ int8x8_t b_ = (b); \
+ int8_t * a_ = (a); \
+ int8x8_t result; \
+ __asm__ ("ld1 {%0.b}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : "memory"); \
+ result; \
+ })
+
+#define vld1_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x4_t b_ = (b); \
+ int16_t * a_ = (a); \
+ int16x4_t result; \
+ __asm__ ("ld1 {%0.h}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : "memory"); \
+ result; \
+ })
+
+#define vld1_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x2_t b_ = (b); \
+ int32_t * a_ = (a); \
+ int32x2_t result; \
+ __asm__ ("ld1 {%0.s}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : "memory"); \
+ result; \
+ })
+
+#define vld1_lane_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x1_t b_ = (b); \
+ int64_t * a_ = (a); \
+ int64x1_t result; \
+ __asm__ ("ld1 {%0.d}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : "memory"); \
+ result; \
+ })
+
+#define vld1_lane_u8(a, b, c) \
+ __extension__ \
+ ({ \
+ uint8x8_t b_ = (b); \
+ uint8_t * a_ = (a); \
+ uint8x8_t result; \
+ __asm__ ("ld1 {%0.b}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : "memory"); \
+ result; \
+ })
+
+#define vld1_lane_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x4_t b_ = (b); \
+ uint16_t * a_ = (a); \
+ uint16x4_t result; \
+ __asm__ ("ld1 {%0.h}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1_lane_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x2_t b_ = (b); \
+ uint32_t * a_ = (a); \
+ uint32x2_t result; \
+ __asm__ ("ld1 {%0.s}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1_lane_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x1_t b_ = (b); \
+ uint64_t * a_ = (a); \
+ uint64x1_t result; \
+ __asm__ ("ld1 {%0.d}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
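+/* Illustrative usage sketch; __example_load_lane0_u16 is a hypothetical
+   helper, not part of the intrinsic set.  The vld1_lane_* macros load a
+   single element from memory into lane C of an existing vector; because
+   the lane number is passed through the "i" asm constraint, it must be
+   an integer constant expression.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__example_load_lane0_u16 (uint16_t * a, uint16x4_t b)
+{
+ return vld1_lane_u16 (a, b, 0);
+}
+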
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vld1_p8 (poly8_t * a)
+{
+ poly8x8_t result;
+ __asm__ ("ld1 {%0.8b}, [%1]"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vld1_p16 (poly16_t * a)
+{
+ poly16x4_t result;
+ __asm__ ("ld1 {%0.4h}, [%1]"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vld1_s8 (int8_t * a)
+{
+ int8x8_t result;
+ __asm__ ("ld1 {%0.8b},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vld1_s16 (int16_t * a)
+{
+ int16x4_t result;
+ __asm__ ("ld1 {%0.4h},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vld1_s32 (int32_t * a)
+{
+ int32x2_t result;
+ __asm__ ("ld1 {%0.2s},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vld1_s64 (int64_t * a)
+{
+ int64x1_t result;
+ __asm__ ("ld1 {%0.1d},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vld1_u8 (uint8_t * a)
+{
+ uint8x8_t result;
+ __asm__ ("ld1 {%0.8b},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vld1_u16 (uint16_t * a)
+{
+ uint16x4_t result;
+ __asm__ ("ld1 {%0.4h},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vld1_u32 (uint32_t * a)
+{
+ uint32x2_t result;
+ __asm__ ("ld1 {%0.2s},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vld1_u64 (uint64_t * a)
+{
+ uint64x1_t result;
+ __asm__ ("ld1 {%0.1d},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
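+/* Illustrative usage sketch; __example_load_u8x8 is a hypothetical
+   helper.  The non-q vld1_* forms fill a 64-bit D register with a
+   single "ld1", here eight consecutive bytes.  */
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__example_load_u8x8 (uint8_t * a)
+{
+ return vld1_u8 (a);
+}
+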
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vld1q_dup_f32 (float32_t * a)
+{
+ float32x4_t result;
+ __asm__ ("ld1r {%0.4s},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vld1q_dup_f64 (float64_t * a)
+{
+ float64x2_t result;
+ __asm__ ("ld1r {%0.2d},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vld1q_dup_p8 (poly8_t * a)
+{
+ poly8x16_t result;
+ __asm__ ("ld1r {%0.16b},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vld1q_dup_p16 (poly16_t * a)
+{
+ poly16x8_t result;
+ __asm__ ("ld1r {%0.8h},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vld1q_dup_s8 (int8_t * a)
+{
+ int8x16_t result;
+ __asm__ ("ld1r {%0.16b},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vld1q_dup_s16 (int16_t * a)
+{
+ int16x8_t result;
+ __asm__ ("ld1r {%0.8h},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vld1q_dup_s32 (int32_t * a)
+{
+ int32x4_t result;
+ __asm__ ("ld1r {%0.4s},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vld1q_dup_s64 (int64_t * a)
+{
+ int64x2_t result;
+ __asm__ ("ld1r {%0.2d},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vld1q_dup_u8 (uint8_t * a)
+{
+ uint8x16_t result;
+ __asm__ ("ld1r {%0.16b},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vld1q_dup_u16 (uint16_t * a)
+{
+ uint16x8_t result;
+ __asm__ ("ld1r {%0.8h},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vld1q_dup_u32 (uint32_t * a)
+{
+ uint32x4_t result;
+ __asm__ ("ld1r {%0.4s},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vld1q_dup_u64 (uint64_t * a)
+{
+ uint64x2_t result;
+ __asm__ ("ld1r {%0.2d},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
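+/* Illustrative usage sketch; __example_splat_f32 is a hypothetical
+   helper.  The vld1q_dup_* forms broadcast one scalar from memory into
+   every lane of a 128-bit Q register with a single "ld1r".  */
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__example_splat_f32 (float32_t * a)
+{
+ return vld1q_dup_f32 (a);
+}
+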
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vld1q_f32 (float32_t * a)
+{
+ float32x4_t result;
+ __asm__ ("ld1 {%0.4s},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vld1q_f64 (float64_t * a)
+{
+ float64x2_t result;
+ __asm__ ("ld1 {%0.2d},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+#define vld1q_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32_t * a_ = (a); \
+ float32x4_t result; \
+ __asm__ ("ld1 {%0.s}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x2_t b_ = (b); \
+ float64_t * a_ = (a); \
+ float64x2_t result; \
+ __asm__ ("ld1 {%0.d}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x16_t b_ = (b); \
+ poly8_t * a_ = (a); \
+ poly8x16_t result; \
+ __asm__ ("ld1 {%0.b}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x8_t b_ = (b); \
+ poly16_t * a_ = (a); \
+ poly16x8_t result; \
+ __asm__ ("ld1 {%0.h}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_s8(a, b, c) \
+ __extension__ \
+ ({ \
+ int8x16_t b_ = (b); \
+ int8_t * a_ = (a); \
+ int8x16_t result; \
+ __asm__ ("ld1 {%0.b}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16_t * a_ = (a); \
+ int16x8_t result; \
+ __asm__ ("ld1 {%0.h}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32_t * a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("ld1 {%0.s}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ int64_t * a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("ld1 {%0.d}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_u8(a, b, c) \
+ __extension__ \
+ ({ \
+ uint8x16_t b_ = (b); \
+ uint8_t * a_ = (a); \
+ uint8x16_t result; \
+ __asm__ ("ld1 {%0.b}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint16_t * a_ = (a); \
+ uint16x8_t result; \
+ __asm__ ("ld1 {%0.h}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint32_t * a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("ld1 {%0.s}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x2_t b_ = (b); \
+ uint64_t * a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("ld1 {%0.d}[%3],[%1]" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vld1q_p8 (poly8_t * a)
+{
+ poly8x16_t result;
+ __asm__ ("ld1 {%0.16b},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vld1q_p16 (poly16_t * a)
+{
+ poly16x8_t result;
+ __asm__ ("ld1 {%0.8h},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vld1q_s8 (int8_t * a)
+{
+ int8x16_t result;
+ __asm__ ("ld1 {%0.16b},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vld1q_s16 (int16_t * a)
+{
+ int16x8_t result;
+ __asm__ ("ld1 {%0.8h},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vld1q_s32 (int32_t * a)
+{
+ int32x4_t result;
+ __asm__ ("ld1 {%0.4s},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vld1q_s64 (int64_t * a)
+{
+ int64x2_t result;
+ __asm__ ("ld1 {%0.2d},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vld1q_u8 (uint8_t * a)
+{
+ uint8x16_t result;
+ __asm__ ("ld1 {%0.16b},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vld1q_u16 (uint16_t * a)
+{
+ uint16x8_t result;
+ __asm__ ("ld1 {%0.8h},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vld1q_u32 (uint32_t * a)
+{
+ uint32x4_t result;
+ __asm__ ("ld1 {%0.4s},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vld1q_u64 (uint64_t * a)
+{
+ uint64x2_t result;
+ __asm__ ("ld1 {%0.2d},[%1]"
+ : "=w"(result)
+ : "r"(a)
+ : "memory");
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmaxnm_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("fmaxnm %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmaxnmq_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("fmaxnm %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmaxnmq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("fmaxnm %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vmaxnmvq_f32 (float32x4_t a)
+{
+ float32_t result;
+ __asm__ ("fmaxnmv %s0,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vmaxv_s8 (int8x8_t a)
+{
+ int8_t result;
+ __asm__ ("smaxv %b0,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vmaxv_s16 (int16x4_t a)
+{
+ int16_t result;
+ __asm__ ("smaxv %h0,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vmaxv_u8 (uint8x8_t a)
+{
+ uint8_t result;
+ __asm__ ("umaxv %b0,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vmaxv_u16 (uint16x4_t a)
+{
+ uint16_t result;
+ __asm__ ("umaxv %h0,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vmaxvq_f32 (float32x4_t a)
+{
+ float32_t result;
+ __asm__ ("fmaxv %s0,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vmaxvq_s8 (int8x16_t a)
+{
+ int8_t result;
+ __asm__ ("smaxv %b0,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vmaxvq_s16 (int16x8_t a)
+{
+ int16_t result;
+ __asm__ ("smaxv %h0,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vmaxvq_s32 (int32x4_t a)
+{
+ int32_t result;
+ __asm__ ("smaxv %s0,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vmaxvq_u8 (uint8x16_t a)
+{
+ uint8_t result;
+ __asm__ ("umaxv %b0,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vmaxvq_u16 (uint16x8_t a)
+{
+ uint16_t result;
+ __asm__ ("umaxv %h0,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vmaxvq_u32 (uint32x4_t a)
+{
+ uint32_t result;
+ __asm__ ("umaxv %s0,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vminnmvq_f32 (float32x4_t a)
+{
+ float32_t result;
+ __asm__ ("fminnmv %s0,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vminv_s8 (int8x8_t a)
+{
+ int8_t result;
+ __asm__ ("sminv %b0,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vminv_s16 (int16x4_t a)
+{
+ int16_t result;
+ __asm__ ("sminv %h0,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vminv_u8 (uint8x8_t a)
+{
+ uint8_t result;
+ __asm__ ("uminv %b0,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vminv_u16 (uint16x4_t a)
+{
+ uint16_t result;
+ __asm__ ("uminv %h0,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vminvq_f32 (float32x4_t a)
+{
+ float32_t result;
+ __asm__ ("fminv %s0,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vminvq_s8 (int8x16_t a)
+{
+ int8_t result;
+ __asm__ ("sminv %b0,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vminvq_s16 (int16x8_t a)
+{
+ int16_t result;
+ __asm__ ("sminv %h0,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vminvq_s32 (int32x4_t a)
+{
+ int32_t result;
+ __asm__ ("sminv %s0,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vminvq_u8 (uint8x16_t a)
+{
+ uint8_t result;
+ __asm__ ("uminv %b0,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vminvq_u16 (uint16x8_t a)
+{
+ uint16_t result;
+ __asm__ ("uminv %h0,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vminvq_u32 (uint32x4_t a)
+{
+ uint32_t result;
+ __asm__ ("uminv %s0,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
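+/* Illustrative usage sketch; __example_max_u8x16 is a hypothetical
+   helper.  The across-lane reductions (smaxv/umaxv, sminv/uminv, fmaxv/
+   fminv) fold a whole vector into one scalar; the fmaxnmv/fminnmv forms
+   follow IEEE 754-2008 maxNum/minNum semantics when a NaN is present.  */
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__example_max_u8x16 (uint8x16_t a)
+{
+ return vmaxvq_u8 (a);
+}
+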
+#define vmla_lane_f32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ float32x4_t c_ = (c); \
+ float32x2_t b_ = (b); \
+ float32x2_t a_ = (a); \
+ float32x2_t result; \
+ float32x2_t t1; \
+ __asm__ ("fmul %1.2s, %3.2s, %4.s[%5]; fadd %0.2s, %0.2s, %1.2s" \
+ : "=w"(result), "=w"(t1) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmla_lane_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x4_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int16x4_t result; \
+ __asm__ ("mla %0.4h, %2.4h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmla_lane_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x2_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int32x2_t result; \
+ __asm__ ("mla %0.2s, %2.2s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmla_lane_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x4_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x4_t result; \
+ __asm__ ("mla %0.4h, %2.4h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmla_lane_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x2_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x2_t result; \
+ __asm__ ("mla %0.2s, %2.2s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmla_laneq_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x4_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int16x4_t result; \
+ __asm__ ("mla %0.4h, %2.4h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmla_laneq_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x2_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int32x2_t result; \
+ __asm__ ("mla %0.2s, %2.2s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmla_laneq_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x4_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x4_t result; \
+ __asm__ ("mla %0.4h, %2.4h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmla_laneq_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x2_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x2_t result; \
+ __asm__ ("mla %0.2s, %2.2s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmla_n_f32 (float32x2_t a, float32x2_t b, float32_t c)
+{
+ float32x2_t result;
+ float32x2_t t1;
+ __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fadd %0.2s, %0.2s, %1.2s"
+ : "=w"(result), "=w"(t1)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmla_n_s16 (int16x4_t a, int16x4_t b, int16_t c)
+{
+ int16x4_t result;
+ __asm__ ("mla %0.4h,%2.4h,%3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmla_n_s32 (int32x2_t a, int32x2_t b, int32_t c)
+{
+ int32x2_t result;
+ __asm__ ("mla %0.2s,%2.2s,%3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmla_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c)
+{
+ uint16x4_t result;
+ __asm__ ("mla %0.4h,%2.4h,%3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmla_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c)
+{
+ uint32x2_t result;
+ __asm__ ("mla %0.2s,%2.2s,%3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmla_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
+{
+ int8x8_t result;
+ __asm__ ("mla %0.8b, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmla_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
+{
+ int16x4_t result;
+ __asm__ ("mla %0.4h, %2.4h, %3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmla_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
+{
+ int32x2_t result;
+ __asm__ ("mla %0.2s, %2.2s, %3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmla_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
+{
+ uint8x8_t result;
+ __asm__ ("mla %0.8b, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmla_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
+{
+ uint16x4_t result;
+ __asm__ ("mla %0.4h, %2.4h, %3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmla_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
+{
+ uint32x2_t result;
+ __asm__ ("mla %0.2s, %2.2s, %3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
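+/* Illustrative usage sketch; __example_mla_s16 is a hypothetical helper.
+   The vmla_* forms compute a + b * c element-wise, with "mla"
+   accumulating into the register tied to the first operand.  */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__example_mla_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
+{
+ return vmla_s16 (a, b, c);
+}
+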
+#define vmlal_high_lane_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x8_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smlal2 %0.4s, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_high_lane_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x4_t b_ = (b); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smlal2 %0.2d, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_high_lane_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x8_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umlal2 %0.4s, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_high_lane_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x4_t b_ = (b); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umlal2 %0.2d, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_high_laneq_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x8_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smlal2 %0.4s, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_high_laneq_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x4_t b_ = (b); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smlal2 %0.2d, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_high_laneq_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x8_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umlal2 %0.4s, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_high_laneq_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x4_t b_ = (b); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umlal2 %0.2d, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c)
+{
+ int32x4_t result;
+ __asm__ ("smlal2 %0.4s,%2.8h,%3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c)
+{
+ int64x2_t result;
+ __asm__ ("smlal2 %0.2d,%2.4s,%3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_high_n_u16 (uint32x4_t a, uint16x8_t b, uint16_t c)
+{
+ uint32x4_t result;
+ __asm__ ("umlal2 %0.4s,%2.8h,%3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_high_n_u32 (uint64x2_t a, uint32x4_t b, uint32_t c)
+{
+ uint64x2_t result;
+ __asm__ ("umlal2 %0.2d,%2.4s,%3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
+{
+ int16x8_t result;
+ __asm__ ("smlal2 %0.8h,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
+{
+ int32x4_t result;
+ __asm__ ("smlal2 %0.4s,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
+{
+ int64x2_t result;
+ __asm__ ("smlal2 %0.2d,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
+{
+ uint16x8_t result;
+ __asm__ ("umlal2 %0.8h,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
+{
+ uint32x4_t result;
+ __asm__ ("umlal2 %0.4s,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
+{
+ uint64x2_t result;
+ __asm__ ("umlal2 %0.2d,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vmlal_lane_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smlal %0.4s,%2.4h,%3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_lane_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x2_t b_ = (b); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smlal %0.2d,%2.2s,%3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_lane_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umlal %0.4s,%2.4h,%3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_lane_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x2_t b_ = (b); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umlal %0.2d, %2.2s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_laneq_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smlal %0.4s, %2.4h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_laneq_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x2_t b_ = (b); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smlal %0.2d, %2.2s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_laneq_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umlal %0.4s, %2.4h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_laneq_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x2_t b_ = (b); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umlal %0.2d, %2.2s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_n_s16 (int32x4_t a, int16x4_t b, int16_t c)
+{
+ int32x4_t result;
+ __asm__ ("smlal %0.4s,%2.4h,%3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_n_s32 (int64x2_t a, int32x2_t b, int32_t c)
+{
+ int64x2_t result;
+ __asm__ ("smlal %0.2d,%2.2s,%3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_n_u16 (uint32x4_t a, uint16x4_t b, uint16_t c)
+{
+ uint32x4_t result;
+ __asm__ ("umlal %0.4s,%2.4h,%3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_n_u32 (uint64x2_t a, uint32x2_t b, uint32_t c)
+{
+ uint64x2_t result;
+ __asm__ ("umlal %0.2d,%2.2s,%3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlal_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
+{
+ int16x8_t result;
+ __asm__ ("smlal %0.8h,%2.8b,%3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
+{
+ int32x4_t result;
+ __asm__ ("smlal %0.4s,%2.4h,%3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
+{
+ int64x2_t result;
+ __asm__ ("smlal %0.2d,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
+{
+ uint16x8_t result;
+ __asm__ ("umlal %0.8h,%2.8b,%3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
+{
+ uint32x4_t result;
+ __asm__ ("umlal %0.4s,%2.4h,%3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
+{
+ uint64x2_t result;
+ __asm__ ("umlal %0.2d,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
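+/* Illustrative usage sketch; __example_widening_mac_u8 is a hypothetical
+   helper.  The vmlal_* forms widen before accumulating ("umlal"/"smlal"):
+   each 8-bit product below is computed at 16-bit precision and added to
+   a 16-bit accumulator lane.  */
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__example_widening_mac_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
+{
+ return vmlal_u8 (a, b, c);
+}
+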
+#define vmlaq_lane_f32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ float32x4_t c_ = (c); \
+ float32x4_t b_ = (b); \
+ float32x4_t a_ = (a); \
+ float32x4_t result; \
+ float32x4_t t1; \
+ __asm__ ("fmul %1.4s, %3.4s, %4.s[%5]; fadd %0.4s, %0.4s, %1.4s" \
+ : "=w"(result), "=w"(t1) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlaq_lane_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x8_t b_ = (b); \
+ int16x8_t a_ = (a); \
+ int16x8_t result; \
+ __asm__ ("mla %0.8h, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlaq_lane_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("mla %0.4s, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlaq_lane_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x8_t b_ = (b); \
+ uint16x8_t a_ = (a); \
+ uint16x8_t result; \
+ __asm__ ("mla %0.8h, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlaq_lane_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("mla %0.4s, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlaq_laneq_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x8_t b_ = (b); \
+ int16x8_t a_ = (a); \
+ int16x8_t result; \
+ __asm__ ("mla %0.8h, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlaq_laneq_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("mla %0.4s, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlaq_laneq_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x8_t b_ = (b); \
+ uint16x8_t a_ = (a); \
+ uint16x8_t result; \
+ __asm__ ("mla %0.8h, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlaq_laneq_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("mla %0.4s, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlaq_n_f32 (float32x4_t a, float32x4_t b, float32_t c)
+{
+ float32x4_t result;
+ float32x4_t t1;
+ __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fadd %0.4s, %0.4s, %1.4s"
+ : "=w"(result), "=w"(t1)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmlaq_n_f64 (float64x2_t a, float64x2_t b, float64_t c)
+{
+ float64x2_t result;
+ float64x2_t t1;
+ __asm__ ("fmul %1.2d, %3.2d, %4.d[0]; fadd %0.2d, %0.2d, %1.2d"
+ : "=w"(result), "=w"(t1)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlaq_n_s16 (int16x8_t a, int16x8_t b, int16_t c)
+{
+ int16x8_t result;
+ __asm__ ("mla %0.8h,%2.8h,%3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlaq_n_s32 (int32x4_t a, int32x4_t b, int32_t c)
+{
+ int32x4_t result;
+ __asm__ ("mla %0.4s,%2.4s,%3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlaq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c)
+{
+ uint16x8_t result;
+ __asm__ ("mla %0.8h,%2.8h,%3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlaq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c)
+{
+ uint32x4_t result;
+ __asm__ ("mla %0.4s,%2.4s,%3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmlaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
+{
+ int8x16_t result;
+ __asm__ ("mla %0.16b, %2.16b, %3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
+{
+ int16x8_t result;
+ __asm__ ("mla %0.8h, %2.8h, %3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
+{
+ int32x4_t result;
+ __asm__ ("mla %0.4s, %2.4s, %3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmlaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
+{
+ uint8x16_t result;
+ __asm__ ("mla %0.16b, %2.16b, %3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
+{
+ uint16x8_t result;
+ __asm__ ("mla %0.8h, %2.8h, %3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
+{
+ uint32x4_t result;
+ __asm__ ("mla %0.4s, %2.4s, %3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vmls_lane_f32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ float32x4_t c_ = (c); \
+ float32x2_t b_ = (b); \
+ float32x2_t a_ = (a); \
+ float32x2_t result; \
+ float32x2_t t1; \
+ __asm__ ("fmul %1.2s, %3.2s, %4.s[%5]; fsub %0.2s, %0.2s, %1.2s" \
+ : "=w"(result), "=w"(t1) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmls_lane_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x4_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int16x4_t result; \
+ __asm__ ("mls %0.4h,%2.4h,%3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmls_lane_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x2_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int32x2_t result; \
+ __asm__ ("mls %0.2s,%2.2s,%3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmls_lane_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x4_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x4_t result; \
+ __asm__ ("mls %0.4h,%2.4h,%3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmls_lane_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x2_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x2_t result; \
+ __asm__ ("mls %0.2s,%2.2s,%3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmls_n_f32 (float32x2_t a, float32x2_t b, float32_t c)
+{
+ float32x2_t result;
+ float32x2_t t1;
+ __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fsub %0.2s, %0.2s, %1.2s"
+ : "=w"(result), "=w"(t1)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmls_n_s16 (int16x4_t a, int16x4_t b, int16_t c)
+{
+ int16x4_t result;
+ __asm__ ("mls %0.4h, %2.4h, %3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmls_n_s32 (int32x2_t a, int32x2_t b, int32_t c)
+{
+ int32x2_t result;
+ __asm__ ("mls %0.2s, %2.2s, %3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmls_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c)
+{
+ uint16x4_t result;
+ __asm__ ("mls %0.4h, %2.4h, %3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmls_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c)
+{
+ uint32x2_t result;
+ __asm__ ("mls %0.2s, %2.2s, %3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmls_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
+{
+ int8x8_t result;
+ __asm__ ("mls %0.8b,%2.8b,%3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmls_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
+{
+ int16x4_t result;
+ __asm__ ("mls %0.4h,%2.4h,%3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmls_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
+{
+ int32x2_t result;
+ __asm__ ("mls %0.2s,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmls_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
+{
+ uint8x8_t result;
+ __asm__ ("mls %0.8b,%2.8b,%3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmls_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
+{
+ uint16x4_t result;
+ __asm__ ("mls %0.4h,%2.4h,%3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmls_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
+{
+ uint32x2_t result;
+ __asm__ ("mls %0.2s,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
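+/* Illustrative usage sketch; __example_mls_s32 is a hypothetical helper.
+   The vmls_* forms are the subtracting counterpart of vmla_*, computing
+   a - b * c element-wise with "mls".  */
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__example_mls_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
+{
+ return vmls_s32 (a, b, c);
+}
+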
+#define vmlsl_high_lane_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x8_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smlsl2 %0.4s, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_high_lane_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x4_t b_ = (b); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smlsl2 %0.2d, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_high_lane_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x8_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umlsl2 %0.4s, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_high_lane_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x4_t b_ = (b); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umlsl2 %0.2d, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_high_laneq_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x8_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smlsl2 %0.4s, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_high_laneq_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x4_t b_ = (b); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smlsl2 %0.2d, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_high_laneq_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x8_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umlsl2 %0.4s, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_high_laneq_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x4_t b_ = (b); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umlsl2 %0.2d, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c)
+{
+ int32x4_t result;
+ __asm__ ("smlsl2 %0.4s, %2.8h, %3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c)
+{
+ int64x2_t result;
+ __asm__ ("smlsl2 %0.2d, %2.4s, %3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_high_n_u16 (uint32x4_t a, uint16x8_t b, uint16_t c)
+{
+ uint32x4_t result;
+ __asm__ ("umlsl2 %0.4s, %2.8h, %3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_high_n_u32 (uint64x2_t a, uint32x4_t b, uint32_t c)
+{
+ uint64x2_t result;
+ __asm__ ("umlsl2 %0.2d, %2.4s, %3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsl_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
+{
+ int16x8_t result;
+ __asm__ ("smlsl2 %0.8h,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
+{
+ int32x4_t result;
+ __asm__ ("smlsl2 %0.4s,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
+{
+ int64x2_t result;
+ __asm__ ("smlsl2 %0.2d,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsl_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
+{
+ uint16x8_t result;
+ __asm__ ("umlsl2 %0.8h,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
+{
+ uint32x4_t result;
+ __asm__ ("umlsl2 %0.4s,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
+{
+ uint64x2_t result;
+ __asm__ ("umlsl2 %0.2d,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vmlsl_lane_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smlsl %0.4s, %2.4h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_lane_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x2_t b_ = (b); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smlsl %0.2d, %2.2s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_lane_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umlsl %0.4s, %2.4h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_lane_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x2_t b_ = (b); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umlsl %0.2d, %2.2s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_laneq_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smlsl %0.4s, %2.4h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_laneq_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x2_t b_ = (b); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smlsl %0.2d, %2.2s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_laneq_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umlsl %0.4s, %2.4h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_laneq_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x2_t b_ = (b); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umlsl %0.2d, %2.2s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_n_s16 (int32x4_t a, int16x4_t b, int16_t c)
+{
+ int32x4_t result;
+ __asm__ ("smlsl %0.4s, %2.4h, %3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_n_s32 (int64x2_t a, int32x2_t b, int32_t c)
+{
+ int64x2_t result;
+ __asm__ ("smlsl %0.2d, %2.2s, %3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_n_u16 (uint32x4_t a, uint16x4_t b, uint16_t c)
+{
+ uint32x4_t result;
+ __asm__ ("umlsl %0.4s, %2.4h, %3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_n_u32 (uint64x2_t a, uint32x2_t b, uint32_t c)
+{
+ uint64x2_t result;
+ __asm__ ("umlsl %0.2d, %2.2s, %3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsl_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
+{
+ int16x8_t result;
+ __asm__ ("smlsl %0.8h, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
+{
+ int32x4_t result;
+ __asm__ ("smlsl %0.4s, %2.4h, %3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
+{
+ int64x2_t result;
+ __asm__ ("smlsl %0.2d, %2.2s, %3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsl_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
+{
+ uint16x8_t result;
+ __asm__ ("umlsl %0.8h, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
+{
+ uint32x4_t result;
+ __asm__ ("umlsl %0.4s, %2.4h, %3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
+{
+ uint64x2_t result;
+ __asm__ ("umlsl %0.2d, %2.2s, %3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vmlsq_lane_f32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ float32x4_t c_ = (c); \
+ float32x4_t b_ = (b); \
+ float32x4_t a_ = (a); \
+ float32x4_t result; \
+ float32x4_t t1; \
+ __asm__ ("fmul %1.4s, %3.4s, %4.s[%5]; fsub %0.4s, %0.4s, %1.4s" \
+ : "=w"(result), "=w"(t1) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsq_lane_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x8_t b_ = (b); \
+ int16x8_t a_ = (a); \
+ int16x8_t result; \
+ __asm__ ("mls %0.8h,%2.8h,%3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsq_lane_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("mls %0.4s,%2.4s,%3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsq_lane_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x8_t b_ = (b); \
+ uint16x8_t a_ = (a); \
+ uint16x8_t result; \
+ __asm__ ("mls %0.8h,%2.8h,%3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsq_lane_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("mls %0.4s,%2.4s,%3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsq_laneq_f32(__a, __b, __c, __d) \
+ __extension__ \
+ ({ \
+ float32x4_t __c_ = (__c); \
+ float32x4_t __b_ = (__b); \
+ float32x4_t __a_ = (__a); \
+ float32x4_t __result; \
+ float32x4_t __t1; \
+ __asm__ ("fmul %1.4s, %3.4s, %4.s[%5]; fsub %0.4s, %0.4s, %1.4s" \
+ : "=w"(__result), "=w"(__t1) \
+ : "0"(__a_), "w"(__b_), "w"(__c_), "i"(__d) \
+ : /* No clobbers */); \
+ __result; \
+ })
+
+#define vmlsq_laneq_s16(__a, __b, __c, __d) \
+ __extension__ \
+ ({ \
+ int16x8_t __c_ = (__c); \
+ int16x8_t __b_ = (__b); \
+ int16x8_t __a_ = (__a); \
+ int16x8_t __result; \
+ __asm__ ("mls %0.8h, %2.8h, %3.h[%4]" \
+ : "=w"(__result) \
+ : "0"(__a_), "w"(__b_), "w"(__c_), "i"(__d) \
+ : /* No clobbers */); \
+ __result; \
+ })
+
+#define vmlsq_laneq_s32(__a, __b, __c, __d) \
+ __extension__ \
+ ({ \
+ int32x4_t __c_ = (__c); \
+ int32x4_t __b_ = (__b); \
+ int32x4_t __a_ = (__a); \
+ int32x4_t __result; \
+ __asm__ ("mls %0.4s, %2.4s, %3.s[%4]" \
+ : "=w"(__result) \
+ : "0"(__a_), "w"(__b_), "w"(__c_), "i"(__d) \
+ : /* No clobbers */); \
+ __result; \
+ })
+
+#define vmlsq_laneq_u16(__a, __b, __c, __d) \
+ __extension__ \
+ ({ \
+ uint16x8_t __c_ = (__c); \
+ uint16x8_t __b_ = (__b); \
+ uint16x8_t __a_ = (__a); \
+ uint16x8_t __result; \
+ __asm__ ("mls %0.8h, %2.8h, %3.h[%4]" \
+ : "=w"(__result) \
+ : "0"(__a_), "w"(__b_), "w"(__c_), "i"(__d) \
+ : /* No clobbers */); \
+ __result; \
+ })
+
+#define vmlsq_laneq_u32(__a, __b, __c, __d) \
+ __extension__ \
+ ({ \
+ uint32x4_t __c_ = (__c); \
+ uint32x4_t __b_ = (__b); \
+ uint32x4_t __a_ = (__a); \
+ uint32x4_t __result; \
+ __asm__ ("mls %0.4s, %2.4s, %3.s[%4]" \
+ : "=w"(__result) \
+ : "0"(__a_), "w"(__b_), "w"(__c_), "i"(__d) \
+ : /* No clobbers */); \
+ __result; \
+ })
+
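+/* vmlsq_n: multiply-subtract by a scalar, broadcast from lane 0 of the
+   register holding C.  */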
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlsq_n_f32 (float32x4_t a, float32x4_t b, float32_t c)
+{
+ float32x4_t result;
+ float32x4_t t1;
+ __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fsub %0.4s, %0.4s, %1.4s"
+ : "=w"(result), "=w"(t1)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmlsq_n_f64 (float64x2_t a, float64x2_t b, float64_t c)
+{
+ float64x2_t result;
+ float64x2_t t1;
+ __asm__ ("fmul %1.2d, %3.2d, %4.d[0]; fsub %0.2d, %0.2d, %1.2d"
+ : "=w"(result), "=w"(t1)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsq_n_s16 (int16x8_t a, int16x8_t b, int16_t c)
+{
+ int16x8_t result;
+ __asm__ ("mls %0.8h, %2.8h, %3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsq_n_s32 (int32x4_t a, int32x4_t b, int32_t c)
+{
+ int32x4_t result;
+ __asm__ ("mls %0.4s, %2.4s, %3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c)
+{
+ uint16x8_t result;
+ __asm__ ("mls %0.8h, %2.8h, %3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c)
+{
+ uint32x4_t result;
+ __asm__ ("mls %0.4s, %2.4s, %3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
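+/* vmlsq: element-wise multiply-subtract on full 128-bit vectors.  */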
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmlsq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
+{
+ int8x16_t result;
+ __asm__ ("mls %0.16b,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
+{
+ int16x8_t result;
+ __asm__ ("mls %0.8h,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
+{
+ int32x4_t result;
+ __asm__ ("mls %0.4s,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmlsq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
+{
+ uint8x16_t result;
+ __asm__ ("mls %0.16b,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
+{
+ uint16x8_t result;
+ __asm__ ("mls %0.8h,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
+{
+ uint32x4_t result;
+ __asm__ ("mls %0.4s,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
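+/* vmov_n: broadcast a scalar into every lane of a 64-bit vector.  The
+   single-lane 64-bit element forms use ins into lane 0 rather than
+   dup.  */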
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmov_n_f32 (float32_t a)
+{
+ float32x2_t result;
+ __asm__ ("dup %0.2s, %w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmov_n_p8 (uint32_t a)
+{
+ poly8x8_t result;
+ __asm__ ("dup %0.8b,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vmov_n_p16 (uint32_t a)
+{
+ poly16x4_t result;
+ __asm__ ("dup %0.4h,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmov_n_s8 (int32_t a)
+{
+ int8x8_t result;
+ __asm__ ("dup %0.8b,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmov_n_s16 (int32_t a)
+{
+ int16x4_t result;
+ __asm__ ("dup %0.4h,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmov_n_s32 (int32_t a)
+{
+ int32x2_t result;
+ __asm__ ("dup %0.2s,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vmov_n_s64 (int64_t a)
+{
+ int64x1_t result;
+ __asm__ ("ins %0.d[0],%x1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmov_n_u8 (uint32_t a)
+{
+ uint8x8_t result;
+ __asm__ ("dup %0.8b,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmov_n_u16 (uint32_t a)
+{
+ uint16x4_t result;
+ __asm__ ("dup %0.4h,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmov_n_u32 (uint32_t a)
+{
+ uint32x2_t result;
+ __asm__ ("dup %0.2s,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vmov_n_u64 (uint64_t a)
+{
+ uint64x1_t result;
+ __asm__ ("ins %0.d[0],%x1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
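+/* vmovl_high: widen the elements of the high half of A, implemented as
+   a shift-left-long of the top half by zero (sshll2/ushll2 #0).  */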
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmovl_high_s8 (int8x16_t a)
+{
+ int16x8_t result;
+ __asm__ ("sshll2 %0.8h,%1.16b,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmovl_high_s16 (int16x8_t a)
+{
+ int32x4_t result;
+ __asm__ ("sshll2 %0.4s,%1.8h,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmovl_high_s32 (int32x4_t a)
+{
+ int64x2_t result;
+ __asm__ ("sshll2 %0.2d,%1.4s,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmovl_high_u8 (uint8x16_t a)
+{
+ uint16x8_t result;
+ __asm__ ("ushll2 %0.8h,%1.16b,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmovl_high_u16 (uint16x8_t a)
+{
+ uint32x4_t result;
+ __asm__ ("ushll2 %0.4s,%1.8h,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmovl_high_u32 (uint32x4_t a)
+{
+ uint64x2_t result;
+ __asm__ ("ushll2 %0.2d,%1.4s,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
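+/* vmovl: widen a 64-bit vector to 128 bits (sshll/ushll #0).  */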
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmovl_s8 (int8x8_t a)
+{
+ int16x8_t result;
+ __asm__ ("sshll %0.8h,%1.8b,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmovl_s16 (int16x4_t a)
+{
+ int32x4_t result;
+ __asm__ ("sshll %0.4s,%1.4h,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmovl_s32 (int32x2_t a)
+{
+ int64x2_t result;
+ __asm__ ("sshll %0.2d,%1.2s,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmovl_u8 (uint8x8_t a)
+{
+ uint16x8_t result;
+ __asm__ ("ushll %0.8h,%1.8b,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmovl_u16 (uint16x4_t a)
+{
+ uint32x4_t result;
+ __asm__ ("ushll %0.4s,%1.4h,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmovl_u32 (uint32x2_t a)
+{
+ uint64x2_t result;
+ __asm__ ("ushll %0.2d,%1.2s,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
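+/* vmovn_high: narrow the 128-bit source B into the high half of the
+   result, keeping the 64-bit vector A as the low half.  A typical use
+   pairs it with vmovn, e.g.
+     int8x16_t r = vmovn_high_s16 (vmovn_s16 (lo), hi);
+   which narrows two int16x8_t values into a single int8x16_t.  */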
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmovn_high_s16 (int8x8_t a, int16x8_t b)
+{
+ int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
+ __asm__ ("xtn2 %0.16b,%2.8h"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmovn_high_s32 (int16x4_t a, int32x4_t b)
+{
+ int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
+ __asm__ ("xtn2 %0.8h,%2.4s"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmovn_high_s64 (int32x2_t a, int64x2_t b)
+{
+ int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
+ __asm__ ("xtn2 %0.4s,%2.2d"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmovn_high_u16 (uint8x8_t a, uint16x8_t b)
+{
+ uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+ __asm__ ("xtn2 %0.16b,%2.8h"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmovn_high_u32 (uint16x4_t a, uint32x4_t b)
+{
+ uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+ __asm__ ("xtn2 %0.8h,%2.4s"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmovn_high_u64 (uint32x2_t a, uint64x2_t b)
+{
+ uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+ __asm__ ("xtn2 %0.4s,%2.2d"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
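+/* vmovn: narrow each element to half width, discarding the high bits
+   (xtn).  */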
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmovn_s16 (int16x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("xtn %0.8b,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmovn_s32 (int32x4_t a)
+{
+ int16x4_t result;
+ __asm__ ("xtn %0.4h,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmovn_s64 (int64x2_t a)
+{
+ int32x2_t result;
+ __asm__ ("xtn %0.2s,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmovn_u16 (uint16x8_t a)
+{
+ uint8x8_t result;
+ __asm__ ("xtn %0.8b,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmovn_u32 (uint32x4_t a)
+{
+ uint16x4_t result;
+ __asm__ ("xtn %0.4h,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmovn_u64 (uint64x2_t a)
+{
+ uint32x2_t result;
+ __asm__ ("xtn %0.2s,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
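+/* vmovq_n: broadcast a scalar into every lane of a 128-bit vector.  */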
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmovq_n_f32 (float32_t a)
+{
+ float32x4_t result;
+ __asm__ ("dup %0.4s, %w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmovq_n_f64 (float64_t a)
+{
+ return (float64x2_t) {a, a};
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmovq_n_p8 (uint32_t a)
+{
+ poly8x16_t result;
+ __asm__ ("dup %0.16b,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vmovq_n_p16 (uint32_t a)
+{
+ poly16x8_t result;
+ __asm__ ("dup %0.8h,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmovq_n_s8 (int32_t a)
+{
+ int8x16_t result;
+ __asm__ ("dup %0.16b,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmovq_n_s16 (int32_t a)
+{
+ int16x8_t result;
+ __asm__ ("dup %0.8h,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmovq_n_s32 (int32_t a)
+{
+ int32x4_t result;
+ __asm__ ("dup %0.4s,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmovq_n_s64 (int64_t a)
+{
+ int64x2_t result;
+ __asm__ ("dup %0.2d,%x1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmovq_n_u8 (uint32_t a)
+{
+ uint8x16_t result;
+ __asm__ ("dup %0.16b,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmovq_n_u16 (uint32_t a)
+{
+ uint16x8_t result;
+ __asm__ ("dup %0.8h,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmovq_n_u32 (uint32_t a)
+{
+ uint32x4_t result;
+ __asm__ ("dup %0.4s,%w1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmovq_n_u64 (uint64_t a)
+{
+ uint64x2_t result;
+ __asm__ ("dup %0.2d,%x1"
+ : "=w"(result)
+ : "r"(a)
+ : /* No clobbers */);
+ return result;
+}
+
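+/* vmul_lane / vmul_laneq: multiply a 64-bit vector by a single lane of
+   B.  Note that both variants declare the lane operand with its
+   128-bit type, so the full register is visible to the lane index C.  */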
+#define vmul_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32x2_t a_ = (a); \
+ float32x2_t result; \
+ __asm__ ("fmul %0.2s,%1.2s,%2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmul_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int16x4_t result; \
+ __asm__ ("mul %0.4h,%1.4h,%2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmul_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int32x2_t result; \
+ __asm__ ("mul %0.2s,%1.2s,%2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmul_lane_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x4_t result; \
+ __asm__ ("mul %0.4h,%1.4h,%2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmul_lane_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x2_t result; \
+ __asm__ ("mul %0.2s, %1.2s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmul_laneq_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32x2_t a_ = (a); \
+ float32x2_t result; \
+ __asm__ ("fmul %0.2s, %1.2s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmul_laneq_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int16x4_t result; \
+ __asm__ ("mul %0.4h, %1.4h, %2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmul_laneq_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int32x2_t result; \
+ __asm__ ("mul %0.2s, %1.2s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmul_laneq_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x4_t result; \
+ __asm__ ("mul %0.4h, %1.4h, %2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmul_laneq_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x2_t result; \
+ __asm__ ("mul %0.2s, %1.2s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
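+/* vmul_n: multiply every element by a scalar, broadcast from lane 0.  */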
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_n_f32 (float32x2_t a, float32_t b)
+{
+ float32x2_t result;
+ __asm__ ("fmul %0.2s,%1.2s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_n_s16 (int16x4_t a, int16_t b)
+{
+ int16x4_t result;
+ __asm__ ("mul %0.4h,%1.4h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_n_s32 (int32x2_t a, int32_t b)
+{
+ int32x2_t result;
+ __asm__ ("mul %0.2s,%1.2s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_n_u16 (uint16x4_t a, uint16_t b)
+{
+ uint16x4_t result;
+ __asm__ ("mul %0.4h,%1.4h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_n_u32 (uint32x2_t a, uint32_t b)
+{
+ uint32x2_t result;
+ __asm__ ("mul %0.2s,%1.2s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
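+/* vmuld_lane: scalar double-precision multiply by a lane of B.  */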
+#define vmuld_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x2_t b_ = (b); \
+ float64_t a_ = (a); \
+ float64_t result; \
+ __asm__ ("fmul %d0,%d1,%2.d[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
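+/* vmull_high_lane / vmull_high_laneq: widening multiply of the high
+   half of A by a lane of B (smull2/umull2).  */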
+#define vmull_high_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16x8_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smull2 %0.4s, %1.8h, %2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_high_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smull2 %0.2d, %1.4s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_high_lane_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint16x8_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umull2 %0.4s, %1.8h, %2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_high_lane_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umull2 %0.2d, %1.4s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_high_laneq_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16x8_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smull2 %0.4s, %1.8h, %2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_high_laneq_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smull2 %0.2d, %1.4s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_high_laneq_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint16x8_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umull2 %0.4s, %1.8h, %2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_high_laneq_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umull2 %0.2d, %1.4s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
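+/* vmull_high_n: widening multiply of the high half of A by a scalar.  */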
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_high_n_s16 (int16x8_t a, int16_t b)
+{
+ int32x4_t result;
+ __asm__ ("smull2 %0.4s,%1.8h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_high_n_s32 (int32x4_t a, int32_t b)
+{
+ int64x2_t result;
+ __asm__ ("smull2 %0.2d,%1.4s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_high_n_u16 (uint16x8_t a, uint16_t b)
+{
+ uint32x4_t result;
+ __asm__ ("umull2 %0.4s,%1.8h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_high_n_u32 (uint32x4_t a, uint32_t b)
+{
+ uint64x2_t result;
+ __asm__ ("umull2 %0.2d,%1.4s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
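+/* vmull_high: widening multiply of the high halves, including the
+   polynomial form (pmull2).  */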
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vmull_high_p8 (poly8x16_t a, poly8x16_t b)
+{
+ poly16x8_t result;
+ __asm__ ("pmull2 %0.8h,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmull_high_s8 (int8x16_t a, int8x16_t b)
+{
+ int16x8_t result;
+ __asm__ ("smull2 %0.8h,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_high_s16 (int16x8_t a, int16x8_t b)
+{
+ int32x4_t result;
+ __asm__ ("smull2 %0.4s,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_high_s32 (int32x4_t a, int32x4_t b)
+{
+ int64x2_t result;
+ __asm__ ("smull2 %0.2d,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmull_high_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint16x8_t result;
+ __asm__ ("umull2 %0.8h,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_high_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint32x4_t result;
+ __asm__ ("umull2 %0.4s,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_high_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint64x2_t result;
+ __asm__ ("umull2 %0.2d,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
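+/* vmull_lane / vmull_laneq: widening multiply by a lane of B.  */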
+#define vmull_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smull %0.4s,%1.4h,%2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smull %0.2d,%1.2s,%2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_lane_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umull %0.4s,%1.4h,%2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_lane_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umull %0.2d, %1.2s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_laneq_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smull %0.4s, %1.4h, %2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_laneq_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smull %0.2d, %1.2s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_laneq_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umull %0.4s, %1.4h, %2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_laneq_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umull %0.2d, %1.2s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
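+/* vmull_n: widening multiply by a scalar.  Each product is computed at
+   double the element width, so the multiplication itself cannot
+   overflow, e.g. vmull_n_s16 returns int32x4_t products.  */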
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_n_s16 (int16x4_t a, int16_t b)
+{
+ int32x4_t result;
+ __asm__ ("smull %0.4s,%1.4h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_n_s32 (int32x2_t a, int32_t b)
+{
+ int64x2_t result;
+ __asm__ ("smull %0.2d,%1.2s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_n_u16 (uint16x4_t a, uint16_t b)
+{
+ uint32x4_t result;
+ __asm__ ("umull %0.4s,%1.4h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_n_u32 (uint32x2_t a, uint32_t b)
+{
+ uint64x2_t result;
+ __asm__ ("umull %0.2d,%1.2s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
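+/* vmull: widening multiply of two 64-bit vectors, including the
+   polynomial (carry-less) form pmull.  */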
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vmull_p8 (poly8x8_t a, poly8x8_t b)
+{
+ poly16x8_t result;
+ __asm__ ("pmull %0.8h, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmull_s8 (int8x8_t a, int8x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("smull %0.8h, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_s16 (int16x4_t a, int16x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("smull %0.4s, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_s32 (int32x2_t a, int32x2_t b)
+{
+ int64x2_t result;
+ __asm__ ("smull %0.2d, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmull_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("umull %0.8h, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("umull %0.4s, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("umull %0.2d, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
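+/* vmulq_lane / vmulq_laneq: full-width multiply by a lane of B.  */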
+#define vmulq_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32x4_t a_ = (a); \
+ float32x4_t result; \
+ __asm__ ("fmul %0.4s, %1.4s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmulq_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x2_t b_ = (b); \
+ float64x2_t a_ = (a); \
+ float64x2_t result; \
+ __asm__ ("fmul %0.2d,%1.2d,%2.d[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmulq_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16x8_t a_ = (a); \
+ int16x8_t result; \
+ __asm__ ("mul %0.8h,%1.8h,%2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmulq_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("mul %0.4s,%1.4s,%2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmulq_lane_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint16x8_t a_ = (a); \
+ uint16x8_t result; \
+ __asm__ ("mul %0.8h,%1.8h,%2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmulq_lane_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("mul %0.4s, %1.4s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmulq_laneq_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32x4_t a_ = (a); \
+ float32x4_t result; \
+ __asm__ ("fmul %0.4s, %1.4s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmulq_laneq_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x2_t b_ = (b); \
+ float64x2_t a_ = (a); \
+ float64x2_t result; \
+ __asm__ ("fmul %0.2d,%1.2d,%2.d[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmulq_laneq_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16x8_t a_ = (a); \
+ int16x8_t result; \
+ __asm__ ("mul %0.8h, %1.8h, %2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmulq_laneq_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("mul %0.4s, %1.4s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmulq_laneq_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint16x8_t a_ = (a); \
+ uint16x8_t result; \
+ __asm__ ("mul %0.8h, %1.8h, %2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmulq_laneq_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("mul %0.4s, %1.4s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
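+/* vmulq_n: full-width multiply by a scalar, broadcast from lane 0.  */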
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_n_f32 (float32x4_t a, float32_t b)
+{
+ float32x4_t result;
+ __asm__ ("fmul %0.4s,%1.4s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmulq_n_f64 (float64x2_t a, float64_t b)
+{
+ float64x2_t result;
+ __asm__ ("fmul %0.2d,%1.2d,%2.d[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_n_s16 (int16x8_t a, int16_t b)
+{
+ int16x8_t result;
+ __asm__ ("mul %0.8h,%1.8h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_n_s32 (int32x4_t a, int32_t b)
+{
+ int32x4_t result;
+ __asm__ ("mul %0.4s,%1.4s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_n_u16 (uint16x8_t a, uint16_t b)
+{
+ uint16x8_t result;
+ __asm__ ("mul %0.8h,%1.8h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_n_u32 (uint32x4_t a, uint32_t b)
+{
+ uint32x4_t result;
+ __asm__ ("mul %0.4s,%1.4s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
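+/* vmuls_lane: scalar single-precision multiply by a lane of B.  */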
+#define vmuls_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32_t a_ = (a); \
+ float32_t result; \
+ __asm__ ("fmul %s0,%s1,%2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
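+/* vmulx: multiply-extended (fmulx), which returns 2.0 with the
+   appropriate sign, instead of a NaN, for the 0.0 * infinity case.  */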
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmulx_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("fmulx %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vmulx_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32x2_t a_ = (a); \
+ float32x2_t result; \
+ __asm__ ("fmulx %0.2s,%1.2s,%2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vmulxd_f64 (float64_t a, float64_t b)
+{
+ float64_t result;
+ __asm__ ("fmulx %d0, %d1, %d2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulxq_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("fmulx %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmulxq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("fmulx %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vmulxq_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32x4_t a_ = (a); \
+ float32x4_t result; \
+ __asm__ ("fmulx %0.4s,%1.4s,%2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmulxq_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x2_t b_ = (b); \
+ float64x2_t a_ = (a); \
+ float64x2_t result; \
+ __asm__ ("fmulx %0.2d,%1.2d,%2.d[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vmulxs_f32 (float32_t a, float32_t b)
+{
+ float32_t result;
+ __asm__ ("fmulx %s0, %s1, %s2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
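+/* vmvn: bitwise NOT of the vector (mvn, an alias of the not
+   instruction).  */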
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmvn_p8 (poly8x8_t a)
+{
+ poly8x8_t result;
+ __asm__ ("mvn %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmvn_s8 (int8x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("mvn %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmvn_s16 (int16x4_t a)
+{
+ int16x4_t result;
+ __asm__ ("mvn %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmvn_s32 (int32x2_t a)
+{
+ int32x2_t result;
+ __asm__ ("mvn %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmvn_u8 (uint8x8_t a)
+{
+ uint8x8_t result;
+ __asm__ ("mvn %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmvn_u16 (uint16x4_t a)
+{
+ uint16x4_t result;
+ __asm__ ("mvn %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmvn_u32 (uint32x2_t a)
+{
+ uint32x2_t result;
+ __asm__ ("mvn %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmvnq_p8 (poly8x16_t a)
+{
+ poly8x16_t result;
+ __asm__ ("mvn %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmvnq_s8 (int8x16_t a)
+{
+ int8x16_t result;
+ __asm__ ("mvn %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmvnq_s16 (int16x8_t a)
+{
+ int16x8_t result;
+ __asm__ ("mvn %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmvnq_s32 (int32x4_t a)
+{
+ int32x4_t result;
+ __asm__ ("mvn %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmvnq_u8 (uint8x16_t a)
+{
+ uint8x16_t result;
+ __asm__ ("mvn %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmvnq_u16 (uint16x8_t a)
+{
+ uint16x8_t result;
+ __asm__ ("mvn %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmvnq_u32 (uint32x4_t a)
+{
+ uint32x4_t result;
+ __asm__ ("mvn %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
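+/* vneg: negate each element (neg/fneg).  */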
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vneg_f32 (float32x2_t a)
+{
+ float32x2_t result;
+ __asm__ ("fneg %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vneg_s8 (int8x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("neg %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vneg_s16 (int16x4_t a)
+{
+ int16x4_t result;
+ __asm__ ("neg %0.4h,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vneg_s32 (int32x2_t a)
+{
+ int32x2_t result;
+ __asm__ ("neg %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vnegq_f32 (float32x4_t a)
+{
+ float32x4_t result;
+ __asm__ ("fneg %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vnegq_f64 (float64x2_t a)
+{
+ float64x2_t result;
+ __asm__ ("fneg %0.2d,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vnegq_s8 (int8x16_t a)
+{
+ int8x16_t result;
+ __asm__ ("neg %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vnegq_s16 (int16x8_t a)
+{
+ int16x8_t result;
+ __asm__ ("neg %0.8h,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vnegq_s32 (int32x4_t a)
+{
+ int32x4_t result;
+ __asm__ ("neg %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vnegq_s64 (int64x2_t a)
+{
+ int64x2_t result;
+ __asm__ ("neg %0.2d,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
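+/* vpadal: pairwise add the elements of B at double width and
+   accumulate the sums into A (sadalp/uadalp).  */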
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpadal_s8 (int16x4_t a, int8x8_t b)
+{
+ int16x4_t result;
+ __asm__ ("sadalp %0.4h,%2.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpadal_s16 (int32x2_t a, int16x4_t b)
+{
+ int32x2_t result;
+ __asm__ ("sadalp %0.2s,%2.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vpadal_s32 (int64x1_t a, int32x2_t b)
+{
+ int64x1_t result;
+ __asm__ ("sadalp %0.1d,%2.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpadal_u8 (uint16x4_t a, uint8x8_t b)
+{
+ uint16x4_t result;
+ __asm__ ("uadalp %0.4h,%2.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpadal_u16 (uint32x2_t a, uint16x4_t b)
+{
+ uint32x2_t result;
+ __asm__ ("uadalp %0.2s,%2.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vpadal_u32 (uint64x1_t a, uint32x2_t b)
+{
+ uint64x1_t result;
+ __asm__ ("uadalp %0.1d,%2.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpadalq_s8 (int16x8_t a, int8x16_t b)
+{
+ int16x8_t result;
+ __asm__ ("sadalp %0.8h,%2.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpadalq_s16 (int32x4_t a, int16x8_t b)
+{
+ int32x4_t result;
+ __asm__ ("sadalp %0.4s,%2.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vpadalq_s32 (int64x2_t a, int32x4_t b)
+{
+ int64x2_t result;
+ __asm__ ("sadalp %0.2d,%2.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpadalq_u8 (uint16x8_t a, uint8x16_t b)
+{
+ uint16x8_t result;
+ __asm__ ("uadalp %0.8h,%2.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpadalq_u16 (uint32x4_t a, uint16x8_t b)
+{
+ uint32x4_t result;
+ __asm__ ("uadalp %0.4s,%2.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vpadalq_u32 (uint64x2_t a, uint32x4_t b)
+{
+ uint64x2_t result;
+ __asm__ ("uadalp %0.2d,%2.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
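+/* vpadd: pairwise add adjacent elements.  The integer forms go through
+   the __builtin_aarch64_addp* builtins; the float form is still inline
+   asm (faddp).  */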
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpadd_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("faddp %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vpadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __builtin_aarch64_addpv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __builtin_aarch64_addpv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __builtin_aarch64_addpv2si (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vpadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_addpv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_addpv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_addpv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
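+/* vpaddd_f64: add the two elements of A, producing a scalar (faddp
+   with a scalar destination).  */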
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vpaddd_f64 (float64x2_t a)
+{
+ float64_t result;
+ __asm__ ("faddp %d0,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
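+/* vpaddl: pairwise add adjacent elements, widening the sums to twice
+   the element size (saddlp/uaddlp).  */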
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpaddl_s8 (int8x8_t a)
+{
+ int16x4_t result;
+ __asm__ ("saddlp %0.4h,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpaddl_s16 (int16x4_t a)
+{
+ int32x2_t result;
+ __asm__ ("saddlp %0.2s,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vpaddl_s32 (int32x2_t a)
+{
+ int64x1_t result;
+ __asm__ ("saddlp %0.1d,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpaddl_u8 (uint8x8_t a)
+{
+ uint16x4_t result;
+ __asm__ ("uaddlp %0.4h,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpaddl_u16 (uint16x4_t a)
+{
+ uint32x2_t result;
+ __asm__ ("uaddlp %0.2s,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vpaddl_u32 (uint32x2_t a)
+{
+ uint64x1_t result;
+ __asm__ ("uaddlp %0.1d,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpaddlq_s8 (int8x16_t a)
+{
+ int16x8_t result;
+ __asm__ ("saddlp %0.8h,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpaddlq_s16 (int16x8_t a)
+{
+ int32x4_t result;
+ __asm__ ("saddlp %0.4s,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vpaddlq_s32 (int32x4_t a)
+{
+ int64x2_t result;
+ __asm__ ("saddlp %0.2d,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpaddlq_u8 (uint8x16_t a)
+{
+ uint16x8_t result;
+ __asm__ ("uaddlp %0.8h,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpaddlq_u16 (uint16x8_t a)
+{
+ uint32x4_t result;
+ __asm__ ("uaddlp %0.4s,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vpaddlq_u32 (uint32x4_t a)
+{
+ uint64x2_t result;
+ __asm__ ("uaddlp %0.2d,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
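+/* vpaddq: pairwise add across two 128-bit vectors; the sums from A
+   fill the low half of the result and the sums from B the high
+   half.  */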
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vpaddq_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("faddp %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vpaddq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("faddp %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vpaddq_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("addp %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpaddq_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("addp %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpaddq_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("addp %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vpaddq_s64 (int64x2_t a, int64x2_t b)
+{
+ int64x2_t result;
+ __asm__ ("addp %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vpaddq_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("addp %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpaddq_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("addp %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpaddq_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("addp %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vpaddq_u64 (uint64x2_t a, uint64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("addp %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
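+/* vpadds_f32: add the two elements of A, producing a scalar.  */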
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vpadds_f32 (float32x2_t a)
+{
+ float32_t result;
+ __asm__ ("faddp %s0,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
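+/* vpmax: pairwise maximum (smaxp/umaxp/fmaxp).  The vpmaxnm forms use
+   fmaxnmp, which implements the IEEE 754-2008 maxNum treatment of NaN
+   operands.  */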
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpmax_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("fmaxp %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vpmax_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("smaxp %0.8b, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpmax_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("smaxp %0.4h, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpmax_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("smaxp %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vpmax_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("umaxp %0.8b, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpmax_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("umaxp %0.4h, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpmax_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("umaxp %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpmaxnm_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("fmaxnmp %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vpmaxnmq_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("fmaxnmp %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vpmaxnmq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("fmaxnmp %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vpmaxnmqd_f64 (float64x2_t a)
+{
+ float64_t result;
+ __asm__ ("fmaxnmp %d0,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vpmaxnms_f32 (float32x2_t a)
+{
+ float32_t result;
+ __asm__ ("fmaxnmp %s0,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
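+/* Note (editorial): the *nm variants follow the IEEE 754-2008 maxNum/minNum
+   rules, so when exactly one operand of a pair is a quiet NaN the numeric
+   operand is returned, whereas fmaxp/fminp would propagate the NaN.  */
+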
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vpmaxq_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("fmaxp %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vpmaxq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("fmaxp %0.2d, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vpmaxq_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("smaxp %0.16b, %1.16b, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpmaxq_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("smaxp %0.8h, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpmaxq_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("smaxp %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vpmaxq_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("umaxp %0.16b, %1.16b, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpmaxq_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("umaxp %0.8h, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpmaxq_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("umaxp %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vpmaxqd_f64 (float64x2_t a)
+{
+ float64_t result;
+ __asm__ ("fmaxp %d0,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vpmaxs_f32 (float32x2_t a)
+{
+ float32_t result;
+ __asm__ ("fmaxp %s0,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpmin_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("fminp %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vpmin_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("sminp %0.8b, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpmin_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("sminp %0.4h, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpmin_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("sminp %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vpmin_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("uminp %0.8b, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpmin_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("uminp %0.4h, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpmin_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("uminp %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpminnm_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("fminnmp %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vpminnmq_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("fminnmp %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vpminnmq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("fminnmp %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vpminnmqd_f64 (float64x2_t a)
+{
+ float64_t result;
+ __asm__ ("fminnmp %d0,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vpminnms_f32 (float32x2_t a)
+{
+ float32_t result;
+ __asm__ ("fminnmp %s0,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vpminq_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("fminp %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vpminq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("fminp %0.2d, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vpminq_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("sminp %0.16b, %1.16b, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpminq_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("sminp %0.8h, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpminq_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("sminp %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vpminq_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("uminp %0.16b, %1.16b, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpminq_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("uminp %0.8h, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpminq_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("uminp %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vpminqd_f64 (float64x2_t a)
+{
+ float64_t result;
+ __asm__ ("fminp %d0,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vpmins_f32 (float32x2_t a)
+{
+ float32_t result;
+ __asm__ ("fminp %s0,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_n_s16 (int16x4_t a, int16_t b)
+{
+ int16x4_t result;
+ __asm__ ("sqdmulh %0.4h,%1.4h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_n_s32 (int32x2_t a, int32_t b)
+{
+ int32x2_t result;
+ __asm__ ("sqdmulh %0.2s,%1.2s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_n_s16 (int16x8_t a, int16_t b)
+{
+ int16x8_t result;
+ __asm__ ("sqdmulh %0.8h,%1.8h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_n_s32 (int32x4_t a, int32_t b)
+{
+ int32x4_t result;
+ __asm__ ("sqdmulh %0.4s,%1.4s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
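+/* Illustrative usage sketch, not part of the original header: sqdmulh
+   keeps the high half of the doubled product, i.e. sat ((2 * a * b) >> 16)
+   per 16-bit lane, which is exactly a saturating Q15 fixed-point multiply:
+
+     int16x4_t q15_scale (int16x4_t x, int16_t coeff)  // hypothetical helper
+     {
+       return vqdmulh_n_s16 (x, coeff);
+     }  */
+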
+#define vqmlalh_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16_t a_ = (a); \
+ int32_t result = 0; /* sqdmlal accumulates into %0.  */ \
+ __asm__ ("sqdmlal %s0,%h1,%2.h[%3]" \
+ : "+w"(result) \
+ : "w"(a_), "x"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqmlalh_s16 (int16_t a, int16_t b)
+{
+ int32_t result = 0; /* sqdmlal accumulates into %0.  */
+ __asm__ ("sqdmlal %s0,%h1,%h2"
+ : "+w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vqmlals_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32_t a_ = (a); \
+ int64_t result = 0; /* sqdmlal accumulates into %0.  */ \
+ __asm__ ("sqdmlal %d0,%s1,%2.s[%3]" \
+ : "+w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vqmlals_s32 (int32_t a, int32_t b)
+{
+ int64_t result = 0; /* sqdmlal accumulates into %0.  */
+ __asm__ ("sqdmlal %d0,%s1,%s2"
+ : "+w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vqmlslh_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16_t a_ = (a); \
+ int32_t result = 0; /* sqdmlsl subtracts from %0.  */ \
+ __asm__ ("sqdmlsl %s0,%h1,%2.h[%3]" \
+ : "+w"(result) \
+ : "w"(a_), "x"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqmlslh_s16 (int16_t a, int16_t b)
+{
+ int32_t result = 0; /* sqdmlsl subtracts from %0.  */
+ __asm__ ("sqdmlsl %s0,%h1,%h2"
+ : "+w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vqmlsls_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32_t a_ = (a); \
+ int64_t result = 0; /* sqdmlsl subtracts from %0.  */ \
+ __asm__ ("sqdmlsl %d0,%s1,%2.s[%3]" \
+ : "+w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vqmlsls_s32 (int32_t a, int32_t b)
+{
+ int64_t result = 0; /* sqdmlsl subtracts from %0.  */
+ __asm__ ("sqdmlsl %d0,%s1,%s2"
+ : "+w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqmovn_high_s16 (int8x8_t a, int16x8_t b)
+{
+ int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
+ __asm__ ("sqxtn2 %0.16b, %2.8h"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqmovn_high_s32 (int16x4_t a, int32x4_t b)
+{
+ int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
+ __asm__ ("sqxtn2 %0.8h, %2.4s"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqmovn_high_s64 (int32x2_t a, int64x2_t b)
+{
+ int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
+ __asm__ ("sqxtn2 %0.4s, %2.2d"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqmovn_high_u16 (uint8x8_t a, uint16x8_t b)
+{
+ uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+ __asm__ ("uqxtn2 %0.16b, %2.8h"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqmovn_high_u32 (uint16x4_t a, uint32x4_t b)
+{
+ uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+ __asm__ ("uqxtn2 %0.8h, %2.4s"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqmovn_high_u64 (uint32x2_t a, uint64x2_t b)
+{
+ uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+ __asm__ ("uqxtn2 %0.4s, %2.2d"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqmovun_high_s16 (uint8x8_t a, int16x8_t b)
+{
+ uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+ __asm__ ("sqxtun2 %0.16b, %2.8h"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqmovun_high_s32 (uint16x4_t a, int32x4_t b)
+{
+ uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+ __asm__ ("sqxtun2 %0.8h, %2.4s"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqmovun_high_s64 (uint32x2_t a, int64x2_t b)
+{
+ uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+ __asm__ ("sqxtun2 %0.4s, %2.2d"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
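+/* Note (editorial): in the *_high narrowing intrinsics above, vcombine_*
+   places the already-narrowed low half `a' in the bottom of the result and
+   the sqxtn2/uqxtn2/sqxtun2 instruction overwrites the top half, e.g.:
+
+     int8x16_t r = vqmovn_high_s16 (vqmovn_s16 (lo), hi);  // narrow lo ++ hi  */
+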
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_n_s16 (int16x4_t a, int16_t b)
+{
+ int16x4_t result;
+ __asm__ ("sqrdmulh %0.4h,%1.4h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_n_s32 (int32x2_t a, int32_t b)
+{
+ int32x2_t result;
+ __asm__ ("sqrdmulh %0.2s,%1.2s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_n_s16 (int16x8_t a, int16_t b)
+{
+ int16x8_t result;
+ __asm__ ("sqrdmulh %0.8h,%1.8h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
+{
+ int32x4_t result;
+ __asm__ ("sqrdmulh %0.4s,%1.4s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vqrshrn_high_n_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int8x8_t a_ = (a); \
+ int8x16_t result = vcombine_s8 \
+ (a_, vcreate_s8 (UINT64_C (0x0))); \
+ __asm__ ("sqrshrn2 %0.16b, %1.8h, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqrshrn_high_n_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int16x8_t result = vcombine_s16 \
+ (a_, vcreate_s16 (UINT64_C (0x0))); \
+ __asm__ ("sqrshrn2 %0.8h, %1.4s, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqrshrn_high_n_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int32x4_t result = vcombine_s32 \
+ (a_, vcreate_s32 (UINT64_C (0x0))); \
+ __asm__ ("sqrshrn2 %0.4s, %1.2d, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqrshrn_high_n_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint8x8_t a_ = (a); \
+ uint8x16_t result = vcombine_u8 \
+ (a_, vcreate_u8 (UINT64_C (0x0))); \
+ __asm__ ("uqrshrn2 %0.16b, %1.8h, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqrshrn_high_n_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x8_t result = vcombine_u16 \
+ (a_, vcreate_u16 (UINT64_C (0x0))); \
+ __asm__ ("uqrshrn2 %0.8h, %1.4s, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqrshrn_high_n_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x2_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x4_t result = vcombine_u32 \
+ (a_, vcreate_u32 (UINT64_C (0x0))); \
+ __asm__ ("uqrshrn2 %0.4s, %1.2d, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqrshrun_high_n_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ uint8x8_t a_ = (a); \
+ uint8x16_t result = vcombine_u8 \
+ (a_, vcreate_u8 (UINT64_C (0x0))); \
+ __asm__ ("sqrshrun2 %0.16b, %1.8h, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqrshrun_high_n_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x8_t result = vcombine_u16 \
+ (a_, vcreate_u16 (UINT64_C (0x0))); \
+ __asm__ ("sqrshrun2 %0.8h, %1.4s, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqrshrun_high_n_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x4_t result = vcombine_u32 \
+ (a_, vcreate_u32 (UINT64_C (0x0))); \
+ __asm__ ("sqrshrun2 %0.4s, %1.2d, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqshrn_high_n_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int8x8_t a_ = (a); \
+ int8x16_t result = vcombine_s8 \
+ (a_, vcreate_s8 (UINT64_C (0x0))); \
+ __asm__ ("sqshrn2 %0.16b, %1.8h, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqshrn_high_n_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int16x8_t result = vcombine_s16 \
+ (a_, vcreate_s16 (UINT64_C (0x0))); \
+ __asm__ ("sqshrn2 %0.8h, %1.4s, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqshrn_high_n_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int32x4_t result = vcombine_s32 \
+ (a_, vcreate_s32 (UINT64_C (0x0))); \
+ __asm__ ("sqshrn2 %0.4s, %1.2d, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqshrn_high_n_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint8x8_t a_ = (a); \
+ uint8x16_t result = vcombine_u8 \
+ (a_, vcreate_u8 (UINT64_C (0x0))); \
+ __asm__ ("uqshrn2 %0.16b, %1.8h, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqshrn_high_n_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x8_t result = vcombine_u16 \
+ (a_, vcreate_u16 (UINT64_C (0x0))); \
+ __asm__ ("uqshrn2 %0.8h, %1.4s, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqshrn_high_n_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x2_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x4_t result = vcombine_u32 \
+ (a_, vcreate_u32 (UINT64_C (0x0))); \
+ __asm__ ("uqshrn2 %0.4s, %1.2d, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqshrun_high_n_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ uint8x8_t a_ = (a); \
+ uint8x16_t result = vcombine_u8 \
+ (a_, vcreate_u8 (UINT64_C (0x0))); \
+ __asm__ ("sqshrun2 %0.16b, %1.8h, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqshrun_high_n_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x8_t result = vcombine_u16 \
+ (a_, vcreate_u16 (UINT64_C (0x0))); \
+ __asm__ ("sqshrun2 %0.8h, %1.4s, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqshrun_high_n_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x4_t result = vcombine_u32 \
+ (a_, vcreate_u32 (UINT64_C (0x0))); \
+ __asm__ ("sqshrun2 %0.4s, %1.2d, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
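+/* Note (editorial): the shift-right-narrow forms above are macros rather
+   than inline functions because the shift count feeds an "i" (immediate)
+   asm constraint and must therefore be a compile-time constant, e.g.:
+
+     uint8x16_t r = vqrshrun_high_n_s16 (lo8, v16, 4);  // lo8, v16 hypothetical  */
+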
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrbit_s8 (int8x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("rbit %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrbit_u8 (uint8x8_t a)
+{
+ uint8x8_t result;
+ __asm__ ("rbit %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrbitq_s8 (int8x16_t a)
+{
+ int8x16_t result;
+ __asm__ ("rbit %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrbitq_u8 (uint8x16_t a)
+{
+ uint8x16_t result;
+ __asm__ ("rbit %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
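+/* Note (editorial): rbit mirrors the bit order inside each byte (bit i
+   moves to bit 7 - i), which is handy for bit-reflected CRCs:
+
+     uint8x8_t reflected = vrbit_u8 (bytes);  // `bytes' is hypothetical  */
+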
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrecpe_f32 (float32x2_t a)
+{
+ float32x2_t result;
+ __asm__ ("frecpe %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrecpe_u32 (uint32x2_t a)
+{
+ uint32x2_t result;
+ __asm__ ("urecpe %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vrecped_f64 (float64_t a)
+{
+ float64_t result;
+ __asm__ ("frecpe %d0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrecpeq_f32 (float32x4_t a)
+{
+ float32x4_t result;
+ __asm__ ("frecpe %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrecpeq_f64 (float64x2_t a)
+{
+ float64x2_t result;
+ __asm__ ("frecpe %0.2d,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrecpeq_u32 (uint32x4_t a)
+{
+ uint32x4_t result;
+ __asm__ ("urecpe %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vrecpes_f32 (float32_t a)
+{
+ float32_t result;
+ __asm__ ("frecpe %s0,%s1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrecps_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("frecps %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vrecpsd_f64 (float64_t a, float64_t b)
+{
+ float64_t result;
+ __asm__ ("frecps %d0,%d1,%d2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrecpsq_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("frecps %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrecpsq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("frecps %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vrecpss_f32 (float32_t a, float32_t b)
+{
+ float32_t result;
+ __asm__ ("frecps %s0,%s1,%s2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
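+/* Illustrative usage sketch, not part of the original header: frecpe gives
+   roughly 8 bits of 1/a, and each frecps step (which computes 2 - a * x)
+   doubles the precision via Newton-Raphson:
+
+     float32x2_t x = vrecpe_f32 (a);
+     x = vmul_f32 (x, vrecps_f32 (a, x));   // ~16 bits
+     x = vmul_f32 (x, vrecps_f32 (a, x));   // ~full single precision  */
+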
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vrecpxd_f64 (float64_t a)
+{
+ float64_t result;
+ __asm__ ("frecpe %d0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vrecpxs_f32 (float32_t a)
+{
+ float32_t result;
+ __asm__ ("frecpe %s0,%s1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev16_p8 (poly8x8_t a)
+{
+ poly8x8_t result;
+ __asm__ ("rev16 %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev16_s8 (int8x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("rev16 %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev16_u8 (uint8x8_t a)
+{
+ uint8x8_t result;
+ __asm__ ("rev16 %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev16q_p8 (poly8x16_t a)
+{
+ poly8x16_t result;
+ __asm__ ("rev16 %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev16q_s8 (int8x16_t a)
+{
+ int8x16_t result;
+ __asm__ ("rev16 %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev16q_u8 (uint8x16_t a)
+{
+ uint8x16_t result;
+ __asm__ ("rev16 %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
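+/* Illustrative usage sketch, not part of the original header: rev16 swaps
+   the two bytes of every 16-bit half-word, so an endianness swap of a
+   uint16x4_t can be written as:
+
+     uint16x4_t swapped =
+       vreinterpret_u16_u8 (vrev16_u8 (vreinterpret_u8_u16 (v)));  */
+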
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev32_p8 (poly8x8_t a)
+{
+ poly8x8_t result;
+ __asm__ ("rev32 %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vrev32_p16 (poly16x4_t a)
+{
+ poly16x4_t result;
+ __asm__ ("rev32 %0.4h,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev32_s8 (int8x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("rev32 %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrev32_s16 (int16x4_t a)
+{
+ int16x4_t result;
+ __asm__ ("rev32 %0.4h,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev32_u8 (uint8x8_t a)
+{
+ uint8x8_t result;
+ __asm__ ("rev32 %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrev32_u16 (uint16x4_t a)
+{
+ uint16x4_t result;
+ __asm__ ("rev32 %0.4h,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev32q_p8 (poly8x16_t a)
+{
+ poly8x16_t result;
+ __asm__ ("rev32 %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vrev32q_p16 (poly16x8_t a)
+{
+ poly16x8_t result;
+ __asm__ ("rev32 %0.8h,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev32q_s8 (int8x16_t a)
+{
+ int8x16_t result;
+ __asm__ ("rev32 %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrev32q_s16 (int16x8_t a)
+{
+ int16x8_t result;
+ __asm__ ("rev32 %0.8h,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev32q_u8 (uint8x16_t a)
+{
+ uint8x16_t result;
+ __asm__ ("rev32 %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrev32q_u16 (uint16x8_t a)
+{
+ uint16x8_t result;
+ __asm__ ("rev32 %0.8h,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrev64_f32 (float32x2_t a)
+{
+ float32x2_t result;
+ __asm__ ("rev64 %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev64_p8 (poly8x8_t a)
+{
+ poly8x8_t result;
+ __asm__ ("rev64 %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vrev64_p16 (poly16x4_t a)
+{
+ poly16x4_t result;
+ __asm__ ("rev64 %0.4h,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev64_s8 (int8x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("rev64 %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrev64_s16 (int16x4_t a)
+{
+ int16x4_t result;
+ __asm__ ("rev64 %0.4h,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrev64_s32 (int32x2_t a)
+{
+ int32x2_t result;
+ __asm__ ("rev64 %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev64_u8 (uint8x8_t a)
+{
+ uint8x8_t result;
+ __asm__ ("rev64 %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrev64_u16 (uint16x4_t a)
+{
+ uint16x4_t result;
+ __asm__ ("rev64 %0.4h,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrev64_u32 (uint32x2_t a)
+{
+ uint32x2_t result;
+ __asm__ ("rev64 %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrev64q_f32 (float32x4_t a)
+{
+ float32x4_t result;
+ __asm__ ("rev64 %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev64q_p8 (poly8x16_t a)
+{
+ poly8x16_t result;
+ __asm__ ("rev64 %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vrev64q_p16 (poly16x8_t a)
+{
+ poly16x8_t result;
+ __asm__ ("rev64 %0.8h,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev64q_s8 (int8x16_t a)
+{
+ int8x16_t result;
+ __asm__ ("rev64 %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrev64q_s16 (int16x8_t a)
+{
+ int16x8_t result;
+ __asm__ ("rev64 %0.8h,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrev64q_s32 (int32x4_t a)
+{
+ int32x4_t result;
+ __asm__ ("rev64 %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev64q_u8 (uint8x16_t a)
+{
+ uint8x16_t result;
+ __asm__ ("rev64 %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrev64q_u16 (uint16x8_t a)
+{
+ uint16x8_t result;
+ __asm__ ("rev64 %0.8h,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrev64q_u32 (uint32x4_t a)
+{
+ uint32x4_t result;
+ __asm__ ("rev64 %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrnd_f32 (float32x2_t a)
+{
+ float32x2_t result;
+ __asm__ ("frintz %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrnda_f32 (float32x2_t a)
+{
+ float32x2_t result;
+ __asm__ ("frinta %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrndm_f32 (float32x2_t a)
+{
+ float32x2_t result;
+ __asm__ ("frintm %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrndn_f32 (float32x2_t a)
+{
+ float32x2_t result;
+ __asm__ ("frintn %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrndp_f32 (float32x2_t a)
+{
+ float32x2_t result;
+ __asm__ ("frintp %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndq_f32 (float32x4_t a)
+{
+ float32x4_t result;
+ __asm__ ("frintz %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrndq_f64 (float64x2_t a)
+{
+ float64x2_t result;
+ __asm__ ("frintz %0.2d,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndqa_f32 (float32x4_t a)
+{
+ float32x4_t result;
+ __asm__ ("frinta %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrndqa_f64 (float64x2_t a)
+{
+ float64x2_t result;
+ __asm__ ("frinta %0.2d,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndqm_f32 (float32x4_t a)
+{
+ float32x4_t result;
+ __asm__ ("frintm %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrndqm_f64 (float64x2_t a)
+{
+ float64x2_t result;
+ __asm__ ("frintm %0.2d,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndqn_f32 (float32x4_t a)
+{
+ float32x4_t result;
+ __asm__ ("frintn %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrndqn_f64 (float64x2_t a)
+{
+ float64x2_t result;
+ __asm__ ("frintn %0.2d,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndqp_f32 (float32x4_t a)
+{
+ float32x4_t result;
+ __asm__ ("frintp %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrndqp_f64 (float64x2_t a)
+{
+ float64x2_t result;
+ __asm__ ("frintp %0.2d,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
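+/* Note (editorial): the frint variants above map to the IEEE rounding modes
+   as follows: frintz = toward zero, frinta = to nearest (ties away from
+   zero), frintm = toward minus infinity, frintn = to nearest (ties to
+   even), frintp = toward plus infinity.  */
+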
+#define vrshrn_high_n_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int8x8_t a_ = (a); \
+ int8x16_t result = vcombine_s8 \
+ (a_, vcreate_s8 (UINT64_C (0x0))); \
+ __asm__ ("rshrn2 %0.16b,%1.8h,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_high_n_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int16x8_t result = vcombine_s16 \
+ (a_, vcreate_s16 (UINT64_C (0x0))); \
+ __asm__ ("rshrn2 %0.8h,%1.4s,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_high_n_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int32x4_t result = vcombine_s32 \
+ (a_, vcreate_s32 (UINT64_C (0x0))); \
+ __asm__ ("rshrn2 %0.4s,%1.2d,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_high_n_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint8x8_t a_ = (a); \
+ uint8x16_t result = vcombine_u8 \
+ (a_, vcreate_u8 (UINT64_C (0x0))); \
+ __asm__ ("rshrn2 %0.16b,%1.8h,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_high_n_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x8_t result = vcombine_u16 \
+ (a_, vcreate_u16 (UINT64_C (0x0))); \
+ __asm__ ("rshrn2 %0.8h,%1.4s,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_high_n_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x2_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x4_t result = vcombine_u32 \
+ (a_, vcreate_u32 (UINT64_C (0x0))); \
+ __asm__ ("rshrn2 %0.4s,%1.2d,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_n_s16(a, b) \
+ __extension__ \
+ ({ \
+ int16x8_t a_ = (a); \
+ int8x8_t result; \
+ __asm__ ("rshrn %0.8b,%1.8h,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_n_s32(a, b) \
+ __extension__ \
+ ({ \
+ int32x4_t a_ = (a); \
+ int16x4_t result; \
+ __asm__ ("rshrn %0.4h,%1.4s,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_n_s64(a, b) \
+ __extension__ \
+ ({ \
+ int64x2_t a_ = (a); \
+ int32x2_t result; \
+ __asm__ ("rshrn %0.2s,%1.2d,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_n_u16(a, b) \
+ __extension__ \
+ ({ \
+ uint16x8_t a_ = (a); \
+ uint8x8_t result; \
+ __asm__ ("rshrn %0.8b,%1.8h,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_n_u32(a, b) \
+ __extension__ \
+ ({ \
+ uint32x4_t a_ = (a); \
+ uint16x4_t result; \
+ __asm__ ("rshrn %0.4h,%1.4s,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_n_u64(a, b) \
+ __extension__ \
+ ({ \
+ uint64x2_t a_ = (a); \
+ uint32x2_t result; \
+ __asm__ ("rshrn %0.2s,%1.2d,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
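+/* Note (editorial): the rounding narrows above add half of the final bit
+   before shifting, i.e. for an n-bit shift the result lane is
+   (a + (1 << (n - 1))) >> n, truncated to the narrow type, e.g.:
+
+     int8x8_t r = vrshrn_n_s16 (v, 4);   // per lane: (int8_t)((v + 8) >> 4)  */
+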
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrsqrte_f32 (float32x2_t a)
+{
+ float32x2_t result;
+ __asm__ ("frsqrte %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vrsqrte_f64 (float64x1_t a)
+{
+ float64x1_t result;
+ __asm__ ("frsqrte %d0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrsqrte_u32 (uint32x2_t a)
+{
+ uint32x2_t result;
+ __asm__ ("ursqrte %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vrsqrted_f64 (float64_t a)
+{
+ float64_t result;
+ __asm__ ("frsqrte %d0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrsqrteq_f32 (float32x4_t a)
+{
+ float32x4_t result;
+ __asm__ ("frsqrte %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrsqrteq_f64 (float64x2_t a)
+{
+ float64x2_t result;
+ __asm__ ("frsqrte %0.2d,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrsqrteq_u32 (uint32x4_t a)
+{
+ uint32x4_t result;
+ __asm__ ("ursqrte %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vrsqrtes_f32 (float32_t a)
+{
+ float32_t result;
+ __asm__ ("frsqrte %s0,%s1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrsqrts_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("frsqrts %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vrsqrtsd_f64 (float64_t a, float64_t b)
+{
+ float64_t result;
+ __asm__ ("frsqrts %d0,%d1,%d2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrsqrtsq_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("frsqrts %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrsqrtsq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("frsqrts %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vrsqrtss_f32 (float32_t a, float32_t b)
+{
+ float32_t result;
+ __asm__ ("frsqrts %s0,%s1,%s2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
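+/* Illustrative usage sketch, not part of the original header: frsqrte gives
+   roughly 8 bits of 1/sqrt(a), and each frsqrts step, which computes
+   (3 - a * b) / 2, doubles the precision:
+
+     float32x2_t x = vrsqrte_f32 (a);
+     x = vmul_f32 (x, vrsqrts_f32 (vmul_f32 (a, x), x));
+     x = vmul_f32 (x, vrsqrts_f32 (vmul_f32 (a, x), x));  */
+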
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrsubhn_high_s16 (int8x8_t a, int16x8_t b, int16x8_t c)
+{
+ int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
+ __asm__ ("rsubhn2 %0.16b, %1.8h, %2.8h"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrsubhn_high_s32 (int16x4_t a, int32x4_t b, int32x4_t c)
+{
+ int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
+ __asm__ ("rsubhn2 %0.8h, %1.4s, %2.4s"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrsubhn_high_s64 (int32x2_t a, int64x2_t b, int64x2_t c)
+{
+ int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
+ __asm__ ("rsubhn2 %0.4s, %1.2d, %2.2d"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrsubhn_high_u16 (uint8x8_t a, uint16x8_t b, uint16x8_t c)
+{
+ uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+ __asm__ ("rsubhn2 %0.16b, %1.8h, %2.8h"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrsubhn_high_u32 (uint16x4_t a, uint32x4_t b, uint32x4_t c)
+{
+ uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+ __asm__ ("rsubhn2 %0.8h, %1.4s, %2.4s"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrsubhn_high_u64 (uint32x2_t a, uint64x2_t b, uint64x2_t c)
+{
+ uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+ __asm__ ("rsubhn2 %0.4s, %1.2d, %2.2d"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrsubhn_s16 (int16x8_t a, int16x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("rsubhn %0.8b, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrsubhn_s32 (int32x4_t a, int32x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("rsubhn %0.4h, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrsubhn_s64 (int64x2_t a, int64x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("rsubhn %0.2s, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrsubhn_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("rsubhn %0.8b, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrsubhn_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("rsubhn %0.4h, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrsubhn_u64 (uint64x2_t a, uint64x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("rsubhn %0.2s, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
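+/* Note (editorial): rsubhn keeps the rounded high half of the difference;
+   for the 16-to-8-bit form each lane is (uint8_t)((a - b + (1 << 7)) >> 8),
+   and the *_high variants pack that into the top of a 128-bit vector.  */
+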
+#define vset_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x2_t b_ = (b); \
+ float32_t a_ = (a); \
+ float32x2_t result; \
+ __asm__ ("ins %0.s[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x1_t b_ = (b); \
+ float64_t a_ = (a); \
+ float64x1_t result; \
+ __asm__ ("ins %0.d[%3], %x1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x8_t b_ = (b); \
+ poly8_t a_ = (a); \
+ poly8x8_t result; \
+ __asm__ ("ins %0.b[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x4_t b_ = (b); \
+ poly16_t a_ = (a); \
+ poly16x4_t result; \
+ __asm__ ("ins %0.h[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_s8(a, b, c) \
+ __extension__ \
+ ({ \
+ int8x8_t b_ = (b); \
+ int8_t a_ = (a); \
+ int8x8_t result; \
+ __asm__ ("ins %0.b[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x4_t b_ = (b); \
+ int16_t a_ = (a); \
+ int16x4_t result; \
+ __asm__ ("ins %0.h[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x2_t b_ = (b); \
+ int32_t a_ = (a); \
+ int32x2_t result; \
+ __asm__ ("ins %0.s[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x1_t b_ = (b); \
+ int64_t a_ = (a); \
+ int64x1_t result; \
+ __asm__ ("ins %0.d[%3], %x1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_u8(a, b, c) \
+ __extension__ \
+ ({ \
+ uint8x8_t b_ = (b); \
+ uint8_t a_ = (a); \
+ uint8x8_t result; \
+ __asm__ ("ins %0.b[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x4_t b_ = (b); \
+ uint16_t a_ = (a); \
+ uint16x4_t result; \
+ __asm__ ("ins %0.h[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x2_t b_ = (b); \
+ uint32_t a_ = (a); \
+ uint32x2_t result; \
+ __asm__ ("ins %0.s[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x1_t b_ = (b); \
+ uint64_t a_ = (a); \
+ uint64x1_t result; \
+ __asm__ ("ins %0.d[%3], %x1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32_t a_ = (a); \
+ float32x4_t result; \
+ __asm__ ("ins %0.s[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x2_t b_ = (b); \
+ float64_t a_ = (a); \
+ float64x2_t result; \
+ __asm__ ("ins %0.d[%3], %x1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x16_t b_ = (b); \
+ poly8_t a_ = (a); \
+ poly8x16_t result; \
+ __asm__ ("ins %0.b[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x8_t b_ = (b); \
+ poly16_t a_ = (a); \
+ poly16x8_t result; \
+ __asm__ ("ins %0.h[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_s8(a, b, c) \
+ __extension__ \
+ ({ \
+ int8x16_t b_ = (b); \
+ int8_t a_ = (a); \
+ int8x16_t result; \
+ __asm__ ("ins %0.b[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16_t a_ = (a); \
+ int16x8_t result; \
+ __asm__ ("ins %0.h[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("ins %0.s[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ int64_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("ins %0.d[%3], %x1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_u8(a, b, c) \
+ __extension__ \
+ ({ \
+ uint8x16_t b_ = (b); \
+ uint8_t a_ = (a); \
+ uint8x16_t result; \
+ __asm__ ("ins %0.b[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint16_t a_ = (a); \
+ uint16x8_t result; \
+ __asm__ ("ins %0.h[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint32_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("ins %0.s[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x2_t b_ = (b); \
+ uint64_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("ins %0.d[%3], %x1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
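+/* Illustrative usage sketch, not part of the original header: the lane
+   number is an "i" asm operand, so it must be a constant expression:
+
+     float32x4_t v2 = vsetq_lane_f32 (1.0f, v, 3);   // replace lane 3 of v  */
+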
+#define vshrn_high_n_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int8x8_t a_ = (a); \
+ int8x16_t result = vcombine_s8 \
+ (a_, vcreate_s8 (UINT64_C (0x0))); \
+ __asm__ ("shrn2 %0.16b,%1.8h,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_high_n_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int16x8_t result = vcombine_s16 \
+ (a_, vcreate_s16 (UINT64_C (0x0))); \
+ __asm__ ("shrn2 %0.8h,%1.4s,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_high_n_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int32x4_t result = vcombine_s32 \
+ (a_, vcreate_s32 (UINT64_C (0x0))); \
+ __asm__ ("shrn2 %0.4s,%1.2d,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_high_n_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint8x8_t a_ = (a); \
+ uint8x16_t result = vcombine_u8 \
+ (a_, vcreate_u8 (UINT64_C (0x0))); \
+ __asm__ ("shrn2 %0.16b,%1.8h,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_high_n_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x8_t result = vcombine_u16 \
+ (a_, vcreate_u16 (UINT64_C (0x0))); \
+ __asm__ ("shrn2 %0.8h,%1.4s,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_high_n_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x2_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x4_t result = vcombine_u32 \
+ (a_, vcreate_u32 (UINT64_C (0x0))); \
+ __asm__ ("shrn2 %0.4s,%1.2d,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_n_s16(a, b) \
+ __extension__ \
+ ({ \
+ int16x8_t a_ = (a); \
+ int8x8_t result; \
+ __asm__ ("shrn %0.8b,%1.8h,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_n_s32(a, b) \
+ __extension__ \
+ ({ \
+ int32x4_t a_ = (a); \
+ int16x4_t result; \
+ __asm__ ("shrn %0.4h,%1.4s,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_n_s64(a, b) \
+ __extension__ \
+ ({ \
+ int64x2_t a_ = (a); \
+ int32x2_t result; \
+ __asm__ ("shrn %0.2s,%1.2d,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_n_u16(a, b) \
+ __extension__ \
+ ({ \
+ uint16x8_t a_ = (a); \
+ uint8x8_t result; \
+ __asm__ ("shrn %0.8b,%1.8h,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_n_u32(a, b) \
+ __extension__ \
+ ({ \
+ uint32x4_t a_ = (a); \
+ uint16x4_t result; \
+ __asm__ ("shrn %0.4h,%1.4s,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_n_u64(a, b) \
+ __extension__ \
+ ({ \
+ uint64x2_t a_ = (a); \
+ uint32x2_t result; \
+ __asm__ ("shrn %0.2s,%1.2d,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
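+
+/* Illustrative note: shrn/shrn2 shift each element right and narrow it
+ to half width. For example, if every lane of an int16x8_t x holds
+ 0x0123, then vshrn_n_s16 (x, 4) yields an int8x8_t with every lane
+ 0x12, and vshrn_high_n_s16 (lo, x, 4) places those bytes in the upper
+ half of a 16-byte vector whose lower half is lo. */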
+
+#define vsli_n_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x8_t b_ = (b); \
+ poly8x8_t a_ = (a); \
+ poly8x8_t result; \
+ __asm__ ("sli %0.8b,%2.8b,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsli_n_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x4_t b_ = (b); \
+ poly16x4_t a_ = (a); \
+ poly16x4_t result; \
+ __asm__ ("sli %0.4h,%2.4h,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsliq_n_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x16_t b_ = (b); \
+ poly8x16_t a_ = (a); \
+ poly8x16_t result; \
+ __asm__ ("sli %0.16b,%2.16b,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsliq_n_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x8_t b_ = (b); \
+ poly16x8_t a_ = (a); \
+ poly16x8_t result; \
+ __asm__ ("sli %0.8h,%2.8h,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsri_n_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x8_t b_ = (b); \
+ poly8x8_t a_ = (a); \
+ poly8x8_t result; \
+ __asm__ ("sri %0.8b,%2.8b,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsri_n_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x4_t b_ = (b); \
+ poly16x4_t a_ = (a); \
+ poly16x4_t result; \
+ __asm__ ("sri %0.4h,%2.4h,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsriq_n_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x16_t b_ = (b); \
+ poly8x16_t a_ = (a); \
+ poly8x16_t result; \
+ __asm__ ("sri %0.16b,%2.16b,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsriq_n_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x8_t b_ = (b); \
+ poly16x8_t a_ = (a); \
+ poly16x8_t result; \
+ __asm__ ("sri %0.8h,%2.8h,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
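+
+/* Illustrative note: sli inserts a left-shifted copy of the second
+ operand, keeping the low c bits of the first, so vsli_n_p8 (a, b, 3)
+ computes (b << 3) | (a & 0x07) per lane; sri inserts a right-shifted
+ copy, keeping the high c bits, so vsri_n_p8 (a, b, 3) computes
+ (b >> 3) | (a & 0xe0) per lane. */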
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_f32 (float32_t * a, float32x2_t b)
+{
+ __asm__ ("st1 {%1.2s},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_f64 (float64_t * a, float64x1_t b)
+{
+ __asm__ ("st1 {%1.1d},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+#define vst1_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x2_t b_ = (b); \
+ float32_t * a_ = (a); \
+ __asm__ ("st1 {%1.s}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x1_t b_ = (b); \
+ float64_t * a_ = (a); \
+ __asm__ ("st1 {%1.d}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x8_t b_ = (b); \
+ poly8_t * a_ = (a); \
+ __asm__ ("st1 {%1.b}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x4_t b_ = (b); \
+ poly16_t * a_ = (a); \
+ __asm__ ("st1 {%1.h}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_s8(a, b, c) \
+ __extension__ \
+ ({ \
+ int8x8_t b_ = (b); \
+ int8_t * a_ = (a); \
+ __asm__ ("st1 {%1.b}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x4_t b_ = (b); \
+ int16_t * a_ = (a); \
+ __asm__ ("st1 {%1.h}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x2_t b_ = (b); \
+ int32_t * a_ = (a); \
+ __asm__ ("st1 {%1.s}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x1_t b_ = (b); \
+ int64_t * a_ = (a); \
+ __asm__ ("st1 {%1.d}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_u8(a, b, c) \
+ __extension__ \
+ ({ \
+ uint8x8_t b_ = (b); \
+ uint8_t * a_ = (a); \
+ __asm__ ("st1 {%1.b}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x4_t b_ = (b); \
+ uint16_t * a_ = (a); \
+ __asm__ ("st1 {%1.h}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x2_t b_ = (b); \
+ uint32_t * a_ = (a); \
+ __asm__ ("st1 {%1.s}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x1_t b_ = (b); \
+ uint64_t * a_ = (a); \
+ __asm__ ("st1 {%1.d}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_p8 (poly8_t * a, poly8x8_t b)
+{
+ __asm__ ("st1 {%1.8b},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_p16 (poly16_t * a, poly16x4_t b)
+{
+ __asm__ ("st1 {%1.4h},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s8 (int8_t * a, int8x8_t b)
+{
+ __asm__ ("st1 {%1.8b},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s16 (int16_t * a, int16x4_t b)
+{
+ __asm__ ("st1 {%1.4h},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s32 (int32_t * a, int32x2_t b)
+{
+ __asm__ ("st1 {%1.2s},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s64 (int64_t * a, int64x1_t b)
+{
+ __asm__ ("st1 {%1.1d},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u8 (uint8_t * a, uint8x8_t b)
+{
+ __asm__ ("st1 {%1.8b},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u16 (uint16_t * a, uint16x4_t b)
+{
+ __asm__ ("st1 {%1.4h},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u32 (uint32_t * a, uint32x2_t b)
+{
+ __asm__ ("st1 {%1.2s},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u64 (uint64_t * a, uint64x1_t b)
+{
+ __asm__ ("st1 {%1.1d},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_f32 (float32_t * a, float32x4_t b)
+{
+ __asm__ ("st1 {%1.4s},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_f64 (float64_t * a, float64x2_t b)
+{
+ __asm__ ("st1 {%1.2d},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+#define vst1q_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32_t * a_ = (a); \
+ __asm__ ("st1 {%1.s}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x2_t b_ = (b); \
+ float64_t * a_ = (a); \
+ __asm__ ("st1 {%1.d}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x16_t b_ = (b); \
+ poly8_t * a_ = (a); \
+ __asm__ ("st1 {%1.b}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x8_t b_ = (b); \
+ poly16_t * a_ = (a); \
+ __asm__ ("st1 {%1.h}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_s8(a, b, c) \
+ __extension__ \
+ ({ \
+ int8x16_t b_ = (b); \
+ int8_t * a_ = (a); \
+ __asm__ ("st1 {%1.b}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16_t * a_ = (a); \
+ __asm__ ("st1 {%1.h}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32_t * a_ = (a); \
+ __asm__ ("st1 {%1.s}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ int64_t * a_ = (a); \
+ __asm__ ("st1 {%1.d}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_u8(a, b, c) \
+ __extension__ \
+ ({ \
+ uint8x16_t b_ = (b); \
+ uint8_t * a_ = (a); \
+ __asm__ ("st1 {%1.b}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint16_t * a_ = (a); \
+ __asm__ ("st1 {%1.h}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint32_t * a_ = (a); \
+ __asm__ ("st1 {%1.s}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x2_t b_ = (b); \
+ uint64_t * a_ = (a); \
+ __asm__ ("st1 {%1.d}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_p8 (poly8_t * a, poly8x16_t b)
+{
+ __asm__ ("st1 {%1.16b},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_p16 (poly16_t * a, poly16x8_t b)
+{
+ __asm__ ("st1 {%1.8h},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s8 (int8_t * a, int8x16_t b)
+{
+ __asm__ ("st1 {%1.16b},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s16 (int16_t * a, int16x8_t b)
+{
+ __asm__ ("st1 {%1.8h},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s32 (int32_t * a, int32x4_t b)
+{
+ __asm__ ("st1 {%1.4s},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s64 (int64_t * a, int64x2_t b)
+{
+ __asm__ ("st1 {%1.2d},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u8 (uint8_t * a, uint8x16_t b)
+{
+ __asm__ ("st1 {%1.16b},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u16 (uint16_t * a, uint16x8_t b)
+{
+ __asm__ ("st1 {%1.8h},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u32 (uint32_t * a, uint32x4_t b)
+{
+ __asm__ ("st1 {%1.4s},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u64 (uint64_t * a, uint64x2_t b)
+{
+ __asm__ ("st1 {%1.2d},[%0]"
+ :
+ : "r"(a), "w"(b)
+ : "memory");
+}
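+
+/* Illustrative note: the vst1* functions above store a whole vector
+ and the vst1*_lane* macros store a single lane; the "memory" clobber
+ tells GCC that the asm writes memory, so cached values are not
+ reused across the store. */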
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsubhn_high_s16 (int8x8_t a, int16x8_t b, int16x8_t c)
+{
+ int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
+ __asm__ ("subhn2 %0.16b, %1.8h, %2.8h"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubhn_high_s32 (int16x4_t a, int32x4_t b, int32x4_t c)
+{
+ int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
+ __asm__ ("subhn2 %0.8h, %1.4s, %2.4s"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubhn_high_s64 (int32x2_t a, int64x2_t b, int64x2_t c)
+{
+ int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
+ __asm__ ("subhn2 %0.4s, %1.2d, %2.2d"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsubhn_high_u16 (uint8x8_t a, uint16x8_t b, uint16x8_t c)
+{
+ uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+ __asm__ ("subhn2 %0.16b, %1.8h, %2.8h"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubhn_high_u32 (uint16x4_t a, uint32x4_t b, uint32x4_t c)
+{
+ uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+ __asm__ ("subhn2 %0.8h, %1.4s, %2.4s"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubhn_high_u64 (uint32x2_t a, uint64x2_t b, uint64x2_t c)
+{
+ uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+ __asm__ ("subhn2 %0.4s, %1.2d, %2.2d"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsubhn_s16 (int16x8_t a, int16x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("subhn %0.8b, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsubhn_s32 (int32x4_t a, int32x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("subhn %0.4h, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsubhn_s64 (int64x2_t a, int64x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("subhn %0.2s, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsubhn_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("subhn %0.8b, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsubhn_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("subhn %0.4h, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsubhn_u64 (uint64x2_t a, uint64x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("subhn %0.2s, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
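+
+/* Illustrative note: subhn keeps the high half of each element-wise
+ difference, so vsubhn_s16 (a, b) stores (int8_t) ((a - b) >> 8) per
+ lane; the *_high variants write those narrow results into the upper
+ half of the wider destination. */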
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vtrn1_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("trn1 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtrn1_p8 (poly8x8_t a, poly8x8_t b)
+{
+ poly8x8_t result;
+ __asm__ ("trn1 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vtrn1_p16 (poly16x4_t a, poly16x4_t b)
+{
+ poly16x4_t result;
+ __asm__ ("trn1 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtrn1_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("trn1 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vtrn1_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("trn1 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vtrn1_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("trn1 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtrn1_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("trn1 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtrn1_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("trn1 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vtrn1_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("trn1 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vtrn1q_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("trn1 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vtrn1q_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("trn1 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vtrn1q_p8 (poly8x16_t a, poly8x16_t b)
+{
+ poly8x16_t result;
+ __asm__ ("trn1 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vtrn1q_p16 (poly16x8_t a, poly16x8_t b)
+{
+ poly16x8_t result;
+ __asm__ ("trn1 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vtrn1q_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("trn1 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vtrn1q_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("trn1 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vtrn1q_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("trn1 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vtrn1q_s64 (int64x2_t a, int64x2_t b)
+{
+ int64x2_t result;
+ __asm__ ("trn1 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtrn1q_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("trn1 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtrn1q_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("trn1 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vtrn1q_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("trn1 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vtrn1q_u64 (uint64x2_t a, uint64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("trn1 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vtrn2_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("trn2 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtrn2_p8 (poly8x8_t a, poly8x8_t b)
+{
+ poly8x8_t result;
+ __asm__ ("trn2 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vtrn2_p16 (poly16x4_t a, poly16x4_t b)
+{
+ poly16x4_t result;
+ __asm__ ("trn2 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtrn2_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("trn2 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vtrn2_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("trn2 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vtrn2_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("trn2 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtrn2_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("trn2 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtrn2_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("trn2 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vtrn2_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("trn2 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vtrn2q_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("trn2 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vtrn2q_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("trn2 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vtrn2q_p8 (poly8x16_t a, poly8x16_t b)
+{
+ poly8x16_t result;
+ __asm__ ("trn2 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vtrn2q_p16 (poly16x8_t a, poly16x8_t b)
+{
+ poly16x8_t result;
+ __asm__ ("trn2 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vtrn2q_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("trn2 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vtrn2q_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("trn2 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vtrn2q_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("trn2 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vtrn2q_s64 (int64x2_t a, int64x2_t b)
+{
+ int64x2_t result;
+ __asm__ ("trn2 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtrn2q_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("trn2 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtrn2q_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("trn2 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vtrn2q_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("trn2 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vtrn2q_u64 (uint64x2_t a, uint64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("trn2 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtst_p8 (poly8x8_t a, poly8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("cmtst %0.8b, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtst_p16 (poly16x4_t a, poly16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("cmtst %0.4h, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtstq_p8 (poly8x16_t a, poly8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("cmtst %0.16b, %1.16b, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtstq_p16 (poly16x8_t a, poly16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("cmtst %0.8h, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
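+
+/* Illustrative note: cmtst sets a result lane to all ones when the
+ bitwise AND of the corresponding input lanes is non-zero, and to
+ zero otherwise. */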
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vuzp1_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vuzp1_p8 (poly8x8_t a, poly8x8_t b)
+{
+ poly8x8_t result;
+ __asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vuzp1_p16 (poly16x4_t a, poly16x4_t b)
+{
+ poly16x4_t result;
+ __asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vuzp1_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vuzp1_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vuzp1_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vuzp1_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vuzp1_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vuzp1_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vuzp1q_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vuzp1q_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vuzp1q_p8 (poly8x16_t a, poly8x16_t b)
+{
+ poly8x16_t result;
+ __asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vuzp1q_p16 (poly16x8_t a, poly16x8_t b)
+{
+ poly16x8_t result;
+ __asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vuzp1q_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vuzp1q_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vuzp1q_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vuzp1q_s64 (int64x2_t a, int64x2_t b)
+{
+ int64x2_t result;
+ __asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vuzp1q_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vuzp1q_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vuzp1q_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vuzp1q_u64 (uint64x2_t a, uint64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vuzp2_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vuzp2_p8 (poly8x8_t a, poly8x8_t b)
+{
+ poly8x8_t result;
+ __asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vuzp2_p16 (poly16x4_t a, poly16x4_t b)
+{
+ poly16x4_t result;
+ __asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vuzp2_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vuzp2_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vuzp2_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vuzp2_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vuzp2_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vuzp2_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vuzp2q_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vuzp2q_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vuzp2q_p8 (poly8x16_t a, poly8x16_t b)
+{
+ poly8x16_t result;
+ __asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vuzp2q_p16 (poly16x8_t a, poly16x8_t b)
+{
+ poly16x8_t result;
+ __asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vuzp2q_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vuzp2q_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vuzp2q_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vuzp2q_s64 (int64x2_t a, int64x2_t b)
+{
+ int64x2_t result;
+ __asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vuzp2q_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vuzp2q_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vuzp2q_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vuzp2q_u64 (uint64x2_t a, uint64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vzip1_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("zip1 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vzip1_p8 (poly8x8_t a, poly8x8_t b)
+{
+ poly8x8_t result;
+ __asm__ ("zip1 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vzip1_p16 (poly16x4_t a, poly16x4_t b)
+{
+ poly16x4_t result;
+ __asm__ ("zip1 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vzip1_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("zip1 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vzip1_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("zip1 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vzip1_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("zip1 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vzip1_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("zip1 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vzip1_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("zip1 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vzip1_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("zip1 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vzip1q_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("zip1 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vzip1q_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("zip1 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vzip1q_p8 (poly8x16_t a, poly8x16_t b)
+{
+ poly8x16_t result;
+ __asm__ ("zip1 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vzip1q_p16 (poly16x8_t a, poly16x8_t b)
+{
+ poly16x8_t result;
+ __asm__ ("zip1 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vzip1q_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("zip1 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vzip1q_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("zip1 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vzip1q_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("zip1 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vzip1q_s64 (int64x2_t a, int64x2_t b)
+{
+ int64x2_t result;
+ __asm__ ("zip1 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vzip1q_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("zip1 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vzip1q_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("zip1 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vzip1q_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("zip1 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vzip1q_u64 (uint64x2_t a, uint64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("zip1 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vzip2_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("zip2 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vzip2_p8 (poly8x8_t a, poly8x8_t b)
+{
+ poly8x8_t result;
+ __asm__ ("zip2 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vzip2_p16 (poly16x4_t a, poly16x4_t b)
+{
+ poly16x4_t result;
+ __asm__ ("zip2 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vzip2_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("zip2 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vzip2_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("zip2 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vzip2_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("zip2 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vzip2_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("zip2 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vzip2_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("zip2 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vzip2_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("zip2 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vzip2q_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("zip2 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vzip2q_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("zip2 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vzip2q_p8 (poly8x16_t a, poly8x16_t b)
+{
+ poly8x16_t result;
+ __asm__ ("zip2 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vzip2q_p16 (poly16x8_t a, poly16x8_t b)
+{
+ poly16x8_t result;
+ __asm__ ("zip2 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vzip2q_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("zip2 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vzip2q_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("zip2 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vzip2q_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("zip2 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vzip2q_s64 (int64x2_t a, int64x2_t b)
+{
+ int64x2_t result;
+ __asm__ ("zip2 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vzip2q_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("zip2 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vzip2q_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("zip2 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vzip2q_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("zip2 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vzip2q_u64 (uint64x2_t a, uint64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("zip2 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
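+
+/* Illustrative note: trn1/trn2 pick the even/odd element of each pair
+ across the two inputs, uzp1/uzp2 concatenate the even/odd lanes of
+ both inputs, and zip1/zip2 interleave the low/high halves; e.g. zip1
+ on {a0, a1} and {b0, b1} yields {a0, b0}. */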
+
+/* End of temporary inline asm implementations. */
+
+/* Start of temporary inline asm for vldn, vstn and friends. */
+
+/* Create struct element types for duplicating loads.
+
+ Create 2-element structures of:
+
+ +------+----+----+----+----+
+ | | 8 | 16 | 32 | 64 |
+ +------+----+----+----+----+
+ |int | Y | Y | N | N |
+ +------+----+----+----+----+
+ |uint | Y | Y | N | N |
+ +------+----+----+----+----+
+ |float | - | - | N | N |
+ +------+----+----+----+----+
+ |poly | Y | Y | - | - |
+ +------+----+----+----+----+
+
+ Create 3-element structures of:
+
+ +------+----+----+----+----+
+ | | 8 | 16 | 32 | 64 |
+ +------+----+----+----+----+
+ |int | Y | Y | Y | Y |
+ +------+----+----+----+----+
+ |uint | Y | Y | Y | Y |
+ +------+----+----+----+----+
+ |float | - | - | Y | Y |
+ +------+----+----+----+----+
+ |poly | Y | Y | - | - |
+ +------+----+----+----+----+
+
+ Create 4-element structures of:
+
+ +------+----+----+----+----+
+ | | 8 | 16 | 32 | 64 |
+ +------+----+----+----+----+
+ |int | Y | N | N | Y |
+ +------+----+----+----+----+
+ |uint | Y | N | N | Y |
+ +------+----+----+----+----+
+ |float | - | - | N | Y |
+ +------+----+----+----+----+
+ |poly | Y | N | - | - |
+ +------+----+----+----+----+
+
+ These types are required for casting the memory references in the
+ dup-load macros below. */
+#define __STRUCTN(t, sz, nelem) \
+ typedef struct t ## sz ## x ## nelem ## _t { \
+ t ## sz ## _t val[nelem]; \
+ } t ## sz ## x ## nelem ## _t;
+
+/* 2-element structs. */
+__STRUCTN (int, 8, 2)
+__STRUCTN (int, 16, 2)
+__STRUCTN (uint, 8, 2)
+__STRUCTN (uint, 16, 2)
+__STRUCTN (poly, 8, 2)
+__STRUCTN (poly, 16, 2)
+/* 3-element structs. */
+__STRUCTN (int, 8, 3)
+__STRUCTN (int, 16, 3)
+__STRUCTN (int, 32, 3)
+__STRUCTN (int, 64, 3)
+__STRUCTN (uint, 8, 3)
+__STRUCTN (uint, 16, 3)
+__STRUCTN (uint, 32, 3)
+__STRUCTN (uint, 64, 3)
+__STRUCTN (float, 32, 3)
+__STRUCTN (float, 64, 3)
+__STRUCTN (poly, 8, 3)
+__STRUCTN (poly, 16, 3)
+/* 4-element structs. */
+__STRUCTN (int, 8, 4)
+__STRUCTN (int, 64, 4)
+__STRUCTN (uint, 8, 4)
+__STRUCTN (uint, 64, 4)
+__STRUCTN (poly, 8, 4)
+__STRUCTN (float, 64, 4)
+#undef __STRUCTN
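+
+/* For illustration, the first invocation above, __STRUCTN (int, 8, 2),
+ expands to:
+
+ typedef struct int8x2_t {
+ int8_t val[2];
+ } int8x2_t; */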
+
+#define __LD2R_FUNC(rettype, structtype, ptrtype, \
+ regsuffix, funcsuffix, Q) \
+ __extension__ static __inline rettype \
+ __attribute__ ((__always_inline__)) \
+ vld2 ## Q ## _dup_ ## funcsuffix (const ptrtype *ptr) \
+ { \
+ rettype result; \
+ __asm__ ("ld2r {v16." #regsuffix ", v17." #regsuffix "}, %1\n\t" \
+ "st1 {v16." #regsuffix ", v17." #regsuffix "}, %0\n\t" \
+ : "=Q"(result) \
+ : "Q"(*(const structtype *)ptr) \
+ : "memory", "v16", "v17"); \
+ return result; \
+ }
+
+__LD2R_FUNC (float32x2x2_t, float32x2_t, float32_t, 2s, f32,)
+__LD2R_FUNC (float64x1x2_t, float64x2_t, float64_t, 1d, f64,)
+__LD2R_FUNC (poly8x8x2_t, poly8x2_t, poly8_t, 8b, p8,)
+__LD2R_FUNC (poly16x4x2_t, poly16x2_t, poly16_t, 4h, p16,)
+__LD2R_FUNC (int8x8x2_t, int8x2_t, int8_t, 8b, s8,)
+__LD2R_FUNC (int16x4x2_t, int16x2_t, int16_t, 4h, s16,)
+__LD2R_FUNC (int32x2x2_t, int32x2_t, int32_t, 2s, s32,)
+__LD2R_FUNC (int64x1x2_t, int64x2_t, int64_t, 1d, s64,)
+__LD2R_FUNC (uint8x8x2_t, uint8x2_t, uint8_t, 8b, u8,)
+__LD2R_FUNC (uint16x4x2_t, uint16x2_t, uint16_t, 4h, u16,)
+__LD2R_FUNC (uint32x2x2_t, uint32x2_t, uint32_t, 2s, u32,)
+__LD2R_FUNC (uint64x1x2_t, uint64x2_t, uint64_t, 1d, u64,)
+__LD2R_FUNC (float32x4x2_t, float32x2_t, float32_t, 4s, f32, q)
+__LD2R_FUNC (float64x2x2_t, float64x2_t, float64_t, 2d, f64, q)
+__LD2R_FUNC (poly8x16x2_t, poly8x2_t, poly8_t, 16b, p8, q)
+__LD2R_FUNC (poly16x8x2_t, poly16x2_t, poly16_t, 8h, p16, q)
+__LD2R_FUNC (int8x16x2_t, int8x2_t, int8_t, 16b, s8, q)
+__LD2R_FUNC (int16x8x2_t, int16x2_t, int16_t, 8h, s16, q)
+__LD2R_FUNC (int32x4x2_t, int32x2_t, int32_t, 4s, s32, q)
+__LD2R_FUNC (int64x2x2_t, int64x2_t, int64_t, 2d, s64, q)
+__LD2R_FUNC (uint8x16x2_t, uint8x2_t, uint8_t, 16b, u8, q)
+__LD2R_FUNC (uint16x8x2_t, uint16x2_t, uint16_t, 8h, u16, q)
+__LD2R_FUNC (uint32x4x2_t, uint32x2_t, uint32_t, 4s, u32, q)
+__LD2R_FUNC (uint64x2x2_t, uint64x2_t, uint64_t, 2d, u64, q)
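+
+/* Usage sketch (illustrative, with a hypothetical caller-provided
+ buffer):
+
+ int32_t buf[2] = { 1, 2 };
+ int32x2x2_t r = vld2_dup_s32 (buf);
+
+ replicates buf[0] across both lanes of r.val[0] and buf[1] across
+ both lanes of r.val[1]. */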
+
+#define __LD2_LANE_FUNC(rettype, ptrtype, regsuffix, \
+ lnsuffix, funcsuffix, Q) \
+ __extension__ static __inline rettype \
+ __attribute__ ((__always_inline__)) \
+ vld2 ## Q ## _lane_ ## funcsuffix (const ptrtype *ptr, \
+ rettype b, const int c) \
+ { \
+ rettype result; \
+ __asm__ ("ld1 {v16." #regsuffix ", v17." #regsuffix "}, %1\n\t" \
+ "ld2 {v16." #lnsuffix ", v17." #lnsuffix "}[%3], %2\n\t" \
+ "st1 {v16." #regsuffix ", v17." #regsuffix "}, %0\n\t" \
+ : "=Q"(result) \
+ : "Q"(b), "Q"(*(const rettype *)ptr), "i"(c) \
+ : "memory", "v16", "v17"); \
+ return result; \
+ }
+
+__LD2_LANE_FUNC (int8x8x2_t, int8_t, 8b, b, s8,)
+__LD2_LANE_FUNC (float32x2x2_t, float32_t, 2s, s, f32,)
+__LD2_LANE_FUNC (float64x1x2_t, float64_t, 1d, d, f64,)
+__LD2_LANE_FUNC (poly8x8x2_t, poly8_t, 8b, b, p8,)
+__LD2_LANE_FUNC (poly16x4x2_t, poly16_t, 4h, h, p16,)
+__LD2_LANE_FUNC (int16x4x2_t, int16_t, 4h, h, s16,)
+__LD2_LANE_FUNC (int32x2x2_t, int32_t, 2s, s, s32,)
+__LD2_LANE_FUNC (int64x1x2_t, int64_t, 1d, d, s64,)
+__LD2_LANE_FUNC (uint8x8x2_t, uint8_t, 8b, b, u8,)
+__LD2_LANE_FUNC (uint16x4x2_t, uint16_t, 4h, h, u16,)
+__LD2_LANE_FUNC (uint32x2x2_t, uint32_t, 2s, s, u32,)
+__LD2_LANE_FUNC (uint64x1x2_t, uint64_t, 1d, d, u64,)
+__LD2_LANE_FUNC (float32x4x2_t, float32_t, 4s, s, f32, q)
+__LD2_LANE_FUNC (float64x2x2_t, float64_t, 2d, d, f64, q)
+__LD2_LANE_FUNC (poly8x16x2_t, poly8_t, 16b, b, p8, q)
+__LD2_LANE_FUNC (poly16x8x2_t, poly16_t, 8h, h, p16, q)
+__LD2_LANE_FUNC (int8x16x2_t, int8_t, 16b, b, s8, q)
+__LD2_LANE_FUNC (int16x8x2_t, int16_t, 8h, h, s16, q)
+__LD2_LANE_FUNC (int32x4x2_t, int32_t, 4s, s, s32, q)
+__LD2_LANE_FUNC (int64x2x2_t, int64_t, 2d, d, s64, q)
+__LD2_LANE_FUNC (uint8x16x2_t, uint8_t, 16b, b, u8, q)
+__LD2_LANE_FUNC (uint16x8x2_t, uint16_t, 8h, h, u16, q)
+__LD2_LANE_FUNC (uint32x4x2_t, uint32_t, 4s, s, u32, q)
+__LD2_LANE_FUNC (uint64x2x2_t, uint64_t, 2d, d, u64, q)
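+
+/* Usage sketch (illustrative): vld2q_lane_u32 (p, v, 1) reloads lane 1
+ of both vectors in v from the two uint32_t values at p and leaves the
+ other lanes of v unchanged. */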
+
+#define __LD3R_FUNC(rettype, structtype, ptrtype, \
+ regsuffix, funcsuffix, Q) \
+ __extension__ static __inline rettype \
+ __attribute__ ((__always_inline__)) \
+ vld3 ## Q ## _dup_ ## funcsuffix (const ptrtype *ptr) \
+ { \
+ rettype result; \
+ __asm__ ("ld3r {v16." #regsuffix " - v18." #regsuffix "}, %1\n\t" \
+ "st1 {v16." #regsuffix " - v18." #regsuffix "}, %0\n\t" \
+ : "=Q"(result) \
+ : "Q"(*(const structtype *)ptr) \
+ : "memory", "v16", "v17", "v18"); \
+ return result; \
+ }
+
+__LD3R_FUNC (float32x2x3_t, float32x3_t, float32_t, 2s, f32,)
+__LD3R_FUNC (float64x1x3_t, float64x3_t, float64_t, 1d, f64,)
+__LD3R_FUNC (poly8x8x3_t, poly8x3_t, poly8_t, 8b, p8,)
+__LD3R_FUNC (poly16x4x3_t, poly16x3_t, poly16_t, 4h, p16,)
+__LD3R_FUNC (int8x8x3_t, int8x3_t, int8_t, 8b, s8,)
+__LD3R_FUNC (int16x4x3_t, int16x3_t, int16_t, 4h, s16,)
+__LD3R_FUNC (int32x2x3_t, int32x3_t, int32_t, 2s, s32,)
+__LD3R_FUNC (int64x1x3_t, int64x3_t, int64_t, 1d, s64,)
+__LD3R_FUNC (uint8x8x3_t, uint8x3_t, uint8_t, 8b, u8,)
+__LD3R_FUNC (uint16x4x3_t, uint16x3_t, uint16_t, 4h, u16,)
+__LD3R_FUNC (uint32x2x3_t, uint32x3_t, uint32_t, 2s, u32,)
+__LD3R_FUNC (uint64x1x3_t, uint64x3_t, uint64_t, 1d, u64,)
+__LD3R_FUNC (float32x4x3_t, float32x3_t, float32_t, 4s, f32, q)
+__LD3R_FUNC (float64x2x3_t, float64x3_t, float64_t, 2d, f64, q)
+__LD3R_FUNC (poly8x16x3_t, poly8x3_t, poly8_t, 16b, p8, q)
+__LD3R_FUNC (poly16x8x3_t, poly16x3_t, poly16_t, 8h, p16, q)
+__LD3R_FUNC (int8x16x3_t, int8x3_t, int8_t, 16b, s8, q)
+__LD3R_FUNC (int16x8x3_t, int16x3_t, int16_t, 8h, s16, q)
+__LD3R_FUNC (int32x4x3_t, int32x3_t, int32_t, 4s, s32, q)
+__LD3R_FUNC (int64x2x3_t, int64x3_t, int64_t, 2d, s64, q)
+__LD3R_FUNC (uint8x16x3_t, uint8x3_t, uint8_t, 16b, u8, q)
+__LD3R_FUNC (uint16x8x3_t, uint16x3_t, uint16_t, 8h, u16, q)
+__LD3R_FUNC (uint32x4x3_t, uint32x3_t, uint32_t, 4s, u32, q)
+__LD3R_FUNC (uint64x2x3_t, uint64x3_t, uint64_t, 2d, u64, q)
+
+#define __LD3_LANE_FUNC(rettype, ptrtype, regsuffix, \
+ lnsuffix, funcsuffix, Q) \
+ __extension__ static __inline rettype \
+ __attribute__ ((__always_inline__)) \
+ vld3 ## Q ## _lane_ ## funcsuffix (const ptrtype *ptr, \
+ rettype b, const int c) \
+ { \
+ rettype result; \
+ __asm__ ("ld1 {v16." #regsuffix " - v18." #regsuffix "}, %1\n\t" \
+ "ld3 {v16." #lnsuffix " - v18." #lnsuffix "}[%3], %2\n\t" \
+ "st1 {v16." #regsuffix " - v18." #regsuffix "}, %0\n\t" \
+ : "=Q"(result) \
+ : "Q"(b), "Q"(*(const rettype *)ptr), "i"(c) \
+ : "memory", "v16", "v17", "v18"); \
+ return result; \
+ }
+
+__LD3_LANE_FUNC (int8x8x3_t, int8_t, 8b, b, s8,)
+__LD3_LANE_FUNC (float32x2x3_t, float32_t, 2s, s, f32,)
+__LD3_LANE_FUNC (float64x1x3_t, float64_t, 1d, d, f64,)
+__LD3_LANE_FUNC (poly8x8x3_t, poly8_t, 8b, b, p8,)
+__LD3_LANE_FUNC (poly16x4x3_t, poly16_t, 4h, h, p16,)
+__LD3_LANE_FUNC (int16x4x3_t, int16_t, 4h, h, s16,)
+__LD3_LANE_FUNC (int32x2x3_t, int32_t, 2s, s, s32,)
+__LD3_LANE_FUNC (int64x1x3_t, int64_t, 1d, d, s64,)
+__LD3_LANE_FUNC (uint8x8x3_t, uint8_t, 8b, b, u8,)
+__LD3_LANE_FUNC (uint16x4x3_t, uint16_t, 4h, h, u16,)
+__LD3_LANE_FUNC (uint32x2x3_t, uint32_t, 2s, s, u32,)
+__LD3_LANE_FUNC (uint64x1x3_t, uint64_t, 1d, d, u64,)
+__LD3_LANE_FUNC (float32x4x3_t, float32_t, 4s, s, f32, q)
+__LD3_LANE_FUNC (float64x2x3_t, float64_t, 2d, d, f64, q)
+__LD3_LANE_FUNC (poly8x16x3_t, poly8_t, 16b, b, p8, q)
+__LD3_LANE_FUNC (poly16x8x3_t, poly16_t, 8h, h, p16, q)
+__LD3_LANE_FUNC (int8x16x3_t, int8_t, 16b, b, s8, q)
+__LD3_LANE_FUNC (int16x8x3_t, int16_t, 8h, h, s16, q)
+__LD3_LANE_FUNC (int32x4x3_t, int32_t, 4s, s, s32, q)
+__LD3_LANE_FUNC (int64x2x3_t, int64_t, 2d, d, s64, q)
+__LD3_LANE_FUNC (uint8x16x3_t, uint8_t, 16b, b, u8, q)
+__LD3_LANE_FUNC (uint16x8x3_t, uint16_t, 8h, h, u16, q)
+__LD3_LANE_FUNC (uint32x4x3_t, uint32_t, 4s, s, u32, q)
+__LD3_LANE_FUNC (uint64x2x3_t, uint64_t, 2d, d, u64, q)
+
+#define __LD4R_FUNC(rettype, structtype, ptrtype, \
+ regsuffix, funcsuffix, Q) \
+ __extension__ static __inline rettype \
+ __attribute__ ((__always_inline__)) \
+ vld4 ## Q ## _dup_ ## funcsuffix (const ptrtype *ptr) \
+ { \
+ rettype result; \
+ __asm__ ("ld4r {v16." #regsuffix " - v19." #regsuffix "}, %1\n\t" \
+ "st1 {v16." #regsuffix " - v19." #regsuffix "}, %0\n\t" \
+ : "=Q"(result) \
+ : "Q"(*(const structtype *)ptr) \
+ : "memory", "v16", "v17", "v18", "v19"); \
+ return result; \
+ }
+
+__LD4R_FUNC (float32x2x4_t, float32x4_t, float32_t, 2s, f32,)
+__LD4R_FUNC (float64x1x4_t, float64x4_t, float64_t, 1d, f64,)
+__LD4R_FUNC (poly8x8x4_t, poly8x4_t, poly8_t, 8b, p8,)
+__LD4R_FUNC (poly16x4x4_t, poly16x4_t, poly16_t, 4h, p16,)
+__LD4R_FUNC (int8x8x4_t, int8x4_t, int8_t, 8b, s8,)
+__LD4R_FUNC (int16x4x4_t, int16x4_t, int16_t, 4h, s16,)
+__LD4R_FUNC (int32x2x4_t, int32x4_t, int32_t, 2s, s32,)
+__LD4R_FUNC (int64x1x4_t, int64x4_t, int64_t, 1d, s64,)
+__LD4R_FUNC (uint8x8x4_t, uint8x4_t, uint8_t, 8b, u8,)
+__LD4R_FUNC (uint16x4x4_t, uint16x4_t, uint16_t, 4h, u16,)
+__LD4R_FUNC (uint32x2x4_t, uint32x4_t, uint32_t, 2s, u32,)
+__LD4R_FUNC (uint64x1x4_t, uint64x4_t, uint64_t, 1d, u64,)
+__LD4R_FUNC (float32x4x4_t, float32x4_t, float32_t, 4s, f32, q)
+__LD4R_FUNC (float64x2x4_t, float64x4_t, float64_t, 2d, f64, q)
+__LD4R_FUNC (poly8x16x4_t, poly8x4_t, poly8_t, 16b, p8, q)
+__LD4R_FUNC (poly16x8x4_t, poly16x4_t, poly16_t, 8h, p16, q)
+__LD4R_FUNC (int8x16x4_t, int8x4_t, int8_t, 16b, s8, q)
+__LD4R_FUNC (int16x8x4_t, int16x4_t, int16_t, 8h, s16, q)
+__LD4R_FUNC (int32x4x4_t, int32x4_t, int32_t, 4s, s32, q)
+__LD4R_FUNC (int64x2x4_t, int64x4_t, int64_t, 2d, s64, q)
+__LD4R_FUNC (uint8x16x4_t, uint8x4_t, uint8_t, 16b, u8, q)
+__LD4R_FUNC (uint16x8x4_t, uint16x4_t, uint16_t, 8h, u16, q)
+__LD4R_FUNC (uint32x4x4_t, uint32x4_t, uint32_t, 4s, u32, q)
+__LD4R_FUNC (uint64x2x4_t, uint64x4_t, uint64_t, 2d, u64, q)
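+
+/* Illustrative sketch (not part of the header; `rgba' is a
+   hypothetical pointer to four packed floats): the vldN_dup family
+   replicates N consecutive elements across all lanes of N vectors
+   with a single ldNr:
+
+     float32x2x4_t q = vld4_dup_f32 (rgba);  // q.val[k] = {rgba[k], rgba[k]}
+*/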
+
+#define __LD4_LANE_FUNC(rettype, ptrtype, regsuffix, \
+ lnsuffix, funcsuffix, Q) \
+ __extension__ static __inline rettype \
+ __attribute__ ((__always_inline__)) \
+ vld4 ## Q ## _lane_ ## funcsuffix (const ptrtype *ptr, \
+ rettype b, const int c) \
+ { \
+ rettype result; \
+ __asm__ ("ld1 {v16." #regsuffix " - v19." #regsuffix "}, %1\n\t" \
+ "ld4 {v16." #lnsuffix " - v19." #lnsuffix "}[%3], %2\n\t" \
+ "st1 {v16." #regsuffix " - v19." #regsuffix "}, %0\n\t" \
+ : "=Q"(result) \
+ : "Q"(b), "Q"(*(const rettype *)ptr), "i"(c) \
+ : "memory", "v16", "v17", "v18", "v19"); \
+ return result; \
+ }
+
+__LD4_LANE_FUNC (int8x8x4_t, int8_t, 8b, b, s8,)
+__LD4_LANE_FUNC (float32x2x4_t, float32_t, 2s, s, f32,)
+__LD4_LANE_FUNC (float64x1x4_t, float64_t, 1d, d, f64,)
+__LD4_LANE_FUNC (poly8x8x4_t, poly8_t, 8b, b, p8,)
+__LD4_LANE_FUNC (poly16x4x4_t, poly16_t, 4h, h, p16,)
+__LD4_LANE_FUNC (int16x4x4_t, int16_t, 4h, h, s16,)
+__LD4_LANE_FUNC (int32x2x4_t, int32_t, 2s, s, s32,)
+__LD4_LANE_FUNC (int64x1x4_t, int64_t, 1d, d, s64,)
+__LD4_LANE_FUNC (uint8x8x4_t, uint8_t, 8b, b, u8,)
+__LD4_LANE_FUNC (uint16x4x4_t, uint16_t, 4h, h, u16,)
+__LD4_LANE_FUNC (uint32x2x4_t, uint32_t, 2s, s, u32,)
+__LD4_LANE_FUNC (uint64x1x4_t, uint64_t, 1d, d, u64,)
+__LD4_LANE_FUNC (float32x4x4_t, float32_t, 4s, s, f32, q)
+__LD4_LANE_FUNC (float64x2x4_t, float64_t, 2d, d, f64, q)
+__LD4_LANE_FUNC (poly8x16x4_t, poly8_t, 16b, b, p8, q)
+__LD4_LANE_FUNC (poly16x8x4_t, poly16_t, 8h, h, p16, q)
+__LD4_LANE_FUNC (int8x16x4_t, int8_t, 16b, b, s8, q)
+__LD4_LANE_FUNC (int16x8x4_t, int16_t, 8h, h, s16, q)
+__LD4_LANE_FUNC (int32x4x4_t, int32_t, 4s, s, s32, q)
+__LD4_LANE_FUNC (int64x2x4_t, int64_t, 2d, d, s64, q)
+__LD4_LANE_FUNC (uint8x16x4_t, uint8_t, 16b, b, u8, q)
+__LD4_LANE_FUNC (uint16x8x4_t, uint16_t, 8h, h, u16, q)
+__LD4_LANE_FUNC (uint32x4x4_t, uint32_t, 4s, s, u32, q)
+__LD4_LANE_FUNC (uint64x2x4_t, uint64_t, 2d, d, u64, q)
+
+#define __ST2_LANE_FUNC(intype, ptrtype, regsuffix, \
+ lnsuffix, funcsuffix, Q) \
+ __extension__ static __inline void \
+ __attribute__ ((__always_inline__)) \
+  vst2 ## Q ## _lane_ ## funcsuffix (ptrtype *ptr,			\
+ intype b, const int c) \
+ { \
+ __asm__ ("ld1 {v16." #regsuffix ", v17." #regsuffix "}, %1\n\t" \
+ "st2 {v16." #lnsuffix ", v17." #lnsuffix "}[%2], %0\n\t" \
+ : "=Q"(*(intype *) ptr) \
+ : "Q"(b), "i"(c) \
+ : "memory", "v16", "v17"); \
+ }
+
+__ST2_LANE_FUNC (int8x8x2_t, int8_t, 8b, b, s8,)
+__ST2_LANE_FUNC (float32x2x2_t, float32_t, 2s, s, f32,)
+__ST2_LANE_FUNC (float64x1x2_t, float64_t, 1d, d, f64,)
+__ST2_LANE_FUNC (poly8x8x2_t, poly8_t, 8b, b, p8,)
+__ST2_LANE_FUNC (poly16x4x2_t, poly16_t, 4h, h, p16,)
+__ST2_LANE_FUNC (int16x4x2_t, int16_t, 4h, h, s16,)
+__ST2_LANE_FUNC (int32x2x2_t, int32_t, 2s, s, s32,)
+__ST2_LANE_FUNC (int64x1x2_t, int64_t, 1d, d, s64,)
+__ST2_LANE_FUNC (uint8x8x2_t, uint8_t, 8b, b, u8,)
+__ST2_LANE_FUNC (uint16x4x2_t, uint16_t, 4h, h, u16,)
+__ST2_LANE_FUNC (uint32x2x2_t, uint32_t, 2s, s, u32,)
+__ST2_LANE_FUNC (uint64x1x2_t, uint64_t, 1d, d, u64,)
+__ST2_LANE_FUNC (float32x4x2_t, float32_t, 4s, s, f32, q)
+__ST2_LANE_FUNC (float64x2x2_t, float64_t, 2d, d, f64, q)
+__ST2_LANE_FUNC (poly8x16x2_t, poly8_t, 16b, b, p8, q)
+__ST2_LANE_FUNC (poly16x8x2_t, poly16_t, 8h, h, p16, q)
+__ST2_LANE_FUNC (int8x16x2_t, int8_t, 16b, b, s8, q)
+__ST2_LANE_FUNC (int16x8x2_t, int16_t, 8h, h, s16, q)
+__ST2_LANE_FUNC (int32x4x2_t, int32_t, 4s, s, s32, q)
+__ST2_LANE_FUNC (int64x2x2_t, int64_t, 2d, d, s64, q)
+__ST2_LANE_FUNC (uint8x16x2_t, uint8_t, 16b, b, u8, q)
+__ST2_LANE_FUNC (uint16x8x2_t, uint16_t, 8h, h, u16, q)
+__ST2_LANE_FUNC (uint32x4x2_t, uint32_t, 4s, s, u32, q)
+__ST2_LANE_FUNC (uint64x2x2_t, uint64_t, 2d, d, u64, q)
+
+#define __ST3_LANE_FUNC(intype, ptrtype, regsuffix, \
+ lnsuffix, funcsuffix, Q) \
+ __extension__ static __inline void \
+ __attribute__ ((__always_inline__)) \
+  vst3 ## Q ## _lane_ ## funcsuffix (ptrtype *ptr,			\
+ intype b, const int c) \
+ { \
+ __asm__ ("ld1 {v16." #regsuffix " - v18." #regsuffix "}, %1\n\t" \
+ "st3 {v16." #lnsuffix " - v18." #lnsuffix "}[%2], %0\n\t" \
+ : "=Q"(*(intype *) ptr) \
+ : "Q"(b), "i"(c) \
+ : "memory", "v16", "v17", "v18"); \
+ }
+
+__ST3_LANE_FUNC (int8x8x3_t, int8_t, 8b, b, s8,)
+__ST3_LANE_FUNC (float32x2x3_t, float32_t, 2s, s, f32,)
+__ST3_LANE_FUNC (float64x1x3_t, float64_t, 1d, d, f64,)
+__ST3_LANE_FUNC (poly8x8x3_t, poly8_t, 8b, b, p8,)
+__ST3_LANE_FUNC (poly16x4x3_t, poly16_t, 4h, h, p16,)
+__ST3_LANE_FUNC (int16x4x3_t, int16_t, 4h, h, s16,)
+__ST3_LANE_FUNC (int32x2x3_t, int32_t, 2s, s, s32,)
+__ST3_LANE_FUNC (int64x1x3_t, int64_t, 1d, d, s64,)
+__ST3_LANE_FUNC (uint8x8x3_t, uint8_t, 8b, b, u8,)
+__ST3_LANE_FUNC (uint16x4x3_t, uint16_t, 4h, h, u16,)
+__ST3_LANE_FUNC (uint32x2x3_t, uint32_t, 2s, s, u32,)
+__ST3_LANE_FUNC (uint64x1x3_t, uint64_t, 1d, d, u64,)
+__ST3_LANE_FUNC (float32x4x3_t, float32_t, 4s, s, f32, q)
+__ST3_LANE_FUNC (float64x2x3_t, float64_t, 2d, d, f64, q)
+__ST3_LANE_FUNC (poly8x16x3_t, poly8_t, 16b, b, p8, q)
+__ST3_LANE_FUNC (poly16x8x3_t, poly16_t, 8h, h, p16, q)
+__ST3_LANE_FUNC (int8x16x3_t, int8_t, 16b, b, s8, q)
+__ST3_LANE_FUNC (int16x8x3_t, int16_t, 8h, h, s16, q)
+__ST3_LANE_FUNC (int32x4x3_t, int32_t, 4s, s, s32, q)
+__ST3_LANE_FUNC (int64x2x3_t, int64_t, 2d, d, s64, q)
+__ST3_LANE_FUNC (uint8x16x3_t, uint8_t, 16b, b, u8, q)
+__ST3_LANE_FUNC (uint16x8x3_t, uint16_t, 8h, h, u16, q)
+__ST3_LANE_FUNC (uint32x4x3_t, uint32_t, 4s, s, u32, q)
+__ST3_LANE_FUNC (uint64x2x3_t, uint64_t, 2d, d, u64, q)
+
+#define __ST4_LANE_FUNC(intype, ptrtype, regsuffix, \
+ lnsuffix, funcsuffix, Q) \
+ __extension__ static __inline void \
+ __attribute__ ((__always_inline__)) \
+  vst4 ## Q ## _lane_ ## funcsuffix (ptrtype *ptr,			\
+ intype b, const int c) \
+ { \
+ __asm__ ("ld1 {v16." #regsuffix " - v19." #regsuffix "}, %1\n\t" \
+ "st4 {v16." #lnsuffix " - v19." #lnsuffix "}[%2], %0\n\t" \
+ : "=Q"(*(intype *) ptr) \
+ : "Q"(b), "i"(c) \
+ : "memory", "v16", "v17", "v18", "v19"); \
+ }
+
+__ST4_LANE_FUNC (int8x8x4_t, int8_t, 8b, b, s8,)
+__ST4_LANE_FUNC (float32x2x4_t, float32_t, 2s, s, f32,)
+__ST4_LANE_FUNC (float64x1x4_t, float64_t, 1d, d, f64,)
+__ST4_LANE_FUNC (poly8x8x4_t, poly8_t, 8b, b, p8,)
+__ST4_LANE_FUNC (poly16x4x4_t, poly16_t, 4h, h, p16,)
+__ST4_LANE_FUNC (int16x4x4_t, int16_t, 4h, h, s16,)
+__ST4_LANE_FUNC (int32x2x4_t, int32_t, 2s, s, s32,)
+__ST4_LANE_FUNC (int64x1x4_t, int64_t, 1d, d, s64,)
+__ST4_LANE_FUNC (uint8x8x4_t, uint8_t, 8b, b, u8,)
+__ST4_LANE_FUNC (uint16x4x4_t, uint16_t, 4h, h, u16,)
+__ST4_LANE_FUNC (uint32x2x4_t, uint32_t, 2s, s, u32,)
+__ST4_LANE_FUNC (uint64x1x4_t, uint64_t, 1d, d, u64,)
+__ST4_LANE_FUNC (float32x4x4_t, float32_t, 4s, s, f32, q)
+__ST4_LANE_FUNC (float64x2x4_t, float64_t, 2d, d, f64, q)
+__ST4_LANE_FUNC (poly8x16x4_t, poly8_t, 16b, b, p8, q)
+__ST4_LANE_FUNC (poly16x8x4_t, poly16_t, 8h, h, p16, q)
+__ST4_LANE_FUNC (int8x16x4_t, int8_t, 16b, b, s8, q)
+__ST4_LANE_FUNC (int16x8x4_t, int16_t, 8h, h, s16, q)
+__ST4_LANE_FUNC (int32x4x4_t, int32_t, 4s, s, s32, q)
+__ST4_LANE_FUNC (int64x2x4_t, int64_t, 2d, d, s64, q)
+__ST4_LANE_FUNC (uint8x16x4_t, uint8_t, 16b, b, u8, q)
+__ST4_LANE_FUNC (uint16x8x4_t, uint16_t, 8h, h, u16, q)
+__ST4_LANE_FUNC (uint32x4x4_t, uint32_t, 4s, s, u32, q)
+__ST4_LANE_FUNC (uint64x2x4_t, uint64_t, 2d, d, u64, q)
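+
+/* Illustrative sketch (not part of the header; `dst' is a hypothetical
+   pointer): the vstN_lane family is the inverse of vldN_lane -- it
+   stores lane C of each of the N vectors to N consecutive elements:
+
+     vst2_lane_u32 (dst, pair, 1);  // writes pair.val[0][1], pair.val[1][1]
+*/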
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vaddlv_s32 (int32x2_t a)
+{
+ int64_t result;
+ __asm__ ("saddlp %0.1d, %1.2s" : "=w"(result) : "w"(a) : );
+ return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vaddlv_u32 (uint32x2_t a)
+{
+ uint64_t result;
+ __asm__ ("uaddlp %0.1d, %1.2s" : "=w"(result) : "w"(a) : );
+ return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vaddv_s32 (int32x2_t a)
+{
+ int32_t result;
+ __asm__ ("addp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vaddv_u32 (uint32x2_t a)
+{
+ uint32_t result;
+ __asm__ ("addp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vmaxnmv_f32 (float32x2_t a)
+{
+ float32_t result;
+ __asm__ ("fmaxnmp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vminnmv_f32 (float32x2_t a)
+{
+ float32_t result;
+ __asm__ ("fminnmp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vmaxnmvq_f64 (float64x2_t a)
+{
+ float64_t result;
+ __asm__ ("fmaxnmp %0.2d, %1.2d, %1.2d" : "=w"(result) : "w"(a) : );
+ return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vmaxv_s32 (int32x2_t a)
+{
+ int32_t result;
+ __asm__ ("smaxp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vmaxv_u32 (uint32x2_t a)
+{
+ uint32_t result;
+ __asm__ ("umaxp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vminnmvq_f64 (float64x2_t a)
+{
+ float64_t result;
+ __asm__ ("fminnmp %0.2d, %1.2d, %1.2d" : "=w"(result) : "w"(a) : );
+ return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vminv_s32 (int32x2_t a)
+{
+ int32_t result;
+ __asm__ ("sminp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vminv_u32 (uint32x2_t a)
+{
+ uint32_t result;
+ __asm__ ("uminp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
+ return result;
+}
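+
+/* Illustrative note (not part of the header): with only two lanes an
+   across-vector reduction is exactly one pairwise operation on the
+   vector and itself, which is why the wrappers above emit addp, smaxp,
+   fminnmp and friends rather than dedicated across-lanes instructions:
+
+     int32x2_t v = {3, 9};
+     int32_t m = vmaxv_s32 (v);  // smaxp v,v -> 9
+*/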
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vpaddd_s64 (int64x2_t __a)
+{
+ return __builtin_aarch64_addpdi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_lanev4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_lanev2si (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_lanev8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_lanev4si (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_lanev4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_lanev2si (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_lanev8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_lanev4si (__a, __b, __c);
+}
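+
+/* Illustrative sketch (not part of the header): the _laneq forms take
+   the multiplier from a lane of a full 128-bit vector, so even a
+   64-bit input can select any of the eight halfword lanes of __b:
+
+     int16x4_t r = vqdmulh_laneq_s16 (a, b, 7);  // per lane: saturating
+                                                 // high half of 2*a*b[7]
+*/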
+
+/* Table intrinsics. */
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbl1_p8 (poly8x16_t a, uint8x8_t b)
+{
+ poly8x8_t result;
+ __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbl1_s8 (int8x16_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbl1_u8 (uint8x16_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbl1q_p8 (poly8x16_t a, uint8x16_t b)
+{
+ poly8x16_t result;
+ __asm__ ("tbl %0.16b, {%1.16b}, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbl1q_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("tbl %0.16b, {%1.16b}, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbl1q_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("tbl %0.16b, {%1.16b}, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
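+
+/* Illustrative note (not part of the header): tbl selects table bytes
+   by index and yields 0 for any index beyond the table (>= 16 for one
+   128-bit table register), so one vqtbl1q_u8 can permute, duplicate,
+   or zero bytes in a single instruction:
+
+     uint8x16_t rev = vqtbl1q_u8 (data, idx);  // with idx = {15,...,0}
+                                               // this reverses the bytes
+*/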
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbl2_s8 (int8x16x2_t tab, int8x8_t idx)
+{
+ int8x8_t result;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbl2_u8 (uint8x16x2_t tab, uint8x8_t idx)
+{
+ uint8x8_t result;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbl2_p8 (poly8x16x2_t tab, uint8x8_t idx)
+{
+ poly8x8_t result;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbl2q_s8 (int8x16x2_t tab, int8x16_t idx)
+{
+ int8x16_t result;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbl %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbl2q_u8 (uint8x16x2_t tab, uint8x16_t idx)
+{
+ uint8x16_t result;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbl %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbl2q_p8 (poly8x16x2_t tab, uint8x16_t idx)
+{
+ poly8x16_t result;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbl %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbl3_s8 (int8x16x3_t tab, int8x8_t idx)
+{
+ int8x8_t result;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbl3_u8 (uint8x16x3_t tab, uint8x8_t idx)
+{
+ uint8x8_t result;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbl3_p8 (poly8x16x3_t tab, uint8x8_t idx)
+{
+ poly8x8_t result;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbl3q_s8 (int8x16x3_t tab, int8x16_t idx)
+{
+ int8x16_t result;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbl %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbl3q_u8 (uint8x16x3_t tab, uint8x16_t idx)
+{
+ uint8x16_t result;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbl %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbl3q_p8 (poly8x16x3_t tab, uint8x16_t idx)
+{
+ poly8x16_t result;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbl %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbl4_s8 (int8x16x4_t tab, int8x8_t idx)
+{
+ int8x8_t result;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbl4_u8 (uint8x16x4_t tab, uint8x8_t idx)
+{
+ uint8x8_t result;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbl4_p8 (poly8x16x4_t tab, uint8x8_t idx)
+{
+ poly8x8_t result;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbl4q_s8 (int8x16x4_t tab, int8x16_t idx)
+{
+ int8x16_t result;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbl %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbl4q_u8 (uint8x16x4_t tab, uint8x16_t idx)
+{
+ uint8x16_t result;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbl %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbl4q_p8 (poly8x16x4_t tab, uint8x16_t idx)
+{
+ poly8x16_t result;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbl %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbx1_s8 (int8x8_t r, int8x16_t tab, int8x8_t idx)
+{
+ int8x8_t result = r;
+ __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
+ : "+w"(result)
+ : "w"(tab), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbx1_u8 (uint8x8_t r, uint8x16_t tab, uint8x8_t idx)
+{
+ uint8x8_t result = r;
+ __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
+ : "+w"(result)
+ : "w"(tab), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbx1_p8 (poly8x8_t r, poly8x16_t tab, uint8x8_t idx)
+{
+ poly8x8_t result = r;
+ __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
+ : "+w"(result)
+ : "w"(tab), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbx1q_s8 (int8x16_t r, int8x16_t tab, int8x16_t idx)
+{
+ int8x16_t result = r;
+ __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
+ : "+w"(result)
+ : "w"(tab), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbx1q_u8 (uint8x16_t r, uint8x16_t tab, uint8x16_t idx)
+{
+ uint8x16_t result = r;
+ __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
+ : "+w"(result)
+ : "w"(tab), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbx1q_p8 (poly8x16_t r, poly8x16_t tab, uint8x16_t idx)
+{
+ poly8x16_t result = r;
+ __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
+ : "+w"(result)
+ : "w"(tab), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
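+
+/* Illustrative note (not part of the header): tbx differs from tbl
+   only for out-of-range indices -- instead of writing 0 it leaves the
+   corresponding byte of the destination (the `r' argument) unchanged,
+   hence the "+w"(result) read-write constraint above:
+
+     uint8x8_t merged = vqtbx1_u8 (fallback, table, idx);
+*/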
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbx2_s8 (int8x8_t r, int8x16x2_t tab, int8x8_t idx)
+{
+ int8x8_t result = r;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbx2_u8 (uint8x8_t r, uint8x16x2_t tab, uint8x8_t idx)
+{
+ uint8x8_t result = r;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbx2_p8 (poly8x8_t r, poly8x16x2_t tab, uint8x8_t idx)
+{
+ poly8x8_t result = r;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbx2q_s8 (int8x16_t r, int8x16x2_t tab, int8x16_t idx)
+{
+ int8x16_t result = r;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbx %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbx2q_u8 (uint8x16_t r, uint8x16x2_t tab, uint8x16_t idx)
+{
+ uint8x16_t result = r;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbx %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbx2q_p8 (poly8x16_t r, poly8x16x2_t tab, uint8x16_t idx)
+{
+ poly8x16_t result = r;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbx %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbx3_s8 (int8x8_t r, int8x16x3_t tab, int8x8_t idx)
+{
+ int8x8_t result = r;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbx3_u8 (uint8x8_t r, uint8x16x3_t tab, uint8x8_t idx)
+{
+ uint8x8_t result = r;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbx3_p8 (poly8x8_t r, poly8x16x3_t tab, uint8x8_t idx)
+{
+ poly8x8_t result = r;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbx3q_s8 (int8x16_t r, int8x16x3_t tab, int8x16_t idx)
+{
+ int8x16_t result = r;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbx %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbx3q_u8 (uint8x16_t r, uint8x16x3_t tab, uint8x16_t idx)
+{
+ uint8x16_t result = r;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbx %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbx3q_p8 (poly8x16_t r, poly8x16x3_t tab, uint8x16_t idx)
+{
+ poly8x16_t result = r;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbx %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbx4_s8 (int8x8_t r, int8x16x4_t tab, int8x8_t idx)
+{
+ int8x8_t result = r;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbx4_u8 (uint8x8_t r, uint8x16x4_t tab, uint8x8_t idx)
+{
+ uint8x8_t result = r;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbx4_p8 (poly8x8_t r, poly8x16x4_t tab, uint8x8_t idx)
+{
+ poly8x8_t result = r;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbx4q_s8 (int8x16_t r, int8x16x4_t tab, int8x16_t idx)
+{
+ int8x16_t result = r;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbx %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbx4q_u8 (uint8x16_t r, uint8x16x4_t tab, uint8x16_t idx)
+{
+ uint8x16_t result = r;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbx %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbx4q_p8 (poly8x16_t r, poly8x16x4_t tab, uint8x16_t idx)
+{
+ poly8x16_t result = r;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbx %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+/* V7 legacy table intrinsics. */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl1_s8 (int8x8_t tab, int8x8_t idx)
+{
+ int8x8_t result;
+ int8x16_t temp = vcombine_s8 (tab, vcreate_s8 (UINT64_C (0x0)));
+ __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+ : "=w"(result)
+ : "w"(temp), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl1_u8 (uint8x8_t tab, uint8x8_t idx)
+{
+ uint8x8_t result;
+ uint8x16_t temp = vcombine_u8 (tab, vcreate_u8 (UINT64_C (0x0)));
+ __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+ : "=w"(result)
+ : "w"(temp), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl1_p8 (poly8x8_t tab, uint8x8_t idx)
+{
+ poly8x8_t result;
+ poly8x16_t temp = vcombine_p8 (tab, vcreate_p8 (UINT64_C (0x0)));
+ __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+ : "=w"(result)
+ : "w"(temp), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
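+
+/* Illustrative note (not part of the header): the v7 vtbl1/vtbl2 forms
+   index an 8- or 16-byte table, so they are emulated by widening the
+   table to one 128-bit register with vcombine; padding with zeros for
+   vtbl1 preserves the v7 rule that out-of-range indices read as 0:
+
+     int8x8_t r = vtbl1_s8 (tab, idx);  // any idx[i] >= 8 gives r[i] = 0
+*/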
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl2_s8 (int8x8x2_t tab, int8x8_t idx)
+{
+ int8x8_t result;
+ int8x16_t temp = vcombine_s8 (tab.val[0], tab.val[1]);
+ __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+ : "=w"(result)
+ : "w"(temp), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl2_u8 (uint8x8x2_t tab, uint8x8_t idx)
+{
+ uint8x8_t result;
+ uint8x16_t temp = vcombine_u8 (tab.val[0], tab.val[1]);
+ __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+ : "=w"(result)
+ : "w"(temp), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl2_p8 (poly8x8x2_t tab, uint8x8_t idx)
+{
+ poly8x8_t result;
+ poly8x16_t temp = vcombine_p8 (tab.val[0], tab.val[1]);
+ __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+ : "=w"(result)
+ : "w"(temp), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl3_s8 (int8x8x3_t tab, int8x8_t idx)
+{
+ int8x8_t result;
+ int8x16x2_t temp;
+ temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_s8 (tab.val[2], vcreate_s8 (UINT64_C (0x0)));
+  __asm__ ("ld1 {v16.16b - v17.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+ : "=w"(result)
+ : "Q"(temp), "w"(idx)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl3_u8 (uint8x8x3_t tab, uint8x8_t idx)
+{
+ uint8x8_t result;
+ uint8x16x2_t temp;
+ temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_u8 (tab.val[2], vcreate_u8 (UINT64_C (0x0)));
+  __asm__ ("ld1 {v16.16b - v17.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+ : "=w"(result)
+ : "Q"(temp), "w"(idx)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl3_p8 (poly8x8x3_t tab, uint8x8_t idx)
+{
+ poly8x8_t result;
+ poly8x16x2_t temp;
+ temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_p8 (tab.val[2], vcreate_p8 (UINT64_C (0x0)));
+  __asm__ ("ld1 {v16.16b - v17.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+ : "=w"(result)
+ : "Q"(temp), "w"(idx)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl4_s8 (int8x8x4_t tab, int8x8_t idx)
+{
+ int8x8_t result;
+ int8x16x2_t temp;
+ temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_s8 (tab.val[2], tab.val[3]);
+  __asm__ ("ld1 {v16.16b - v17.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+ : "=w"(result)
+ : "Q"(temp), "w"(idx)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl4_u8 (uint8x8x4_t tab, uint8x8_t idx)
+{
+ uint8x8_t result;
+ uint8x16x2_t temp;
+ temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_u8 (tab.val[2], tab.val[3]);
+  __asm__ ("ld1 {v16.16b - v17.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+ : "=w"(result)
+ : "Q"(temp), "w"(idx)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl4_p8 (poly8x8x4_t tab, uint8x8_t idx)
+{
+ poly8x8_t result;
+ poly8x16x2_t temp;
+ temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_p8 (tab.val[2], tab.val[3]);
+  __asm__ ("ld1 {v16.16b - v17.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+ : "=w"(result)
+ : "Q"(temp), "w"(idx)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx1_s8 (int8x8_t r, int8x8_t tab, int8x8_t idx)
+{
+ int8x8_t result;
+ int8x8_t tmp1;
+ int8x16_t temp = vcombine_s8 (tab, vcreate_s8 (UINT64_C (0x0)));
+ __asm__ ("movi %0.8b, 8\n\t"
+ "cmhs %0.8b, %3.8b, %0.8b\n\t"
+ "tbl %1.8b, {%2.16b}, %3.8b\n\t"
+ "bsl %0.8b, %4.8b, %1.8b\n\t"
+ : "+w"(result), "=w"(tmp1)
+ : "w"(temp), "w"(idx), "w"(r)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx1_u8 (uint8x8_t r, uint8x8_t tab, uint8x8_t idx)
+{
+ uint8x8_t result;
+ uint8x8_t tmp1;
+ uint8x16_t temp = vcombine_u8 (tab, vcreate_u8 (UINT64_C (0x0)));
+ __asm__ ("movi %0.8b, 8\n\t"
+ "cmhs %0.8b, %3.8b, %0.8b\n\t"
+ "tbl %1.8b, {%2.16b}, %3.8b\n\t"
+ "bsl %0.8b, %4.8b, %1.8b\n\t"
+ : "+w"(result), "=w"(tmp1)
+ : "w"(temp), "w"(idx), "w"(r)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx1_p8 (poly8x8_t r, poly8x8_t tab, uint8x8_t idx)
+{
+ poly8x8_t result;
+ poly8x8_t tmp1;
+ poly8x16_t temp = vcombine_p8 (tab, vcreate_p8 (UINT64_C (0x0)));
+ __asm__ ("movi %0.8b, 8\n\t"
+ "cmhs %0.8b, %3.8b, %0.8b\n\t"
+ "tbl %1.8b, {%2.16b}, %3.8b\n\t"
+ "bsl %0.8b, %4.8b, %1.8b\n\t"
+ : "+w"(result), "=w"(tmp1)
+ : "w"(temp), "w"(idx), "w"(r)
+ : /* No clobbers */);
+ return result;
+}
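+
+/* Illustrative note (not part of the header): vtbx1 cannot map
+   directly onto tbx, because AArch64 tbx treats indices 8..15 as in
+   range for a 16-byte table, while v7 vtbx1 must preserve `r' for any
+   index >= 8.  The movi/cmhs/tbl/bsl sequence above builds that
+   "index >= 8" mask by hand and selects between `r' and the looked-up
+   bytes:
+
+     int8x8_t r2 = vtbx1_s8 (r, tab, idx);  // r2[i] = idx[i] < 8
+                                            //         ? tab[idx[i]] : r[i]
+*/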
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx2_s8 (int8x8_t r, int8x8x2_t tab, int8x8_t idx)
+{
+ int8x8_t result = r;
+ int8x16_t temp = vcombine_s8 (tab.val[0], tab.val[1]);
+ __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
+ : "+w"(result)
+ : "w"(temp), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx2_u8 (uint8x8_t r, uint8x8x2_t tab, uint8x8_t idx)
+{
+ uint8x8_t result = r;
+ uint8x16_t temp = vcombine_u8 (tab.val[0], tab.val[1]);
+ __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
+ : "+w"(result)
+ : "w"(temp), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx2_p8 (poly8x8_t r, poly8x8x2_t tab, uint8x8_t idx)
+{
+ poly8x8_t result = r;
+ poly8x16_t temp = vcombine_p8 (tab.val[0], tab.val[1]);
+ __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
+ : "+w"(result)
+ : "w"(temp), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx3_s8 (int8x8_t r, int8x8x3_t tab, int8x8_t idx)
+{
+ int8x8_t result;
+ int8x8_t tmp1;
+ int8x16x2_t temp;
+ temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_s8 (tab.val[2], vcreate_s8 (UINT64_C (0x0)));
+ __asm__ ("ld1 {v16.16b - v17.16b}, %2\n\t"
+ "movi %0.8b, 24\n\t"
+ "cmhs %0.8b, %3.8b, %0.8b\n\t"
+ "tbl %1.8b, {v16.16b - v17.16b}, %3.8b\n\t"
+ "bsl %0.8b, %4.8b, %1.8b\n\t"
+ : "+w"(result), "=w"(tmp1)
+ : "Q"(temp), "w"(idx), "w"(r)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx3_u8 (uint8x8_t r, uint8x8x3_t tab, uint8x8_t idx)
+{
+ uint8x8_t result;
+ uint8x8_t tmp1;
+ uint8x16x2_t temp;
+ temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_u8 (tab.val[2], vcreate_u8 (UINT64_C (0x0)));
+ __asm__ ("ld1 {v16.16b - v17.16b}, %2\n\t"
+ "movi %0.8b, 24\n\t"
+ "cmhs %0.8b, %3.8b, %0.8b\n\t"
+ "tbl %1.8b, {v16.16b - v17.16b}, %3.8b\n\t"
+ "bsl %0.8b, %4.8b, %1.8b\n\t"
+ : "+w"(result), "=w"(tmp1)
+ : "Q"(temp), "w"(idx), "w"(r)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx3_p8 (poly8x8_t r, poly8x8x3_t tab, uint8x8_t idx)
+{
+ poly8x8_t result;
+ poly8x8_t tmp1;
+ poly8x16x2_t temp;
+ temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_p8 (tab.val[2], vcreate_p8 (UINT64_C (0x0)));
+ __asm__ ("ld1 {v16.16b - v17.16b}, %2\n\t"
+ "movi %0.8b, 24\n\t"
+ "cmhs %0.8b, %3.8b, %0.8b\n\t"
+ "tbl %1.8b, {v16.16b - v17.16b}, %3.8b\n\t"
+ "bsl %0.8b, %4.8b, %1.8b\n\t"
+ : "+w"(result), "=w"(tmp1)
+ : "Q"(temp), "w"(idx), "w"(r)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx4_s8 (int8x8_t r, int8x8x4_t tab, int8x8_t idx)
+{
+ int8x8_t result = r;
+ int8x16x2_t temp;
+ temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_s8 (tab.val[2], tab.val[3]);
+  __asm__ ("ld1 {v16.16b - v17.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+ : "+w"(result)
+ : "Q"(temp), "w"(idx)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx4_u8 (uint8x8_t r, uint8x8x4_t tab, uint8x8_t idx)
+{
+ uint8x8_t result = r;
+ uint8x16x2_t temp;
+ temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_u8 (tab.val[2], tab.val[3]);
+  __asm__ ("ld1 {v16.16b - v17.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+ : "+w"(result)
+ : "Q"(temp), "w"(idx)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx4_p8 (poly8x8_t r, poly8x8x4_t tab, uint8x8_t idx)
+{
+ poly8x8_t result = r;
+ poly8x16x2_t temp;
+ temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_p8 (tab.val[2], tab.val[3]);
+  __asm__ ("ld1 {v16.16b - v17.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+ : "+w"(result)
+ : "Q"(temp), "w"(idx)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+/* End of temporary inline asm. */
+
+/* Start of optimal implementations in approved order. */
+
+/* vadd */
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vaddd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vaddd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a + __b;
+}
+
+/* vceq */
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceq_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmeqv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceq_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmeqv8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vceq_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmeqv4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceq_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmeqv2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceq_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmeqdi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceq_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmeqv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vceq_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmeqv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceq_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmeqv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceq_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmeqdi ((int64x1_t) __a,
+ (int64x1_t) __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmeqv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmeqv16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vceqq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmeqv8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmeqv4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vceqq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmeqv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmeqv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vceqq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmeqv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmeqv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vceqq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmeqv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceqd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmeqdi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceqd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmeqdi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceqzd_s64 (int64x1_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_cmeqdi (__a, 0);
+}
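+
+/* Illustrative note (not part of the header): every vc* comparison
+   returns an unsigned mask of the same shape -- all-ones in each lane
+   where the predicate holds, all-zeros where it does not -- so the
+   result composes directly with bitwise selection:
+
+     uint32x2_t eq = vceq_s32 (a, b);  // eq[i] = a[i] == b[i] ? ~0u : 0
+*/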
+
+/* vcge */
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcge_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmgev8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcge_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmgev4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcge_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmgev2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcge_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmgedi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcge_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmhsv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcge_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmhsv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcge_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmhsv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcge_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmhsdi ((int64x1_t) __a,
+ (int64x1_t) __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgeq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmgev16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgeq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmgev8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgeq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmgev4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgeq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmgev2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgeq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmhsv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgeq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmhsv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgeq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmhsv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgeq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmhsv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcged_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmgedi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcged_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmhsdi ((int64x1_t) __a,
+ (int64x1_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgezd_s64 (int64x1_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_cmgedi (__a, 0);
+}
+
+/* vcgt */
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgt_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmgtv8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcgt_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmgtv4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgt_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmgtv2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgt_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmgtdi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgt_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmhiv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcgt_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmhiv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgt_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmhiv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgt_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmhidi ((int64x1_t) __a,
+ (int64x1_t) __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgtq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmgtv16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgtq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmgtv8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmgtv4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgtq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmgtv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgtq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmhiv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgtq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmhiv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmhiv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgtq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmhiv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgtd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmgtdi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgtd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmhidi ((int64x1_t) __a,
+ (int64x1_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgtzd_s64 (int64x1_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_cmgtdi (__a, 0);
+}
+
+/* vcle */
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcle_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmgev8qi (__b, __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcle_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmgev4hi (__b, __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcle_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmgev2si (__b, __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcle_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmgedi (__b, __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcle_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmhsv8qi ((int8x8_t) __b,
+ (int8x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcle_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmhsv4hi ((int16x4_t) __b,
+ (int16x4_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcle_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmhsv2si ((int32x2_t) __b,
+ (int32x2_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcle_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmhsdi ((int64x1_t) __b,
+ (int64x1_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcleq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmgev16qi (__b, __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcleq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmgev8hi (__b, __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcleq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmgev4si (__b, __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcleq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmgev2di (__b, __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcleq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmhsv16qi ((int8x16_t) __b,
+ (int8x16_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcleq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmhsv8hi ((int16x8_t) __b,
+ (int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcleq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmhsv4si ((int32x4_t) __b,
+ (int32x4_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcleq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmhsv2di ((int64x2_t) __b,
+ (int64x2_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcled_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmgedi (__b, __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vclezd_s64 (int64x1_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_cmledi (__a, 0);
+}
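+
+/* Worked example (editor's note, not part of the merged patch): the
+   scalar forms return an all-ones mask, not a 0/1 boolean, e.g.
+   vcled_s64 (3, 3) == 0xffffffffffffffff and vclezd_s64 (1) == 0.
+   There is no dedicated "less-or-equal" builtin: a <= b is computed
+   as b >= a, which is why the bodies above swap their operands.  */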
+
+/* vclt */
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclt_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmgtv8qi (__b, __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclt_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmgtv4hi (__b, __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclt_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmgtv2si (__b, __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vclt_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmgtdi (__b, __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclt_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmhiv8qi ((int8x8_t) __b,
+ (int8x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclt_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmhiv4hi ((int16x4_t) __b,
+ (int16x4_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclt_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmhiv2si ((int32x2_t) __b,
+ (int32x2_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vclt_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmhidi ((int64x1_t) __b,
+ (int64x1_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcltq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmgtv16qi (__b, __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcltq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmgtv8hi (__b, __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmgtv4si (__b, __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcltq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmgtv2di (__b, __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcltq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmhiv16qi ((int8x16_t) __b,
+ (int8x16_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcltq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmhiv8hi ((int16x8_t) __b,
+ (int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmhiv4si ((int32x4_t) __b,
+ (int32x4_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcltq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmhiv2di ((int64x2_t) __b,
+ (int64x2_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcltd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmgtdi (__b, __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcltzd_s64 (int64x1_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_cmltdi (__a, 0);
+}
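+
+/* Editor's note: the register-vs-register vclt forms reuse the CMGT
+   and CMHI builtins with swapped operands, while the compare-against-
+   zero form vcltzd_s64 gets its own builtin (cmltdi), matching the
+   dedicated compare-with-zero instruction form.  */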
+
+/* vdup */
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vdupb_lane_s8 (int8x16_t __a, int const __b)
+{
+ return __builtin_aarch64_dup_laneqi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vdupb_lane_u8 (uint8x16_t __a, int const __b)
+{
+ return (uint8x1_t) __builtin_aarch64_dup_laneqi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vduph_lane_s16 (int16x8_t __a, int const __b)
+{
+ return __builtin_aarch64_dup_lanehi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vduph_lane_u16 (uint16x8_t __a, int const __b)
+{
+ return (uint16x1_t) __builtin_aarch64_dup_lanehi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vdups_lane_s32 (int32x4_t __a, int const __b)
+{
+ return __builtin_aarch64_dup_lanesi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vdups_lane_u32 (uint32x4_t __a, int const __b)
+{
+ return (uint32x1_t) __builtin_aarch64_dup_lanesi ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vdupd_lane_s64 (int64x2_t __a, int const __b)
+{
+ return __builtin_aarch64_dup_lanedi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vdupd_lane_u64 (uint64x2_t __a, int const __b)
+{
+ return (uint64x1_t) __builtin_aarch64_dup_lanedi ((int64x2_t) __a, __b);
+}
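+
+/* Usage sketch (editor's addition, not part of the merged patch): the
+   lane argument must be an integer constant expression; it selects
+   which element of the Q-register input becomes the scalar result.  */
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+__example_dup_lane3 (int16x8_t __v)
+{
+ return vduph_lane_s16 (__v, 3); /* extract element 3 of __v */
+}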
+
+/* vldn */
+
+__extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__))
+vld2_s64 (const int64_t * __a)
+{
+ int64x1x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline uint64x1x2_t __attribute__ ((__always_inline__))
+vld2_u64 (const uint64_t * __a)
+{
+ uint64x1x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline float64x1x2_t __attribute__ ((__always_inline__))
+vld2_f64 (const float64_t * __a)
+{
+ float64x1x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2df ((const __builtin_aarch64_simd_df *) __a);
+ ret.val[0] = (float64x1_t) __builtin_aarch64_get_dregoidf (__o, 0);
+ ret.val[1] = (float64x1_t) __builtin_aarch64_get_dregoidf (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vld2_s8 (const int8_t * __a)
+{
+ int8x8x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
+ ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vld2_p8 (const poly8_t * __a)
+{
+ poly8x8x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
+ ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vld2_s16 (const int16_t * __a)
+{
+ int16x4x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
+ ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vld2_p16 (const poly16_t * __a)
+{
+ poly16x4x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
+ ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vld2_s32 (const int32_t * __a)
+{
+ int32x2x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v2si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 0);
+ ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vld2_u8 (const uint8_t * __a)
+{
+ uint8x8x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
+ ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vld2_u16 (const uint16_t * __a)
+{
+ uint16x4x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
+ ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vld2_u32 (const uint32_t * __a)
+{
+ uint32x2x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v2si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 0);
+ ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vld2_f32 (const float32_t * __a)
+{
+ float32x2x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v2sf ((const __builtin_aarch64_simd_sf *) __a);
+ ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregoiv2sf (__o, 0);
+ ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregoiv2sf (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+vld2q_s8 (const int8_t * __a)
+{
+ int8x16x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
+ ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+vld2q_p8 (const poly8_t * __a)
+{
+ poly8x16x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
+ ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vld2q_s16 (const int16_t * __a)
+{
+ int16x8x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
+ ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vld2q_p16 (const poly16_t * __a)
+{
+ poly16x8x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
+ ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vld2q_s32 (const int32_t * __a)
+{
+ int32x4x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v4si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline int64x2x2_t __attribute__ ((__always_inline__))
+vld2q_s64 (const int64_t * __a)
+{
+ int64x2x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0);
+ ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+vld2q_u8 (const uint8_t * __a)
+{
+ uint8x16x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
+ ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vld2q_u16 (const uint16_t * __a)
+{
+ uint16x8x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
+ ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vld2q_u32 (const uint32_t * __a)
+{
+ uint32x4x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v4si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline uint64x2x2_t __attribute__ ((__always_inline__))
+vld2q_u64 (const uint64_t * __a)
+{
+ uint64x2x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0);
+ ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vld2q_f32 (const float32_t * __a)
+{
+ float32x4x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v4sf ((const __builtin_aarch64_simd_sf *) __a);
+ ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregoiv4sf (__o, 0);
+ ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregoiv4sf (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline float64x2x2_t __attribute__ ((__always_inline__))
+vld2q_f64 (const float64_t * __a)
+{
+ float64x2x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v2df ((const __builtin_aarch64_simd_df *) __a);
+ ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregoiv2df (__o, 0);
+ ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregoiv2df (__o, 1);
+ return ret;
+}
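+
+/* Usage sketch (editor's illustration, not part of the merged patch):
+   vld2 deinterleaves element pairs, so interleaved complex data splits
+   into a real vector (val[0]) and an imaginary vector (val[1]).  */
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+__example_load_complex (const float32_t * __p)
+{
+ /* __p = {re0, im0, re1, im1, ...}; val[0] = re, val[1] = im.  */
+ return vld2q_f32 (__p);
+}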
+
+__extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__))
+vld3_s64 (const int64_t * __a)
+{
+ int64x1x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 0);
+ ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 1);
+ ret.val[2] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline uint64x1x3_t __attribute__ ((__always_inline__))
+vld3_u64 (const uint64_t * __a)
+{
+ uint64x1x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 0);
+ ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 1);
+ ret.val[2] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline float64x1x3_t __attribute__ ((__always_inline__))
+vld3_f64 (const float64_t * __a)
+{
+ float64x1x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3df ((const __builtin_aarch64_simd_df *) __a);
+ ret.val[0] = (float64x1_t) __builtin_aarch64_get_dregcidf (__o, 0);
+ ret.val[1] = (float64x1_t) __builtin_aarch64_get_dregcidf (__o, 1);
+ ret.val[2] = (float64x1_t) __builtin_aarch64_get_dregcidf (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline int8x8x3_t __attribute__ ((__always_inline__))
+vld3_s8 (const int8_t * __a)
+{
+ int8x8x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
+ ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
+ ret.val[2] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline poly8x8x3_t __attribute__ ((__always_inline__))
+vld3_p8 (const poly8_t * __a)
+{
+ poly8x8x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
+ ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
+ ret.val[2] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline int16x4x3_t __attribute__ ((__always_inline__))
+vld3_s16 (const int16_t * __a)
+{
+ int16x4x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
+ ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
+ ret.val[2] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline poly16x4x3_t __attribute__ ((__always_inline__))
+vld3_p16 (const poly16_t * __a)
+{
+ poly16x4x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
+ ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
+ ret.val[2] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline int32x2x3_t __attribute__ ((__always_inline__))
+vld3_s32 (const int32_t * __a)
+{
+ int32x2x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v2si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 0);
+ ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 1);
+ ret.val[2] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline uint8x8x3_t __attribute__ ((__always_inline__))
+vld3_u8 (const uint8_t * __a)
+{
+ uint8x8x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
+ ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
+ ret.val[2] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline uint16x4x3_t __attribute__ ((__always_inline__))
+vld3_u16 (const uint16_t * __a)
+{
+ uint16x4x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
+ ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
+ ret.val[2] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline uint32x2x3_t __attribute__ ((__always_inline__))
+vld3_u32 (const uint32_t * __a)
+{
+ uint32x2x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v2si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 0);
+ ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 1);
+ ret.val[2] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline float32x2x3_t __attribute__ ((__always_inline__))
+vld3_f32 (const float32_t * __a)
+{
+ float32x2x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v2sf ((const __builtin_aarch64_simd_sf *) __a);
+ ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 0);
+ ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 1);
+ ret.val[2] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline int8x16x3_t __attribute__ ((__always_inline__))
+vld3q_s8 (const int8_t * __a)
+{
+ int8x16x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
+ ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
+ ret.val[2] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline poly8x16x3_t __attribute__ ((__always_inline__))
+vld3q_p8 (const poly8_t * __a)
+{
+ poly8x16x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
+ ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
+ ret.val[2] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline int16x8x3_t __attribute__ ((__always_inline__))
+vld3q_s16 (const int16_t * __a)
+{
+ int16x8x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
+ ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
+ ret.val[2] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline poly16x8x3_t __attribute__ ((__always_inline__))
+vld3q_p16 (const poly16_t * __a)
+{
+ poly16x8x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
+ ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
+ ret.val[2] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline int32x4x3_t __attribute__ ((__always_inline__))
+vld3q_s32 (const int32_t * __a)
+{
+ int32x4x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v4si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0);
+ ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1);
+ ret.val[2] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline int64x2x3_t __attribute__ ((__always_inline__))
+vld3q_s64 (const int64_t * __a)
+{
+ int64x2x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0);
+ ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1);
+ ret.val[2] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline uint8x16x3_t __attribute__ ((__always_inline__))
+vld3q_u8 (const uint8_t * __a)
+{
+ uint8x16x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
+ ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
+ ret.val[2] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline uint16x8x3_t __attribute__ ((__always_inline__))
+vld3q_u16 (const uint16_t * __a)
+{
+ uint16x8x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
+ ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
+ ret.val[2] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline uint32x4x3_t __attribute__ ((__always_inline__))
+vld3q_u32 (const uint32_t * __a)
+{
+ uint32x4x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v4si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0);
+ ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1);
+ ret.val[2] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline uint64x2x3_t __attribute__ ((__always_inline__))
+vld3q_u64 (const uint64_t * __a)
+{
+ uint64x2x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0);
+ ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1);
+ ret.val[2] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline float32x4x3_t __attribute__ ((__always_inline__))
+vld3q_f32 (const float32_t * __a)
+{
+ float32x4x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v4sf ((const __builtin_aarch64_simd_sf *) __a);
+ ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 0);
+ ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 1);
+ ret.val[2] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline float64x2x3_t __attribute__ ((__always_inline__))
+vld3q_f64 (const float64_t * __a)
+{
+ float64x2x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v2df ((const __builtin_aarch64_simd_df *) __a);
+ ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 0);
+ ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 1);
+ ret.val[2] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 2);
+ return ret;
+}
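+
+/* Usage sketch (editor's addition, not part of the merged patch): vld3
+   splits groups of three, the classic case being packed RGB pixels
+   deinterleaved into separate channel planes.  */
+__extension__ static __inline uint8x16x3_t __attribute__ ((__always_inline__))
+__example_load_rgb (const uint8_t * __px)
+{
+ /* val[0] = R, val[1] = G, val[2] = B for 16 packed RGB pixels.  */
+ return vld3q_u8 (__px);
+}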
+
+__extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__))
+vld4_s64 (const int64_t * __a)
+{
+ int64x1x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 0);
+ ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 1);
+ ret.val[2] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 2);
+ ret.val[3] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline uint64x1x4_t __attribute__ ((__always_inline__))
+vld4_u64 (const uint64_t * __a)
+{
+ uint64x1x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 0);
+ ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 1);
+ ret.val[2] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 2);
+ ret.val[3] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline float64x1x4_t __attribute__ ((__always_inline__))
+vld4_f64 (const float64_t * __a)
+{
+ float64x1x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4df ((const __builtin_aarch64_simd_df *) __a);
+ ret.val[0] = (float64x1_t) __builtin_aarch64_get_dregxidf (__o, 0);
+ ret.val[1] = (float64x1_t) __builtin_aarch64_get_dregxidf (__o, 1);
+ ret.val[2] = (float64x1_t) __builtin_aarch64_get_dregxidf (__o, 2);
+ ret.val[3] = (float64x1_t) __builtin_aarch64_get_dregxidf (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline int8x8x4_t __attribute__ ((__always_inline__))
+vld4_s8 (const int8_t * __a)
+{
+ int8x8x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0);
+ ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1);
+ ret.val[2] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2);
+ ret.val[3] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline poly8x8x4_t __attribute__ ((__always_inline__))
+vld4_p8 (const poly8_t * __a)
+{
+ poly8x8x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0);
+ ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1);
+ ret.val[2] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2);
+ ret.val[3] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline int16x4x4_t __attribute__ ((__always_inline__))
+vld4_s16 (const int16_t * __a)
+{
+ int16x4x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0);
+ ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1);
+ ret.val[2] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2);
+ ret.val[3] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline poly16x4x4_t __attribute__ ((__always_inline__))
+vld4_p16 (const poly16_t * __a)
+{
+ poly16x4x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0);
+ ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1);
+ ret.val[2] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2);
+ ret.val[3] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline int32x2x4_t __attribute__ ((__always_inline__))
+vld4_s32 (const int32_t * __a)
+{
+ int32x2x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v2si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 0);
+ ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 1);
+ ret.val[2] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 2);
+ ret.val[3] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline uint8x8x4_t __attribute__ ((__always_inline__))
+vld4_u8 (const uint8_t * __a)
+{
+ uint8x8x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0);
+ ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1);
+ ret.val[2] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2);
+ ret.val[3] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline uint16x4x4_t __attribute__ ((__always_inline__))
+vld4_u16 (const uint16_t * __a)
+{
+ uint16x4x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0);
+ ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1);
+ ret.val[2] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2);
+ ret.val[3] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline uint32x2x4_t __attribute__ ((__always_inline__))
+vld4_u32 (const uint32_t * __a)
+{
+ uint32x2x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v2si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 0);
+ ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 1);
+ ret.val[2] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 2);
+ ret.val[3] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline float32x2x4_t __attribute__ ((__always_inline__))
+vld4_f32 (const float32_t * __a)
+{
+ float32x2x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v2sf ((const __builtin_aarch64_simd_sf *) __a);
+ ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 0);
+ ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 1);
+ ret.val[2] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 2);
+ ret.val[3] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline int8x16x4_t __attribute__ ((__always_inline__))
+vld4q_s8 (const int8_t * __a)
+{
+ int8x16x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0);
+ ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1);
+ ret.val[2] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2);
+ ret.val[3] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline poly8x16x4_t __attribute__ ((__always_inline__))
+vld4q_p8 (const poly8_t * __a)
+{
+ poly8x16x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0);
+ ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1);
+ ret.val[2] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2);
+ ret.val[3] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline int16x8x4_t __attribute__ ((__always_inline__))
+vld4q_s16 (const int16_t * __a)
+{
+ int16x8x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0);
+ ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1);
+ ret.val[2] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2);
+ ret.val[3] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline poly16x8x4_t __attribute__ ((__always_inline__))
+vld4q_p16 (const poly16_t * __a)
+{
+ poly16x8x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0);
+ ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1);
+ ret.val[2] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2);
+ ret.val[3] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline int32x4x4_t __attribute__ ((__always_inline__))
+vld4q_s32 (const int32_t * __a)
+{
+ int32x4x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v4si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0);
+ ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1);
+ ret.val[2] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2);
+ ret.val[3] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline int64x2x4_t __attribute__ ((__always_inline__))
+vld4q_s64 (const int64_t * __a)
+{
+ int64x2x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 0);
+ ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 1);
+ ret.val[2] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 2);
+ ret.val[3] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline uint8x16x4_t __attribute__ ((__always_inline__))
+vld4q_u8 (const uint8_t * __a)
+{
+ uint8x16x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0);
+ ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1);
+ ret.val[2] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2);
+ ret.val[3] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline uint16x8x4_t __attribute__ ((__always_inline__))
+vld4q_u16 (const uint16_t * __a)
+{
+ uint16x8x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0);
+ ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1);
+ ret.val[2] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2);
+ ret.val[3] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline uint32x4x4_t __attribute__ ((__always_inline__))
+vld4q_u32 (const uint32_t * __a)
+{
+ uint32x4x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v4si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0);
+ ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1);
+ ret.val[2] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2);
+ ret.val[3] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline uint64x2x4_t __attribute__ ((__always_inline__))
+vld4q_u64 (const uint64_t * __a)
+{
+ uint64x2x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 0);
+ ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 1);
+ ret.val[2] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 2);
+ ret.val[3] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline float32x4x4_t __attribute__ ((__always_inline__))
+vld4q_f32 (const float32_t * __a)
+{
+ float32x4x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v4sf ((const __builtin_aarch64_simd_sf *) __a);
+ ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 0);
+ ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 1);
+ ret.val[2] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 2);
+ ret.val[3] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline float64x2x4_t __attribute__ ((__always_inline__))
+vld4q_f64 (const float64_t * __a)
+{
+ float64x2x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v2df ((const __builtin_aarch64_simd_df *) __a);
+ ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 0);
+ ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 1);
+ ret.val[2] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 2);
+ ret.val[3] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 3);
+ return ret;
+}
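+
+/* Editor's note on the pattern above: the opaque types
+   __builtin_aarch64_simd_oi, _ci and _xi model tuples of two, three
+   and four consecutive SIMD registers; the get_dreg and get_qreg
+   builtins then project a single D- or Q-register out of the tuple
+   into one val[] slot of the NEON structure type.  */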
+
+/* vmax */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmax_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __builtin_aarch64_fmaxv2sf (__a, __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmax_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __builtin_aarch64_smaxv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmax_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __builtin_aarch64_smaxv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmax_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __builtin_aarch64_smaxv2si (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmax_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_umaxv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmax_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_umaxv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmax_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_umaxv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmaxq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_aarch64_fmaxv4sf (__a, __b);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmaxq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return __builtin_aarch64_fmaxv2df (__a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmaxq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_aarch64_smaxv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmaxq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_aarch64_smaxv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmaxq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_aarch64_smaxv4si (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmaxq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_umaxv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_umaxv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_umaxv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
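+
+/* Worked example (editor's note, not part of the merged patch):
+   vmax is a lane-wise maximum, e.g. for __a = {1, 9, 3, 7} and
+   __b = {4, 2, 8, 5}, vmaxq_s32 (__a, __b) == {4, 9, 8, 7}.  */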
+
+/* vmin */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmin_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __builtin_aarch64_fminv2sf (__a, __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmin_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __builtin_aarch64_sminv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmin_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __builtin_aarch64_sminv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmin_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __builtin_aarch64_sminv2si (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmin_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_uminv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmin_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_uminv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmin_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_uminv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vminq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_aarch64_fminv4sf (__a, __b);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vminq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return __builtin_aarch64_fminv2df (__a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vminq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_aarch64_sminv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vminq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_aarch64_sminv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vminq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_aarch64_sminv4si (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vminq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_uminv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vminq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uminv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vminq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uminv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
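+
+/* Usage sketch (editor's addition, not part of the merged patch):
+   composing vmin and vmax gives a lane-wise clamp into [__lo, __hi].  */
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__example_clamp_f32 (float32x4_t __x, float32x4_t __lo, float32x4_t __hi)
+{
+ return vminq_f32 (vmaxq_f32 (__x, __lo), __hi);
+}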
+
+/* vmla */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmla_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return __a + __b * __c;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return __a + __b * __c;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmlaq_f64 (float64x2_t __a, float64x2_t __b, float64x2_t __c)
+{
+ return __a + __b * __c;
+}
+
+/* vmls */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmls_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return __a - __b * __c;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return __a - __b * __c;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmlsq_f64 (float64x2_t __a, float64x2_t __b, float64x2_t __c)
+{
+ return __a - __b * __c;
+}
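+
+/* Editor's note: unlike most intrinsics in this file, vmla/vmls expand
+   to plain C arithmetic rather than a builtin; whether the compiler
+   fuses the multiply-add into a single fused instruction then depends
+   on the usual floating-point contraction rules (-ffp-contract).  */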
+
+/* vqabs */
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqabsq_s64 (int64x2_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_sqabsv2di (__a);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqabsb_s8 (int8x1_t __a)
+{
+ return (int8x1_t) __builtin_aarch64_sqabsqi (__a);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqabsh_s16 (int16x1_t __a)
+{
+ return (int16x1_t) __builtin_aarch64_sqabshi (__a);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqabss_s32 (int32x1_t __a)
+{
+ return (int32x1_t) __builtin_aarch64_sqabssi (__a);
+}
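+
+/* Worked example (editor's note, not part of the merged patch): the
+   "q" here means saturating, so the one value with no positive
+   counterpart pins to the maximum: vqabsb_s8 (-128) == 127 rather
+   than wrapping back to -128.  */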
+
+/* vqadd */
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqaddb_s8 (int8x1_t __a, int8x1_t __b)
+{
+ return (int8x1_t) __builtin_aarch64_sqaddqi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqaddh_s16 (int16x1_t __a, int16x1_t __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqaddhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqadds_s32 (int32x1_t __a, int32x1_t __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqaddsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqaddd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_sqadddi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqaddb_u8 (uint8x1_t __a, uint8x1_t __b)
+{
+ return (uint8x1_t) __builtin_aarch64_uqaddqi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqaddh_u16 (uint16x1_t __a, uint16x1_t __b)
+{
+ return (uint16x1_t) __builtin_aarch64_uqaddhi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqadds_u32 (uint32x1_t __a, uint32x1_t __b)
+{
+ return (uint32x1_t) __builtin_aarch64_uqaddsi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqaddd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqadddi (__a, __b);
+}
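+
+/* Usage sketch (editor's addition, not part of the merged patch):
+   saturating adds clip instead of wrapping, which suits pixel
+   arithmetic.  */
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+__example_brighten (uint8x1_t __px)
+{
+ return vqaddb_u8 (__px, 64); /* 220 + 64 saturates to 255, not 28 */
+}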
+
+/* vqdmlal */
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return __builtin_aarch64_sqdmlalv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __builtin_aarch64_sqdmlal2v8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_high_lane_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
+ int const __d)
+{
+ return __builtin_aarch64_sqdmlal2_lanev8hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_high_laneq_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
+ int const __d)
+{
+ return __builtin_aarch64_sqdmlal2_laneqv8hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
+{
+ return __builtin_aarch64_sqdmlal2_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __c, int const __d)
+{
+ return __builtin_aarch64_sqdmlal_lanev4hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_laneq_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __c, int const __d)
+{
+ return __builtin_aarch64_sqdmlal_laneqv4hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return __builtin_aarch64_sqdmlal_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return __builtin_aarch64_sqdmlalv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_aarch64_sqdmlal2v4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_high_lane_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
+ int const __d)
+{
+ return __builtin_aarch64_sqdmlal2_lanev4si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_high_laneq_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
+ int const __d)
+{
+ return __builtin_aarch64_sqdmlal2_laneqv4si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
+{
+ return __builtin_aarch64_sqdmlal2_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c, int const __d)
+{
+ return __builtin_aarch64_sqdmlal_lanev2si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_laneq_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c, int const __d)
+{
+ return __builtin_aarch64_sqdmlal_laneqv2si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return __builtin_aarch64_sqdmlal_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmlalh_s16 (int32x1_t __a, int16x1_t __b, int16x1_t __c)
+{
+ return __builtin_aarch64_sqdmlalhi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmlalh_lane_s16 (int32x1_t __a, int16x1_t __b, int16x8_t __c, const int __d)
+{
+ return __builtin_aarch64_sqdmlal_lanehi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqdmlals_s32 (int64x1_t __a, int32x1_t __b, int32x1_t __c)
+{
+ return __builtin_aarch64_sqdmlalsi (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqdmlals_lane_s32 (int64x1_t __a, int32x1_t __b, int32x4_t __c, const int __d)
+{
+ return __builtin_aarch64_sqdmlal_lanesi (__a, __b, __c, __d);
+}
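+
+/* Worked example (editor's note, not part of the merged patch):
+   vqdmlal is a widening doubling multiply-accumulate, computing
+   sat (__a + 2 * __b * __c) in the wider type, e.g.
+   vqdmlalh_s16 (10, 3, 4) == 10 + 2*3*4 == 34 as an int32x1_t.
+   Q15/Q31 fixed-point code relies on the doubling to keep the
+   product in the same format as the accumulator.  */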
+
+/* vqdmlsl */
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return __builtin_aarch64_sqdmlslv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __builtin_aarch64_sqdmlsl2v8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_high_lane_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
+ int const __d)
+{
+ return __builtin_aarch64_sqdmlsl2_lanev8hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_high_laneq_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
+ int const __d)
+{
+ return __builtin_aarch64_sqdmlsl2_laneqv8hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
+{
+ return __builtin_aarch64_sqdmlsl2_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __c, int const __d)
+{
+ return __builtin_aarch64_sqdmlsl_lanev4hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_laneq_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __c, int const __d)
+{
+ return __builtin_aarch64_sqdmlsl_laneqv4hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return __builtin_aarch64_sqdmlsl_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return __builtin_aarch64_sqdmlslv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_aarch64_sqdmlsl2v4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_high_lane_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
+ int const __d)
+{
+ return __builtin_aarch64_sqdmlsl2_lanev4si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_high_laneq_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
+ int const __d)
+{
+ return __builtin_aarch64_sqdmlsl2_laneqv4si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
+{
+ return __builtin_aarch64_sqdmlsl2_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c, int const __d)
+{
+ return __builtin_aarch64_sqdmlsl_lanev2si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_laneq_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c, int const __d)
+{
+ return __builtin_aarch64_sqdmlsl_laneqv2si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return __builtin_aarch64_sqdmlsl_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmlslh_s16 (int32x1_t __a, int16x1_t __b, int16x1_t __c)
+{
+ return __builtin_aarch64_sqdmlslhi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmlslh_lane_s16 (int32x1_t __a, int16x1_t __b, int16x8_t __c, const int __d)
+{
+ return __builtin_aarch64_sqdmlsl_lanehi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqdmlsls_s32 (int64x1_t __a, int32x1_t __b, int32x1_t __c)
+{
+ return __builtin_aarch64_sqdmlslsi (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqdmlsls_lane_s32 (int64x1_t __a, int32x1_t __b, int32x4_t __c, const int __d)
+{
+ return __builtin_aarch64_sqdmlsl_lanesi (__a, __b, __c, __d);
+}
+
+/* vqdmulh */
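+/* Signed saturating doubling multiply returning high half (SQDMULH): each
+   lane yields the high half of 2 * __a * __b, saturating the one overflow
+   case (both operands the most negative value).  */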
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_lane_s16 (int16x4_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_lanev4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_lane_s32 (int32x2_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_lanev2si (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_lane_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_lanev8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_lane_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_lanev4si (__a, __b, __c);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqdmulhh_s16 (int16x1_t __a, int16x1_t __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqdmulhhi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqdmulhh_lane_s16 (int16x1_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_lanehi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmulhs_s32 (int32x1_t __a, int32x1_t __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqdmulhsi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmulhs_lane_s32 (int32x1_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_lanesi (__a, __b, __c);
+}
+
+/* vqdmull */
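+/* Signed saturating doubling multiply long (SQDMULL): each result lane is
+   the saturated product 2 * __a * __b at twice the source element
+   width.  */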
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __builtin_aarch64_sqdmullv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_high_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_aarch64_sqdmull2v8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_high_lane_s16 (int16x8_t __a, int16x8_t __b, int const __c)
+{
+ return __builtin_aarch64_sqdmull2_lanev8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_high_laneq_s16 (int16x8_t __a, int16x8_t __b, int const __c)
+{
+ return __builtin_aarch64_sqdmull2_laneqv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_high_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_aarch64_sqdmull2_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_lane_s16 (int16x4_t __a, int16x8_t __b, int const __c)
+{
+ return __builtin_aarch64_sqdmull_lanev4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_laneq_s16 (int16x4_t __a, int16x8_t __b, int const __c)
+{
+ return __builtin_aarch64_sqdmull_laneqv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return __builtin_aarch64_sqdmull_nv4hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __builtin_aarch64_sqdmullv2si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_high_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_aarch64_sqdmull2v4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_high_lane_s32 (int32x4_t __a, int32x4_t __b, int const __c)
+{
+ return __builtin_aarch64_sqdmull2_lanev4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_high_laneq_s32 (int32x4_t __a, int32x4_t __b, int const __c)
+{
+ return __builtin_aarch64_sqdmull2_laneqv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_high_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_aarch64_sqdmull2_nv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_lane_s32 (int32x2_t __a, int32x4_t __b, int const __c)
+{
+ return __builtin_aarch64_sqdmull_lanev2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_laneq_s32 (int32x2_t __a, int32x4_t __b, int const __c)
+{
+ return __builtin_aarch64_sqdmull_laneqv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return __builtin_aarch64_sqdmull_nv2si (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmullh_s16 (int16x1_t __a, int16x1_t __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqdmullhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmullh_lane_s16 (int16x1_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmull_lanehi (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqdmulls_s32 (int32x1_t __a, int32x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_sqdmullsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqdmulls_lane_s32 (int32x1_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmull_lanesi (__a, __b, __c);
+}
+
+/* vqmovn */
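+/* Saturating extract narrow (SQXTN/UQXTN): narrow each element to half its
+   width, clamping values that do not fit the narrower range.  */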
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqmovn_s16 (int16x8_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_sqmovnv8hi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqmovn_s32 (int32x4_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_sqmovnv4si (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqmovn_s64 (int64x2_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_sqmovnv2di (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqmovn_u16 (uint16x8_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_uqmovnv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqmovn_u32 (uint32x4_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_uqmovnv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqmovn_u64 (uint64x2_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_uqmovnv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqmovnh_s16 (int16x1_t __a)
+{
+ return (int8x1_t) __builtin_aarch64_sqmovnhi (__a);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqmovns_s32 (int32x1_t __a)
+{
+ return (int16x1_t) __builtin_aarch64_sqmovnsi (__a);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqmovnd_s64 (int64x1_t __a)
+{
+ return (int32x1_t) __builtin_aarch64_sqmovndi (__a);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqmovnh_u16 (uint16x1_t __a)
+{
+ return (uint8x1_t) __builtin_aarch64_uqmovnhi (__a);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqmovns_u32 (uint32x1_t __a)
+{
+ return (uint16x1_t) __builtin_aarch64_uqmovnsi (__a);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqmovnd_u64 (uint64x1_t __a)
+{
+ return (uint32x1_t) __builtin_aarch64_uqmovndi (__a);
+}
+
+/* vqmovun */
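+/* Signed saturating extract unsigned narrow (SQXTUN): narrow signed
+   elements to an unsigned type of half the width, clamping negative inputs
+   to zero.  */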
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqmovun_s16 (int16x8_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_sqmovunv8hi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqmovun_s32 (int32x4_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_sqmovunv4si (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqmovun_s64 (int64x2_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_sqmovunv2di (__a);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqmovunh_s16 (int16x1_t __a)
+{
+ return (int8x1_t) __builtin_aarch64_sqmovunhi (__a);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqmovuns_s32 (int32x1_t __a)
+{
+ return (int16x1_t) __builtin_aarch64_sqmovunsi (__a);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqmovund_s64 (int64x1_t __a)
+{
+ return (int32x1_t) __builtin_aarch64_sqmovundi (__a);
+}
+
+/* vqneg */
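+/* Saturating negate (SQNEG): as negation, except that the most negative
+   value maps to the most positive value instead of wrapping around.  */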
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqnegq_s64 (int64x2_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_sqnegv2di (__a);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqnegb_s8 (int8x1_t __a)
+{
+ return (int8x1_t) __builtin_aarch64_sqnegqi (__a);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqnegh_s16 (int16x1_t __a)
+{
+ return (int16x1_t) __builtin_aarch64_sqneghi (__a);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqnegs_s32 (int32x1_t __a)
+{
+ return (int32x1_t) __builtin_aarch64_sqnegsi (__a);
+}
+
+/* vqrdmulh */
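+/* Signed saturating rounding doubling multiply returning high half
+   (SQRDMULH): as vqdmulh, but the product is rounded before the high half
+   is taken.  */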
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_lane_s16 (int16x4_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_lanev4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_lane_s32 (int32x2_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_lanev2si (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_lane_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_lanev8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_lane_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_lanev4si (__a, __b, __c);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqrdmulhh_s16 (int16x1_t __a, int16x1_t __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqrdmulhhi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqrdmulhh_lane_s16 (int16x1_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_lanehi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqrdmulhs_s32 (int32x1_t __a, int32x1_t __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqrdmulhsi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqrdmulhs_lane_s32 (int32x1_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_lanesi (__a, __b, __c);
+}
+
+/* vqrshl */
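+/* Saturating rounding shift left (SQRSHL/UQRSHL): shift each element of
+   __a by the signed per-element amount in __b; negative amounts shift
+   right with rounding, and left shifts saturate on overflow.  */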
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqrshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __builtin_aarch64_sqrshlv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __builtin_aarch64_sqrshlv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __builtin_aarch64_sqrshlv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqrshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __builtin_aarch64_sqrshldi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqrshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_uqrshlv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqrshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_uqrshlv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqrshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_uqrshlv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqrshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqrshldi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqrshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_aarch64_sqrshlv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_aarch64_sqrshlv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_aarch64_sqrshlv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqrshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __builtin_aarch64_sqrshlv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqrshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_uqrshlv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqrshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uqrshlv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uqrshlv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqrshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_uqrshlv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqrshlb_s8 (int8x1_t __a, int8x1_t __b)
+{
+ return __builtin_aarch64_sqrshlqi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqrshlh_s16 (int16x1_t __a, int16x1_t __b)
+{
+ return __builtin_aarch64_sqrshlhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqrshls_s32 (int32x1_t __a, int32x1_t __b)
+{
+ return __builtin_aarch64_sqrshlsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqrshld_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __builtin_aarch64_sqrshldi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqrshlb_u8 (uint8x1_t __a, uint8x1_t __b)
+{
+ return (uint8x1_t) __builtin_aarch64_uqrshlqi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqrshlh_u16 (uint16x1_t __a, uint16x1_t __b)
+{
+ return (uint16x1_t) __builtin_aarch64_uqrshlhi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqrshls_u32 (uint32x1_t __a, uint32x1_t __b)
+{
+ return (uint32x1_t) __builtin_aarch64_uqrshlsi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqrshld_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqrshldi (__a, __b);
+}
+
+/* vqrshrn */
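+/* Saturating rounded shift right narrow (SQRSHRN/UQRSHRN): shift each
+   element right by the immediate __b with rounding, then narrow to half
+   the width with saturation.  */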
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqrshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t) __builtin_aarch64_sqrshrn_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t) __builtin_aarch64_sqrshrn_nv4si (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t) __builtin_aarch64_sqrshrn_nv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqrshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_uqrshrn_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqrshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_uqrshrn_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqrshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_uqrshrn_nv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqrshrnh_n_s16 (int16x1_t __a, const int __b)
+{
+ return (int8x1_t) __builtin_aarch64_sqrshrn_nhi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqrshrns_n_s32 (int32x1_t __a, const int __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqrshrn_nsi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqrshrnd_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqrshrn_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqrshrnh_n_u16 (uint16x1_t __a, const int __b)
+{
+ return (uint8x1_t) __builtin_aarch64_uqrshrn_nhi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqrshrns_n_u32 (uint32x1_t __a, const int __b)
+{
+ return (uint16x1_t) __builtin_aarch64_uqrshrn_nsi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqrshrnd_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint32x1_t) __builtin_aarch64_uqrshrn_ndi (__a, __b);
+}
+
+/* vqrshrun */
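+/* Signed saturating rounded shift right unsigned narrow (SQRSHRUN): as
+   vqrshrn, but a signed input narrows to an unsigned result, with negative
+   values clamped to zero.  */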
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqrshrun_n_s16 (int16x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_sqrshrun_nv8hi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqrshrun_n_s32 (int32x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_sqrshrun_nv4si (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqrshrun_n_s64 (int64x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_sqrshrun_nv2di (__a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqrshrunh_n_s16 (int16x1_t __a, const int __b)
+{
+ return (int8x1_t) __builtin_aarch64_sqrshrun_nhi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqrshruns_n_s32 (int32x1_t __a, const int __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqrshrun_nsi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqrshrund_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqrshrun_ndi (__a, __b);
+}
+
+/* vqshl */
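+/* Saturating shift left (SQSHL/UQSHL): the register forms shift each
+   element of __a by the signed per-element amount in __b (negative amounts
+   shift right), the _n forms by an immediate; left shifts saturate on
+   overflow.  */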
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __builtin_aarch64_sqshlv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __builtin_aarch64_sqshlv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __builtin_aarch64_sqshlv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __builtin_aarch64_sqshldi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_uqshlv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_uqshlv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_uqshlv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqshldi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_aarch64_sqshlv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_aarch64_sqshlv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_aarch64_sqshlv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __builtin_aarch64_sqshlv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_uqshlv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uqshlv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uqshlv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_uqshlv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqshlb_s8 (int8x1_t __a, int8x1_t __b)
+{
+ return __builtin_aarch64_sqshlqi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqshlh_s16 (int16x1_t __a, int16x1_t __b)
+{
+ return __builtin_aarch64_sqshlhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqshls_s32 (int32x1_t __a, int32x1_t __b)
+{
+ return __builtin_aarch64_sqshlsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshld_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __builtin_aarch64_sqshldi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqshlb_u8 (uint8x1_t __a, uint8x1_t __b)
+{
+ return (uint8x1_t) __builtin_aarch64_uqshlqi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqshlh_u16 (uint16x1_t __a, uint16x1_t __b)
+{
+ return (uint16x1_t) __builtin_aarch64_uqshlhi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqshls_u32 (uint32x1_t __a, uint32x1_t __b)
+{
+ return (uint32x1_t) __builtin_aarch64_uqshlsi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshld_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqshldi (__a, __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqshl_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t) __builtin_aarch64_sqshl_nv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqshl_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t) __builtin_aarch64_sqshl_nv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqshl_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t) __builtin_aarch64_sqshl_nv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshl_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t) __builtin_aarch64_sqshl_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshl_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_uqshl_nv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshl_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_uqshl_nv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshl_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_uqshl_nv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshl_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqshl_ndi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqshlq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t) __builtin_aarch64_sqshl_nv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqshlq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t) __builtin_aarch64_sqshl_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqshlq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t) __builtin_aarch64_sqshl_nv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqshlq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t) __builtin_aarch64_sqshl_nv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqshlq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t) __builtin_aarch64_uqshl_nv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqshlq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uqshl_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqshlq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uqshl_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqshlq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t) __builtin_aarch64_uqshl_nv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqshlb_n_s8 (int8x1_t __a, const int __b)
+{
+ return (int8x1_t) __builtin_aarch64_sqshl_nqi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqshlh_n_s16 (int16x1_t __a, const int __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqshl_nhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqshls_n_s32 (int32x1_t __a, const int __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqshl_nsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshld_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t) __builtin_aarch64_sqshl_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqshlb_n_u8 (uint8x1_t __a, const int __b)
+{
+ return (uint8x1_t) __builtin_aarch64_uqshl_nqi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqshlh_n_u16 (uint16x1_t __a, const int __b)
+{
+ return (uint16x1_t) __builtin_aarch64_uqshl_nhi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqshls_n_u32 (uint32x1_t __a, const int __b)
+{
+ return (uint32x1_t) __builtin_aarch64_uqshl_nsi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshld_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqshl_ndi (__a, __b);
+}
+
+/* vqshlu */
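+/* Signed saturating shift left unsigned (SQSHLU): shift signed elements
+   left by the immediate __b and saturate to the unsigned range of the same
+   width.  */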
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshlu_n_s8 (int8x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_sqshlu_nv8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshlu_n_s16 (int16x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_sqshlu_nv4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshlu_n_s32 (int32x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_sqshlu_nv2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshlu_n_s64 (int64x1_t __a, const int __b)
+{
+ return (uint64x1_t) __builtin_aarch64_sqshlu_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqshluq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (uint8x16_t) __builtin_aarch64_sqshlu_nv16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqshluq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (uint16x8_t) __builtin_aarch64_sqshlu_nv8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqshluq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (uint32x4_t) __builtin_aarch64_sqshlu_nv4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqshluq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (uint64x2_t) __builtin_aarch64_sqshlu_nv2di (__a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqshlub_n_s8 (int8x1_t __a, const int __b)
+{
+ return (int8x1_t) __builtin_aarch64_sqshlu_nqi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqshluh_n_s16 (int16x1_t __a, const int __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqshlu_nhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqshlus_n_s32 (int32x1_t __a, const int __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqshlu_nsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshlud_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t) __builtin_aarch64_sqshlu_ndi (__a, __b);
+}
+
+/* vqshrn */
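+/* Saturating shift right narrow (SQSHRN/UQSHRN): shift each element right
+   by the immediate __b, truncating, then narrow to half the width with
+   saturation.  */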
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t) __builtin_aarch64_sqshrn_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t) __builtin_aarch64_sqshrn_nv4si (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t) __builtin_aarch64_sqshrn_nv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_uqshrn_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_uqshrn_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_uqshrn_nv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqshrnh_n_s16 (int16x1_t __a, const int __b)
+{
+ return (int8x1_t) __builtin_aarch64_sqshrn_nhi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqshrns_n_s32 (int32x1_t __a, const int __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqshrn_nsi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqshrnd_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqshrn_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqshrnh_n_u16 (uint16x1_t __a, const int __b)
+{
+ return (uint8x1_t) __builtin_aarch64_uqshrn_nhi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqshrns_n_u32 (uint32x1_t __a, const int __b)
+{
+ return (uint16x1_t) __builtin_aarch64_uqshrn_nsi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqshrnd_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint32x1_t) __builtin_aarch64_uqshrn_ndi (__a, __b);
+}
+
+/* vqshrun */
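+/* Signed saturating shift right unsigned narrow (SQSHRUN): as vqshrn, but
+   a signed input narrows to an unsigned result, with negative values
+   clamped to zero.  */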
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshrun_n_s16 (int16x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_sqshrun_nv8hi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshrun_n_s32 (int32x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_sqshrun_nv4si (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshrun_n_s64 (int64x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_sqshrun_nv2di (__a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqshrunh_n_s16 (int16x1_t __a, const int __b)
+{
+ return (int8x1_t) __builtin_aarch64_sqshrun_nhi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqshruns_n_s32 (int32x1_t __a, const int __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqshrun_nsi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqshrund_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqshrun_ndi (__a, __b);
+}
+
+/* vqsub */
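+/* Saturating subtract (SQSUB/UQSUB), scalar forms: __a - __b with the
+   result clamped to the range of the element type.  */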
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqsubb_s8 (int8x1_t __a, int8x1_t __b)
+{
+ return (int8x1_t) __builtin_aarch64_sqsubqi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqsubh_s16 (int16x1_t __a, int16x1_t __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqsubhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqsubs_s32 (int32x1_t __a, int32x1_t __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqsubsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqsubd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_sqsubdi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqsubb_u8 (uint8x1_t __a, uint8x1_t __b)
+{
+ return (uint8x1_t) __builtin_aarch64_uqsubqi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqsubh_u16 (uint16x1_t __a, uint16x1_t __b)
+{
+ return (uint16x1_t) __builtin_aarch64_uqsubhi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqsubs_u32 (uint32x1_t __a, uint32x1_t __b)
+{
+ return (uint32x1_t) __builtin_aarch64_uqsubsi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqsubd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqsubdi (__a, __b);
+}
+
+/* vrshl */
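+/* Rounding shift left (SRSHL/URSHL): shift each element of __a by the
+   signed per-element amount in __b; negative amounts shift right with
+   rounding.  No saturation is performed.  */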
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t) __builtin_aarch64_srshlv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_srshlv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_srshlv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_srshldi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_urshlv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_urshlv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_urshlv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_urshldi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t) __builtin_aarch64_srshlv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_srshlv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_srshlv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vrshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_srshlv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_urshlv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_urshlv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_urshlv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vrshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_urshlv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrshld_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_srshldi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrshld_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_urshldi (__a, __b);
+}
+
+/* vrshr */
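+/* Rounding shift right (SRSHR/URSHR): shift each element right by the
+   immediate __b, rounding the result rather than truncating.  */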
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrshr_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t) __builtin_aarch64_srshr_nv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrshr_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t) __builtin_aarch64_srshr_nv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrshr_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t) __builtin_aarch64_srshr_nv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrshr_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t) __builtin_aarch64_srshr_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrshr_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_urshr_nv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrshr_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_urshr_nv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrshr_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_urshr_nv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrshr_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t) __builtin_aarch64_urshr_ndi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrshrq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t) __builtin_aarch64_srshr_nv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrshrq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t) __builtin_aarch64_srshr_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrshrq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t) __builtin_aarch64_srshr_nv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vrshrq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t) __builtin_aarch64_srshr_nv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrshrq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t) __builtin_aarch64_urshr_nv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrshrq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t) __builtin_aarch64_urshr_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrshrq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t) __builtin_aarch64_urshr_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vrshrq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t) __builtin_aarch64_urshr_nv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrshrd_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t) __builtin_aarch64_srshr_ndi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrshrd_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t) __builtin_aarch64_urshr_ndi (__a, __b);
+}
+
+/* vrsra */
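+/* Rounding shift right and accumulate (SRSRA/URSRA): shift each element of
+   __b right by the immediate __c with rounding, then add it to the
+   corresponding element of __a.  */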
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t) __builtin_aarch64_srsra_nv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t) __builtin_aarch64_srsra_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t) __builtin_aarch64_srsra_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t) __builtin_aarch64_srsra_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t) __builtin_aarch64_ursra_nv8qi ((int8x8_t) __a,
+ (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t) __builtin_aarch64_ursra_nv4hi ((int16x4_t) __a,
+ (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t) __builtin_aarch64_ursra_nv2si ((int32x2_t) __a,
+ (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t) __builtin_aarch64_ursra_ndi ((int64x1_t) __a,
+ (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t) __builtin_aarch64_srsra_nv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t) __builtin_aarch64_srsra_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t) __builtin_aarch64_srsra_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vrsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t) __builtin_aarch64_srsra_nv2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t) __builtin_aarch64_ursra_nv16qi ((int8x16_t) __a,
+ (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t) __builtin_aarch64_ursra_nv8hi ((int16x8_t) __a,
+ (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t) __builtin_aarch64_ursra_nv4si ((int32x4_t) __a,
+ (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vrsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t) __builtin_aarch64_ursra_nv2di ((int64x2_t) __a,
+ (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrsrad_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t) __builtin_aarch64_srsra_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrsrad_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t) __builtin_aarch64_ursra_ndi (__a, __b, __c);
+}
+
+/* vshl */
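+/* Shift left (SHL/SSHL/USHL): the _n forms shift by an immediate; the
+   register forms shift by the signed per-element amount in __b, where
+   negative amounts shift right.  */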
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshl_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t) __builtin_aarch64_sshl_nv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshl_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t) __builtin_aarch64_sshl_nv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshl_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t) __builtin_aarch64_sshl_nv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshl_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t) __builtin_aarch64_sshl_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshl_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_ushl_nv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshl_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_ushl_nv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshl_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_ushl_nv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshl_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t) __builtin_aarch64_ushl_ndi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vshlq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t) __builtin_aarch64_sshl_nv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshlq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t) __builtin_aarch64_sshl_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshlq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t) __builtin_aarch64_sshl_nv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshlq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t) __builtin_aarch64_sshl_nv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vshlq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t) __builtin_aarch64_ushl_nv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshlq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t) __builtin_aarch64_ushl_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshlq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t) __builtin_aarch64_ushl_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshlq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t) __builtin_aarch64_ushl_nv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshld_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t) __builtin_aarch64_sshl_ndi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshld_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t) __builtin_aarch64_ushl_ndi (__a, __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t) __builtin_aarch64_sshlv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_sshlv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_sshlv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_sshldi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_ushlv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_ushlv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_ushlv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_ushldi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t) __builtin_aarch64_sshlv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_sshlv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_sshlv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_sshlv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_ushlv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_ushlv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_ushlv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_ushlv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshld_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_sshldi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshld_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_ushldi (__a, __b);
+}
+
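+/* vshll */
+/* Shift left long (SSHLL/USHLL): widen each element to twice its width,
+   then shift left by the immediate __b.  The _high forms widen the upper
+   half of a 128-bit source.  */
+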
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshll_high_n_s8 (int8x16_t __a, const int __b)
+{
+ return __builtin_aarch64_sshll2_nv16qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshll_high_n_s16 (int16x8_t __a, const int __b)
+{
+ return __builtin_aarch64_sshll2_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshll_high_n_s32 (int32x4_t __a, const int __b)
+{
+ return __builtin_aarch64_sshll2_nv4si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshll_high_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint16x8_t) __builtin_aarch64_ushll2_nv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshll_high_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint32x4_t) __builtin_aarch64_ushll2_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshll_high_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint64x2_t) __builtin_aarch64_ushll2_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshll_n_s8 (int8x8_t __a, const int __b)
+{
+ return __builtin_aarch64_sshll_nv8qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshll_n_s16 (int16x4_t __a, const int __b)
+{
+ return __builtin_aarch64_sshll_nv4hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshll_n_s32 (int32x2_t __a, const int __b)
+{
+ return __builtin_aarch64_sshll_nv2si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshll_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint16x8_t) __builtin_aarch64_ushll_nv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshll_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint32x4_t) __builtin_aarch64_ushll_nv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshll_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint64x2_t) __builtin_aarch64_ushll_nv2si ((int32x2_t) __a, __b);
+}
+
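+/* A minimal usage sketch for the vshll family: each element is widened
+   to twice its size and then shifted left by the immediate; the _high_
+   variants read the upper half of a 128-bit source.  Assuming
+   vdup_n_s8 is defined earlier in this header:
+
+     int8x8_t  b = vdup_n_s8 (3);
+     int16x8_t w = vshll_n_s8 (b, 4);  => each lane of w is 48  */
+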
+/* vshr */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshr_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t) __builtin_aarch64_sshr_nv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshr_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t) __builtin_aarch64_sshr_nv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshr_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t) __builtin_aarch64_sshr_nv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshr_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t) __builtin_aarch64_sshr_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshr_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_ushr_nv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshr_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_ushr_nv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshr_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_ushr_nv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshr_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t) __builtin_aarch64_ushr_ndi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vshrq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t) __builtin_aarch64_sshr_nv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshrq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t) __builtin_aarch64_sshr_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshrq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t) __builtin_aarch64_sshr_nv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshrq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t) __builtin_aarch64_sshr_nv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vshrq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t) __builtin_aarch64_ushr_nv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshrq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t) __builtin_aarch64_ushr_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshrq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t) __builtin_aarch64_ushr_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshrq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t) __builtin_aarch64_ushr_nv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshrd_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t) __builtin_aarch64_sshr_ndi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshrd_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t) __builtin_aarch64_ushr_ndi (__a, __b);
+}
+
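+/* A minimal usage sketch for the vshr family: each lane is shifted
+   right by the immediate, arithmetically for signed types and
+   logically for unsigned ones.  Assuming vdupq_n_u16 is defined
+   earlier in this header:
+
+     uint16x8_t v = vdupq_n_u16 (0x100);
+     uint16x8_t r = vshrq_n_u16 (v, 4);  => each lane of r is 0x10  */
+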
+/* vsli */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsli_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t) __builtin_aarch64_ssli_nv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsli_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t) __builtin_aarch64_ssli_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsli_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t) __builtin_aarch64_ssli_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsli_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t) __builtin_aarch64_ssli_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsli_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t) __builtin_aarch64_usli_nv8qi ((int8x8_t) __a,
+ (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsli_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t) __builtin_aarch64_usli_nv4hi ((int16x4_t) __a,
+ (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsli_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t) __builtin_aarch64_usli_nv2si ((int32x2_t) __a,
+ (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsli_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t) __builtin_aarch64_usli_ndi ((int64x1_t) __a,
+ (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t) __builtin_aarch64_ssli_nv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsliq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t) __builtin_aarch64_ssli_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t) __builtin_aarch64_ssli_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsliq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t) __builtin_aarch64_ssli_nv2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t) __builtin_aarch64_usli_nv16qi ((int8x16_t) __a,
+ (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t) __builtin_aarch64_usli_nv8hi ((int16x8_t) __a,
+ (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t) __builtin_aarch64_usli_nv4si ((int32x4_t) __a,
+ (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsliq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t) __builtin_aarch64_usli_nv2di ((int64x2_t) __a,
+ (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vslid_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t) __builtin_aarch64_ssli_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vslid_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t) __builtin_aarch64_usli_ndi (__a, __b, __c);
+}
+
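+/* A minimal usage sketch for the vsli family: the second operand is
+   shifted left by the immediate and inserted into the first, whose low
+   bits below the shift point are preserved, as for the SLI
+   instruction.  Assuming vdup_n_u8 is defined earlier in this header:
+
+     uint8x8_t lo = vdup_n_u8 (0x5);
+     uint8x8_t hi = vdup_n_u8 (0x3);
+     uint8x8_t r  = vsli_n_u8 (lo, hi, 4);  => each lane of r is 0x35  */
+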
+/* vsqadd */
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsqadd_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_usqaddv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsqadd_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_usqaddv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsqadd_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_usqaddv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsqadd_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_usqadddi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsqaddq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_usqaddv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsqaddq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_usqaddv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsqaddq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_usqaddv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsqaddq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_usqaddv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vsqaddb_u8 (uint8x1_t __a, int8x1_t __b)
+{
+ return (uint8x1_t) __builtin_aarch64_usqaddqi ((int8x1_t) __a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vsqaddh_u16 (uint16x1_t __a, int16x1_t __b)
+{
+ return (uint16x1_t) __builtin_aarch64_usqaddhi ((int16x1_t) __a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vsqadds_u32 (uint32x1_t __a, int32x1_t __b)
+{
+ return (uint32x1_t) __builtin_aarch64_usqaddsi ((int32x1_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsqaddd_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_usqadddi ((int64x1_t) __a, __b);
+}
+
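+/* A minimal usage sketch for the vsqadd family: a signed value is
+   added to an unsigned accumulator with unsigned saturation, as for
+   the USQADD instruction.  Assuming vdup_n_u8 and vdup_n_s8 are
+   defined earlier in this header:
+
+     uint8x8_t acc = vdup_n_u8 (250);
+     int8x8_t  d   = vdup_n_s8 (10);
+     uint8x8_t r   = vsqadd_u8 (acc, d);  => each lane saturates to 255  */
+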
+/* vsqrt */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vsqrt_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_sqrtv2sf (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vsqrtq_f32 (float32x4_t __a)
+{
+ return __builtin_aarch64_sqrtv4sf (__a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vsqrtq_f64 (float64x2_t __a)
+{
+ return __builtin_aarch64_sqrtv2df (__a);
+}
+
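+/* A minimal usage sketch for the vsqrt family, which takes the
+   element-wise square root (FSQRT).  Assuming vdupq_n_f32 is defined
+   earlier in this header:
+
+     float32x4_t v = vdupq_n_f32 (9.0f);
+     float32x4_t r = vsqrtq_f32 (v);  => each lane of r is 3.0f  */
+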
+/* vsra */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t) __builtin_aarch64_ssra_nv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t) __builtin_aarch64_ssra_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t) __builtin_aarch64_ssra_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t) __builtin_aarch64_ssra_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t) __builtin_aarch64_usra_nv8qi ((int8x8_t) __a,
+ (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t) __builtin_aarch64_usra_nv4hi ((int16x4_t) __a,
+ (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t) __builtin_aarch64_usra_nv2si ((int32x2_t) __a,
+ (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t) __builtin_aarch64_usra_ndi ((int64x1_t) __a,
+ (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t) __builtin_aarch64_ssra_nv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t) __builtin_aarch64_ssra_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t) __builtin_aarch64_ssra_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t) __builtin_aarch64_ssra_nv2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t) __builtin_aarch64_usra_nv16qi ((int8x16_t) __a,
+ (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t) __builtin_aarch64_usra_nv8hi ((int16x8_t) __a,
+ (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t) __builtin_aarch64_usra_nv4si ((int32x4_t) __a,
+ (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t) __builtin_aarch64_usra_nv2di ((int64x2_t) __a,
+ (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsrad_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t) __builtin_aarch64_ssra_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsrad_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t) __builtin_aarch64_usra_ndi (__a, __b, __c);
+}
+
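+/* A minimal usage sketch for the vsra family: the second operand is
+   shifted right by the immediate and accumulated into the first, i.e.
+   a + (b >> c) per lane.  Assuming vdup_n_s32 is defined earlier in
+   this header:
+
+     int32x2_t acc = vdup_n_s32 (1);
+     int32x2_t v   = vdup_n_s32 (64);
+     int32x2_t r   = vsra_n_s32 (acc, v, 4);  => each lane of r is 5  */
+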
+/* vsri */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsri_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t) __builtin_aarch64_ssri_nv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsri_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t) __builtin_aarch64_ssri_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsri_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t) __builtin_aarch64_ssri_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsri_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t) __builtin_aarch64_ssri_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsri_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t) __builtin_aarch64_usri_nv8qi ((int8x8_t) __a,
+ (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsri_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t) __builtin_aarch64_usri_nv4hi ((int16x4_t) __a,
+ (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsri_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t) __builtin_aarch64_usri_nv2si ((int32x2_t) __a,
+ (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsri_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t) __builtin_aarch64_usri_ndi ((int64x1_t) __a,
+ (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t) __builtin_aarch64_ssri_nv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t) __builtin_aarch64_ssri_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t) __builtin_aarch64_ssri_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsriq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t) __builtin_aarch64_ssri_nv2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t) __builtin_aarch64_usri_nv16qi ((int8x16_t) __a,
+ (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t) __builtin_aarch64_usri_nv8hi ((int16x8_t) __a,
+ (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t) __builtin_aarch64_usri_nv4si ((int32x4_t) __a,
+ (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsriq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t) __builtin_aarch64_usri_nv2di ((int64x2_t) __a,
+ (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsrid_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t) __builtin_aarch64_ssri_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsrid_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t) __builtin_aarch64_usri_ndi (__a, __b, __c);
+}
+
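+/* A minimal usage sketch for the vsri family: the second operand is
+   shifted right by the immediate and inserted into the first, whose
+   top bits above the insert point are preserved, as for the SRI
+   instruction.  Assuming vdup_n_u8 is defined earlier in this header:
+
+     uint8x8_t dst = vdup_n_u8 (0xF0);
+     uint8x8_t src = vdup_n_u8 (0x80);
+     uint8x8_t r   = vsri_n_u8 (dst, src, 4);  => each lane of r is 0xF8  */
+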
+/* vstn */
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s64 (int64_t * __a, int64x1x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ int64x2x2_t temp;
+ temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (INT64_C (0)));
+ temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (INT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[1], 1);
+ __builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u64 (uint64_t * __a, uint64x1x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ uint64x2x2_t temp;
+ temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (UINT64_C (0)));
+ temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[1], 1);
+ __builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_f64 (float64_t * __a, float64x1x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ float64x2x2_t temp;
+ temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (UINT64_C (0)));
+ temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) temp.val[1], 1);
+ __builtin_aarch64_st2df ((__builtin_aarch64_simd_df *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s8 (int8_t * __a, int8x8x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ int8x16x2_t temp;
+ temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (INT64_C (0)));
+ temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (INT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1);
+ __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_p8 (poly8_t * __a, poly8x8x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ poly8x16x2_t temp;
+ temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (UINT64_C (0)));
+ temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1);
+ __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s16 (int16_t * __a, int16x4x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ int16x8x2_t temp;
+ temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (INT64_C (0)));
+ temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (INT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1);
+ __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_p16 (poly16_t * __a, poly16x4x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ poly16x8x2_t temp;
+ temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (UINT64_C (0)));
+ temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1);
+ __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s32 (int32_t * __a, int32x2x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ int32x4x2_t temp;
+ temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (INT64_C (0)));
+ temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (INT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[1], 1);
+ __builtin_aarch64_st2v2si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u8 (uint8_t * __a, uint8x8x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ uint8x16x2_t temp;
+ temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (UINT64_C (0)));
+ temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1);
+ __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u16 (uint16_t * __a, uint16x4x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ uint16x8x2_t temp;
+ temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (UINT64_C (0)));
+ temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1);
+ __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u32 (uint32_t * __a, uint32x2x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ uint32x4x2_t temp;
+ temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (UINT64_C (0)));
+ temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[1], 1);
+ __builtin_aarch64_st2v2si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_f32 (float32_t * __a, float32x2x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ float32x4x2_t temp;
+ temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (UINT64_C (0)));
+ temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) temp.val[1], 1);
+ __builtin_aarch64_st2v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s8 (int8_t * __a, int8x16x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[1], 1);
+ __builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_p8 (poly8_t * __a, poly8x16x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[1], 1);
+ __builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s16 (int16_t * __a, int16x8x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[1], 1);
+ __builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_p16 (poly16_t * __a, poly16x8x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[1], 1);
+ __builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s32 (int32_t * __a, int32x4x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[1], 1);
+ __builtin_aarch64_st2v4si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s64 (int64_t * __a, int64x2x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[1], 1);
+ __builtin_aarch64_st2v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u8 (uint8_t * __a, uint8x16x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[1], 1);
+ __builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u16 (uint16_t * __a, uint16x8x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[1], 1);
+ __builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u32 (uint32_t * __a, uint32x4x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[1], 1);
+ __builtin_aarch64_st2v4si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u64 (uint64_t * __a, uint64x2x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[1], 1);
+ __builtin_aarch64_st2v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_f32 (float32_t * __a, float32x4x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) val.val[1], 1);
+ __builtin_aarch64_st2v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_f64 (float64_t * __a, float64x2x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) val.val[1], 1);
+ __builtin_aarch64_st2v2df ((__builtin_aarch64_simd_df *) __a, __o);
+}
+
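+/* A minimal usage sketch for the vst2 variants, which store two
+   vectors with 2-way interleaving (ST2); the 64-bit forms above widen
+   their inputs into zero-padded Q-register temporaries because the
+   underlying builtins operate on Q-register tuples.  Assuming
+   vdup_n_s16 is defined earlier in this header:
+
+     int16x4x2_t pair = { { vdup_n_s16 (1), vdup_n_s16 (2) } };
+     int16_t buf[8];
+     vst2_s16 (buf, pair);  => buf holds 1,2,1,2,1,2,1,2  */
+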
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s64 (int64_t * __a, int64x1x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ int64x2x3_t temp;
+ temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (INT64_C (0)));
+ temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (INT64_C (0)));
+ temp.val[2] = vcombine_s64 (val.val[2], vcreate_s64 (INT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[2], 2);
+ __builtin_aarch64_st3di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u64 (uint64_t * __a, uint64x1x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ uint64x2x3_t temp;
+ temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (UINT64_C (0)));
+ temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (UINT64_C (0)));
+ temp.val[2] = vcombine_u64 (val.val[2], vcreate_u64 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[2], 2);
+ __builtin_aarch64_st3di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_f64 (float64_t * __a, float64x1x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ float64x2x3_t temp;
+ temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (UINT64_C (0)));
+ temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (UINT64_C (0)));
+ temp.val[2] = vcombine_f64 (val.val[2], vcreate_f64 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[2], 2);
+ __builtin_aarch64_st3df ((__builtin_aarch64_simd_df *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s8 (int8_t * __a, int8x8x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ int8x16x3_t temp;
+ temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (INT64_C (0)));
+ temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (INT64_C (0)));
+ temp.val[2] = vcombine_s8 (val.val[2], vcreate_s8 (INT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2);
+ __builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_p8 (poly8_t * __a, poly8x8x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ poly8x16x3_t temp;
+ temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (UINT64_C (0)));
+ temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (UINT64_C (0)));
+ temp.val[2] = vcombine_p8 (val.val[2], vcreate_p8 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2);
+ __builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s16 (int16_t * __a, int16x4x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ int16x8x3_t temp;
+ temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (INT64_C (0)));
+ temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (INT64_C (0)));
+ temp.val[2] = vcombine_s16 (val.val[2], vcreate_s16 (INT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2);
+ __builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_p16 (poly16_t * __a, poly16x4x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ poly16x8x3_t temp;
+ temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (UINT64_C (0)));
+ temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (UINT64_C (0)));
+ temp.val[2] = vcombine_p16 (val.val[2], vcreate_p16 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2);
+ __builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s32 (int32_t * __a, int32x2x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ int32x4x3_t temp;
+ temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (INT64_C (0)));
+ temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (INT64_C (0)));
+ temp.val[2] = vcombine_s32 (val.val[2], vcreate_s32 (INT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[2], 2);
+ __builtin_aarch64_st3v2si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u8 (uint8_t * __a, uint8x8x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ uint8x16x3_t temp;
+ temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (UINT64_C (0)));
+ temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (UINT64_C (0)));
+ temp.val[2] = vcombine_u8 (val.val[2], vcreate_u8 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2);
+ __builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u16 (uint16_t * __a, uint16x4x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ uint16x8x3_t temp;
+ temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (UINT64_C (0)));
+ temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (UINT64_C (0)));
+ temp.val[2] = vcombine_u16 (val.val[2], vcreate_u16 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2);
+ __builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u32 (uint32_t * __a, uint32x2x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ uint32x4x3_t temp;
+ temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (UINT64_C (0)));
+ temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (UINT64_C (0)));
+ temp.val[2] = vcombine_u32 (val.val[2], vcreate_u32 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[2], 2);
+ __builtin_aarch64_st3v2si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_f32 (float32_t * __a, float32x2x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ float32x4x3_t temp;
+ temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (UINT64_C (0)));
+ temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (UINT64_C (0)));
+ temp.val[2] = vcombine_f32 (val.val[2], vcreate_f32 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[2], 2);
+ __builtin_aarch64_st3v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s8 (int8_t * __a, int8x16x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[2], 2);
+ __builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_p8 (poly8_t * __a, poly8x16x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[2], 2);
+ __builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s16 (int16_t * __a, int16x8x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[2], 2);
+ __builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_p16 (poly16_t * __a, poly16x8x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[2], 2);
+ __builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s32 (int32_t * __a, int32x4x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[2], 2);
+ __builtin_aarch64_st3v4si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s64 (int64_t * __a, int64x2x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[2], 2);
+ __builtin_aarch64_st3v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u8 (uint8_t * __a, uint8x16x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[2], 2);
+ __builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u16 (uint16_t * __a, uint16x8x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[2], 2);
+ __builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u32 (uint32_t * __a, uint32x4x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[2], 2);
+ __builtin_aarch64_st3v4si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u64 (uint64_t * __a, uint64x2x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[2], 2);
+ __builtin_aarch64_st3v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_f32 (float32_t * __a, float32x4x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) val.val[2], 2);
+ __builtin_aarch64_st3v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_f64 (float64_t * __a, float64x2x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) val.val[2], 2);
+ __builtin_aarch64_st3v2df ((__builtin_aarch64_simd_df *) __a, __o);
+}
+
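+/* A minimal usage sketch for the vst3 variants, which store three
+   vectors with 3-way interleaving (ST3), e.g. repacking planar
+   channels into an interleaved buffer.  Assuming vdup_n_u8 is defined
+   earlier in this header:
+
+     uint8x8x3_t rgb = { { vdup_n_u8 (1), vdup_n_u8 (2), vdup_n_u8 (3) } };
+     uint8_t buf[24];
+     vst3_u8 (buf, rgb);  => buf holds 1,2,3,1,2,3,...  */
+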
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s64 (int64_t * __a, int64x1x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ int64x2x4_t temp;
+ temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (INT64_C (0)));
+ temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (INT64_C (0)));
+ temp.val[2] = vcombine_s64 (val.val[2], vcreate_s64 (INT64_C (0)));
+ temp.val[3] = vcombine_s64 (val.val[3], vcreate_s64 (INT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[3], 3);
+ __builtin_aarch64_st4di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u64 (uint64_t * __a, uint64x1x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ uint64x2x4_t temp;
+ temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (UINT64_C (0)));
+ temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (UINT64_C (0)));
+ temp.val[2] = vcombine_u64 (val.val[2], vcreate_u64 (UINT64_C (0)));
+ temp.val[3] = vcombine_u64 (val.val[3], vcreate_u64 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[3], 3);
+ __builtin_aarch64_st4di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_f64 (float64_t * __a, float64x1x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ float64x2x4_t temp;
+ temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (UINT64_C (0)));
+ temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (UINT64_C (0)));
+ temp.val[2] = vcombine_f64 (val.val[2], vcreate_f64 (UINT64_C (0)));
+ temp.val[3] = vcombine_f64 (val.val[3], vcreate_f64 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[3], 3);
+ __builtin_aarch64_st4df ((__builtin_aarch64_simd_df *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s8 (int8_t * __a, int8x8x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ int8x16x4_t temp;
+ temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (INT64_C (0)));
+ temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (INT64_C (0)));
+ temp.val[2] = vcombine_s8 (val.val[2], vcreate_s8 (INT64_C (0)));
+ temp.val[3] = vcombine_s8 (val.val[3], vcreate_s8 (INT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[3], 3);
+ __builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_p8 (poly8_t * __a, poly8x8x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ poly8x16x4_t temp;
+ temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (UINT64_C (0)));
+ temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (UINT64_C (0)));
+ temp.val[2] = vcombine_p8 (val.val[2], vcreate_p8 (UINT64_C (0)));
+ temp.val[3] = vcombine_p8 (val.val[3], vcreate_p8 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[3], 3);
+ __builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s16 (int16_t * __a, int16x4x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ int16x8x4_t temp;
+ temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (INT64_C (0)));
+ temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (INT64_C (0)));
+ temp.val[2] = vcombine_s16 (val.val[2], vcreate_s16 (INT64_C (0)));
+ temp.val[3] = vcombine_s16 (val.val[3], vcreate_s16 (INT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[3], 3);
+ __builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_p16 (poly16_t * __a, poly16x4x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ poly16x8x4_t temp;
+ temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (UINT64_C (0)));
+ temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (UINT64_C (0)));
+ temp.val[2] = vcombine_p16 (val.val[2], vcreate_p16 (UINT64_C (0)));
+ temp.val[3] = vcombine_p16 (val.val[3], vcreate_p16 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[3], 3);
+ __builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s32 (int32_t * __a, int32x2x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ int32x4x4_t temp;
+ temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (INT64_C (0)));
+ temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (INT64_C (0)));
+ temp.val[2] = vcombine_s32 (val.val[2], vcreate_s32 (INT64_C (0)));
+ temp.val[3] = vcombine_s32 (val.val[3], vcreate_s32 (INT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[3], 3);
+ __builtin_aarch64_st4v2si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u8 (uint8_t * __a, uint8x8x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ uint8x16x4_t temp;
+ temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (UINT64_C (0)));
+ temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (UINT64_C (0)));
+ temp.val[2] = vcombine_u8 (val.val[2], vcreate_u8 (UINT64_C (0)));
+ temp.val[3] = vcombine_u8 (val.val[3], vcreate_u8 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[3], 3);
+ __builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u16 (uint16_t * __a, uint16x4x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ uint16x8x4_t temp;
+ temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (UINT64_C (0)));
+ temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (UINT64_C (0)));
+ temp.val[2] = vcombine_u16 (val.val[2], vcreate_u16 (UINT64_C (0)));
+ temp.val[3] = vcombine_u16 (val.val[3], vcreate_u16 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[3], 3);
+ __builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u32 (uint32_t * __a, uint32x2x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ uint32x4x4_t temp;
+ temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (UINT64_C (0)));
+ temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (UINT64_C (0)));
+ temp.val[2] = vcombine_u32 (val.val[2], vcreate_u32 (UINT64_C (0)));
+ temp.val[3] = vcombine_u32 (val.val[3], vcreate_u32 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[3], 3);
+ __builtin_aarch64_st4v2si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_f32 (float32_t * __a, float32x2x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ float32x4x4_t temp;
+ temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (UINT64_C (0)));
+ temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (UINT64_C (0)));
+ temp.val[2] = vcombine_f32 (val.val[2], vcreate_f32 (UINT64_C (0)));
+ temp.val[3] = vcombine_f32 (val.val[3], vcreate_f32 (UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[3], 3);
+ __builtin_aarch64_st4v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s8 (int8_t * __a, int8x16x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[3], 3);
+ __builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_p8 (poly8_t * __a, poly8x16x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[3], 3);
+ __builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s16 (int16_t * __a, int16x8x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[3], 3);
+ __builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_p16 (poly16_t * __a, poly16x8x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[3], 3);
+ __builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s32 (int32_t * __a, int32x4x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[3], 3);
+ __builtin_aarch64_st4v4si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s64 (int64_t * __a, int64x2x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[3], 3);
+ __builtin_aarch64_st4v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u8 (uint8_t * __a, uint8x16x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[3], 3);
+ __builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u16 (uint16_t * __a, uint16x8x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[3], 3);
+ __builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u32 (uint32_t * __a, uint32x4x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[3], 3);
+ __builtin_aarch64_st4v4si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u64 (uint64_t * __a, uint64x2x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[3], 3);
+ __builtin_aarch64_st4v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_f32 (float32_t * __a, float32x4x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[3], 3);
+ __builtin_aarch64_st4v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_f64 (float64_t * __a, float64x2x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[3], 3);
+ __builtin_aarch64_st4v2df ((__builtin_aarch64_simd_df *) __a, __o);
+}
+
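+/* Illustrative usage sketch (not part of the original header): the
+   vst4q family performs an interleaved store of four vectors, so the
+   destination receives lane 0 of each vector first, then lane 1, and
+   so on.  Given a populated float32x4x4_t VAL and a 16-element buffer
+   DST, the call
+
+     vst4q_f32 (dst, val);
+
+   leaves dst as { val.val[0][0], val.val[1][0], val.val[2][0],
+   val.val[3][0], val.val[0][1], ... }.  */
+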
+/* vsub */
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsubd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsubd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a - __b;
+}
+
+/* vtrn */
+
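+/* Illustrative note (not in the original source): each vtrn intrinsic
+   below pairs the vtrn1/vtrn2 forms, interleaving the even-indexed
+   lanes of a and b into val[0] and the odd-indexed lanes into val[1].
+   For a = {0,1,2,3,4,5,6,7} and b = {10,11,12,13,14,15,16,17},
+   vtrn_s8 (a, b) yields {{0,10,2,12,4,14,6,16}, {1,11,3,13,5,15,7,17}}.  */
+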
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vtrn_f32 (float32x2_t a, float32x2_t b)
+{
+ return (float32x2x2_t) {vtrn1_f32 (a, b), vtrn2_f32 (a, b)};
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vtrn_p8 (poly8x8_t a, poly8x8_t b)
+{
+ return (poly8x8x2_t) {vtrn1_p8 (a, b), vtrn2_p8 (a, b)};
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vtrn_p16 (poly16x4_t a, poly16x4_t b)
+{
+ return (poly16x4x2_t) {vtrn1_p16 (a, b), vtrn2_p16 (a, b)};
+}
+
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vtrn_s8 (int8x8_t a, int8x8_t b)
+{
+ return (int8x8x2_t) {vtrn1_s8 (a, b), vtrn2_s8 (a, b)};
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vtrn_s16 (int16x4_t a, int16x4_t b)
+{
+ return (int16x4x2_t) {vtrn1_s16 (a, b), vtrn2_s16 (a, b)};
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vtrn_s32 (int32x2_t a, int32x2_t b)
+{
+ return (int32x2x2_t) {vtrn1_s32 (a, b), vtrn2_s32 (a, b)};
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vtrn_u8 (uint8x8_t a, uint8x8_t b)
+{
+ return (uint8x8x2_t) {vtrn1_u8 (a, b), vtrn2_u8 (a, b)};
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vtrn_u16 (uint16x4_t a, uint16x4_t b)
+{
+ return (uint16x4x2_t) {vtrn1_u16 (a, b), vtrn2_u16 (a, b)};
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vtrn_u32 (uint32x2_t a, uint32x2_t b)
+{
+ return (uint32x2x2_t) {vtrn1_u32 (a, b), vtrn2_u32 (a, b)};
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vtrnq_f32 (float32x4_t a, float32x4_t b)
+{
+ return (float32x4x2_t) {vtrn1q_f32 (a, b), vtrn2q_f32 (a, b)};
+}
+
+__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+vtrnq_p8 (poly8x16_t a, poly8x16_t b)
+{
+ return (poly8x16x2_t) {vtrn1q_p8 (a, b), vtrn2q_p8 (a, b)};
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vtrnq_p16 (poly16x8_t a, poly16x8_t b)
+{
+ return (poly16x8x2_t) {vtrn1q_p16 (a, b), vtrn2q_p16 (a, b)};
+}
+
+__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+vtrnq_s8 (int8x16_t a, int8x16_t b)
+{
+ return (int8x16x2_t) {vtrn1q_s8 (a, b), vtrn2q_s8 (a, b)};
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vtrnq_s16 (int16x8_t a, int16x8_t b)
+{
+ return (int16x8x2_t) {vtrn1q_s16 (a, b), vtrn2q_s16 (a, b)};
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vtrnq_s32 (int32x4_t a, int32x4_t b)
+{
+ return (int32x4x2_t) {vtrn1q_s32 (a, b), vtrn2q_s32 (a, b)};
+}
+
+__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+vtrnq_u8 (uint8x16_t a, uint8x16_t b)
+{
+ return (uint8x16x2_t) {vtrn1q_u8 (a, b), vtrn2q_u8 (a, b)};
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vtrnq_u16 (uint16x8_t a, uint16x8_t b)
+{
+ return (uint16x8x2_t) {vtrn1q_u16 (a, b), vtrn2q_u16 (a, b)};
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vtrnq_u32 (uint32x4_t a, uint32x4_t b)
+{
+ return (uint32x4x2_t) {vtrn1q_u32 (a, b), vtrn2q_u32 (a, b)};
+}
+
+/* vtst */
+
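+/* Illustrative note (not in the original source): each vtst lane is
+   set to all ones when the bitwise AND of the corresponding input
+   lanes is non-zero, and to zero otherwise; e.g. lanes {1 & 3, 2 & 0}
+   give {0xff, 0x00}.  */
+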
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtst_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmtstv8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtst_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmtstv4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vtst_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmtstv2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vtst_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmtstdi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtst_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmtstv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtst_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmtstv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vtst_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmtstv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vtst_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmtstdi ((int64x1_t) __a,
+ (int64x1_t) __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtstq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmtstv16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtstq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmtstv8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vtstq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmtstv4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vtstq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmtstv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtstq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmtstv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtstq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmtstv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vtstq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmtstv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vtstq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmtstv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vtstd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmtstdi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vtstd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_cmtstdi ((int64x1_t) __a,
+ (int64x1_t) __b);
+}
+
+/* vuqadd */
+
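+/* Illustrative note (not in the original source): vuqadd maps to the
+   SUQADD instruction, a signed saturating addition of an unsigned
+   value to a signed accumulator; e.g. vuqaddb_s8 (100, 100) saturates
+   to 127.  */
+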
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vuqadd_s8 (int8x8_t __a, uint8x8_t __b)
+{
+ return (int8x8_t) __builtin_aarch64_suqaddv8qi (__a, (int8x8_t) __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vuqadd_s16 (int16x4_t __a, uint16x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_suqaddv4hi (__a, (int16x4_t) __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vuqadd_s32 (int32x2_t __a, uint32x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_suqaddv2si (__a, (int32x2_t) __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vuqadd_s64 (int64x1_t __a, uint64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_suqadddi (__a, (int64x1_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vuqaddq_s8 (int8x16_t __a, uint8x16_t __b)
+{
+ return (int8x16_t) __builtin_aarch64_suqaddv16qi (__a, (int8x16_t) __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vuqaddq_s16 (int16x8_t __a, uint16x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_suqaddv8hi (__a, (int16x8_t) __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vuqaddq_s32 (int32x4_t __a, uint32x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_suqaddv4si (__a, (int32x4_t) __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vuqaddq_s64 (int64x2_t __a, uint64x2_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_suqaddv2di (__a, (int64x2_t) __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vuqaddb_s8 (int8x1_t __a, uint8x1_t __b)
+{
+ return (int8x1_t) __builtin_aarch64_suqaddqi (__a, (int8x1_t) __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vuqaddh_s16 (int16x1_t __a, uint16x1_t __b)
+{
+ return (int16x1_t) __builtin_aarch64_suqaddhi (__a, (int16x1_t) __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vuqadds_s32 (int32x1_t __a, uint32x1_t __b)
+{
+ return (int32x1_t) __builtin_aarch64_suqaddsi (__a, (int32x1_t) __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vuqaddd_s64 (int64x1_t __a, uint64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_suqadddi (__a, (int64x1_t) __b);
+}
+
+#define __DEFINTERLEAVE(op, rettype, intype, funcsuffix, Q) \
+ __extension__ static __inline rettype \
+ __attribute__ ((__always_inline__)) \
+ v ## op ## Q ## _ ## funcsuffix (intype a, intype b) \
+ { \
+ return (rettype) {v ## op ## 1 ## Q ## _ ## funcsuffix (a, b), \
+ v ## op ## 2 ## Q ## _ ## funcsuffix (a, b)}; \
+ }
+
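+/* For illustration (not in the original source),
+   __DEFINTERLEAVE (uzp, float32x2x2_t, float32x2_t, f32,) expands
+   (roughly) to:
+
+     __extension__ static __inline float32x2x2_t
+     __attribute__ ((__always_inline__))
+     vuzp_f32 (float32x2_t a, float32x2_t b)
+     {
+       return (float32x2x2_t) {vuzp1_f32 (a, b), vuzp2_f32 (a, b)};
+     }  */
+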
+#define __INTERLEAVE_LIST(op) \
+ __DEFINTERLEAVE (op, float32x2x2_t, float32x2_t, f32,) \
+ __DEFINTERLEAVE (op, poly8x8x2_t, poly8x8_t, p8,) \
+ __DEFINTERLEAVE (op, poly16x4x2_t, poly16x4_t, p16,) \
+ __DEFINTERLEAVE (op, int8x8x2_t, int8x8_t, s8,) \
+ __DEFINTERLEAVE (op, int16x4x2_t, int16x4_t, s16,) \
+ __DEFINTERLEAVE (op, int32x2x2_t, int32x2_t, s32,) \
+ __DEFINTERLEAVE (op, uint8x8x2_t, uint8x8_t, u8,) \
+ __DEFINTERLEAVE (op, uint16x4x2_t, uint16x4_t, u16,) \
+ __DEFINTERLEAVE (op, uint32x2x2_t, uint32x2_t, u32,) \
+ __DEFINTERLEAVE (op, float32x4x2_t, float32x4_t, f32, q) \
+ __DEFINTERLEAVE (op, poly8x16x2_t, poly8x16_t, p8, q) \
+ __DEFINTERLEAVE (op, poly16x8x2_t, poly16x8_t, p16, q) \
+ __DEFINTERLEAVE (op, int8x16x2_t, int8x16_t, s8, q) \
+ __DEFINTERLEAVE (op, int16x8x2_t, int16x8_t, s16, q) \
+ __DEFINTERLEAVE (op, int32x4x2_t, int32x4_t, s32, q) \
+ __DEFINTERLEAVE (op, uint8x16x2_t, uint8x16_t, u8, q) \
+ __DEFINTERLEAVE (op, uint16x8x2_t, uint16x8_t, u16, q) \
+ __DEFINTERLEAVE (op, uint32x4x2_t, uint32x4_t, u32, q)
+
+/* vuzp */
+
+__INTERLEAVE_LIST (uzp)
+
+/* vzip */
+
+__INTERLEAVE_LIST (zip)
+
+#undef __INTERLEAVE_LIST
+#undef __DEFINTERLEAVE
+
+/* End of optimal implementations in approved order. */
+
+#endif
diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
new file mode 100644
index 00000000000..fe613070f4e
--- /dev/null
+++ b/gcc/config/aarch64/constraints.md
@@ -0,0 +1,167 @@
+;; Machine description for AArch64 architecture.
+;; Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_register_constraint "k" "STACK_REG"
+ "@internal The stack register.")
+
+(define_register_constraint "w" "FP_REGS"
+ "Floating point and SIMD vector registers.")
+
+(define_register_constraint "x" "FP_LO_REGS"
+ "Floating point and SIMD vector registers V0 - V15.")
+
+(define_constraint "I"
+ "A constant that can be used with an ADD operation."
+ (and (match_code "const_int")
+ (match_test "aarch64_uimm12_shift (ival)")))
+
+(define_constraint "J"
+ "A constant that can be used with a SUB operation (once negated)."
+ (and (match_code "const_int")
+ (match_test "aarch64_uimm12_shift (-ival)")))
+
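+;; Illustrative examples (not exhaustive): "I" accepts #0x1 and
+;; #0xfff000 (a 12-bit unsigned immediate, optionally shifted left by
+;; 12), while "J" accepts the corresponding negated values, e.g. #-4096.
+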
+;; We can't use the mode of a CONST_INT to determine the context in
+;; which it is being used, so we must have a separate constraint for
+;; each context.
+
+(define_constraint "K"
+ "A constant that can be used with a 32-bit logical operation."
+ (and (match_code "const_int")
+ (match_test "aarch64_bitmask_imm (ival, SImode)")))
+
+(define_constraint "L"
+ "A constant that can be used with a 64-bit logical operation."
+ (and (match_code "const_int")
+ (match_test "aarch64_bitmask_imm (ival, DImode)")))
+
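+;; Illustrative note (assumed semantics): logical immediates are the
+;; AArch64 "bitmask immediates" -- a rotated run of contiguous ones
+;; replicated across the register -- so "K" accepts e.g. #0x00ff00ff
+;; but rejects e.g. #0x12345678.
+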
+(define_constraint "M"
+ "A constant that can be used with a 32-bit MOV immediate operation."
+ (and (match_code "const_int")
+ (match_test "aarch64_move_imm (ival, SImode)")))
+
+(define_constraint "N"
+ "A constant that can be used with a 64-bit MOV immediate operation."
+ (and (match_code "const_int")
+ (match_test "aarch64_move_imm (ival, DImode)")))
+
+(define_constraint "S"
+ "A constraint that matches an absolute symbolic address."
+ (and (match_code "const,symbol_ref,label_ref")
+ (match_test "aarch64_symbolic_address_p (op)")))
+
+(define_constraint "Y"
+ "Floating point constant zero."
+ (and (match_code "const_double")
+ (match_test "aarch64_const_double_zero_rtx_p (op)")))
+
+(define_constraint "Z"
+ "Integer constant zero."
+ (match_test "op == const0_rtx"))
+
+(define_constraint "Usa"
+ "A constraint that matches an absolute symbolic address."
+ (and (match_code "const,symbol_ref")
+ (match_test "aarch64_symbolic_address_p (op)")))
+
+(define_constraint "Ush"
+ "A constraint that matches an absolute symbolic address high part."
+ (and (match_code "high")
+ (match_test "aarch64_valid_symref (XEXP (op, 0), GET_MODE (XEXP (op, 0)))")))
+
+(define_constraint "Uss"
+ "@internal
+ A constraint that matches an immediate shift constant in SImode."
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) ival < 32")))
+
+(define_constraint "Usd"
+ "@internal
+ A constraint that matches an immediate shift constant in DImode."
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) ival < 64")))
+
+(define_constraint "UsM"
+ "@internal
+ A constraint that matches the immediate constant -1."
+ (match_test "op == constm1_rtx"))
+
+(define_constraint "Ui3"
+ "@internal
+ A constraint that matches the integers 0...4."
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) ival <= 4")))
+
+(define_constraint "Up3"
+ "@internal
+ A constraint that matches the integers 2^(0...4)."
+ (and (match_code "const_int")
+ (match_test "(unsigned) exact_log2 (ival) <= 4")))
+
+(define_memory_constraint "Q"
+ "A memory address which uses a single base register with no offset."
+ (and (match_code "mem")
+ (match_test "REG_P (XEXP (op, 0))")))
+
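+;; For example, "Q" accepts [x0] but not [x0, #8]: only a bare base
+;; register address is allowed.
+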
+(define_memory_constraint "Ump"
+ "@internal
+ A memory address suitable for a load/store pair operation."
+ (and (match_code "mem")
+ (match_test "aarch64_legitimate_address_p (GET_MODE (op), XEXP (op, 0),
+ PARALLEL, 1)")))
+
+(define_memory_constraint "Utv"
+ "@internal
+ An address valid for loading/storing opaque structure
+ types wider than TImode."
+ (and (match_code "mem")
+ (match_test "aarch64_simd_mem_operand_p (op)")))
+
+(define_constraint "Dn"
+ "@internal
+   A constraint that matches a vector of immediates.
+ (and (match_code "const_vector")
+ (match_test "aarch64_simd_immediate_valid_for_move (op, GET_MODE (op),
+ NULL, NULL, NULL,
+ NULL, NULL) != 0")))
+
+(define_constraint "Dl"
+ "@internal
+   A constraint that matches a vector of immediates for left shifts.
+ (and (match_code "const_vector")
+ (match_test "aarch64_simd_shift_imm_p (op, GET_MODE (op),
+ true)")))
+
+(define_constraint "Dr"
+ "@internal
+   A constraint that matches a vector of immediates for right shifts."
+  (and (match_code "const_vector")
+       (match_test "aarch64_simd_shift_imm_p (op, GET_MODE (op),
+					      false)")))
+
+(define_constraint "Dz"
+ "@internal
+   A constraint that matches a vector of immediate zeros.
+ (and (match_code "const_vector")
+ (match_test "aarch64_simd_imm_zero_p (op, GET_MODE (op))")))
+
+(define_constraint "Dd"
+ "@internal
+ A constraint that matches an immediate operand valid for AdvSIMD scalar."
+ (and (match_code "const_int")
+ (match_test "aarch64_simd_imm_scalar_p (op, GET_MODE (op))")))
diff --git a/gcc/config/aarch64/gentune.sh b/gcc/config/aarch64/gentune.sh
new file mode 100644
index 00000000000..97b378756b6
--- /dev/null
+++ b/gcc/config/aarch64/gentune.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+#
+# Copyright (C) 2011, 2012 Free Software Foundation, Inc.
+# Contributed by ARM Ltd.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Generate aarch64-tune.md, a file containing the tune attribute, from
+# the list of CPUs in aarch64-cores.def.
+
+echo ";; -*- buffer-read-only: t -*-"
+echo ";; Generated automatically by gentune.sh from aarch64-cores.def"
+
+allcores=`awk -F'[(, ]+' '/^AARCH64_CORE/ { cores = cores$3"," } END { print cores } ' $1`
+
+echo "(define_attr \"tune\""
+echo " \"$allcores\"" | sed -e 's/,"$/"/'
+echo " (const (symbol_ref \"((enum attr_tune) aarch64_tune)\")))"
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
new file mode 100644
index 00000000000..bf2041e78e5
--- /dev/null
+++ b/gcc/config/aarch64/iterators.md
@@ -0,0 +1,716 @@
+;; Machine description for AArch64 architecture.
+;; Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; -------------------------------------------------------------------
+;; Mode Iterators
+;; -------------------------------------------------------------------
+
+
+;; Iterator for General Purpose Integer registers (32- and 64-bit modes)
+(define_mode_iterator GPI [SI DI])
+
+;; Iterator for QI and HI modes
+(define_mode_iterator SHORT [QI HI])
+
+;; Iterator for all integer modes (up to 64-bit)
+(define_mode_iterator ALLI [QI HI SI DI])
+
+;; Iterator for scalar modes (up to 64-bit)
+(define_mode_iterator SDQ_I [QI HI SI DI])
+
+;; Iterator for all integer modes that can be extended (up to 64-bit)
+(define_mode_iterator ALLX [QI HI SI])
+
+;; Iterator for Floating-point registers (32- and 64-bit scalar modes)
+(define_mode_iterator GPF [SF DF])
+
+;; Integer vector modes.
+(define_mode_iterator VDQ [V8QI V16QI V4HI V8HI V2SI V4SI V2DI])
+
+;; Integer vector modes.
+(define_mode_iterator VDQ_I [V8QI V16QI V4HI V8HI V2SI V4SI V2DI])
+
+;; vector and scalar, 64 & 128-bit container, all integer modes
+(define_mode_iterator VSDQ_I [V8QI V16QI V4HI V8HI V2SI V4SI V2DI QI HI SI DI])
+
+;; vector and scalar, 64 & 128-bit container: all vector integer modes;
+;; 64-bit scalar integer mode
+(define_mode_iterator VSDQ_I_DI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI DI])
+
+;; Double vector modes.
+(define_mode_iterator VD [V8QI V4HI V2SI V2SF])
+
+;; vector, 64-bit container, all integer modes
+(define_mode_iterator VD_BHSI [V8QI V4HI V2SI])
+
+;; 128 and 64-bit container; 8, 16, 32-bit vector integer modes
+(define_mode_iterator VDQ_BHSI [V8QI V16QI V4HI V8HI V2SI V4SI])
+
+;; Quad vector modes.
+(define_mode_iterator VQ [V16QI V8HI V4SI V2DI V4SF V2DF])
+
+;; Integer vector modes, excluding those with 64-bit elements.
+(define_mode_iterator VQ_S [V8QI V16QI V4HI V8HI V2SI V4SI])
+
+;; Vector and scalar, 64 & 128-bit container: all vector integer mode;
+;; 8, 16, 32-bit scalar integer modes
+(define_mode_iterator VSDQ_I_BHSI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI QI HI SI])
+
+;; Vector modes for moves.
+(define_mode_iterator VDQM [V8QI V16QI V4HI V8HI V2SI V4SI])
+
+;; This mode iterator allows :PTR to be used for patterns that operate on
+;; pointer-sized quantities. Exactly one of the two alternatives will match.
+(define_mode_iterator PTR [(SI "Pmode == SImode") (DI "Pmode == DImode")])
+
+;; Vector Float modes.
+(define_mode_iterator VDQF [V2SF V4SF V2DF])
+
+;; Vector Float modes with 2 elements.
+(define_mode_iterator V2F [V2SF V2DF])
+
+;; All modes.
+(define_mode_iterator VALL [V8QI V16QI V4HI V8HI V2SI V4SI V2DI V2SF V4SF V2DF])
+
+;; Vector modes for Integer reduction across lanes.
+(define_mode_iterator VDQV [V8QI V16QI V4HI V8HI V4SI])
+
+;; All double integer narrow-able modes.
+(define_mode_iterator VDN [V4HI V2SI DI])
+
+;; All quad integer narrow-able modes.
+(define_mode_iterator VQN [V8HI V4SI V2DI])
+
+;; All double integer widen-able modes.
+(define_mode_iterator VDW [V8QI V4HI V2SI])
+
+;; Vector and scalar 128-bit container: narrowable 16, 32, 64-bit integer modes
+(define_mode_iterator VSQN_HSDI [V8HI V4SI V2DI HI SI DI])
+
+;; All quad integer widen-able modes.
+(define_mode_iterator VQW [V16QI V8HI V4SI])
+
+;; Double vector modes for combines.
+(define_mode_iterator VDC [V8QI V4HI V2SI V2SF DI DF])
+
+;; Double vector integer modes for combines.
+(define_mode_iterator VDIC [V8QI V4HI V2SI])
+
+;; Double vector modes.
+(define_mode_iterator VD_RE [V8QI V4HI V2SI DI DF V2SF])
+
+;; Vector modes except double int.
+(define_mode_iterator VDQIF [V8QI V16QI V4HI V8HI V2SI V4SI V2SF V4SF V2DF])
+
+;; Vector modes for H and S types.
+(define_mode_iterator VDQHS [V4HI V8HI V2SI V4SI])
+
+;; Vector and scalar integer modes for H and S
+(define_mode_iterator VSDQ_HSI [V4HI V8HI V2SI V4SI HI SI])
+
+;; Vector and scalar 64-bit container: 16, 32-bit integer modes
+(define_mode_iterator VSD_HSI [V4HI V2SI HI SI])
+
+;; Vector 64-bit container: 16, 32-bit integer modes
+(define_mode_iterator VD_HSI [V4HI V2SI])
+
+;; Scalar 64-bit container: 16, 32-bit integer modes
+(define_mode_iterator SD_HSI [HI SI])
+
+;; Vector 64-bit container: 16, 32-bit integer modes
+(define_mode_iterator VQ_HSI [V8HI V4SI])
+
+;; All byte modes.
+(define_mode_iterator VB [V8QI V16QI])
+
+(define_mode_iterator TX [TI TF])
+
+;; Opaque structure modes.
+(define_mode_iterator VSTRUCT [OI CI XI])
+
+;; Double scalar modes
+(define_mode_iterator DX [DI DF])
+
+;; ------------------------------------------------------------------
+;; Unspec enumerations for Advanced SIMD. These could well go into
+;; aarch64.md but for their use in int_iterators here.
+;; ------------------------------------------------------------------
+
+(define_c_enum "unspec"
+ [
+    UNSPEC_ASHIFT_SIGNED	; Used in aarch64-simd.md.
+ UNSPEC_ASHIFT_UNSIGNED ; Used in aarch64-simd.md.
+ UNSPEC_FMAXV ; Used in aarch64-simd.md.
+ UNSPEC_FMINV ; Used in aarch64-simd.md.
+ UNSPEC_FADDV ; Used in aarch64-simd.md.
+ UNSPEC_ADDV ; Used in aarch64-simd.md.
+ UNSPEC_SMAXV ; Used in aarch64-simd.md.
+ UNSPEC_SMINV ; Used in aarch64-simd.md.
+ UNSPEC_UMAXV ; Used in aarch64-simd.md.
+ UNSPEC_UMINV ; Used in aarch64-simd.md.
+ UNSPEC_SHADD ; Used in aarch64-simd.md.
+ UNSPEC_UHADD ; Used in aarch64-simd.md.
+ UNSPEC_SRHADD ; Used in aarch64-simd.md.
+ UNSPEC_URHADD ; Used in aarch64-simd.md.
+ UNSPEC_SHSUB ; Used in aarch64-simd.md.
+ UNSPEC_UHSUB ; Used in aarch64-simd.md.
+ UNSPEC_SRHSUB ; Used in aarch64-simd.md.
+ UNSPEC_URHSUB ; Used in aarch64-simd.md.
+ UNSPEC_ADDHN ; Used in aarch64-simd.md.
+ UNSPEC_RADDHN ; Used in aarch64-simd.md.
+ UNSPEC_SUBHN ; Used in aarch64-simd.md.
+ UNSPEC_RSUBHN ; Used in aarch64-simd.md.
+ UNSPEC_ADDHN2 ; Used in aarch64-simd.md.
+ UNSPEC_RADDHN2 ; Used in aarch64-simd.md.
+ UNSPEC_SUBHN2 ; Used in aarch64-simd.md.
+ UNSPEC_RSUBHN2 ; Used in aarch64-simd.md.
+ UNSPEC_SQDMULH ; Used in aarch64-simd.md.
+ UNSPEC_SQRDMULH ; Used in aarch64-simd.md.
+ UNSPEC_PMUL ; Used in aarch64-simd.md.
+ UNSPEC_USQADD ; Used in aarch64-simd.md.
+ UNSPEC_SUQADD ; Used in aarch64-simd.md.
+ UNSPEC_SQXTUN ; Used in aarch64-simd.md.
+ UNSPEC_SQXTN ; Used in aarch64-simd.md.
+ UNSPEC_UQXTN ; Used in aarch64-simd.md.
+ UNSPEC_SSRA ; Used in aarch64-simd.md.
+ UNSPEC_USRA ; Used in aarch64-simd.md.
+ UNSPEC_SRSRA ; Used in aarch64-simd.md.
+ UNSPEC_URSRA ; Used in aarch64-simd.md.
+ UNSPEC_SRSHR ; Used in aarch64-simd.md.
+ UNSPEC_URSHR ; Used in aarch64-simd.md.
+ UNSPEC_SQSHLU ; Used in aarch64-simd.md.
+ UNSPEC_SQSHL ; Used in aarch64-simd.md.
+ UNSPEC_UQSHL ; Used in aarch64-simd.md.
+ UNSPEC_SQSHRUN ; Used in aarch64-simd.md.
+ UNSPEC_SQRSHRUN ; Used in aarch64-simd.md.
+ UNSPEC_SQSHRN ; Used in aarch64-simd.md.
+ UNSPEC_UQSHRN ; Used in aarch64-simd.md.
+ UNSPEC_SQRSHRN ; Used in aarch64-simd.md.
+ UNSPEC_UQRSHRN ; Used in aarch64-simd.md.
+ UNSPEC_SSHL ; Used in aarch64-simd.md.
+ UNSPEC_USHL ; Used in aarch64-simd.md.
+ UNSPEC_SRSHL ; Used in aarch64-simd.md.
+ UNSPEC_URSHL ; Used in aarch64-simd.md.
+ UNSPEC_SQRSHL ; Used in aarch64-simd.md.
+ UNSPEC_UQRSHL ; Used in aarch64-simd.md.
+ UNSPEC_CMEQ ; Used in aarch64-simd.md.
+ UNSPEC_CMLE ; Used in aarch64-simd.md.
+ UNSPEC_CMLT ; Used in aarch64-simd.md.
+ UNSPEC_CMGE ; Used in aarch64-simd.md.
+ UNSPEC_CMGT ; Used in aarch64-simd.md.
+ UNSPEC_CMHS ; Used in aarch64-simd.md.
+ UNSPEC_CMHI ; Used in aarch64-simd.md.
+ UNSPEC_SSLI ; Used in aarch64-simd.md.
+ UNSPEC_USLI ; Used in aarch64-simd.md.
+ UNSPEC_SSRI ; Used in aarch64-simd.md.
+ UNSPEC_USRI ; Used in aarch64-simd.md.
+ UNSPEC_SSHLL ; Used in aarch64-simd.md.
+ UNSPEC_USHLL ; Used in aarch64-simd.md.
+ UNSPEC_ADDP ; Used in aarch64-simd.md.
+ UNSPEC_CMTST ; Used in aarch64-simd.md.
+ UNSPEC_FMAX ; Used in aarch64-simd.md.
+ UNSPEC_FMIN ; Used in aarch64-simd.md.
+])
+
+;; -------------------------------------------------------------------
+;; Mode attributes
+;; -------------------------------------------------------------------
+
+;; In GPI templates, a string like "%<w>0" will expand to "%w0" in the
+;; 32-bit version and "%x0" in the 64-bit version.
+(define_mode_attr w [(QI "w") (HI "w") (SI "w") (DI "x") (SF "s") (DF "d")])
+
+;; For scalar usage of vector/FP registers
+(define_mode_attr v [(QI "b") (HI "h") (SI "s") (DI "d")
+ (V8QI "") (V16QI "")
+ (V4HI "") (V8HI "")
+ (V2SI "") (V4SI "")
+ (V2DI "") (V2SF "")
+ (V4SF "") (V2DF "")])
+
+;; For scalar usage of vector/FP registers, narrowing
+(define_mode_attr vn2 [(QI "") (HI "b") (SI "h") (DI "s")
+ (V8QI "") (V16QI "")
+ (V4HI "") (V8HI "")
+ (V2SI "") (V4SI "")
+ (V2DI "") (V2SF "")
+ (V4SF "") (V2DF "")])
+
+;; For scalar usage of vector/FP registers, widening
+(define_mode_attr vw2 [(DI "") (QI "h") (HI "s") (SI "d")
+ (V8QI "") (V16QI "")
+ (V4HI "") (V8HI "")
+ (V2SI "") (V4SI "")
+ (V2DI "") (V2SF "")
+ (V4SF "") (V2DF "")])
+
+;; Map a floating point mode to the appropriate register name prefix
+(define_mode_attr s [(SF "s") (DF "d")])
+
+;; Give the length suffix letter for a sign- or zero-extension.
+(define_mode_attr size [(QI "b") (HI "h") (SI "w")])
+
+;; Give the number of bits in the mode
+(define_mode_attr sizen [(QI "8") (HI "16") (SI "32") (DI "64")])
+
+;; Give the ordinal of the MSB in the mode
+(define_mode_attr sizem1 [(QI "#7") (HI "#15") (SI "#31") (DI "#63")])
+
+;; Attribute to describe constants acceptable in logical operations
+(define_mode_attr lconst [(SI "K") (DI "L")])
+
+;; Map a mode to a specific constraint character.
+(define_mode_attr cmode [(QI "q") (HI "h") (SI "s") (DI "d")])
+
+(define_mode_attr Vtype [(V8QI "8b") (V16QI "16b")
+ (V4HI "4h") (V8HI "8h")
+ (V2SI "2s") (V4SI "4s")
+ (DI "1d") (DF "1d")
+ (V2DI "2d") (V2SF "2s")
+ (V4SF "4s") (V2DF "2d")])
+
+(define_mode_attr Vmtype [(V8QI ".8b") (V16QI ".16b")
+ (V4HI ".4h") (V8HI ".8h")
+ (V2SI ".2s") (V4SI ".4s")
+ (V2DI ".2d") (V2SF ".2s")
+ (V4SF ".4s") (V2DF ".2d")
+ (DI "") (SI "")
+ (HI "") (QI "")
+ (TI "")])
+
+;; Register suffix narrowed modes for VQN.
+(define_mode_attr Vmntype [(V8HI ".8b") (V4SI ".4h")
+ (V2DI ".2s")
+ (DI "") (SI "")
+ (HI "")])
+
+;; Mode-to-individual element type mapping.
+(define_mode_attr Vetype [(V8QI "b") (V16QI "b")
+ (V4HI "h") (V8HI "h")
+ (V2SI "s") (V4SI "s")
+ (V2DI "d") (V2SF "s")
+ (V4SF "s") (V2DF "d")
+ (QI "b") (HI "h")
+ (SI "s") (DI "d")])
+
+;; Mode-to-bitwise operation type mapping.
+(define_mode_attr Vbtype [(V8QI "8b") (V16QI "16b")
+ (V4HI "8b") (V8HI "16b")
+ (V2SI "8b") (V4SI "16b")
+ (V2DI "16b") (V2SF "8b")
+ (V4SF "16b") (V2DF "16b")])
+
+;; Define element mode for each vector mode.
+(define_mode_attr VEL [(V8QI "QI") (V16QI "QI")
+ (V4HI "HI") (V8HI "HI")
+ (V2SI "SI") (V4SI "SI")
+ (DI "DI") (V2DI "DI")
+ (V2SF "SF") (V4SF "SF")
+ (V2DF "DF")
+ (SI "SI") (HI "HI")
+ (QI "QI")])
+
+;; Define container mode for lane selection.
+(define_mode_attr VCON [(V8QI "V16QI") (V16QI "V16QI")
+ (V4HI "V8HI") (V8HI "V8HI")
+ (V2SI "V4SI") (V4SI "V4SI")
+ (DI "V2DI") (V2DI "V2DI")
+ (V2SF "V2SF") (V4SF "V4SF")
+ (V2DF "V2DF") (SI "V4SI")
+ (HI "V8HI") (QI "V16QI")])
+
+;; Half modes of all vector modes.
+(define_mode_attr VHALF [(V8QI "V4QI") (V16QI "V8QI")
+ (V4HI "V2HI") (V8HI "V4HI")
+ (V2SI "SI") (V4SI "V2SI")
+ (V2DI "DI") (V2SF "SF")
+ (V4SF "V2SF") (V2DF "DF")])
+
+;; Double modes of vector modes.
+(define_mode_attr VDBL [(V8QI "V16QI") (V4HI "V8HI")
+ (V2SI "V4SI") (V2SF "V4SF")
+ (SI "V2SI") (DI "V2DI")
+ (DF "V2DF")])
+
+;; Double modes of vector modes (lower case).
+(define_mode_attr Vdbl [(V8QI "v16qi") (V4HI "v8hi")
+ (V2SI "v4si") (V2SF "v4sf")
+ (SI "v2si") (DI "v2di")])
+
+;; Narrowed modes for VDN.
+(define_mode_attr VNARROWD [(V4HI "V8QI") (V2SI "V4HI")
+ (DI "V2SI")])
+
+;; Narrowed double-modes for VQN (Used for XTN).
+(define_mode_attr VNARROWQ [(V8HI "V8QI") (V4SI "V4HI")
+ (V2DI "V2SI")
+ (DI "SI") (SI "HI")
+ (HI "QI")])
+
+;; Narrowed quad-modes for VQN (Used for XTN2).
+(define_mode_attr VNARROWQ2 [(V8HI "V16QI") (V4SI "V8HI")
+ (V2DI "V4SI")])
+
+;; Register suffix narrowed modes for VQN.
+(define_mode_attr Vntype [(V8HI "8b") (V4SI "4h")
+ (V2DI "2s")])
+
+;; Register suffix for the '2' (high-half) narrowing variants of VQN.
+(define_mode_attr V2ntype [(V8HI "16b") (V4SI "8h")
+ (V2DI "4s")])
+
+;; Widened modes of vector modes.
+(define_mode_attr VWIDE [(V8QI "V8HI") (V4HI "V4SI")
+			 (V2SI "V2DI") (V16QI "V8HI")
+			 (V8HI "V4SI") (V4SI "V2DI")
+			 (HI "SI") (SI "DI")])
+
+;; Widened mode register suffixes for VDW/VQW.
+(define_mode_attr Vwtype [(V8QI "8h") (V4HI "4s")
+ (V2SI "2d") (V16QI "8h")
+ (V8HI "4s") (V4SI "2d")])
+
+;; Widened mode register suffixes for VDW/VQW.
+(define_mode_attr Vmwtype [(V8QI ".8h") (V4HI ".4s")
+ (V2SI ".2d") (V16QI ".8h")
+ (V8HI ".4s") (V4SI ".2d")
+ (SI "") (HI "")])
+
+;; Lower part register suffixes for VQW.
+(define_mode_attr Vhalftype [(V16QI "8b") (V8HI "4h")
+ (V4SI "2s")])
+
+;; Define corresponding core/FP element mode for each vector mode.
+(define_mode_attr vw [(V8QI "w") (V16QI "w")
+ (V4HI "w") (V8HI "w")
+ (V2SI "w") (V4SI "w")
+ (DI "x") (V2DI "x")
+ (V2SF "s") (V4SF "s")
+ (V2DF "d")])
+
+;; Double vector types for ALLX.
+(define_mode_attr Vallxd [(QI "8b") (HI "4h") (SI "2s")])
+
+;; Mode of result of comparison operations.
+(define_mode_attr V_cmp_result [(V8QI "V8QI") (V16QI "V16QI")
+ (V4HI "V4HI") (V8HI "V8HI")
+ (V2SI "V2SI") (V4SI "V4SI")
+ (V2SF "V2SI") (V4SF "V4SI")
+ (DI "DI") (V2DI "V2DI")])
+
+;; Vm for lane instructions is restricted to FP_LO_REGS.
+(define_mode_attr vwx [(V4HI "x") (V8HI "x") (HI "x")
+ (V2SI "w") (V4SI "w") (SI "w")])
+
+(define_mode_attr Vendreg [(OI "T") (CI "U") (XI "V")])
+
+(define_mode_attr nregs [(OI "2") (CI "3") (XI "4")])
+
+(define_mode_attr VRL2 [(V8QI "V32QI") (V4HI "V16HI")
+ (V2SI "V8SI") (V2SF "V8SF")
+ (DI "V4DI") (DF "V4DF")
+ (V16QI "V32QI") (V8HI "V16HI")
+ (V4SI "V8SI") (V4SF "V8SF")
+ (V2DI "V4DI") (V2DF "V4DF")])
+
+(define_mode_attr VRL3 [(V8QI "V48QI") (V4HI "V24HI")
+ (V2SI "V12SI") (V2SF "V12SF")
+ (DI "V6DI") (DF "V6DF")
+ (V16QI "V48QI") (V8HI "V24HI")
+ (V4SI "V12SI") (V4SF "V12SF")
+ (V2DI "V6DI") (V2DF "V6DF")])
+
+(define_mode_attr VRL4 [(V8QI "V64QI") (V4HI "V32HI")
+ (V2SI "V16SI") (V2SF "V16SF")
+ (DI "V8DI") (DF "V8DF")
+ (V16QI "V64QI") (V8HI "V32HI")
+ (V4SI "V16SI") (V4SF "V16SF")
+ (V2DI "V8DI") (V2DF "V8DF")])
+
+(define_mode_attr VSTRUCT_DREG [(OI "TI") (CI "EI") (XI "OI")])
+
+;; -------------------------------------------------------------------
+;; Code Iterators
+;; -------------------------------------------------------------------
+
+;; This code iterator allows the various shifts supported on the core
+(define_code_iterator SHIFT [ashift ashiftrt lshiftrt rotatert])
+
+;; This code iterator allows the shifts supported in arithmetic instructions
+(define_code_iterator ASHIFT [ashift ashiftrt lshiftrt])
+
+;; Code iterator for logical operations
+(define_code_iterator LOGICAL [and ior xor])
+
+;; Code iterator for sign/zero extension
+(define_code_iterator ANY_EXTEND [sign_extend zero_extend])
+
+;; All division operations (signed/unsigned)
+(define_code_iterator ANY_DIV [div udiv])
+
+;; Code iterator for sign/zero extraction
+(define_code_iterator ANY_EXTRACT [sign_extract zero_extract])
+
+;; Code iterator for equality comparisons
+(define_code_iterator EQL [eq ne])
+
+;; Code iterator for less-than and greater/equal-to
+(define_code_iterator LTGE [lt ge])
+
+;; Iterator for __sync_<op> operations that where the operation can be
+;; represented directly RTL. This is all of the sync operations bar
+;; nand.
+(define_code_iterator syncop [plus minus ior xor and])
+
+;; Iterator for integer conversions
+(define_code_iterator FIXUORS [fix unsigned_fix])
+
+;; Code iterator for variants of vector max and min.
+(define_code_iterator MAXMIN [smax smin umax umin])
+
+;; Code iterator for addition and subtraction.
+(define_code_iterator ADDSUB [plus minus])
+
+;; Code iterator for variants of vector saturating binary ops.
+(define_code_iterator BINQOPS [ss_plus us_plus ss_minus us_minus])
+
+;; Code iterator for variants of vector saturating unary ops.
+(define_code_iterator UNQOPS [ss_neg ss_abs])
+
+;; Code iterator for signed variants of vector saturating binary ops.
+(define_code_iterator SBINQOPS [ss_plus ss_minus])
+
+;; -------------------------------------------------------------------
+;; Code Attributes
+;; -------------------------------------------------------------------
+;; Map rtl objects to optab names
+(define_code_attr optab [(ashift "ashl")
+ (ashiftrt "ashr")
+ (lshiftrt "lshr")
+ (rotatert "rotr")
+ (sign_extend "extend")
+ (zero_extend "zero_extend")
+ (sign_extract "extv")
+ (zero_extract "extzv")
+ (and "and")
+ (ior "ior")
+ (xor "xor")
+ (not "one_cmpl")
+ (neg "neg")
+ (plus "add")
+ (minus "sub")
+ (ss_plus "qadd")
+ (us_plus "qadd")
+ (ss_minus "qsub")
+ (us_minus "qsub")
+ (ss_neg "qneg")
+ (ss_abs "qabs")
+ (eq "eq")
+ (ne "ne")
+ (lt "lt")
+ (ge "ge")])
+
+;; Optab prefix for sign/zero-extending operations
+(define_code_attr su_optab [(sign_extend "") (zero_extend "u")
+ (div "") (udiv "u")
+ (fix "") (unsigned_fix "u")
+ (ss_plus "s") (us_plus "u")
+ (ss_minus "s") (us_minus "u")])
+
+;; Similar for the instruction mnemonics
+(define_code_attr shift [(ashift "lsl") (ashiftrt "asr")
+ (lshiftrt "lsr") (rotatert "ror")])
+
+;; Map shift operators onto underlying bit-field instructions
+(define_code_attr bfshift [(ashift "ubfiz") (ashiftrt "sbfx")
+ (lshiftrt "ubfx") (rotatert "extr")])
+
+;; Logical operator instruction mnemonics
+(define_code_attr logical [(and "and") (ior "orr") (xor "eor")])
+
+;; Similar, but when not(op)
+(define_code_attr nlogical [(and "bic") (ior "orn") (xor "eon")])
+
+;; Sign- or zero-extending load
+(define_code_attr ldrxt [(sign_extend "ldrs") (zero_extend "ldr")])
+
+;; Sign- or zero-extending data-op
+(define_code_attr su [(sign_extend "s") (zero_extend "u")
+ (sign_extract "s") (zero_extract "u")
+ (fix "s") (unsigned_fix "u")
+ (div "s") (udiv "u")])
+
+;; Emit cbz/cbnz depending on comparison type.
+(define_code_attr cbz [(eq "cbz") (ne "cbnz") (lt "cbnz") (ge "cbz")])
+
+;; Emit tbz/tbnz depending on comparison type.
+(define_code_attr tbz [(eq "tbz") (ne "tbnz") (lt "tbnz") (ge "tbz")])
+
+;; Max/min attributes.
+(define_code_attr maxmin [(smax "smax")
+ (smin "smin")
+ (umax "umax")
+ (umin "umin")])
+
+;; MLA/MLS attributes.
+(define_code_attr as [(ss_plus "a") (ss_minus "s")])
+
+
+;; -------------------------------------------------------------------
+;; Int Iterators.
+;; -------------------------------------------------------------------
+(define_int_iterator MAXMINV [UNSPEC_UMAXV UNSPEC_UMINV
+ UNSPEC_SMAXV UNSPEC_SMINV])
+
+(define_int_iterator FMAXMINV [UNSPEC_FMAXV UNSPEC_FMINV])
+
+(define_int_iterator HADDSUB [UNSPEC_SHADD UNSPEC_UHADD
+ UNSPEC_SRHADD UNSPEC_URHADD
+ UNSPEC_SHSUB UNSPEC_UHSUB
+ UNSPEC_SRHSUB UNSPEC_URHSUB])
+
+
+(define_int_iterator ADDSUBHN [UNSPEC_ADDHN UNSPEC_RADDHN
+ UNSPEC_SUBHN UNSPEC_RSUBHN])
+
+(define_int_iterator ADDSUBHN2 [UNSPEC_ADDHN2 UNSPEC_RADDHN2
+ UNSPEC_SUBHN2 UNSPEC_RSUBHN2])
+
+(define_int_iterator FMAXMIN [UNSPEC_FMAX UNSPEC_FMIN])
+
+(define_int_iterator VQDMULH [UNSPEC_SQDMULH UNSPEC_SQRDMULH])
+
+(define_int_iterator USSUQADD [UNSPEC_SUQADD UNSPEC_USQADD])
+
+(define_int_iterator SUQMOVN [UNSPEC_SQXTN UNSPEC_UQXTN])
+
+(define_int_iterator VSHL [UNSPEC_SSHL UNSPEC_USHL
+ UNSPEC_SRSHL UNSPEC_URSHL])
+
+(define_int_iterator VSHLL [UNSPEC_SSHLL UNSPEC_USHLL])
+
+(define_int_iterator VQSHL [UNSPEC_SQSHL UNSPEC_UQSHL
+ UNSPEC_SQRSHL UNSPEC_UQRSHL])
+
+(define_int_iterator VSRA [UNSPEC_SSRA UNSPEC_USRA
+ UNSPEC_SRSRA UNSPEC_URSRA])
+
+(define_int_iterator VSLRI [UNSPEC_SSLI UNSPEC_USLI
+ UNSPEC_SSRI UNSPEC_USRI])
+
+
+(define_int_iterator VRSHR_N [UNSPEC_SRSHR UNSPEC_URSHR])
+
+(define_int_iterator VQSHL_N [UNSPEC_SQSHLU UNSPEC_SQSHL UNSPEC_UQSHL])
+
+(define_int_iterator VQSHRN_N [UNSPEC_SQSHRUN UNSPEC_SQRSHRUN
+ UNSPEC_SQSHRN UNSPEC_UQSHRN
+ UNSPEC_SQRSHRN UNSPEC_UQRSHRN])
+
+(define_int_iterator VCMP_S [UNSPEC_CMEQ UNSPEC_CMGE UNSPEC_CMGT
+ UNSPEC_CMLE UNSPEC_CMLT])
+
+(define_int_iterator VCMP_U [UNSPEC_CMHS UNSPEC_CMHI UNSPEC_CMTST])
+
+
+;; -------------------------------------------------------------------
+;; Int Iterators Attributes.
+;; -------------------------------------------------------------------
+(define_int_attr maxminv [(UNSPEC_UMAXV "umax")
+ (UNSPEC_UMINV "umin")
+ (UNSPEC_SMAXV "smax")
+ (UNSPEC_SMINV "smin")])
+
+(define_int_attr fmaxminv [(UNSPEC_FMAXV "max")
+ (UNSPEC_FMINV "min")])
+
+(define_int_attr fmaxmin [(UNSPEC_FMAX "fmax")
+ (UNSPEC_FMIN "fmin")])
+
+(define_int_attr sur [(UNSPEC_SHADD "s") (UNSPEC_UHADD "u")
+ (UNSPEC_SRHADD "sr") (UNSPEC_URHADD "ur")
+ (UNSPEC_SHSUB "s") (UNSPEC_UHSUB "u")
+ (UNSPEC_SRHSUB "sr") (UNSPEC_URHSUB "ur")
+ (UNSPEC_ADDHN "") (UNSPEC_RADDHN "r")
+ (UNSPEC_SUBHN "") (UNSPEC_RSUBHN "r")
+ (UNSPEC_ADDHN2 "") (UNSPEC_RADDHN2 "r")
+ (UNSPEC_SUBHN2 "") (UNSPEC_RSUBHN2 "r")
+ (UNSPEC_SQXTN "s") (UNSPEC_UQXTN "u")
+ (UNSPEC_USQADD "us") (UNSPEC_SUQADD "su")
+ (UNSPEC_SSLI "s") (UNSPEC_USLI "u")
+ (UNSPEC_SSRI "s") (UNSPEC_USRI "u")
+ (UNSPEC_USRA "u") (UNSPEC_SSRA "s")
+ (UNSPEC_URSRA "ur") (UNSPEC_SRSRA "sr")
+ (UNSPEC_URSHR "ur") (UNSPEC_SRSHR "sr")
+ (UNSPEC_SQSHLU "s") (UNSPEC_SQSHL "s")
+ (UNSPEC_UQSHL "u")
+ (UNSPEC_SQSHRUN "s") (UNSPEC_SQRSHRUN "s")
+ (UNSPEC_SQSHRN "s") (UNSPEC_UQSHRN "u")
+ (UNSPEC_SQRSHRN "s") (UNSPEC_UQRSHRN "u")
+ (UNSPEC_USHL "u") (UNSPEC_SSHL "s")
+ (UNSPEC_USHLL "u") (UNSPEC_SSHLL "s")
+ (UNSPEC_URSHL "ur") (UNSPEC_SRSHL "sr")
+ (UNSPEC_UQRSHL "u") (UNSPEC_SQRSHL "s")
+])
+
+(define_int_attr r [(UNSPEC_SQDMULH "") (UNSPEC_SQRDMULH "r")
+ (UNSPEC_SQSHRUN "") (UNSPEC_SQRSHRUN "r")
+ (UNSPEC_SQSHRN "") (UNSPEC_UQSHRN "")
+ (UNSPEC_SQRSHRN "r") (UNSPEC_UQRSHRN "r")
+ (UNSPEC_SQSHL "") (UNSPEC_UQSHL "")
+		    (UNSPEC_SQRSHL "r") (UNSPEC_UQRSHL "r")
+])
+
+(define_int_attr lr [(UNSPEC_SSLI "l") (UNSPEC_USLI "l")
+ (UNSPEC_SSRI "r") (UNSPEC_USRI "r")])
+
+(define_int_attr u [(UNSPEC_SQSHLU "u") (UNSPEC_SQSHL "") (UNSPEC_UQSHL "")
+ (UNSPEC_SQSHRUN "u") (UNSPEC_SQRSHRUN "u")
+ (UNSPEC_SQSHRN "") (UNSPEC_UQSHRN "")
+ (UNSPEC_SQRSHRN "") (UNSPEC_UQRSHRN "")])
+
+(define_int_attr addsub [(UNSPEC_SHADD "add")
+ (UNSPEC_UHADD "add")
+ (UNSPEC_SRHADD "add")
+ (UNSPEC_URHADD "add")
+ (UNSPEC_SHSUB "sub")
+ (UNSPEC_UHSUB "sub")
+ (UNSPEC_SRHSUB "sub")
+ (UNSPEC_URHSUB "sub")
+ (UNSPEC_ADDHN "add")
+ (UNSPEC_SUBHN "sub")
+ (UNSPEC_RADDHN "add")
+ (UNSPEC_RSUBHN "sub")
+ (UNSPEC_ADDHN2 "add")
+ (UNSPEC_SUBHN2 "sub")
+ (UNSPEC_RADDHN2 "add")
+ (UNSPEC_RSUBHN2 "sub")])
+
+(define_int_attr cmp [(UNSPEC_CMGE "ge") (UNSPEC_CMGT "gt")
+ (UNSPEC_CMLE "le") (UNSPEC_CMLT "lt")
+ (UNSPEC_CMEQ "eq")
+ (UNSPEC_CMHS "hs") (UNSPEC_CMHI "hi")
+ (UNSPEC_CMTST "tst")])
+
+(define_int_attr offsetlr [(UNSPEC_SSLI "1") (UNSPEC_USLI "1")
+ (UNSPEC_SSRI "0") (UNSPEC_USRI "0")])
+
diff --git a/gcc/config/aarch64/large.md b/gcc/config/aarch64/large.md
new file mode 100644
index 00000000000..1e73dc31a0f
--- /dev/null
+++ b/gcc/config/aarch64/large.md
@@ -0,0 +1,312 @@
+;; Copyright (C) 2012 Free Software Foundation, Inc.
+;;
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; In the absence of any ARMv8-A implementations, two example pipeline
+;; descriptions derived from ARM's most recent ARMv7-A cores (Cortex-A7
+;; and Cortex-A15) are included as a temporary measure.
+
+;; Pipeline description for an example 'large' core implementing
+;; AArch64.
+
+;;-------------------------------------------------------
+;; General Description
+;;-------------------------------------------------------
+
+(define_automaton "large_cpu")
+
+;; The core is modelled as a triple issue pipeline that has
+;; the following dispatch units.
+;; 1. Two pipelines for simple integer operations: int1, int2
+;; 2. Two pipelines for SIMD and FP data-processing operations: fpsimd1, fpsimd2
+;; 3. One pipeline for branch operations: br
+;; 4. One pipeline for integer multiply and divide operations: multdiv
+;; 5. Two pipelines for load and store operations: ls1, ls2
+;;
+;; We can issue into three pipelines per cycle.
+;;
+;; We assume that, where we have unit pairs, xxx1 is always filled
+;; before xxx2.
+
+;;-------------------------------------------------------
+;; CPU Units and Reservations
+;;-------------------------------------------------------
+
+;; The three issue units
+(define_cpu_unit "large_cpu_unit_i1, large_cpu_unit_i2, large_cpu_unit_i3" "large_cpu")
+
+(define_reservation "large_cpu_resv_i1"
+ "(large_cpu_unit_i1 | large_cpu_unit_i2 | large_cpu_unit_i3)")
+
+(define_reservation "large_cpu_resv_i2"
+ "((large_cpu_unit_i1 + large_cpu_unit_i2) | (large_cpu_unit_i2 + large_cpu_unit_i3))")
+
+(define_reservation "large_cpu_resv_i3"
+ "(large_cpu_unit_i1 + large_cpu_unit_i2 + large_cpu_unit_i3)")
+
+(final_presence_set "large_cpu_unit_i2" "large_cpu_unit_i1")
+(final_presence_set "large_cpu_unit_i3" "large_cpu_unit_i2")
+
+;; The main dispatch units
+(define_cpu_unit "large_cpu_unit_int1, large_cpu_unit_int2" "large_cpu")
+(define_cpu_unit "large_cpu_unit_fpsimd1, large_cpu_unit_fpsimd2" "large_cpu")
+(define_cpu_unit "large_cpu_unit_ls1, large_cpu_unit_ls2" "large_cpu")
+(define_cpu_unit "large_cpu_unit_br" "large_cpu")
+(define_cpu_unit "large_cpu_unit_multdiv" "large_cpu")
+
+(define_reservation "large_cpu_resv_ls" "(large_cpu_unit_ls1 | large_cpu_unit_ls2)")
+
+;; The extended load-store pipeline
+(define_cpu_unit "large_cpu_unit_load, large_cpu_unit_store" "large_cpu")
+
+;; The extended ALU pipeline
+(define_cpu_unit "large_cpu_unit_int1_alu, large_cpu_unit_int2_alu" "large_cpu")
+(define_cpu_unit "large_cpu_unit_int1_shf, large_cpu_unit_int2_shf" "large_cpu")
+(define_cpu_unit "large_cpu_unit_int1_sat, large_cpu_unit_int2_sat" "large_cpu")
+
+
+;;-------------------------------------------------------
+;; Simple ALU Instructions
+;;-------------------------------------------------------
+
+;; Simple ALU operations without shift
+(define_insn_reservation "large_cpu_alu" 2
+ (and (eq_attr "tune" "large") (eq_attr "v8type" "adc,alu,alu_ext"))
+ "large_cpu_resv_i1, \
+ (large_cpu_unit_int1, large_cpu_unit_int1_alu) |\
+ (large_cpu_unit_int2, large_cpu_unit_int2_alu)")
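+
+;; As a reading aid: the reservation above means the instruction takes
+;; one of the three issue units for one cycle, then one of the two
+;; integer pipes followed by that pipe's ALU stage; its result is
+;; available after the 2-cycle latency named in the reservation.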
+
+(define_insn_reservation "large_cpu_logic" 2
+ (and (eq_attr "tune" "large") (eq_attr "v8type" "logic,logic_imm"))
+ "large_cpu_resv_i1, \
+ (large_cpu_unit_int1, large_cpu_unit_int1_alu) |\
+ (large_cpu_unit_int2, large_cpu_unit_int2_alu)")
+
+(define_insn_reservation "large_cpu_shift" 2
+ (and (eq_attr "tune" "large") (eq_attr "v8type" "shift,shift_imm"))
+ "large_cpu_resv_i1, \
+ (large_cpu_unit_int1, large_cpu_unit_int1_shf) |\
+ (large_cpu_unit_int2, large_cpu_unit_int2_shf)")
+
+;; Simple ALU operations with immediate shift
+(define_insn_reservation "large_cpu_alu_shift" 3
+ (and (eq_attr "tune" "large") (eq_attr "v8type" "alu_shift"))
+ "large_cpu_resv_i1, \
+ (large_cpu_unit_int1,
+ large_cpu_unit_int1 + large_cpu_unit_int1_shf, large_cpu_unit_int1_alu) | \
+ (large_cpu_unit_int2,
+ large_cpu_unit_int2 + large_cpu_unit_int2_shf, large_cpu_unit_int2_alu)")
+
+(define_insn_reservation "large_cpu_logic_shift" 3
+ (and (eq_attr "tune" "large") (eq_attr "v8type" "logic_shift"))
+ "large_cpu_resv_i1, \
+ (large_cpu_unit_int1, large_cpu_unit_int1_alu) |\
+ (large_cpu_unit_int2, large_cpu_unit_int2_alu)")
+
+
+;;-------------------------------------------------------
+;; Multiplication/Division
+;;-------------------------------------------------------
+
+;; Simple multiplication
+(define_insn_reservation "large_cpu_mult_single" 3
+ (and (eq_attr "tune" "large")
+ (and (eq_attr "v8type" "mult,madd") (eq_attr "mode" "SI")))
+ "large_cpu_resv_i1, large_cpu_unit_multdiv")
+
+(define_insn_reservation "large_cpu_mult_double" 4
+ (and (eq_attr "tune" "large")
+ (and (eq_attr "v8type" "mult,madd") (eq_attr "mode" "DI")))
+ "large_cpu_resv_i1, large_cpu_unit_multdiv")
+
+;; 64-bit multiplication
+(define_insn_reservation "large_cpu_mull" 4
+ (and (eq_attr "tune" "large") (eq_attr "v8type" "mull,mulh,maddl"))
+ "large_cpu_resv_i1, large_cpu_unit_multdiv * 2")
+
+;; Division
+(define_insn_reservation "large_cpu_udiv_single" 9
+ (and (eq_attr "tune" "large")
+ (and (eq_attr "v8type" "udiv") (eq_attr "mode" "SI")))
+ "large_cpu_resv_i1, large_cpu_unit_multdiv")
+
+(define_insn_reservation "large_cpu_udiv_double" 18
+ (and (eq_attr "tune" "large")
+ (and (eq_attr "v8type" "udiv") (eq_attr "mode" "DI")))
+ "large_cpu_resv_i1, large_cpu_unit_multdiv")
+
+(define_insn_reservation "large_cpu_sdiv_single" 10
+ (and (eq_attr "tune" "large")
+ (and (eq_attr "v8type" "sdiv") (eq_attr "mode" "SI")))
+ "large_cpu_resv_i1, large_cpu_unit_multdiv")
+
+(define_insn_reservation "large_cpu_sdiv_double" 20
+ (and (eq_attr "tune" "large")
+ (and (eq_attr "v8type" "sdiv") (eq_attr "mode" "DI")))
+ "large_cpu_resv_i1, large_cpu_unit_multdiv")
+
+
+;;-------------------------------------------------------
+;; Branches
+;;-------------------------------------------------------
+
+;; Branches take one issue slot.
+;; No latency, as there is no result.
+(define_insn_reservation "large_cpu_branch" 0
+ (and (eq_attr "tune" "large") (eq_attr "v8type" "branch"))
+ "large_cpu_resv_i1, large_cpu_unit_br")
+
+
+;; Calls take up all issue slots and form a block in the pipeline.
+;; The result, however, is available the next cycle.  Adding new
+;; units requires this reservation to be updated.
+(define_insn_reservation "large_cpu_call" 1
+ (and (eq_attr "tune" "large") (eq_attr "v8type" "call"))
+ "large_cpu_resv_i3 | large_cpu_resv_i2, \
+ large_cpu_unit_int1 + large_cpu_unit_int2 + large_cpu_unit_br + \
+ large_cpu_unit_multdiv + large_cpu_unit_fpsimd1 + large_cpu_unit_fpsimd2 + \
+ large_cpu_unit_ls1 + large_cpu_unit_ls2,\
+ large_cpu_unit_int1_alu + large_cpu_unit_int1_shf + large_cpu_unit_int1_sat + \
+ large_cpu_unit_int2_alu + large_cpu_unit_int2_shf + \
+ large_cpu_unit_int2_sat + large_cpu_unit_load + large_cpu_unit_store")
+
+
+;;-------------------------------------------------------
+;; Load/Store Instructions
+;;-------------------------------------------------------
+
+;; Loads of up to two words.
+(define_insn_reservation "large_cpu_load1" 4
+ (and (eq_attr "tune" "large") (eq_attr "v8type" "load_acq,load1,load2"))
+ "large_cpu_resv_i1, large_cpu_resv_ls, large_cpu_unit_load, nothing")
+
+;; Stores of up to two words.
+(define_insn_reservation "large_cpu_store1" 0
+ (and (eq_attr "tune" "large") (eq_attr "v8type" "store_rel,store1,store2"))
+ "large_cpu_resv_i1, large_cpu_resv_ls, large_cpu_unit_store")
+
+
+;;-------------------------------------------------------
+;; Floating-point arithmetic.
+;;-------------------------------------------------------
+
+(define_insn_reservation "large_cpu_fpalu" 4
+ (and (eq_attr "tune" "large")
+ (eq_attr "v8type" "ffarith,fadd,fccmp,fcvt,fcmp"))
+ "large_cpu_resv_i1 + large_cpu_unit_fpsimd1")
+
+(define_insn_reservation "large_cpu_fconst" 3
+ (and (eq_attr "tune" "large")
+ (eq_attr "v8type" "fconst"))
+ "large_cpu_resv_i1 + large_cpu_unit_fpsimd1")
+
+(define_insn_reservation "large_cpu_fpmuls" 4
+ (and (eq_attr "tune" "large")
+ (and (eq_attr "v8type" "fmul,fmadd") (eq_attr "mode" "SF")))
+ "large_cpu_resv_i1 + large_cpu_unit_fpsimd1")
+
+(define_insn_reservation "large_cpu_fpmuld" 7
+ (and (eq_attr "tune" "large")
+ (and (eq_attr "v8type" "fmul,fmadd") (eq_attr "mode" "DF")))
+ "large_cpu_resv_i1 + large_cpu_unit_fpsimd1, large_cpu_unit_fpsimd1 * 2,\
+ large_cpu_resv_i1 + large_cpu_unit_fpsimd1")
+
+
+;;-------------------------------------------------------
+;; Floating-point Division
+;;-------------------------------------------------------
+
+;; A single-precision divide takes 14 cycles to complete.  This includes
+;; the time taken for the special instruction that collects the result
+;; to travel down the multiply pipeline.
+
+(define_insn_reservation "large_cpu_fdivs" 14
+ (and (eq_attr "tune" "large")
+ (and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "SF")))
+ "large_cpu_resv_i1, large_cpu_unit_fpsimd1 * 13")
+
+(define_insn_reservation "large_cpu_fdivd" 29
+ (and (eq_attr "tune" "large")
+ (and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "DF")))
+ "large_cpu_resv_i1, large_cpu_unit_fpsimd1 * 28")
+
+
+
+;;-------------------------------------------------------
+;; Floating-point Transfers
+;;-------------------------------------------------------
+
+(define_insn_reservation "large_cpu_i2f" 4
+ (and (eq_attr "tune" "large")
+ (eq_attr "v8type" "fmovi2f"))
+ "large_cpu_resv_i1")
+
+(define_insn_reservation "large_cpu_f2i" 2
+ (and (eq_attr "tune" "large")
+ (eq_attr "v8type" "fmovf2i"))
+ "large_cpu_resv_i1")
+
+
+;;-------------------------------------------------------
+;; Floating-point Load/Store
+;;-------------------------------------------------------
+
+(define_insn_reservation "large_cpu_floads" 4
+ (and (eq_attr "tune" "large")
+ (and (eq_attr "v8type" "fpsimd_load,fpsimd_load2") (eq_attr "mode" "SF")))
+ "large_cpu_resv_i1")
+
+(define_insn_reservation "large_cpu_floadd" 5
+ (and (eq_attr "tune" "large")
+ (and (eq_attr "v8type" "fpsimd_load,fpsimd_load2") (eq_attr "mode" "DF")))
+ "large_cpu_resv_i1 + large_cpu_unit_br, large_cpu_resv_i1")
+
+(define_insn_reservation "large_cpu_fstores" 0
+ (and (eq_attr "tune" "large")
+ (and (eq_attr "v8type" "fpsimd_store,fpsimd_store2") (eq_attr "mode" "SF")))
+ "large_cpu_resv_i1")
+
+(define_insn_reservation "large_cpu_fstored" 0
+ (and (eq_attr "tune" "large")
+ (and (eq_attr "v8type" "fpsimd_store,fpsimd_store2") (eq_attr "mode" "DF")))
+ "large_cpu_resv_i1 + large_cpu_unit_br, large_cpu_resv_i1")
+
+
+;;-------------------------------------------------------
+;; Bypasses
+;;-------------------------------------------------------
+
+(define_bypass 1 "large_cpu_alu, large_cpu_logic, large_cpu_shift"
+ "large_cpu_alu, large_cpu_alu_shift, large_cpu_logic, large_cpu_logic_shift, large_cpu_shift")
+
+(define_bypass 2 "large_cpu_alu_shift, large_cpu_logic_shift"
+ "large_cpu_alu, large_cpu_alu_shift, large_cpu_logic, large_cpu_logic_shift, large_cpu_shift")
+
+(define_bypass 1 "large_cpu_alu, large_cpu_logic, large_cpu_shift" "large_cpu_load1")
+
+(define_bypass 2 "large_cpu_alu_shift, large_cpu_logic_shift" "large_cpu_load1")
+
+(define_bypass 2 "large_cpu_floads"
+ "large_cpu_fpalu, large_cpu_fpmuld,\
+ large_cpu_fdivs, large_cpu_fdivd,\
+ large_cpu_f2i")
+
+(define_bypass 3 "large_cpu_floadd"
+ "large_cpu_fpalu, large_cpu_fpmuld,\
+ large_cpu_fdivs, large_cpu_fdivd,\
+ large_cpu_f2i")
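A note on the define_bypass entries above: a bypass overrides the producer's define_insn_reservation latency for the listed consumers only; any pair not listed keeps the producer's default latency. A minimal C sketch of the lookup the generated scheduler effectively performs (the table rows are transcribed from the entries above; everything else is illustrative):

    #include <string.h>

    /* Sketch: a bypass maps a (producer, consumer) reservation pair to an
       overriding latency; unlisted pairs fall back to the default.  */
    struct bypass { const char *producer, *consumer; int latency; };

    static const struct bypass large_cpu_bypasses[] = {
      { "large_cpu_alu",    "large_cpu_load1", 1 },
      { "large_cpu_floads", "large_cpu_fpalu", 2 },
      { "large_cpu_floadd", "large_cpu_fpalu", 3 },
    };

    static int
    effective_latency (const char *producer, const char *consumer,
                       int default_latency)
    {
      size_t n = sizeof large_cpu_bypasses / sizeof large_cpu_bypasses[0];
      for (size_t i = 0; i < n; i++)
        if (strcmp (large_cpu_bypasses[i].producer, producer) == 0
            && strcmp (large_cpu_bypasses[i].consumer, consumer) == 0)
          return large_cpu_bypasses[i].latency;
      return default_latency;
    }

So a floating-point load feeding large_cpu_fpalu is seen at latency 2 (single) or 3 (double) rather than the reservations' default 4 and 5.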
diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
new file mode 100644
index 00000000000..6f79039647c
--- /dev/null
+++ b/gcc/config/aarch64/predicates.md
@@ -0,0 +1,297 @@
+;; Machine description for AArch64 architecture.
+;; Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_special_predicate "cc_register"
+ (and (match_code "reg")
+ (and (match_test "REGNO (op) == CC_REGNUM")
+ (ior (match_test "mode == GET_MODE (op)")
+ (match_test "mode == VOIDmode
+ && GET_MODE_CLASS (GET_MODE (op)) == MODE_CC"))))
+)
+
+(define_predicate "aarch64_reg_or_zero"
+ (and (match_code "reg,subreg,const_int")
+ (ior (match_operand 0 "register_operand")
+ (match_test "op == const0_rtx"))))
+
+(define_predicate "aarch64_reg_zero_or_m1"
+ (and (match_code "reg,subreg,const_int")
+ (ior (match_operand 0 "register_operand")
+ (ior (match_test "op == const0_rtx")
+ (match_test "op == constm1_rtx")))))
+
+(define_predicate "aarch64_fp_compare_operand"
+ (ior (match_operand 0 "register_operand")
+ (and (match_code "const_double")
+ (match_test "aarch64_const_double_zero_rtx_p (op)"))))
+
+(define_predicate "aarch64_plus_immediate"
+ (and (match_code "const_int")
+ (ior (match_test "aarch64_uimm12_shift (INTVAL (op))")
+ (match_test "aarch64_uimm12_shift (-INTVAL (op))"))))
+
+(define_predicate "aarch64_plus_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "aarch64_plus_immediate")))
+
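The two predicates above defer to aarch64_uimm12_shift, which is expected to test the ADD/SUB immediate form: a 12-bit unsigned value, optionally shifted left by 12 bits. Accepting either INTVAL or its negation lets a plus of a negative constant be emitted as a SUB. A standalone sketch of the test:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch of the ADD/SUB immediate form: imm12, optionally LSL #12.  */
    static bool
    uimm12_shift_p (int64_t v)
    {
      return (v & ~(int64_t) 0xfff) == 0              /* 0..4095 */
             || (v & ~((int64_t) 0xfff << 12)) == 0;  /* 4K multiples */
    }

    /* aarch64_plus_immediate accepts V when either V or -V fits.  */
    static bool
    plus_immediate_p (int64_t v)
    {
      return uimm12_shift_p (v) || uimm12_shift_p (-v);
    }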
+(define_predicate "aarch64_pluslong_immediate"
+ (and (match_code "const_int")
+ (match_test "(INTVAL (op) < 0xffffff && INTVAL (op) > -0xffffff)")))
+
+(define_predicate "aarch64_pluslong_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "aarch64_pluslong_immediate")))
+
+(define_predicate "aarch64_logical_immediate"
+ (and (match_code "const_int")
+ (match_test "aarch64_bitmask_imm (INTVAL (op), mode)")))
+
+(define_predicate "aarch64_logical_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "aarch64_logical_immediate")))
+
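The aarch64_bitmask_imm helper referenced above accepts exactly the immediates encodable by the logical instructions: a contiguous run of ones, rotated within a power-of-two-sized element and replicated across the register. A brute-force restatement of the 64-bit set, for clarity only (GCC's helper computes the encoding directly rather than enumerating):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: test VAL against every encodable AArch64 bitmask immediate.
       Each candidate is a run of LEN ones, rotated by ROT within a
       SIZE-bit element, then replicated to fill 64 bits.  */
    static bool
    bitmask_imm64_p (uint64_t val)
    {
      for (unsigned size = 2; size <= 64; size *= 2)
        for (unsigned len = 1; len < size; len++)
          for (unsigned rot = 0; rot < size; rot++)
            {
              uint64_t mask = size == 64 ? ~0ull : (1ull << size) - 1;
              uint64_t elt = (1ull << len) - 1;       /* LEN < 64 here */
              if (rot > 0)
                elt = ((elt >> rot) | (elt << (size - rot))) & mask;
              uint64_t pattern = elt;
              for (unsigned w = size; w < 64; w *= 2)
                pattern |= pattern << w;              /* replicate */
              if (pattern == val)
                return true;
            }
      return false;   /* note: 0 and all-ones are never encodable */
    }

For example, 0x5555555555555555 and 0x0000ff000000ff00 are valid logical immediates, while 0 and all-ones are not.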
+(define_predicate "aarch64_shift_imm_si"
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) < 32")))
+
+(define_predicate "aarch64_shift_imm_di"
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) < 64")))
+
+(define_predicate "aarch64_reg_or_shift_imm_si"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "aarch64_shift_imm_si")))
+
+(define_predicate "aarch64_reg_or_shift_imm_di"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "aarch64_shift_imm_di")))
+
+;; The imm3 field is a 3-bit field that only accepts immediates in the
+;; range 0..4.
+(define_predicate "aarch64_imm3"
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) <= 4")))
+
+(define_predicate "aarch64_pwr_imm3"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) != 0
+ && (unsigned) exact_log2 (INTVAL (op)) <= 4")))
+
+(define_predicate "aarch64_pwr_2_si"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) != 0
+ && (unsigned) exact_log2 (INTVAL (op)) < 32")))
+
+(define_predicate "aarch64_pwr_2_di"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) != 0
+ && (unsigned) exact_log2 (INTVAL (op)) < 64")))
+
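In the three predicates above, exact_log2 returns the base-2 logarithm of a power of two and -1 otherwise, so after the (unsigned) cast a non-power-of-two becomes a huge value and fails the comparison. The accepted sets therefore reduce to small power-of-two ranges; a standalone restatement (a sketch):

    #include <stdbool.h>
    #include <stdint.h>

    /* aarch64_pwr_imm3: powers of two from 1 to 16 (log2 in 0..4).  */
    static bool
    pwr_imm3_p (int64_t x)
    {
      return x > 0 && (x & (x - 1)) == 0 && x <= 16;
    }

    /* aarch64_pwr_2_si / _di: a power of two whose log2 is below the
       operation's bit width (32 or 64).  */
    static bool
    pwr_2_p (int64_t x, unsigned width)
    {
      return x > 0 && (x & (x - 1)) == 0
             && (unsigned) __builtin_ctzll ((uint64_t) x) < width;
    }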
+(define_predicate "aarch64_mem_pair_operand"
+ (and (match_code "mem")
+ (match_test "aarch64_legitimate_address_p (mode, XEXP (op, 0), PARALLEL,
+ 0)")))
+
+(define_predicate "aarch64_const_address"
+ (and (match_code "symbol_ref")
+ (match_test "mode == DImode && CONSTANT_ADDRESS_P (op)")))
+
+(define_predicate "aarch64_valid_symref"
+ (match_code "const, symbol_ref, label_ref")
+{
+ enum aarch64_symbol_type symbol_type;
+ return (aarch64_symbolic_constant_p (op, SYMBOL_CONTEXT_ADR, &symbol_type)
+ && symbol_type != SYMBOL_FORCE_TO_MEM);
+})
+
+(define_predicate "aarch64_tls_ie_symref"
+ (match_code "const, symbol_ref, label_ref")
+{
+ switch (GET_CODE (op))
+ {
+ case CONST:
+ op = XEXP (op, 0);
+ if (GET_CODE (op) != PLUS
+ || GET_CODE (XEXP (op, 0)) != SYMBOL_REF
+ || GET_CODE (XEXP (op, 1)) != CONST_INT)
+ return false;
+ op = XEXP (op, 0);
+
+ case SYMBOL_REF:
+ return SYMBOL_REF_TLS_MODEL (op) == TLS_MODEL_INITIAL_EXEC;
+
+ default:
+ gcc_unreachable ();
+ }
+})
+
+(define_predicate "aarch64_tls_le_symref"
+ (match_code "const, symbol_ref, label_ref")
+{
+ switch (GET_CODE (op))
+ {
+ case CONST:
+ op = XEXP (op, 0);
+ if (GET_CODE (op) != PLUS
+ || GET_CODE (XEXP (op, 0)) != SYMBOL_REF
+ || GET_CODE (XEXP (op, 1)) != CONST_INT)
+ return false;
+ op = XEXP (op, 0);
+
+ case SYMBOL_REF:
+ return SYMBOL_REF_TLS_MODEL (op) == TLS_MODEL_LOCAL_EXEC;
+
+ default:
+ gcc_unreachable ();
+ }
+})
+
+(define_predicate "aarch64_mov_operand"
+ (and (match_code "reg,subreg,mem,const_int,symbol_ref,high")
+ (ior (match_operand 0 "register_operand")
+ (ior (match_operand 0 "memory_operand")
+ (ior (match_test "GET_CODE (op) == HIGH
+ && aarch64_valid_symref (XEXP (op, 0),
+ GET_MODE (XEXP (op, 0)))")
+ (ior (match_test "CONST_INT_P (op)
+ && aarch64_move_imm (INTVAL (op), mode)")
+ (match_test "aarch64_const_address (op, mode)")))))))
+
+(define_predicate "aarch64_movti_operand"
+ (and (match_code "reg,subreg,mem,const_int")
+ (ior (match_operand 0 "register_operand")
+ (ior (match_operand 0 "memory_operand")
+ (match_operand 0 "const_int_operand")))))
+
+(define_predicate "aarch64_reg_or_imm"
+ (and (match_code "reg,subreg,const_int")
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_int_operand"))))
+
+;; True for integer comparisons and for FP comparisons other than LTGT or UNEQ.
+(define_special_predicate "aarch64_comparison_operator"
+ (match_code "eq,ne,le,lt,ge,gt,geu,gtu,leu,ltu,unordered,ordered,unlt,unle,unge,ungt"))
+
+;; True if the operand is a memory reference suitable for a load/store exclusive.
+(define_predicate "aarch64_sync_memory_operand"
+ (and (match_operand 0 "memory_operand")
+ (match_code "reg" "0")))
+
+;; Predicates for parallel expanders based on mode.
+(define_special_predicate "vect_par_cnst_hi_half"
+ (match_code "parallel")
+{
+ HOST_WIDE_INT count = XVECLEN (op, 0);
+ int nunits = GET_MODE_NUNITS (mode);
+ int i;
+
+ if (count < 1
+ || count != nunits / 2)
+ return false;
+
+ if (!VECTOR_MODE_P (mode))
+ return false;
+
+ for (i = 0; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+ int val;
+
+ if (GET_CODE (elt) != CONST_INT)
+ return false;
+
+ val = INTVAL (elt);
+ if (val != (nunits / 2) + i)
+ return false;
+ }
+ return true;
+})
+
+(define_special_predicate "vect_par_cnst_lo_half"
+ (match_code "parallel")
+{
+ HOST_WIDE_INT count = XVECLEN (op, 0);
+ int nunits = GET_MODE_NUNITS (mode);
+ int i;
+
+ if (count < 1
+ || count != nunits / 2)
+ return false;
+
+ if (!VECTOR_MODE_P (mode))
+ return false;
+
+ for (i = 0; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+ int val;
+
+ if (GET_CODE (elt) != CONST_INT)
+ return false;
+
+ val = INTVAL (elt);
+ if (val != i)
+ return false;
+ }
+ return true;
+})
+
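Concretely, for a four-lane vector vect_par_cnst_hi_half accepts (parallel [(const_int 2) (const_int 3)]) and vect_par_cnst_lo_half accepts (parallel [(const_int 0) (const_int 1)]). The lane-index test they share, restated standalone (a sketch):

    #include <stdbool.h>

    /* HI selects lanes N/2..N-1 in ascending order; LO selects 0..N/2-1.  */
    static bool
    par_cnst_half_p (const int *lanes, int count, int nunits, bool hi)
    {
      if (count < 1 || count != nunits / 2)
        return false;
      for (int i = 0; i < count; i++)
        if (lanes[i] != (hi ? nunits / 2 + i : i))
          return false;
      return true;
    }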
+
+(define_special_predicate "aarch64_simd_lshift_imm"
+ (match_code "const_vector")
+{
+ return aarch64_simd_shift_imm_p (op, mode, true);
+})
+
+(define_special_predicate "aarch64_simd_rshift_imm"
+ (match_code "const_vector")
+{
+ return aarch64_simd_shift_imm_p (op, mode, false);
+})
+
+(define_predicate "aarch64_simd_reg_or_zero"
+ (and (match_code "reg,subreg,const_int,const_vector")
+ (ior (match_operand 0 "register_operand")
+ (ior (match_test "op == const0_rtx")
+ (match_test "aarch64_simd_imm_zero_p (op, mode)")))))
+
+(define_predicate "aarch64_simd_struct_operand"
+ (and (match_code "mem")
+ (match_test "TARGET_SIMD && aarch64_simd_mem_operand_p (op)")))
+
+;; Like general_operand but allow only valid SIMD addressing modes.
+(define_predicate "aarch64_simd_general_operand"
+ (and (match_operand 0 "general_operand")
+ (match_test "!MEM_P (op)
+ || GET_CODE (XEXP (op, 0)) == POST_INC
+ || GET_CODE (XEXP (op, 0)) == REG")))
+
+;; Like nonimmediate_operand but allow only valid SIMD addressing modes.
+(define_predicate "aarch64_simd_nonimmediate_operand"
+ (and (match_operand 0 "nonimmediate_operand")
+ (match_test "!MEM_P (op)
+ || GET_CODE (XEXP (op, 0)) == POST_INC
+ || GET_CODE (XEXP (op, 0)) == REG")))
+
+(define_special_predicate "aarch64_simd_imm_zero"
+ (match_code "const_vector")
+{
+ return aarch64_simd_imm_zero_p (op, mode);
+})
diff --git a/gcc/config/aarch64/small.md b/gcc/config/aarch64/small.md
new file mode 100644
index 00000000000..8f70ca9c02c
--- /dev/null
+++ b/gcc/config/aarch64/small.md
@@ -0,0 +1,287 @@
+;; Copyright (C) 2012 Free Software Foundation, Inc.
+;;
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; In the absence of any ARMv8-A implementations, two pipeline
+;; descriptions derived from ARM's most recent ARMv7-A cores (Cortex-A7
+;; and Cortex-A15) are included by way of example. This is a temporary
+;; measure.
+
+;; Example pipeline description for an example 'small' core
+;; implementing AArch64
+
+;;-------------------------------------------------------
+;; General Description
+;;-------------------------------------------------------
+
+(define_automaton "small_cpu")
+
+;; The core is modelled as a single issue pipeline with the following
+;; dispatch units.
+;; 1. One pipeline for simple instructions.
+;; 2. One pipeline for branch instructions.
+;;
+;; There are five pipeline stages.
+;; The decode/issue stages operate the same for all instructions.
+;; Instructions always advance one stage per cycle in order.
+;; Only branch instructions may dual-issue with other instructions, except
+;; when those instructions take multiple cycles to issue.
+
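The dual-issue rule stated above is narrow: a branch may pair with one other instruction, but two non-branches (or two branches) may not issue together. Restated as a sketch:

    #include <stdbool.h>

    /* A slot pair is legal only when exactly one instruction is a branch.  */
    static bool
    can_dual_issue (bool first_is_branch, bool second_is_branch)
    {
      return first_is_branch != second_is_branch;
    }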
+
+;;-------------------------------------------------------
+;; CPU Units and Reservations
+;;-------------------------------------------------------
+
+(define_cpu_unit "small_cpu_unit_i" "small_cpu")
+(define_cpu_unit "small_cpu_unit_br" "small_cpu")
+
+;; Pseudo-unit for blocking the multiply pipeline when a double-precision
+;; multiply is in progress.
+(define_cpu_unit "small_cpu_unit_fpmul_pipe" "small_cpu")
+
+;; The floating-point add pipeline, used to model the usage
+;; of the add pipeline by fp alu instructions.
+(define_cpu_unit "small_cpu_unit_fpadd_pipe" "small_cpu")
+
+;; Floating-point division pipeline (long latency, out-of-order completion).
+(define_cpu_unit "small_cpu_unit_fpdiv" "small_cpu")
+
+
+;;-------------------------------------------------------
+;; Simple ALU Instructions
+;;-------------------------------------------------------
+
+;; Simple ALU operations without shift
+(define_insn_reservation "small_cpu_alu" 2
+ (and (eq_attr "tune" "small")
+ (eq_attr "v8type" "adc,alu,alu_ext"))
+ "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_logic" 2
+ (and (eq_attr "tune" "small")
+ (eq_attr "v8type" "logic,logic_imm"))
+ "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_shift" 2
+ (and (eq_attr "tune" "small")
+ (eq_attr "v8type" "shift,shift_imm"))
+ "small_cpu_unit_i")
+
+;; Simple ALU operations with immediate shift
+(define_insn_reservation "small_cpu_alu_shift" 2
+ (and (eq_attr "tune" "small")
+ (eq_attr "v8type" "alu_shift"))
+ "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_logic_shift" 2
+ (and (eq_attr "tune" "small")
+ (eq_attr "v8type" "logic_shift"))
+ "small_cpu_unit_i")
+
+
+;;-------------------------------------------------------
+;; Multiplication/Division
+;;-------------------------------------------------------
+
+;; Simple multiplication
+(define_insn_reservation "small_cpu_mult_single" 2
+ (and (eq_attr "tune" "small")
+ (and (eq_attr "v8type" "mult,madd") (eq_attr "mode" "SI")))
+ "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_mult_double" 3
+ (and (eq_attr "tune" "small")
+ (and (eq_attr "v8type" "mult,madd") (eq_attr "mode" "DI")))
+ "small_cpu_unit_i")
+
+;; 64-bit multiplication
+(define_insn_reservation "small_cpu_mull" 3
+ (and (eq_attr "tune" "small") (eq_attr "v8type" "mull,mulh,maddl"))
+ "small_cpu_unit_i * 2")
+
+;; Division
+(define_insn_reservation "small_cpu_udiv_single" 5
+ (and (eq_attr "tune" "small")
+ (and (eq_attr "v8type" "udiv") (eq_attr "mode" "SI")))
+ "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_udiv_double" 10
+ (and (eq_attr "tune" "small")
+ (and (eq_attr "v8type" "udiv") (eq_attr "mode" "DI")))
+ "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_sdiv_single" 6
+ (and (eq_attr "tune" "small")
+ (and (eq_attr "v8type" "sdiv") (eq_attr "mode" "SI")))
+ "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_sdiv_double" 12
+ (and (eq_attr "tune" "small")
+ (and (eq_attr "v8type" "sdiv") (eq_attr "mode" "DI")))
+ "small_cpu_unit_i")
+
+
+;;-------------------------------------------------------
+;; Load/Store Instructions
+;;-------------------------------------------------------
+
+(define_insn_reservation "small_cpu_load1" 2
+ (and (eq_attr "tune" "small")
+ (eq_attr "v8type" "load_acq,load1"))
+ "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_store1" 0
+ (and (eq_attr "tune" "small")
+ (eq_attr "v8type" "store_rel,store1"))
+ "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_load2" 3
+ (and (eq_attr "tune" "small")
+ (eq_attr "v8type" "load2"))
+ "small_cpu_unit_i + small_cpu_unit_br, small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_store2" 0
+ (and (eq_attr "tune" "small")
+ (eq_attr "v8type" "store2"))
+ "small_cpu_unit_i + small_cpu_unit_br, small_cpu_unit_i")
+
+
+;;-------------------------------------------------------
+;; Branches
+;;-------------------------------------------------------
+
+;; Direct branches are the only instructions that can dual-issue.
+;; The latency here represents when the branch actually takes place.
+
+(define_insn_reservation "small_cpu_branch" 3
+ (and (eq_attr "tune" "small")
+ (eq_attr "v8type" "branch,call"))
+ "small_cpu_unit_br")
+
+
+;;-------------------------------------------------------
+;; Floating-point arithmetic.
+;;-------------------------------------------------------
+
+(define_insn_reservation "small_cpu_fpalu" 4
+ (and (eq_attr "tune" "small")
+ (eq_attr "v8type" "ffarith,fadd,fccmp,fcvt,fcmp"))
+ "small_cpu_unit_i + small_cpu_unit_fpadd_pipe")
+
+(define_insn_reservation "small_cpu_fconst" 3
+ (and (eq_attr "tune" "small")
+ (eq_attr "v8type" "fconst"))
+ "small_cpu_unit_i + small_cpu_unit_fpadd_pipe")
+
+(define_insn_reservation "small_cpu_fpmuls" 4
+ (and (eq_attr "tune" "small")
+ (and (eq_attr "v8type" "fmul") (eq_attr "mode" "SF")))
+ "small_cpu_unit_i + small_cpu_unit_fpmul_pipe")
+
+(define_insn_reservation "small_cpu_fpmuld" 7
+ (and (eq_attr "tune" "small")
+ (and (eq_attr "v8type" "fmul") (eq_attr "mode" "DF")))
+ "small_cpu_unit_i + small_cpu_unit_fpmul_pipe, small_cpu_unit_fpmul_pipe * 2,\
+ small_cpu_unit_i + small_cpu_unit_fpmul_pipe")
+
+
+;;-------------------------------------------------------
+;; Floating-point Division
+;;-------------------------------------------------------
+
+;; A single-precision divide takes 14 cycles to complete, including the
+;; time taken for the special instruction that collects the result to
+;; travel down the multiply pipeline.
+
+(define_insn_reservation "small_cpu_fdivs" 14
+ (and (eq_attr "tune" "small")
+ (and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "SF")))
+ "small_cpu_unit_i, small_cpu_unit_fpdiv * 13")
+
+(define_insn_reservation "small_cpu_fdivd" 29
+ (and (eq_attr "tune" "small")
+ (and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "DF")))
+ "small_cpu_unit_i, small_cpu_unit_fpdiv * 28")
+
+
+;;-------------------------------------------------------
+;; Floating-point Transfers
+;;-------------------------------------------------------
+
+(define_insn_reservation "small_cpu_i2f" 4
+ (and (eq_attr "tune" "small")
+ (eq_attr "v8type" "fmovi2f"))
+ "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_f2i" 2
+ (and (eq_attr "tune" "small")
+ (eq_attr "v8type" "fmovf2i"))
+ "small_cpu_unit_i")
+
+
+;;-------------------------------------------------------
+;; Floating-point Load/Store
+;;-------------------------------------------------------
+
+(define_insn_reservation "small_cpu_floads" 4
+ (and (eq_attr "tune" "small")
+ (and (eq_attr "v8type" "fpsimd_load") (eq_attr "mode" "SF")))
+ "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_floadd" 5
+ (and (eq_attr "tune" "small")
+ (and (eq_attr "v8type" "fpsimd_load") (eq_attr "mode" "DF")))
+ "small_cpu_unit_i + small_cpu_unit_br, small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_fstores" 0
+ (and (eq_attr "tune" "small")
+ (and (eq_attr "v8type" "fpsimd_store") (eq_attr "mode" "SF")))
+ "small_cpu_unit_i")
+
+(define_insn_reservation "small_cpu_fstored" 0
+ (and (eq_attr "tune" "small")
+ (and (eq_attr "v8type" "fpsimd_store") (eq_attr "mode" "DF")))
+ "small_cpu_unit_i + small_cpu_unit_br, small_cpu_unit_i")
+
+
+;;-------------------------------------------------------
+;; Bypasses
+;;-------------------------------------------------------
+
+;; Forwarding path for unshifted operands.
+
+(define_bypass 1 "small_cpu_alu, small_cpu_alu_shift"
+ "small_cpu_alu, small_cpu_alu_shift, small_cpu_logic, small_cpu_logic_shift, small_cpu_shift")
+
+(define_bypass 1 "small_cpu_logic, small_cpu_logic_shift"
+ "small_cpu_alu, small_cpu_alu_shift, small_cpu_logic, small_cpu_logic_shift, small_cpu_shift")
+
+(define_bypass 1 "small_cpu_shift"
+ "small_cpu_alu, small_cpu_alu_shift, small_cpu_logic, small_cpu_logic_shift, small_cpu_shift")
+
+;; Load-to-use for floating-point values has a penalty of one cycle.
+
+(define_bypass 2 "small_cpu_floads"
+ "small_cpu_fpalu, small_cpu_fpmuld,\
+ small_cpu_fdivs, small_cpu_fdivd,\
+ small_cpu_f2i")
+
+(define_bypass 3 "small_cpu_floadd"
+ "small_cpu_fpalu, small_cpu_fpmuld,\
+ small_cpu_fdivs, small_cpu_fdivd,\
+ small_cpu_f2i")
diff --git a/gcc/config/aarch64/sync.md b/gcc/config/aarch64/sync.md
new file mode 100644
index 00000000000..61f1f1b78b8
--- /dev/null
+++ b/gcc/config/aarch64/sync.md
@@ -0,0 +1,467 @@
+;; Machine description for AArch64 processor synchronization primitives.
+;; Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_c_enum "unspecv"
+ [
+ UNSPECV_SYNC_COMPARE_AND_SWAP ; Represent a sync_compare_and_swap.
+ UNSPECV_SYNC_LOCK ; Represent a sync_lock_test_and_set.
+ UNSPECV_SYNC_LOCK_RELEASE ; Represent a sync_lock_release.
+ UNSPECV_SYNC_OP ; Represent a sync_<op>
+ UNSPECV_SYNC_NEW_OP ; Represent a sync_new_<op>
+ UNSPECV_SYNC_OLD_OP ; Represent a sync_old_<op>
+])
+
+(define_expand "sync_compare_and_swap<mode>"
+ [(set (match_operand:ALLI 0 "register_operand")
+ (unspec_volatile:ALLI [(match_operand:ALLI 1 "memory_operand")
+ (match_operand:ALLI 2 "register_operand")
+ (match_operand:ALLI 3 "register_operand")]
+ UNSPECV_SYNC_COMPARE_AND_SWAP))]
+ ""
+ {
+ struct aarch64_sync_generator generator;
+ generator.op = aarch64_sync_generator_omrn;
+ generator.u.omrn = gen_aarch64_sync_compare_and_swap<mode>;
+ aarch64_expand_sync (<MODE>mode, &generator, operands[0], operands[1],
+ operands[2], operands[3]);
+ DONE;
+ })
+
+(define_expand "sync_lock_test_and_set<mode>"
+ [(match_operand:ALLI 0 "register_operand")
+ (match_operand:ALLI 1 "memory_operand")
+ (match_operand:ALLI 2 "register_operand")]
+ ""
+ {
+ struct aarch64_sync_generator generator;
+ generator.op = aarch64_sync_generator_omn;
+ generator.u.omn = gen_aarch64_sync_lock_test_and_set<mode>;
+ aarch64_expand_sync (<MODE>mode, &generator, operands[0], operands[1],
+ NULL, operands[2]);
+ DONE;
+ })
+
+(define_expand "sync_<optab><mode>"
+ [(match_operand:ALLI 0 "memory_operand")
+ (match_operand:ALLI 1 "register_operand")
+ (syncop:ALLI (match_dup 0) (match_dup 1))]
+ ""
+ {
+ struct aarch64_sync_generator generator;
+ generator.op = aarch64_sync_generator_omn;
+ generator.u.omn = gen_aarch64_sync_new_<optab><mode>;
+ aarch64_expand_sync (<MODE>mode, &generator, NULL, operands[0], NULL,
+ operands[1]);
+ DONE;
+ })
+
+(define_expand "sync_nand<mode>"
+ [(match_operand:ALLI 0 "memory_operand")
+ (match_operand:ALLI 1 "register_operand")
+ (not:ALLI (and:ALLI (match_dup 0) (match_dup 1)))]
+ ""
+ {
+ struct aarch64_sync_generator generator;
+ generator.op = aarch64_sync_generator_omn;
+ generator.u.omn = gen_aarch64_sync_new_nand<mode>;
+ aarch64_expand_sync (<MODE>mode, &generator, NULL, operands[0], NULL,
+ operands[1]);
+ DONE;
+ })
+
+(define_expand "sync_new_<optab><mode>"
+ [(match_operand:ALLI 0 "register_operand")
+ (match_operand:ALLI 1 "memory_operand")
+ (match_operand:ALLI 2 "register_operand")
+ (syncop:ALLI (match_dup 1) (match_dup 2))]
+ ""
+ {
+ struct aarch64_sync_generator generator;
+ generator.op = aarch64_sync_generator_omn;
+ generator.u.omn = gen_aarch64_sync_new_<optab><mode>;
+ aarch64_expand_sync (<MODE>mode, &generator, operands[0], operands[1],
+ NULL, operands[2]);
+ DONE;
+ })
+
+(define_expand "sync_new_nand<mode>"
+ [(match_operand:ALLI 0 "register_operand")
+ (match_operand:ALLI 1 "memory_operand")
+ (match_operand:ALLI 2 "register_operand")
+ (not:ALLI (and:ALLI (match_dup 1) (match_dup 2)))]
+ ""
+ {
+ struct aarch64_sync_generator generator;
+ generator.op = aarch64_sync_generator_omn;
+ generator.u.omn = gen_aarch64_sync_new_nand<mode>;
+ aarch64_expand_sync (<MODE>mode, &generator, operands[0], operands[1],
+ NULL, operands[2]);
+ DONE;
+ })
+
+(define_expand "sync_old_<optab><mode>"
+ [(match_operand:ALLI 0 "register_operand")
+ (match_operand:ALLI 1 "memory_operand")
+ (match_operand:ALLI 2 "register_operand")
+ (syncop:ALLI (match_dup 1) (match_dup 2))]
+ ""
+ {
+ struct aarch64_sync_generator generator;
+ generator.op = aarch64_sync_generator_omn;
+ generator.u.omn = gen_aarch64_sync_old_<optab><mode>;
+ aarch64_expand_sync (<MODE>mode, &generator, operands[0], operands[1],
+ NULL, operands[2]);
+ DONE;
+ })
+
+(define_expand "sync_old_nand<mode>"
+ [(match_operand:ALLI 0 "register_operand")
+ (match_operand:ALLI 1 "memory_operand")
+ (match_operand:ALLI 2 "register_operand")
+ (not:ALLI (and:ALLI (match_dup 1) (match_dup 2)))]
+ ""
+ {
+ struct aarch64_sync_generator generator;
+ generator.op = aarch64_sync_generator_omn;
+ generator.u.omn = gen_aarch64_sync_old_nand<mode>;
+ aarch64_expand_sync (<MODE>mode, &generator, operands[0], operands[1],
+ NULL, operands[2]);
+ DONE;
+ })
+
+(define_expand "memory_barrier"
+ [(set (match_dup 0) (unspec:BLK [(match_dup 0)] UNSPEC_MB))]
+ ""
+{
+ operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[0]) = 1;
+})
+
+(define_insn "aarch64_sync_compare_and_swap<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=&r")
+ (unspec_volatile:GPI
+ [(match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q")
+ (match_operand:GPI 2 "register_operand" "r")
+ (match_operand:GPI 3 "register_operand" "r")]
+ UNSPECV_SYNC_COMPARE_AND_SWAP))
+ (set (match_dup 1) (unspec_volatile:GPI [(match_dup 2)]
+ UNSPECV_SYNC_COMPARE_AND_SWAP))
+ (clobber:GPI (match_scratch:GPI 4 "=&r"))
+ (set (reg:CC CC_REGNUM) (unspec_volatile:CC [(match_dup 1)]
+ UNSPECV_SYNC_COMPARE_AND_SWAP))
+ ]
+ ""
+ {
+ return aarch64_output_sync_insn (insn, operands);
+ }
+ [(set_attr "sync_result" "0")
+ (set_attr "sync_memory" "1")
+ (set_attr "sync_required_value" "2")
+ (set_attr "sync_new_value" "3")
+ (set_attr "sync_t1" "0")
+ (set_attr "sync_t2" "4")
+ ])
+
+(define_insn "aarch64_sync_compare_and_swap<mode>"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (zero_extend:SI
+ (unspec_volatile:SHORT
+ [(match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q")
+ (match_operand:SI 2 "register_operand" "r")
+ (match_operand:SI 3 "register_operand" "r")]
+ UNSPECV_SYNC_COMPARE_AND_SWAP)))
+ (set (match_dup 1) (unspec_volatile:SHORT [(match_dup 2)]
+ UNSPECV_SYNC_COMPARE_AND_SWAP))
+ (clobber:SI (match_scratch:SI 4 "=&r"))
+ (set (reg:CC CC_REGNUM) (unspec_volatile:CC [(match_dup 1)]
+ UNSPECV_SYNC_COMPARE_AND_SWAP))
+ ]
+ ""
+ {
+ return aarch64_output_sync_insn (insn, operands);
+ }
+ [(set_attr "sync_result" "0")
+ (set_attr "sync_memory" "1")
+ (set_attr "sync_required_value" "2")
+ (set_attr "sync_new_value" "3")
+ (set_attr "sync_t1" "0")
+ (set_attr "sync_t2" "4")
+ ])
+
+(define_insn "aarch64_sync_lock_test_and_set<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=&r")
+ (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q"))
+ (set (match_dup 1)
+ (unspec_volatile:GPI [(match_operand:GPI 2 "register_operand" "r")]
+ UNSPECV_SYNC_LOCK))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:GPI 3 "=&r"))]
+ ""
+ {
+ return aarch64_output_sync_insn (insn, operands);
+ }
+ [(set_attr "sync_release_barrier" "no")
+ (set_attr "sync_result" "0")
+ (set_attr "sync_memory" "1")
+ (set_attr "sync_new_value" "2")
+ (set_attr "sync_t1" "0")
+ (set_attr "sync_t2" "3")
+ ])
+
+(define_insn "aarch64_sync_lock_test_and_set<mode>"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (zero_extend:SI (match_operand:SHORT 1
+ "aarch64_sync_memory_operand" "+Q")))
+ (set (match_dup 1)
+ (unspec_volatile:SHORT [(match_operand:SI 2 "register_operand" "r")]
+ UNSPECV_SYNC_LOCK))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:SI 3 "=&r"))]
+ ""
+ {
+ return aarch64_output_sync_insn (insn, operands);
+ }
+ [(set_attr "sync_release_barrier" "no")
+ (set_attr "sync_result" "0")
+ (set_attr "sync_memory" "1")
+ (set_attr "sync_new_value" "2")
+ (set_attr "sync_t1" "0")
+ (set_attr "sync_t2" "3")
+ ])
+
+(define_insn "aarch64_sync_new_<optab><mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=&r")
+ (unspec_volatile:GPI
+ [(syncop:GPI
+ (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q")
+ (match_operand:GPI 2 "register_operand" "r"))]
+ UNSPECV_SYNC_NEW_OP))
+ (set (match_dup 1)
+ (unspec_volatile:GPI [(match_dup 1) (match_dup 2)]
+ UNSPECV_SYNC_NEW_OP))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:GPI 3 "=&r"))]
+ ""
+ {
+ return aarch64_output_sync_insn (insn, operands);
+ }
+ [(set_attr "sync_result" "0")
+ (set_attr "sync_memory" "1")
+ (set_attr "sync_new_value" "2")
+ (set_attr "sync_t1" "0")
+ (set_attr "sync_t2" "3")
+ (set_attr "sync_op" "<optab>")
+ ])
+
+(define_insn "aarch64_sync_new_nand<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=&r")
+ (unspec_volatile:GPI
+ [(not:GPI (and:GPI
+ (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q")
+ (match_operand:GPI 2 "register_operand" "r")))]
+ UNSPECV_SYNC_NEW_OP))
+ (set (match_dup 1)
+ (unspec_volatile:GPI [(match_dup 1) (match_dup 2)]
+ UNSPECV_SYNC_NEW_OP))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:GPI 3 "=&r"))]
+ ""
+ {
+ return aarch64_output_sync_insn (insn, operands);
+ }
+ [(set_attr "sync_result" "0")
+ (set_attr "sync_memory" "1")
+ (set_attr "sync_new_value" "2")
+ (set_attr "sync_t1" "0")
+ (set_attr "sync_t2" "3")
+ (set_attr "sync_op" "nand")
+ ])
+
+(define_insn "aarch64_sync_new_<optab><mode>"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (unspec_volatile:SI
+ [(syncop:SI
+ (zero_extend:SI
+ (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))
+ (match_operand:SI 2 "register_operand" "r"))]
+ UNSPECV_SYNC_NEW_OP))
+ (set (match_dup 1)
+ (unspec_volatile:SHORT [(match_dup 1) (match_dup 2)]
+ UNSPECV_SYNC_NEW_OP))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:SI 3 "=&r"))]
+ ""
+ {
+ return aarch64_output_sync_insn (insn, operands);
+ }
+ [(set_attr "sync_result" "0")
+ (set_attr "sync_memory" "1")
+ (set_attr "sync_new_value" "2")
+ (set_attr "sync_t1" "0")
+ (set_attr "sync_t2" "3")
+ (set_attr "sync_op" "<optab>")
+ ])
+
+(define_insn "aarch64_sync_new_nand<mode>"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (unspec_volatile:SI
+ [(not:SI
+ (and:SI
+ (zero_extend:SI
+ (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))
+ (match_operand:SI 2 "register_operand" "r")))
+ ] UNSPECV_SYNC_NEW_OP))
+ (set (match_dup 1)
+ (unspec_volatile:SHORT [(match_dup 1) (match_dup 2)]
+ UNSPECV_SYNC_NEW_OP))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:SI 3 "=&r"))]
+ ""
+ {
+ return aarch64_output_sync_insn (insn, operands);
+ }
+ [(set_attr "sync_result" "0")
+ (set_attr "sync_memory" "1")
+ (set_attr "sync_new_value" "2")
+ (set_attr "sync_t1" "0")
+ (set_attr "sync_t2" "3")
+ (set_attr "sync_op" "nand")
+ ])
+
+(define_insn "aarch64_sync_old_<optab><mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=&r")
+ (unspec_volatile:GPI
+ [(syncop:GPI
+ (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q")
+ (match_operand:GPI 2 "register_operand" "r"))]
+ UNSPECV_SYNC_OLD_OP))
+ (set (match_dup 1)
+ (unspec_volatile:GPI [(match_dup 1) (match_dup 2)]
+ UNSPECV_SYNC_OLD_OP))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:GPI 3 "=&r"))
+ (clobber (match_scratch:GPI 4 "=&r"))]
+ ""
+ {
+ return aarch64_output_sync_insn (insn, operands);
+ }
+ [(set_attr "sync_result" "0")
+ (set_attr "sync_memory" "1")
+ (set_attr "sync_new_value" "2")
+ (set_attr "sync_t1" "3")
+ (set_attr "sync_t2" "4")
+ (set_attr "sync_op" "<optab>")
+ ])
+
+(define_insn "aarch64_sync_old_nand<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=&r")
+ (unspec_volatile:GPI
+ [(not:GPI (and:GPI
+ (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q")
+ (match_operand:GPI 2 "register_operand" "r")))]
+ UNSPECV_SYNC_OLD_OP))
+ (set (match_dup 1)
+ (unspec_volatile:GPI [(match_dup 1) (match_dup 2)]
+ UNSPECV_SYNC_OLD_OP))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:GPI 3 "=&r"))
+ (clobber (match_scratch:GPI 4 "=&r"))]
+ ""
+ {
+ return aarch64_output_sync_insn (insn, operands);
+ }
+ [(set_attr "sync_result" "0")
+ (set_attr "sync_memory" "1")
+ (set_attr "sync_new_value" "2")
+ (set_attr "sync_t1" "3")
+ (set_attr "sync_t2" "4")
+ (set_attr "sync_op" "nand")
+ ])
+
+(define_insn "aarch64_sync_old_<optab><mode>"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (unspec_volatile:SI
+ [(syncop:SI
+ (zero_extend:SI
+ (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))
+ (match_operand:SI 2 "register_operand" "r"))]
+ UNSPECV_SYNC_OLD_OP))
+ (set (match_dup 1)
+ (unspec_volatile:SHORT [(match_dup 1) (match_dup 2)]
+ UNSPECV_SYNC_OLD_OP))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:SI 3 "=&r"))
+ (clobber (match_scratch:SI 4 "=&r"))]
+ ""
+ {
+ return aarch64_output_sync_insn (insn, operands);
+ }
+ [(set_attr "sync_result" "0")
+ (set_attr "sync_memory" "1")
+ (set_attr "sync_new_value" "2")
+ (set_attr "sync_t1" "3")
+ (set_attr "sync_t2" "4")
+ (set_attr "sync_op" "<optab>")
+ ])
+
+(define_insn "aarch64_sync_old_nand<mode>"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (unspec_volatile:SI
+ [(not:SI
+ (and:SI
+ (zero_extend:SI
+ (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))
+ (match_operand:SI 2 "register_operand" "r")))]
+ UNSPECV_SYNC_OLD_OP))
+ (set (match_dup 1)
+ (unspec_volatile:SHORT [(match_dup 1) (match_dup 2)]
+ UNSPECV_SYNC_OLD_OP))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:SI 3 "=&r"))
+ (clobber (match_scratch:SI 4 "=&r"))]
+ ""
+ {
+ return aarch64_output_sync_insn (insn, operands);
+ }
+ [(set_attr "sync_result" "0")
+ (set_attr "sync_memory" "1")
+ (set_attr "sync_new_value" "2")
+ (set_attr "sync_t1" "3")
+ (set_attr "sync_t2" "4")
+ (set_attr "sync_op" "nand")
+ ])
+
+(define_insn "*memory_barrier"
+ [(set (match_operand:BLK 0 "" "")
+ (unspec:BLK [(match_dup 0)] UNSPEC_MB))]
+ ""
+ "dmb\\tish"
+)
+
+(define_insn "sync_lock_release<mode>"
+ [(set (match_operand:ALLI 0 "memory_operand" "+Q")
+ (unspec_volatile:ALLI [(match_operand:ALLI 1 "register_operand" "r")]
+ UNSPECV_SYNC_LOCK_RELEASE))]
+ ""
+ {
+ return aarch64_output_sync_lock_release (operands[1], operands[0]);
+ })
+
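For reference, these patterns back the legacy __sync builtin family. A usage sketch of what reaches them; the exact instruction sequences come from aarch64_output_sync_insn, which can be expected to emit load-exclusive/store-exclusive retry loops with the appropriate barriers:

    #include <stdint.h>

    /* Usage sketch: each builtin lowers through one of the patterns above:
       compare-and-swap, fetch-and-add (a sync_old_<op> pattern), and the
       full barrier (memory_barrier, assembling to "dmb ish").  */
    int64_t
    cas64 (int64_t *p, int64_t expected, int64_t desired)
    {
      return __sync_val_compare_and_swap (p, expected, desired);
    }

    int
    fetch_add (int *counter)
    {
      return __sync_fetch_and_add (counter, 1);
    }

    void
    full_barrier (void)
    {
      __sync_synchronize ();
    }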
diff --git a/gcc/config/aarch64/t-aarch64 b/gcc/config/aarch64/t-aarch64
new file mode 100644
index 00000000000..715ad1da2c0
--- /dev/null
+++ b/gcc/config/aarch64/t-aarch64
@@ -0,0 +1,32 @@
+# Machine description for AArch64 architecture.
+# Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+# Contributed by ARM Ltd.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+$(srcdir)/config/aarch64/aarch64-tune.md: $(srcdir)/config/aarch64/gentune.sh \
+ $(srcdir)/config/aarch64/aarch64-cores.def
+ $(SHELL) $(srcdir)/config/aarch64/gentune.sh \
+ $(srcdir)/config/aarch64/aarch64-cores.def > \
+ $(srcdir)/config/aarch64/aarch64-tune.md
+
+aarch64-builtins.o: $(srcdir)/config/aarch64/aarch64-builtins.c $(CONFIG_H) \
+ $(SYSTEM_H) coretypes.h $(TM_H) \
+ $(RTL_H) $(TREE_H) expr.h $(TM_P_H) $(RECOG_H) langhooks.h \
+ $(DIAGNOSTIC_CORE_H) $(OPTABS_H)
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ $(srcdir)/config/aarch64/aarch64-builtins.c
diff --git a/gcc/config/aarch64/t-aarch64-linux b/gcc/config/aarch64/t-aarch64-linux
new file mode 100644
index 00000000000..f6ec5765f7c
--- /dev/null
+++ b/gcc/config/aarch64/t-aarch64-linux
@@ -0,0 +1,22 @@
+# Machine description for AArch64 architecture.
+# Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+# Contributed by ARM Ltd.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+LIB1ASMSRC = aarch64/lib1funcs.asm
+LIB1ASMFUNCS = _aarch64_sync_cache_range
diff --git a/gcc/configure b/gcc/configure
index 03b6772a353..fa7e3b68dba 100755
--- a/gcc/configure
+++ b/gcc/configure
@@ -23422,6 +23422,19 @@ foo:
tls_first_minor=19
tls_as_opt='--fatal-warnings'
;;
+ aarch64*-*-*)
+ conftest_s='
+ .section ".tdata","awT",%progbits
+foo: .long 25
+ .text
+ adrp x0, :tlsgd:x
+ add x0, x0, #:tlsgd_lo12:x
+ bl __tls_get_addr
+ nop'
+ tls_first_major=2
+ tls_first_minor=20
+ tls_as_opt='--fatal-warnings'
+ ;;
powerpc-*-*)
conftest_s='
.section ".tdata","awT",@progbits
diff --git a/gcc/configure.ac b/gcc/configure.ac
index 51ebbd6add1..a0ce0548387 100644
--- a/gcc/configure.ac
+++ b/gcc/configure.ac
@@ -2982,6 +2982,19 @@ foo:
tls_first_minor=19
tls_as_opt='--fatal-warnings'
;;
+ aarch64*-*-*)
+ conftest_s='
+ .section ".tdata","awT",%progbits
+foo: .long 25
+ .text
+ adrp x0, :tlsgd:x
+ add x0, x0, #:tlsgd_lo12:x
+ bl __tls_get_addr
+ nop'
+ tls_first_major=2
+ tls_first_minor=20
+ tls_as_opt='--fatal-warnings'
+ ;;
powerpc-*-*)
conftest_s='
.section ".tdata","awT",@progbits
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 929749a669b..c710dc8c71e 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -461,6 +461,15 @@ Objective-C and Objective-C++ Dialects}.
@c Try and put the significant identifier (CPU or system) first,
@c so users have a clue at guessing where the ones they want will be.
+@emph{AArch64 Options}
+@gccoptlist{-mbig-endian -mlittle-endian @gol
+-mgeneral-regs-only @gol
+-mcmodel=tiny -mcmodel=small -mcmodel=large @gol
+-mstrict-align @gol
+-momit-leaf-frame-pointer -mno-omit-leaf-frame-pointer @gol
+-mtls-dialect=desc -mtls-dialect=traditional @gol
+-march=@var{name} -mcpu=@var{name} -mtune=@var{name}}
+
@emph{Adapteva Epiphany Options}
@gccoptlist{-mhalf-reg-file -mprefer-short-insn-regs @gol
-mbranch-cost=@var{num} -mcmove -mnops=@var{num} -msoft-cmpsf @gol
@@ -10331,6 +10340,7 @@ platform.
@c in Machine Dependent Options
@menu
+* AArch64 Options::
* Adapteva Epiphany Options::
* ARM Options::
* AVR Options::
@@ -10539,6 +10549,125 @@ purpose. The default is @option{-m1reg-none}.
@end table
+@node AArch64 Options
+@subsection AArch64 Options
+@cindex AArch64 Options
+
+These options are defined for AArch64 implementations:
+
+@table @gcctabopt
+
+@item -mbig-endian
+@opindex mbig-endian
+Generate big-endian code. This is the default when GCC is configured for an
+@samp{aarch64_be-*-*} target.
+
+@item -mgeneral-regs-only
+@opindex mgeneral-regs-only
+Generate code which uses only the general registers.
+
+@item -mlittle-endian
+@opindex mlittle-endian
+Generate little-endian code. This is the default when GCC is configured for an
+@samp{aarch64-*-*} but not an @samp{aarch64_be-*-*} target.
+
+@item -mcmodel=tiny
+@opindex mcmodel=tiny
+Generate code for the tiny code model. The program and its statically defined
+symbols must be within 1MB of each other. Pointers are 64 bits. Programs can
+be statically or dynamically linked. This model is not fully implemented and
+mostly treated as @samp{small}.
+
+@item -mcmodel=small
+@opindex mcmodel=small
+Generate code for the small code model. The program and its statically defined
+symbols must be within 4GB of each other. Pointers are 64 bits. Programs can
+be statically or dynamically linked. This is the default code model.
+
+@item -mcmodel=large
+@opindex mcmodel=large
+Generate code for the large code model. This makes no assumptions about
+addresses and sizes of sections. Pointers are 64 bits. Programs can be
+statically linked only.
+
+@item -mstrict-align
+@opindex mstrict-align
+Do not assume that unaligned memory references will be handled by the system.
+
+@item -momit-leaf-frame-pointer
+@itemx -mno-omit-leaf-frame-pointer
+@opindex momit-leaf-frame-pointer
+@opindex mno-omit-leaf-frame-pointer
+Omit or keep the frame pointer in leaf functions. The former behaviour is the
+default.
+
+@item -mtls-dialect=desc
+@opindex mtls-dialect=desc
+Use TLS descriptors as the thread-local storage mechanism for dynamic accesses
+of TLS variables. This is the default.
+
+@item -mtls-dialect=traditional
+@opindex mtls-dialect=traditional
+Use traditional TLS as the thread-local storage mechanism for dynamic accesses
+of TLS variables.
+
+@item -march=@var{name}
+@opindex march
+Specify the name of the target architecture, optionally suffixed by one or
+more feature modifiers. This option has the form
+@option{-march=@var{arch}@r{@{}+@r{[}no@r{]}@var{feature}@r{@}*}}, where the
+only value for @var{arch} is @samp{armv8-a}. The possible values for
+@var{feature} are documented in the sub-section below.
+
+Where conflicting feature modifiers are specified, the right-most feature is
+used; for example, @option{-march=armv8-a+nofp+simd} enables Advanced SIMD
+(and therefore floating-point) despite the earlier @samp{nofp}.
+
+GCC uses this name to determine what kind of instructions it can emit when
+generating assembly code. This option can be used in conjunction with or
+instead of the @option{-mcpu=} option.
+
+@item -mcpu=@var{name}
+@opindex mcpu
+Specify the name of the target processor, optionally suffixed by one or more
+feature modifiers. This option has the form
+@option{-mcpu=@var{cpu}@r{@{}+@r{[}no@r{]}@var{feature}@r{@}*}}, where the
+possible values for @var{cpu} are @samp{generic} and @samp{large}. The
+possible values for @var{feature} are documented in the sub-section
+below.
+
+Where conflicting feature modifiers are specified, the right-most feature is
+used.
+
+GCC uses this name to determine what kind of instructions it can emit when
+generating assembly code.
+
+@item -mtune=@var{name}
+@opindex mtune
+Specify the name of the target processor for which to tune the performance.
+The code will
+be tuned as if the target processor were of the type specified in this option,
+but still using instructions compatible with the target processor specified
+by a @option{-mcpu=} option. This option cannot be suffixed by feature
+modifiers.
+
+@end table
+
+@subsubsection @option{-march} and @option{-mcpu} feature modifiers
+@cindex @option{-march} feature modifiers
+@cindex @option{-mcpu} feature modifiers
+Feature modifiers used with @option{-march} and @option{-mcpu} can be one
+of the following:
+
+@table @samp
+@item crypto
+Enable Crypto extension. This implies Advanced SIMD is enabled.
+@item fp
+Enable floating-point instructions.
+@item simd
+Enable Advanced SIMD instructions. This implies floating-point instructions
+are enabled. This is the default for all current possible values of
+@option{-march} and @option{-mcpu}.
+@end table
+
@node ARM Options
@subsection ARM Options
@cindex ARM options
diff --git a/gcc/doc/md.texi b/gcc/doc/md.texi
index 386db196820..ffa5b6bbfbe 100644
--- a/gcc/doc/md.texi
+++ b/gcc/doc/md.texi
@@ -1653,6 +1653,65 @@ table heading for each architecture is the definitive reference for
the meanings of that architecture's constraints.
@table @emph
+@item AArch64 family---@file{config/aarch64/constraints.md}
+@table @code
+@item k
+The stack pointer register (@code{SP})
+
+@item w
+Floating point or SIMD vector register
+
+@item I
+Integer constant that is valid as an immediate operand in an @code{ADD}
+instruction
+
+@item J
+Integer constant that is valid as an immediate operand in a @code{SUB}
+instruction (once negated)
+
+@item K
+Integer constant that can be used with a 32-bit logical instruction
+
+@item L
+Integer constant that can be used with a 64-bit logical instruction
+
+@item M
+Integer constant that is valid as an immediate operand in a 32-bit @code{MOV}
+pseudo instruction. The @code{MOV} may be assembled to one of several different
+machine instructions depending on the value
+
+@item N
+Integer constant that is valid as an immediate operand in a 64-bit @code{MOV}
+pseudo instruction
+
+@item S
+An absolute symbolic address or a label reference
+
+@item Y
+Floating point constant zero
+
+@item Z
+Integer constant zero
+
+@item Usa
+An absolute symbolic address
+
+@item Ush
+The high part (bits 12 and upwards) of the pc-relative address of a symbol
+within 4GB of the instruction
+
+@item Q
+A memory address which uses a single base register with no offset
+
+@item Ump
+A memory address suitable for a load/store pair instruction in SI, DI, SF and
+DF modes
+
+@item Utf
+A memory address suitable for a load/store pair instruction in TF mode
+
+@end table
+
@item ARM family---@file{config/arm/arm.h}
@table @code
@item f
@@ -8888,6 +8947,7 @@ facilities to make this process easier.
@menu
* Mode Iterators:: Generating variations of patterns for different modes.
* Code Iterators:: Doing the same for codes.
+* Int Iterators:: Doing the same for integers.
@end menu
@node Mode Iterators
@@ -9159,4 +9219,81 @@ This is equivalent to:
@dots{}
@end smallexample
+@node Int Iterators
+@subsection Int Iterators
+@cindex int iterators in @file{.md} files
+@findex define_int_iterator
+@findex define_int_attr
+
+Int iterators operate in a similar way to code iterators. @xref{Code Iterators}.
+
+The construct:
+
+@smallexample
+(define_int_iterator @var{name} [(@var{int1} "@var{cond1}") @dots{} (@var{intn} "@var{condn}")])
+@end smallexample
+
+defines a pseudo integer constant @var{name} that can be instantiated as
+@var{inti} if condition @var{condi} is true. Each @var{int}
+must have the same rtx format. @xref{RTL Classes}. Int iterators can appear
+only in those rtx fields that have @samp{i} as the specifier. This means that
+each @var{int} has to be a constant defined using @code{define_constants} or
+@code{define_c_enum}.
+
+As with mode and code iterators, each pattern that uses @var{name} will be
+expanded @var{n} times, once with all uses of @var{name} replaced by
+@var{int1}, once with all uses replaced by @var{int2}, and so on.
+@xref{Defining Mode Iterators}.
+
+It is possible to define attributes for ints as well as for codes and modes.
+Attributes are defined using:
+
+@smallexample
+(define_int_attr @var{name} [(@var{int1} "@var{value1}") @dots{} (@var{intn} "@var{valuen}")])
+@end smallexample
+
+Here's an example of int iterators in action, taken from the ARM port:
+
+@smallexample
+(define_int_iterator QABSNEG [UNSPEC_VQABS UNSPEC_VQNEG])
+
+(define_int_attr absneg [(UNSPEC_VQABS "abs") (UNSPEC_VQNEG "neg")])
+
+(define_insn "neon_vq<absneg><mode>"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ QABSNEG))]
+ "TARGET_NEON"
+ "vq<absneg>.<V_s_elem>\t%<V_reg>0, %<V_reg>1"
+ [(set_attr "neon_type" "neon_vqneg_vqabs")]
+)
+
+@end smallexample
+
+This is equivalent to:
+
+@smallexample
+(define_insn "neon_vqabs<mode>"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VQABS))]
+ "TARGET_NEON"
+ "vqabs.<V_s_elem>\t%<V_reg>0, %<V_reg>1"
+ [(set_attr "neon_type" "neon_vqneg_vqabs")]
+)
+
+(define_insn "neon_vqneg<mode>"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VQNEG))]
+ "TARGET_NEON"
+ "vqneg.<V_s_elem>\t%<V_reg>0, %<V_reg>1"
+ [(set_attr "neon_type" "neon_vqneg_vqabs")]
+)
+
+@end smallexample
+
@end ifset
diff --git a/gcc/read-rtl.c b/gcc/read-rtl.c
index 1402c546837..90b4afff9a1 100644
--- a/gcc/read-rtl.c
+++ b/gcc/read-rtl.c
@@ -94,6 +94,26 @@ struct iterator_traverse_data {
#define BELLWETHER_CODE(CODE) \
((CODE) < NUM_RTX_CODE ? CODE : bellwether_codes[CODE - NUM_RTX_CODE])
+/* One element in the (rtx, opno) pair list. */
+struct rtx_list {
+ /* rtx. */
+ rtx x;
+ /* Position of the operand to replace. */
+ int opno;
+};
+
+/* A structure to track which rtx uses which int iterator. */
+struct int_iterator_mapping {
+ /* Iterator. */
+ struct mapping *iterator;
+ /* list of rtx using ITERATOR. */
+ struct rtx_list *rtxs;
+ int num_rtx;
+};
+
+static struct int_iterator_mapping *int_iterator_data;
+static int num_int_iterator_data;
+
static int find_mode (const char *);
static bool uses_mode_iterator_p (rtx, int);
static void apply_mode_iterator (rtx, int);
@@ -121,8 +141,8 @@ static rtx read_rtx_code (const char *, struct map_value **);
static rtx read_nested_rtx (struct map_value **);
static rtx read_rtx_variadic (struct map_value **, rtx);
-/* The mode and code iterator structures. */
-static struct iterator_group modes, codes;
+/* The mode, code and int iterator structures. */
+static struct iterator_group modes, codes, ints;
/* Index I is the value of BELLWETHER_CODE (I + NUM_RTX_CODE). */
static enum rtx_code *bellwether_codes;
@@ -179,6 +199,59 @@ apply_code_iterator (rtx x, int code)
PUT_CODE (x, (enum rtx_code) code);
}
+/* Since GCC does not construct a table of valid constants,
+ we have to accept any int as valid. No cross-checking can
+ be done. */
+static int
+find_int (const char *name)
+{
+ char *endptr;
+ int ret;
+
+ if (ISDIGIT (*name))
+ {
+ ret = strtol (name, &endptr, 0);
+ gcc_assert (*endptr == '\0');
+ return ret;
+ }
+ else
+ fatal_with_file_and_line ("unknown int `%s'", name);
+}
+
+static bool
+dummy_uses_int_iterator (rtx x ATTRIBUTE_UNUSED, int index ATTRIBUTE_UNUSED)
+{
+ return false;
+}
+
+static void
+dummy_apply_int_iterator (rtx x ATTRIBUTE_UNUSED, int code ATTRIBUTE_UNUSED)
+{
+ /* Do nothing. */
+}
+
+/* Stand-alone int iterator usage-checking function. */
+static bool
+uses_int_iterator_p (rtx x, struct mapping *iterator, int opno)
+{
+ int i;
+ for (i=0; i < num_int_iterator_data; i++)
+ if (int_iterator_data[i].iterator->group == iterator->group &&
+ int_iterator_data[i].iterator->index == iterator->index)
+ {
+ /* Found an existing entry. Check if X is in its list. */
+ struct int_iterator_mapping it = int_iterator_data[i];
+ int j;
+
+ for (j=0; j < it.num_rtx; j++)
+ {
+ if (it.rtxs[j].x == x && it.rtxs[j].opno == opno)
+ return true;
+ }
+ }
+ return false;
+}
+
/* Map a code or mode attribute string P to the underlying string for
ITERATOR and VALUE. */
@@ -341,7 +414,9 @@ apply_iterator_to_rtx (rtx original, struct mapping *iterator, int value,
x = rtx_alloc (bellwether_code);
memcpy (x, original, RTX_CODE_SIZE (bellwether_code));
- /* Change the mode or code itself. */
+ /* Change the mode or code itself.
+ For int iterators, apply_iterator () does nothing. This is
+ because we want to apply int iterators to operands below. */
group = iterator->group;
if (group->uses_iterator_p (x, iterator->index + group->num_builtins))
group->apply_iterator (x, value);
@@ -379,6 +454,10 @@ apply_iterator_to_rtx (rtx original, struct mapping *iterator, int value,
unknown_mode_attr);
}
break;
+ case 'i':
+ if (uses_int_iterator_p (original, iterator, i))
+ XINT (x, i) = value;
+ break;
default:
break;
@@ -419,6 +498,10 @@ uses_iterator_p (rtx x, struct mapping *iterator)
return true;
break;
+ case 'i':
+ if (uses_int_iterator_p (x, iterator, i))
+ return true;
+ break;
+
default:
break;
}
@@ -480,6 +563,7 @@ apply_iterator_traverse (void **slot, void *data)
iterator = (struct mapping *) *slot;
for (elem = mtd->queue; elem != 0; elem = XEXP (elem, 1))
+ {
if (uses_iterator_p (XEXP (elem, 0), iterator))
{
/* For each iterator we expand, we set UNKNOWN_MODE_ATTR to NULL.
@@ -509,6 +593,7 @@ apply_iterator_traverse (void **slot, void *data)
XEXP (elem, 0) = x;
}
}
+ }
return 1;
}
@@ -553,7 +638,7 @@ add_map_value (struct map_value **end_ptr, int number, const char *string)
return &value->next;
}
-/* Do one-time initialization of the mode and code attributes. */
+/* Do one-time initialization of the mode, code and int attributes. */
static void
initialize_iterators (void)
@@ -579,6 +664,15 @@ initialize_iterators (void)
codes.uses_iterator_p = uses_code_iterator_p;
codes.apply_iterator = apply_code_iterator;
+ ints.attrs = htab_create (13, leading_string_hash, leading_string_eq_p, 0);
+ ints.iterators = htab_create (13, leading_string_hash,
+ leading_string_eq_p, 0);
+ ints.num_builtins = 0;
+ ints.find_builtin = find_int;
+ ints.uses_iterator_p = dummy_uses_int_iterator;
+ ints.apply_iterator = dummy_apply_int_iterator;
+ num_int_iterator_data = 0;
+
lower = add_mapping (&modes, modes.attrs, "mode");
upper = add_mapping (&modes, modes.attrs, "MODE");
lower_ptr = &lower->values;
@@ -728,6 +822,61 @@ find_iterator (struct iterator_group *group, const char *name)
return group->find_builtin (name);
}
+/* We cannot use the same design as for code and mode iterators, because
+ ints can be arbitrary numbers and there is no way to represent each
+ int iterator's placeholder with a unique numeric identifier. Therefore
+ we create a (rtx *, opno, iterator *) triplet database. */
+
+static struct mapping *
+find_int_iterator (struct iterator_group *group, const char *name)
+{
+ struct mapping *m;
+
+ m = (struct mapping *) htab_find (group->iterators, &name);
+ if (m == 0)
+ fatal_with_file_and_line ("invalid iterator \"%s\"\n", name);
+ return m;
+}
+
+/* Add to the triplet database for int iterators. */
+static void
+add_int_iterator (struct mapping *iterator, rtx x, int opno)
+{
+ /* Find iterator in int_iterator_data. If already present,
+ add this R to its list of rtxs. If not present, create
+ a new entry for INT_ITERATOR_DATA and add the R to its
+ rtx list. */
+ int i;
+ for (i=0; i < num_int_iterator_data; i++)
+ if (int_iterator_data[i].iterator->index == iterator->index)
+ {
+ /* Found an existing entry. Add rtx to this iterator's list. */
+ int_iterator_data[i].rtxs =
+ XRESIZEVEC (struct rtx_list,
+ int_iterator_data[i].rtxs,
+ int_iterator_data[i].num_rtx + 1);
+ int_iterator_data[i].rtxs[int_iterator_data[i].num_rtx].x = x;
+ int_iterator_data[i].rtxs[int_iterator_data[i].num_rtx].opno = opno;
+ int_iterator_data[i].num_rtx++;
+ return;
+ }
+
+ /* New INT_ITERATOR_DATA entry. */
+ if (num_int_iterator_data == 0)
+ int_iterator_data = XNEWVEC (struct int_iterator_mapping, 1);
+ else
+ int_iterator_data = XRESIZEVEC (struct int_iterator_mapping,
+ int_iterator_data,
+ num_int_iterator_data + 1);
+ int_iterator_data[num_int_iterator_data].iterator = iterator;
+ int_iterator_data[num_int_iterator_data].rtxs = XNEWVEC (struct rtx_list, 1);
+ int_iterator_data[num_int_iterator_data].rtxs[0].x = x;
+ int_iterator_data[num_int_iterator_data].rtxs[0].opno = opno;
+ int_iterator_data[num_int_iterator_data].num_rtx = 1;
+ num_int_iterator_data++;
+}
+
/* Finish reading a declaration of the form:
(define... <name> [<value1> ... <valuen>])
@@ -817,6 +966,7 @@ read_rtx (const char *rtx_name, rtx *x)
static rtx queue_head;
struct map_value *mode_maps;
struct iterator_traverse_data mtd;
+ int i;
/* Do one-time initialization. */
if (queue_head == 0)
@@ -852,6 +1002,17 @@ read_rtx (const char *rtx_name, rtx *x)
check_code_iterator (read_mapping (&codes, codes.iterators));
return false;
}
+ if (strcmp (rtx_name, "define_int_attr") == 0)
+ {
+ read_mapping (&ints, ints.attrs);
+ return false;
+ }
+ if (strcmp (rtx_name, "define_int_iterator") == 0)
+ {
+ read_mapping (&ints, ints.iterators);
+ return false;
+ }
+
mode_maps = 0;
XEXP (queue_head, 0) = read_rtx_code (rtx_name, &mode_maps);
@@ -860,6 +1021,15 @@ read_rtx (const char *rtx_name, rtx *x)
mtd.queue = queue_head;
mtd.mode_maps = mode_maps;
mtd.unknown_mode_attr = mode_maps ? mode_maps->string : NULL;
+ htab_traverse (ints.iterators, apply_iterator_traverse, &mtd);
+ /* Free used memory from recording int iterator usage. */
+  for (i = 0; i < num_int_iterator_data; i++)
+ if (int_iterator_data[i].num_rtx > 0)
+ XDELETEVEC (int_iterator_data[i].rtxs);
+ if (num_int_iterator_data > 0)
+ XDELETEVEC (int_iterator_data);
+ num_int_iterator_data = 0;
+
htab_traverse (modes.iterators, apply_iterator_traverse, &mtd);
htab_traverse (codes.iterators, apply_iterator_traverse, &mtd);
if (mtd.unknown_mode_attr)
@@ -1057,14 +1227,31 @@ read_rtx_code (const char *code_name, struct map_value **mode_maps)
XWINT (return_rtx, i) = tmp_wide;
break;
- case 'i':
case 'n':
	read_name (&name);
validate_const_int (name.string);
tmp_int = atoi (name.string);
XINT (return_rtx, i) = tmp_int;
break;
-
+ case 'i':
+ /* Can be an iterator or an integer constant. */
+ read_name (&name);
+ if (!ISDIGIT (name.string[0]))
+ {
+ struct mapping *iterator;
+ /* An iterator. */
+ iterator = find_int_iterator (&ints, name.string);
+ /* Build (iterator, rtx, op) triplet-database. */
+ add_int_iterator (iterator, return_rtx, i);
+ }
+ else
+ {
+ /* A numeric constant. */
+ validate_const_int (name.string);
+ tmp_int = atoi (name.string);
+ XINT (return_rtx, i) = tmp_int;
+ }
+ break;
default:
gcc_unreachable ();
}
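
For reference, the triplet database that add_int_iterator above populates has
roughly this shape (a sketch only; the real definitions appear earlier in
read-rtl.c and may differ in detail):

    /* Sketch of the assumed triplet-database types; not part of the patch.  */
    struct rtx_list
    {
      rtx x;     /* An rtx whose 'i' field names an int iterator.  */
      int opno;  /* Which operand of X holds the iterator placeholder.  */
    };

    struct int_iterator_mapping
    {
      struct mapping *iterator;  /* The int iterator being used.  */
      struct rtx_list *rtxs;     /* Every (rtx, opno) use of it.  */
      int num_rtx;               /* Number of entries in RTXS.  */
    };

    static struct int_iterator_mapping *int_iterator_data;
    static int num_int_iterator_data;

Each recorded triplet tells the later traversal which XINT (x, opno) slot to
rewrite for every value of the iterator, which is why the design needs no
per-placeholder numeric encoding.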
diff --git a/gcc/testsuite/ChangeLog.aarch64 b/gcc/testsuite/ChangeLog.aarch64
new file mode 100644
index 00000000000..5b577d5ba60
--- /dev/null
+++ b/gcc/testsuite/ChangeLog.aarch64
@@ -0,0 +1,184 @@
+2012-09-25 Tejas Belagod <tejas.belagod@arm.com>
+
+	* lib/target-supports.exp
+	(check_effective_target_vect_stridedN): Enable support for strided
+	loads and stores for aarch64.
+
+2012-09-18 Ian Bolton <ian.bolton@arm.com>
+
+ * gcc.target/aarch64/clrsb.c: New test.
+ * gcc.target/aarch64/clz.c: New test.
+ * gcc.target/aarch64/ctz.c: New test.
+
+2012-09-17 Ian Bolton <ian.bolton@arm.com>
+
+ * gcc.target/aarch64/ffs.c: New test.
+
+2012-09-17 Ian Bolton <ian.bolton@arm.com>
+
+ * gcc.target/aarch64/fmadd.c: Added extra tests.
+ * gcc.target/aarch64/fnmadd-fastmath.c: New test.
+
+2012-05-25 Ian Bolton <ian.bolton@arm.com>
+ Jim MacArthur <jim.macarthur@arm.com>
+ Marcus Shawcroft <marcus.shawcroft@arm.com>
+ Nigel Stephens <nigel.stephens@arm.com>
+ Ramana Radhakrishnan <ramana.radhakrishnan@arm.com>
+ Richard Earnshaw <rearnsha@arm.com>
+ Sofiane Naci <sofiane.naci@arm.com>
+	Stephen Thomas <stephen.thomas@arm.com>
+ Tejas Belagod <tejas.belagod@arm.com>
+ Yufeng Zhang <yufeng.zhang@arm.com>
+
+	* gcc.target/aarch64/aapcs64/aapcs64.exp: New file.
+	* gcc.target/aarch64/aapcs64/abitest-2.h: New file.
+	* gcc.target/aarch64/aapcs64/abitest-common.h: New file.
+	* gcc.target/aarch64/aapcs64/abitest.S: New file.
+	* gcc.target/aarch64/aapcs64/abitest.h: New file.
+	* gcc.target/aarch64/aapcs64/func-ret-1.c: New file.
+	* gcc.target/aarch64/aapcs64/func-ret-2.c: New file.
+	* gcc.target/aarch64/aapcs64/func-ret-3.c: New file.
+	* gcc.target/aarch64/aapcs64/func-ret-3.x: New file.
+	* gcc.target/aarch64/aapcs64/func-ret-4.c: New file.
+	* gcc.target/aarch64/aapcs64/func-ret-4.x: New file.
+	* gcc.target/aarch64/aapcs64/ice_1.c: New file.
+	* gcc.target/aarch64/aapcs64/ice_2.c: New file.
+	* gcc.target/aarch64/aapcs64/ice_3.c: New file.
+	* gcc.target/aarch64/aapcs64/ice_4.c: New file.
+	* gcc.target/aarch64/aapcs64/ice_5.c: New file.
+	* gcc.target/aarch64/aapcs64/macro-def.h: New file.
+	* gcc.target/aarch64/aapcs64/test_1.c: New file.
+	* gcc.target/aarch64/aapcs64/test_10.c: New file.
+	* gcc.target/aarch64/aapcs64/test_11.c: New file.
+	* gcc.target/aarch64/aapcs64/test_12.c: New file.
+	* gcc.target/aarch64/aapcs64/test_13.c: New file.
+	* gcc.target/aarch64/aapcs64/test_14.c: New file.
+	* gcc.target/aarch64/aapcs64/test_15.c: New file.
+	* gcc.target/aarch64/aapcs64/test_16.c: New file.
+	* gcc.target/aarch64/aapcs64/test_17.c: New file.
+	* gcc.target/aarch64/aapcs64/test_18.c: New file.
+	* gcc.target/aarch64/aapcs64/test_19.c: New file.
+	* gcc.target/aarch64/aapcs64/test_2.c: New file.
+	* gcc.target/aarch64/aapcs64/test_20.c: New file.
+	* gcc.target/aarch64/aapcs64/test_21.c: New file.
+	* gcc.target/aarch64/aapcs64/test_22.c: New file.
+	* gcc.target/aarch64/aapcs64/test_23.c: New file.
+	* gcc.target/aarch64/aapcs64/test_24.c: New file.
+	* gcc.target/aarch64/aapcs64/test_25.c: New file.
+	* gcc.target/aarch64/aapcs64/test_26.c: New file.
+	* gcc.target/aarch64/aapcs64/test_3.c: New file.
+	* gcc.target/aarch64/aapcs64/test_4.c: New file.
+	* gcc.target/aarch64/aapcs64/test_5.c: New file.
+	* gcc.target/aarch64/aapcs64/test_6.c: New file.
+	* gcc.target/aarch64/aapcs64/test_7.c: New file.
+	* gcc.target/aarch64/aapcs64/test_8.c: New file.
+	* gcc.target/aarch64/aapcs64/test_9.c: New file.
+	* gcc.target/aarch64/aapcs64/test_align-1.c: New file.
+	* gcc.target/aarch64/aapcs64/test_align-2.c: New file.
+	* gcc.target/aarch64/aapcs64/test_align-3.c: New file.
+	* gcc.target/aarch64/aapcs64/test_align-4.c: New file.
+	* gcc.target/aarch64/aapcs64/test_complex.c: New file.
+	* gcc.target/aarch64/aapcs64/test_int128.c: New file.
+	* gcc.target/aarch64/aapcs64/test_quad_double.c: New file.
+	* gcc.target/aarch64/aapcs64/type-def.h: New file.
+	* gcc.target/aarch64/aapcs64/va_arg-1.c: New file.
+	* gcc.target/aarch64/aapcs64/va_arg-10.c: New file.
+	* gcc.target/aarch64/aapcs64/va_arg-11.c: New file.
+	* gcc.target/aarch64/aapcs64/va_arg-12.c: New file.
+	* gcc.target/aarch64/aapcs64/va_arg-2.c: New file.
+	* gcc.target/aarch64/aapcs64/va_arg-3.c: New file.
+	* gcc.target/aarch64/aapcs64/va_arg-4.c: New file.
+	* gcc.target/aarch64/aapcs64/va_arg-5.c: New file.
+	* gcc.target/aarch64/aapcs64/va_arg-6.c: New file.
+	* gcc.target/aarch64/aapcs64/va_arg-7.c: New file.
+	* gcc.target/aarch64/aapcs64/va_arg-8.c: New file.
+	* gcc.target/aarch64/aapcs64/va_arg-9.c: New file.
+	* gcc.target/aarch64/aapcs64/validate_memory.h: New file.
+ * gcc.target/aarch64/aarch64.exp: New file.
+ * gcc.target/aarch64/adc-1.c: New file.
+ * gcc.target/aarch64/adc-2.c: New file.
+ * gcc.target/aarch64/asm-1.c: New file.
+ * gcc.target/aarch64/csinc-1.c: New file.
+ * gcc.target/aarch64/csinv-1.c: New file.
+ * gcc.target/aarch64/csneg-1.c: New file.
+ * gcc.target/aarch64/extend.c: New file.
+ * gcc.target/aarch64/fcvt.x: New file.
+ * gcc.target/aarch64/fcvt_double_int.c: New file.
+ * gcc.target/aarch64/fcvt_double_long.c: New file.
+ * gcc.target/aarch64/fcvt_double_uint.c: New file.
+ * gcc.target/aarch64/fcvt_double_ulong.c: New file.
+ * gcc.target/aarch64/fcvt_float_int.c: New file.
+ * gcc.target/aarch64/fcvt_float_long.c: New file.
+ * gcc.target/aarch64/fcvt_float_uint.c: New file.
+ * gcc.target/aarch64/fcvt_float_ulong.c: New file.
+ * gcc.target/aarch64/fmadd.c: New file.
+ * gcc.target/aarch64/frint.x: New file.
+ * gcc.target/aarch64/frint_double.c: New file.
+ * gcc.target/aarch64/frint_float.c: New file.
+ * gcc.target/aarch64/index.c: New file.
+ * gcc.target/aarch64/mneg-1.c: New file.
+ * gcc.target/aarch64/mneg-2.c: New file.
+ * gcc.target/aarch64/mneg-3.c: New file.
+ * gcc.target/aarch64/mnegl-1.c: New file.
+ * gcc.target/aarch64/mnegl-2.c: New file.
+ * gcc.target/aarch64/narrow_high-intrinsics.c: New file.
+ * gcc.target/aarch64/pic-constantpool1.c: New file.
+ * gcc.target/aarch64/pic-symrefplus.c: New file.
+ * gcc.target/aarch64/reload-valid-spoff.c: New file.
+ * gcc.target/aarch64/scalar_intrinsics.c: New file.
+ * gcc.target/aarch64/table-intrinsics.c: New file.
+ * gcc.target/aarch64/tst-1.c: New file.
+ * gcc.target/aarch64/vect-abs-compile.c: New file.
+ * gcc.target/aarch64/vect-abs.c: New file.
+ * gcc.target/aarch64/vect-abs.x: New file.
+ * gcc.target/aarch64/vect-compile.c: New file.
+ * gcc.target/aarch64/vect-faddv-compile.c: New file.
+ * gcc.target/aarch64/vect-faddv.c: New file.
+ * gcc.target/aarch64/vect-faddv.x: New file.
+ * gcc.target/aarch64/vect-fmax-fmin-compile.c: New file.
+ * gcc.target/aarch64/vect-fmax-fmin.c: New file.
+ * gcc.target/aarch64/vect-fmax-fmin.x: New file.
+ * gcc.target/aarch64/vect-fmaxv-fminv-compile.c: New file.
+ * gcc.target/aarch64/vect-fmaxv-fminv.x: New file.
+ * gcc.target/aarch64/vect-fp-compile.c: New file.
+ * gcc.target/aarch64/vect-fp.c: New file.
+ * gcc.target/aarch64/vect-fp.x: New file.
+ * gcc.target/aarch64/vect-mull-compile.c: New file.
+ * gcc.target/aarch64/vect-mull.c: New file.
+ * gcc.target/aarch64/vect-mull.x: New file.
+ * gcc.target/aarch64/vect.c: New file.
+ * gcc.target/aarch64/vect.x: New file.
+ * gcc.target/aarch64/vector_intrinsics.c: New file.
+ * gcc.target/aarch64/vfp-1.c: New file.
+ * gcc.target/aarch64/volatile-bitfields-1.c: New file.
+ * gcc.target/aarch64/volatile-bitfields-2.c: New file.
+ * gcc.target/aarch64/volatile-bitfields-3.c: New file.
+ * lib/target-supports.exp
+ (check_profiling_available): Add AArch64.
+ (check_effective_target_vect_int): Likewise.
+ (check_effective_target_vect_shift): Likewise.
+ (check_effective_target_vect_float): Likewise.
+ (check_effective_target_vect_double): Likewise.
+ (check_effective_target_vect_widen_mult_qi_to_hi): Likewise.
+ (check_effective_target_vect_widen_mult_hi_to_si): Likewise.
+ (check_effective_target_vect_pack_trunc): Likewise.
+ (check_effective_target_vect_unpack): Likewise.
+ (check_effective_target_vect_hw_misalign): Likewise.
+ (check_effective_target_vect_short_mult): Likewise.
+ (check_effective_target_vect_int_mult): Likewise.
+ (check_effective_target_sync_int_long): Likewise.
+ (check_effective_target_sync_char_short): Likewise.
+ (check_vect_support_and_set_flags): Likewise.
+ * g++.dg/abi/aarch64_guard1.C: New file.
+ * g++.dg/other/PR23205.C: Enable aarch64.
+ * g++.dg/other/pr23205-2.C: Likewise.
+ * g++.old-deja/g++.abi/ptrmem.C: Likewise.
+ * gcc.c-torture/execute/20101011-1.c: Likewise.
+ * gcc.dg/torture/fp-int-convert-float128-timode.c: Likewise.
+ * gcc.dg/torture/fp-int-convert-float128.c: Likewise.
+ * gcc.dg/20020312-2.c: Likewise.
+ * gcc.dg/20040813-1.c: Likewise.
+ * gcc.dg/builtin-apply2.c: Likewise.
+ * gcc.dg/const-float128-ped.c: Likewise.
+ * gcc.dg/const-float128.c: Likewise.
+ * gcc.dg/stack-usage-1.c: Likewise.
diff --git a/gcc/testsuite/g++.dg/abi/aarch64_guard1.C b/gcc/testsuite/g++.dg/abi/aarch64_guard1.C
new file mode 100644
index 00000000000..af82ad2ec36
--- /dev/null
+++ b/gcc/testsuite/g++.dg/abi/aarch64_guard1.C
@@ -0,0 +1,17 @@
+// Check that the initialization guard variable is an 8-byte aligned,
+// 8-byte doubleword and that only its least significant bit is used
+// for the initialization guard.
+// { dg-do compile { target aarch64*-*-* } }
+// { dg-options "-O -fdump-tree-original" }
+
+int bar();
+
+int *foo ()
+{
+ static int x = bar ();
+ return &x;
+}
+
+// { dg-final { scan-assembler _ZGVZ3foovE1x,8,8 } }
+// { dg-final { scan-tree-dump "_ZGVZ3foovE1x & 1" "original" } }
+// { dg-final { cleanup-tree-dump "original" } }
diff --git a/gcc/testsuite/g++.dg/other/PR23205.C b/gcc/testsuite/g++.dg/other/PR23205.C
index a31fc1d773d..e55710b40f0 100644
--- a/gcc/testsuite/g++.dg/other/PR23205.C
+++ b/gcc/testsuite/g++.dg/other/PR23205.C
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-skip-if "No stabs" { mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* *-*-vxworks } { "*" } { "" } } */
+/* { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* *-*-vxworks } { "*" } { "" } } */
/* { dg-options "-gstabs+ -fno-eliminate-unused-debug-types" } */
const int foobar = 4;
diff --git a/gcc/testsuite/g++.dg/other/pr23205-2.C b/gcc/testsuite/g++.dg/other/pr23205-2.C
index fbd16dfab58..607e5a2b4e4 100644
--- a/gcc/testsuite/g++.dg/other/pr23205-2.C
+++ b/gcc/testsuite/g++.dg/other/pr23205-2.C
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-skip-if "No stabs" { mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* } { "*" } { "" } } */
+/* { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* } { "*" } { "" } } */
/* { dg-options "-gstabs+ -fno-eliminate-unused-debug-types -ftoplevel-reorder" } */
const int foobar = 4;
diff --git a/gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C b/gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C
index 077fa50840c..341735879c5 100644
--- a/gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C
+++ b/gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C
@@ -7,7 +7,7 @@
function. However, some platforms use all bits to encode a
function pointer. Such platforms use the lowest bit of the delta,
that is shifted left by one bit. */
-#if defined __MN10300__ || defined __SH5__ || defined __arm__ || defined __thumb__ || defined __mips__
+#if defined __MN10300__ || defined __SH5__ || defined __arm__ || defined __thumb__ || defined __mips__ || defined __aarch64__
#define ADJUST_PTRFN(func, virt) ((void (*)())(func))
#define ADJUST_DELTA(delta, virt) (((delta) << 1) + !!(virt))
#else
diff --git a/gcc/testsuite/gcc.c-torture/execute/20101011-1.c b/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
index fcf8c071246..961240d5260 100644
--- a/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
+++ b/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
@@ -12,6 +12,11 @@
#elif defined (__sh__)
/* On SH division by zero does not trap. */
# define DO_TEST 0
+#elif defined (__aarch64__) && !defined(__linux__)
+  /* AArch64 divisions do trap by default, but libgloss targets do not
+     intercept the trap and raise SIGFPE.  So restrict the test to
+     AArch64 systems that use the Linux kernel.  */
+# define DO_TEST 0
#elif defined (__TMS320C6X__)
/* On TI C6X division by zero does not trap. */
# define DO_TEST 0
diff --git a/gcc/testsuite/gcc.dg/20020312-2.c b/gcc/testsuite/gcc.dg/20020312-2.c
index 768e17e64cd..47c2d0fc648 100644
--- a/gcc/testsuite/gcc.dg/20020312-2.c
+++ b/gcc/testsuite/gcc.dg/20020312-2.c
@@ -92,6 +92,8 @@ extern void abort (void);
# else
# define PIC_REG "gr17"
#endif
+#elif defined (__aarch64__)
+/* No pic register -- yet. */
#else
# error "Modify the test for your target."
#endif
diff --git a/gcc/testsuite/gcc.dg/20040813-1.c b/gcc/testsuite/gcc.dg/20040813-1.c
index e16344164d5..c1a9fd0409a 100644
--- a/gcc/testsuite/gcc.dg/20040813-1.c
+++ b/gcc/testsuite/gcc.dg/20040813-1.c
@@ -2,7 +2,7 @@
/* Contributed by Devang Patel <dpatel@apple.com> */
/* { dg-do compile } */
-/* { dg-skip-if "No stabs" { mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* *-*-vxworks* } { "*" } { "" } } */
+/* { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* *-*-vxworks* } { "*" } { "" } } */
/* { dg-options "-gstabs" } */
int
diff --git a/gcc/testsuite/gcc.dg/builtin-apply2.c b/gcc/testsuite/gcc.dg/builtin-apply2.c
index c5b841a8496..7061b1041ce 100644
--- a/gcc/testsuite/gcc.dg/builtin-apply2.c
+++ b/gcc/testsuite/gcc.dg/builtin-apply2.c
@@ -1,5 +1,5 @@
/* { dg-do run } */
-/* { dg-skip-if "Variadic funcs have all args on stack. Normal funcs have args in registers." { "avr-*-*" } { "*" } { "" } } */
+/* { dg-skip-if "Variadic funcs have all args on stack. Normal funcs have args in registers." { "aarch64*-*-* avr-*-*" } { "*" } { "" } } */
/* { dg-skip-if "Variadic funcs use Base AAPCS. Normal funcs use VFP variant." { "arm*-*-*" } { "-mfloat-abi=hard" } { "" } } */
/* PR target/12503 */
diff --git a/gcc/testsuite/gcc.dg/stack-usage-1.c b/gcc/testsuite/gcc.dg/stack-usage-1.c
index e2c38ee3026..ecdd2f2c544 100644
--- a/gcc/testsuite/gcc.dg/stack-usage-1.c
+++ b/gcc/testsuite/gcc.dg/stack-usage-1.c
@@ -7,7 +7,9 @@
function FOO is reported as 256 or 264 in the stack usage (.su) file.
Then check that this is the actual stack usage in the assembly file. */
-#if defined(__i386__)
+#if defined(__aarch64__)
+# define SIZE 256 /* No frame pointer for leaf functions (default) */
+#elif defined(__i386__)
# define SIZE 248
#elif defined(__x86_64__)
# ifndef _WIN64
diff --git a/gcc/testsuite/gcc.dg/torture/pr51106-2.s b/gcc/testsuite/gcc.dg/torture/pr51106-2.s
new file mode 100644
index 00000000000..d1051e48b64
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr51106-2.s
@@ -0,0 +1,2 @@
+ .cpu generic
+ .file "pr51106-2.c"
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/aapcs64.exp b/gcc/testsuite/gcc.target/aarch64/aapcs64/aapcs64.exp
new file mode 100644
index 00000000000..2ec7d3f4b15
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/aapcs64.exp
@@ -0,0 +1,67 @@
+# Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+# Contributed by ARM Ltd.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>. */
+
+load_lib c-torture.exp
+load_lib target-supports.exp
+load_lib torture-options.exp
+
+if { ![istarget aarch64*-*-*] } then {
+ return
+}
+
+torture-init
+set-torture-options $C_TORTURE_OPTIONS
+set additional_flags "-W -Wall -Wno-abi"
+
+# Test parameter passing.
+foreach src [lsort [glob -nocomplain $srcdir/$subdir/test_*.c]] {
+ if {[runtest_file_p $runtests $src]} {
+ c-torture-execute [list $src \
+ $srcdir/$subdir/abitest.S] \
+ $additional_flags
+ }
+}
+
+# Test unnamed argument retrieval via the va_arg macro.
+foreach src [lsort [glob -nocomplain $srcdir/$subdir/va_arg-*.c]] {
+ if {[runtest_file_p $runtests $src]} {
+ c-torture-execute [list $src \
+ $srcdir/$subdir/abitest.S] \
+ $additional_flags
+ }
+}
+
+# Test function return value.
+foreach src [lsort [glob -nocomplain $srcdir/$subdir/func-ret-*.c]] {
+ if {[runtest_file_p $runtests $src]} {
+ c-torture-execute [list $src \
+ $srcdir/$subdir/abitest.S] \
+ $additional_flags
+ }
+}
+
+# Test that no internal compiler errors occur.
+foreach src [lsort [glob -nocomplain $srcdir/$subdir/ice_*.c]] {
+ if {[runtest_file_p $runtests $src]} {
+ c-torture [list $src] \
+ $additional_flags
+ }
+}
+
+torture-finish
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/abitest-2.h b/gcc/testsuite/gcc.target/aarch64/aapcs64/abitest-2.h
new file mode 100644
index 00000000000..c56e7cc6785
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/abitest-2.h
@@ -0,0 +1,101 @@
+/* This header file should be included for the purpose of function return
+ value testing. */
+
+#include "abitest-common.h"
+#include "validate_memory.h"
+
+void (*testfunc_ptr)(char* stack);
+
+/* Helper macros to generate function names.  Example of a generated
+   name: func_return_val_1.  */
+#define FUNC_BASE_NAME func_return_val_
+#define FUNC_NAME_COMBINE(base,suffix) base ## suffix
+#define FUNC_NAME_1(base,suffix) FUNC_NAME_COMBINE(base,suffix)
+#define FUNC_NAME(suffix) FUNC_NAME_1(FUNC_BASE_NAME,suffix)
+#define TEST_FUNC_BASE_NAME testfunc_
+#define TEST_FUNC_NAME(suffix) FUNC_NAME_1(TEST_FUNC_BASE_NAME,suffix)
+
+#undef DUMP_STATUS
+#ifdef DUMP_ENABLED
+#define DUMP_STATUS(type,val) printf ("### Checking "#type" "#val"\n");
+#else
+#define DUMP_STATUS(type,val)
+#endif
+
+/* Generate code to do memcmp to check if the returned value is in the
+   correct location and has the expected value.
+   Note that for a value that is returned in the caller-allocated memory
+   block, we get the address from the saved x8 register.  x8 is saved
+   just after the callee returns; we assume that x8 has not been
+   clobbered by then, although there is no requirement for the callee
+   to preserve the value stored in x8.  Luckily, all test cases here are
+   simple enough that x8 doesn't normally get clobbered (although this
+   is not guaranteed).  */
+#undef FUNC_VAL_CHECK
+#define FUNC_VAL_CHECK(id, type, val, offset, layout) \
+void TEST_FUNC_NAME(id)(char* stack) \
+{ \
+ type __x = val; \
+ char* addr; \
+ DUMP_STATUS(type,val) \
+ if (offset != X8) \
+ addr = stack + offset; \
+ else \
+ addr = *(char **)(stack + X8); \
+ if (validate_memory (&__x, addr, sizeof (type), layout) != 0) \
+ abort(); \
+}
+
+/* A composite larger than 16 bytes is replaced by a pointer to a copy
+   prepared by the caller, so here we extract the pointer, dereference it
+   and compare the content with that of the original.  */
+#define PTR(type, val, offset, ...) { \
+ type * ptr; \
+ DUMP_ARG(type,val) \
+ ptr = *(type **)(stack + offset); \
+ if (memcmp (ptr, &val, sizeof (type)) != 0) abort (); \
+}
+
+#include TESTFILE
+
+MYFUNCTYPE myfunc () PCSATTR;
+
+/* Define the function to return VAL of type TYPE.  I and D in the
+   parameter list are two dummy parameters to help improve the detection of
+   bugs like a short vector being returned in X0 after being copied from V0. */
+#undef FUNC_VAL_CHECK
+#define FUNC_VAL_CHECK(id, type, var, offset, layout) \
+__attribute__ ((noinline)) type FUNC_NAME (id) (int i, double d, type t) \
+ { \
+ asm (""::"r" (i),"r" (d)); /* asm prevents function from getting \
+ optimized away. Using i and d prevents \
+ warnings about unused parameters. \
+ */ \
+ return t; \
+ }
+#include TESTFILE
+
+
+/* Call the function that returns the value and call the checking function
+   to validate it.  See the comment above for why 0 and 0.0 appear in the
+   function argument list.  */
+#undef FUNC_VAL_CHECK
+#define FUNC_VAL_CHECK(id, type, var, offset, layout) \
+ { \
+ testfunc_ptr = TEST_FUNC_NAME(id); \
+ FUNC_NAME(id) (0, 0.0, var); \
+ myfunc (); \
+ }
+
+int main()
+{
+ which_kind_of_test = TK_RETURN;
+
+#ifdef HAS_DATA_INIT_FUNC
+ init_data ();
+#endif
+
+#include TESTFILE
+
+ return 0;
+}
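
To make the three-pass expansion above concrete: a single test line such as
FUNC_VAL_CHECK (0, int, 42, X0, flat) expands, across the three inclusions of
TESTFILE, roughly as follows (a sketch; the X8 indirection branch of the
checker is omitted, and `flat` is assumed to be a layout tag supplied by
validate_memory.h):

    /* Pass 1: the checker installed in testfunc_ptr.  */
    void testfunc_0 (char *stack)
    {
      int __x = 42;
      char *addr = stack + X0;   /* X0: offset of saved x0 in the dump.  */
      if (validate_memory (&__x, addr, sizeof (int), flat) != 0)
        abort ();
    }

    /* Pass 2: the function whose return value is under test.  */
    __attribute__ ((noinline)) int func_return_val_0 (int i, double d, int t)
    {
      asm (""::"r" (i), "r" (d));   /* Keep the dummy args live.  */
      return t;
    }

    /* Pass 3, inside main: run the test.  */
    testfunc_ptr = testfunc_0;
    func_return_val_0 (0, 0.0, 42);  /* Return value lands in w0.  */
    myfunc ();                       /* Dumps registers, calls the checker.  */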
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/abitest-common.h b/gcc/testsuite/gcc.target/aarch64/aapcs64/abitest-common.h
new file mode 100644
index 00000000000..c749a42a2e4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/abitest-common.h
@@ -0,0 +1,139 @@
+#undef __AAPCS64_BIG_ENDIAN__
+#ifdef __GNUC__
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define __AAPCS64_BIG_ENDIAN__
+#endif
+#else
+#error unknown compiler
+#endif
+
+#define IN_FRAMEWORK
+
+#define D0 0
+#define D1 8
+#define D2 16
+#define D3 24
+#define D4 32
+#define D5 40
+#define D6 48
+#define D7 56
+
+#define S0 64
+#define S1 68
+#define S2 72
+#define S3 76
+#define S4 80
+#define S5 84
+#define S6 88
+#define S7 92
+
+#define W0 96
+#define W1 100
+#define W2 104
+#define W3 108
+#define W4 112
+#define W5 116
+#define W6 120
+#define W7 124
+
+#define X0 128
+#define X1 136
+#define X2 144
+#define X3 152
+#define X4 160
+#define X5 168
+#define X6 176
+#define X7 184
+
+#define Q0 192
+#define Q1 208
+#define Q2 224
+#define Q3 240
+#define Q4 256
+#define Q5 272
+#define Q6 288
+#define Q7 304
+
+#define X8 320
+#define X9 328
+
+#define STACK 336
+
+/* The type of test.  'myfunc' in abitest.S needs to know which kind of
+   test it is running to decide what to do at run time.  Keep the
+   related code in abitest.S synchronized if anything is changed here.  */
+enum aapcs64_test_kind
+{
+ TK_PARAM = 0, /* Test parameter passing. */
+ TK_VA_ARG, /* Test va_arg code generation. */
+ TK_RETURN /* Test function return value. */
+};
+
+int which_kind_of_test;
+
+extern int printf (const char*, ...);
+extern void abort (void);
+extern void dumpregs () __asm("myfunc");
+
+#ifndef MYFUNCTYPE
+#define MYFUNCTYPE void
+#endif
+
+#ifndef PCSATTR
+#define PCSATTR
+#endif
+
+
+#ifdef RUNTIME_ENDIANNESS_CHECK
+#ifndef RUNTIME_ENDIANNESS_CHECK_FUNCTION_DEFINED
+/* This helper function is defined to detect whether there is any endianness
+   incompatibility between the compile-time and run-time environments.
+   TODO: review the implementation when the work on big-endian support in A64
+   GCC starts.
+ */
+static void rt_endian_check ()
+{
+ const char* msg_endian[2] = {"little-endian", "big-endian"};
+ const char* msg_env[2] = {"compile-time", "run-time"};
+ union
+ {
+ unsigned int ui;
+ unsigned char ch[4];
+ } u;
+ int flag = -1;
+
+ u.ui = 0xCAFEBABE;
+
+ printf ("u.ui=0x%X, u.ch[0]=0x%X\n", u.ui, u.ch[0]);
+
+ if (u.ch[0] == 0xBE)
+ {
+ /* Little-Endian at run-time */
+#ifdef __AAPCS64_BIG_ENDIAN__
+ /* Big-Endian at compile-time */
+ flag = 1;
+#endif
+ }
+ else
+ {
+ /* Big-Endian at run-time */
+#ifndef __AAPCS64_BIG_ENDIAN__
+ /* Little-Endian at compile-time */
+ flag = 0;
+#endif
+ }
+
+ if (flag != -1)
+ {
+ /* Endianness conflict exists */
+ printf ("Error: endianness conflicts between %s and %s:\n\
+\t%s: %s\n\t%s: %s\n", msg_env[0], msg_env[1], msg_env[0], msg_endian[flag],
+ msg_env[1], msg_endian[1-flag]);
+ abort ();
+ }
+
+ return;
+}
+#endif
+#define RUNTIME_ENDIANNESS_CHECK_FUNCTION_DEFINED
+#endif
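
The offsets above index one flat byte block that abitest.S fills with copies
of the argument and return registers; a checker reads values back with plain
pointer arithmetic, e.g. (a sketch, where stack_blk is a hypothetical name
for the char * block that myfunc hands to the checker):

    /* Sketch: reading saved registers back out of the dump block.  */
    double    in_d0 = *(double *)    (stack_blk + D0);    /* saved d0  */
    long long in_x0 = *(long long *) (stack_blk + X0);    /* saved x0  */
    float  stacked  = *(float *)     (stack_blk + STACK); /* 1st stacked arg */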
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/abitest.S b/gcc/testsuite/gcc.target/aarch64/aapcs64/abitest.S
new file mode 100644
index 00000000000..86ce7bed777
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/abitest.S
@@ -0,0 +1,59 @@
+ .global dumpregs
+ .global myfunc
+ .type dumpregs,%function
+ .type myfunc,%function
+dumpregs:
+myfunc:
+ mov x16, sp
+ mov x17, sp
+ sub sp, sp, 352 // 336 for registers and 16 for old sp and lr
+
+ stp x8, x9, [x17, #-16]! //320
+
+ stp q6, q7, [x17, #-32]! //288
+ stp q4, q5, [x17, #-32]! //256
+ stp q2, q3, [x17, #-32]! //224
+ stp q0, q1, [x17, #-32]! //192
+
+ stp x6, x7, [x17, #-16]! //176
+ stp x4, x5, [x17, #-16]! //160
+ stp x2, x3, [x17, #-16]! //144
+ stp x0, x1, [x17, #-16]! //128
+
+ stp w6, w7, [x17, #-8]! //120
+ stp w4, w5, [x17, #-8]! //112
+ stp w2, w3, [x17, #-8]! //104
+ stp w0, w1, [x17, #-8]! // 96
+
+ stp s6, s7, [x17, #-8]! // 88
+ stp s4, s5, [x17, #-8]! // 80
+ stp s2, s3, [x17, #-8]! // 72
+ stp s0, s1, [x17, #-8]! // 64
+
+ stp d6, d7, [x17, #-16]! // 48
+ stp d4, d5, [x17, #-16]! // 32
+ stp d2, d3, [x17, #-16]! // 16
+ stp d0, d1, [x17, #-16]! // 0
+
+ add x0, sp, #16
+ stp x16, x30, [x17, #-16]!
+
+ adrp x9, which_kind_of_test // determine the type of test
+ add x9, x9, :lo12:which_kind_of_test
+ ldr w9, [x9, #0]
+ cmp w9, #1
+ bgt LABEL_TEST_FUNC_RETURN
+ bl testfunc // parameter passing test or va_arg code gen test
+ b LABEL_RET
+LABEL_TEST_FUNC_RETURN:
+ adrp x9, testfunc_ptr
+ add x9, x9, :lo12:testfunc_ptr
+ ldr x9, [x9, #0]
+ blr x9 // function return value test
+LABEL_RET:
+ ldp x0, x30, [sp]
+ mov sp, x0
+ ret
+
+.weak testfunc
+.weak testfunc_ptr
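
In C terms, the dispatch at the tail of myfunc above behaves roughly like this
(a sketch; saved_block stands for the register-dump block whose address is
computed into x0 by the `add x0, sp, #16` instruction):

    if (which_kind_of_test <= 1)      /* TK_PARAM or TK_VA_ARG.  */
      testfunc (saved_block);         /* Weak symbol; defined by abitest.h.  */
    else                              /* TK_RETURN.  */
      (*testfunc_ptr) (saved_block);  /* Installed by abitest-2.h.  */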
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/abitest.h b/gcc/testsuite/gcc.target/aarch64/aapcs64/abitest.h
new file mode 100644
index 00000000000..af70937e047
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/abitest.h
@@ -0,0 +1,159 @@
+/* This header file should be included for the purpose of parameter passing
+ testing and va_arg code gen testing.
+
+ To test va_arg code gen, #define AAPCS64_TEST_STDARG in the test case.
+
+ The parameter passing test is done by passing variables/constants to
+ 'myfunc', which pushes its incoming arguments to a memory block on the
+ stack and then passes the memory block address to 'testfunc'. It is inside
+ 'testfunc' that the real parameter passing check is carried out.
+
+ The function body of 'myfunc' is in abitest.S. The declaration of 'myfunc'
+ is constructed during the pre-processing stage.
+
+   The va_arg code gen test has a similar workflow, apart from an extra set-up
+   step before calling 'myfunc'.  All arguments are passed to 'stdarg_func'
+   first, which assigns these arguments to its local variables via either
+   direct assignment or the va_arg macro, depending on whether an argument is
+   named or not.  Afterwards, 'stdarg_func' calls 'myfunc' with these local
+   variables as the arguments to finish the remaining steps. */
+
+#include "abitest-common.h"
+#include "validate_memory.h"
+
+#ifdef AAPCS64_TEST_STDARG
+/* Generate va_start (ap, last_named_arg). Note that this requires
+ LAST_NAMED_ARG_ID to be defined/used correctly in the test file. */
+#ifndef LAST_NAMED_ARG_ID
+#define LAST_NAMED_ARG_ID 65535
+#endif
+#ifndef VA_START
+#undef VA_START_1
+#define VA_START_1(ap, id) va_start (ap, _f##id);
+#define VA_START(ap, id) VA_START_1 (ap, id);
+#endif
+#endif /* AAPCS64_TEST_STDARG */
+
+/* Some debugging facility. */
+#undef DUMP_ARG
+#ifdef DUMP_ENABLED
+#define DUMP_ARG(type,val) printf ("### Checking ARG "#type" "#val"\n")
+#else
+#define DUMP_ARG(type,val)
+#endif
+
+
+/* Function called from myfunc (defined in abitest.S) to check the arguments
+   passed to myfunc.  myfunc has pushed all the arguments into the memory
+   block pointed to by STACK.  */
+void testfunc(char* stack)
+{
+#define AARCH64_MACRO_DEF_CHECK_INCOMING_ARGS
+#include "macro-def.h"
+#include TESTFILE
+#undef AARCH64_MACRO_DEF_CHECK_INCOMING_ARGS
+ return;
+}
+
+
+#ifndef AAPCS64_TEST_STDARG
+/* Test parameter passing. */
+
+/* Function declaration of myfunc. */
+MYFUNCTYPE myfunc(
+#define AARCH64_MACRO_DEF_GEN_PARAM_TYPE_LIST
+#include "macro-def.h"
+#include TESTFILE
+#undef AARCH64_MACRO_DEF_GEN_PARAM_TYPE_LIST
+) PCSATTR;
+
+#else /* AAPCS64_TEST_STDARG */
+/* Test stdarg macros, e.g. va_arg. */
+#include <stdarg.h>
+
+/* Dummy function to help reset parameter passing registers, i.e. X0-X7
+ and V0-V7 (by being passed 0 in W0-W7 and 0.f in S0-S7). */
+__attribute__ ((noinline)) void
+dummy_func (int w0, int w1, int w2, int w3, int w4, int w5, int w6, int w7,
+ float s0, float s1, float s2, float s3, float s4, float s5,
+ float s6, float s7)
+{
+ asm (""); /* Prevent function from getting optimized away */
+ return;
+}
+
+/* Function declaration of myfunc. */
+MYFUNCTYPE myfunc(
+#define AARCH64_VARIADIC_MACRO_DEF_GEN_PARAM_TYPE_LIST
+#include "macro-def.h"
+#include TESTFILE
+#undef AARCH64_VARIADIC_MACRO_DEF_GEN_PARAM_TYPE_LIST
+) PCSATTR;
+
+/* Function definition of stdarg_func.
+   stdarg_func is a variadic function; it retrieves all of its arguments,
+   both named and unnamed, and passes them to myfunc in the same order.
+   myfunc will carry out the checks on the passed values.  Note that myfunc
+   is not a variadic function. */
+MYFUNCTYPE stdarg_func(
+#define AARCH64_VARIADIC_MACRO_DEF_GEN_PARAM_TYPE_LIST_WITH_IDENT
+#include "macro-def.h"
+#include TESTFILE
+#undef AARCH64_VARIADIC_MACRO_DEF_GEN_PARAM_TYPE_LIST_WITH_IDENT
+) PCSATTR
+{
+ /* Start of the function body of stdarg_func. */
+ va_list ap;
+
+ VA_START (ap, LAST_NAMED_ARG_ID)
+  /* Zero the contents of X0-X7 and V0-V7 to make sure that any va_arg
+     failure will not be hidden by stale data in these registers.  */
+ dummy_func (0, 0, 0, 0, 0, 0, 0, 0, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f);
+  /* A full memory barrier to ensure that the compiler won't optimize
+     away the va_arg code generation.  */
+ __sync_synchronize ();
+ {
+ /* Assign all the function incoming arguments to local variables. */
+#define AARCH64_VARIADIC_MACRO_DEF_ASSIGN_LOCAL_VARS_WITH_ARGS
+#include "macro-def.h"
+#include TESTFILE
+#undef AARCH64_VARIADIC_MACRO_DEF_ASSIGN_LOCAL_VARS_WITH_ARGS
+
+ /* Call myfunc and pass in the local variables prepared above. */
+ myfunc (
+#define AARCH64_VARIADIC_MACRO_DEF_GEN_ARGUMENT_LIST
+#include "macro-def.h"
+#include TESTFILE
+#undef AARCH64_VARIADIC_MACRO_DEF_GEN_ARGUMENT_LIST
+);
+ }
+ va_end (ap);
+}
+
+#endif /* AAPCS64_TEST_STDARG */
+
+
+int main()
+{
+#ifdef RUNTIME_ENDIANNESS_CHECK
+ rt_endian_check();
+#endif
+#ifdef HAS_DATA_INIT_FUNC
+ init_data ();
+#endif
+
+#ifndef AAPCS64_TEST_STDARG
+ which_kind_of_test = TK_PARAM;
+ myfunc(
+#else
+ which_kind_of_test = TK_VA_ARG;
+ stdarg_func(
+#endif
+#define AARCH64_MACRO_DEF_GEN_ARGUMENT_LIST
+#include "macro-def.h"
+#include TESTFILE
+#undef AARCH64_MACRO_DEF_GEN_ARGUMENT_LIST
+);
+ return 0;
+}
+
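
Hand-expanded for one named int and one anonymous double, the stdarg_func
pattern above boils down to the following shape (a sketch with hypothetical
names following the _f/_x convention of macro-def.h):

    #include <stdarg.h>

    void myfunc (int a, double b);        /* Fixed-arg checker entry point.  */

    void stdarg_func (int _f0, ...)
    {
      va_list ap;
      va_start (ap, _f0);
      int _x0 = _f0;                      /* Named arg: direct copy.      */
      double _x1 = va_arg (ap, double);   /* Anonymous arg: via va_arg.   */
      myfunc (_x0, _x1);                  /* Forward in the same order.   */
      va_end (ap);
    }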
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-1.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-1.c
new file mode 100644
index 00000000000..16b5c1efdf8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-1.c
@@ -0,0 +1,44 @@
+/* Test AAPCS64 function result return.
+
+ This test covers most fundamental data types as specified in
+ AAPCS64 \S 4.1. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+/* { dg-additional-sources "abitest.S" } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "func-ret-1.c"
+#include "type-def.h"
+
+vf2_t vf2 = (vf2_t){ 17.f, 18.f };
+vi4_t vi4 = (vi4_t){ 0xdeadbabe, 0xbabecafe, 0xcafebeef, 0xbeefdead };
+union int128_t qword;
+
+int *int_ptr = (int *)0xabcdef0123456789ULL;
+
+#define HAS_DATA_INIT_FUNC
+void init_data ()
+{
+ /* Init signed quad-word integer. */
+ qword.l64 = 0xfdb9753102468aceLL;
+ qword.h64 = 0xeca8642013579bdfLL;
+}
+
+#include "abitest-2.h"
+#else
+FUNC_VAL_CHECK (0, unsigned char , 0xfe , X0, i8in64)
+FUNC_VAL_CHECK (1, signed char , 0xed , X0, i8in64)
+FUNC_VAL_CHECK (2, unsigned short, 0xdcba , X0, i16in64)
+FUNC_VAL_CHECK (3, signed short, 0xcba9 , X0, i16in64)
+FUNC_VAL_CHECK (4, unsigned int , 0xdeadbeef, X0, i32in64)
+FUNC_VAL_CHECK (5, signed int , 0xcafebabe, X0, i32in64)
+FUNC_VAL_CHECK (6, unsigned long long, 0xba98765432101234ULL, X0, flat)
+FUNC_VAL_CHECK (7, signed long long, 0xa987654321012345LL, X0, flat)
+FUNC_VAL_CHECK (8, __int128, qword.i, X0, flat)
+FUNC_VAL_CHECK (9, float, 65432.12345f, S0, flat)
+FUNC_VAL_CHECK (10, double, 9876543.212345, D0, flat)
+FUNC_VAL_CHECK (11, long double, 98765432123456789.987654321L, Q0, flat)
+FUNC_VAL_CHECK (12, vf2_t, vf2, D0, f32in64)
+FUNC_VAL_CHECK (13, vi4_t, vi4, Q0, i32in128)
+FUNC_VAL_CHECK (14, int *, int_ptr, X0, flat)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-2.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-2.c
new file mode 100644
index 00000000000..6b171c46fbb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-2.c
@@ -0,0 +1,71 @@
+/* Test AAPCS64 function result return.
+
+ This test covers most composite types as described in AAPCS64 \S 4.3.
+ Homogeneous floating-point aggregate types are covered in func-ret-3.c. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+/* { dg-additional-sources "abitest.S" } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "func-ret-2.c"
+
+struct x0
+{
+ char ch;
+ int i;
+} ys0 = { 'a', 12345 };
+
+struct x1
+{
+ int a;
+ unsigned int b;
+ unsigned int c;
+ unsigned int d;
+} ys1 = { 0xdeadbeef, 0xcafebabe, 0x87654321, 0xbcedf975 };
+
+struct x2
+{
+ long long a;
+ long long b;
+ char ch;
+} y2 = { 0x12, 0x34, 0x56 };
+
+union x3
+{
+ char ch;
+ int i;
+ long long ll;
+} y3;
+
+union x4
+{
+ int i;
+ struct x2 y2;
+} y4;
+
+#define HAS_DATA_INIT_FUNC
+void init_data ()
+{
+ /* Init small union. */
+ y3.ll = 0xfedcba98LL;
+
+ /* Init big union. */
+ y4.y2.a = 0x78;
+ y4.y2.b = 0x89;
+  y4.y2.ch = 0x9a;
+}
+
+
+#include "abitest-2.h"
+#else
+  /* Composites smaller than or equal to 16 bytes are returned in X0 and X1. */
+FUNC_VAL_CHECK ( 0, struct x0, ys0, X0, flat)
+FUNC_VAL_CHECK ( 1, struct x1, ys1, X0, flat)
+FUNC_VAL_CHECK ( 2, union x3, y3, X0, flat)
+
+  /* Composites larger than 16 bytes are returned in the caller-reserved
+     memory block, whose address is passed to the function as an additional
+     argument in X8.  */
+FUNC_VAL_CHECK (10, struct x2, y2, X8, flat)
+FUNC_VAL_CHECK (11, union x4, y4, X8, flat)
+#endif
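
The X0/X8 split in the checks above mirrors the AAPCS64 return convention;
at the call sites this means roughly the following (a sketch; f_small and
f_big are hypothetical functions returning the struct types above):

    struct x1 r1 = f_small ();  /* <= 16 bytes: result arrives in x0/x1.     */
    struct x2 r2 = f_big ();    /* > 16 bytes: the caller reserves memory,
                                   passes its address in x8, and the callee
                                   stores the result through that pointer.   */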
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-3.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-3.c
new file mode 100644
index 00000000000..ff9b7e6d4b8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-3.c
@@ -0,0 +1,92 @@
+/* Test AAPCS64 function result return.
+
+ This test covers homogeneous floating-point aggregate types as described
+ in AAPCS64 \S 4.3.5. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+/* { dg-additional-sources "abitest.S" } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "func-ret-3.c"
+#include "type-def.h"
+
+struct hfa_fx1_t hfa_fx1 = {12.345f};
+struct hfa_fx2_t hfa_fx2 = {123.456f, 234.456f};
+struct hfa_dx2_t hfa_dx2 = {234.567, 345.678};
+struct hfa_dx4_t hfa_dx4 = {1234.123, 2345.234, 3456.345, 4567.456};
+struct hfa_ldx3_t hfa_ldx3 = {123456.7890, 234567.8901, 345678.9012};
+struct non_hfa_fx5_t non_hfa_fx5 = {456.789f, 567.890f, 678.901f, 789.012f, 890.123f};
+struct hfa_ffs_t hfa_ffs;
+struct non_hfa_ffs_t non_hfa_ffs;
+struct non_hfa_ffs_2_t non_hfa_ffs_2;
+struct hva_vf2x1_t hva_vf2x1;
+struct hva_vi4x1_t hva_vi4x1;
+struct non_hfa_ffd_t non_hfa_ffd = {23.f, 24.f, 25.0};
+struct non_hfa_ii_t non_hfa_ii = {26, 27};
+struct non_hfa_c_t non_hfa_c = {28};
+struct non_hfa_ffvf2_t non_hfa_ffvf2;
+struct non_hfa_fffd_t non_hfa_fffd = {33.f, 34.f, 35.f, 36.0};
+union hfa_union_t hfa_union;
+union non_hfa_union_t non_hfa_union;
+
+#define HAS_DATA_INIT_FUNC
+void init_data ()
+{
+ hva_vf2x1.a = (vf2_t){17.f, 18.f};
+ hva_vi4x1.a = (vi4_t){19, 20, 21, 22};
+
+ non_hfa_ffvf2.a = 29.f;
+ non_hfa_ffvf2.b = 30.f;
+ non_hfa_ffvf2.c = (vf2_t){31.f, 32.f};
+
+ hfa_union.s.a = 37.f;
+ hfa_union.s.b = 38.f;
+ hfa_union.c = 39.f;
+
+ non_hfa_union.a = 40.0;
+ non_hfa_union.b = 41.f;
+
+ hfa_ffs.a = 42.f;
+ hfa_ffs.b = 43.f;
+ hfa_ffs.c.a = 44.f;
+ hfa_ffs.c.b = 45.f;
+
+ non_hfa_ffs.a = 46.f;
+ non_hfa_ffs.b = 47.f;
+ non_hfa_ffs.c.a = 48.0;
+ non_hfa_ffs.c.b = 49.0;
+
+ non_hfa_ffs_2.s.a = 50;
+ non_hfa_ffs_2.s.b = 51;
+ non_hfa_ffs_2.c = 52.f;
+ non_hfa_ffs_2.d = 53.f;
+}
+
+#include "abitest-2.h"
+#else
+ /* HFA returned in fp/simd registers. */
+
+FUNC_VAL_CHECK ( 0, struct hfa_fx1_t , hfa_fx1 , S0, flat)
+FUNC_VAL_CHECK ( 1, struct hfa_fx2_t , hfa_fx2 , S0, flat)
+FUNC_VAL_CHECK ( 2, struct hfa_dx2_t , hfa_dx2 , D0, flat)
+
+FUNC_VAL_CHECK ( 3, struct hfa_dx4_t , hfa_dx4 , D0, flat)
+FUNC_VAL_CHECK ( 4, struct hfa_ldx3_t, hfa_ldx3 , Q0, flat)
+FUNC_VAL_CHECK ( 5, struct hfa_ffs_t , hfa_ffs , S0, flat)
+FUNC_VAL_CHECK ( 6, union hfa_union_t, hfa_union, S0, flat)
+
+FUNC_VAL_CHECK ( 7, struct hva_vf2x1_t, hva_vf2x1, D0, flat)
+FUNC_VAL_CHECK ( 8, struct hva_vi4x1_t, hva_vi4x1, Q0, flat)
+
+ /* Non-HFA returned in general registers or via a pointer in X8. */
+FUNC_VAL_CHECK (10, struct non_hfa_fx5_t , non_hfa_fx5 , X8, flat)
+FUNC_VAL_CHECK (13, struct non_hfa_ffd_t , non_hfa_ffd , X0, flat)
+FUNC_VAL_CHECK (14, struct non_hfa_ii_t , non_hfa_ii , X0, flat)
+FUNC_VAL_CHECK (15, struct non_hfa_c_t , non_hfa_c , X0, flat)
+FUNC_VAL_CHECK (16, struct non_hfa_ffvf2_t, non_hfa_ffvf2, X0, flat)
+FUNC_VAL_CHECK (17, struct non_hfa_fffd_t , non_hfa_fffd , X8, flat)
+FUNC_VAL_CHECK (18, struct non_hfa_ffs_t , non_hfa_ffs , X8, flat)
+FUNC_VAL_CHECK (19, struct non_hfa_ffs_2_t, non_hfa_ffs_2, X0, flat)
+FUNC_VAL_CHECK (20, union non_hfa_union_t, non_hfa_union, X0, flat)
+
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-4.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-4.c
new file mode 100644
index 00000000000..af05fbe9fdf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-4.c
@@ -0,0 +1,26 @@
+/* Test AAPCS64 function result return.
+
+ This test covers complex types. Complex floating-point types are treated
+ as homogeneous floating-point aggregates, while complex integral types
+ are treated as general composite types. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+/* { dg-additional-sources "abitest.S" } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "func-ret-4.c"
+
+#include "abitest-2.h"
+#else
+  /* Complex floating-point types are returned in fp/simd registers.  */
+FUNC_VAL_CHECK ( 0, _Complex float , 12.3f + 23.4fi, S0, flat)
+FUNC_VAL_CHECK ( 1, _Complex double, 34.56 + 45.67i, D0, flat)
+FUNC_VAL_CHECK ( 2, _Complex long double, 56789.01234 + 67890.12345i, Q0, flat)
+
+  /* Complex integral types are returned in general registers or via a
+     pointer in X8.  */
+FUNC_VAL_CHECK (10, _Complex short , 12345 + 23456i, X0, flat)
+FUNC_VAL_CHECK (11, _Complex int , 34567 + 45678i, X0, flat)
+FUNC_VAL_CHECK (12, _Complex __int128, 567890 + 678901i, X8, flat)
+
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_1.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_1.c
new file mode 100644
index 00000000000..906ccebf616
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_1.c
@@ -0,0 +1,21 @@
+/* Test AAPCS layout
+
+   Passing an empty, i.e. zero-sized, small struct used to cause an
+   internal compiler error. */
+
+/* { dg-do compile { target aarch64*-*-* } } */
+
+struct AAAA
+{
+
+} aaaa;
+
+
+void named (int, struct AAAA);
+void unnamed (int, ...);
+
+void foo ()
+{
+  named (0, aaaa);
+ unnamed (0, aaaa);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_2.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_2.c
new file mode 100644
index 00000000000..8d34f270d48
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_2.c
@@ -0,0 +1,13 @@
+/* Test AAPCS layout
+
+   A vector larger than the machine-supported vector size.  The AAPCS64 leaves
+   the behaviour unspecified; the implementation opts for pass by reference. */
+
+/* { dg-do compile { target aarch64*-*-* } } */
+
+typedef char A __attribute__ ((vector_size (64)));
+
+void
+foo (A a)
+{
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_3.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_3.c
new file mode 100644
index 00000000000..fb6816f4270
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_3.c
@@ -0,0 +1,16 @@
+/* Test AAPCS layout.  */
+
+/* { dg-do compile { target aarch64*-*-* } } */
+
+#define vector __attribute__((vector_size(16)))
+
+void
+foo(int a, ...);
+
+int
+main(void)
+{
+ foo (1, (vector unsigned int){10,11,12,13},
+ 2, (vector unsigned int){20,21,22,23});
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_4.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_4.c
new file mode 100644
index 00000000000..44af079af9c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_4.c
@@ -0,0 +1,9 @@
+/* Test AAPCS layout.  */
+
+/* { dg-do compile { target aarch64*-*-* } } */
+
+__complex__ long int
+ctest_long_int(__complex__ long int x)
+{
+ return x;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_5.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_5.c
new file mode 100644
index 00000000000..da24ba8c9de
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/ice_5.c
@@ -0,0 +1,20 @@
+/* { dg-do compile { target aarch64*-*-* } } */
+
+struct S
+{
+ union
+ {
+ long double b;
+ } a;
+};
+
+struct S s;
+
+extern struct S a[5];
+extern struct S check (struct S, struct S *, struct S);
+extern void checkx (struct S);
+
+void test (void)
+{
+ checkx (check (s, &a[1], a[2]));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/macro-def.h b/gcc/testsuite/gcc.target/aarch64/aapcs64/macro-def.h
new file mode 100644
index 00000000000..72a47067631
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/macro-def.h
@@ -0,0 +1,286 @@
+/* This header file defines a set of macros to be used in the construction
+ of parameter passing and/or va_arg code gen tests during the
+ pre-processing stage. It is included inside abitest.h.
+
+ The following macros are defined here:
+
+ LAST_ARG
+ ARG
+ DOTS
+ ANON
+ LAST_ANON
+ PTR
+ PTR_ANON
+ LAST_ANONPTR
+
+ These macros are given different definitions depending on which one of
+ the following macros is defined.
+
+ AARCH64_MACRO_DEF_CHECK_INCOMING_ARGS
+ AARCH64_MACRO_DEF_GEN_PARAM_TYPE_LIST
+ AARCH64_MACRO_DEF_GEN_ARGUMENT_LIST
+ AARCH64_VARIADIC_MACRO_DEF_GEN_PARAM_TYPE_LIST
+ AARCH64_VARIADIC_MACRO_DEF_GEN_PARAM_TYPE_LIST_WITH_IDENT
+ AARCH64_VARIADIC_MACRO_DEF_ASSIGN_LOCAL_VARS_WITH_ARGS
+ AARCH64_VARIADIC_MACRO_DEF_GEN_ARGUMENT_LIST
+
+ Do not define more than one of the above macros. */
+
+
+/* AARCH64_MACRO_DEF_CHECK_INCOMING_ARGS
+ Define macros to check the incoming arguments. */
+
+#ifdef AARCH64_MACRO_DEF_CHECK_INCOMING_ARGS
+
+#undef LAST_ARG
+#undef ARG
+#undef DOTS
+#undef ANON
+#undef LAST_ANON
+#undef PTR
+#undef PTR_ANON
+#undef LAST_ANONPTR
+#undef ANON_PROMOTED
+
+/* Generate memcmp to check if the incoming args have the expected values. */
+#define LAST_ARG_NONFLAT(type, val, offset, layout, ...) \
+{ \
+ type __x = val; \
+ DUMP_ARG(type,val); \
+ if (validate_memory (&__x, stack + offset, sizeof (type), layout) != 0) \
+ abort(); \
+}
+#define LAST_ARG(type,val,offset,...) LAST_ARG_NONFLAT (type, val, offset, \
+ flat,__VA_ARGS__)
+#define ARG_NONFLAT(type,val,offset,layout,...) LAST_ARG_NONFLAT (type, val, \
+ offset, \
+ layout, \
+ __VA_ARGS__)
+#define ARG(type,val,offset,...) LAST_ARG_NONFLAT(type, val, offset, \
+ flat, __VA_ARGS__)
+#define ANON(type,val,offset,...) LAST_ARG(type, val, offset, __VA_ARGS__)
+#define LAST_ANON(type,val,offset,...) LAST_ARG(type, val, offset, __VA_ARGS__)
+#define ANON_PROMOTED(type,val,type_promoted, val_promoted, offset,...) \
+ ANON(type_promoted, val_promoted, offset, __VA_ARGS__)
+/* A composite larger than 16 bytes is replaced by a pointer to a copy
+   prepared by the caller, so here we extract the pointer, dereference it
+   and compare the content with that of the original.  */
+#define PTR(type, val, offset, ...) { \
+ type * ptr; \
+ DUMP_ARG(type,val); \
+ ptr = *(type **)(stack + offset); \
+ if (memcmp (ptr, &val, sizeof (type)) != 0) abort (); \
+}
+#define PTR_ANON(type, val, offset, ...) PTR(type, val, offset, __VA_ARGS__)
+#define LAST_ANONPTR(type, val, offset, ...) PTR(type, val, offset, __VA_ARGS__)
+#define DOTS
+
+#endif /* AARCH64_MACRO_DEF_CHECK_INCOMING_ARGS */
+
+
+/* AARCH64_MACRO_DEF_GEN_PARAM_TYPE_LIST
+ Define macros to generate parameter type list. */
+
+#ifdef AARCH64_MACRO_DEF_GEN_PARAM_TYPE_LIST
+
+#undef LAST_ARG
+#undef ARG
+#undef DOTS
+#undef ANON
+#undef LAST_ANON
+#undef PTR
+#undef PTR_ANON
+#undef LAST_ANONPTR
+
+/* Generate parameter type list (without identifiers). */
+#define LAST_ARG(type,val,offset) type
+#define LAST_ARG_NONFLAT(type, val, offset, layout) type
+#define ARG(type,val,offset) LAST_ARG(type, val, offset),
+#define ARG_NONFLAT(type, val, offset, layout) LAST_ARG (type, val, offset),
+#define DOTS ...
+#define ANON(type,val, offset)
+#define LAST_ANON(type,val, offset)
+#define PTR(type, val, offset) LAST_ARG(type, val, offset),
+#define PTR_ANON(type, val, offset)
+#define LAST_ANONPTR(type, val, offset)
+
+#endif /* AARCH64_MACRO_DEF_GEN_PARAM_TYPE_LIST */
+
+
+/* AARCH64_MACRO_DEF_GEN_ARGUMENT_LIST
+ Define macros to generate argument list. */
+
+#ifdef AARCH64_MACRO_DEF_GEN_ARGUMENT_LIST
+
+#undef LAST_ARG
+#undef ARG
+#undef DOTS
+#undef ANON
+#undef LAST_ANON
+#undef PTR
+#undef PTR_ANON
+#undef LAST_ANONPTR
+#undef ANON_PROMOTED
+
+/* Generate the argument list; use VAL as the argument name. */
+#define LAST_ARG(type,val,offset,...) val
+#define LAST_ARG_NONFLAT(type,val,offset,layout,...) val
+#define ARG(type,val,offset,...) LAST_ARG(type, val, offset, __VA_ARGS__),
+#define ARG_NONFLAT(type, val, offset, layout,...) LAST_ARG (type, val, \
+ offset, \
+ __VA_ARGS__),
+#define DOTS
+#define LAST_ANON(type,val,offset,...) LAST_ARG(type, val, offset, __VA_ARGS__)
+#define ANON(type,val,offset,...) LAST_ARG(type, val, offset, __VA_ARGS__),
+#define PTR(type, val,offset,...) LAST_ARG(type, val, offset, __VA_ARGS__),
+#define PTR_ANON(type, val,offset,...) LAST_ARG(type, val, offset, __VA_ARGS__),
+#define LAST_ANONPTR(type, val, offset,...) LAST_ARG(type, val, offset, __VA_ARGS__)
+#define ANON_PROMOTED(type,val,type_promoted, val_promoted, offset,...) \
+ LAST_ARG(type, val, offset, __VA_ARGS__),
+
+#endif /* AARCH64_MACRO_DEF_GEN_ARGUMENT_LIST */
+
+
+/* AARCH64_VARIADIC_MACRO_DEF_GEN_PARAM_TYPE_LIST
+ Define variadic macros to generate parameter type list. */
+
+#ifdef AARCH64_VARIADIC_MACRO_DEF_GEN_PARAM_TYPE_LIST
+
+#undef LAST_ARG
+#undef ARG
+#undef DOTS
+#undef ANON
+#undef LAST_ANON
+#undef PTR
+#undef PTR_ANON
+#undef LAST_ANONPTR
+#undef ANON_PROMOTED
+
+/* Generate parameter type list (without identifiers). */
+#define LAST_ARG(type,val,offset,...) type
+#define LAST_ARG_NONFLAT(type, val, offset, layout, ...) type
+#define ARG(type,val,offset,...) LAST_ARG(type, val, offset, __VA_ARGS__),
+#define ARG_NONFLAT(type, val, offset, layout, ...) LAST_ARG (type, val, \
+ offset, \
+ __VA_ARGS__),
+#define DOTS
+#define ANON(type,val, offset,...) ARG(type,val,offset, __VA_ARGS__)
+#define LAST_ANON(type,val, offset,...) LAST_ARG(type,val, offset, __VA_ARGS__)
+#define PTR(type, val, offset,...) LAST_ARG(type, val, offset, __VA_ARGS__),
+#define PTR_ANON(type, val, offset,...) PTR(type, val, offset, __VA_ARGS__)
+#define LAST_ANONPTR(type, val, offset,...) LAST_ARG(type, val, offset, __VA_ARGS__)
+#define ANON_PROMOTED(type,val,type_promoted, val_promoted, offset,...) \
+ LAST_ARG(type_promoted, val_promoted, offset, __VA_ARGS__),
+
+#endif /* AARCH64_VARIADIC_MACRO_DEF_GEN_PARAM_TYPE_LIST */
+
+
+/* AARCH64_VARIADIC_MACRO_DEF_GEN_PARAM_TYPE_LIST_WITH_IDENT
+ Define variadic macros to generate parameter type list with
+ identifiers. */
+
+#ifdef AARCH64_VARIADIC_MACRO_DEF_GEN_PARAM_TYPE_LIST_WITH_IDENT
+
+#undef LAST_ARG
+#undef ARG
+#undef DOTS
+#undef ANON
+#undef LAST_ANON
+#undef PTR
+#undef PTR_ANON
+#undef LAST_ANONPTR
+#undef ANON_PROMOTED
+
+/* Generate parameter type list (with identifiers).
+   The identifiers are prefixed with _f and suffixed with the value of
+   __VA_ARGS__.  */
+#define LAST_ARG(type,val,offset,...) type _f##__VA_ARGS__
+#define LAST_ARG_NONFLAT(type, val, offset, layout, ...) type _f##__VA_ARGS__
+#define ARG(type,val,offset,...) LAST_ARG(type, val, offset, __VA_ARGS__),
+#define ARG_NONFLAT(type, val, offset, layout, ...) LAST_ARG (type, val, \
+ offset, \
+ __VA_ARGS__),
+#define DOTS ...
+#define ANON(type,val, offset,...)
+#define LAST_ANON(type,val, offset,...)
+#define PTR(type, val, offset,...) LAST_ARG(type, val, offset, __VA_ARGS__),
+#define PTR_ANON(type, val, offset,...)
+#define LAST_ANONPTR(type, val, offset,...)
+#define ANON_PROMOTED(type,val,type_promoted, val_promoted, offset,...)
+
+#endif /* AARCH64_VARIADIC_MACRO_DEF_GEN_PARAM_TYPE_LIST_WITH_IDENT */
+
+
+/* AARCH64_VARIADIC_MACRO_DEF_ASSIGN_LOCAL_VARS_WITH_ARGS
+ Define variadic macros to generate assignment from the function
+ incoming arguments to local variables. */
+
+#ifdef AARCH64_VARIADIC_MACRO_DEF_ASSIGN_LOCAL_VARS_WITH_ARGS
+
+#undef LAST_ARG
+#undef ARG
+#undef DOTS
+#undef ANON
+#undef LAST_ANON
+#undef PTR
+#undef PTR_ANON
+#undef LAST_ANONPTR
+#undef ANON_PROMOTED
+
+/* Generate assignment statements. For named args, direct assignment from
+ the formal parameter is generated; for unnamed args, va_arg is used.
+ The names of the local variables start with _x and end with the value of
+ __VA_ARGS__. */
+#define LAST_ARG(type,val,offset,...) type _x##__VA_ARGS__ = _f##__VA_ARGS__;
+#define LAST_ARG_NONFLAT(type, val, offset, layout, ...) \
+ type _x##__VA_ARGS__ = _f##__VA_ARGS__;
+#define ARG(type,val,offset,...) LAST_ARG(type, val, offset, __VA_ARGS__)
+#define ARG_NONFLAT(type,val,offset,layout,...) \
+ LAST_ARG (type, val, offset, __VA_ARGS__)
+#define ANON(type,val,offset,...) type _x##__VA_ARGS__ = va_arg (ap, type);
+#define LAST_ANON(type,val,offset,...) ANON(type, val, offset, __VA_ARGS__)
+#define PTR(type, val,offset,...) ARG(type, val, offset, __VA_ARGS__)
+#define PTR_ANON(type, val, offset,...) ANON(type, val,offset, __VA_ARGS__)
+#define LAST_ANONPTR(type, val, offset,...) ANON(type, val, offset, __VA_ARGS__)
+#define ANON_PROMOTED(type,val,type_promoted, val_promoted, offset,...) \
+ ANON(type_promoted, val_promoted, offset, __VA_ARGS__)
+
+#define DOTS
+
+#endif /* AARCH64_VARIADIC_MACRO_DEF_ASSIGN_LOCAL_VARS_WITH_ARGS */
+
+
+/* AARCH64_VARIADIC_MACRO_DEF_GEN_ARGUMENT_LIST
+ Define variadic macros to generate argument list using the variables
+ generated during AARCH64_VARIADIC_MACRO_DEF_ASSIGN_LOCAL_VARS_WITH_ARGS. */
+
+#ifdef AARCH64_VARIADIC_MACRO_DEF_GEN_ARGUMENT_LIST
+
+#undef LAST_ARG
+#undef ARG
+#undef DOTS
+#undef ANON
+#undef LAST_ANON
+#undef PTR
+#undef PTR_ANON
+#undef LAST_ANONPTR
+#undef ANON_PROMOTED
+
+/* Generate the argument list; the names start with _x and end with the value of
+ __VA_ARGS__. All arguments (named or unnamed) in stdarg_func are passed to
+ myfunc as named arguments. */
+#define LAST_ARG(type,val,offset,...) _x##__VA_ARGS__
+#define LAST_ARG_NONFLAT(type, val, offset, layout, ...) _x##__VA_ARGS__
+#define ARG(type,val,offset,...) LAST_ARG(type, val, offset, __VA_ARGS__),
+#define ARG_NONFLAT(type, val, offset, layout, ...) \
+ LAST_ARG_NONFLAT (type, val, offset, layout, __VA_ARGS__),
+#define DOTS
+#define LAST_ANON(type,val,offset,...) LAST_ARG(type, val, offset, __VA_ARGS__)
+#define ANON(type,val,offset,...) LAST_ARG(type, val, offset, __VA_ARGS__),
+#define PTR(type, val,offset,...) LAST_ARG(type, val, offset, __VA_ARGS__),
+#define PTR_ANON(type, val,offset,...) LAST_ARG(type, val, offset, __VA_ARGS__),
+#define LAST_ANONPTR(type, val, offset,...) LAST_ARG(type, val, offset, __VA_ARGS__)
+#define ANON_PROMOTED(type,val,type_promoted, val_promoted, offset,...) \
+ ANON(type_promoted, val_promoted, offset, __VA_ARGS__)
+
+#endif /* AARCH64_VARIADIC_MACRO_DEF_GEN_ARGUMENT_LIST */
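
As an illustration of the multiple-inclusion trick above, a test body
consisting of

    ARG (int, 4, W0)
    LAST_ARG (double, 4.0, D0)

expands roughly as follows under three of the regimes (a sketch):

    /* Parameter type list regime:  */  int, double
    /* Argument list regime:        */  4, 4.0
    /* Incoming-arg check regime: two validate_memory calls, against
       stack + W0 and stack + D0 respectively (see LAST_ARG_NONFLAT).  */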
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_1.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_1.c
new file mode 100644
index 00000000000..545b0568512
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_1.c
@@ -0,0 +1,31 @@
+/* Test AAPCS64 layout */
+
+/* C.7 If the argument is an Integral Type, the size of the argument is
+ less than or equal to 8 bytes and the NGRN is less than 8, the
+ argument is copied to the least significant bits in x[NGRN]. The
+ NGRN is incremented by one. The argument has now been allocated. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "test_1.c"
+/* TODO: review if we need this */
+#define RUNTIME_ENDIANNESS_CHECK
+#include "abitest.h"
+#else
+ ARG(int, 4, W0)
+ ARG(double, 4.0, D0)
+ ARG(int, 3, W1)
+  /* TODO: review the way char, short, etc. are compared via memcpy. */
+#ifndef __AAPCS64_BIG_ENDIAN__
+ ARG(char, 0xEF, X2)
+ ARG(short, 0xBEEF, X3)
+ ARG(int, 0xDEADBEEF, X4)
+#else
+ /* TODO: need the model/qemu to be big-endian as well */
+ ARG(char, 0xEF, X2+7)
+ ARG(short, 0xBEEF, X3+6)
+ ARG(int, 0xDEADBEEF, X4+4)
+#endif
+ LAST_ARG(long long, 0xDEADBEEFCAFEBABELL, X5)
+#endif
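
The big-endian offsets above follow from rules C.5/C.7: a small integral
argument occupies the least significant bits of its 64-bit slot, so on a
big-endian system it sits at the high end of the slot. In C terms (a sketch;
BIG_ENDIAN_P is a hypothetical predicate, not part of the framework):

    /* Byte offset of a small integral arg within its 8-byte slot.  */
    #define SLOT_OFFSET(slot, type) \
      ((slot) + (BIG_ENDIAN_P ? 8 - sizeof (type) : 0))
    /* e.g. char in x2 -> X2+7, short in x3 -> X3+6, int in x4 -> X4+4.  */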
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_10.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_10.c
new file mode 100644
index 00000000000..c2f48154a0a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_10.c
@@ -0,0 +1,26 @@
+/* Test AAPCS layout (VFP variant) */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define VFP
+#define TESTFILE "test_10.c"
+
+struct z
+{
+ double x[4];
+};
+
+struct z a = { 5.0, 6.0, 7.0, 8.0 };
+struct z b = { 9.0, 10.0, 11.0, 12.0 };
+
+#include "abitest.h"
+#else
+
+ ARG(int, 7, W0)
+ DOTS
+ ANON(struct z, a, D0)
+ ANON(struct z, b, D4)
+ ANON(double, 0.5, STACK)
+ LAST_ANON(double, 1.5, STACK+8)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_11.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_11.c
new file mode 100644
index 00000000000..34cbe0303b9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_11.c
@@ -0,0 +1,34 @@
+/* Test AAPCS layout (VFP variant) */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define VFP
+#define TESTFILE "test_11.c"
+
+__complex__ x = 1.0+2.0i;
+
+struct y
+{
+ int p;
+ int q;
+ int r;
+ int s;
+} v = { 1, 2, 3, 4 };
+
+struct z
+{
+ double x[4];
+};
+
+struct z a = { 5.0, 6.0, 7.0, 8.0 };
+struct z b = { 9.0, 10.0, 11.0, 12.0 };
+
+#include "abitest.h"
+#else
+ ARG(double, 11.0, D0)
+ DOTS
+ ANON(struct z, a, D1)
+ ANON(struct z, b, STACK)
+ LAST_ANON(double, 0.5, STACK+32)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_12.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_12.c
new file mode 100644
index 00000000000..d07bef8b8f5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_12.c
@@ -0,0 +1,44 @@
+/* Test AAPCS layout (VFP variant) */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define VFP
+#define TESTFILE "test_12.c"
+
+
+struct y
+{
+ long p;
+ long q;
+ long r;
+ long s;
+} v = { 1, 2, 3, 4 };
+
+struct y1
+{
+ int p;
+ int q;
+ int r;
+ int s;
+} v1 = { 1, 2, 3, 4 };
+
+
+struct z
+{
+ double x[4];
+};
+
+struct z a = { 5.0, 6.0, 7.0, 8.0 };
+struct z b = { 9.0, 10.0, 11.0, 12.0 };
+
+#define MYFUNCTYPE struct y
+
+#include "abitest.h"
+#else
+ ARG(int, 7, W0)
+ ARG(struct y1, v1, X1)
+ ARG(struct z, a, D0)
+ ARG(struct z, b, D4)
+ LAST_ARG(double, 0.5, STACK)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_13.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_13.c
new file mode 100644
index 00000000000..c73e6f2f9fb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_13.c
@@ -0,0 +1,34 @@
+/* Test AAPCS layout (VFP variant) */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+
+#define TESTFILE "test_13.c"
+
+
+struct y
+{
+ int p;
+ int q;
+ int r;
+ int s;
+} v = { 1, 2, 3, 4 };
+
+struct z
+{
+ double x[4];
+};
+
+struct z a = { 5.0, 6.0, 7.0, 8.0 };
+struct z b = { 9.0, 10.0, 11.0, 12.0 };
+
+#include "abitest.h"
+#else
+ ARG(int, 7, W0)
+ ARG(struct y, v, X1)
+ ARG(struct z, a, D0)
+ ARG(double, 1.0, D4)
+ ARG(struct z, b, STACK)
+ LAST_ARG(double, 0.5, STACK+32)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_14.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_14.c
new file mode 100644
index 00000000000..3c22b8a0456
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_14.c
@@ -0,0 +1,35 @@
+/* Test AAPCS layout (VFP variant) */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define VFP
+#define TESTFILE "test_14.c"
+
+
+struct y
+{
+ int p;
+ int q;
+ int r;
+ int s;
+} v = { 1, 2, 3, 4 };
+
+struct z
+{
+ double x[4];
+};
+
+struct z a = { 5.0, 6.0, 7.0, 8.0 };
+struct z b = { 9.0, 10.0, 11.0, 12.0 };
+
+#include "abitest.h"
+#else
+ ARG(int, 7, W0)
+ ARG(int, 9, W1)
+ ARG(struct z, a, D0)
+ ARG(double, 1.0, D4)
+ ARG(struct z, b, STACK)
+ ARG(int, 4, W2)
+ LAST_ARG(double, 0.5, STACK+32)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_15.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_15.c
new file mode 100644
index 00000000000..1a869ad772f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_15.c
@@ -0,0 +1,21 @@
+/* Test AAPCS layout (VFP variant) */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define VFP
+#define TESTFILE "test_15.c"
+
+#include "abitest.h"
+#else
+ ARG(double, 1.0, D0)
+ ARG(double, 2.0, D1)
+ ARG(double, 3.0, D2)
+ ARG(double, 4.0, D3)
+ ARG(double, 5.0, D4)
+ ARG(double, 6.0, D5)
+ ARG(double, 7.0, D6)
+ ARG(double, 8.0, D7)
+ ARG(double, 9.0, STACK)
+ LAST_ARG(double, 10.0, STACK+8)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_16.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_16.c
new file mode 100644
index 00000000000..1aa9725fd64
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_16.c
@@ -0,0 +1,32 @@
+/* Test AAPCS layout */
+/* C.5 If the argument is a Half- or Single- precision Floating-point type,
+ then the size of the argument is set to 8 bytes. The effect is as if
+ the argument had been copied to the least significant bits of a 64-bit
+ register and the remaining bits filled with unspecified values. */
+/* TODO: add the check of half-precision floating-point when it is supported
+ by the A64 GCC. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define VFP
+#define TESTFILE "test_16.c"
+
+#include "abitest.h"
+#else
+ ARG(float, 1.0, S0)
+ ARG(float, 2.0, S1)
+ ARG(float, 3.0, S2)
+ ARG(float, 4.0, S3)
+ ARG(float, 5.0, S4)
+ ARG(float, 6.0, S5)
+ ARG(float, 7.0, S6)
+ ARG(float, 8.0, S7)
+#ifndef __AAPCS64_BIG_ENDIAN__
+ ARG(float, 9.0, STACK)
+ LAST_ARG(float, 10.0, STACK+8)
+#else
+ ARG(float, 9.0, STACK+4)
+ LAST_ARG(float, 10.0, STACK+12)
+#endif
+#endif
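
A rough standalone sketch of rule C.5 (assumed function name; the register
and stack notes restate the rule and are not checked by the code itself):

  /* After s0-s7 are exhausted, each further float still consumes a full
     8-byte stack slot, so the tenth float sits at NSAA+8 even though
     sizeof (float) == 4.  Little-endian: low 4 bytes of each slot;
     big-endian: bytes 4-7, matching the STACK+4/STACK+12 offsets above.  */
  float
  sum10 (float a, float b, float c, float d, float e, float f,
         float g, float h, float i, float j)
  {
    /* a..h arrive in s0-s7; i at [sp], j at [sp + 8].  */
    return a + b + c + d + e + f + g + h + i + j;
  }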
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_17.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_17.c
new file mode 100644
index 00000000000..348ea284760
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_17.c
@@ -0,0 +1,37 @@
+/* Test AAPCS layout (VFP variant) */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define VFP
+#define TESTFILE "test_17.c"
+
+__complex__ x = 1.0+2.0i;
+
+struct y
+{
+ int p;
+ int q;
+ int r;
+ int s;
+} v = { 1, 2, 3, 4 };
+
+struct z
+{
+ double x[4];
+};
+
+float f1 = 25.0;
+struct z a = { 5.0, 6.0, 7.0, 8.0 };
+struct z b = { 9.0, 10.0, 11.0, 12.0 };
+
+#include "abitest.h"
+#else
+ ARG(double, 11.0, D0)
+ DOTS
+ ANON(struct z, a, D1)
+ ANON(struct z, b, STACK)
+ ANON(int , 5, W0)
+ ANON(double, f1, STACK+32)
+ LAST_ANON(double, 0.5, STACK+40)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_18.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_18.c
new file mode 100644
index 00000000000..b611e9b27fa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_18.c
@@ -0,0 +1,34 @@
+/* Test AAPCS layout (VFP variant) */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+
+#define TESTFILE "test_18.c"
+
+
+struct y
+{
+ long p;
+ long q;
+ long r;
+ long s;
+} v = { 1, 2, 3, 4 };
+
+struct z
+{
+ double x[4];
+};
+
+struct z a = { 5.0, 6.0, 7.0, 8.0 };
+struct z b = { 9.0, 10.0, 11.0, 12.0 };
+
+#include "abitest.h"
+#else
+ ARG(int, 7, W0)
+ PTR(struct y, v, X1)
+ ARG(struct z, a, D0)
+ ARG(double, 1.0, D4)
+ ARG(struct z, b, STACK)
+ LAST_ARG(double, 0.5, STACK+32)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_19.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_19.c
new file mode 100644
index 00000000000..1a3f873b301
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_19.c
@@ -0,0 +1,35 @@
+/* Test AAPCS64 layout. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "test_19.c"
+
+struct y
+{
+ int p1;
+ int p2;
+ float q;
+ int r1;
+ int r2;
+ char x;
+} v = { -1, 1, 2.0f, 3, 18, 19 };
+
+struct z
+{
+ double x[4];
+};
+
+struct z a = { 5.0, 6.0, 7.0, 8.0 };
+struct z b = { 9.0, 10.0, 11.0, 12.0 };
+
+#include "abitest.h"
+#else
+ ARG(int, 7, W0)
+ DOTS
+ ANON(double, 4.0, D0)
+ ANON(struct z, a, D1)
+ ANON(struct z, b, STACK)
+ PTR_ANON(struct y, v, X1)
+ LAST_ANON(int, 10, W2)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_2.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_2.c
new file mode 100644
index 00000000000..94817ede3e1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_2.c
@@ -0,0 +1,16 @@
+/* Test AAPCS64 layout */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define VFP
+#define TESTFILE "test_2.c"
+#include "abitest.h"
+
+#else
+ ARG(float, 1.0f, S0)
+ ARG(double, 4.0, D1)
+ ARG(float, 2.0f, S2)
+ ARG(double, 5.0, D3)
+ LAST_ARG(int, 3, W0)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_20.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_20.c
new file mode 100644
index 00000000000..e4cc1a1b5dd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_20.c
@@ -0,0 +1,22 @@
+/* Test AAPCS64 layout */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "test_20.c"
+
+#include "abitest.h"
+
+#else
+ ARG(int, 8, W0)
+ ARG(double, 1.0, D0)
+ ARG(double, 2.0, D1)
+ ARG(double, 3.0, D2)
+ ARG(double, 4.0, D3)
+ ARG(double, 5.0, D4)
+ ARG(double, 6.0, D5)
+ ARG(double, 7.0, D6)
+ DOTS
+ ANON(_Complex double, 1234.0 + 567.0i, STACK)
+ LAST_ANON(double, -987.0, STACK+16)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_21.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_21.c
new file mode 100644
index 00000000000..b3a75e0256b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_21.c
@@ -0,0 +1,21 @@
+/* Test AAPCS64 layout */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "test_21.c"
+
+#include "abitest.h"
+
+#else
+ ARG(int, 8, W0)
+ ARG(double, 1.0, D0)
+ ARG(double, 2.0, D1)
+ ARG(double, 3.0, D2)
+ ARG(double, 4.0, D3)
+ ARG(double, 5.0, D4)
+ ARG(double, 6.0, D5)
+ ARG(double, 7.0, D6)
+ ARG(_Complex double, 1234.0 + 567.0i, STACK)
+ LAST_ARG(double, -987.0, STACK+16)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_22.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_22.c
new file mode 100644
index 00000000000..cb8a8abc06b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_22.c
@@ -0,0 +1,19 @@
+/* Test AAPCS64 layout */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "test_22.c"
+
+struct y
+{
+ float p;
+ float q;
+} v = { 345.0f, 678.0f };
+
+#include "abitest.h"
+#else
+ ARG(float, 123.0f, S0)
+ ARG(struct y, v, S1)
+ LAST_ARG(float, 901.0f, S3)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_23.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_23.c
new file mode 100644
index 00000000000..6993884c0e7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_23.c
@@ -0,0 +1,42 @@
+/* Test AAPCS64 layout.
+
+   The argument is larger than any machine-supported vector size.  The AAPCS64
+   document leaves this unspecified; the implementation opts to pass it by reference. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "test_23.c"
+
+typedef char A __attribute__ ((vector_size (64)));
+
+struct y
+{
+ double df[8];
+};
+
+union u
+{
+ struct y x;
+ A a;
+} u;
+
+#define HAS_DATA_INIT_FUNC
+void init_data ()
+{
+ u.x.df[0] = 1.0;
+ u.x.df[1] = 2.0;
+ u.x.df[2] = 3.0;
+ u.x.df[3] = 4.0;
+ u.x.df[4] = 5.0;
+ u.x.df[5] = 6.0;
+ u.x.df[6] = 7.0;
+ u.x.df[7] = 8.0;
+}
+
+#include "abitest.h"
+#else
+ARG (float, 123.0f, S0)
+PTR (A, u.a, X0)
+LAST_ARG_NONFLAT (int, 0xdeadbeef, X1, i32in64)
+#endif
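
A minimal sketch of the pass-by-reference choice described above (the type
and function names are hypothetical; the comments record the expected
behaviour rather than test it):

  /* A 64-byte vector exceeds any AArch64 register, so the caller is
     expected to place a copy in memory and pass its address in a
     general register.  */
  typedef char big_vec __attribute__ ((vector_size (64)));

  big_vec
  identity (big_vec v)
  {
    return v;  /* The result likewise travels through memory.  */
  }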
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_24.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_24.c
new file mode 100644
index 00000000000..8655f6f3efc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_24.c
@@ -0,0 +1,22 @@
+/* Test AAPCS64 layout. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "test_24.c"
+
+typedef long double TFtype;
+
+#include "abitest.h"
+#else
+ ARG(TFtype, 1.0, Q0)
+ ARG(TFtype, 2.0, Q1)
+ ARG(TFtype, 3.0, Q2)
+ ARG(TFtype, 4.0, Q3)
+ ARG(TFtype, 5.0, Q4)
+ ARG(TFtype, 6.0, Q5)
+ ARG(TFtype, 7.0, Q6)
+ ARG(TFtype, 8.0, Q7)
+ ARG(double, 9.0, STACK)
+ LAST_ARG(TFtype, 10.0, STACK+16)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_25.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_25.c
new file mode 100644
index 00000000000..2f942ff4d10
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_25.c
@@ -0,0 +1,61 @@
+/* Test AAPCS64 layout
+
+ Test homogeneous floating-point aggregates and homogeneous short-vector
+ aggregates, which should be passed in SIMD/FP registers or via the
+ stack. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "test_25.c"
+
+typedef float vf2_t __attribute__((vector_size (8)));
+struct x0
+{
+ vf2_t v;
+} s0;
+struct x3
+{
+ vf2_t v[2];
+} s3;
+struct x4
+{
+ vf2_t v[3];
+} s4;
+
+typedef float vf4_t __attribute__((vector_size(16)));
+struct x1
+{
+ vf4_t v;
+} s1;
+
+struct x2
+{
+ double df[3];
+} s2;
+
+#define HAS_DATA_INIT_FUNC
+void init_data ()
+{
+ s0.v = (vf2_t){ 17.f, 18.f };
+ s1.v = (vf4_t){ 567.890f, 678.901f, 789.012f, 890.123f };
+ s2.df[0] = 123.456;
+ s2.df[1] = 234.567;
+ s2.df[2] = 345.678;
+  s3.v[0] = (vf2_t){ 19.f, 20.f };
+  s3.v[1] = (vf2_t){ 23.f, 24.f };
+  s4.v[0] = (vf2_t){ 27.f, 28.f };
+  s4.v[1] = (vf2_t){ 31.f, 32.f };
+  s4.v[2] = (vf2_t){ 35.f, 36.f };
+}
+
+#include "abitest.h"
+#else
+ARG_NONFLAT (struct x0, s0, Q0, f32in64)
+ARG (struct x2, s2, D1)
+ARG (struct x1, s1, Q4)
+ARG (struct x3, s3, D5)
+ARG (struct x4, s4, STACK)
+ARG_NONFLAT (int, 0xdeadbeef, X0, i32in64)
+LAST_ARG (double, 456.789, STACK+24)
+#endif
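
A standalone sketch of the HFA allocation this test exercises (hypothetical
names; the register notes are AAPCS64 expectations, not assertions checked
by the snippet):

  /* An HFA of up to four members takes that many consecutive FP
     registers, or goes to the stack as a whole once they run out.  */
  struct hfa3 { double a, b, c; };

  extern void use_hfa (struct hfa3 x, struct hfa3 y, struct hfa3 z);

  void
  demo (void)
  {
    struct hfa3 s = { 1.0, 2.0, 3.0 };
    /* x -> d0-d2, y -> d3-d5; z needs three FP registers but only
       d6-d7 remain, so z is placed entirely on the stack.  */
    use_hfa (s, s, s);
  }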
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_26.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_26.c
new file mode 100644
index 00000000000..9b9a3a4804f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_26.c
@@ -0,0 +1,54 @@
+/* Test AAPCS64 layout.
+
+ Test some small structures that should be passed in GPRs. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "test_26.c"
+
+struct y0
+{
+ char ch;
+} c0 = { 'A' };
+
+struct y2
+{
+ long long ll[2];
+} c2 = { 0xDEADBEEF, 0xCAFEBABE };
+
+struct y3
+{
+ int i[3];
+} c3 = { 56789, 67890, 78901 };
+
+typedef float vf2_t __attribute__((vector_size (8)));
+struct x0
+{
+ vf2_t v;
+} s0;
+
+typedef short vh4_t __attribute__((vector_size (8)));
+
+struct x1
+{
+ vh4_t v[2];
+} s1;
+
+#define HAS_DATA_INIT_FUNC
+void init_data ()
+{
+ s0.v = (vf2_t){ 17.f, 18.f };
+ s1.v[0] = (vh4_t){ 345, 456, 567, 678 };
+ s1.v[1] = (vh4_t){ 789, 890, 901, 123 };
+}
+
+#include "abitest.h"
+#else
+ARG (struct y0, c0, X0)
+ARG (struct y2, c2, X1)
+ARG (struct y3, c3, X3)
+ARG_NONFLAT (struct x0, s0, D0, f32in64)
+ARG (struct x1, s1, D1)
+LAST_ARG_NONFLAT (int, 89012, X5, i32in64)
+#endif
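
A sketch of the general-register rule for small composites (hypothetical
names; the expected assignment in the comments mirrors the rule the test
checks rather than anything the snippet verifies):

  /* A composite of N <= 16 bytes is passed in ceil (N / 8) general
     registers, whatever its member types.  */
  struct one_byte { char c; };   /* 1 byte   -> one X register.   */
  struct twelve { int i[3]; };   /* 12 bytes -> two X registers.  */

  extern void take (struct one_byte a, struct twelve b);

  void
  demo (void)
  {
    struct one_byte a = { 'A' };
    struct twelve b = { { 1, 2, 3 } };
    take (a, b);  /* Expected: a -> x0, b -> x1 and the low word of x2.  */
  }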
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_3.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_3.c
new file mode 100644
index 00000000000..f05b8e659cb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_3.c
@@ -0,0 +1,18 @@
+/* Test AAPCS layout (VFP variant) */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define VFP
+#define TESTFILE "test_3.c"
+
+__complex__ x = 1.0+2.0i;
+
+#include "abitest.h"
+#else
+ARG (float, 1.0f, S0)
+ARG (__complex__ double, x, D1)
+ARG (float, 2.0f, S3)
+ARG (double, 5.0, D4)
+LAST_ARG_NONFLAT (int, 3, X0, i32in64)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_4.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_4.c
new file mode 100644
index 00000000000..a37db569bc8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_4.c
@@ -0,0 +1,17 @@
+/* Test AAPCS layout (VFP variant) */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define VFP
+#define TESTFILE "test_4.c"
+
+__complex__ float x = 1.0f + 2.0fi;
+#include "abitest.h"
+#else
+ARG (float, 1.0f, S0)
+ARG (__complex__ float, x, S1)
+ARG (float, 2.0f, S3)
+ARG (double, 5.0, D4)
+LAST_ARG_NONFLAT (int, 3, X0, i32in64)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_5.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_5.c
new file mode 100644
index 00000000000..674efd8c25e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_5.c
@@ -0,0 +1,24 @@
+/* Test AAPCS64 layout */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define VFP
+#define TESTFILE "test_5.c"
+
+__complex__ float x = 1.0+2.0i;
+
+struct y
+{
+ long p;
+ long q;
+} v = { 1, 2};
+
+#include "abitest.h"
+#else
+ ARG(float, 1.0f, S0)
+ ARG(__complex__ float, x, S1)
+ ARG(float, 2.0f, S3)
+ ARG(double, 5.0, D4)
+ LAST_ARG(struct y, v, X0)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_6.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_6.c
new file mode 100644
index 00000000000..95d44e9232e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_6.c
@@ -0,0 +1,26 @@
+/* Test AAPCS layout (VFP variant) */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "test_6.c"
+
+__complex__ double x = 1.0+2.0i;
+
+struct y
+{
+ int p;
+ int q;
+ int r;
+ int s;
+} v = { 1, 2, 3, 4 };
+
+#include "abitest.h"
+#else
+ ARG(struct y, v, X0)
+ ARG(float, 1.0f, S0)
+ ARG(__complex__ double, x, D1)
+ ARG(float, 2.0f, S3)
+ ARG(double, 5.0, D4)
+ LAST_ARG(int, 3, W2)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_7.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_7.c
new file mode 100644
index 00000000000..4fb1feeaf7e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_7.c
@@ -0,0 +1,30 @@
+/* Test AAPCS layout (VFP variant) */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "test_7.c"
+
+__complex__ float x = 1.0f + 2.0i;
+
+struct y
+{
+ int p;
+ int q;
+ int r;
+ int s;
+} v = { 1, 2, 3, 4 }, v1 = {5, 6, 7, 8}, v2 = {9, 10, 11, 12};
+
+#include "abitest.h"
+#else
+ARG (struct y, v, X0)
+ARG (struct y, v1, X2)
+ARG (struct y, v2, X4)
+ARG (int, 4, W6)
+ARG (float, 1.0f, S0)
+ARG (__complex__ float, x, S1)
+ARG (float, 2.0f, S3)
+ARG (double, 5.0, D4)
+ARG (int, 3, W7)
+LAST_ARG_NONFLAT (int, 5, STACK, i32in64)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_8.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_8.c
new file mode 100644
index 00000000000..3d67ff50844
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_8.c
@@ -0,0 +1,24 @@
+/* Test AAPCS layout (VFP variant) */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define VFP
+#define TESTFILE "test_8.c"
+
+struct z
+{
+ double x[4];
+};
+
+struct z a = { 5.0, 6.0, 7.0, 8.0 };
+struct z b = { 9.0, 10.0, 11.0, 12.0 };
+
+#include "abitest.h"
+#else
+ ARG(struct z, a, D0)
+ ARG(struct z, b, D4)
+ ARG(double, 0.5, STACK)
+ ARG(int, 7, W0)
+ LAST_ARG(int, 8, W1)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_9.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_9.c
new file mode 100644
index 00000000000..fbe42456ca3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_9.c
@@ -0,0 +1,32 @@
+/* Test AAPCS layout (VFP variant) */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define VFP
+#define TESTFILE "test_9.c"
+
+struct y
+{
+ int p;
+ int q;
+ int r;
+ int s;
+} v = { 1, 2, 3, 4 };
+
+struct z
+{
+ double x[4];
+};
+
+struct z a = { 5.0, 6.0, 7.0, 8.0 };
+struct z b = { 9.0, 10.0, 11.0, 12.0 };
+
+#include "abitest.h"
+#else
+ ARG(int, 7, W0)
+ ARG(struct y, v, X1)
+ ARG(struct z, a, D0)
+ ARG(struct z, b, D4)
+ LAST_ARG(double, 0.5, STACK)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_align-1.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_align-1.c
new file mode 100644
index 00000000000..f22fca6deb2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_align-1.c
@@ -0,0 +1,126 @@
+/* Test AAPCS64 layout.
+
+   Test conformance to the alignment and padding requirements.
+
+ B.4 If the argument type is a Composite Type then the size of the
+ argument is rounded up to the nearest multiple of 8 bytes.
+ C.4 If the argument is an HFA, a Quad-precision Floating-point or Short
+ Vector Type then the NSAA is rounded up to the larger of 8 or the
+ Natural Alignment of the argument's type.
+ C.12 The NSAA is rounded up to the larger of 8 or the Natural Alignment
+ of the argument's type.
+   C.14 If the size of the argument is less than 8 bytes then the size of
+	the argument is set to 8 bytes.  The effect is as if the argument
+	had been copied to the least significant bits of a 64-bit register
+	and the remaining bits filled with unspecified values. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "test_align-1.c"
+#include "type-def.h"
+
+struct y
+{
+ int p;
+ int q;
+ int r;
+ int s;
+};
+
+struct y v1 = { 1, 2, 3, 4 };
+struct y v2 = { 5, 6, 7, 8 };
+struct y v3 = { 9, 10, 11, 12 };
+struct y v4 = { 13, 14, 15, 16 };
+
+struct z
+{
+ double x[4];
+};
+
+struct z a = { 5.0, 6.0, 7.0, 8.0 };
+struct z b = { 9.0, 10.0, 11.0, 12.0 };
+
+vf4_t c = { 13.f, 14.f, 15.f, 16.f };
+
+struct x
+{
+ vf4_t v;
+} w;
+
+char ch='a';
+short sh=13;
+int i=14;
+long long ll=15;
+
+struct s1
+{
+ short sh[3];
+} s1;
+
+struct s2
+{
+ int i[2];
+ char c;
+} s2;
+
+struct ldx2_t
+{
+ long double ld[2];
+} ldx2 = { 12345.67890L, 23456.78901L };
+
+union u_t
+{
+ long double ld;
+ double d[2];
+} u;
+
+#define HAS_DATA_INIT_FUNC
+void init_data ()
+{
+ w.v = (vf4_t){ 17.f, 18.f, 19.f, 20.f };
+ s1.sh[0] = 16;
+ s1.sh[1] = 17;
+ s1.sh[2] = 18;
+ s2.i[0] = 19;
+ s2.i[1] = 20;
+ s2.c = 21;
+ u.ld = 34567.89012L;
+}
+
+#include "abitest.h"
+#else
+
+ ARG(struct y, v1, X0)
+ ARG(struct y, v2, X2)
+ ARG(struct y, v3, X4)
+ ARG(struct y, v4, X6)
+ ARG(struct z, a, D0)
+ ARG(struct z, b, D4)
+ ARG(double, 12.5, STACK)
+ ARG(vf4_t, c, STACK+16) /* [C.4] 16-byte aligned short vector */
+ ARG(double, 17.0, STACK+32)
+ ARG(struct x, w, STACK+48) /* [C.12] 16-byte aligned small struct */
+#ifndef __AAPCS64_BIG_ENDIAN__
+ ARG(char, ch, STACK+64) /* [C.14] char padded to the size of 8 bytes */
+ ARG(short, sh, STACK+72) /* [C.14] short padded to the size of 8 bytes */
+ ARG(int, i, STACK+80) /* [C.14] int padded to the size of 8 bytes */
+#else
+ ARG(char, ch, STACK+71)
+ ARG(short, sh, STACK+78)
+ ARG(int, i, STACK+84)
+#endif
+ ARG(long long, ll, STACK+88)
+ ARG(struct s1, s1, STACK+96) /* [B.4] small struct padded to the size of 8 bytes */
+ ARG(double, 18.0, STACK+104)
+ ARG(struct s2, s2, STACK+112) /* [B.4] small struct padded to the size of 16 bytes */
+ ARG(double, 19.0, STACK+128)
+ ARG(long double, 30.0L, STACK+144) /* [C.4] 16-byte aligned quad-precision */
+ ARG(double, 31.0, STACK+160)
+ ARG(struct ldx2_t, ldx2, STACK+176) /* [C.4] 16-byte aligned HFA */
+ ARG(double, 32.0, STACK+208)
+ ARG(__int128, 33, STACK+224) /* [C.12] 16-byte aligned 128-bit integer */
+ ARG(double, 34.0, STACK+240)
+ ARG(union u_t, u, STACK+256) /* [C.12] 16-byte aligned small composite (union in this case) */
+ LAST_ARG_NONFLAT (int, 35.0, STACK+272, i32in64)
+#endif
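
A compact sketch of the C.12 rounding on its own (assumed names; the offset
notes restate the rule text above and are not checked by the snippet):

  /* Once arguments spill to the stack, a 16-byte-aligned argument skips
     any odd 8-byte slot: on_stack lands at NSAA+0, leaving the NSAA at
     8, which is rounded up to 16 before q is placed.  */
  extern void spill (long a0, long a1, long a2, long a3,
                     long a4, long a5, long a6, long a7,
                     long on_stack, __int128 q);

  void
  demo (void)
  {
    spill (0, 1, 2, 3, 4, 5, 6, 7, 8, (__int128) 9);
  }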
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_align-2.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_align-2.c
new file mode 100644
index 00000000000..6c61948b16f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_align-2.c
@@ -0,0 +1,42 @@
+/* Test AAPCS64 layout.
+
+   C.8 If the argument has an alignment of 16 then the NGRN is rounded
+       up to the next even number.
+
+       The case of a small structure containing only one 16-byte aligned
+       quad-word integer is covered in this test. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "test_align-2.c"
+#include "type-def.h"
+
+struct y
+{
+ union int128_t v;
+} w;
+
+struct x
+{
+ long long p;
+ int q;
+} s = {0xDEADBEEFCAFEBABELL, 0xFEEBDAED};
+
+#define HAS_DATA_INIT_FUNC
+void init_data ()
+{
+ /* Init signed quad-word integer. */
+ w.v.l64 = 0xfdb9753102468aceLL;
+ w.v.h64 = 0xeca8642013579bdfLL;
+}
+
+#include "abitest.h"
+#else
+ ARG(int, 0xAB, W0)
+ ARG(struct y, w, X2)
+ ARG(int, 0xCD, W4)
+ ARG(struct x, s, X5)
+ LAST_ARG(int, 0xFF00FF00, W7)
+
+#endif
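
The same rule in isolation, as a sketch (hypothetical names; the comments
give the expected assignment only):

  /* Per C.8, a 16-byte-aligned argument allocated to general registers
     starts at an even-numbered one, so a register can be skipped.  */
  extern void even_start (int a, __int128 q, int b);

  void
  demo (void)
  {
    /* Expected: a -> w0; q -> x2-x3 (x1 left unused); b -> w4.  */
    even_start (0xAB, (__int128) 1 << 100, 0xCD);
  }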
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_align-3.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_align-3.c
new file mode 100644
index 00000000000..bf8bc7468f7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_align-3.c
@@ -0,0 +1,46 @@
+/* Test AAPCS64 layout.
+
+   C.8 If the argument has an alignment of 16 then the NGRN is rounded
+       up to the next even number.
+ C.9 If the argument is an Integral Type, the size of the argument is
+ equal to 16 and the NGRN is less than 7, the argument is copied
+ to x[NGRN] and x[NGRN+1]. x[NGRN] shall contain the lower addressed
+ double-word of the memory representation of the argument. The
+ NGRN is incremented by two. The argument has now been allocated.
+
+ The case of passing a 128-bit integer in two general registers is covered
+ in this test. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "test_align-3.c"
+#include "type-def.h"
+
+union int128_t qword;
+
+int gInt[4];
+
+#define HAS_DATA_INIT_FUNC
+void init_data ()
+{
+ /* Initialize the quadword integer via the union. */
+ qword.l64 = 0xDEADBEEFCAFEBABELL;
+ qword.h64 = 0x123456789ABCDEF0LL;
+
+ gInt[0] = 12345;
+ gInt[1] = 23456;
+ gInt[2] = 34567;
+ gInt[3] = 45678;
+}
+
+
+#include "abitest.h"
+#else
+ ARG(int, gInt[0], W0)
+ ARG(int, gInt[1], W1)
+ ARG(int, gInt[2], W2)
+ ARG(__int128, qword.i, X4)
+ LAST_ARG(int, gInt[3], W6)
+
+#endif
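
The doubleword split of C.9 can be pictured in plain C (a sketch with
assumed names; memcpy merely exposes the memory representation the rule
refers to):

  #include <string.h>

  void
  split (__int128 q, unsigned long long dw[2])
  {
    /* dw[0] is the lower-addressed doubleword and corresponds to
       x[NGRN]; dw[1] is the higher-addressed one, x[NGRN+1].  */
    memcpy (dw, &q, sizeof (q));
  }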
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_align-4.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_align-4.c
new file mode 100644
index 00000000000..7834ed87e78
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_align-4.c
@@ -0,0 +1,42 @@
+/* Test AAPCS64 layout.
+
+ C.3 If the argument is an HFA then the NSRN is set to 8 and the size
+ of the argument is rounded up to the nearest multiple of 8 bytes.
+
+ TODO: add the check of an HFA containing half-precision floating-point
+ when __f16 is supported in A64 GCC. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "test_align-4.c"
+
+struct z1
+{
+ double x[4];
+};
+
+struct z1 a = { 5.0, 6.0, 7.0, 8.0 };
+
+struct z2
+{
+ float x[3];
+};
+
+struct z2 b = { 13.f, 14.f, 15.f };
+struct z2 c = { 16.f, 17.f, 18.f };
+
+#include "abitest.h"
+#else
+
+ ARG(struct z1, a, D0)
+ ARG(double, 9.0, D4)
+ ARG(double, 10.0, D5)
+ ARG(struct z2, b, STACK) /* [C.3] on stack and size padded to 16 bytes */
+#ifndef __AAPCS64_BIG_ENDIAN__
+ ARG(float, 15.5f, STACK+16) /* [C.3] NSRN has been set to 8 */
+#else
+ ARG(float, 15.5f, STACK+20)
+#endif
+ LAST_ARG(struct z2, c, STACK+24)
+#endif
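
A sketch of the C.3 side effect in isolation (assumed names; the
register/stack notes restate the expectation encoded by the test):

  struct hfa4d { double d[4]; };
  struct hfa3f { float f[3]; };

  extern void nsrn_off (struct hfa4d a, double b, double c,
                        struct hfa3f spilled, float late);

  void
  demo (void)
  {
    struct hfa4d a = { { 5.0, 6.0, 7.0, 8.0 } };
    struct hfa3f s = { { 13.f, 14.f, 15.f } };
    /* a -> d0-d3, b -> d4, c -> d5; spilled needs three FP registers
       but only d6-d7 remain, so it goes on the stack and the NSRN is
       set to 8; late therefore follows it onto the stack even though
       d6 and d7 were never used.  */
    nsrn_off (a, 9.0, 10.0, s, 15.5f);
  }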
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_complex.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_complex.c
new file mode 100644
index 00000000000..6bf9721cc4e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_complex.c
@@ -0,0 +1,18 @@
+/* Test AAPCS layout (VFP variant) */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "test_complex.c"
+
+__complex__ float x = 1.0+2.0i;
+__complex__ int y = 5 + 6i;
+__complex__ double z = 2.0 + 3.0i;
+
+#include "abitest.h"
+#else
+ ARG(__complex__ float, x, S0)
+ ARG(__complex__ int, y, X0)
+ ARG(__complex__ double, z, D2)
+ LAST_ARG (int, 5, W1)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_int128.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_int128.c
new file mode 100644
index 00000000000..9df344f29f7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_int128.c
@@ -0,0 +1,17 @@
+/* Test AAPCS layout (VFP variant) */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "test_int128.c"
+
+typedef int TItype __attribute__ ((mode (TI)));
+
+TItype x = 0xcafecafecafecfeacfeacfea;
+TItype y = 0xcfeacfeacfeacafecafecafe;
+
+#include "abitest.h"
+#else
+ ARG (TItype, x, X0)
+ LAST_ARG (TItype, y, X2)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/test_quad_double.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_quad_double.c
new file mode 100644
index 00000000000..109cea0b5c5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/test_quad_double.c
@@ -0,0 +1,26 @@
+/* Test AAPCS64 layout.
+
+ Test parameter passing of floating-point quad precision types. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define TESTFILE "test_quad_double.c"
+
+typedef long double TFtype;
+typedef _Complex long double CTFtype;
+
+TFtype x = 1.0;
+TFtype y = 2.0;
+
+CTFtype cx = 3.0 + 4.0i;
+CTFtype cy = 5.0 + 6.0i;
+
+#include "abitest.h"
+#else
+ ARG ( TFtype, x, Q0)
+ ARG (CTFtype, cx, Q1)
+ DOTS
+ ANON (CTFtype, cy, Q3)
+ LAST_ANON ( TFtype, y, Q5)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/type-def.h b/gcc/testsuite/gcc.target/aarch64/aapcs64/type-def.h
new file mode 100644
index 00000000000..a95d06aa2ed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/type-def.h
@@ -0,0 +1,157 @@
+/* This header file defines some types that are used in the AAPCS64 tests. */
+
+
+/* 64-bit vector of 2 floats. */
+typedef float vf2_t __attribute__((vector_size (8)));
+
+/* 128-bit vector of 4 floats. */
+typedef float vf4_t __attribute__((vector_size (16)));
+
+/* 128-bit vector of 4 ints. */
+typedef int vi4_t __attribute__((vector_size (16)));
+
+/* signed quad-word (in an union for the convenience of initialization). */
+union int128_t
+{
+ __int128 i;
+ struct
+ {
+ signed long long l64;
+ signed long long h64;
+ };
+};
+
+/* Homogeneous floating-point composite types. */
+
+struct hfa_fx1_t
+{
+ float a;
+};
+
+struct hfa_fx2_t
+{
+ float a;
+ float b;
+};
+
+struct hfa_dx2_t
+{
+ double a;
+ double b;
+};
+
+struct hfa_dx4_t
+{
+ double a;
+ double b;
+ double c;
+ double d;
+};
+
+struct hfa_ldx3_t
+{
+ long double a;
+ long double b;
+ long double c;
+};
+
+struct hfa_ffs_t
+{
+ float a;
+ float b;
+ struct hfa_fx2_t c;
+};
+
+union hfa_union_t
+{
+ struct
+ {
+ float a;
+ float b;
+ } s;
+ float c;
+};
+
+/* Non homogeneous floating-point-composite types. */
+
+struct non_hfa_fx5_t
+{
+ float a;
+ float b;
+ float c;
+ float d;
+ float e;
+};
+
+struct non_hfa_ffs_t
+{
+ float a;
+ float b;
+ struct hfa_dx2_t c;
+};
+
+struct non_hfa_ffs_2_t
+{
+ struct
+ {
+ int a;
+ int b;
+ } s;
+ float c;
+ float d;
+};
+
+struct hva_vf2x1_t
+{
+ vf2_t a;
+};
+
+struct hva_vf2x2_t
+{
+ vf2_t a;
+ vf2_t b;
+};
+
+struct hva_vi4x1_t
+{
+ vi4_t a;
+};
+
+struct non_hfa_ffd_t
+{
+ float a;
+ float b;
+ double c;
+};
+
+struct non_hfa_ii_t
+{
+ int a;
+ int b;
+};
+
+struct non_hfa_c_t
+{
+ char a;
+};
+
+struct non_hfa_ffvf2_t
+{
+ float a;
+ float b;
+ vf2_t c;
+};
+
+struct non_hfa_fffd_t
+{
+ float a;
+ float b;
+ float c;
+ double d;
+};
+
+union non_hfa_union_t
+{
+ double a;
+ float b;
+};
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-1.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-1.c
new file mode 100644
index 00000000000..4eb569e8c74
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-1.c
@@ -0,0 +1,50 @@
+/* Test AAPCS64 layout and __builtin_va_arg.
+
+ This test covers fundamental data types as specified in AAPCS64 \S 4.1.
+   It focuses on unnamed parameters passed in registers. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define AAPCS64_TEST_STDARG
+#define TESTFILE "va_arg-1.c"
+#include "type-def.h"
+
+vf2_t vf2 = (vf2_t){ 17.f, 18.f };
+vi4_t vi4 = (vi4_t){ 0xdeadbabe, 0xbabecafe, 0xcafebeef, 0xbeefdead };
+union int128_t qword;
+signed char sc = 0xed;
+signed int sc_promoted = 0xffffffed;
+signed short ss = 0xcba9;
+signed int ss_promoted = 0xffffcba9;
+float fp = 65432.12345f;
+double fp_promoted = (double)65432.12345f;
+
+#define HAS_DATA_INIT_FUNC
+void init_data ()
+{
+ /* Init signed quad-word integer. */
+ qword.l64 = 0xfdb9753102468aceLL;
+ qword.h64 = 0xeca8642013579bdfLL;
+}
+
+#include "abitest.h"
+#else
+ ARG ( int , 0xff , X0, LAST_NAMED_ARG_ID)
+ DOTS
+ ANON_PROMOTED(unsigned char , 0xfe , unsigned int, 0xfe , X1, 1)
+ ANON_PROMOTED( signed char , sc , signed int, sc_promoted, X2, 2)
+ ANON_PROMOTED(unsigned short , 0xdcba, unsigned int, 0xdcba , X3, 3)
+ ANON_PROMOTED( signed short , ss , signed int, ss_promoted, X4, 4)
+ ANON (unsigned int , 0xdeadbeef, X5, 5)
+ ANON ( signed int , 0xcafebabe, X6, 6)
+ ANON (unsigned long long, 0xba98765432101234ULL, X7, 7)
+ ANON ( signed long long, 0xa987654321012345LL , STACK, 8)
+ ANON ( __int128, qword.i , STACK+16, 9)
+ ANON_PROMOTED( float , fp , double, fp_promoted, D0, 10)
+ ANON ( double , 9876543.212345, D1, 11)
+ ANON ( long double , 98765432123456789.987654321L, Q2, 12)
+ ANON ( vf2_t, vf2 , D3, 13)
+ ANON ( vi4_t, vi4 , Q4, 14)
+ LAST_ANON ( int , 0xeeee, STACK+32,15)
+#endif
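
The ANON_PROMOTED rows encode the usual default argument promotions; a
minimal standalone sketch (hypothetical function, any C compiler):

  #include <stdarg.h>

  int
  read_promoted (int n, ...)
  {
    va_list ap;
    int c;
    double f;
    va_start (ap, n);
    c = va_arg (ap, int);     /* A char or short actual argument
                                 arrives as int through `...'.  */
    f = va_arg (ap, double);  /* A float actual argument arrives
                                 as double.  */
    va_end (ap);
    return c + (int) f;
  }

  /* e.g. read_promoted (2, (char) 0x7f, 1.5f) sees 127 and 1.5.  */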
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-10.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-10.c
new file mode 100644
index 00000000000..50b77005b32
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-10.c
@@ -0,0 +1,29 @@
+/* Test AAPCS64 layout and __builtin_va_arg.
+
+ Miscellaneous test: Anonymous arguments passed on the stack. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define AAPCS64_TEST_STDARG
+#define TESTFILE "va_arg-10.c"
+
+struct z
+{
+ double x[4];
+};
+
+double d1 = 25.0;
+double d2 = 103.0;
+struct z a = { 5.0, 6.0, 7.0, 8.0 };
+struct z b = { 9.0, 10.0, 11.0, 12.0 };
+
+#include "abitest.h"
+#else
+ ARG(struct z, a, D0, 0)
+ ARG(struct z, b, D4, LAST_NAMED_ARG_ID)
+ DOTS
+ ANON(double, d1, STACK, 2)
+ LAST_ANON(double, d2, STACK+8, 3)
+
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-11.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-11.c
new file mode 100644
index 00000000000..c1f1f8f9b7e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-11.c
@@ -0,0 +1,32 @@
+/* Test AAPCS64 layout and __builtin_va_arg.
+
+ Miscellaneous test: Anonymous arguments passed on the stack. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define AAPCS64_TEST_STDARG
+#define TESTFILE "va_arg-11.c"
+
+struct z
+{
+ double x[2];
+};
+
+double d1 = 25.0;
+struct z a = { 5.0, 6.0 };
+
+#include "abitest.h"
+#else
+ ARG(double, 1.0, D0, 0)
+ ARG(double, 2.0, D1, 1)
+ ARG(double, 3.0, D2, 2)
+ ARG(double, 4.0, D3, 3)
+ ARG(double, 5.0, D4, 4)
+ ARG(double, 6.0, D5, 5)
+ ARG(double, 7.0, D6, LAST_NAMED_ARG_ID)
+ DOTS
+ ANON(struct z, a, STACK, 8)
+ LAST_ANON(double, d1, STACK+16, 9)
+
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-12.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-12.c
new file mode 100644
index 00000000000..a12ccfd8b97
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-12.c
@@ -0,0 +1,60 @@
+/* Test AAPCS64 layout and __builtin_va_arg.
+
+ Pass by reference. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define AAPCS64_TEST_STDARG
+#define TESTFILE "va_arg-12.c"
+
+struct z
+{
+ char c;
+ short s;
+ int ia[4];
+};
+
+struct z a, b, c;
+
+#define HAS_DATA_INIT_FUNC
+void init_data ()
+{
+ a.c = 0x11;
+ a.s = 0x2222;
+ a.ia[0] = 0x33333333;
+ a.ia[1] = 0x44444444;
+ a.ia[2] = 0x55555555;
+ a.ia[3] = 0x66666666;
+
+ b.c = 0x77;
+ b.s = 0x8888;
+ b.ia[0] = 0x99999999;
+ b.ia[1] = 0xaaaaaaaa;
+ b.ia[2] = 0xbbbbbbbb;
+ b.ia[3] = 0xcccccccc;
+
+ c.c = 0xdd;
+ c.s = 0xeeee;
+ c.ia[0] = 0xffffffff;
+ c.ia[1] = 0x12121212;
+ c.ia[2] = 0x23232323;
+ c.ia[3] = 0x34343434;
+}
+
+#include "abitest.h"
+#else
+ PTR(struct z, a, X0, 0)
+ ARG(int, 0xdeadbeef, X1, 1)
+ ARG(int, 0xcafebabe, X2, 2)
+ ARG(int, 0xdeadbabe, X3, 3)
+ ARG(int, 0xcafebeef, X4, 4)
+ ARG(int, 0xbeefdead, X5, 5)
+ ARG(int, 0xbabecafe, X6, LAST_NAMED_ARG_ID)
+ DOTS
+ PTR_ANON(struct z, b, X7, 7)
+ PTR_ANON(struct z, c, STACK, 8)
+ ANON(int, 0xbabedead, STACK+8, 9)
+ LAST_ANON(double, 123.45, D0, 10)
+
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-2.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-2.c
new file mode 100644
index 00000000000..b6da677c5ad
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-2.c
@@ -0,0 +1,59 @@
+/* Test AAPCS64 layout and __builtin_va_arg.
+
+ This test covers fundamental data types as specified in AAPCS64 \S 4.1.
+   It focuses on unnamed parameters passed on the stack. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define AAPCS64_TEST_STDARG
+#define TESTFILE "va_arg-2.c"
+#include "type-def.h"
+
+vf2_t vf2 = (vf2_t){ 17.f, 18.f };
+vi4_t vi4 = (vi4_t){ 0xdeadbabe, 0xbabecafe, 0xcafebeef, 0xbeefdead };
+union int128_t qword;
+signed char sc = 0xed;
+signed int sc_promoted = 0xffffffed;
+signed short ss = 0xcba9;
+signed int ss_promoted = 0xffffcba9;
+float fp = 65432.12345f;
+double fp_promoted = (double)65432.12345f;
+
+#define HAS_DATA_INIT_FUNC
+void init_data ()
+{
+ /* Init signed quad-word integer. */
+ qword.l64 = 0xfdb9753102468aceLL;
+ qword.h64 = 0xeca8642013579bdfLL;
+}
+
+#include "abitest.h"
+#else
+ ARG ( int , 0xff , X0, 0)
+ ARG ( float , 1.0f , S0, 1)
+ ARG ( float , 1.0f , S1, 2)
+ ARG ( float , 1.0f , S2, 3)
+ ARG ( float , 1.0f , S3, 4)
+ ARG ( float , 1.0f , S4, 5)
+ ARG ( float , 1.0f , S5, 6)
+ ARG ( float , 1.0f , S6, 7)
+ ARG ( float , 1.0f , S7, LAST_NAMED_ARG_ID)
+ DOTS
+ ANON ( __int128, qword.i , X2, 8)
+ ANON ( signed long long, 0xa987654321012345LL , X4, 9)
+ ANON ( __int128, qword.i , X6, 10)
+ ANON_PROMOTED(unsigned char , 0xfe , unsigned int, 0xfe , STACK, 11)
+ ANON_PROMOTED( signed char , sc , signed int, sc_promoted, STACK+8, 12)
+ ANON_PROMOTED(unsigned short , 0xdcba, unsigned int, 0xdcba , STACK+16, 13)
+ ANON_PROMOTED( signed short , ss , signed int, ss_promoted, STACK+24, 14)
+ ANON (unsigned int , 0xdeadbeef, STACK+32, 15)
+ ANON ( signed int , 0xcafebabe, STACK+40, 16)
+ ANON (unsigned long long, 0xba98765432101234ULL, STACK+48, 17)
+ ANON_PROMOTED( float , fp , double, fp_promoted, STACK+56, 18)
+ ANON ( double , 9876543.212345, STACK+64, 19)
+ ANON ( long double , 98765432123456789.987654321L, STACK+80, 20)
+ ANON ( vf2_t, vf2 , STACK+96, 21)
+ ANON ( vi4_t, vi4 , STACK+112,22)
+ LAST_ANON ( int , 0xeeee, STACK+128,23)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-3.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-3.c
new file mode 100644
index 00000000000..34978c7e59d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-3.c
@@ -0,0 +1,86 @@
+/* Test AAPCS64 layout and __builtin_va_arg.
+
+ This test covers most composite types as described in AAPCS64 \S 4.3.
+ Homogeneous floating-point aggregate types are covered in other tests. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define AAPCS64_TEST_STDARG
+#define TESTFILE "va_arg-3.c"
+#include "type-def.h"
+
+struct x0
+{
+ char ch;
+ int i;
+} y0 = { 'a', 12345 };
+
+struct x1
+{
+ int a;
+ int b;
+ int c;
+ int d;
+} y1 = { 0xdeadbeef, 0xcafebabe, 0x87654321, 0xabcedf975 };
+
+struct x2
+{
+ long long a;
+ long long b;
+ char ch;
+} y2 = { 0x12, 0x34, 0x56 };
+
+union x3
+{
+ char ch;
+ int i;
+ long long ll;
+} y3;
+
+union x4
+{
+ int i;
+ struct x2 y2;
+} y4;
+
+struct x5
+{
+ union int128_t qword;
+} y5;
+
+#define HAS_DATA_INIT_FUNC
+void init_data ()
+{
+ /* Init small union. */
+ y3.ll = 0xfedcba98LL;
+
+ /* Init big union. */
+ y4.y2.a = 0x78;
+ y4.y2.b = 0x89;
+ y4.y2.ch= 0x9a;
+
+ /* Init signed quad-word integer. */
+ y5.qword.l64 = 0xfdb9753102468aceLL;
+ y5.qword.h64 = 0xeca8642013579bdfLL;
+}
+
+#include "abitest.h"
+#else
+ ARG (float ,1.0f, S0, LAST_NAMED_ARG_ID)
+ DOTS
+ ANON (struct x0, y0, X0, 1)
+ ANON (struct x1, y1, X1, 2)
+ PTR_ANON (struct x2, y2, X3, 3)
+ ANON (union x3, y3, X4, 4)
+ PTR_ANON (union x4, y4, X5, 5)
+ ANON (struct x5, y5, X6, 6)
+ ANON (struct x0, y0, STACK, 7)
+ ANON (struct x1, y1, STACK+8, 8)
+ PTR_ANON (struct x2, y2, STACK+24, 9)
+ ANON (union x3, y3, STACK+32, 10)
+ PTR_ANON (union x4, y4, STACK+40, 11)
+ ANON (int , 1, STACK+48, 12)
+ ANON (struct x5, y5, STACK+64, 13)
+ LAST_ANON(int , 2, STACK+80, 14)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-4.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-4.c
new file mode 100644
index 00000000000..d0e18db54d2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-4.c
@@ -0,0 +1,93 @@
+/* Test AAPCS64 layout and __builtin_va_arg.
+
+ This test covers homogeneous floating-point aggregate types and homogeneous
+ short-vector aggregate types as described in AAPCS64 \S 4.3.5. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define AAPCS64_TEST_STDARG
+#define TESTFILE "va_arg-4.c"
+#include "type-def.h"
+
+struct hfa_fx1_t hfa_fx1 = {12.345f};
+struct hfa_fx2_t hfa_fx2 = {123.456f, 234.456f};
+struct hfa_dx2_t hfa_dx2 = {234.567, 345.678};
+struct hfa_dx4_t hfa_dx4 = {1234.123, 2345.234, 3456.345, 4567.456};
+struct hfa_ldx3_t hfa_ldx3 = {123456.7890, 234567.8901, 345678.9012};
+struct non_hfa_fx5_t non_hfa_fx5 = {456.789f, 567.890f, 678.901f, 789.012f, 890.123f};
+struct hfa_ffs_t hfa_ffs;
+struct non_hfa_ffs_t non_hfa_ffs;
+struct non_hfa_ffs_2_t non_hfa_ffs_2;
+struct hva_vf2x1_t hva_vf2x1;
+struct hva_vf2x2_t hva_vf2x2;
+struct hva_vi4x1_t hva_vi4x1;
+struct non_hfa_ffd_t non_hfa_ffd = {23.f, 24.f, 25.0};
+struct non_hfa_ii_t non_hfa_ii = {26, 27};
+struct non_hfa_c_t non_hfa_c = {28};
+struct non_hfa_ffvf2_t non_hfa_ffvf2;
+struct non_hfa_fffd_t non_hfa_fffd = {33.f, 34.f, 35.f, 36.0};
+union hfa_union_t hfa_union;
+union non_hfa_union_t non_hfa_union;
+
+#define HAS_DATA_INIT_FUNC
+void init_data ()
+{
+ hva_vf2x1.a = (vf2_t){17.f, 18.f};
+ hva_vf2x2.a = (vf2_t){19.f, 20.f};
+ hva_vf2x2.b = (vf2_t){21.f, 22.f};
+ hva_vi4x1.a = (vi4_t){19, 20, 21, 22};
+
+ non_hfa_ffvf2.a = 29.f;
+ non_hfa_ffvf2.b = 30.f;
+ non_hfa_ffvf2.c = (vf2_t){31.f, 32.f};
+
+ hfa_union.s.a = 37.f;
+ hfa_union.s.b = 38.f;
+ hfa_union.c = 39.f;
+
+ non_hfa_union.a = 40.0;
+ non_hfa_union.b = 41.f;
+
+ hfa_ffs.a = 42.f;
+ hfa_ffs.b = 43.f;
+ hfa_ffs.c.a = 44.f;
+ hfa_ffs.c.b = 45.f;
+
+ non_hfa_ffs.a = 46.f;
+ non_hfa_ffs.b = 47.f;
+ non_hfa_ffs.c.a = 48.0;
+ non_hfa_ffs.c.b = 49.0;
+
+ non_hfa_ffs_2.s.a = 50;
+ non_hfa_ffs_2.s.b = 51;
+ non_hfa_ffs_2.c = 52.f;
+ non_hfa_ffs_2.d = 53.f;
+}
+
+#include "abitest.h"
+#else
+ ARG (int , 1, X0, LAST_NAMED_ARG_ID)
+ DOTS
+ /* HFA or HVA passed in fp/simd registers or on stack. */
+ ANON (struct hfa_fx1_t , hfa_fx1 , S0 , 0)
+ ANON (struct hfa_fx2_t , hfa_fx2 , S1 , 1)
+ ANON (struct hfa_dx2_t , hfa_dx2 , D3 , 2)
+ ANON (struct hva_vf2x1_t, hva_vf2x1, D5 , 11)
+ ANON (struct hva_vi4x1_t, hva_vi4x1, Q6 , 12)
+ ANON (struct hfa_dx4_t , hfa_dx4 , STACK , 3)
+ ANON (struct hfa_ffs_t , hfa_ffs , STACK+32, 4)
+ ANON (union hfa_union_t, hfa_union, STACK+48, 5)
+ ANON (struct hfa_ldx3_t , hfa_ldx3 , STACK+64, 6)
+ /* Non-H[FV]A passed in general registers or on stack or via reference. */
+ PTR_ANON (struct non_hfa_fx5_t , non_hfa_fx5 , X1 , 10)
+ ANON (struct non_hfa_ffd_t , non_hfa_ffd , X2 , 13)
+ ANON (struct non_hfa_ii_t , non_hfa_ii , X4 , 14)
+ ANON (struct non_hfa_c_t , non_hfa_c , X5 , 15)
+ ANON (struct non_hfa_ffvf2_t, non_hfa_ffvf2, X6 , 16)
+ PTR_ANON (struct non_hfa_fffd_t , non_hfa_fffd , STACK+112, 17)
+ PTR_ANON (struct non_hfa_ffs_t , non_hfa_ffs , STACK+120, 18)
+ ANON (struct non_hfa_ffs_2_t, non_hfa_ffs_2, STACK+128, 19)
+ ANON (union non_hfa_union_t, non_hfa_union, STACK+144, 20)
+ LAST_ANON(int , 2 , STACK+152, 30)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-5.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-5.c
new file mode 100644
index 00000000000..6b99a6f1e6e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-5.c
@@ -0,0 +1,47 @@
+/* Test AAPCS64 layout and __builtin_va_arg.
+
+   This test focuses on certain unnamed homogeneous floating-point
+   aggregate types passed in fp/simd registers. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define AAPCS64_TEST_STDARG
+#define TESTFILE "va_arg-5.c"
+#include "type-def.h"
+
+struct hfa_fx1_t hfa_fx1 = {12.345f};
+struct hfa_fx2_t hfa_fx2 = {123.456f, 234.456f};
+struct hfa_dx2_t hfa_dx2 = {234.567, 345.678};
+struct hfa_dx4_t hfa_dx4 = {1234.123, 2345.234, 3456.345, 4567.456};
+struct hfa_ldx3_t hfa_ldx3 = {123456.7890, 234567.8901, 345678.9012};
+struct hfa_ffs_t hfa_ffs;
+union hfa_union_t hfa_union;
+
+#define HAS_DATA_INIT_FUNC
+void init_data ()
+{
+ hfa_union.s.a = 37.f;
+ hfa_union.s.b = 38.f;
+ hfa_union.c = 39.f;
+
+ hfa_ffs.a = 42.f;
+ hfa_ffs.b = 43.f;
+ hfa_ffs.c.a = 44.f;
+ hfa_ffs.c.b = 45.f;
+}
+
+#include "abitest.h"
+#else
+ ARG (int, 1, X0, LAST_NAMED_ARG_ID)
+ DOTS
+ /* HFA passed in fp/simd registers or on stack. */
+ ANON (struct hfa_dx4_t , hfa_dx4 , D0 , 0)
+ ANON (struct hfa_ldx3_t , hfa_ldx3 , Q4 , 1)
+ ANON (struct hfa_ffs_t , hfa_ffs , STACK , 2)
+ ANON (union hfa_union_t, hfa_union, STACK+16, 3)
+ ANON (struct hfa_fx1_t , hfa_fx1 , STACK+24, 4)
+ ANON (struct hfa_fx2_t , hfa_fx2 , STACK+32, 5)
+ ANON (struct hfa_dx2_t , hfa_dx2 , STACK+40, 6)
+ LAST_ANON(double , 1.0 , STACK+56, 7)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-6.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-6.c
new file mode 100644
index 00000000000..f94a54ab1b3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-6.c
@@ -0,0 +1,40 @@
+/* Test AAPCS64 layout and __builtin_va_arg.
+
+   This test focuses on certain unnamed homogeneous floating-point
+   aggregate types passed in fp/simd registers. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define AAPCS64_TEST_STDARG
+#define TESTFILE "va_arg-6.c"
+#include "type-def.h"
+
+struct hfa_fx1_t hfa_fx1 = {12.345f};
+struct hfa_dx2_t hfa_dx2 = {234.567, 345.678};
+struct hfa_ffs_t hfa_ffs;
+union hfa_union_t hfa_union;
+
+#define HAS_DATA_INIT_FUNC
+void init_data ()
+{
+ hfa_union.s.a = 37.f;
+ hfa_union.s.b = 38.f;
+ hfa_union.c = 39.f;
+
+ hfa_ffs.a = 42.f;
+ hfa_ffs.b = 43.f;
+ hfa_ffs.c.a = 44.f;
+ hfa_ffs.c.b = 45.f;
+}
+
+#include "abitest.h"
+#else
+ ARG (int, 1, X0, LAST_NAMED_ARG_ID)
+ DOTS
+ ANON (struct hfa_ffs_t , hfa_ffs , S0 , 0)
+ ANON (union hfa_union_t, hfa_union, S4 , 1)
+ ANON (struct hfa_dx2_t , hfa_dx2 , D6 , 2)
+ ANON (struct hfa_fx1_t , hfa_fx1 , STACK , 3)
+ LAST_ANON(double , 1.0 , STACK+8, 4)
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-7.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-7.c
new file mode 100644
index 00000000000..b82e7a74217
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-7.c
@@ -0,0 +1,31 @@
+/* Test AAPCS64 layout and __builtin_va_arg.
+
+ This test covers complex types. Complex floating-point types are treated
+ as homogeneous floating-point aggregates, while complex integral types
+ are treated as general composite types. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define AAPCS64_TEST_STDARG
+#define TESTFILE "va_arg-7.c"
+#include "type-def.h"
+
+_Complex __int128 complex_qword = 567890 + 678901i;
+
+#include "abitest.h"
+#else
+ ARG (int, 1, X0, LAST_NAMED_ARG_ID)
+ DOTS
+ /* Complex floating-point types are passed in fp/simd registers. */
+ ANON (_Complex float , 12.3f + 23.4fi , S0, 0)
+ ANON (_Complex double , 34.56 + 45.67i , D2, 1)
+ ANON (_Complex long double, 56789.01234L + 67890.12345Li, Q4, 2)
+
+ /* Complex integral types are passed in general registers or via reference. */
+ ANON (_Complex short , (short)12345 + (short)23456i, X1, 10)
+ ANON (_Complex int , 34567 + 45678i , X2, 11)
+ PTR_ANON (_Complex __int128 , complex_qword , X3, 12)
+
+ LAST_ANON(int , 1 , X4, 20)
+#endif
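
A standalone sketch of the split described above (assumed name; relies only
on the GNU complex extensions already used in these tests):

  #include <stdarg.h>

  double
  sum_reals (int n, ...)
  {
    va_list ap;
    _Complex double zf;
    _Complex int zi;
    double r;
    va_start (ap, n);
    zf = va_arg (ap, _Complex double); /* Passed like an HFA of two
                                          doubles (d-registers).      */
    zi = va_arg (ap, _Complex int);    /* Passed like an 8-byte
                                          composite (one x-register). */
    r = __real__ zf + __real__ zi;
    va_end (ap);
    return r;
  }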
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-8.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-8.c
new file mode 100644
index 00000000000..d148482981c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-8.c
@@ -0,0 +1,25 @@
+/* Test AAPCS64 layout and __builtin_va_arg.
+
+ Miscellaneous test: HFA anonymous parameter passed in SIMD/FP regs. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define AAPCS64_TEST_STDARG
+#define TESTFILE "va_arg-8.c"
+
+struct z
+{
+ double x[4];
+};
+
+struct z a = { 5.0, 6.0, 7.0, 8.0 };
+
+#include "abitest.h"
+#else
+ ARG(int, 0xdeadbeef, W0, LAST_NAMED_ARG_ID)
+ DOTS
+ ANON(double, 4.0, D0, 1)
+ LAST_ANON(struct z, a, D1, 2)
+
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-9.c b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-9.c
new file mode 100644
index 00000000000..a5183bef44e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-9.c
@@ -0,0 +1,31 @@
+/* Test AAPCS64 layout and __builtin_va_arg.
+
+ Miscellaneous test: HFA anonymous parameter passed in SIMD/FP regs. */
+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
+#define AAPCS64_TEST_STDARG
+#define TESTFILE "va_arg-9.c"
+
+struct z
+{
+ double x[4];
+};
+
+double d1 = 25.0;
+struct z a = { 5.0, 6.0, 7.0, 8.0 };
+struct z b = { 9.0, 10.0, 11.0, 12.0 };
+
+#include "abitest.h"
+#else
+ ARG(double, 11.0, D0, LAST_NAMED_ARG_ID)
+ DOTS
+ ANON(int, 8, W0, 1)
+ ANON(struct z, a, D1, 2)
+ ANON(struct z, b, STACK, 3)
+ ANON(int, 5, W1, 4)
+ ANON(double, d1, STACK+32, 5)
+ LAST_ANON(double, 0.5, STACK+40, 6)
+
+#endif
diff --git a/gcc/testsuite/gcc.target/aarch64/aapcs64/validate_memory.h b/gcc/testsuite/gcc.target/aarch64/aapcs64/validate_memory.h
new file mode 100644
index 00000000000..ac946256e7e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aapcs64/validate_memory.h
@@ -0,0 +1,81 @@
+/* Memory validation functions for AArch64 procedure call standard.
+ Copyright (C) 2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef VALIDATE_MEMORY_H
+#define VALIDATE_MEMORY_H
+
+enum structure_type
+{
+ flat = 0,
+ i32in128,
+ f32in64,
+ i8in64,
+ i16in64,
+ i32in64,
+};
+
+/* Some explicit declarations, since files from outside the testsuite
+   cannot be included.  */
+typedef long unsigned int size_t;
+int memcmp (const void *, const void *, size_t);
+
+/* These two arrays contain element size and block size data for the enumeration
+ above. */
+const int element_size[] = { 1, 4, 4, 1, 2, 4 };
+const int block_reverse_size[] = { 1, 16, 8, 8, 8, 8 };
+
+int
+validate_memory (void *mem1, char *mem2, size_t size, enum structure_type type)
+{
+  /* In big-endian mode, the data in mem2 will have been byte-reversed in
+     register-sized groups, while the data in mem1 will have been byte-reversed
+     according to the true structure of the data.  To compare them, we need to
+     compare chunks of data in reverse order.
+
+     This is only implemented for homogeneous data layouts at the moment.  For
+     heterogeneous structures a custom compare case will need to be written. */
+
+ unsigned int i;
+ char *cmem1 = (char *) mem1;
+ switch (type)
+ {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ case i8in64:
+ case i16in64:
+ case i32in64:
+ case f32in64:
+ case i32in128:
+ for (i = 0; i < size; i += element_size[type])
+ {
+ if (memcmp (cmem1 + i,
+ mem2 + block_reverse_size[type] - i - element_size[type],
+ element_size[type]))
+ return 1;
+ }
+ return 0;
+ break;
+#endif
+ default:
+ break;
+ }
+ return memcmp (mem1, mem2, size);
+}
+
+#endif /* VALIDATE_MEMORY_H. */
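
A hypothetical usage sketch (abitest.h is the real caller; this assumes
validate_memory.h has been included and only shows the intended calling
pattern):

  /* Compare a two-float struct against the image a big-endian register
     dump would produce: f32in64 means 4-byte elements inside 8-byte
     byte-reversed blocks.  */
  struct ff { float a, b; };

  int
  check (struct ff *expected, char *regdump)
  {
    return validate_memory (expected, regdump, sizeof (struct ff),
                            f32in64);
  }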
diff --git a/gcc/testsuite/gcc.target/aarch64/aarch64.exp b/gcc/testsuite/gcc.target/aarch64/aarch64.exp
new file mode 100644
index 00000000000..60ebb499ac7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/aarch64.exp
@@ -0,0 +1,45 @@
+# Specific regression driver for AArch64.
+# Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+# Contributed by ARM Ltd.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>.
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Exit immediately if this isn't an AArch64 target.
+if {![istarget aarch64*-*-*] } then {
+ return
+}
+
+# Load support procs.
+load_lib gcc-dg.exp
+
+# If a testcase doesn't have special options, use these.
+global DEFAULT_CFLAGS
+if ![info exists DEFAULT_CFLAGS] then {
+ set DEFAULT_CFLAGS " -ansi -pedantic-errors"
+}
+
+# Initialize `dg'.
+dg-init
+
+# Main loop.
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \
+ "" $DEFAULT_CFLAGS
+
+# All done.
+dg-finish
diff --git a/gcc/testsuite/gcc.target/aarch64/adc-1.c b/gcc/testsuite/gcc.target/aarch64/adc-1.c
new file mode 100644
index 00000000000..c19920ce500
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/adc-1.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+volatile unsigned int w0, w1, w2, w3, w4;
+volatile int result;
+
+void test_si() {
+ /* { dg-final { scan-assembler "adc\tw\[0-9\]*, w\[0-9\]*, w\[0-9\]*\n" } } */
+ w0 = w1 + w2 + (w3 >= w4);
+}
+
+volatile unsigned long long int x0, x1, x2, x3, x4;
+
+void test_di() {
+ /* { dg-final { scan-assembler "adc\tx\[0-9\]*, x\[0-9\]*, x\[0-9\]*\n" } } */
+ x0 = x1 + x2 + (x3 >= x4);
+}
+
diff --git a/gcc/testsuite/gcc.target/aarch64/adc-2.c b/gcc/testsuite/gcc.target/aarch64/adc-2.c
new file mode 100644
index 00000000000..0f13619106b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/adc-2.c
@@ -0,0 +1,277 @@
+/* { dg-do run } */
+/* { dg-options "-O2" } */
+
+extern void abort (void);
+
+/* This series of tests looks for the optimization:
+ x = (a >= b) + c + d
+ =>
+ cmp a, b
+ adc x, c, d
+ */
+
+unsigned long
+ltu_add (unsigned long a, unsigned long b, unsigned long c, unsigned long d)
+{
+ return (a < b) + c + d;
+}
+
+unsigned long
+gtu_add (unsigned long a, unsigned long b, unsigned long c, unsigned long d)
+{
+ return (a > b) + c + d;
+}
+
+unsigned long
+leu_add (unsigned long a, unsigned long b, unsigned long c, unsigned long d)
+{
+ return (a <= b) + c + d;
+}
+
+unsigned long
+geu_add (unsigned long a, unsigned long b, unsigned long c, unsigned long d)
+{
+ return (a >= b) + c + d;
+}
+
+unsigned long
+equ_add (unsigned long a, unsigned long b, unsigned long c, unsigned long d)
+{
+ return (a == b) + c + d;
+}
+
+unsigned long
+neu_add (unsigned long a, unsigned long b, unsigned long c, unsigned long d)
+{
+ return (a != b) + c + d;
+}
+
+long
+lt_add ( long a, long b, long c, long d)
+{
+ return (a < b) + c + d;
+}
+
+long
+gt_add ( long a, long b, long c, long d)
+{
+ return (a > b) + c + d;
+}
+
+long
+le_add ( long a, long b, long c, long d)
+{
+ return (a <= b) + c + d;
+}
+
+long
+ge_add ( long a, long b, long c, long d)
+{
+ return (a >= b) + c + d;
+}
+
+long
+eq_add ( long a, long b, long c, long d)
+{
+ return (a == b) + c + d;
+}
+
+long
+ne_add ( long a, long b, long c, long d)
+{
+ return (a != b) + c + d;
+}
+
+
+int
+main ()
+{
+ if (ltu_add(1,2,3,4) != 8)
+ {
+ abort();
+ }
+
+ if (ltu_add(2,2,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (ltu_add(3,2,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (gtu_add(2,1,3,4) != 8)
+ {
+ abort();
+ }
+
+ if (gtu_add(2,2,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (gtu_add(1,2,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (leu_add(1,2,3,4) != 8)
+ {
+ abort();
+ }
+
+ if (leu_add(2,2,3,4) != 8)
+ {
+ abort();
+ }
+
+ if (leu_add(3,2,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (leu_add(2,1,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (geu_add(2,1,3,4) != 8)
+ {
+ abort();
+ }
+ if (geu_add(2,2,3,4) != 8)
+ {
+ abort();
+ }
+
+ if (geu_add(1,2,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (equ_add(1,2,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (equ_add(2,2,3,4) != 8)
+ {
+ abort();
+ }
+
+ if (equ_add(3,2,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (neu_add(1,2,3,4) != 8)
+ {
+ abort();
+ }
+
+ if (neu_add(2,2,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (neu_add(3,2,3,4) != 8)
+ {
+ abort();
+ }
+
+ if (lt_add(1,2,3,4) != 8)
+ {
+ abort();
+ }
+
+ if (lt_add(2,2,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (lt_add(3,2,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (gt_add(2,1,3,4) != 8)
+ {
+ abort();
+ }
+
+ if (gt_add(2,2,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (gt_add(1,2,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (le_add(1,2,3,4) != 8)
+ {
+ abort();
+ }
+
+ if (le_add(2,2,3,4) != 8)
+ {
+ abort();
+ }
+
+ if (le_add(3,2,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (le_add(2,1,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (ge_add(2,1,3,4) != 8)
+ {
+ abort();
+ }
+ if (ge_add(2,2,3,4) != 8)
+ {
+ abort();
+ }
+
+ if (ge_add(1,2,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (eq_add(1,2,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (eq_add(2,2,3,4) != 8)
+ {
+ abort();
+ }
+
+ if (eq_add(3,2,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (ne_add(1,2,3,4) != 8)
+ {
+ abort();
+ }
+
+ if (ne_add(2,2,3,4) != 7)
+ {
+ abort();
+ }
+
+ if (ne_add(3,2,3,4) != 8)
+ {
+ abort();
+ }
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/arch-diagnostics-1.c b/gcc/testsuite/gcc.target/aarch64/arch-diagnostics-1.c
new file mode 100644
index 00000000000..a0f59825227
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/arch-diagnostics-1.c
@@ -0,0 +1,7 @@
+/* { dg-error "unknown" "" {target "aarch64*-*-*" } } */
+/* { dg-options "-O2 -march=dummy" } */
+
+void f ()
+{
+ return;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/arch-diagnostics-2.c b/gcc/testsuite/gcc.target/aarch64/arch-diagnostics-2.c
new file mode 100644
index 00000000000..f1f3ea38c2a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/arch-diagnostics-2.c
@@ -0,0 +1,7 @@
+/* { dg-error "missing" "" {target "aarch64*-*-*" } } */
+/* { dg-options "-O2 -march=+dummy" } */
+
+void f ()
+{
+ return;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/arg-type-diagnostics-1.c b/gcc/testsuite/gcc.target/aarch64/arg-type-diagnostics-1.c
new file mode 100644
index 00000000000..55dd9f66f23
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/arg-type-diagnostics-1.c
@@ -0,0 +1,15 @@
+/* { dg-do compile { target { aarch64*-*-* } } } */
+/* { dg-options "-O2" } */
+
+#include "arm_neon.h"
+
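+/* Passing the variable 'a' as the final operand of the builtin is
+   expected to be rejected; the test only checks that the "incompatible
+   type for argument" diagnostic below is emitted.  */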
+void foo ()
+{
+ int a;
+ int32x2_t arg1;
+ int32x2_t arg2;
+ int32x2_t result;
+ arg1 = vcreate_s32 (UINT64_C (0x0000ffffffffffff));
+ arg2 = vcreate_s32 (UINT64_C (0x16497fffffffffff));
+ result = __builtin_aarch64_srsra_nv2si (arg1, arg2, a); /* { dg-error "incompatible type for argument" } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/asm-1.c b/gcc/testsuite/gcc.target/aarch64/asm-1.c
new file mode 100644
index 00000000000..bdfa4504f61
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/asm-1.c
@@ -0,0 +1,15 @@
+
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+
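+/* Check that a 16-byte aligned struct is accepted as a memory operand
+   of an asm that loads a q register from it.  */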
+typedef struct
+{
+ int i;
+ int y;
+} __attribute__ ((aligned (16))) struct64_t;
+
+void foo ()
+{
+ struct64_t tmp;
+ asm volatile ("ldr q0, %[value]" : : [value]"m"(tmp));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/clrsb.c b/gcc/testsuite/gcc.target/aarch64/clrsb.c
new file mode 100644
index 00000000000..ac8d2e05106
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/clrsb.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
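+/* __builtin_clrsb (count leading redundant sign bits) should map onto
+   the cls instruction matched below.  */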
+unsigned int functest (unsigned int x)
+{
+ return __builtin_clrsb (x);
+}
+
+/* { dg-final { scan-assembler "cls\tw" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/clz.c b/gcc/testsuite/gcc.target/aarch64/clz.c
new file mode 100644
index 00000000000..b650b131857
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/clz.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+unsigned int functest (unsigned int x)
+{
+ return __builtin_clz (x);
+}
+
+/* { dg-final { scan-assembler "clz\tw" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/cpu-diagnostics-1.c b/gcc/testsuite/gcc.target/aarch64/cpu-diagnostics-1.c
new file mode 100644
index 00000000000..de6b8a7da4d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/cpu-diagnostics-1.c
@@ -0,0 +1,7 @@
+/* { dg-error "unknown" "" {target "aarch64*-*-*" } } */
+/* { dg-options "-O2 -mcpu=dummy" } */
+
+void f ()
+{
+ return;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/cpu-diagnostics-2.c b/gcc/testsuite/gcc.target/aarch64/cpu-diagnostics-2.c
new file mode 100644
index 00000000000..284971d832c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/cpu-diagnostics-2.c
@@ -0,0 +1,7 @@
+/* { dg-error "missing" "" {target "aarch64*-*-*" } } */
+/* { dg-options "-O2 -mcpu=example-1+no" } */
+
+void f ()
+{
+ return;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/cpu-diagnostics-3.c b/gcc/testsuite/gcc.target/aarch64/cpu-diagnostics-3.c
new file mode 100644
index 00000000000..4e5d17c3b82
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/cpu-diagnostics-3.c
@@ -0,0 +1,7 @@
+/* { dg-error "unknown" "" {target "aarch64*-*-*" } } */
+/* { dg-options "-O2 -mcpu=example-1+dummy" } */
+
+void f ()
+{
+ return;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/cpu-diagnostics-4.c b/gcc/testsuite/gcc.target/aarch64/cpu-diagnostics-4.c
new file mode 100644
index 00000000000..4c246eb0172
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/cpu-diagnostics-4.c
@@ -0,0 +1,7 @@
+/* { dg-error "missing" "" {target "aarch64*-*-*" } } */
+/* { dg-options "-O2 -mcpu=+dummy" } */
+
+void f ()
+{
+ return;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/csinc-1.c b/gcc/testsuite/gcc.target/aarch64/csinc-1.c
new file mode 100644
index 00000000000..132a0f67939
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/csinc-1.c
@@ -0,0 +1,72 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
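+/* Each function below conditionally adds one to a value, either through
+   an if-converted branch or a ternary; the scans expect a single
+   conditional-select-increment (csinc) in every case.  */
+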
+unsigned int
+test_csinc32_ifcvt(unsigned int w0,
+ unsigned int w1,
+ unsigned int w2) {
+ /* { dg-final { scan-assembler "csinc\tw\[0-9\]*.*ne" } } */
+ if (w0 == w1)
+ ++ w2;
+
+ return w2;
+}
+
+unsigned int
+test_csinc32_condasn1(unsigned int w0,
+ unsigned int w1,
+ unsigned int w2,
+ unsigned int w3) {
+ unsigned int w4;
+
+ /* { dg-final { scan-assembler "csinc\tw\[0-9\]*.*ne" } } */
+ w4 = (w0 == w1) ? (w3 + 1) : w2;
+ return w4;
+}
+
+unsigned int
+test_csinc32_condasn2(unsigned int w0,
+ unsigned int w1,
+ unsigned int w2,
+ unsigned int w3) {
+ unsigned int w4;
+
+ /* { dg-final { scan-assembler "csinc\tw\[0-9\]*.*eq" } } */
+ w4 = (w0 == w1) ? w2 : (w3 + 1);
+ return w4;
+}
+
+unsigned long long
+test_csinc64_ifcvt(unsigned long long x0,
+ unsigned long long x1,
+ unsigned long long x2) {
+ /* { dg-final { scan-assembler "csinc\tx\[0-9\]*.*ne" } } */
+ if (x0 == x1)
+ ++ x2;
+
+ return x2;
+}
+
+unsigned long long
+test_csinc64_condasn1(unsigned long long x0,
+ unsigned long long x1,
+ unsigned long long x2,
+ unsigned long long x3) {
+ unsigned long long x4;
+
+ /* { dg-final { scan-assembler "csinc\tx\[0-9\]*.*ne" } } */
+ x4 = (x0 == x1) ? (x3 + 1) : x2;
+ return x4;
+}
+
+unsigned long long
+test_csinc64_condasn2(unsigned long long x0,
+ unsigned long long x1,
+ unsigned long long x2,
+ unsigned long long x3) {
+ unsigned long long x4;
+
+ /* { dg-final { scan-assembler "csinc\tx\[0-9\]*.*eq" } } */
+ x4 = (x0 == x1) ? x2 : (x3 + 1);
+ return x4;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/csinv-1.c b/gcc/testsuite/gcc.target/aarch64/csinv-1.c
new file mode 100644
index 00000000000..8d44449f477
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/csinv-1.c
@@ -0,0 +1,50 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
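+/* A conditional bitwise-not of one arm of each ternary should become a
+   csinv, as required by the scan-assembler directives.  */
+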
+unsigned int
+test_csinv32_condasn1(unsigned int w0,
+ unsigned int w1,
+ unsigned int w2,
+ unsigned int w3) {
+ unsigned int w4;
+
+ /* { dg-final { scan-assembler "csinv\tw\[0-9\]*.*ne" } } */
+ w4 = (w0 == w1) ? ~w3 : w2;
+ return w4;
+}
+
+unsigned int
+test_csinv32_condasn2(unsigned int w0,
+ unsigned int w1,
+ unsigned int w2,
+ unsigned int w3) {
+ unsigned int w4;
+
+ /* { dg-final { scan-assembler "csinv\tw\[0-9\]*.*eq" } } */
+ w4 = (w0 == w1) ? w3 : ~w2;
+ return w4;
+}
+
+unsigned long long
+test_csinv64_condasn1(unsigned long long x0,
+ unsigned long long x1,
+ unsigned long long x2,
+ unsigned long long x3) {
+ unsigned long long x4;
+
+ /* { dg-final { scan-assembler "csinv\tx\[0-9\]*.*ne" } } */
+ x4 = (x0 == x1) ? ~x3 : x2;
+ return x4;
+}
+
+unsigned long long
+test_csinv64_condasn2(unsigned long long x0,
+ unsigned long long x1,
+ unsigned long long x2,
+ unsigned long long x3) {
+ unsigned long long x4;
+
+ /* { dg-final { scan-assembler "csinv\tx\[0-9\]*.*eq" } } */
+ x4 = (x0 == x1) ? x3 : ~x2;
+ return x4;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/csneg-1.c b/gcc/testsuite/gcc.target/aarch64/csneg-1.c
new file mode 100644
index 00000000000..08001afd8ca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/csneg-1.c
@@ -0,0 +1,50 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
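+/* A conditional negation of one arm of each ternary should become a
+   csneg, as required by the scan-assembler directives.  */
+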
+int
+test_csneg32_condasn1(int w0,
+ int w1,
+ int w2,
+ int w3) {
+ int w4;
+
+ /* { dg-final { scan-assembler "csneg\tw\[0-9\]*.*ne" } } */
+ w4 = (w0 == w1) ? -w3 : w2;
+ return w4;
+}
+
+int
+test_csneg32_condasn2(int w0,
+ int w1,
+ int w2,
+ int w3) {
+ int w4;
+
+ /* { dg-final { scan-assembler "csneg\tw\[0-9\]*.*eq" } } */
+ w4 = (w0 == w1) ? w3 : -w2;
+ return w4;
+}
+
+long long
+test_csneg64_condasn1(long long x0,
+ long long x1,
+ long long x2,
+ long long x3) {
+ long long x4;
+
+ /* { dg-final { scan-assembler "csneg\tx\[0-9\]*.*ne" } } */
+ x4 = (x0 == x1) ? -x3 : x2;
+ return x4;
+}
+
+long long
+test_csneg64_condasn2(long long x0,
+ long long x1,
+ long long x2,
+ long long x3) {
+ long long x4;
+
+ /* { dg-final { scan-assembler "csneg\tx\[0-9\]*.*eq" } } */
+ x4 = (x0 == x1) ? x3 : -x2;
+ return x4;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/ctz.c b/gcc/testsuite/gcc.target/aarch64/ctz.c
new file mode 100644
index 00000000000..89d6fb442bf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ctz.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
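+/* There is no direct ctz instruction; the expected sequence reverses
+   the bits (rbit) and then counts leading zeros (clz).  */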
+unsigned int functest (unsigned int x)
+{
+ return __builtin_ctz (x);
+}
+
+/* { dg-final { scan-assembler "rbit\tw" } } */
+/* { dg-final { scan-assembler "clz\tw" } } */
+
diff --git a/gcc/testsuite/gcc.target/aarch64/extend.c b/gcc/testsuite/gcc.target/aarch64/extend.c
new file mode 100644
index 00000000000..f399e55ce8b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/extend.c
@@ -0,0 +1,170 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
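+/* Each uxtw/sxtw/uxth/sxth test checks that the zero- or sign-extension
+   (optionally shifted) is folded into the load, store, add or sub as an
+   extended-register operand rather than emitted separately.  */
+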
+int
+ldr_uxtw (int *arr, unsigned int i)
+{
+ /* { dg-final { scan-assembler "ldr\tw\[0-9\]+,.*uxtw #?2]" } } */
+ return arr[i];
+}
+
+int
+ldr_uxtw0 (char *arr, unsigned int i)
+{
+ /* { dg-final { scan-assembler "ldr\tw\[0-9\]+,.*uxtw]" } } */
+ return arr[i];
+}
+
+int
+ldr_sxtw (int *arr, int i)
+{
+ /* { dg-final { scan-assembler "ldr\tw\[0-9\]+,.*sxtw #?2]" } } */
+ return arr[i];
+}
+
+int
+ldr_sxtw0 (char *arr, int i)
+{
+ /* { dg-final { scan-assembler "ldr\tw\[0-9\]+,.*sxtw]" } } */
+ return arr[i];
+}
+
+unsigned long long
+adddi_uxtw (unsigned long long a, unsigned int i)
+{
+ /* { dg-final { scan-assembler "add\tx\[0-9\]+,.*uxtw #?3" } } */
+ return a + ((unsigned long long)i << 3);
+}
+
+unsigned long long
+adddi_uxtw0 (unsigned long long a, unsigned int i)
+{
+ /* { dg-final { scan-assembler "add\tx\[0-9\]+,.*uxtw\n" } } */
+ return a + i;
+}
+
+long long
+adddi_sxtw (long long a, int i)
+{
+ /* { dg-final { scan-assembler "add\tx\[0-9\]+,.*sxtw #?3" } } */
+ return a + ((long long)i << 3);
+}
+
+long long
+adddi_sxtw0 (long long a, int i)
+{
+ /* { dg-final { scan-assembler "add\tx\[0-9\]+,.*sxtw\n" } } */
+ return a + i;
+}
+
+unsigned long long
+subdi_uxtw (unsigned long long a, unsigned int i)
+{
+ /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*uxtw #?3" } } */
+ return a - ((unsigned long long)i << 3);
+}
+
+unsigned long long
+subdi_uxtw0 (unsigned long long a, unsigned int i)
+{
+ /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*uxtw\n" } } */
+ return a - i;
+}
+
+long long
+subdi_sxtw (long long a, int i)
+{
+ /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*sxtw #?3" } } */
+ return a - ((long long)i << 3);
+}
+
+long long
+subdi_sxtw0 (long long a, int i)
+{
+ /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*sxtw\n" } } */
+ return a - (long long)i;
+}
+
+unsigned long long
+subdi_uxth (unsigned long long a, unsigned short i)
+{
+ /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*uxth #?1" } } */
+ return a - ((unsigned long long)i << 1);
+}
+
+unsigned long long
+subdi_uxth0 (unsigned long long a, unsigned short i)
+{
+ /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*uxth\n" } } */
+ return a - i;
+}
+
+long long
+subdi_sxth (long long a, short i)
+{
+ /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*sxth #?1" } } */
+ return a - ((long long)i << 1);
+}
+
+long long
+subdi_sxth0 (long long a, short i)
+{
+ /* { dg-final { scan-assembler "sub\tx\[0-9\]+,.*sxth\n" } } */
+ return a - (long long)i;
+}
+
+unsigned int
+subsi_uxth (unsigned int a, unsigned short i)
+{
+ /* { dg-final { scan-assembler "sub\tw\[0-9\]+,.*uxth #?1" } } */
+ return a - ((unsigned int)i << 1);
+}
+
+unsigned int
+subsi_uxth0 (unsigned int a, unsigned short i)
+{
+ /* { dg-final { scan-assembler "sub\tw\[0-9\]+,.*uxth\n" } } */
+ return a - i;
+}
+
+int
+subsi_sxth (int a, short i)
+{
+ /* { dg-final { scan-assembler "sub\tw\[0-9\]+,.*sxth #?1" } } */
+ return a - ((int)i << 1);
+}
+
+int
+subsi_sxth0 (int a, short i)
+{
+ /* { dg-final { scan-assembler "sub\tw\[0-9\]+,.*sxth\n" } } */
+ return a - (int)i;
+}
+
+unsigned int
+addsi_uxth (unsigned int a, unsigned short i)
+{
+ /* { dg-final { scan-assembler "add\tw\[0-9\]+,.*uxth #?1" } } */
+ return a + ((unsigned int)i << 1);
+}
+
+unsigned int
+addsi_uxth0 (unsigned int a, unsigned short i)
+{
+ /* { dg-final { scan-assembler "add\tw\[0-9\]+,.*uxth\n" } } */
+ return a + i;
+}
+
+int
+addsi_sxth (int a, short i)
+{
+ /* { dg-final { scan-assembler "add\tw\[0-9\]+,.*sxth #?1" } } */
+ return a + ((int)i << 1);
+}
+
+int
+addsi_sxth0 (int a, short i)
+{
+ /* { dg-final { scan-assembler "add\tw\[0-9\]+,.*sxth\n" } } */
+ return a + (int)i;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/fcvt.x b/gcc/testsuite/gcc.target/aarch64/fcvt.x
new file mode 100644
index 00000000000..be50ee50f98
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/fcvt.x
@@ -0,0 +1,55 @@
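+/* Shared body for the fcvt_* tests.  The including file defines GPF
+   (the floating type), GPI (the integer type) and SUFFIX (the
+   function-name suffix) before including this file.  */
+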
+extern GPF SUFFIX(trunc) (GPF);
+extern GPF SUFFIX(ceil) (GPF);
+extern GPF SUFFIX(floor) (GPF);
+extern GPF SUFFIX(round) (GPF);
+
+GPI test1a (GPF x) {
+ return SUFFIX(__builtin_trunc)(x);
+}
+
+GPI test1b (GPF x)
+{
+ return SUFFIX(trunc)(x);
+}
+
+GPI test2a (GPF x)
+{
+ return SUFFIX(__builtin_lceil)(x);
+}
+
+GPI test2b (GPF x)
+{
+ return SUFFIX(ceil)(x);
+}
+
+GPI test2c (GPF x)
+{
+ return SUFFIX(__builtin_ceil)(x);
+}
+
+GPI test3a (GPF x)
+{
+ return SUFFIX(__builtin_lfloor)(x);
+}
+
+GPI test3b (GPF x)
+{
+ return SUFFIX(floor)(x);
+}
+
+GPI test3c (GPF x)
+{
+ return SUFFIX(__builtin_floor)(x);
+}
+
+GPI test4a (GPF x)
+{
+ return SUFFIX(__builtin_round)(x);
+}
+
+GPI test4b (GPF x)
+{
+ return SUFFIX(round)(x);
+}
+
+
diff --git a/gcc/testsuite/gcc.target/aarch64/fcvt_double_int.c b/gcc/testsuite/gcc.target/aarch64/fcvt_double_int.c
new file mode 100644
index 00000000000..697aab1c478
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/fcvt_double_int.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#define GPF double
+#define SUFFIX(x) x
+#define GPI int
+
+#include "fcvt.x"
+
+/* { dg-final { scan-assembler-times "fcvtzs\tw\[0-9\]+, *d\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtps\tx\[0-9\]+, *d\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fcvtps\tw\[0-9\]+, *d\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtms\tx\[0-9\]+, *d\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fcvtms\tw\[0-9\]+, *d\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtas\tw\[0-9\]+, *d\[0-9\]" 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/fcvt_double_long.c b/gcc/testsuite/gcc.target/aarch64/fcvt_double_long.c
new file mode 100644
index 00000000000..edf640bda4b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/fcvt_double_long.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#define GPF double
+#define SUFFIX(x) x
+#define GPI long
+
+#include "fcvt.x"
+
+/* { dg-final { scan-assembler-times "fcvtzs\tx\[0-9\]+, *d\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtps\tx\[0-9\]+, *d\[0-9\]" 3 } } */
+/* { dg-final { scan-assembler-times "fcvtms\tx\[0-9\]+, *d\[0-9\]" 3 } } */
+/* { dg-final { scan-assembler-times "fcvtas\tx\[0-9\]+, *d\[0-9\]" 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/fcvt_double_uint.c b/gcc/testsuite/gcc.target/aarch64/fcvt_double_uint.c
new file mode 100644
index 00000000000..a1fae764fdb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/fcvt_double_uint.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#define GPF double
+#define SUFFIX(x) x
+#define GPI unsigned int
+
+#include "fcvt.x"
+
+/* { dg-final { scan-assembler-times "fcvtzu\tw\[0-9\]+, *d\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtps\tx\[0-9\]+, *d\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fcvtpu\tw\[0-9\]+, *d\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtms\tx\[0-9\]+, *d\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fcvtmu\tw\[0-9\]+, *d\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtau\tw\[0-9\]+, *d\[0-9\]" 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/fcvt_double_ulong.c b/gcc/testsuite/gcc.target/aarch64/fcvt_double_ulong.c
new file mode 100644
index 00000000000..f95fe55c674
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/fcvt_double_ulong.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#define GPF double
+#define SUFFIX(x) x
+#define GPI unsigned long
+
+#include "fcvt.x"
+
+/* { dg-final { scan-assembler-times "fcvtzu\tx\[0-9\]+, *d\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtps\tx\[0-9\]+, *d\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fcvtpu\tx\[0-9\]+, *d\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtms\tx\[0-9\]+, *d\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fcvtmu\tx\[0-9\]+, *d\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtau\tx\[0-9\]+, *d\[0-9\]" 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/fcvt_float_int.c b/gcc/testsuite/gcc.target/aarch64/fcvt_float_int.c
new file mode 100644
index 00000000000..ac1509857c4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/fcvt_float_int.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#define GPF float
+#define SUFFIX(x) x##f
+#define GPI int
+
+#include "fcvt.x"
+
+/* { dg-final { scan-assembler-times "fcvtzs\tw\[0-9\]+, *s\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtps\tx\[0-9\]+, *s\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fcvtps\tw\[0-9\]+, *s\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtms\tx\[0-9\]+, *s\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fcvtms\tw\[0-9\]+, *s\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtas\tw\[0-9\]+, *s\[0-9\]" 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/fcvt_float_long.c b/gcc/testsuite/gcc.target/aarch64/fcvt_float_long.c
new file mode 100644
index 00000000000..928ac52f7bc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/fcvt_float_long.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#define GPF float
+#define SUFFIX(x) x##f
+#define GPI long
+
+#include "fcvt.x"
+
+/* { dg-final { scan-assembler-times "fcvtzs\tx\[0-9\]+, *s\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtps\tx\[0-9\]+, *s\[0-9\]" 3 } } */
+/* { dg-final { scan-assembler-times "fcvtms\tx\[0-9\]+, *s\[0-9\]" 3 } } */
+/* { dg-final { scan-assembler-times "fcvtas\tx\[0-9\]+, *s\[0-9\]" 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/fcvt_float_uint.c b/gcc/testsuite/gcc.target/aarch64/fcvt_float_uint.c
new file mode 100644
index 00000000000..a7d1868ba73
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/fcvt_float_uint.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#define GPF float
+#define SUFFIX(x) x##f
+#define GPI unsigned int
+
+#include "fcvt.x"
+
+/* { dg-final { scan-assembler-times "fcvtzu\tw\[0-9\]+, *s\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtps\tx\[0-9\]+, *s\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fcvtpu\tw\[0-9\]+, *s\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtms\tx\[0-9\]+, *s\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fcvtmu\tw\[0-9\]+, *s\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtau\tw\[0-9\]+, *s\[0-9\]" 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/fcvt_float_ulong.c b/gcc/testsuite/gcc.target/aarch64/fcvt_float_ulong.c
new file mode 100644
index 00000000000..ab6f46e7134
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/fcvt_float_ulong.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#define GPF float
+#define SUFFIX(x) x##f
+#define GPI unsigned long
+
+#include "fcvt.x"
+
+/* { dg-final { scan-assembler-times "fcvtzu\tx\[0-9\]+, *s\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtps\tx\[0-9\]+, *s\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fcvtpu\tx\[0-9\]+, *s\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtms\tx\[0-9\]+, *s\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fcvtmu\tx\[0-9\]+, *s\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtau\tx\[0-9\]+, *s\[0-9\]" 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/ffs.c b/gcc/testsuite/gcc.target/aarch64/ffs.c
new file mode 100644
index 00000000000..a3447619d23
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/ffs.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
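+/* __builtin_ffs is expected to expand to rbit+clz to locate the lowest
+   set bit, plus cmp+csinc to yield 0 for a zero input.  */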
+unsigned int functest(unsigned int x)
+{
+ return __builtin_ffs(x);
+}
+
+/* { dg-final { scan-assembler "cmp\tw" } } */
+/* { dg-final { scan-assembler "rbit\tw" } } */
+/* { dg-final { scan-assembler "clz\tw" } } */
+/* { dg-final { scan-assembler "csinc\tw" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/fmadd.c b/gcc/testsuite/gcc.target/aarch64/fmadd.c
new file mode 100644
index 00000000000..39975dbae0a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/fmadd.c
@@ -0,0 +1,55 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
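+/* fma calls with negated inputs or results should map onto the four
+   fused multiply instructions (fmadd, fmsub, fnmsub, fnmadd), each
+   counted once per precision by the scans at the end.  */
+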
+extern double fma (double, double, double);
+extern float fmaf (float, float, float);
+
+double test_fma1 (double x, double y, double z)
+{
+ return fma (x, y, z);
+}
+
+float test_fma2 (float x, float y, float z)
+{
+ return fmaf (x, y, z);
+}
+
+double test_fnma1 (double x, double y, double z)
+{
+ return fma (-x, y, z);
+}
+
+float test_fnma2 (float x, float y, float z)
+{
+ return fmaf (-x, y, z);
+}
+
+double test_fms1 (double x, double y, double z)
+{
+ return fma (x, y, -z);
+}
+
+float test_fms2 (float x, float y, float z)
+{
+ return fmaf (x, y, -z);
+}
+
+double test_fnms1 (double x, double y, double z)
+{
+ return fma (-x, y, -z);
+}
+
+float test_fnms2 (float x, float y, float z)
+{
+ return fmaf (-x, y, -z);
+}
+
+/* { dg-final { scan-assembler-times "fmadd\td\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fmadd\ts\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fmsub\td\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fmsub\ts\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fnmsub\td\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fnmsub\ts\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fnmadd\td\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fnmadd\ts\[0-9\]" 1 } } */
+
diff --git a/gcc/testsuite/gcc.target/aarch64/fnmadd-fastmath.c b/gcc/testsuite/gcc.target/aarch64/fnmadd-fastmath.c
new file mode 100644
index 00000000000..9c115df08ef
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/fnmadd-fastmath.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ffast-math" } */
+
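+/* Under -ffast-math the negation of an fma result may be folded into a
+   single fnmadd, which the scans require once per precision.  */
+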
+extern double fma (double, double, double);
+extern float fmaf (float, float, float);
+
+double test_fma1 (double x, double y, double z)
+{
+ return - fma (x, y, z);
+}
+
+float test_fma2 (float x, float y, float z)
+{
+ return - fmaf (x, y, z);
+}
+
+/* { dg-final { scan-assembler-times "fnmadd\td\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fnmadd\ts\[0-9\]" 1 } } */
+
diff --git a/gcc/testsuite/gcc.target/aarch64/frint.x b/gcc/testsuite/gcc.target/aarch64/frint.x
new file mode 100644
index 00000000000..1403740686e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/frint.x
@@ -0,0 +1,66 @@
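+/* Shared body for the frint_double/frint_float tests; the including
+   file defines GPF and SUFFIX to select the precision under test.  */
+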
+extern GPF SUFFIX(trunc) (GPF);
+extern GPF SUFFIX(ceil) (GPF);
+extern GPF SUFFIX(floor) (GPF);
+extern GPF SUFFIX(nearbyint) (GPF);
+extern GPF SUFFIX(rint) (GPF);
+extern GPF SUFFIX(round) (GPF);
+
+GPF test1a (GPF x)
+{
+ return SUFFIX(__builtin_trunc)(x);
+}
+
+GPF test1b (GPF x)
+{
+ return SUFFIX(trunc)(x);
+}
+
+GPF test2a (GPF x)
+{
+ return SUFFIX(__builtin_ceil)(x);
+}
+
+GPF test2b (GPF x)
+{
+ return SUFFIX(ceil)(x);
+}
+
+GPF test3a (GPF x)
+{
+ return SUFFIX(__builtin_floor)(x);
+}
+
+GPF test3b (GPF x)
+{
+ return SUFFIX(floor)(x);
+}
+
+GPF test4a (GPF x)
+{
+ return SUFFIX(__builtin_nearbyint)(x);
+}
+
+GPF test4b (GPF x)
+{
+ return SUFFIX(nearbyint)(x);
+}
+
+GPF test5a (GPF x)
+{
+ return SUFFIX(__builtin_rint)(x);
+}
+
+GPF test5b (GPF x)
+{
+ return SUFFIX(rint)(x);
+}
+
+GPF test6a (GPF x)
+{
+ return SUFFIX(__builtin_round)(x);
+}
+
+GPF test6b (GPF x)
+{
+ return SUFFIX(round)(x);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/frint_double.c b/gcc/testsuite/gcc.target/aarch64/frint_double.c
new file mode 100644
index 00000000000..96139496ca4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/frint_double.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#define GPF double
+#define SUFFIX(x) x
+
+#include "frint.x"
+
+/* { dg-final { scan-assembler-times "frintz\td\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "frintp\td\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "frintm\td\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "frinti\td\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "frintx\td\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "frinta\td\[0-9\]" 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/frint_float.c b/gcc/testsuite/gcc.target/aarch64/frint_float.c
new file mode 100644
index 00000000000..493ec37f940
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/frint_float.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#define GPF float
+#define SUFFIX(x) x##f
+
+#include "frint.x"
+
+/* { dg-final { scan-assembler-times "frintz\ts\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "frintp\ts\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "frintm\ts\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "frinti\ts\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "frintx\ts\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "frinta\ts\[0-9\]" 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/index.c b/gcc/testsuite/gcc.target/aarch64/index.c
new file mode 100644
index 00000000000..582771ba1c2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/index.c
@@ -0,0 +1,111 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+/* { dg-final { scan-assembler-not "\[us\]xtw\t" } } */
+/* { dg-final { scan-assembler-not "\[us\]bfiz\t" } } */
+/* { dg-final { scan-assembler-not "lsl\t" } } */
+
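+/* Every function below indexes an array with a value loaded from that
+   same array.  The extension of the inner index must be folded into the
+   addressing mode, so no separate extend or shift instruction may
+   appear (enforced by the scan-assembler-not directives above).  */
+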
+int
+load_scaled_sxtw (int *arr, int i)
+{
+ return arr[arr[i]];
+}
+
+unsigned int
+load_scaled_uxtw (unsigned int *arr, unsigned int i)
+{
+ return arr[arr[i]];
+}
+
+void
+store_scaled_sxtw (int *arr, int i)
+{
+ arr[arr[i]] = 0;
+}
+
+void
+store_scaled_uxtw (unsigned int *arr, unsigned int i)
+{
+ arr[arr[i]] = 0;
+}
+
+int
+load_unscaled_sxtw (signed char *arr, int i)
+{
+ return arr[arr[i]];
+}
+
+unsigned int
+load_unscaled_uxtw (unsigned char *arr, unsigned int i)
+{
+ return arr[arr[i]];
+}
+
+void
+store_unscaled_sxtw (signed char *arr, int i)
+{
+ arr[arr[i]] = 0;
+}
+
+void
+store_unscaled_uxtw (unsigned char *arr, unsigned int i)
+{
+ arr[arr[i]] = 0;
+}
+
+
+
+int
+load_scaled_tmp_sxtw (int *arr, int i)
+{
+ int j = arr[i];
+ return arr[j];
+}
+
+unsigned int
+load_scaled_tmp_uxtw (unsigned int *arr, unsigned int i)
+{
+ unsigned int j = arr[i];
+ return arr[j];
+}
+
+void
+store_scaled_tmp_sxtw (int *arr, int i)
+{
+ int j = arr[i];
+ arr[j] = 0;
+}
+
+void
+store_scaled_tmp_uxtw (unsigned int *arr, unsigned int i)
+{
+ unsigned int j = arr[i];
+ arr[j] = 0;
+}
+
+int
+load_unscaled_tmp_sxtw (signed char *arr, int i)
+{
+ signed char j = arr[i];
+ return arr[j];
+}
+
+unsigned int
+load_unscaled_tmp_uxtw (unsigned char *arr, unsigned int i)
+{
+ unsigned char j = arr[i];
+ return arr[j];
+}
+
+void
+store_unscaled_tmp_sxtw (signed char *arr, int i)
+{
+ signed char j = arr[i];
+ arr[j] = 0;
+}
+
+void
+store_unscaled_tmp_uxtw (unsigned char *arr, unsigned int i)
+{
+ unsigned char j = arr[i];
+ arr[j] = 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/mneg-1.c b/gcc/testsuite/gcc.target/aarch64/mneg-1.c
new file mode 100644
index 00000000000..618854a6a52
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/mneg-1.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
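+/* Multiplying a negated operand (or negating the product) should emit
+   a single mneg rather than a mul followed by a neg.  */
+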
+int r;
+
+void test (int a, int b)
+{
+ /* { dg-final { scan-assembler "mneg\tw\[0-9\]*, w\[0-9\]*, w\[0-9\]*\n" } } */
+ r = (-a) * b;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/mneg-2.c b/gcc/testsuite/gcc.target/aarch64/mneg-2.c
new file mode 100644
index 00000000000..25f817b9c5c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/mneg-2.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+int r;
+
+void test (int a, int b)
+{
+ /* { dg-final { scan-assembler "mneg\tw\[0-9\]*, w\[0-9\]*, w\[0-9\]*\n" } } */
+ r = a * (-b);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/mneg-3.c b/gcc/testsuite/gcc.target/aarch64/mneg-3.c
new file mode 100644
index 00000000000..d9a135465a2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/mneg-3.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+int r;
+
+void test (int a, int b)
+{
+ /* { dg-final { scan-assembler "mneg\tw\[0-9\]*, w\[0-9\]*, w\[0-9\]*\n" } } */
+ r = - (a * b);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/mnegl-1.c b/gcc/testsuite/gcc.target/aarch64/mnegl-1.c
new file mode 100644
index 00000000000..b45debbc267
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/mnegl-1.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
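+/* A negated widening multiply should collapse to a single smnegl
+   (signed) or umnegl (unsigned), as the scans require.  */
+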
+long long r;
+
+void test_signed (int a, int b)
+{
+ /* { dg-final { scan-assembler "smnegl\tx\[0-9\]*, w\[0-9\]*, w\[0-9\]*\n" } } */
+ r = (-((long long) a)) * ((long long) b);
+}
+
+void test_unsigned (unsigned int a, unsigned int b)
+{
+ /* { dg-final { scan-assembler "umnegl\tx\[0-9\]*, w\[0-9\]*, w\[0-9\]*\n" } } */
+ r = (-((long long) a)) * ((long long) b);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/mnegl-2.c b/gcc/testsuite/gcc.target/aarch64/mnegl-2.c
new file mode 100644
index 00000000000..1c5dc758196
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/mnegl-2.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+long long r;
+
+void test_signed (int a, int b)
+{
+ /* { dg-final { scan-assembler "smnegl\tx\[0-9\]*, w\[0-9\]*, w\[0-9\]*\n" } } */
+ r = ((long long) a) * (-((long long) b));
+}
+
+void test_unsigned (unsigned int a, unsigned int b)
+{
+ /* { dg-final { scan-assembler "umnegl\tx\[0-9\]*, w\[0-9\]*, w\[0-9\]*\n" } } */
+ r = ((long long) a) * (-((long long) b));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c b/gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c
new file mode 100644
index 00000000000..0f23cc9c7b5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c
@@ -0,0 +1,125 @@
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+
+#include "arm_neon.h"
+
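+/* Instantiate each narrowing *_high intrinsic once per element type.
+   The TWO, TWOn and ONE macros emit one wrapper function per
+   instantiation so the scan-assembler-times counts stay exact.  */
+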
+#define TWO(name, rettype, rmwtype, intype, fs) \
+ rettype test_ ## name ## _ ## fs \
+ (rmwtype a, intype b, intype c) \
+ { \
+ return name ## _ ## fs (a, b, c); \
+ }
+
+TWO (vsubhn_high, int8x16_t, int8x8_t, int16x8_t, s16)
+TWO (vsubhn_high, int16x8_t, int16x4_t, int32x4_t, s32)
+TWO (vsubhn_high, int32x4_t, int32x2_t, int64x2_t, s64)
+TWO (vsubhn_high, uint8x16_t, uint8x8_t, uint16x8_t, u16)
+TWO (vsubhn_high, uint16x8_t, uint16x4_t, uint32x4_t, u32)
+TWO (vsubhn_high, uint32x4_t, uint32x2_t, uint64x2_t, u64)
+
+TWO (vaddhn_high, int8x16_t, int8x8_t, int16x8_t, s16)
+TWO (vaddhn_high, int16x8_t, int16x4_t, int32x4_t, s32)
+TWO (vaddhn_high, int32x4_t, int32x2_t, int64x2_t, s64)
+TWO (vaddhn_high, uint8x16_t, uint8x8_t, uint16x8_t, u16)
+TWO (vaddhn_high, uint16x8_t, uint16x4_t, uint32x4_t, u32)
+TWO (vaddhn_high, uint32x4_t, uint32x2_t, uint64x2_t, u64)
+
+TWO (vrsubhn_high, int8x16_t, int8x8_t, int16x8_t, s16)
+TWO (vrsubhn_high, int16x8_t, int16x4_t, int32x4_t, s32)
+TWO (vrsubhn_high, int32x4_t, int32x2_t, int64x2_t, s64)
+TWO (vrsubhn_high, uint8x16_t, uint8x8_t, uint16x8_t, u16)
+TWO (vrsubhn_high, uint16x8_t, uint16x4_t, uint32x4_t, u32)
+TWO (vrsubhn_high, uint32x4_t, uint32x2_t, uint64x2_t, u64)
+
+TWO (vraddhn_high, int8x16_t, int8x8_t, int16x8_t, s16)
+TWO (vraddhn_high, int16x8_t, int16x4_t, int32x4_t, s32)
+TWO (vraddhn_high, int32x4_t, int32x2_t, int64x2_t, s64)
+TWO (vraddhn_high, uint8x16_t, uint8x8_t, uint16x8_t, u16)
+TWO (vraddhn_high, uint16x8_t, uint16x4_t, uint32x4_t, u32)
+TWO (vraddhn_high, uint32x4_t, uint32x2_t, uint64x2_t, u64)
+
+#define TWOn(name, rettype, rmwtype, intype, fs) \
+ rettype test_ ## name ## _ ## fs \
+ (rmwtype a, intype b) \
+ { \
+ return name ## _ ## fs (a, b, 4); \
+ }
+
+TWOn (vrshrn_high_n, int8x16_t, int8x8_t, int16x8_t, s16)
+TWOn (vrshrn_high_n, int16x8_t, int16x4_t, int32x4_t, s32)
+TWOn (vrshrn_high_n, int32x4_t, int32x2_t, int64x2_t, s64)
+TWOn (vrshrn_high_n, uint8x16_t, uint8x8_t, uint16x8_t, u16)
+TWOn (vrshrn_high_n, uint16x8_t, uint16x4_t, uint32x4_t, u32)
+TWOn (vrshrn_high_n, uint32x4_t, uint32x2_t, uint64x2_t, u64)
+
+TWOn (vshrn_high_n, int8x16_t, int8x8_t, int16x8_t, s16)
+TWOn (vshrn_high_n, int16x8_t, int16x4_t, int32x4_t, s32)
+TWOn (vshrn_high_n, int32x4_t, int32x2_t, int64x2_t, s64)
+TWOn (vshrn_high_n, uint8x16_t, uint8x8_t, uint16x8_t, u16)
+TWOn (vshrn_high_n, uint16x8_t, uint16x4_t, uint32x4_t, u32)
+TWOn (vshrn_high_n, uint32x4_t, uint32x2_t, uint64x2_t, u64)
+
+TWOn (vqshrun_high_n, uint8x16_t, uint8x8_t, int16x8_t, s16)
+TWOn (vqshrun_high_n, uint16x8_t, uint16x4_t, int32x4_t, s32)
+TWOn (vqshrun_high_n, uint32x4_t, uint32x2_t, int64x2_t, s64)
+
+TWOn (vqrshrun_high_n, uint8x16_t, uint8x8_t, int16x8_t, s16)
+TWOn (vqrshrun_high_n, uint16x8_t, uint16x4_t, int32x4_t, s32)
+TWOn (vqrshrun_high_n, uint32x4_t, uint32x2_t, int64x2_t, s64)
+
+TWOn (vqshrn_high_n, int8x16_t, int8x8_t, int16x8_t, s16)
+TWOn (vqshrn_high_n, int16x8_t, int16x4_t, int32x4_t, s32)
+TWOn (vqshrn_high_n, int32x4_t, int32x2_t, int64x2_t, s64)
+TWOn (vqshrn_high_n, uint8x16_t, uint8x8_t, uint16x8_t, u16)
+TWOn (vqshrn_high_n, uint16x8_t, uint16x4_t, uint32x4_t, u32)
+TWOn (vqshrn_high_n, uint32x4_t, uint32x2_t, uint64x2_t, u64)
+
+TWOn (vqrshrn_high_n, int8x16_t, int8x8_t, int16x8_t, s16)
+TWOn (vqrshrn_high_n, int16x8_t, int16x4_t, int32x4_t, s32)
+TWOn (vqrshrn_high_n, int32x4_t, int32x2_t, int64x2_t, s64)
+TWOn (vqrshrn_high_n, uint8x16_t, uint8x8_t, uint16x8_t, u16)
+TWOn (vqrshrn_high_n, uint16x8_t, uint16x4_t, uint32x4_t, u32)
+TWOn (vqrshrn_high_n, uint32x4_t, uint32x2_t, uint64x2_t, u64)
+
+#define ONE(name, rettype, rmwtype, intype, fs) \
+ rettype test_ ## name ## _ ## fs \
+ (rmwtype a, intype b) \
+ { \
+ return name ## _ ## fs (a, b); \
+ }
+
+ONE (vqmovn_high, int8x16_t, int8x8_t, int16x8_t, s16)
+ONE (vqmovn_high, int16x8_t, int16x4_t, int32x4_t, s32)
+ONE (vqmovn_high, int32x4_t, int32x2_t, int64x2_t, s64)
+ONE (vqmovn_high, uint8x16_t, uint8x8_t, uint16x8_t, u16)
+ONE (vqmovn_high, uint16x8_t, uint16x4_t, uint32x4_t, u32)
+ONE (vqmovn_high, uint32x4_t, uint32x2_t, uint64x2_t, u64)
+
+ONE (vqmovun_high, uint8x16_t, uint8x8_t, int16x8_t, s16)
+ONE (vqmovun_high, uint16x8_t, uint16x4_t, int32x4_t, s32)
+ONE (vqmovun_high, uint32x4_t, uint32x2_t, int64x2_t, s64)
+
+ONE (vmovn_high, int8x16_t, int8x8_t, int16x8_t, s16)
+ONE (vmovn_high, int16x8_t, int16x4_t, int32x4_t, s32)
+ONE (vmovn_high, int32x4_t, int32x2_t, int64x2_t, s64)
+ONE (vmovn_high, uint8x16_t, uint8x8_t, uint16x8_t, u16)
+ONE (vmovn_high, uint16x8_t, uint16x4_t, uint32x4_t, u32)
+ONE (vmovn_high, uint32x4_t, uint32x2_t, uint64x2_t, u64)
+
+
+/* { dg-final { scan-assembler-times "\\tsubhn2 v" 6} } */
+/* { dg-final { scan-assembler-times "\\taddhn2\\tv" 6} } */
+/* { dg-final { scan-assembler-times "rsubhn2 v" 6} } */
+/* { dg-final { scan-assembler-times "raddhn2\\tv" 6} } */
+/* { dg-final { scan-assembler-times "\\trshrn2 v" 6} } */
+/* { dg-final { scan-assembler-times "\\tshrn2 v" 6} } */
+/* { dg-final { scan-assembler-times "sqshrun2 v" 3} } */
+/* { dg-final { scan-assembler-times "sqrshrun2 v" 3} } */
+/* { dg-final { scan-assembler-times "sqshrn2 v" 3} } */
+/* { dg-final { scan-assembler-times "uqshrn2 v" 3} } */
+/* { dg-final { scan-assembler-times "sqrshrn2 v" 3} } */
+/* { dg-final { scan-assembler-times "uqrshrn2 v" 3} } */
+/* { dg-final { scan-assembler-times "uqxtn2 v" 3} } */
+/* { dg-final { scan-assembler-times "sqxtn2 v" 3} } */
+/* { dg-final { scan-assembler-times "sqxtun2 v" 3} } */
+/* { dg-final { scan-assembler-times "\\txtn2 v" 6} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/pic-constantpool1.c b/gcc/testsuite/gcc.target/aarch64/pic-constantpool1.c
new file mode 100644
index 00000000000..3109d9d4e9a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/pic-constantpool1.c
@@ -0,0 +1,30 @@
+/* { dg-options "-O2 -mcmodel=small -fPIC" } */
+/* { dg-do compile } */
+
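+/* Reduced compile-only test: floating-point constants referenced under
+   -fPIC with the small code model must be reachable via the constant
+   pool.  */
+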
+extern int __finite (double __value) __attribute__ ((__nothrow__)) __attribute__ ((__const__));
+int
+__ecvt_r (value, ndigit, decpt, sign, buf, len)
+ double value;
+ int ndigit, *decpt, *sign;
+ char *buf;
+{
+ if ((sizeof (value) == sizeof (float) ? __finitef (value) : __finite (value)) && value != 0.0)
+ {
+ double d;
+ double f = 1.0;
+ d = -value;
+ if (d < 1.0e-307)
+ {
+ do
+ {
+ f *= 10.0;
+ }
+ while (d * f < 1.0);
+ }
+ }
+ if (ndigit <= 0 && len > 0)
+ {
+ buf[0] = '\0';
+ *sign = (sizeof (value) == sizeof (float) ? __finitef (value) : __finite (value)) ? (sizeof (value) == sizeof (float) ? __signbitf (value) : __signbit (value)) != 0 : 0;
+ }
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/pic-symrefplus.c b/gcc/testsuite/gcc.target/aarch64/pic-symrefplus.c
new file mode 100644
index 00000000000..f277a528578
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/pic-symrefplus.c
@@ -0,0 +1,128 @@
+/* { dg-options "-O2 -mcmodel=small -fPIC -fno-builtin" } */
+/* { dg-do compile } */
+
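+/* Reduced compile-only test for small-model PIC references to a symbol
+   plus an offset, as the file name suggests.  */
+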
+typedef long unsigned int size_t;
+enum
+{
+ __LC_TIME = 2,
+};
+enum
+{
+ ABDAY_1 = (((__LC_TIME) << 16) | (0)),
+ DAY_1,
+ ABMON_1,
+ MON_1,
+ D_T_FMT,
+};
+typedef struct __locale_struct
+{
+ struct locale_data *__locales[13];
+} *__locale_t;
+struct tm
+{
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+};
+struct locale_data
+{
+ const char *name;
+ struct
+ {
+ const char *string;
+ }
+ values [];
+};
+extern const struct locale_data _nl_C_LC_TIME __attribute__ ((visibility ("hidden")));
+char *
+__strptime_internal (rp, fmt, tmp, statep , locale)
+ const char *rp;
+ const char *fmt;
+ __locale_t locale;
+ void *statep;
+{
+ struct locale_data *const current = locale->__locales[__LC_TIME];
+ const char *rp_backup;
+ const char *rp_longest;
+ int cnt;
+ size_t val;
+ enum ptime_locale_status { not, loc, raw } decided_longest;
+ struct __strptime_state
+ {
+ enum ptime_locale_status decided : 2;
+ } s;
+ struct tm tmb;
+ struct tm *tm;
+ if (statep == ((void *)0))
+ {
+ memset (&s, 0, sizeof (s));
+ }
+ {
+ tm = &tmb;
+ }
+ while (*fmt != '\0')
+ {
+ if (*fmt != '%')
+ {
+ if (*fmt++ != *rp++) return ((void *)0);
+ continue;
+ }
+ if (statep != ((void *)0))
+ {
+ ++fmt;
+ }
+ rp_backup = rp;
+ switch (*fmt++)
+ {
+ case '%':
+ for (cnt = 0; cnt < 7; ++cnt)
+ {
+ const char *trp;
+ if (s.decided !=raw)
+ {
+ if (({ size_t len = strlen ((current->values[((int) (DAY_1 + cnt) & 0xffff)].string)); int result = __strncasecmp_l (((current->values[((int) (DAY_1 + cnt) & 0xffff)].string)), (trp), len, locale) == 0; if (result) (trp) += len; result; })
+ && trp > rp_longest)
+ {
+ }
+ if (({ size_t len = strlen ((current->values[((int) (ABDAY_1 + cnt) & 0xffff)].string)); int result = __strncasecmp_l (((current->values[((int) (ABDAY_1 + cnt) & 0xffff)].string)), (trp), len, locale) == 0; if (result) (trp) += len; result; })
+ && trp > rp_longest)
+ {
+ }
+ }
+ if (s.decided != loc
+ && (((trp = rp, ({ size_t len = strlen ((&_nl_C_LC_TIME.values[((int) (DAY_1) & 0xffff)].string)[cnt]); int result = __strncasecmp_l (((&_nl_C_LC_TIME.values[((int) (DAY_1) & 0xffff)].string)[cnt]), (trp), len, locale) == 0; if (result) (trp) += len; result; }))
+ && trp > rp_longest)
+ || ((trp = rp, ({ size_t len = strlen ((&_nl_C_LC_TIME.values[((int) (ABDAY_1) & 0xffff)].string)[cnt]); int result = __strncasecmp_l (((&_nl_C_LC_TIME.values[((int) (ABDAY_1) & 0xffff)].string)[cnt]), (rp), len, locale) == 0; if (result) (rp) += len; result; }))
+ && trp > rp_longest)))
+ {
+ }
+ }
+ {
+ const char *trp;
+ if (s.decided != loc
+ && (((trp = rp, ({ size_t len = strlen ((&_nl_C_LC_TIME.values[((int) (MON_1) & 0xffff)].string)[cnt]); int result = __strncasecmp_l (((&_nl_C_LC_TIME.values[((int) (MON_1) & 0xffff)].string)[cnt]), (trp), len, locale) == 0; if (result) (trp) += len; result; }))
+ && trp > rp_longest)
+ || ((trp = rp, ({ size_t len = strlen ((&_nl_C_LC_TIME.values[((int) (ABMON_1) & 0xffff)].string)[cnt]); int result = __strncasecmp_l (((&_nl_C_LC_TIME.values[((int) (ABMON_1) & 0xffff)].string)[cnt]), (trp), len, locale) == 0; if (result) (trp) += len; result; }))
+ && trp > rp_longest)))
+ {
+ }
+ }
+ case 'c':
+ {
+ if (!(*((current->values[((int) (D_T_FMT) & 0xffff)].string)) != '\0' && (rp = __strptime_internal (rp, ((current->values[((int) (D_T_FMT) & 0xffff)].string)), tm, &s , locale)) != ((void *)0)))
+ {
+ rp = rp_backup;
+ }
+ }
+ case 'C':
+ do { int __n = 2; val = 0; while (*rp == ' ') ++rp; if (*rp < '0' || *rp > '9') return ((void *)0); do { val *= 10; val += *rp++ - '0'; } while (--__n > 0 && val * 10 <= 99 && *rp >= '0' && *rp <= '9'); if (val < 0 || val > 99) return ((void *)0); } while (0);
+ case 'F':
+ if (!(*("%Y-%m-%d") != '\0' && (rp = __strptime_internal (rp, ("%Y-%m-%d"), tm, &s , locale)) != ((void *)0)))
+ tm->tm_hour = val % 12;
+ }
+ }
+}
+char *
+__strptime_l (buf, format, tm , locale)
+{
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/reload-valid-spoff.c b/gcc/testsuite/gcc.target/aarch64/reload-valid-spoff.c
new file mode 100644
index 00000000000..b44e56023af
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/reload-valid-spoff.c
@@ -0,0 +1,66 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcmodel=large -fno-builtin" } */
+/* { dg-skip-if "-mcmodel=large -fPIC not currently supported" { aarch64-*-* } { "-fPIC" } { "" } } */
+
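+/* Reduced testcase: the heavily macro-expanded string comparisons
+   combined with -mcmodel=large are intended to exercise reload with
+   stack-pointer offsets that need legitimizing, per the file name.  */
+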
+typedef long unsigned int size_t;
+typedef unsigned short int sa_family_t;
+
+struct sockaddr
+{
+ sa_family_t sa_family;
+ char sa_data[14];
+};
+struct arpreq
+{
+ int arp_flags;
+ struct sockaddr arp_netmask;
+};
+typedef struct _IO_FILE FILE;
+extern char *fgets (char *__restrict __s, int __n, FILE *__restrict __stream);
+extern struct _IO_FILE *stderr;
+extern int optind;
+struct aftype {
+ int (*input) (int type, char *bufp, struct sockaddr *);
+};
+struct aftype *ap;
+static int arp_set(char **args)
+{
+ char host[128];
+ struct arpreq req;
+ struct sockaddr sa;
+ memset((char *) &req, 0, sizeof(req));
+ if (*args == ((void *)0)) {
+ fprintf(stderr, ("arp: need host name\n"));
+ }
+ safe_strncpy(host, *args++, (sizeof host));
+ if (ap->input(0, host, &sa) < 0) {
+ }
+ while (*args != ((void *)0)) {
+ if (!__extension__ ({ size_t __s1_len, __s2_len; (__builtin_constant_p (*args) && __builtin_constant_p ("netmask") && (__s1_len = strlen (*args), __s2_len = strlen ("netmask"), (!((size_t)(const void *)((*args) + 1) - (size_t)(const void *)(*args) == 1) || __s1_len >= 4) && (!((size_t)(const void *)(("netmask") + 1) - (size_t)(const void *)("netmask") == 1) || __s2_len >= 4)) ? __builtin_strcmp (*args, "netmask") : (__builtin_constant_p (*args) && ((size_t)(const void *)((*args) + 1) - (size_t)(const void *)(*args) == 1) && (__s1_len = strlen (*args), __s1_len < 4) ? (__builtin_constant_p ("netmask") && ((size_t)(const void *)(("netmask") + 1) - (size_t)(const void *)("netmask") == 1) ? __builtin_strcmp (*args, "netmask") : (__extension__ ({ __const unsigned char *__s2 = (__const unsigned char *) (__const char *) ("netmask"); register int __result = (((__const unsigned char *) (__const char *) (*args))[0] - __s2[0]); if (__s1_len > 0 && __result == 0) { __result = (((__const unsigned char *) (__const char *) (*args))[1] - __s2[1]); if (__s1_len > 1 && __result == 0) { __result = (((__const unsigned char *) (__const char *) (*args))[2] - __s2[2]); if (__s1_len > 2 && __result == 0) __result = (((__const unsigned char *) (__const char *) (*args))[3] - __s2[3]); } } __result; }))) : (__builtin_constant_p ("netmask") && ((size_t)(const void *)(("netmask") + 1) - (size_t)(const void *)("netmask") == 1) && (__s2_len = strlen ("netmask"), __s2_len < 4) ? (__builtin_constant_p (*args) && ((size_t)(const void *)((*args) + 1) - (size_t)(const void *)(*args) == 1) ? __builtin_strcmp (*args, "netmask") : (__extension__ ({ __const unsigned char *__s1 = (__const unsigned char *) (__const char *) (*args); register int __result = __s1[0] - ((__const unsigned char *) (__const char *) ("netmask"))[0]; if (__s2_len > 0 && __result == 0) { __result = (__s1[1] - ((__const unsigned char *) (__const char *) ("netmask"))[1]); if (__s2_len > 1 && __result == 0) { __result = (__s1[2] - ((__const unsigned char *) (__const char *) ("netmask"))[2]); if (__s2_len > 2 && __result == 0) __result = (__s1[3] - ((__const unsigned char *) (__const char *) ("netmask"))[3]); } } __result; }))) : __builtin_strcmp (*args, "netmask")))); })) {
+ if (__extension__ ({ size_t __s1_len, __s2_len; (__builtin_constant_p (*args) && __builtin_constant_p ("255.255.255.255") && (__s1_len = strlen (*args), __s2_len = strlen ("255.255.255.255"), (!((size_t)(const void *)((*args) + 1) - (size_t)(const void *)(*args) == 1) || __s1_len >= 4) && (!((size_t)(const void *)(("255.255.255.255") + 1) - (size_t)(const void *)("255.255.255.255") == 1) || __s2_len >= 4)) ? __builtin_strcmp (*args, "255.255.255.255") : (__builtin_constant_p (*args) && ((size_t)(const void *)((*args) + 1) - (size_t)(const void *)(*args) == 1) && (__s1_len = strlen (*args), __s1_len < 4) ? (__builtin_constant_p ("255.255.255.255") && ((size_t)(const void *)(("255.255.255.255") + 1) - (size_t)(const void *)("255.255.255.255") == 1) ? __builtin_strcmp (*args, "255.255.255.255") : (__extension__ ({ __const unsigned char *__s2 = (__const unsigned char *) (__const char *) ("255.255.255.255"); register int __result = (((__const unsigned char *) (__const char *) (*args))[0] - __s2[0]); if (__s1_len > 0 && __result == 0) { __result = (((__const unsigned char *) (__const char *) (*args))[1] - __s2[1]); if (__s1_len > 1 && __result == 0) { __result = (((__const unsigned char *) (__const char *) (*args))[2] - __s2[2]); if (__s1_len > 2 && __result == 0) __result = (((__const unsigned char *) (__const char *) (*args))[3] - __s2[3]); } } __result; }))) : (__builtin_constant_p ("255.255.255.255") && ((size_t)(const void *)(("255.255.255.255") + 1) - (size_t)(const void *)("255.255.255.255") == 1) && (__s2_len = strlen ("255.255.255.255"), __s2_len < 4) ? (__builtin_constant_p (*args) && ((size_t)(const void *)((*args) + 1) - (size_t)(const void *)(*args) == 1) ? __builtin_strcmp (*args, "255.255.255.255") : (__extension__ ({ __const unsigned char *__s1 = (__const unsigned char *) (__const char *) (*args); register int __result = __s1[0] - ((__const unsigned char *) (__const char *) ("255.255.255.255"))[0]; if (__s2_len > 0 && __result == 0) { __result = (__s1[1] - ((__const unsigned char *) (__const char *) ("255.255.255.255"))[1]); if (__s2_len > 1 && __result == 0) { __result = (__s1[2] - ((__const unsigned char *) (__const char *) ("255.255.255.255"))[2]); if (__s2_len > 2 && __result == 0) __result = (__s1[3] - ((__const unsigned char *) (__const char *) ("255.255.255.255"))[3]); } } __result; }))) : __builtin_strcmp (*args, "255.255.255.255")))); }) != 0) {
+ memcpy((char *) &req.arp_netmask, (char *) &sa,
+ sizeof(struct sockaddr));
+ }
+ }
+ }
+}
+static int arp_file(char *name)
+{
+ char buff[1024];
+ char *sp, *args[32];
+ int linenr, argc;
+ FILE *fp;
+ while (fgets(buff, sizeof(buff), fp) != (char *) ((void *)0)) {
+ if (arp_set(args) != 0)
+ fprintf(stderr, ("arp: cannot set entry on line %u on line %u of etherfile %s !\n"),
+ linenr, name);
+ }
+}
+int main(int argc, char **argv)
+{
+ int i, lop, what;
+ switch (what) {
+ case 0:
+ what = arp_file(argv[optind] ? argv[optind] : "/etc/ethers");
+ }
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/scalar_intrinsics.c b/gcc/testsuite/gcc.target/aarch64/scalar_intrinsics.c
new file mode 100644
index 00000000000..1b853084328
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/scalar_intrinsics.c
@@ -0,0 +1,1181 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#include "../../../config/aarch64/arm_neon.h"
+
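+/* One wrapper function per scalar intrinsic; each scan-assembler-times
+   directive counts the instructions produced by the functions that
+   follow it.  */
+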
+/* { dg-final { scan-assembler-times "\\tadd\\tx\[0-9\]+" 2 } } */
+
+uint64x1_t
+test_vaddd_u64 (uint64x1_t a, uint64x1_t b)
+{
+ return vaddd_u64 (a, b);
+}
+
+int64x1_t
+test_vaddd_s64 (int64x1_t a, int64x1_t b)
+{
+ return vaddd_s64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tadd\\td\[0-9\]+" 1 } } */
+
+int64x1_t
+test_vaddd_s64_2 (int64x1_t a, int64x1_t b, int64x1_t c, int64x1_t d)
+{
+ return vqaddd_s64 (vaddd_s64 (vqaddd_s64 (a, b), vqaddd_s64 (c, d)),
+ vqaddd_s64 (a, d));
+}
+
+/* { dg-final { scan-assembler-times "\\tcmeq\\td\[0-9\]+, d\[0-9\]+, d\[0-9\]+" 1 } } */
+
+uint64x1_t
+test_vceqd_s64 (int64x1_t a, int64x1_t b)
+{
+ return vceqd_s64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tcmeq\\td\[0-9\]+, d\[0-9\]+, #?0" 1 } } */
+
+uint64x1_t
+test_vceqzd_s64 (int64x1_t a)
+{
+ return vceqzd_s64 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tcmge\\td\[0-9\]+, d\[0-9\]+, d\[0-9\]+" 2 } } */
+
+uint64x1_t
+test_vcged_s64 (int64x1_t a, int64x1_t b)
+{
+ return vcged_s64 (a, b);
+}
+
+uint64x1_t
+test_vcled_s64 (int64x1_t a, int64x1_t b)
+{
+ return vcled_s64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tcmge\\td\[0-9\]+, d\[0-9\]+, #?0" 1 } } */
+
+uint64x1_t
+test_vcgezd_s64 (int64x1_t a)
+{
+ return vcgezd_s64 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tcmhs\\td\[0-9\]+, d\[0-9\]+, d\[0-9\]+" 1 } } */
+
+uint64x1_t
+test_vcged_u64 (uint64x1_t a, uint64x1_t b)
+{
+ return vcged_u64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tcmgt\\td\[0-9\]+, d\[0-9\]+, d\[0-9\]+" 2 } } */
+
+uint64x1_t
+test_vcgtd_s64 (int64x1_t a, int64x1_t b)
+{
+ return vcgtd_s64 (a, b);
+}
+
+uint64x1_t
+test_vcltd_s64 (int64x1_t a, int64x1_t b)
+{
+ return vcltd_s64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tcmgt\\td\[0-9\]+, d\[0-9\]+, #?0" 1 } } */
+
+uint64x1_t
+test_vcgtzd_s64 (int64x1_t a)
+{
+ return vcgtzd_s64 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tcmhi\\td\[0-9\]+, d\[0-9\]+, d\[0-9\]+" 1 } } */
+
+uint64x1_t
+test_vcgtd_u64 (uint64x1_t a, uint64x1_t b)
+{
+ return vcgtd_u64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tcmle\\td\[0-9\]+, d\[0-9\]+, #?0" 1 } } */
+
+uint64x1_t
+test_vclezd_s64 (int64x1_t a)
+{
+ return vclezd_s64 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tcmlt\\td\[0-9\]+, d\[0-9\]+, #?0" 1 } } */
+
+uint64x1_t
+test_vcltzd_s64 (int64x1_t a)
+{
+ return vcltzd_s64 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tdup\\tb\[0-9\]+, v\[0-9\]+\.b" 2 } } */
+
+int8x1_t
+test_vdupb_lane_s8 (int8x16_t a)
+{
+ return vdupb_lane_s8 (a, 2);
+}
+
+uint8x1_t
+test_vdupb_lane_u8 (uint8x16_t a)
+{
+ return vdupb_lane_u8 (a, 2);
+}
+
+/* { dg-final { scan-assembler-times "\\tdup\\th\[0-9\]+, v\[0-9\]+\.h" 2 } } */
+
+int16x1_t
+test_vduph_lane_s16 (int16x8_t a)
+{
+ return vduph_lane_s16 (a, 2);
+}
+
+uint16x1_t
+test_vduph_lane_u16 (uint16x8_t a)
+{
+ return vduph_lane_u16 (a, 2);
+}
+
+/* { dg-final { scan-assembler-times "\\tdup\\ts\[0-9\]+, v\[0-9\]+\.s" 2 } } */
+
+int32x1_t
+test_vdups_lane_s32 (int32x4_t a)
+{
+ return vdups_lane_s32 (a, 2);
+}
+
+uint32x1_t
+test_vdups_lane_u32 (uint32x4_t a)
+{
+ return vdups_lane_u32 (a, 2);
+}
+
+/* { dg-final { scan-assembler-times "\\tdup\\td\[0-9\]+, v\[0-9\]+\.d" 2 } } */
+
+int64x1_t
+test_vdupd_lane_s64 (int64x2_t a)
+{
+ return vdupd_lane_s64 (a, 1);
+}
+
+uint64x1_t
+test_vdupd_lane_u64 (uint64x2_t a)
+{
+ return vdupd_lane_u64 (a, 1);
+}
+
+/* { dg-final { scan-assembler-times "\\tcmtst\\td\[0-9\]+, d\[0-9\]+, d\[0-9\]+" 2 } } */
+
+int64x1_t
+test_vtst_s64 (int64x1_t a, int64x1_t b)
+{
+ return vtstd_s64 (a, b);
+}
+
+uint64x1_t
+test_vtst_u64 (uint64x1_t a, uint64x1_t b)
+{
+ return vtstd_u64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\taddp\\td\[0-9\]+, v\[0-9\]+\.2d" 1 } } */
+
+int64x1_t
+test_vpaddd_s64 (int64x2_t a)
+{
+ return vpaddd_s64 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqadd\\td\[0-9\]+" 1 } } */
+
+uint64x1_t
+test_vqaddd_u64 (uint64x1_t a, uint64x1_t b)
+{
+ return vqaddd_u64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqadd\\ts\[0-9\]+" 1 } } */
+
+uint32x1_t
+test_vqadds_u32 (uint32x1_t a, uint32x1_t b)
+{
+ return vqadds_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqadd\\th\[0-9\]+" 1 } } */
+
+uint16x1_t
+test_vqaddh_u16 (uint16x1_t a, uint16x1_t b)
+{
+ return vqaddh_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqadd\\tb\[0-9\]+" 1 } } */
+
+uint8x1_t
+test_vqaddb_u8 (uint8x1_t a, uint8x1_t b)
+{
+ return vqaddb_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqadd\\td\[0-9\]+" 5 } } */
+
+int64x1_t
+test_vqaddd_s64 (int64x1_t a, int64x1_t b)
+{
+ return vqaddd_s64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqadd\\ts\[0-9\]+, s\[0-9\]+" 1 } } */
+
+int32x1_t
+test_vqadds_s32 (int32x1_t a, int32x1_t b)
+{
+ return vqadds_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqadd\\th\[0-9\]+, h\[0-9\]+" 1 } } */
+
+int16x1_t
+test_vqaddh_s16 (int16x1_t a, int16x1_t b)
+{
+ return vqaddh_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqadd\\tb\[0-9\]+, b\[0-9\]+" 1 } } */
+
+int8x1_t
+test_vqaddb_s8 (int8x1_t a, int8x1_t b)
+{
+ return vqaddb_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlal\\ts\[0-9\]+, h\[0-9\]+, h\[0-9\]+" 1 } } */
+
+int32x1_t
+test_vqdmlalh_s16 (int32x1_t a, int16x1_t b, int16x1_t c)
+{
+ return vqdmlalh_s16 (a, b, c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlal\\ts\[0-9\]+, h\[0-9\]+, v" 1 } } */
+
+int32x1_t
+test_vqdmlalh_lane_s16 (int32x1_t a, int16x1_t b, int16x8_t c)
+{
+ return vqdmlalh_lane_s16 (a, b, c, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlal\\td\[0-9\]+, s\[0-9\]+, s\[0-9\]+" 1 } } */
+
+int64x1_t
+test_vqdmlals_s32 (int64x1_t a, int32x1_t b, int32x1_t c)
+{
+ return vqdmlals_s32 (a, b, c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlal\\td\[0-9\]+, s\[0-9\]+, v" 1 } } */
+
+int64x1_t
+test_vqdmlals_lane_s32 (int64x1_t a, int32x1_t b, int32x4_t c)
+{
+ return vqdmlals_lane_s32 (a, b, c, 1);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlsl\\ts\[0-9\]+, h\[0-9\]+, h\[0-9\]+" 1 } } */
+
+int32x1_t
+test_vqdmlslh_s16 (int32x1_t a, int16x1_t b, int16x1_t c)
+{
+ return vqdmlslh_s16 (a, b, c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlsl\\ts\[0-9\]+, h\[0-9\]+, v" 1 } } */
+
+int32x1_t
+test_vqdmlslh_lane_s16 (int32x1_t a, int16x1_t b, int16x8_t c)
+{
+ return vqdmlslh_lane_s16 (a, b, c, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlsl\\td\[0-9\]+, s\[0-9\]+, s\[0-9\]+" 1 } } */
+
+int64x1_t
+test_vqdmlsls_s32 (int64x1_t a, int32x1_t b, int32x1_t c)
+{
+ return vqdmlsls_s32 (a, b, c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlsl\\td\[0-9\]+, s\[0-9\]+, v" 1 } } */
+
+int64x1_t
+test_vqdmlsls_lane_s32 (int64x1_t a, int32x1_t b, int32x4_t c)
+{
+ return vqdmlsls_lane_s32 (a, b, c, 1);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmulh\\th\[0-9\]+, h\[0-9\]+, h\[0-9\]+" 1 } } */
+
+int16x1_t
+test_vqdmulhh_s16 (int16x1_t a, int16x1_t b)
+{
+ return vqdmulhh_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmulh\\th\[0-9\]+, h\[0-9\]+, v" 1 } } */
+
+int16x1_t
+test_vqdmulhh_lane_s16 (int16x1_t a, int16x8_t b)
+{
+ return vqdmulhh_lane_s16 (a, b, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmulh\\ts\[0-9\]+, s\[0-9\]+, s\[0-9\]+" 1 } } */
+
+int32x1_t
+test_vqdmulhs_s32 (int32x1_t a, int32x1_t b)
+{
+ return vqdmulhs_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmulh\\ts\[0-9\]+, s\[0-9\]+, v" 1 } } */
+
+int32x1_t
+test_vqdmulhs_lane_s32 (int32x1_t a, int32x4_t b)
+{
+ return vqdmulhs_lane_s32 (a, b, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmull\\ts\[0-9\]+, h\[0-9\]+, h\[0-9\]+" 1 } } */
+
+int32x1_t
+test_vqdmullh_s16 (int16x1_t a, int16x1_t b)
+{
+ return vqdmullh_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmull\\ts\[0-9\]+, h\[0-9\]+, v" 1 } } */
+
+int32x1_t
+test_vqdmullh_lane_s16 (int16x1_t a, int16x8_t b)
+{
+ return vqdmullh_lane_s16 (a, b, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmull\\td\[0-9\]+, s\[0-9\]+, s\[0-9\]+" 1 } } */
+
+int64x1_t
+test_vqdmulls_s32 (int32x1_t a, int32x1_t b)
+{
+ return vqdmulls_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmull\\td\[0-9\]+, s\[0-9\]+, v" 1 } } */
+
+int64x1_t
+test_vqdmulls_lane_s32 (int32x1_t a, int32x4_t b)
+{
+ return vqdmulls_lane_s32 (a, b, 1);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqrdmulh\\th\[0-9\]+, h\[0-9\]+, h\[0-9\]+" 1 } } */
+
+int16x1_t
+test_vqrdmulhh_s16 (int16x1_t a, int16x1_t b)
+{
+ return vqrdmulhh_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqrdmulh\\th\[0-9\]+, h\[0-9\]+, v" 1 } } */
+
+int16x1_t
+test_vqrdmulhh_lane_s16 (int16x1_t a, int16x8_t b)
+{
+ return vqrdmulhh_lane_s16 (a, b, 6);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqrdmulh\\ts\[0-9\]+, s\[0-9\]+, s\[0-9\]+" 1 } } */
+
+int32x1_t
+test_vqrdmulhs_s32 (int32x1_t a, int32x1_t b)
+{
+ return vqrdmulhs_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqrdmulh\\ts\[0-9\]+, s\[0-9\]+, v" 1 } } */
+
+int32x1_t
+test_vqrdmulhs_lane_s32 (int32x1_t a, int32x4_t b)
+{
+ return vqrdmulhs_lane_s32 (a, b, 2);
+}
+
+/* { dg-final { scan-assembler-times "\\tsuqadd\\tb\[0-9\]+" 1 } } */
+
+int8x1_t
+test_vuqaddb_s8 (int8x1_t a, int8x1_t b)
+{
+ return vuqaddb_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsuqadd\\th\[0-9\]+" 1 } } */
+
+int16x1_t
+test_vuqaddh_s16 (int16x1_t a, int8x1_t b)
+{
+ return vuqaddh_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsuqadd\\ts\[0-9\]+" 1 } } */
+
+int32x1_t
+test_vuqadds_s32 (int32x1_t a, int8x1_t b)
+{
+ return vuqadds_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsuqadd\\td\[0-9\]+" 1 } } */
+
+int64x1_t
+test_vuqaddd_s64 (int64x1_t a, int8x1_t b)
+{
+ return vuqaddd_s64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tusqadd\\tb\[0-9\]+" 1 } } */
+
+uint8x1_t
+test_vsqaddb_u8 (uint8x1_t a, int8x1_t b)
+{
+ return vsqaddb_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tusqadd\\th\[0-9\]+" 1 } } */
+
+uint16x1_t
+test_vsqaddh_u16 (uint16x1_t a, int8x1_t b)
+{
+ return vsqaddh_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tusqadd\\ts\[0-9\]+" 1 } } */
+
+uint32x1_t
+test_vsqadds_u32 (uint32x1_t a, int8x1_t b)
+{
+ return vsqadds_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tusqadd\\td\[0-9\]+" 1 } } */
+
+uint64x1_t
+test_vsqaddd_u64 (uint64x1_t a, int8x1_t b)
+{
+ return vsqaddd_u64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqabs\\tb\[0-9\]+" 1 } } */
+
+int8x1_t
+test_vqabsb_s8 (int8x1_t a)
+{
+ return vqabsb_s8 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqabs\\th\[0-9\]+" 1 } } */
+
+int16x1_t
+test_vqabsh_s16 (int16x1_t a)
+{
+ return vqabsh_s16 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqabs\\ts\[0-9\]+" 1 } } */
+
+int32x1_t
+test_vqabss_s32 (int32x1_t a)
+{
+ return vqabss_s32 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqneg\\tb\[0-9\]+" 1 } } */
+
+int8x1_t
+test_vqnegb_s8 (int8x1_t a)
+{
+ return vqnegb_s8 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqneg\\th\[0-9\]+" 1 } } */
+
+int16x1_t
+test_vqnegh_s16 (int16x1_t a)
+{
+ return vqnegh_s16 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqneg\\ts\[0-9\]+" 1 } } */
+
+int32x1_t
+test_vqnegs_s32 (int32x1_t a)
+{
+ return vqnegs_s32 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqxtun\\tb\[0-9\]+" 1 } } */
+
+int8x1_t
+test_vqmovunh_s16 (int16x1_t a)
+{
+ return vqmovunh_s16 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqxtun\\th\[0-9\]+" 1 } } */
+
+int16x1_t
+test_vqmovuns_s32 (int32x1_t a)
+{
+ return vqmovuns_s32 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqxtun\\ts\[0-9\]+" 1 } } */
+
+int32x1_t
+test_vqmovund_s64 (int64x1_t a)
+{
+ return vqmovund_s64 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqxtn\\tb\[0-9\]+" 1 } } */
+
+int8x1_t
+test_vqmovnh_s16 (int16x1_t a)
+{
+ return vqmovnh_s16 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqxtn\\th\[0-9\]+" 1 } } */
+
+int16x1_t
+test_vqmovns_s32 (int32x1_t a)
+{
+ return vqmovns_s32 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqxtn\\ts\[0-9\]+" 1 } } */
+
+int32x1_t
+test_vqmovnd_s64 (int64x1_t a)
+{
+ return vqmovnd_s64 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqxtn\\tb\[0-9\]+" 1 } } */
+
+uint8x1_t
+test_vqmovnh_u16 (uint16x1_t a)
+{
+ return vqmovnh_u16 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqxtn\\th\[0-9\]+" 1 } } */
+
+uint16x1_t
+test_vqmovns_u32 (uint32x1_t a)
+{
+ return vqmovns_u32 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqxtn\\ts\[0-9\]+" 1 } } */
+
+uint32x1_t
+test_vqmovnd_u64 (uint64x1_t a)
+{
+ return vqmovnd_u64 (a);
+}
+
+/* { dg-final { scan-assembler-times "\\tsub\\tx\[0-9\]+" 2 } } */
+
+uint64x1_t
+test_vsubd_u64 (uint64x1_t a, uint64x1_t b)
+{
+ return vsubd_u64 (a, b);
+}
+
+int64x1_t
+test_vsubd_s64 (int64x1_t a, int64x1_t b)
+{
+ return vsubd_s64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsub\\td\[0-9\]+" 1 } } */
+
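+/* Feeding vsubd_s64 with vqsubd_s64 results keeps the operands in FP
+   registers, so this subtraction is expected to use the "sub d" form
+   rather than the general-purpose "sub x" matched earlier.  The four
+   vqsubd_s64 calls here also supply four of the five "sqsub d" matches
+   expected below.  */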
+int64x1_t
+test_vsubd_s64_2 (int64x1_t a, int64x1_t b, int64x1_t c, int64x1_t d)
+{
+ return vqsubd_s64 (vsubd_s64 (vqsubd_s64 (a, b), vqsubd_s64 (c, d)),
+ vqsubd_s64 (a, d));
+}
+
+/* { dg-final { scan-assembler-times "\\tuqsub\\td\[0-9\]+" 1 } } */
+
+uint64x1_t
+test_vqsubd_u64 (uint64x1_t a, uint64x1_t b)
+{
+ return vqsubd_u64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqsub\\ts\[0-9\]+" 1 } } */
+
+uint32x1_t
+test_vqsubs_u32 (uint32x1_t a, uint32x1_t b)
+{
+ return vqsubs_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqsub\\th\[0-9\]+" 1 } } */
+
+uint16x1_t
+test_vqsubh_u16 (uint16x1_t a, uint16x1_t b)
+{
+ return vqsubh_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqsub\\tb\[0-9\]+" 1 } } */
+
+uint8x1_t
+test_vqsubb_u8 (uint8x1_t a, uint8x1_t b)
+{
+ return vqsubb_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqsub\\td\[0-9\]+" 5 } } */
+
+int64x1_t
+test_vqsubd_s64 (int64x1_t a, int64x1_t b)
+{
+ return vqsubd_s64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqsub\\ts\[0-9\]+" 1 } } */
+
+int32x1_t
+test_vqsubs_s32 (int32x1_t a, int32x1_t b)
+{
+ return vqsubs_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqsub\\th\[0-9\]+" 1 } } */
+
+int16x1_t
+test_vqsubh_s16 (int16x1_t a, int16x1_t b)
+{
+ return vqsubh_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqsub\\tb\[0-9\]+" 1 } } */
+
+int8x1_t
+test_vqsubb_s8 (int8x1_t a, int8x1_t b)
+{
+ return vqsubb_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsshl\\td\[0-9\]+" 1 } } */
+
+int64x1_t
+test_vshld_s64 (int64x1_t a, int64x1_t b)
+{
+ return vshld_s64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tushl\\td\[0-9\]+" 1 } } */
+
+uint64x1_t
+test_vshld_u64 (uint64x1_t a, uint64x1_t b)
+{
+ return vshld_u64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsrshl\\td\[0-9\]+" 1 } } */
+
+int64x1_t
+test_vrshld_s64 (int64x1_t a, int64x1_t b)
+{
+ return vrshld_s64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\turshl\\td\[0-9\]+" 1 } } */
+
+uint64x1_t
+test_vrshld_u64 (uint64x1_t a, uint64x1_t b)
+{
+ return vrshld_u64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tasr\\tx\[0-9\]+" 1 } } */
+
+int64x1_t
+test_vshrd_n_s64 (int64x1_t a)
+{
+ return vshrd_n_s64 (a, 5);
+}
+
+/* { dg-final { scan-assembler-times "\\tlsr\\tx\[0-9\]+" 1 } } */
+
+uint64x1_t
+test_vshrd_n_u64 (uint64x1_t a)
+{
+ return vshrd_n_u64 (a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tssra\\td\[0-9\]+" 1 } } */
+
+int64x1_t
+test_vsrad_n_s64 (int64x1_t a, int64x1_t b)
+{
+ return vsrad_n_s64 (a, b, 2);
+}
+
+/* { dg-final { scan-assembler-times "\\tusra\\td\[0-9\]+" 1 } } */
+
+uint64x1_t
+test_vsrad_n_u64 (uint64x1_t a, uint64x1_t b)
+{
+ return vsrad_n_u64 (a, b, 5);
+}
+
+/* { dg-final { scan-assembler-times "\\tsrshr\\td\[0-9\]+" 1 } } */
+
+int64x1_t
+test_vrshrd_n_s64 (int64x1_t a)
+{
+ return vrshrd_n_s64 (a, 5);
+}
+
+/* { dg-final { scan-assembler-times "\\turshr\\td\[0-9\]+" 1 } } */
+
+uint64x1_t
+test_vrshrd_n_u64 (uint64x1_t a)
+{
+ return vrshrd_n_u64 (a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tsrsra\\td\[0-9\]+" 1 } } */
+
+int64x1_t
+test_vrsrad_n_s64 (int64x1_t a, int64x1_t b)
+{
+ return vrsrad_n_s64 (a, b, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tsrsra\\td\[0-9\]+" 1 } } */
+
+uint64x1_t
+test_vrsrad_n_u64 (uint64x1_t a, uint64x1_t b)
+{
+ return vrsrad_n_u64 (a, b, 4);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqrshl\\tb\[0-9\]+" 1 } } */
+
+int8x1_t
+test_vqrshlb_s8 (int8x1_t a, int8x1_t b)
+{
+ return vqrshlb_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqrshl\\th\[0-9\]+" 1 } } */
+
+int16x1_t
+test_vqrshlh_s16 (int16x1_t a, int16x1_t b)
+{
+ return vqrshlh_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqrshl\\ts\[0-9\]+" 1 } } */
+
+int32x1_t
+test_vqrshls_s32 (int32x1_t a, int32x1_t b)
+{
+ return vqrshls_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqrshl\\td\[0-9\]+" 1 } } */
+
+int64x1_t
+test_vqrshld_s64 (int64x1_t a, int64x1_t b)
+{
+ return vqrshld_s64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqrshl\\tb\[0-9\]+" 1 } } */
+
+uint8x1_t
+test_vqrshlb_u8 (uint8x1_t a, uint8x1_t b)
+{
+ return vqrshlb_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqrshl\\th\[0-9\]+" 1 } } */
+
+uint16x1_t
+test_vqrshlh_u16 (uint16x1_t a, uint16x1_t b)
+{
+ return vqrshlh_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqrshl\\ts\[0-9\]+" 1 } } */
+
+uint32x1_t
+test_vqrshls_u32 (uint32x1_t a, uint32x1_t b)
+{
+ return vqrshls_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqrshl\\td\[0-9\]+" 1 } } */
+
+uint64x1_t
+test_vqrshld_u64 (uint64x1_t a, uint64x1_t b)
+{
+ return vqrshld_u64 (a, b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqshlu\\tb\[0-9\]+" 1 } } */
+
+int8x1_t
+test_vqshlub_n_s8 (int8x1_t a)
+{
+ return vqshlub_n_s8 (a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqshlu\\th\[0-9\]+" 1 } } */
+
+int16x1_t
+test_vqshluh_n_s16 (int16x1_t a)
+{
+ return vqshluh_n_s16 (a, 4);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqshlu\\ts\[0-9\]+" 1 } } */
+
+int32x1_t
+test_vqshlus_n_s32 (int32x1_t a)
+{
+ return vqshlus_n_s32 (a, 5);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqshlu\\td\[0-9\]+" 1 } } */
+
+int64x1_t
+test_vqshlud_n_s64 (int64x1_t a)
+{
+ return vqshlud_n_s64 (a, 6);
+}
+
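+/* The register-shift and _n (immediate) forms share one mnemonic, so
+   each sqshl/uqshl pattern below is expected to match twice.  */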
+/* { dg-final { scan-assembler-times "\\tsqshl\\tb\[0-9\]+" 2 } } */
+
+int8x1_t
+test_vqshlb_s8 (int8x1_t a, int8x1_t b)
+{
+ return vqshlb_s8 (a, b);
+}
+
+int8x1_t
+test_vqshlb_n_s8 (int8x1_t a)
+{
+ return vqshlb_n_s8 (a, 2);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqshl\\th\[0-9\]+" 2 } } */
+
+int16x1_t
+test_vqshlh_s16 (int16x1_t a, int16x1_t b)
+{
+ return vqshlh_s16 (a, b);
+}
+
+int16x1_t
+test_vqshlh_n_s16 (int16x1_t a)
+{
+ return vqshlh_n_s16 (a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqshl\\ts\[0-9\]+" 2 } } */
+
+int32x1_t
+test_vqshls_s32 (int32x1_t a, int32x1_t b)
+{
+ return vqshls_s32 (a, b);
+}
+
+int32x1_t
+test_vqshls_n_s32 (int32x1_t a)
+{
+ return vqshls_n_s32 (a, 4);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqshl\\td\[0-9\]+" 2 } } */
+
+int64x1_t
+test_vqshld_s64 (int64x1_t a, int64x1_t b)
+{
+ return vqshld_s64 (a, b);
+}
+
+int64x1_t
+test_vqshld_n_s64 (int64x1_t a)
+{
+ return vqshld_n_s64 (a, 5);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqshl\\tb\[0-9\]+" 2 } } */
+
+uint8x1_t
+test_vqshlb_u8 (uint8x1_t a, uint8x1_t b)
+{
+ return vqshlb_u8 (a, b);
+}
+
+uint8x1_t
+test_vqshlb_n_u8 (uint8x1_t a)
+{
+ return vqshlb_n_u8 (a, 2);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqshl\\th\[0-9\]+" 2 } } */
+
+uint16x1_t
+test_vqshlh_u16 (uint16x1_t a, uint16x1_t b)
+{
+ return vqshlh_u16 (a, b);
+}
+
+uint16x1_t
+test_vqshlh_n_u16 (uint16x1_t a)
+{
+ return vqshlh_n_u16 (a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqshl\\ts\[0-9\]+" 2 } } */
+
+uint32x1_t
+test_vqshls_u32 (uint32x1_t a, uint32x1_t b)
+{
+ return vqshls_u32 (a, b);
+}
+
+uint32x1_t
+test_vqshls_n_u32 (uint32x1_t a)
+{
+ return vqshls_n_u32 (a, 4);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqshl\\td\[0-9\]+" 2 } } */
+
+uint64x1_t
+test_vqshld_u64 (uint64x1_t a, uint64x1_t b)
+{
+ return vqshld_u64 (a, b);
+}
+
+uint64x1_t
+test_vqshld_n_u64 (uint64x1_t a)
+{
+ return vqshld_n_u64 (a, 5);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqshrun\\tb\[0-9\]+" 1 } } */
+
+int8x1_t
+test_vqshrunh_n_s16 (int16x1_t a)
+{
+ return vqshrunh_n_s16 (a, 2);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqshrun\\th\[0-9\]+" 1 } } */
+
+int16x1_t
+test_vqshruns_n_s32 (int32x1_t a)
+{
+ return vqshruns_n_s32 (a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqshrun\\ts\[0-9\]+" 1 } } */
+
+int32x1_t
+test_vqshrund_n_s64 (int64x1_t a)
+{
+ return vqshrund_n_s64 (a, 4);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqrshrun\\tb\[0-9\]+" 1 } } */
+
+int8x1_t
+test_vqrshrunh_n_s16 (int16x1_t a)
+{
+ return vqrshrunh_n_s16 (a, 2);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqrshrun\\th\[0-9\]+" 1 } } */
+
+int16x1_t
+test_vqrshruns_n_s32 (int32x1_t a)
+{
+ return vqrshruns_n_s32 (a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqrshrun\\ts\[0-9\]+" 1 } } */
+
+int32x1_t
+test_vqrshrund_n_s64 (int64x1_t a)
+{
+ return vqrshrund_n_s64 (a, 4);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqshrn\\tb\[0-9\]+" 1 } } */
+
+int8x1_t
+test_vqshrnh_n_s16 (int16x1_t a)
+{
+ return vqshrnh_n_s16 (a, 2);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqshrn\\th\[0-9\]+" 1 } } */
+
+int16x1_t
+test_vqshrns_n_s32 (int32x1_t a)
+{
+ return vqshrns_n_s32 (a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqshrn\\ts\[0-9\]+" 1 } } */
+
+int32x1_t
+test_vqshrnd_n_s64 (int64x1_t a)
+{
+ return vqshrnd_n_s64 (a, 4);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqshrn\\tb\[0-9\]+" 1 } } */
+
+uint8x1_t
+test_vqshrnh_n_u16 (uint16x1_t a)
+{
+ return vqshrnh_n_u16 (a, 2);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqshrn\\th\[0-9\]+" 1 } } */
+
+uint16x1_t
+test_vqshrns_n_u32 (uint32x1_t a)
+{
+ return vqshrns_n_u32 (a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqshrn\\ts\[0-9\]+" 1 } } */
+
+uint32x1_t
+test_vqshrnd_n_u64 (uint64x1_t a)
+{
+ return vqshrnd_n_u64 (a, 4);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqrshrn\\tb\[0-9\]+" 1 } } */
+
+int8x1_t
+test_vqrshrnh_n_s16 (int16x1_t a)
+{
+ return vqrshrnh_n_s16 (a, 2);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqrshrn\\th\[0-9\]+" 1 } } */
+
+int16x1_t
+test_vqrshrns_n_s32 (int32x1_t a)
+{
+ return vqrshrns_n_s32 (a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqrshrn\\ts\[0-9\]+" 1 } } */
+
+int32x1_t
+test_vqrshrnd_n_s64 (int64x1_t a)
+{
+ return vqrshrnd_n_s64 (a, 4);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqrshrn\\tb\[0-9\]+" 1 } } */
+
+uint8x1_t
+test_vqrshrnh_n_u16 (uint16x1_t a)
+{
+ return vqrshrnh_n_u16 (a, 2);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqrshrn\\th\[0-9\]+" 1 } } */
+
+uint16x1_t
+test_vqrshrns_n_u32 (uint32x1_t a)
+{
+ return vqrshrns_n_u32 (a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tuqrshrn\\ts\[0-9\]+" 1 } } */
+
+uint32x1_t
+test_vqrshrnd_n_u64 (uint64x1_t a)
+{
+ return vqrshrnd_n_u64 (a, 4);
+}
+
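+/* The immediate scalar shifts vshld_n_s64 and vshld_n_u64 are expected
+   to compile to a general-purpose lsl, one match each.  */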
+/* { dg-final { scan-assembler-times "\\tlsl\\tx\[0-9\]+" 2 } } */
+
+int64x1_t
+test_vshl_n_s64 (int64x1_t a)
+{
+ return vshld_n_s64 (a, 9);
+}
+
+uint64x1_t
+test_vshl_n_u64 (uint64x1_t a)
+{
+ return vshld_n_u64 (a, 9);
+}
+
+/* { dg-final { scan-assembler-times "\\tsli\\td\[0-9\]+" 2 } } */
+
+int64x1_t
+test_vsli_n_s64 (int64x1_t a, int64x1_t b)
+{
+ return vslid_n_s64 (a, b, 9);
+}
+
+uint64x1_t
+test_vsli_n_u64 (uint64x1_t a, uint64x1_t b)
+{
+ return vslid_n_u64 (a, b, 9);
+}
+
+/* { dg-final { scan-assembler-times "\\tsri\\td\[0-9\]+" 2 } } */
+
+int64x1_t
+test_vsri_n_s64 (int64x1_t a, int64x1_t b)
+{
+ return vsrid_n_s64 (a, b, 9);
+}
+
+uint64x1_t
+test_vsri_n_u64 (uint64x1_t a, uint64x1_t b)
+{
+ return vsrid_n_u64 (a, b, 9);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/table-intrinsics.c b/gcc/testsuite/gcc.target/aarch64/table-intrinsics.c
new file mode 100644
index 00000000000..5d53abe8d10
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/table-intrinsics.c
@@ -0,0 +1,439 @@
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+
+#include "arm_neon.h"
+
+int8x8_t
+tbl_tests8_ (int8x8_t tab, int8x8_t idx)
+{
+ return vtbl1_s8 (tab, idx);
+}
+
+uint8x8_t
+tbl_testu8_ (uint8x8_t tab, uint8x8_t idx)
+{
+ return vtbl1_u8 (tab, idx);
+}
+
+poly8x8_t
+tbl_testp8_ (poly8x8_t tab, uint8x8_t idx)
+{
+ return vtbl1_p8 (tab, idx);
+}
+
+int8x8_t
+tbl_tests8_2 (int8x8x2_t tab, int8x8_t idx)
+{
+ return vtbl2_s8 (tab, idx);
+}
+
+uint8x8_t
+tbl_testu8_2 (uint8x8x2_t tab, uint8x8_t idx)
+{
+ return vtbl2_u8 (tab, idx);
+}
+
+poly8x8_t
+tbl_testp8_2 (poly8x8x2_t tab, uint8x8_t idx)
+{
+ return vtbl2_p8 (tab, idx);
+}
+
+int8x8_t
+tbl_tests8_3 (int8x8x3_t tab, int8x8_t idx)
+{
+ return vtbl3_s8 (tab, idx);
+}
+
+uint8x8_t
+tbl_testu8_3 (uint8x8x3_t tab, uint8x8_t idx)
+{
+ return vtbl3_u8 (tab, idx);
+}
+
+poly8x8_t
+tbl_testp8_3 (poly8x8x3_t tab, uint8x8_t idx)
+{
+ return vtbl3_p8 (tab, idx);
+}
+
+int8x8_t
+tbl_tests8_4 (int8x8x4_t tab, int8x8_t idx)
+{
+ return vtbl4_s8 (tab, idx);
+}
+
+uint8x8_t
+tbl_testu8_4 (uint8x8x4_t tab, uint8x8_t idx)
+{
+ return vtbl4_u8 (tab, idx);
+}
+
+poly8x8_t
+tbl_testp8_4 (poly8x8x4_t tab, uint8x8_t idx)
+{
+ return vtbl4_p8 (tab, idx);
+}
+
+int8x8_t
+tb_tests8_ (int8x8_t r, int8x8_t tab, int8x8_t idx)
+{
+ return vtbx1_s8 (r, tab, idx);
+}
+
+uint8x8_t
+tb_testu8_ (uint8x8_t r, uint8x8_t tab, uint8x8_t idx)
+{
+ return vtbx1_u8 (r, tab, idx);
+}
+
+poly8x8_t
+tb_testp8_ (poly8x8_t r, poly8x8_t tab, uint8x8_t idx)
+{
+ return vtbx1_p8 (r, tab, idx);
+}
+
+int8x8_t
+tb_tests8_2 (int8x8_t r, int8x8x2_t tab, int8x8_t idx)
+{
+ return vtbx2_s8 (r, tab, idx);
+}
+
+uint8x8_t
+tb_testu8_2 (uint8x8_t r, uint8x8x2_t tab, uint8x8_t idx)
+{
+ return vtbx2_u8 (r, tab, idx);
+}
+
+poly8x8_t
+tb_testp8_2 (poly8x8_t r, poly8x8x2_t tab, uint8x8_t idx)
+{
+ return vtbx2_p8 (r, tab, idx);
+}
+
+int8x8_t
+tb_tests8_3 (int8x8_t r, int8x8x3_t tab, int8x8_t idx)
+{
+ return vtbx3_s8 (r, tab, idx);
+}
+
+uint8x8_t
+tb_testu8_3 (uint8x8_t r, uint8x8x3_t tab, uint8x8_t idx)
+{
+ return vtbx3_u8 (r, tab, idx);
+}
+
+poly8x8_t
+tb_testp8_3 (poly8x8_t r, poly8x8x3_t tab, uint8x8_t idx)
+{
+ return vtbx3_p8 (r, tab, idx);
+}
+
+int8x8_t
+tb_tests8_4 (int8x8_t r, int8x8x4_t tab, int8x8_t idx)
+{
+ return vtbx4_s8 (r, tab, idx);
+}
+
+uint8x8_t
+tb_testu8_4 (uint8x8_t r, uint8x8x4_t tab, uint8x8_t idx)
+{
+ return vtbx4_u8 (r, tab, idx);
+}
+
+poly8x8_t
+tb_testp8_4 (poly8x8_t r, poly8x8x4_t tab, uint8x8_t idx)
+{
+ return vtbx4_p8 (r, tab, idx);
+}
+
+int8x8_t
+qtbl_tests8_ (int8x16_t tab, int8x8_t idx)
+{
+ return vqtbl1_s8 (tab, idx);
+}
+
+uint8x8_t
+qtbl_testu8_ (uint8x16_t tab, uint8x8_t idx)
+{
+ return vqtbl1_u8 (tab, idx);
+}
+
+poly8x8_t
+qtbl_testp8_ (poly8x16_t tab, uint8x8_t idx)
+{
+ return vqtbl1_p8 (tab, idx);
+}
+
+int8x8_t
+qtbl_tests8_2 (int8x16x2_t tab, int8x8_t idx)
+{
+ return vqtbl2_s8 (tab, idx);
+}
+
+uint8x8_t
+qtbl_testu8_2 (uint8x16x2_t tab, uint8x8_t idx)
+{
+ return vqtbl2_u8 (tab, idx);
+}
+
+poly8x8_t
+qtbl_testp8_2 (poly8x16x2_t tab, uint8x8_t idx)
+{
+ return vqtbl2_p8 (tab, idx);
+}
+
+int8x8_t
+qtbl_tests8_3 (int8x16x3_t tab, int8x8_t idx)
+{
+ return vqtbl3_s8 (tab, idx);
+}
+
+uint8x8_t
+qtbl_testu8_3 (uint8x16x3_t tab, uint8x8_t idx)
+{
+ return vqtbl3_u8 (tab, idx);
+}
+
+poly8x8_t
+qtbl_testp8_3 (poly8x16x3_t tab, uint8x8_t idx)
+{
+ return vqtbl3_p8 (tab, idx);
+}
+
+int8x8_t
+qtbl_tests8_4 (int8x16x4_t tab, int8x8_t idx)
+{
+ return vqtbl4_s8 (tab, idx);
+}
+
+uint8x8_t
+qtbl_testu8_4 (uint8x16x4_t tab, uint8x8_t idx)
+{
+ return vqtbl4_u8 (tab, idx);
+}
+
+poly8x8_t
+qtbl_testp8_4 (poly8x16x4_t tab, uint8x8_t idx)
+{
+ return vqtbl4_p8 (tab, idx);
+}
+
+int8x8_t
+qtb_tests8_ (int8x8_t r, int8x16_t tab, int8x8_t idx)
+{
+ return vqtbx1_s8 (r, tab, idx);
+}
+
+uint8x8_t
+qtb_testu8_ (uint8x8_t r, uint8x16_t tab, uint8x8_t idx)
+{
+ return vqtbx1_u8 (r, tab, idx);
+}
+
+poly8x8_t
+qtb_testp8_ (poly8x8_t r, poly8x16_t tab, uint8x8_t idx)
+{
+ return vqtbx1_p8 (r, tab, idx);
+}
+
+int8x8_t
+qtb_tests8_2 (int8x8_t r, int8x16x2_t tab, int8x8_t idx)
+{
+ return vqtbx2_s8 (r, tab, idx);
+}
+
+uint8x8_t
+qtb_testu8_2 (uint8x8_t r, uint8x16x2_t tab, uint8x8_t idx)
+{
+ return vqtbx2_u8 (r, tab, idx);
+}
+
+poly8x8_t
+qtb_testp8_2 (poly8x8_t r, poly8x16x2_t tab, uint8x8_t idx)
+{
+ return vqtbx2_p8 (r, tab, idx);
+}
+
+int8x8_t
+qtb_tests8_3 (int8x8_t r, int8x16x3_t tab, int8x8_t idx)
+{
+ return vqtbx3_s8 (r, tab, idx);
+}
+
+uint8x8_t
+qtb_testu8_3 (uint8x8_t r, uint8x16x3_t tab, uint8x8_t idx)
+{
+ return vqtbx3_u8 (r, tab, idx);
+}
+
+poly8x8_t
+qtb_testp8_3 (poly8x8_t r, poly8x16x3_t tab, uint8x8_t idx)
+{
+ return vqtbx3_p8 (r, tab, idx);
+}
+
+int8x8_t
+qtb_tests8_4 (int8x8_t r, int8x16x4_t tab, int8x8_t idx)
+{
+ return vqtbx4_s8 (r, tab, idx);
+}
+
+uint8x8_t
+qtb_testu8_4 (uint8x8_t r, uint8x16x4_t tab, uint8x8_t idx)
+{
+ return vqtbx4_u8 (r, tab, idx);
+}
+
+poly8x8_t
+qtb_testp8_4 (poly8x8_t r, poly8x16x4_t tab, uint8x8_t idx)
+{
+ return vqtbx4_p8 (r, tab, idx);
+}
+
+int8x16_t
+qtblq_tests8_ (int8x16_t tab, int8x16_t idx)
+{
+ return vqtbl1q_s8 (tab, idx);
+}
+
+uint8x16_t
+qtblq_testu8_ (uint8x16_t tab, uint8x16_t idx)
+{
+ return vqtbl1q_u8 (tab, idx);
+}
+
+poly8x16_t
+qtblq_testp8_ (poly8x16_t tab, uint8x16_t idx)
+{
+ return vqtbl1q_p8 (tab, idx);
+}
+
+int8x16_t
+qtblq_tests8_2 (int8x16x2_t tab, int8x16_t idx)
+{
+ return vqtbl2q_s8 (tab, idx);
+}
+
+uint8x16_t
+qtblq_testu8_2 (uint8x16x2_t tab, uint8x16_t idx)
+{
+ return vqtbl2q_u8 (tab, idx);
+}
+
+poly8x16_t
+qtblq_testp8_2 (poly8x16x2_t tab, uint8x16_t idx)
+{
+ return vqtbl2q_p8 (tab, idx);
+}
+
+int8x16_t
+qtblq_tests8_3 (int8x16x3_t tab, int8x16_t idx)
+{
+ return vqtbl3q_s8 (tab, idx);
+}
+
+uint8x16_t
+qtblq_testu8_3 (uint8x16x3_t tab, uint8x16_t idx)
+{
+ return vqtbl3q_u8 (tab, idx);
+}
+
+poly8x16_t
+qtblq_testp8_3 (poly8x16x3_t tab, uint8x16_t idx)
+{
+ return vqtbl3q_p8 (tab, idx);
+}
+
+int8x16_t
+qtblq_tests8_4 (int8x16x4_t tab, int8x16_t idx)
+{
+ return vqtbl4q_s8 (tab, idx);
+}
+
+uint8x16_t
+qtblq_testu8_4 (uint8x16x4_t tab, uint8x16_t idx)
+{
+ return vqtbl4q_u8 (tab, idx);
+}
+
+poly8x16_t
+qtblq_testp8_4 (poly8x16x4_t tab, uint8x16_t idx)
+{
+ return vqtbl4q_p8 (tab, idx);
+}
+
+int8x16_t
+qtbxq_tests8_ (int8x16_t r, int8x16_t tab, int8x16_t idx)
+{
+ return vqtbx1q_s8 (r, tab, idx);
+}
+
+uint8x16_t
+qtbxq_testu8_ (uint8x16_t r, uint8x16_t tab, uint8x16_t idx)
+{
+ return vqtbx1q_u8 (r, tab, idx);
+}
+
+poly8x16_t
+qtbxq_testp8_ (poly8x16_t r, poly8x16_t tab, uint8x16_t idx)
+{
+ return vqtbx1q_p8 (r, tab, idx);
+}
+
+int8x16_t
+qtbxq_tests8_2 (int8x16_t r, int8x16x2_t tab, int8x16_t idx)
+{
+ return vqtbx2q_s8 (r, tab, idx);
+}
+
+uint8x16_t
+qtbxq_testu8_2 (uint8x16_t r, uint8x16x2_t tab, uint8x16_t idx)
+{
+ return vqtbx2q_u8 (r, tab, idx);
+}
+
+poly8x16_t
+qtbxq_testp8_2 (poly8x16_t r, poly8x16x2_t tab, uint8x16_t idx)
+{
+ return vqtbx2q_p8 (r, tab, idx);
+}
+
+int8x16_t
+qtbxq_tests8_3 (int8x16_t r, int8x16x3_t tab, int8x16_t idx)
+{
+ return vqtbx3q_s8 (r, tab, idx);
+}
+
+uint8x16_t
+qtbxq_testu8_3 (uint8x16_t r, uint8x16x3_t tab, uint8x16_t idx)
+{
+ return vqtbx3q_u8 (r, tab, idx);
+}
+
+poly8x16_t
+qtbxq_testp8_3 (poly8x16_t r, poly8x16x3_t tab, uint8x16_t idx)
+{
+ return vqtbx3q_p8 (r, tab, idx);
+}
+
+int8x16_t
+qtbxq_tests8_4 (int8x16_t r, int8x16x4_t tab, int8x16_t idx)
+{
+ return vqtbx4q_s8 (r, tab, idx);
+}
+
+uint8x16_t
+qtbxq_testu8_4 (uint8x16_t r, uint8x16x4_t tab, uint8x16_t idx)
+{
+ return vqtbx4q_u8 (r, tab, idx);
+}
+
+poly8x16_t
+qtbxq_testp8_4 (poly8x16_t r, poly8x16x4_t tab, uint8x16_t idx)
+{
+ return vqtbx4q_p8 (r, tab, idx);
+}
+
+/* { dg-final { scan-assembler-times "tbl v" 42} } */
+/* { dg-final { scan-assembler-times "tbx v" 30} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/tst-1.c b/gcc/testsuite/gcc.target/aarch64/tst-1.c
new file mode 100644
index 00000000000..b37c522e2b3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/tst-1.c
@@ -0,0 +1,49 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
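+
+/* Check that bitwise AND tests against zero compile to tst in its
+   register, immediate, and shifted-register forms, and that single-bit
+   branch tests compile to tbnz/tbz, for 32-bit and 64-bit operands.  */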
+
+volatile unsigned int w0, w1;
+volatile int result;
+
+void test_si() {
+ /* { dg-final { scan-assembler "tst\tw\[0-9\]*, w\[0-9\]*\n" } } */
+ result = !(w0 & w1);
+ /* { dg-final { scan-assembler "tst\tw\[0-9\]*, \(0x\[0-9a-fA-F\]+\)|\(\[0-9\]+\)" } } */
+ result = !(w0 & 0x00f0);
+ /* { dg-final { scan-assembler "tst\tw\[0-9\]*.*lsl 4" } } */
+ result = !(w0 & (w1 << 4));
+}
+
+void test_si_tbnz() {
+ /* { dg-final { scan-assembler "tbnz\t\[wx\]\[0-9\]*" } } */
+jumpto:
+ if (w0 & 0x08) goto jumpto;
+}
+
+void test_si_tbz() {
+ /* { dg-final { scan-assembler "tbz\t\[wx\]\[0-9\]*" } } */
+jumpto:
+ if (!(w1 & 0x08)) goto jumpto;
+}
+
+volatile unsigned long long x0, x1;
+
+void test_di() {
+ /* { dg-final { scan-assembler "tst\tx\[0-9\]*, x\[0-9\]*\n" } } */
+ result = !(x0 & x1);
+ /* { dg-final { scan-assembler "tst\tx\[0-9\]*, \(0x\[0-9a-fA-F\]+\)|\(\[0-9\]+\)" } } */
+ result = !(x0 & 0x00f0);
+ /* { dg-final { scan-assembler "tst\tx\[0-9\]*.*lsl 4" } } */
+ result = !(x0 & (x1 << 4));
+}
+
+void test_di_tbnz() {
+ /* { dg-final { scan-assembler "tbnz\tx\[0-9\]*" } } */
+jumpto:
+ if (x0 & 0x08) goto jumpto;
+}
+
+void test_di_tbz() {
+ /* { dg-final { scan-assembler "tbz\tx\[0-9\]*" } } */
+jumpto:
+ if (!(x1 & 0x08)) goto jumpto;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-abs-compile.c b/gcc/testsuite/gcc.target/aarch64/vect-abs-compile.c
new file mode 100644
index 00000000000..27146b843d6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-abs-compile.c
@@ -0,0 +1,12 @@
+
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+
+#define N 16
+
+#include "vect-abs.x"
+
+/* { dg-final { scan-assembler "abs\\tv\[0-9\]+\.16b" } } */
+/* { dg-final { scan-assembler "abs\\tv\[0-9\]+\.8h" } } */
+/* { dg-final { scan-assembler "abs\\tv\[0-9\]+\.4s" } } */
+/* { dg-final { scan-assembler "abs\\tv\[0-9\]+\.2d" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-abs.c b/gcc/testsuite/gcc.target/aarch64/vect-abs.c
new file mode 100644
index 00000000000..954b10615a3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-abs.c
@@ -0,0 +1,131 @@
+
+/* { dg-do run } */
+/* { dg-options "-O3" } */
+
+#include "limits.h"
+
+extern void abort (void);
+
+#define N 16
+
+#include "vect-abs.x"
+
+#define SET_VEC(size, type) void set_vector_##size (pRINT##size a) \
+ { \
+ int i; \
+ for (i=0; i<N; i++) \
+ a[i] = (type##_MIN) + (i + 1); \
+ }
+
+#define SET_RVEC(size, type) void set_rvector_##size (pRINT##size a) \
+ { \
+ int i; \
+ for (i=0; i<N; i++) \
+ a[i] = type##_MAX - i; \
+ }
+
+#define CHECK_VEC(size) void check_vector_##size (pRINT##size a, \
+ pRINT##size b) \
+ { \
+ int i; \
+ for (i=0; i<N; i++) \
+ if (a[i] != b[i]) \
+ abort (); \
+ }
+
+
+SET_RVEC (8, SCHAR)
+SET_RVEC (16, SHRT)
+SET_RVEC (32, INT)
+SET_RVEC (64, LONG_LONG)
+
+void
+set_rvector_long (pRLONG a)
+{
+ int i;
+ for (i=0; i<N; i++)
+ a[i] = (LONG_MAX) - i;
+}
+
+SET_VEC (8, SCHAR)
+SET_VEC (16, SHRT)
+SET_VEC (32, INT)
+SET_VEC (64, LONG_LONG)
+
+void
+set_vector_long (long *__restrict__ a)
+{
+ long i;
+ for (i=0; i<N; i++)
+ a[i] = (LONG_MIN) + i + 1;
+}
+
+CHECK_VEC (8)
+CHECK_VEC (16)
+CHECK_VEC (32)
+CHECK_VEC (64)
+
+void
+check_vector_long (long *__restrict__ a, long *__restrict__ b)
+{
+ long i;
+ for (i=0; i<N; i++)
+ if (a[i] != b[i])
+ abort ();
+}
+
+int main (void)
+{
+
+ signed char a8[N];
+ short a16[N];
+ int a32[N];
+ long long a64[N];
+ /* abs () from stdlib. */
+ int alib32[N];
+ long alibl[N];
+
+
+ signed char b8[N];
+ short b16[N];
+ int b32[N];
+ long long b64[N];
+ /* abs () from stdlib. */
+ long blibl[N];
+
+ signed char abs_vector_8[N];
+ short abs_vector_16[N];
+ int abs_vector_32[N];
+ long long abs_vector_64[N];
+ long abs_vector_long[N];
+
+ /* Set up result vectors. */
+ set_rvector_8 (abs_vector_8);
+ set_rvector_16 (abs_vector_16);
+ set_rvector_32 (abs_vector_32);
+ set_rvector_long (abs_vector_long);
+ set_rvector_64 (abs_vector_64);
+
+ /* Set up inputs. */
+ set_vector_8 (b8);
+ set_vector_16 (b16);
+ set_vector_32 (b32);
+ set_vector_64 (b64);
+ set_vector_long (blibl);
+
+ /* Calculate their absolute values. */
+ absolute_s8 (a8, b8);
+ absolute_s16 (a16, b16);
+ absolute_s32 (a32, b32);
+ absolute_s64 (a64, b64);
+ /* abs () from stdlib. */
+ absolute_s32_lib (alib32, b32);
+ absolute_l32_lib (alibl, blibl);
+
+ /* Check. */
+ check_vector_8 (a8, abs_vector_8);
+ check_vector_16 (a16, abs_vector_16);
+ check_vector_32 (a32, abs_vector_32);
+ check_vector_64 (a64, abs_vector_64);
+ check_vector_32 (alib32, abs_vector_32);
+ check_vector_long (alibl, abs_vector_long);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-abs.x b/gcc/testsuite/gcc.target/aarch64/vect-abs.x
new file mode 100644
index 00000000000..2e67cc296b5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-abs.x
@@ -0,0 +1,36 @@
+
+extern int abs (int);
+extern long labs (long);
+
+typedef signed char *__restrict__ pRINT8;
+typedef short *__restrict__ pRINT16;
+typedef int *__restrict__ pRINT32;
+typedef long *__restrict__ pRLONG;
+typedef long long *__restrict__ pRINT64;
+
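+/* Open-coded ternary absolute value over N elements; at -O3 the
+   vectorizer is expected to recognize this idiom and emit the vector
+   "abs" instructions scanned for in vect-abs-compile.c.  */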
+#define DEF_ABS(size) void absolute_s##size (pRINT##size a, pRINT##size b) \
+ { \
+ int i; \
+ for (i=0; i<N; i++) \
+ a[i] = (b[i] > 0 ? b[i] : -b[i]); \
+ }
+
+DEF_ABS (8);
+DEF_ABS (16);
+DEF_ABS (32);
+DEF_ABS (64);
+
+/* Test abs () vectorization. */
+void absolute_s32_lib (pRINT32 a, pRINT32 b)
+{
+ int i;
+ for (i=0; i<N; i++)
+ a[i] = abs (b[i]);
+}
+
+void absolute_l32_lib (pRLONG a, pRLONG b)
+{
+ int i;
+ for (i=0; i<N; i++)
+ a[i] = labs (b[i]);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-compile.c b/gcc/testsuite/gcc.target/aarch64/vect-compile.c
new file mode 100644
index 00000000000..260c1e041c0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-compile.c
@@ -0,0 +1,20 @@
+
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+
+#include "vect.x"
+
+/* { dg-final { scan-assembler "orn\\tv" } } */
+/* { dg-final { scan-assembler "bic\\tv" } } */
+/* { dg-final { scan-assembler "mla\\tv" } } */
+/* { dg-final { scan-assembler "mls\\tv" } } */
+/* { dg-final { scan-assembler "smax\\tv" } } */
+/* { dg-final { scan-assembler "smin\\tv" } } */
+/* { dg-final { scan-assembler "umax\\tv" } } */
+/* { dg-final { scan-assembler "umin\\tv" } } */
+/* { dg-final { scan-assembler "umaxv" } } */
+/* { dg-final { scan-assembler "uminv" } } */
+/* { dg-final { scan-assembler "smaxv" } } */
+/* { dg-final { scan-assembler "sminv" } } */
+/* { dg-final { scan-assembler-times "addv" 2} } */
+/* { dg-final { scan-assembler-times "addp" 2} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-faddv-compile.c b/gcc/testsuite/gcc.target/aarch64/vect-faddv-compile.c
new file mode 100644
index 00000000000..cce9240343f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-faddv-compile.c
@@ -0,0 +1,7 @@
+
+/* { dg-do compile } */
+/* { dg-options "-O3 -ffast-math" } */
+
+#include "vect-faddv.x"
+
+/* { dg-final { scan-assembler-times "faddp\\tv" 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-faddv.c b/gcc/testsuite/gcc.target/aarch64/vect-faddv.c
new file mode 100644
index 00000000000..f30bde8e8df
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-faddv.c
@@ -0,0 +1,31 @@
+
+/* { dg-do run } */
+/* { dg-options "-O3 -ffast-math" } */
+
+extern void abort (void);
+
+#include "vect-faddv.x"
+
+int main (void)
+{
+ float addv_f32_value = -120.0f;
+ double addv_f64_value = 120.0;
+ float af32[16];
+ double af64[16];
+ int i;
+
+ /* Set up input vectors. */
+ for (i=0; i<16; i++)
+ {
+ af32[i] = (float)-i;
+ af64[i] = (double)i;
+ }
+
+ if (addv_f32 (af32) != addv_f32_value)
+ abort ();
+
+ if (addv_f64 (af64) != addv_f64_value)
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-faddv.x b/gcc/testsuite/gcc.target/aarch64/vect-faddv.x
new file mode 100644
index 00000000000..d99ab215639
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-faddv.x
@@ -0,0 +1,23 @@
+
+typedef float *__restrict__ pRF32;
+typedef double *__restrict__ pRF64;
+
+float addv_f32 (pRF32 a)
+{
+ int i;
+ float s = 0.0;
+ for (i=0; i<16; i++)
+ s += a[i];
+
+ return s;
+}
+
+double addv_f64 (pRF64 a)
+{
+ int i;
+ double s = 0.0;
+ for (i=0; i<16; i++)
+ s += a[i];
+
+ return s;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin-compile.c b/gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin-compile.c
new file mode 100644
index 00000000000..1285a506320
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin-compile.c
@@ -0,0 +1,7 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 -ffast-math" } */
+
+#include "vect-fmax-fmin.x"
+
+/* { dg-final { scan-assembler "fmaxnm\\tv" } } */
+/* { dg-final { scan-assembler "fminnm\\tv" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin.c b/gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin.c
new file mode 100644
index 00000000000..42600b7393d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin.c
@@ -0,0 +1,105 @@
+/* { dg-do run } */
+/* { dg-options "-O3 -ffast-math" } */
+
+extern void abort (void);
+
+#include "vect-fmax-fmin.x"
+
+#include "vect-fmaxv-fminv.x"
+
+#define DEFN_SETV(type) \
+  void set_vector_##type (pR##type a, type n) \
+ { \
+ int i; \
+ for (i=0; i<16; i++) \
+ a[i] = n; \
+ }
+
+#define DEFN_CHECKV(type) \
+ void check_vector_##type (pR##type a, pR##type vec) \
+ { \
+ int i; \
+ for (i=0; i<16; i++) \
+ if (a[i] != vec[i]) \
+ abort (); \
+ }
+
+#define TEST2(fname, type) \
+ set_vector_##type (c##type, 0.0); \
+ fname##_##type (a##type, b##type); \
+ check_vector_##type (c##type, fname##_##type##_vector);
+
+#define TEST3(fname, type) \
+ set_vector_##type (c##type, 0.0); \
+ fname##_##type (a##type, b##type, c##type); \
+ check_vector_##type (c##type, fname##_##type##_vector);
+
+#define TEST(fname, N) \
+ TEST##N (fname, F32); \
+ TEST##N (fname, F64);
+
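+/* TEST3 zeroes the destination vector, runs the F32 and F64 variants
+   of the named operation, and checks the result against the matching
+   golden vector; the N argument of TEST selects the two- or
+   three-operand form.  */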
+typedef float F32;
+typedef double F64;
+
+DEFN_SETV (F32)
+DEFN_SETV (F64)
+
+DEFN_CHECKV (F32)
+DEFN_CHECKV (F64)
+
+int main (void)
+{
+
+ F32 aF32[16];
+ F32 bF32[16];
+ F32 cF32[16];
+
+ F64 aF64[16];
+ F64 bF64[16];
+ F64 cF64[16];
+ int i;
+
+ /* Golden vectors. */
+ F32 max_F32_vector[] = { 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0,
+ 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0 };
+
+ F64 max_F64_vector[] = { 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0,
+ 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0 };
+
+ F32 min_F32_vector[] = { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0,
+ 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0 };
+
+ F64 min_F64_vector[] = { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0,
+ 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0 };
+
+ F32 minv_F32_value = 0.0f;
+ F32 maxv_F32_value = 15.0f;
+
+ F64 minv_F64_value = 0.0;
+ F64 maxv_F64_value = 15.0;
+
+  /* Set up input vectors. */
+ for (i=0; i<16; i++)
+ {
+ aF32[i] = (float)(15-i);
+ bF32[i] = (float)i;
+ aF64[i] = (double)(15-i);
+ bF64[i] = (double)i;
+ }
+
+ TEST (max, 3);
+ TEST (min, 3);
+
+  /* Test across-lanes ops. */
+ if (maxv_f32 (max_F32_vector) != maxv_F32_value)
+ abort ();
+ if (minv_f32 (min_F32_vector) != minv_F32_value)
+ abort ();
+
+ if (maxv_f64 (max_F64_vector) != maxv_F64_value)
+ abort ();
+ if (minv_f64 (min_F64_vector) != minv_F64_value)
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin.x b/gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin.x
new file mode 100644
index 00000000000..a8948208a1e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin.x
@@ -0,0 +1,32 @@
+
+typedef float *__restrict__ pRF32;
+typedef double *__restrict__ pRF64;
+
+
+void max_F32 (pRF32 a, pRF32 b, pRF32 c)
+{
+ int i;
+ for (i=0;i<16;i++)
+ c[i] = (a[i] > b[i] ? a[i] : b[i]);
+}
+
+void min_F32 (pRF32 a, pRF32 b, pRF32 c)
+{
+ int i;
+ for (i=0;i<16;i++)
+ c[i] = (a[i] < b[i] ? a[i] : b[i]);
+}
+
+void max_F64 (pRF64 a, pRF64 b, pRF64 c)
+{
+ int i;
+ for (i=0;i<16;i++)
+ c[i] = (a[i] > b[i] ? a[i] : b[i]);
+}
+
+void min_F64 (pRF64 a, pRF64 b, pRF64 c)
+{
+ int i;
+ for (i=0;i<16;i++)
+ c[i] = (a[i] < b[i] ? a[i] : b[i]);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-fmaxv-fminv-compile.c b/gcc/testsuite/gcc.target/aarch64/vect-fmaxv-fminv-compile.c
new file mode 100644
index 00000000000..913cc8ce883
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-fmaxv-fminv-compile.c
@@ -0,0 +1,10 @@
+
+/* { dg-do compile } */
+/* { dg-options "-O3 -ffast-math" } */
+
+#include "vect-fmaxv-fminv.x"
+
+/* { dg-final { scan-assembler "fminnmv" } } */
+/* { dg-final { scan-assembler "fmaxnmv" } } */
+/* { dg-final { scan-assembler "fminnmp" } } */
+/* { dg-final { scan-assembler "fmaxnmp" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-fmaxv-fminv.x b/gcc/testsuite/gcc.target/aarch64/vect-fmaxv-fminv.x
new file mode 100644
index 00000000000..0bc6ba494cf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-fmaxv-fminv.x
@@ -0,0 +1,43 @@
+
+typedef float *__restrict__ pRF32;
+typedef double *__restrict__ pRF64;
+
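+/* Conditional-select reduction loops; with -ffast-math these are
+   expected to become the fmaxnmv/fminnmv across-lanes reductions (and
+   the pairwise fmaxnmp/fminnmp for the two-element double vectors)
+   scanned for in vect-fmaxv-fminv-compile.c.  */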
+float maxv_f32 (pRF32 a)
+{
+ int i;
+ float s = a[0];
+ for (i=1;i<8;i++)
+ s = (s > a[i] ? s : a[i]);
+
+ return s;
+}
+
+float minv_f32 (pRF32 a)
+{
+ int i;
+ float s = a[0];
+ for (i=1;i<16;i++)
+ s = (s < a[i] ? s : a[i]);
+
+ return s;
+}
+
+double maxv_f64 (pRF64 a)
+{
+ int i;
+ double s = a[0];
+ for (i=1;i<8;i++)
+ s = (s > a[i] ? s : a[i]);
+
+ return s;
+}
+
+double minv_f64 (pRF64 a)
+{
+ int i;
+ double s = a[0];
+ for (i=1;i<16;i++)
+ s = (s < a[i] ? s : a[i]);
+
+ return s;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-fp-compile.c b/gcc/testsuite/gcc.target/aarch64/vect-fp-compile.c
new file mode 100644
index 00000000000..b953dfae3b8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-fp-compile.c
@@ -0,0 +1,13 @@
+
+
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+
+#include "vect-fp.x"
+
+/* { dg-final { scan-assembler "fadd\\tv" } } */
+/* { dg-final { scan-assembler "fsub\\tv" } } */
+/* { dg-final { scan-assembler "fmul\\tv" } } */
+/* { dg-final { scan-assembler "fdiv\\tv" } } */
+/* { dg-final { scan-assembler "fneg\\tv" } } */
+/* { dg-final { scan-assembler "fabs\\tv" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-fp.c b/gcc/testsuite/gcc.target/aarch64/vect-fp.c
new file mode 100644
index 00000000000..a7357b7f065
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-fp.c
@@ -0,0 +1,137 @@
+
+/* { dg-do run } */
+/* { dg-options "-O3" } */
+
+extern void abort (void);
+
+#include "vect-fp.x"
+
+
+#define DEFN_SETV(type) \
+  void set_vector_##type (pR##type a, type n) \
+ { \
+ int i; \
+ for (i=0; i<16; i++) \
+ a[i] = n; \
+ }
+
+#define DEFN_CHECKV(type) \
+ void check_vector_##type (pR##type a, pR##type vec) \
+ { \
+ int i; \
+ for (i=0; i<16; i++) \
+ if (a[i] != vec[i]) \
+ abort (); \
+ }
+
+#define TEST2(fname, type) \
+ set_vector_##type (a##type, 0.0); \
+ fname##_##type (a##type, b##type); \
+ check_vector_##type (a##type, fname##_##type##_vector);
+
+#define TEST3(fname, type) \
+ set_vector_##type (a##type, 0.0); \
+ fname##_##type (a##type, b##type, c##type); \
+ check_vector_##type (a##type, fname##_##type##_vector);
+
+#define TEST(fname, N) \
+ TEST##N(fname, F32); \
+ TEST##N(fname, F64);
+
+DEFN_SETV (F32)
+DEFN_SETV (F64)
+
+DEFN_CHECKV (F32)
+DEFN_CHECKV (F64)
+
+int main (void)
+{
+ F32 aF32[16];
+ F32 bF32[16];
+ F32 cF32[16];
+
+ F64 aF64[16];
+ F64 bF64[16];
+ F64 cF64[16];
+ int i;
+
+ F32 add_F32_vector[] = { 3.0f, 5.0f, 7.0f, 9.0f, 11.0f,
+ 13.0f, 15.0f, 17.0f, 19.0f,
+ 21.0f, 23.0f, 25.0f, 27.0f,
+ 29.0f, 31.0f, 33.0f };
+
+ F64 add_F64_vector[] = { 3.0, 5.0, 7.0, 9.0, 11.0,
+ 13.0, 15.0, 17.0, 19.0,
+ 21.0, 23.0, 25.0, 27.0,
+ 29.0, 31.0, 33.0 };
+
+ F32 sub_F32_vector[] = { -1.0f, -1.0f, -1.0f, -1.0f, -1.0f,
+ -1.0f, -1.0f, -1.0f, -1.0f, -1.0f,
+ -1.0f, -1.0f, -1.0f, -1.0f, -1.0f,
+ -1.0f };
+
+ F64 sub_F64_vector[] = { -1.0, -1.0, -1.0, -1.0, -1.0,
+ -1.0, -1.0, -1.0, -1.0, -1.0,
+ -1.0, -1.0, -1.0, -1.0, -1.0,
+ -1.0 };
+
+ F32 mul_F32_vector[] = { 2.0f, 6.0f, 12.0f, 20.0f, 30.0f,
+ 42.0f, 56.0f, 72.0f, 90.0f,
+ 110.0f, 132.0f, 156.0f, 182.0f,
+ 210.0f, 240.0f, 272.0f };
+
+ F64 mul_F64_vector[] = { 2.0, 6.0, 12.0, 20.0, 30.0,
+ 42.0, 56.0, 72.0, 90.0,
+ 110.0, 132.0, 156.0, 182.0,
+ 210.0, 240.0, 272.0 };
+
+ F32 div_F32_vector[] = { 0.5f, (float)(2.0/3.0), 0.75f, 0.8f,
+ (float)(5.0/6.0), (float)(6.0/7.0), 0.875000f,
+ (float)(8.0/9.0), 0.900000f, (float)(10.0/11.0),
+ (float)(11.0/12.0), (float)(12.0/13.0),
+ (float)(13.0/14.0), (float)(14.0/15.0), 0.937500f,
+ (float)(16.0/17.0) };
+
+ F64 div_F64_vector[] = { 0.5, (2.0/3.0), 0.75, 0.8, (5.0/6.0),
+ (6.0/7.0), 0.875000, (8.0/9.0), 0.900000,
+ (10.0/11.0), (11.0/12.0), (12.0/13.0), (13.0/14.0),
+ (14.0/15.0), 0.937500, (16.0/17.0) };
+
+ F32 neg_F32_vector[] = { -1.0f, -2.0f, -3.0f, -4.0f,
+ -5.0f, -6.0f, -7.0f, -8.0f,
+ -9.0f, -10.0f, -11.0f, -12.0f,
+ -13.0f, -14.0f, -15.0f, -16.0f };
+
+ F64 neg_F64_vector[] = { -1.0, -2.0, -3.0, -4.0,
+ -5.0, -6.0, -7.0, -8.0,
+ -9.0, -10.0, -11.0, -12.0,
+ -13.0, -14.0, -15.0, -16.0 };
+
+ F32 abs_F32_vector[] = { 1.0f, 2.0f, 3.0f, 4.0f,
+ 5.0f, 6.0f, 7.0f, 8.0f,
+ 9.0f, 10.0f, 11.0f, 12.0f,
+ 13.0f, 14.0f, 15.0f, 16.0f };
+
+ F64 abs_F64_vector[] = { 1.0, 2.0, 3.0, 4.0,
+ 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0,
+ 13.0, 14.0, 15.0, 16.0 };
+
+  /* Set up input vectors. */
+ for (i=1; i<=16; i++)
+ {
+ bF32[i-1] = (float)i;
+ cF32[i-1] = (float)(i+1);
+ bF64[i-1] = (double)i;
+ cF64[i-1] = (double)(i+1);
+ }
+
+ TEST (add, 3);
+ TEST (sub, 3);
+ TEST (mul, 3);
+ TEST (div, 3);
+ TEST (neg, 2);
+ TEST (abs, 2);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-fp.x b/gcc/testsuite/gcc.target/aarch64/vect-fp.x
new file mode 100644
index 00000000000..338f6edf945
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-fp.x
@@ -0,0 +1,44 @@
+
+typedef float F32;
+typedef double F64;
+typedef float *__restrict__ pRF32;
+typedef double *__restrict__ pRF64;
+
+extern float fabsf (float);
+extern double fabs (double);
+
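+/* DEF3 defines an element-wise binary operation over 16 elements and
+   DEF2 a unary one; at -O3 these loops are expected to vectorize into
+   the fadd/fsub/fmul/fdiv/fneg/fabs instructions scanned for in
+   vect-fp-compile.c.  */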
+#define DEF3(fname, type, op) \
+ void fname##_##type (pR##type a, \
+ pR##type b, \
+ pR##type c) \
+ { \
+ int i; \
+ for (i=0; i<16; i++) \
+ a[i] = b[i] op c[i]; \
+ }
+
+#define DEF2(fname, type, op) \
+ void fname##_##type (pR##type a, \
+ pR##type b) \
+ { \
+ int i; \
+ for (i=0; i<16; i++) \
+ a[i] = op(b[i]); \
+ }
+
+
+#define DEFN3(fname, op) \
+ DEF3 (fname, F32, op) \
+ DEF3 (fname, F64, op)
+
+#define DEFN2(fname, op) \
+ DEF2 (fname, F32, op) \
+ DEF2 (fname, F64, op)
+
+DEFN3 (add, +)
+DEFN3 (sub, -)
+DEFN3 (mul, *)
+DEFN3 (div, /)
+DEFN2 (neg, -)
+DEF2 (abs, F32, fabsf)
+DEF2 (abs, F64, fabs)
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-mull-compile.c b/gcc/testsuite/gcc.target/aarch64/vect-mull-compile.c
new file mode 100644
index 00000000000..e51eaee5429
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-mull-compile.c
@@ -0,0 +1,16 @@
+
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+
+#define N 16
+
+#include "vect-mull.x"
+
+DEF_MULL2 (DEF_MULLB)
+DEF_MULL2 (DEF_MULLH)
+DEF_MULL2 (DEF_MULLS)
+
+/* { dg-final { scan-assembler-times "smull v" 3 } } */
+/* { dg-final { scan-assembler-times "smull2 v" 3 } } */
+/* { dg-final { scan-assembler-times "umull v" 3 } } */
+/* { dg-final { scan-assembler-times "umull2 v" 3 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-mull.c b/gcc/testsuite/gcc.target/aarch64/vect-mull.c
new file mode 100644
index 00000000000..62a3552f7b1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-mull.c
@@ -0,0 +1,138 @@
+
+/* { dg-do run } */
+/* { dg-options "-O3" } */
+
+#include "limits.h"
+
+extern void abort (void);
+
+#define N 16
+
+#include "vect-mull.x"
+
+#define SET_VEC(size, type, sign) \
+ void set_vector_##sign##size \
+ (pR##sign##INT##size b, \
+ pR##sign##INT##size c) \
+ { \
+ int i; \
+ for (i=0; i<N; i++) \
+ { \
+ b[i] = (type)((INT_MAX >> (32 - size)) - i); \
+ c[i] = (type)((INT_MAX >> (32 - size)) - i * 2); \
+ } \
+ }
+
+#define CHECK_VEC(size, sign) void check_vector_##sign##size (pR##sign##INT##size a, \
+ pR##sign##INT##size b) \
+ { \
+ int i; \
+ for (i=0; i<N; i++) \
+ if (a[i] != b[i]) \
+ abort (); \
+ }
+
+SET_VEC (8, signed char, S)
+SET_VEC (16, signed short, S)
+SET_VEC (32, signed int, S)
+
+SET_VEC (8, unsigned char, U)
+SET_VEC (16, unsigned short, U)
+SET_VEC (32, unsigned int, U)
+
+DEF_MULL2 (DEF_MULLB)
+DEF_MULL2 (DEF_MULLH)
+DEF_MULL2 (DEF_MULLS)
+
+CHECK_VEC (8, S)
+CHECK_VEC (8, U)
+CHECK_VEC (16, S)
+CHECK_VEC (16, U)
+CHECK_VEC (32, S)
+CHECK_VEC (32, U)
+CHECK_VEC (64, S)
+CHECK_VEC (64, U)
+
+int main (void)
+{
+
+#define DECL_VAR(name) signed char name##_S8[N]; \
+ signed short name##_S16[N]; \
+ signed int name##_S32[N]; \
+ unsigned char name##_U8[N]; \
+ unsigned short name##_U16[N]; \
+ unsigned int name##_U32[N];
+
+ DECL_VAR (output);
+ signed long long output_S64[N];
+ unsigned long long output_U64[N];
+
+ DECL_VAR (input1);
+ DECL_VAR (input2);
+
+ signed short expected_S16[] =
+ { 16129, 15750, 15375, 15004, 14637, 14274, 13915, 13560,
+ 13209, 12862, 12519, 12180, 11845, 11514, 11187, 10864 };
+
+ signed int expected_S32[] =
+ { 1073676289, 1073577990, 1073479695, 1073381404, 1073283117,
+ 1073184834, 1073086555, 1072988280, 1072890009, 1072791742,
+ 1072693479, 1072595220, 1072496965, 1072398714, 1072300467,
+ 1072202224 };
+
+ signed long long expected_S64[] =
+ { 4611686014132420609LL, 4611686007689969670LL,
+ 4611686001247518735LL, 4611685994805067804LL,
+ 4611685988362616877LL, 4611685981920165954LL,
+ 4611685975477715035LL, 4611685969035264120LL,
+ 4611685962592813209LL, 4611685956150362302LL,
+ 4611685949707911399LL, 4611685943265460500LL,
+ 4611685936823009605LL, 4611685930380558714LL,
+ 4611685923938107827LL, 4611685917495656944LL };
+
+ unsigned short expected_U16[] =
+ { 16129, 15750, 15375, 15004, 14637, 14274, 13915, 13560,
+ 13209, 12862, 12519, 12180, 11845, 11514, 11187, 10864 };
+
+ unsigned int expected_U32[] =
+ { 1073676289, 1073577990, 1073479695, 1073381404, 1073283117,
+ 1073184834, 1073086555, 1072988280, 1072890009, 1072791742,
+ 1072693479, 1072595220, 1072496965, 1072398714, 1072300467,
+ 1072202224 };
+
+ unsigned long long expected_U64[] =
+ { 4611686014132420609ULL, 4611686007689969670ULL,
+ 4611686001247518735ULL, 4611685994805067804ULL,
+ 4611685988362616877ULL, 4611685981920165954ULL,
+ 4611685975477715035ULL, 4611685969035264120ULL,
+ 4611685962592813209ULL, 4611685956150362302ULL,
+ 4611685949707911399ULL, 4611685943265460500ULL,
+ 4611685936823009605ULL, 4611685930380558714ULL,
+ 4611685923938107827ULL, 4611685917495656944ULL };
+
+ /* Set up input. */
+ set_vector_S8 (input1_S8, input2_S8);
+ set_vector_S16 (input1_S16, input2_S16);
+ set_vector_S32 (input1_S32, input2_S32);
+ set_vector_U8 (input1_U8, input2_U8);
+ set_vector_U16 (input1_U16, input2_U16);
+ set_vector_U32 (input1_U32, input2_U32);
+
+ /* Calculate actual results. */
+ widen_mult_Sb (output_S16, input1_S8, input2_S8);
+ widen_mult_Sh (output_S32, input1_S16, input2_S16);
+ widen_mult_Ss (output_S64, input1_S32, input2_S32);
+ widen_mult_Ub (output_U16, input1_U8, input2_U8);
+ widen_mult_Uh (output_U32, input1_U16, input2_U16);
+ widen_mult_Us (output_U64, input1_U32, input2_U32);
+
+ /* Check actual vs. expected. */
+ check_vector_S16 (expected_S16, output_S16);
+ check_vector_S32 (expected_S32, output_S32);
+ check_vector_S64 (expected_S64, output_S64);
+ check_vector_U16 (expected_U16, output_U16);
+ check_vector_U32 (expected_U32, output_U32);
+ check_vector_U64 (expected_U64, output_U64);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-mull.x b/gcc/testsuite/gcc.target/aarch64/vect-mull.x
new file mode 100644
index 00000000000..39ec43d77e3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect-mull.x
@@ -0,0 +1,49 @@
+
+typedef signed char *__restrict__ pRSINT8;
+typedef signed short *__restrict__ pRSINT16;
+typedef signed int *__restrict__ pRSINT32;
+typedef signed long long *__restrict__ pRSINT64;
+
+typedef unsigned char *__restrict__ pRUINT8;
+typedef unsigned short *__restrict__ pRUINT16;
+typedef unsigned int *__restrict__ pRUINT32;
+typedef unsigned long long *__restrict__ pRUINT64;
+
+typedef signed short SH;
+typedef unsigned short UH;
+typedef signed int SS;
+typedef unsigned int US;
+typedef signed long long SLL;
+typedef unsigned long long ULL;
+
+#define DEF_MULLB(sign) \
+ void widen_mult_##sign##b (pR##sign##INT##16 a, \
+ pR##sign##INT##8 b, \
+ pR##sign##INT##8 c) \
+ { \
+ int i; \
+ for (i=0; i<N; i++) \
+ a[i] = (sign##H)b[i] * c[i]; \
+ }
+
+#define DEF_MULLH(sign) \
+ void widen_mult_##sign##h (pR##sign##INT##32 a, \
+ pR##sign##INT##16 b, \
+ pR##sign##INT##16 c) \
+ { \
+ int i; \
+ for (i=0; i<N; i++) \
+ a[i] = (sign##S)b[i] * c[i]; \
+ }
+#define DEF_MULLS(sign) \
+ void widen_mult_##sign##s (pR##sign##INT##64 a, \
+ pR##sign##INT##32 b, \
+ pR##sign##INT##32 c) \
+ { \
+ int i; \
+ for (i=0; i<N; i++) \
+ a[i] = (sign##LL)b[i] * c[i]; \
+ }
+
+#define DEF_MULL2(x) x (S) \
+ x (U)
diff --git a/gcc/testsuite/gcc.target/aarch64/vect.c b/gcc/testsuite/gcc.target/aarch64/vect.c
new file mode 100644
index 00000000000..fc4874440a0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect.c
@@ -0,0 +1,93 @@
+
+/* { dg-do run } */
+/* { dg-options "-O3" } */
+
+#include "vect.x"
+
+extern void abort (void);
+
+void set_vector (int *a, int n)
+{
+ int i;
+ for (i=0; i<16; i++)
+ a[i] = n;
+}
+
+void check_vector (pRINT c, pRINT result, char *str)
+{
+ int i;
+ for (i=0; i<16 ; i++)
+ if (c[i] != result[i])
+ abort ();
+}
+
+#define TEST(func, sign) set_vector (sign##c, 0); \
+ func (sign##a, sign##b, sign##c); \
+ check_vector (sign##c, func##_vector, #func);
+
+
+#define TESTV(func, sign) \
+ if (func (sign##a) != func##_value) \
+ abort ();
+
+#define TESTVLL(func, sign) \
+ if (func (ll##sign##a) != func##_value) \
+ abort ();
+
+int main (void)
+{
+ int sa[16];
+ int sb[16];
+ int sc[16];
+ unsigned int ua[16];
+ unsigned int ub[16];
+ unsigned int uc[16];
+ long long llsa[16];
+ unsigned long long llua[16];
+ int i;
+
+ /* Table of standard values to compare against. */
+ unsigned int test_bic_vector[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ unsigned int test_orn_vector[] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
+ int mla_vector[] = {0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225};
+ int mls_vector[] = {0, -1, -4, -9, -16, -25, -36, -49, -64, -81, -100, -121, -144, -169, -196, -225};
+ int smax_vector[] = {0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15};
+ int smin_vector[] = {0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15};
+ unsigned int umax_vector[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+ unsigned int umin_vector[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+ int reduce_smax_value = 0;
+ int reduce_smin_value = -15;
+ unsigned int reduce_umax_value = 15;
+ unsigned int reduce_umin_value = 0;
+ unsigned int reduce_add_u32_value = 120;
+ int reduce_add_s32_value = -120;
+ long long reduce_add_s64_value = -120;
+ unsigned long long reduce_add_u64_value = 120;
+
+ /* Set up input vectors. */
+ for (i=0; i < 16; i++)
+ {
+ sa[i] = sb[i] = -i;
+ llsa[i] = (long long)-i;
+ ua[i] = ub[i] = i;
+ llua[i] = (unsigned long long)i;
+ }
+
+ TEST (test_bic, s);
+ TEST (test_orn, s);
+ TEST (mla, s);
+ TEST (mls, s);
+ TEST (smax, s);
+ TEST (smin, s);
+ TEST (umax, u);
+ TEST (umin, u);
+ TESTV (reduce_smax, s);
+ TESTV (reduce_smin, s);
+ TESTV (reduce_umax, u);
+ TESTV (reduce_umin, u);
+ TESTV (reduce_add_u32, u);
+ TESTV (reduce_add_s32, s);
+ TESTVLL (reduce_add_u64, u);
+ TESTVLL (reduce_add_s64, s);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vect.x b/gcc/testsuite/gcc.target/aarch64/vect.x
new file mode 100644
index 00000000000..88078349750
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vect.x
@@ -0,0 +1,140 @@
+typedef int *__restrict__ pRINT;
+typedef unsigned int *__restrict__ pRUINT;
+typedef long long *__restrict__ pRINT64;
+typedef unsigned long long *__restrict__ pRUINT64;
+
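+/* Element-wise orn/bic/mla/mls/min/max kernels and min/max/add
+   reductions, each written as a loop the vectorizer is expected to map
+   onto a single Advanced SIMD operation (see vect-compile.c).  */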
+void test_orn (pRUINT a, pRUINT b, pRUINT c)
+{
+ int i;
+ for (i = 0; i < 16; i++)
+ c[i] = a[i] | (~b[i]);
+}
+
+void test_bic (pRUINT a, pRUINT b, pRUINT c)
+{
+ int i;
+ for (i = 0; i < 16; i++)
+ c[i] = a[i] & (~b[i]);
+}
+
+void mla (pRINT a, pRINT b, pRINT c)
+{
+ int i;
+ for (i=0;i<16;i++)
+ c[i] += a[i] * b[i];
+}
+
+void mls (pRINT a, pRINT b, pRINT c)
+{
+ int i;
+ for (i=0;i<16;i++)
+ c[i] -= a[i] * b[i];
+}
+
+void smax (pRINT a, pRINT b, pRINT c)
+{
+ int i;
+ for (i=0;i<16;i++)
+ c[i] = (a[i] > b[i] ? a[i] : b[i]);
+}
+
+void smin (pRINT a, pRINT b, pRINT c)
+{
+ int i;
+ for (i=0;i<16;i++)
+ c[i] = (a[i] < b[i] ? a[i] : b[i]);
+}
+
+void umax (pRUINT a, pRUINT b, pRUINT c)
+{
+ int i;
+ for (i=0;i<16;i++)
+ c[i] = (a[i] > b[i] ? a[i] : b[i]);
+}
+
+void umin (pRUINT a, pRUINT b, pRUINT c)
+{
+ int i;
+ for (i=0;i<16;i++)
+ c[i] = (a[i] < b[i] ? a[i] : b[i]);
+}
+
+unsigned int reduce_umax (pRUINT a)
+{
+ int i;
+ unsigned int s = a[0];
+ for (i = 1; i < 16; i++)
+ s = (s > a[i] ? s : a[i]);
+
+ return s;
+}
+
+unsigned int reduce_umin (pRUINT a)
+{
+ int i;
+ unsigned int s = a[0];
+ for (i = 1; i < 16; i++)
+ s = (s < a[i] ? s : a[i]);
+
+ return s;
+}
+
+int reduce_smax (pRINT a)
+{
+ int i;
+ int s = a[0];
+ for (i = 1; i < 16; i++)
+ s = (s > a[i] ? s : a[i]);
+
+ return s;
+}
+
+int reduce_smin (pRINT a)
+{
+ int i;
+ int s = a[0];
+ for (i = 1; i < 16; i++)
+ s = (s < a[i] ? s : a[i]);
+
+ return s;
+}
+
+unsigned int reduce_add_u32 (pRINT a)
+{
+ int i;
+ unsigned int s = 0;
+ for (i = 0; i < 16; i++)
+ s += a[i];
+
+ return s;
+}
+
+int reduce_add_s32 (pRINT a)
+{
+ int i;
+ int s = 0;
+ for (i = 0; i < 16; i++)
+ s += a[i];
+
+ return s;
+}
+
+unsigned long long reduce_add_u64 (pRUINT64 a)
+{
+ int i;
+ unsigned long long s = 0;
+ for (i = 0; i < 16; i++)
+ s += a[i];
+
+ return s;
+}
+
+long long reduce_add_s64 (pRINT64 a)
+{
+ int i;
+ long long s = 0;
+ for (i = 0; i < 16; i++)
+ s += a[i];
+
+ return s;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vector_intrinsics.c b/gcc/testsuite/gcc.target/aarch64/vector_intrinsics.c
new file mode 100644
index 00000000000..7bc9caf4e10
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vector_intrinsics.c
@@ -0,0 +1,803 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#include "../../../config/aarch64/arm_neon.h"
+
+
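+/* One wrapper function per intrinsic; each scan-assembler-times
+   directive checks that the corresponding wrapper compiles to exactly
+   one occurrence of the expected instruction.  */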
+/* { dg-final { scan-assembler-times "\\tfmax\\tv\[0-9\]+\.2s, v\[0-9\].2s, v\[0-9\].2s" 1 } } */
+
+float32x2_t
+test_vmax_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return vmax_f32(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsmax\\tv\[0-9\]+\.8b, v\[0-9\].8b, v\[0-9\].8b" 1 } } */
+
+int8x8_t
+test_vmax_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return vmax_s8(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tumax\\tv\[0-9\]+\.8b, v\[0-9\].8b, v\[0-9\].8b" 1 } } */
+
+uint8x8_t
+test_vmax_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return vmax_u8(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsmax\\tv\[0-9\]+\.4h, v\[0-9\].4h, v\[0-9\].4h" 1 } } */
+
+int16x4_t
+test_vmax_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return vmax_s16(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tumax\\tv\[0-9\]+\.4h, v\[0-9\].4h, v\[0-9\].4h" 1 } } */
+
+uint16x4_t
+test_vmax_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return vmax_u16(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsmax\\tv\[0-9\]+\.2s, v\[0-9\].2s, v\[0-9\].2s" 1 } } */
+
+int32x2_t
+test_vmax_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return vmax_s32(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tumax\\tv\[0-9\]+\.2s, v\[0-9\].2s, v\[0-9\].2s" 1 } } */
+
+uint32x2_t
+test_vmax_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return vmax_u32(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tfmax\\tv\[0-9\]+\.4s, v\[0-9\].4s, v\[0-9\].4s" 1 } } */
+
+float32x4_t
+test_vmaxq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return vmaxq_f32(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tfmax\\tv\[0-9\]+\.2d, v\[0-9\].2d, v\[0-9\].2d" 1 } } */
+
+float64x2_t
+test_vmaxq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return vmaxq_f64(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsmax\\tv\[0-9\]+\.16b, v\[0-9\].16b, v\[0-9\].16b" 1 } } */
+
+int8x16_t
+test_vmaxq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return vmaxq_s8(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tumax\\tv\[0-9\]+\.16b, v\[0-9\].16b, v\[0-9\].16b" 1 } } */
+
+uint8x16_t
+test_vmaxq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return vmaxq_u8(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsmax\\tv\[0-9\]+\.8h, v\[0-9\].8h, v\[0-9\].8h" 1 } } */
+
+int16x8_t
+test_vmaxq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return vmaxq_s16(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tumax\\tv\[0-9\]+\.8h, v\[0-9\].8h, v\[0-9\].8h" 1 } } */
+
+uint16x8_t
+test_vmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return vmaxq_u16(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsmax\\tv\[0-9\]+\.4s, v\[0-9\].4s, v\[0-9\].4s" 1 } } */
+
+int32x4_t
+test_vmaxq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return vmaxq_s32(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tumax\\tv\[0-9\]+\.4s, v\[0-9\].4s, v\[0-9\].4s" 1 } } */
+
+uint32x4_t
+test_vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return vmaxq_u32(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tfmin\\tv\[0-9\]+\.2s, v\[0-9\].2s, v\[0-9\].2s" 1 } } */
+
+float32x2_t
+test_vmin_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return vmin_f32(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsmin\\tv\[0-9\]+\.8b, v\[0-9\].8b, v\[0-9\].8b" 1 } } */
+
+int8x8_t
+test_vmin_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return vmin_s8(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tumin\\tv\[0-9\]+\.8b, v\[0-9\].8b, v\[0-9\].8b" 1 } } */
+
+uint8x8_t
+test_vmin_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return vmin_u8(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsmin\\tv\[0-9\]+\.4h, v\[0-9\].4h, v\[0-9\].4h" 1 } } */
+
+int16x4_t
+test_vmin_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return vmin_s16(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tumin\\tv\[0-9\]+\.4h, v\[0-9\].4h, v\[0-9\].4h" 1 } } */
+
+uint16x4_t
+test_vmin_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return vmin_u16(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsmin\\tv\[0-9\]+\.2s, v\[0-9\].2s, v\[0-9\].2s" 1 } } */
+
+int32x2_t
+test_vmin_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return vmin_s32(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tumin\\tv\[0-9\]+\.2s, v\[0-9\].2s, v\[0-9\].2s" 1 } } */
+
+uint32x2_t
+test_vmin_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return vmin_u32(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tfmin\\tv\[0-9\]+\.4s, v\[0-9\].4s, v\[0-9\].4s" 1 } } */
+
+float32x4_t
+test_vminq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return vminq_f32(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tfmin\\tv\[0-9\]+\.2d, v\[0-9\].2d, v\[0-9\].2d" 1 } } */
+
+float64x2_t
+test_vminq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return vminq_f64(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsmin\\tv\[0-9\]+\.16b, v\[0-9\].16b, v\[0-9\].16b" 1 } } */
+
+int8x16_t
+test_vminq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return vminq_s8(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tumin\\tv\[0-9\]+\.16b, v\[0-9\].16b, v\[0-9\].16b" 1 } } */
+
+uint8x16_t
+test_vminq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return vminq_u8(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsmin\\tv\[0-9\]+\.8h, v\[0-9\].8h, v\[0-9\].8h" 1 } } */
+
+int16x8_t
+test_vminq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return vminq_s16(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tumin\\tv\[0-9\]+\.8h, v\[0-9\].8h, v\[0-9\].8h" 1 } } */
+
+uint16x8_t
+test_vminq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return vminq_u16(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsmin\\tv\[0-9\]+\.4s, v\[0-9\].4s, v\[0-9\].4s" 1 } } */
+
+int32x4_t
+test_vminq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return vminq_s32(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tumin\\tv\[0-9\]+\.4s, v\[0-9\].4s, v\[0-9\].4s" 1 } } */
+
+uint32x4_t
+test_vminq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return vminq_u32(__a, __b);
+}
+
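[Editorial note, not part of the patch] Each vmax*/vmin* test above pins one intrinsic to exactly one smax/umax/fmax (or the min counterpart) instruction via scan-assembler-times. As a hedged reference, the per-lane operation can be modeled in plain C; the float line is only an approximation, since a C ternary does not capture FMAX's IEEE maxNum treatment of NaNs:

    /* Scalar model of one lane (sketch only).  */
    #include <stdint.h>

    static inline int8_t  smax_lane (int8_t a, int8_t b)   { return a > b ? a : b; }
    static inline uint8_t umax_lane (uint8_t a, uint8_t b) { return a > b ? a : b; }
    static inline float   fmax_lane (float a, float b)     { return a > b ? a : b; }
    /* vmax_s8 applies smax_lane independently to all 8 lanes; the q
       variants do the same over the 128-bit register.  */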
+/* { dg-final { scan-assembler-times "\\taddp\\tv\[0-9\]+\.8b, v\[0-9\].8b, v\[0-9\].8b" 2 } } */
+
+int8x8_t
+test_vpadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return vpadd_s8(__a, __b);
+}
+
+uint8x8_t
+test_vpadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return vpadd_u8(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\taddp\\tv\[0-9\]+\.4h, v\[0-9\].4h, v\[0-9\].4h" 2 } } */
+
+int16x4_t
+test_vpadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return vpadd_s16(__a, __b);
+}
+
+uint16x4_t
+test_vpadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return vpadd_u16(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\taddp\\tv\[0-9\]+\.2s, v\[0-9\].2s, v\[0-9\].2s" 2 } } */
+
+int32x2_t
+test_vpadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return vpadd_s32(__a, __b);
+}
+
+uint32x2_t
+test_vpadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return vpadd_u32(__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlal\\tv\[0-9\]+\.4s, v\[0-9\]+\.4h, v\[0-9\]+\.4h" 1 } } */
+
+int32x4_t
+test_vqdmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return vqdmlal_s16 (__a, __b, __c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlal2\\tv\[0-9\]+\.4s, v\[0-9\]+\.8h, v\[0-9\]+\.8h" 1 } } */
+
+int32x4_t
+test_vqdmlal_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return vqdmlal_high_s16 (__a, __b, __c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlal2\\tv\[0-9\]+\.4s, v\[0-9\]+\.8h, v\[0-9\]+\.h" 3 } } */
+
+int32x4_t
+test_vqdmlal_high_lane_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
+{
+ return vqdmlal_high_lane_s16 (a, b, c, 3);
+}
+
+int32x4_t
+test_vqdmlal_high_laneq_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
+{
+ return vqdmlal_high_laneq_s16 (a, b, c, 6);
+}
+
+int32x4_t
+test_vqdmlal_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
+{
+ return vqdmlal_high_n_s16 (__a, __b, __c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlal\\tv\[0-9\]+\.4s, v\[0-9\]+\.4h, v\[0-9\]+\.h" 3 } } */
+
+int32x4_t
+test_vqdmlal_lane_s16 (int32x4_t a, int16x4_t b, int16x8_t c)
+{
+ return vqdmlal_lane_s16 (a, b, c, 3);
+}
+
+int32x4_t
+test_vqdmlal_laneq_s16 (int32x4_t a, int16x4_t b, int16x8_t c)
+{
+ return vqdmlal_laneq_s16 (a, b, c, 6);
+}
+
+int32x4_t
+test_vqdmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return vqdmlal_n_s16 (__a, __b, __c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlal\\tv\[0-9\]+\.2d, v\[0-9\]+\.2s, v\[0-9\]+\.2s" 1 } } */
+
+int64x2_t
+test_vqdmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return vqdmlal_s32 (__a, __b, __c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlal2\\tv\[0-9\]+\.2d, v\[0-9\]+\.4s, v\[0-9\]+\.4s" 1 } } */
+
+int64x2_t
+test_vqdmlal_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return vqdmlal_high_s32 (__a, __b, __c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlal2\\tv\[0-9\]+\.2d, v\[0-9\]+\.4s, v\[0-9\]+\.s" 3 } } */
+
+int64x2_t
+test_vqdmlal_high_lane_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return vqdmlal_high_lane_s32 (__a, __b, __c, 1);
+}
+
+int64x2_t
+test_vqdmlal_high_laneq_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return vqdmlal_high_laneq_s32 (__a, __b, __c, 3);
+}
+
+int64x2_t
+test_vqdmlal_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
+{
+ return vqdmlal_high_n_s32 (__a, __b, __c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlal\\tv\[0-9\]+\.2d, v\[0-9\]+\.2s, v\[0-9\]+\.s" 3 } } */
+
+int64x2_t
+test_vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c)
+{
+ return vqdmlal_lane_s32 (__a, __b, __c, 1);
+}
+
+int64x2_t
+test_vqdmlal_laneq_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c)
+{
+ return vqdmlal_laneq_s32 (__a, __b, __c, 3);
+}
+
+int64x2_t
+test_vqdmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return vqdmlal_n_s32 (__a, __b, __c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlsl\\tv\[0-9\]+\.4s, v\[0-9\]+\.4h, v\[0-9\]+\.4h" 1 } } */
+
+int32x4_t
+test_vqdmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return vqdmlsl_s16 (__a, __b, __c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlsl2\\tv\[0-9\]+\.4s, v\[0-9\]+\.8h, v\[0-9\]+\.8h" 1 } } */
+
+int32x4_t
+test_vqdmlsl_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return vqdmlsl_high_s16 (__a, __b, __c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlsl2\\tv\[0-9\]+\.4s, v\[0-9\]+\.8h, v\[0-9\]+\.h" 3 } } */
+
+int32x4_t
+test_vqdmlsl_high_lane_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
+{
+ return vqdmlsl_high_lane_s16 (a, b, c, 3);
+}
+
+int32x4_t
+test_vqdmlsl_high_laneq_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
+{
+ return vqdmlsl_high_laneq_s16 (a, b, c, 6);
+}
+
+int32x4_t
+test_vqdmlsl_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
+{
+ return vqdmlsl_high_n_s16 (__a, __b, __c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlsl\\tv\[0-9\]+\.4s, v\[0-9\]+\.4h, v\[0-9\]+\.h" 3 } } */
+
+int32x4_t
+test_vqdmlsl_lane_s16 (int32x4_t a, int16x4_t b, int16x8_t c)
+{
+ return vqdmlsl_lane_s16 (a, b, c, 3);
+}
+
+int32x4_t
+test_vqdmlsl_laneq_s16 (int32x4_t a, int16x4_t b, int16x8_t c)
+{
+ return vqdmlsl_laneq_s16 (a, b, c, 6);
+}
+
+int32x4_t
+test_vqdmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return vqdmlsl_n_s16 (__a, __b, __c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlsl\\tv\[0-9\]+\.2d, v\[0-9\]+\.2s, v\[0-9\]+\.2s" 1 } } */
+
+int64x2_t
+test_vqdmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return vqdmlsl_s32 (__a, __b, __c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlsl2\\tv\[0-9\]+\.2d, v\[0-9\]+\.4s, v\[0-9\]+\.4s" 1 } } */
+
+int64x2_t
+test_vqdmlsl_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return vqdmlsl_high_s32 (__a, __b, __c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlsl2\\tv\[0-9\]+\.2d, v\[0-9\]+\.4s, v\[0-9\]+\.s" 3 } } */
+
+int64x2_t
+test_vqdmlsl_high_lane_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return vqdmlsl_high_lane_s32 (__a, __b, __c, 1);
+}
+
+int64x2_t
+test_vqdmlsl_high_laneq_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return vqdmlsl_high_laneq_s32 (__a, __b, __c, 3);
+}
+
+int64x2_t
+test_vqdmlsl_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
+{
+ return vqdmlsl_high_n_s32 (__a, __b, __c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmlsl\\tv\[0-9\]+\.2d, v\[0-9\]+\.2s, v\[0-9\]+\.s" 3 } } */
+
+int64x2_t
+test_vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c)
+{
+ return vqdmlsl_lane_s32 (__a, __b, __c, 1);
+}
+
+int64x2_t
+test_vqdmlsl_laneq_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c)
+{
+ return vqdmlsl_laneq_s32 (__a, __b, __c, 3);
+}
+
+int64x2_t
+test_vqdmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return vqdmlsl_n_s32 (__a, __b, __c);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmull\\tv\[0-9\]+\.4s, v\[0-9\]+\.4h, v\[0-9\]+\.4h" 1 } } */
+
+int32x4_t
+test_vqdmull_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return vqdmull_s16 (__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmull2\\tv\[0-9\]+\.4s, v\[0-9\]+\.8h, v\[0-9\]+\.8h" 1 } } */
+
+int32x4_t
+test_vqdmull_high_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return vqdmull_high_s16 (__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmull2\\tv\[0-9\]+\.4s, v\[0-9\]+\.8h, v\[0-9\]+\.h" 3 } } */
+
+int32x4_t
+test_vqdmull_high_lane_s16 (int16x8_t a, int16x8_t b)
+{
+ return vqdmull_high_lane_s16 (a, b, 3);
+}
+
+int32x4_t
+test_vqdmull_high_laneq_s16 (int16x8_t a, int16x8_t b)
+{
+ return vqdmull_high_laneq_s16 (a, b, 6);
+}
+
+int32x4_t
+test_vqdmull_high_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return vqdmull_high_n_s16 (__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmull\\tv\[0-9\]+\.4s, v\[0-9\]+\.4h, v\[0-9\]+\.h" 3 } } */
+
+int32x4_t
+test_vqdmull_lane_s16 (int16x4_t a, int16x8_t b)
+{
+ return vqdmull_lane_s16 (a, b, 3);
+}
+
+int32x4_t
+test_vqdmull_laneq_s16 (int16x4_t a, int16x8_t b)
+{
+ return vqdmull_laneq_s16 (a, b, 6);
+}
+
+int32x4_t
+test_vqdmull_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return vqdmull_n_s16 (__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmull\\tv\[0-9\]+\.2d, v\[0-9\]+\.2s, v\[0-9\]+\.2s" 1 } } */
+
+int64x2_t
+test_vqdmull_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return vqdmull_s32 (__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmull2\\tv\[0-9\]+\.2d, v\[0-9\]+\.4s, v\[0-9\]+\.4s" 1 } } */
+
+int64x2_t
+test_vqdmull_high_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return vqdmull_high_s32 (__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmull2\\tv\[0-9\]+\.2d, v\[0-9\]+\.4s, v\[0-9\]+\.s" 3 } } */
+
+int64x2_t
+test_vqdmull_high_lane_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return vqdmull_high_lane_s32 (__a, __b, 1);
+}
+
+int64x2_t
+test_vqdmull_high_laneq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return vqdmull_high_laneq_s32 (__a, __b, 3);
+}
+
+int64x2_t
+test_vqdmull_high_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return vqdmull_high_n_s32 (__a, __b);
+}
+
+/* { dg-final { scan-assembler-times "\\tsqdmull\\tv\[0-9\]+\.2d, v\[0-9\]+\.2s, v\[0-9\]+\.s" 3 } } */
+
+int64x2_t
+test_vqdmull_lane_s32 (int32x2_t __a, int32x4_t __b)
+{
+ return vqdmull_lane_s32 (__a, __b, 1);
+}
+
+int64x2_t
+test_vqdmull_laneq_s32 (int32x2_t __a, int32x4_t __b)
+{
+ return vqdmull_laneq_s32 (__a, __b, 1);
+}
+
+int64x2_t
+test_vqdmull_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return vqdmull_n_s32 (__a, __b);
+}
+
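[Editorial note, not part of the patch] The sqdmlal/sqdmlsl/sqdmull tests above all build on the same primitive: the saturating doubled widening product. A minimal C reference for the 16-to-32-bit case follows; the only doubling overflow is INT16_MIN * INT16_MIN, since every other product doubles within 32 bits:

    /* Sketch: saturating doubling multiply long, 16 -> 32 bits.  */
    #include <stdint.h>

    static int32_t sqdmull_lane (int16_t a, int16_t b)
    {
      int32_t prod = (int32_t) a * (int32_t) b;  /* exact in 32 bits */
      if (prod == 0x40000000)                    /* only -32768 * -32768 */
        return INT32_MAX;                        /* 2*prod would overflow */
      return prod * 2;
    }
    /* SQDMLAL/SQDMLSL then add/subtract this product into the
       accumulator, with the accumulate step also saturating.  */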
+/* { dg-final { scan-assembler-times "\\tsshll\\tv\[0-9\]+\.2d" 1 } } */
+
+int64x2_t
+test_vshll_n_s32 (int32x2_t __a)
+{
+ return vshll_n_s32 (__a, 9);
+}
+
+/* { dg-final { scan-assembler-times "\\tushll\\tv\[0-9\]+\.2d" 1 } } */
+
+uint64x2_t
+test_vshll_n_u32 (uint32x2_t __a)
+{
+ return vshll_n_u32 (__a, 9);
+}
+
+/* { dg-final { scan-assembler-times "\\tshll\\tv\[0-9\]+\.2d" 2 } } */
+
+int64x2_t
+test_vshll_n_s32_2 (int32x2_t __a)
+{
+ return vshll_n_s32 (__a, 32);
+}
+
+uint64x2_t
+test_vshll_n_u32_2 (uint32x2_t __a)
+{
+ return vshll_n_u32 (__a, 32);
+}
+
+/* { dg-final { scan-assembler-times "\\tsshll\\tv\[0-9\]+\.4s" 1 } } */
+
+int32x4_t
+test_vshll_n_s16 (int16x4_t __a)
+{
+ return vshll_n_s16 (__a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tushll\\tv\[0-9\]+\.4s" 1 } } */
+
+uint32x4_t
+test_vshll_n_u16 (uint16x4_t __a)
+{
+ return vshll_n_u16 (__a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tshll\\tv\[0-9\]+\.4s" 2 } } */
+
+int32x4_t
+test_vshll_n_s16_2 (int16x4_t __a)
+{
+ return vshll_n_s16 (__a, 16);
+}
+
+uint32x4_t
+test_vshll_n_u16_2 (uint16x4_t __a)
+{
+ return vshll_n_u16 (__a, 16);
+}
+
+/* { dg-final { scan-assembler-times "\\tsshll\\tv\[0-9\]+\.8h" 1 } } */
+
+int16x8_t
+test_vshll_n_s8 (int8x8_t __a)
+{
+ return vshll_n_s8 (__a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tushll\\tv\[0-9\]+\.8h" 1 } } */
+
+uint16x8_t
+test_vshll_n_u8 (uint8x8_t __a)
+{
+ return vshll_n_u8 (__a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tshll\\tv\[0-9\]+\.8h" 2 } } */
+
+int16x8_t
+test_vshll_n_s8_2 (int8x8_t __a)
+{
+ return vshll_n_s8 (__a, 8);
+}
+
+uint16x8_t
+test_vshll_n_u8_2 (uint8x8_t __a)
+{
+ return vshll_n_u8 (__a, 8);
+}
+
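[Editorial note, not part of the patch] In the vshll tests, a shift amount strictly less than the source element width scans for sshll/ushll, while a shift equal to the element width (the *_2 tests) scans for the dedicated shll form. A sketch of the widening semantics for one 8-bit lane:

    /* Sketch: shift left long, one 8-bit lane, 0 <= n <= 8.  */
    #include <stdint.h>

    static int16_t sshll_lane (int8_t a, unsigned n)
    {
      return (int16_t) ((int16_t) a << n);  /* widen first, then shift */
    }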
+/* { dg-final { scan-assembler-times "\\tsshll2\\tv\[0-9\]+\.2d" 1 } } */
+
+int64x2_t
+test_vshll_high_n_s32 (int32x4_t __a)
+{
+ return vshll_high_n_s32 (__a, 9);
+}
+
+/* { dg-final { scan-assembler-times "\\tushll2\\tv\[0-9\]+\.2d" 1 } } */
+
+uint64x2_t
+test_vshll_high_n_u32 (uint32x4_t __a)
+{
+ return vshll_high_n_u32 (__a, 9);
+}
+
+/* { dg-final { scan-assembler-times "\\tshll2\\tv\[0-9\]+\.2d" 2 } } */
+
+int64x2_t
+test_vshll_high_n_s32_2 (int32x4_t __a)
+{
+ return vshll_high_n_s32 (__a, 32);
+}
+
+uint64x2_t
+test_vshll_high_n_u32_2 (uint32x4_t __a)
+{
+ return vshll_high_n_u32 (__a, 32);
+}
+
+/* { dg-final { scan-assembler-times "\\tsshll2\\tv\[0-9\]+\.4s" 1 } } */
+
+int32x4_t
+test_vshll_high_n_s16 (int16x8_t __a)
+{
+ return vshll_high_n_s16 (__a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tushll2\\tv\[0-9\]+\.4s" 1 } } */
+
+uint32x4_t
+test_vshll_high_n_u16 (uint16x8_t __a)
+{
+ return vshll_high_n_u16 (__a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tshll2\\tv\[0-9\]+\.4s" 2 } } */
+
+int32x4_t
+test_vshll_high_n_s16_2 (int16x8_t __a)
+{
+ return vshll_high_n_s16 (__a, 16);
+}
+
+uint32x4_t
+test_vshll_high_n_u16_2 (uint16x8_t __a)
+{
+ return vshll_high_n_u16 (__a, 16);
+}
+
+/* { dg-final { scan-assembler-times "\\tsshll2\\tv\[0-9\]+\.8h" 1 } } */
+
+int16x8_t
+test_vshll_high_n_s8 (int8x16_t __a)
+{
+ return vshll_high_n_s8 (__a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tushll2\\tv\[0-9\]+\.8h" 1 } } */
+
+uint16x8_t
+test_vshll_high_n_u8 (uint8x16_t __a)
+{
+ return vshll_high_n_u8 (__a, 3);
+}
+
+/* { dg-final { scan-assembler-times "\\tshll2\\tv\[0-9\]+\.8h" 2 } } */
+
+int16x8_t
+test_vshll_high_n_s8_2 (int8x16_t __a)
+{
+ return vshll_high_n_s8 (__a, 8);
+}
+
+uint16x8_t
+test_vshll_high_n_u8_2 (uint8x16_t __a)
+{
+ return vshll_high_n_u8 (__a, 8);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/vfp-1.c b/gcc/testsuite/gcc.target/aarch64/vfp-1.c
new file mode 100644
index 00000000000..79c571402cc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vfp-1.c
@@ -0,0 +1,109 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+extern float fabsf (float);
+extern float sqrtf (float);
+extern double fabs (double);
+extern double sqrt (double);
+
+volatile float f1, f2, f3;
+volatile int cond1, cond2;
+
+void test_sf() {
+ /* abssf2 */
+ /* { dg-final { scan-assembler "fabs\ts\[0-9\]*" } } */
+ f1 = fabsf (f1);
+ /* negsf2 */
+ /* { dg-final { scan-assembler "fneg\ts\[0-9\]*" } } */
+ f1 = -f1;
+ /* addsf3 */
+ /* { dg-final { scan-assembler "fadd\ts\[0-9\]*" } } */
+ f1 = f2 + f3;
+ /* subsf3 */
+ /* { dg-final { scan-assembler "fsub\ts\[0-9\]*" } } */
+ f1 = f2 - f3;
+ /* divsf3 */
+ /* { dg-final { scan-assembler "fdiv\ts\[0-9\]*" } } */
+ f1 = f2 / f3;
+ /* mulsf3 */
+ /* { dg-final { scan-assembler "fmul\ts\[0-9\]*" } } */
+ f1 = f2 * f3;
+ /* sqrtsf2 */
+ /* { dg-final { scan-assembler "fsqrt\ts\[0-9\]*" } } */
+ f1 = sqrtf (f1);
+ /* cmpsf */
+ /* { dg-final { scan-assembler "fcmp\ts\[0-9\]*" } } */
+ if (f1 < f2)
+ cond1 = 1;
+ else
+ cond2 = 1;
+}
+
+volatile double d1, d2, d3;
+
+void test_df() {
+ /* absdf2 */
+ /* { dg-final { scan-assembler "fabs\td\[0-9\]*" } } */
+ d1 = fabs (d1);
+ /* negdf2 */
+ /* { dg-final { scan-assembler "fneg\td\[0-9\]*" } } */
+ d1 = -d1;
+ /* adddf3 */
+ /* { dg-final { scan-assembler "fadd\td\[0-9\]*" } } */
+ d1 = d2 + d3;
+ /* subdf3 */
+ /* { dg-final { scan-assembler "fsub\td\[0-9\]*" } } */
+ d1 = d2 - d3;
+ /* divdf3 */
+ /* { dg-final { scan-assembler "fdiv\td\[0-9\]*" } } */
+ d1 = d2 / d3;
+ /* muldf3 */
+ /* { dg-final { scan-assembler "fmul\td\[0-9\]*" } } */
+ d1 = d2 * d3;
+ /* sqrtdf2 */
+ /* { dg-final { scan-assembler "fsqrt\td\[0-9\]*" } } */
+ d1 = sqrt (d1);
+ /* cmpdf */
+ /* { dg-final { scan-assembler "fcmp\td\[0-9\]*" } } */
+ if (d1 < d2)
+ cond1 = 1;
+ else
+ cond2 = 1;
+}
+
+volatile int i1;
+volatile unsigned int u1;
+
+void test_convert () {
+ /* extendsfdf2 */
+ /* { dg-final { scan-assembler "fcvt\td\[0-9\]*" } } */
+ d1 = f1;
+ /* truncdfsf2 */
+ /* { dg-final { scan-assembler "fcvt\ts\[0-9\]*" } } */
+ f1 = d1;
+ /* fixsfsi2 */
+ /* { dg-final { scan-assembler "fcvtzs\tw\[0-9\], s\[0-9\]*" } } */
+ i1 = f1;
+ /* fixdfsi2 */
+ /* { dg-final { scan-assembler "fcvtzs\tw\[0-9\], d\[0-9\]*" } } */
+ i1 = d1;
+ /* fixunsfsi2 */
+ /* { dg-final { scan-assembler "fcvtzu\tw\[0-9\], s\[0-9\]*" } } */
+ u1 = f1;
+ /* fixunsdfsi2 */
+ /* { dg-final { scan-assembler "fcvtzu\tw\[0-9\], d\[0-9\]*" } } */
+ u1 = d1;
+ /* floatsisf2 */
+ /* { dg-final { scan-assembler "scvtf\ts\[0-9\]*" } } */
+ f1 = i1;
+ /* floatsidf2 */
+ /* { dg-final { scan-assembler "scvtf\td\[0-9\]*" } } */
+ d1 = i1;
+ /* floatunssisf2 */
+ /* { dg-final { scan-assembler "ucvtf\ts\[0-9\]*" } } */
+ f1 = u1;
+ /* floatunssidf2 */
+ /* { dg-final { scan-assembler "ucvtf\td\[0-9\]*" } } */
+ d1 = u1;
+}
+
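[Editorial note, not part of the patch] The conversion tests expect the "toward zero" variants fcvtzs/fcvtzu because C's float-to-integer conversion truncates, independent of the current rounding mode. A small stand-alone illustration:

    /* C float->int conversion truncates toward zero.  */
    #include <assert.h>

    int main (void)
    {
      volatile float f = -1.7f;
      assert ((int) f == -1);   /* not -2: truncation, not rounding */
      return 0;
    }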
diff --git a/gcc/testsuite/gcc.target/aarch64/vmlsq_laneq.c b/gcc/testsuite/gcc.target/aarch64/vmlsq_laneq.c
new file mode 100644
index 00000000000..dd3fb811985
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vmlsq_laneq.c
@@ -0,0 +1,158 @@
+
+/* { dg-do run } */
+/* { dg-options "-O3" } */
+
+#include "arm_neon.h"
+
+extern void abort (void);
+
+void
+test1 ()
+{
+ int16x8_t val1, val2, val3;
+ int16x8_t result;
+ uint64_t act, exp;
+
+ val1 = vcombine_s16 (vcreate_s16 (UINT64_C (0xffff9ab680000000)),
+ vcreate_s16 (UINT64_C (0x00000000ffff0000)));
+ val2 = vcombine_s16 (vcreate_s16 (UINT64_C (0x32b77fffffff7fff)),
+ vcreate_s16 (UINT64_C (0x0000ffff00007fff)));
+ val3 = vcombine_s16 (vcreate_s16 (UINT64_C (0x7fff00007fff0000)),
+ vcreate_s16 (UINT64_C (0x80007fff00000000)));
+ result = vmlsq_laneq_s16 (val1, val2, val3, 6);
+
+ act = vgetq_lane_u64 (vreinterpretq_u64_s16 (result), 0);
+ exp = UINT64_C (0xb2b69ab5ffffffff);
+ if (act != exp)
+ abort ();
+
+ act = vgetq_lane_u64 (vreinterpretq_u64_s16 (result), 1);
+ exp = UINT64_C (0x00007fffffffffff);
+ if (act != exp)
+ abort ();
+}
+
+void
+test2 ()
+{
+ int32x4_t val1, val2, val3;
+ int32x4_t result;
+ uint64_t exp, act;
+
+ val1 = vcombine_s32 (vcreate_s32 (UINT64_C (0x00008000f46f7fff)),
+ vcreate_s32 (UINT64_C (0x7fffffffffff8000)));
+ val2 = vcombine_s32 (vcreate_s32 (UINT64_C (0x7fff7fff0e700000)),
+ vcreate_s32 (UINT64_C (0xffff000080000000)));
+ val3 = vcombine_s32 (vcreate_s32 (UINT64_C (0x00000000ffff0000)),
+ vcreate_s32 (UINT64_C (0xd9edea1a8000fb28)));
+ result = vmlsq_laneq_s32 (val1, val2, val3, 3);
+
+ act = vgetq_lane_u64 (vreinterpretq_u64_s32 (result), 0);
+ exp = UINT64_C (0xcefb6a1a1d0f7fff);
+ if (act != exp)
+ abort ();
+
+ act = vgetq_lane_u64 (vreinterpretq_u64_s32 (result), 1);
+ exp = UINT64_C (0x6a19ffffffff8000);
+ if (act != exp)
+ abort ();
+}
+
+void
+test3 ()
+{
+ uint16x8_t val1, val2, val3;
+ uint16x8_t result;
+ uint64_t act, exp;
+
+ val1 = vcombine_u16 (vcreate_u16 (UINT64_C (0x000080008000802a)),
+ vcreate_u16 (UINT64_C (0x7fffffff00007fff)));
+ val2 = vcombine_u16 (vcreate_u16 (UINT64_C (0x7fffcdf1ffff0000)),
+ vcreate_u16 (UINT64_C (0xe2550000ffffffff)));
+ val3 = vcombine_u16 (vcreate_u16 (UINT64_C (0x80007fff80000000)),
+ vcreate_u16 (UINT64_C (0xbe2100007fffffff)));
+
+ result = vmlsq_laneq_u16 (val1, val2, val3, 7);
+
+ act = vgetq_lane_u64 (vreinterpretq_u64_u16 (result), 0);
+ exp = UINT64_C (0x3e2115ef3e21802a);
+ if (act != exp)
+ abort ();
+
+ act = vgetq_lane_u64 (vreinterpretq_u64_u16 (result), 1);
+ exp = UINT64_C (0x3d0affffbe213e20);
+ if (act != exp)
+ abort ();
+}
+
+void
+test4 ()
+{
+ uint32x4_t val1, val2, val3;
+ uint32x4_t result;
+ uint64_t act, exp;
+
+ val1 = vcombine_u32 (vcreate_u32 (UINT64_C (0x3295fe3d7fff7fff)),
+ vcreate_u32 (UINT64_C (0x7fff00007fff7fff)));
+ val2 = vcombine_u32 (vcreate_u32 (UINT64_C (0xffff7fff7fff8000)),
+ vcreate_u32 (UINT64_C (0x7fff80008000ffff)));
+ val3 = vcombine_u32 (vcreate_u32 (UINT64_C (0x7fff7fff80008000)),
+ vcreate_u32 (UINT64_C (0x0000800053ab7fff)));
+
+ result = vmlsq_laneq_u32 (val1, val2, val3, 2);
+
+ act = vgetq_lane_u64 (vreinterpretq_u64_u32 (result), 0);
+ exp = UINT64_C (0x4640fe3cbffeffff);
+ if (act != exp)
+ abort ();
+
+ act = vgetq_lane_u64 (vreinterpretq_u64_u32 (result), 1);
+ exp = UINT64_C (0xbffe8000d3abfffe);
+ if (act != exp)
+ abort ();
+}
+
+void
+test5 ()
+{
+ float32x4_t val1, val2, val3;
+ float32x4_t result;
+ float32_t act;
+
+ val1 = vcombine_f32 (vcreate_f32 (UINT64_C (0x3f49daf03ef3dc73)),
+ vcreate_f32 (UINT64_C (0x3f5d467a3ef3dc73)));
+ val2 = vcombine_f32 (vcreate_f32 (UINT64_C (0x3d2064c83d10cd28)),
+ vcreate_f32 (UINT64_C (0x3ea7d1a23d10cd28)));
+ val3 = vcombine_f32 (vcreate_f32 (UINT64_C (0x3f6131993edb1e04)),
+ vcreate_f32 (UINT64_C (0x3f37f4bf3edb1e04)));
+
+ result = vmlsq_laneq_f32 (val1, val2, val3, 0);
+
+ act = vgetq_lane_f32 (result, 0);
+ if (act != 0.46116194128990173f)
+ abort ();
+
+ act = vgetq_lane_f32 (result, 1);
+ if (act != 0.7717385292053223f)
+ abort ();
+
+ act = vgetq_lane_f32 (result, 2);
+ if (act != 0.46116194128990173f)
+ abort ();
+
+ act = vgetq_lane_f32 (result, 3);
+ if (act != 0.7240825295448303f)
+ abort ();
+}
+
+int
+main (void)
+{
+ test1 ();
+ test2 ();
+ test3 ();
+ test4 ();
+ test5 ();
+
+ return 0;
+}
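[Editorial note, not part of the patch] The expected vectors in the run test above follow directly from the multiply-subtract-by-lane semantics: per lane i, result[i] = val1[i] - val2[i] * val3[lane], with wrapping modular arithmetic in the integer variants (as GCC implements the narrowing conversion). A reference loop for the s16 case:

    /* Sketch: scalar reference for vmlsq_laneq_s16.  */
    #include <stdint.h>

    static void
    mlsq_laneq_s16_ref (int16_t acc[8], const int16_t b[8],
                        const int16_t c[8], int lane)
    {
      for (int i = 0; i < 8; i++)
        acc[i] = (int16_t) (acc[i] - b[i] * c[lane]);
    }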
diff --git a/gcc/testsuite/gcc.target/aarch64/volatile-bitfields-1.c b/gcc/testsuite/gcc.target/aarch64/volatile-bitfields-1.c
new file mode 100644
index 00000000000..c69d3a358b4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/volatile-bitfields-1.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+typedef struct {
+ char a:1;
+ char b:7;
+ int c;
+} BitStruct;
+
+volatile BitStruct bits;
+
+int foo ()
+{
+ return bits.b;
+}
+
+/* { dg-final { scan-assembler "ldrb\[\\t \]+\[^\n\]*,\[\\t \]*\\\[\[^\n\]*\\\]" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/volatile-bitfields-2.c b/gcc/testsuite/gcc.target/aarch64/volatile-bitfields-2.c
new file mode 100644
index 00000000000..c7a9ebaa2b8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/volatile-bitfields-2.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+typedef struct {
+ volatile unsigned long a:8;
+ volatile unsigned long b:8;
+ volatile unsigned long c:16;
+} BitStruct;
+
+BitStruct bits;
+
+unsigned long foo ()
+{
+ return bits.b;
+}
+
+/* { dg-final { scan-assembler "ldr\[\\t \]+\[^\n\]*,\[\\t \]*\\\[\[^\n\]*\\\]" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/volatile-bitfields-3.c b/gcc/testsuite/gcc.target/aarch64/volatile-bitfields-3.c
new file mode 100644
index 00000000000..ea371dbac95
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/volatile-bitfields-3.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+typedef struct {
+ volatile unsigned long a:8;
+ volatile unsigned long b:8;
+ volatile unsigned long c:16;
+} BitStruct;
+
+BitStruct bits;
+
+unsigned long foo ()
+{
+ return bits.c;
+}
+
+/* { dg-final { scan-assembler "ldr\[\\t \]+\[^\n\]*,\[\\t \]*\\\[\[^\n\]*\\\]" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/vsqrt.c b/gcc/testsuite/gcc.target/aarch64/vsqrt.c
new file mode 100644
index 00000000000..b59535a9b5f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vsqrt.c
@@ -0,0 +1,66 @@
+
+
+/* { dg-do run } */
+/* { dg-options "-O3" } */
+
+#include "arm_neon.h"
+#include "stdio.h"
+
+extern void abort (void);
+
+void
+test_square_root_v2sf ()
+{
+ float32x2_t val = {4.0f, 9.0f};
+ float32x2_t res;
+
+ res = vsqrt_f32 (val);
+
+ if (vget_lane_f32 (res, 0) != 2.0f)
+ abort ();
+ if (vget_lane_f32 (res, 1) != 3.0f)
+ abort ();
+}
+
+void
+test_square_root_v4sf ()
+{
+ float32x4_t val = {4.0f, 9.0f, 16.0f, 25.0f};
+ float32x4_t res;
+
+ res = vsqrtq_f32 (val);
+
+ if (vgetq_lane_f32 (res, 0) != 2.0f)
+ abort ();
+ if (vgetq_lane_f32 (res, 1) != 3.0f)
+ abort ();
+ if (vgetq_lane_f32 (res, 2) != 4.0f)
+ abort ();
+ if (vgetq_lane_f32 (res, 3) != 5.0f)
+ abort ();
+}
+
+void
+test_square_root_v2df ()
+{
+ float64x2_t val = {4.0, 9.0};
+ float64x2_t res;
+
+ res = vsqrtq_f64 (val);
+
+ if (vgetq_lane_f64 (res, 0) != 2.0)
+ abort ();
+
+ if (vgetq_lane_f64 (res, 1) != 3.0)
+ abort ();
+}
+
+int
+main (void)
+{
+ test_square_root_v2sf ();
+ test_square_root_v4sf ();
+ test_square_root_v2df ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gfortran.dg/debug/pr35154-stabs.f b/gcc/testsuite/gfortran.dg/debug/pr35154-stabs.f
index 4b518fe9e4d..fd731994f36 100644
--- a/gcc/testsuite/gfortran.dg/debug/pr35154-stabs.f
+++ b/gcc/testsuite/gfortran.dg/debug/pr35154-stabs.f
@@ -1,6 +1,6 @@
C Test program for common block debugging. G. Helffrich 11 July 2004.
C { dg-do compile }
-C { dg-skip-if "No stabs" { mmix-*-* alpha*-*-* hppa*64*-*-* ia64-*-* *-*-vxworks* } { "*" } { "" } }
+C { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* alpha*-*-* hppa*64*-*-* ia64-*-* *-*-vxworks* } { "*" } { "" } }
C { dg-skip-if "No stabs" {*-*-* } { "*" } { "-gstabs" } }
common i,j
common /label/l,m
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 4a8eee1cd5c..3c5e91a786a 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -493,6 +493,13 @@ proc check_profiling_available { test_what } {
return 0
}
+ # We don't yet support profiling for AArch64.
+ if { [istarget aarch64*-*-*]
+ && ([lindex $test_what 1] == "-p"
+ || [lindex $test_what 1] == "-pg") } {
+ return 0
+ }
+
# cygwin does not support -p.
if { [istarget *-*-cygwin*] && $test_what == "-p" } {
return 0
@@ -508,7 +515,8 @@ proc check_profiling_available { test_what } {
if {![info exists profiling_available_saved]} {
# Some targets don't have any implementation of __bb_init_func or are
# missing other needed machinery.
- if { [istarget am3*-*-linux*]
+ if { [istarget aarch64*-*-elf]
+ || [istarget am3*-*-linux*]
|| [istarget arm*-*-eabi*]
|| [istarget arm*-*-elf]
|| [istarget arm*-*-symbianelf*]
@@ -1883,6 +1891,7 @@ proc check_effective_target_vect_int { } {
|| [istarget sparc*-*-*]
|| [istarget alpha*-*-*]
|| [istarget ia64-*-*]
+ || [istarget aarch64*-*-*]
|| [check_effective_target_arm32]
|| ([istarget mips*-*-*]
&& [check_effective_target_mips_loongson]) } {
@@ -2003,6 +2012,15 @@ proc check_effective_target_vect_floatuint_cvt { } {
return $et_vect_floatuint_cvt_saved
}
+# Return 1 if this is an AArch64 target supporting big endian.
+proc check_effective_target_aarch64_big_endian { } {
+ return [check_no_compiler_messages aarch64_big_endian assembly {
+ #if !defined(__aarch64__) || !defined(__AARCH64EB__)
+ #error FOO
+ #endif
+ }]
+}
+
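[Editorial note, not part of the patch] Once check_effective_target_aarch64_big_endian is in place, a test can gate itself on the new keyword. A hypothetical minimal use (the directive syntax is standard DejaGnu; the test body is a placeholder):

    /* { dg-do compile { target aarch64_big_endian } } */
    int aarch64_big_endian_only_placeholder;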
# Return 1 if this is an arm target using 32-bit instructions
proc check_effective_target_arm32 { } {
return [check_no_compiler_messages arm32 assembly {
@@ -2621,6 +2639,7 @@ proc check_effective_target_vect_shift { } {
|| [istarget ia64-*-*]
|| [istarget i?86-*-*]
|| [istarget x86_64-*-*]
+ || [istarget aarch64*-*-*]
|| [check_effective_target_arm32]
|| ([istarget mips*-*-*]
&& [check_effective_target_mips_loongson]) } {
@@ -2690,6 +2709,7 @@ proc check_effective_target_vect_float { } {
|| [istarget mipsisa64*-*-*]
|| [istarget x86_64-*-*]
|| [istarget ia64-*-*]
+ || [istarget aarch64*-*-*]
|| [check_effective_target_arm32] } {
set et_vect_float_saved 1
}
@@ -2711,6 +2731,7 @@ proc check_effective_target_vect_double { } {
} else {
set et_vect_double_saved 0
if { [istarget i?86-*-*]
+ || [istarget aarch64*-*-*]
|| [istarget x86_64-*-*] } {
if { [check_no_compiler_messages vect_double assembly {
#ifdef __tune_atom__
@@ -2993,6 +3014,7 @@ proc check_effective_target_vect_widen_mult_qi_to_hi { } {
set et_vect_widen_mult_qi_to_hi_saved 0
}
if { [istarget powerpc*-*-*]
+ || [istarget aarch64*-*-*]
|| ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok]) } {
set et_vect_widen_mult_qi_to_hi_saved 1
}
@@ -3025,6 +3047,7 @@ proc check_effective_target_vect_widen_mult_hi_to_si { } {
if { [istarget powerpc*-*-*]
|| [istarget spu-*-*]
|| [istarget ia64-*-*]
+ || [istarget aarch64*-*-*]
|| [istarget i?86-*-*]
|| [istarget x86_64-*-*]
|| ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok]) } {
@@ -3202,6 +3225,7 @@ proc check_effective_target_vect_pack_trunc { } {
if { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*])
|| [istarget i?86-*-*]
|| [istarget x86_64-*-*]
+ || [istarget aarch64*-*-*]
|| [istarget spu-*-*]
|| ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok]
&& [check_effective_target_arm_little_endian]) } {
@@ -3229,6 +3253,7 @@ proc check_effective_target_vect_unpack { } {
|| [istarget x86_64-*-*]
|| [istarget spu-*-*]
|| [istarget ia64-*-*]
+ || [istarget aarch64*-*-*]
|| ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok]
&& [check_effective_target_arm_little_endian]) } {
set et_vect_unpack_saved 1
@@ -3292,6 +3317,7 @@ proc check_effective_target_vect_hw_misalign { } {
} else {
set et_vect_hw_misalign_saved 0
if { ([istarget x86_64-*-*]
+ || [istarget aarch64*-*-*]
|| [istarget i?86-*-*]) } {
set et_vect_hw_misalign_saved 1
}
@@ -3530,6 +3556,7 @@ proc check_effective_target_vect_short_mult { } {
|| [istarget i?86-*-*]
|| [istarget x86_64-*-*]
|| [istarget powerpc*-*-*]
+ || [istarget aarch64*-*-*]
|| [check_effective_target_arm32]
|| ([istarget mips*-*-*]
&& [check_effective_target_mips_loongson]) } {
@@ -3555,6 +3582,7 @@ proc check_effective_target_vect_int_mult { } {
|| [istarget i?86-*-*]
|| [istarget x86_64-*-*]
|| [istarget ia64-*-*]
+ || [istarget aarch64*-*-*]
|| [check_effective_target_arm32] } {
set et_vect_int_mult_saved 1
}
@@ -3629,7 +3657,8 @@ foreach N {2 3 4 8} {
&& [check_effective_target_vect_extract_even_odd] } {
set et_vect_stridedN_saved 1
}
- if { [istarget arm*-*-*] && N >= 2 && N <= 4 } {
+ if { ([istarget arm*-*-*]
+ || [istarget aarch64*-*-*]) && N >= 2 && N <= 4 } {
set et_vect_stridedN_saved 1
}
}
@@ -3859,6 +3888,7 @@ proc check_effective_target_sync_int_long { } {
if { [istarget ia64-*-*]
|| [istarget i?86-*-*]
|| [istarget x86_64-*-*]
+ || [istarget aarch64*-*-*]
|| [istarget alpha*-*-*]
|| [istarget arm*-*-linux-gnueabi]
|| [istarget bfin*-*linux*]
@@ -3886,7 +3916,8 @@ proc check_effective_target_sync_char_short { } {
set et_sync_char_short_saved 0
# This is intentionally powerpc but not rs6000, rs6000 doesn't have the
# load-reserved/store-conditional instructions.
- if { [istarget ia64-*-*]
+ if { [istarget aarch64*-*-*]
+ || [istarget ia64-*-*]
|| [istarget i?86-*-*]
|| [istarget x86_64-*-*]
|| [istarget alpha*-*-*]
@@ -4606,6 +4637,8 @@ proc check_vect_support_and_set_flags { } {
} else {
set dg-do-what-default compile
}
+ } elseif [istarget "aarch64*-*-*"] {
+ set dg-do-what-default run
} else {
return 0
}
diff --git a/libcpp/ChangeLog.aarch64 b/libcpp/ChangeLog.aarch64
new file mode 100644
index 00000000000..34106d40f26
--- /dev/null
+++ b/libcpp/ChangeLog.aarch64
@@ -0,0 +1,13 @@
+2012-05-25 Ian Bolton <ian.bolton@arm.com>
+ Jim MacArthur <jim.macarthur@arm.com>
+ Marcus Shawcroft <marcus.shawcroft@arm.com>
+ Nigel Stephens <nigel.stephens@arm.com>
+ Ramana Radhakrishnan <ramana.radhakrishnan@arm.com>
+ Richard Earnshaw <rearnsha@arm.com>
+ Sofiane Naci <sofiane.naci@arm.com>
+ Stephen Thomas <stephen.thomas@arm.com>
+ Tejas Belagod <tejas.belagod@arm.com>
+ Yufeng Zhang <yufeng.zhang@arm.com>
+
+ * configure.ac: Enable AArch64.
+ * configure: Regenerate.
diff --git a/libcpp/configure b/libcpp/configure
index fb5654de54d..c518e464a7f 100755
--- a/libcpp/configure
+++ b/libcpp/configure
@@ -7368,6 +7368,7 @@ fi
case $target in
+ aarch64*-*-* | \
alpha*-*-* | \
arm*-*-*eabi* | \
arm*-*-symbianelf* | \
diff --git a/libcpp/configure.ac b/libcpp/configure.ac
index 070ab633d80..eac6524f102 100644
--- a/libcpp/configure.ac
+++ b/libcpp/configure.ac
@@ -148,6 +148,7 @@ fi
m4_changequote(,)
case $target in
+ aarch64*-*-* | \
alpha*-*-* | \
arm*-*-*eabi* | \
arm*-*-symbianelf* | \
diff --git a/libgcc/ChangeLog.aarch64 b/libgcc/ChangeLog.aarch64
new file mode 100644
index 00000000000..c6cab974e2a
--- /dev/null
+++ b/libgcc/ChangeLog.aarch64
@@ -0,0 +1,56 @@
+2012-09-06 Marcus Shawcroft <marcus.shawcroft@arm.com>
+
+ * config/aarch64/sfp-machine.h (FP_EX_INVALID, FP_EX_DIVZERO)
+ (FP_EX_OVERFLOW, FP_EX_UNDERFLOW, FP_EX_INEXACT)
+ (FP_HANDLE_EXCEPTIONS, FP_RND_NEAREST, FP_RND_ZERO, FP_RND_PINF)
+ (FP_RND_MINF, _FP_DECL_EX, FP_INIT_ROUNDMODE, FP_ROUNDMODE): New.
+
+2012-09-03 Marcus Shawcroft <marcus.shawcroft@arm.com>
+
+ * config/aarch64/sync-cache.c (__aarch64_sync_cache_range): Cache
+ the ctr_el0 register.
+
+2012-09-03 Marcus Shawcroft <marcus.shawcroft@arm.com>
+
+ * config/aarch64/sync-cache.c (__aarch64_sync_cache_range): Lift
+ declarations to top of function. Update comment. Correct
+ icache_linesize and dcache_linesize calculation.
+
+2012-07-17 Marcus Shawcroft <marcus.shawcroft@arm.com>
+
+ * config/aarch64/sfp-machine.h (__ARM_EABI__): Remove.
+
+2012-06-08 Jim MacArthur <jim.macarthur@arm.com>
+
+ * config.host
+ (aarch64*-*-elf): Remove t-softfp-sfdf and t-softfp-excl.
+ (aarch64*-*-linux*): Likewise.
+
+2012-06-08 Jim MacArthur <jim.macarthur@arm.com>
+
+ * config.host
+ (aarch64*-*-elf): Add t-aarch64.
+ (aarch64*-*-linux*): Add t-aarch64, remove t-linux.
+ * config/aarch64/lib1funcs.S: Delete.
+ * config/aarch64/sync-cache.c: New file.
+ * config/aarch64/t-aarch64: New file.
+ * config/aarch64/t-linux: Delete.
+
+2012-06-08 Jim MacArthur <jim.macarthur@arm.com>
+
+ * config/aarch64/t-aarch64: Delete.
+ * config.host (aarch64*-*-elf): Remove reference to t-aarch64.
+
+2012-05-25 Ian Bolton <ian.bolton@arm.com>
+ Jim MacArthur <jim.macarthur@arm.com>
+ Marcus Shawcroft <marcus.shawcroft@arm.com>
+ Nigel Stephens <nigel.stephens@arm.com>
+ Ramana Radhakrishnan <ramana.radhakrishnan@arm.com>
+ Richard Earnshaw <rearnsha@arm.com>
+ Sofiane Naci <sofiane.naci@arm.com>
+ Stephen Thomas <stephen.thomas@arm.com>
+ Tejas Belagod <tejas.belagod@arm.com>
+ Yufeng Zhang <yufeng.zhang@arm.com>
+
+ * configure.ac: Enable AArch64.
+ * configure: Regenerate.
diff --git a/libgcc/config.host b/libgcc/config.host
index ef9791bffd6..7f51f1e6ef7 100644
--- a/libgcc/config.host
+++ b/libgcc/config.host
@@ -83,6 +83,9 @@ m32c*-*-*)
cpu_type=m32c
tmake_file=t-fdpbit
;;
+aarch64*-*-*)
+ cpu_type=aarch64
+ ;;
alpha*-*-*)
cpu_type=alpha
;;
@@ -279,6 +282,16 @@ i[34567]86-*-mingw* | x86_64-*-mingw*)
esac
case ${host} in
+aarch64*-*-elf)
+ extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o"
+ tmake_file="${tmake_file} ${cpu_type}/t-aarch64"
+ tmake_file="${tmake_file} ${cpu_type}/t-softfp t-softfp"
+ ;;
+aarch64*-*-linux*)
+ md_unwind_header=aarch64/linux-unwind.h
+ tmake_file="${tmake_file} ${cpu_type}/t-aarch64"
+ tmake_file="${tmake_file} ${cpu_type}/t-softfp t-softfp"
+ ;;
alpha*-*-linux*)
tmake_file="${tmake_file} alpha/t-alpha alpha/t-ieee t-crtfm alpha/t-linux"
extra_parts="$extra_parts crtfastmath.o"
diff --git a/libgcc/config/aarch64/crti.S b/libgcc/config/aarch64/crti.S
new file mode 100644
index 00000000000..49611303b02
--- /dev/null
+++ b/libgcc/config/aarch64/crti.S
@@ -0,0 +1,68 @@
+# Machine description for AArch64 architecture.
+# Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+# Contributed by ARM Ltd.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+/* An executable stack is *not* required for these functions. */
+#if defined(__ELF__) && defined(__linux__)
+.section .note.GNU-stack,"",%progbits
+.previous
+#endif
+
+# This file creates a stack frame for the contents of the .fini and
+# .init sections. Users may put any desired instructions in those
+# sections.
+
+#ifdef __ELF__
+#define TYPE(x) .type x,function
+#else
+#define TYPE(x)
+#endif
+
+ # Note - this macro is complemented by the FUNC_END macro
+ # in crtn.S. If you change this macro you must also change
+ # that macro to match.
+.macro FUNC_START
+ # Create a stack frame and save any call-preserved registers
+ stp x29, x30, [sp, #-16]!
+ stp x27, x28, [sp, #-16]!
+ stp x25, x26, [sp, #-16]!
+ stp x23, x24, [sp, #-16]!
+ stp x21, x22, [sp, #-16]!
+ stp x19, x20, [sp, #-16]!
+.endm
+
+ .section ".init"
+ .align 2
+ .global _init
+ TYPE(_init)
+_init:
+ FUNC_START
+
+
+ .section ".fini"
+ .align 2
+ .global _fini
+ TYPE(_fini)
+_fini:
+ FUNC_START
+
+# end of crti.S
diff --git a/libgcc/config/aarch64/crtn.S b/libgcc/config/aarch64/crtn.S
new file mode 100644
index 00000000000..70dbc19c592
--- /dev/null
+++ b/libgcc/config/aarch64/crtn.S
@@ -0,0 +1,61 @@
+# Machine description for AArch64 architecture.
+# Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+# Contributed by ARM Ltd.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+/* An executable stack is *not* required for these functions. */
+#if defined(__ELF__) && defined(__linux__)
+.section .note.GNU-stack,"",%progbits
+.previous
+#endif
+
+# This file just makes sure that the .fini and .init sections do in
+# fact return. Users may put any desired instructions in those sections.
+# This file is the last thing linked into any executable.
+
+ # Note - this macro is complemented by the FUNC_START macro
+ # in crti.S. If you change this macro you must also change
+ # that macro to match.
+ #
+ # Note - we do not try any fancy optimizations of the return
+ # sequences here; it is just not worth it. Instead keep things
+ # simple. Restore all the saved registers, including the link
+ # register, and then perform the correct function return instruction.
+.macro FUNC_END
+ ldp x19, x20, [sp], #16
+ ldp x21, x22, [sp], #16
+ ldp x23, x24, [sp], #16
+ ldp x25, x26, [sp], #16
+ ldp x27, x28, [sp], #16
+ ldp x29, x30, [sp], #16
+ ret
+.endm
+
+
+ .section ".init"
+ ;;
+ FUNC_END
+
+ .section ".fini"
+ ;;
+ FUNC_END
+
+# end of crtn.S
diff --git a/libgcc/config/aarch64/linux-unwind.h b/libgcc/config/aarch64/linux-unwind.h
new file mode 100644
index 00000000000..1e2d40b7d98
--- /dev/null
+++ b/libgcc/config/aarch64/linux-unwind.h
@@ -0,0 +1,143 @@
+/* Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef inhibit_libc
+
+#include <signal.h>
+#include <sys/ucontext.h>
+
+#define MD_FALLBACK_FRAME_STATE_FOR aarch64_fallback_frame_state
+
+static _Unwind_Reason_Code
+aarch64_fallback_frame_state (struct _Unwind_Context *context,
+ _Unwind_FrameState * fs)
+{
+ /* The kernel creates an rt_sigframe on the stack immediately prior
+ to delivering a signal.
+
+ This structure must have the same shape as the linux kernel
+ equivalent. */
+ struct rt_sigframe
+ {
+ siginfo_t info;
+ struct ucontext uc;
+ };
+
+ struct rt_sigframe *rt_;
+ _Unwind_Ptr new_cfa;
+ unsigned *pc = context->ra;
+ struct sigcontext *sc;
+ struct _aarch64_ctx *extension_marker;
+ int i;
+
+ /* A signal frame will have a return address pointing to
+ __default_sa_restorer. This code is hardwired as:
+
+ 0xd2801168 movz x8, #0x8b
+ 0xd4000001 svc 0x0
+ */
+ if (pc[0] != 0xd2801168 || pc[1] != 0xd4000001)
+ {
+ return _URC_END_OF_STACK;
+ }
+
+ rt_ = context->cfa;
+ sc = &rt_->uc.uc_mcontext;
+
+/* This define duplicates the definition in aarch64.md.  */
+#define SP_REGNUM 31
+
+ new_cfa = (_Unwind_Ptr) sc;
+ fs->regs.cfa_how = CFA_REG_OFFSET;
+ fs->regs.cfa_reg = STACK_POINTER_REGNUM;
+ fs->regs.cfa_offset = new_cfa - (_Unwind_Ptr) context->cfa;
+
+ for (i = 0; i < AARCH64_DWARF_NUMBER_R; i++)
+ {
+ fs->regs.reg[AARCH64_DWARF_R0 + i].how = REG_SAVED_OFFSET;
+ fs->regs.reg[AARCH64_DWARF_R0 + i].loc.offset =
+ (_Unwind_Ptr) & (sc->regs[i]) - new_cfa;
+ }
+
+ /* The core context may be extended with an arbitrary set of
+ additional contexts appended sequentially. Each additional
+ context contains a magic identifier and size in bytes. The size
+ field can be used to skip over unrecognized context extensions.
+ The end of the context sequence is marked by a context with magic
+ 0 or size 0. */
+ for (extension_marker = (struct _aarch64_ctx *) &sc->__reserved;
+ extension_marker->magic;
+ extension_marker = (struct _aarch64_ctx *)
+ ((unsigned char *) extension_marker + extension_marker->size))
+ {
+ if (extension_marker->magic == FPSIMD_MAGIC)
+ {
+ struct fpsimd_context *ctx =
+ (struct fpsimd_context *) extension_marker;
+ int i;
+
+ for (i = 0; i < AARCH64_DWARF_NUMBER_V; i++)
+ {
+ _Unwind_Sword offset;
+
+ fs->regs.reg[AARCH64_DWARF_V0 + i].how = REG_SAVED_OFFSET;
+
+ /* sigcontext contains 32 128-bit registers for V0 to
+ V31. The kernel will have saved the contents of the
+ V registers. We want to unwind the callee-saved D
+ registers. Each D register comprises the least
+ significant half of the corresponding V register. We
+ need to offset into the saved V register, dependent on
+ our endianness, to find the saved D register. */
+
+ offset = (_Unwind_Ptr) & (ctx->vregs[i]) - new_cfa;
+
+ /* The endianness adjustment code below expects that a
+ saved V register is 16 bytes. */
+ gcc_assert (sizeof (ctx->vregs[0]) == 16);
+#if defined (__AARCH64EB__)
+ offset = offset + 8;
+#endif
+ fs->regs.reg[AARCH64_DWARF_V0 + i].loc.offset = offset;
+ }
+ }
+ else
+ {
+ /* There is context provided that we do not recognize! */
+ }
+ }
+
+ fs->regs.reg[31].how = REG_SAVED_OFFSET;
+ fs->regs.reg[31].loc.offset = (_Unwind_Ptr) & (sc->sp) - new_cfa;
+
+ fs->signal_frame = 1;
+
+ fs->regs.reg[DWARF_ALT_FRAME_RETURN_COLUMN].how = REG_SAVED_VAL_OFFSET;
+ fs->regs.reg[DWARF_ALT_FRAME_RETURN_COLUMN].loc.offset =
+ (_Unwind_Ptr) (sc->pc) - new_cfa;
+
+ fs->retaddr_column = DWARF_ALT_FRAME_RETURN_COLUMN;
+
+ return _URC_NO_REASON;
+}
+
+#endif
diff --git a/libgcc/config/aarch64/sfp-machine.h b/libgcc/config/aarch64/sfp-machine.h
new file mode 100644
index 00000000000..3a09ae7605f
--- /dev/null
+++ b/libgcc/config/aarch64/sfp-machine.h
@@ -0,0 +1,153 @@
+/* Machine description for AArch64 architecture.
+ Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#define _FP_W_TYPE_SIZE 64
+#define _FP_W_TYPE unsigned long
+#define _FP_WS_TYPE signed long
+#define _FP_I_TYPE int
+
+typedef int TItype __attribute__ ((mode (TI)));
+typedef unsigned int UTItype __attribute__ ((mode (TI)));
+#define TI_BITS (__CHAR_BIT__ * (int)sizeof(TItype))
+
+/* The type of the result of a floating point comparison. This must
+ match __libgcc_cmp_return__ in GCC for the target. */
+typedef int __gcc_CMPtype __attribute__ ((mode (__libgcc_cmp_return__)));
+#define CMPtype __gcc_CMPtype
+
+#define _FP_MUL_MEAT_Q(R,X,Y) \
+ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
+
+#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv(Q,R,X,Y)
+
+#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
+#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1)
+#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1
+#define _FP_NANSIGN_S 0
+#define _FP_NANSIGN_D 0
+#define _FP_NANSIGN_Q 0
+
+#define _FP_KEEPNANFRACP 1
+
+/* This appears to be in line with the VFP conventions in the v7-a
+ ARM ARM. Need to check against the v8 version. */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
+ do { \
+ if ((_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs) \
+ && !(_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)) \
+ { \
+ R##_s = Y##_s; \
+ _FP_FRAC_COPY_##wc(R,Y); \
+ } \
+ else \
+ { \
+ R##_s = X##_s; \
+ _FP_FRAC_COPY_##wc(R,X); \
+ } \
+ R##_c = FP_CLS_NAN; \
+ } while (0)
+
+#define FP_EX_INVALID 0x01
+#define FP_EX_DIVZERO 0x02
+#define FP_EX_OVERFLOW 0x04
+#define FP_EX_UNDERFLOW 0x08
+#define FP_EX_INEXACT 0x10
+
+#define FP_HANDLE_EXCEPTIONS \
+ do { \
+ const float fp_max = __FLT_MAX__; \
+ const float fp_min = __FLT_MIN__; \
+ const float fp_1e32 = 1.0e32f; \
+ const float fp_zero = 0.0; \
+ const float fp_one = 1.0; \
+ unsigned fpsr; \
+ if (_fex & FP_EX_INVALID) \
+ { \
+ __asm__ __volatile__ ("fdiv\ts0, %s0, %s0" \
+ : \
+ : "w" (fp_zero) \
+ : "s0"); \
+ __asm__ __volatile__ ("mrs\t%0, fpsr" : "=r" (fpsr)); \
+ } \
+ if (_fex & FP_EX_DIVZERO) \
+ { \
+ __asm__ __volatile__ ("fdiv\ts0, %s0, %s1" \
+ : \
+ : "w" (fp_one), "w" (fp_zero) \
+ : "s0"); \
+ __asm__ __volatile__ ("mrs\t%0, fpsr" : "=r" (fpsr)); \
+ } \
+ if (_fex & FP_EX_OVERFLOW) \
+ { \
+ __asm__ __volatile__ ("fadd\ts0, %s0, %s1" \
+ : \
+ : "w" (fp_max), "w" (fp_1e32) \
+ : "s0"); \
+ __asm__ __volatile__ ("mrs\t%0, fpsr" : "=r" (fpsr)); \
+ } \
+ if (_fex & FP_EX_UNDERFLOW) \
+ { \
+ __asm__ __volatile__ ("fmul\ts0, %s0, %s0" \
+ : \
+ : "w" (fp_min) \
+ : "s0"); \
+ __asm__ __volatile__ ("mrs\t%0, fpsr" : "=r" (fpsr)); \
+ } \
+ if (_fex & FP_EX_INEXACT) \
+ { \
+ __asm__ __volatile__ ("fsub\ts0, %s0, %s1" \
+ : \
+ : "w" (fp_max), "w" (fp_one) \
+ : "s0"); \
+ __asm__ __volatile__ ("mrs\t%0, fpsr" : "=r" (fpsr)); \
+ } \
+ } while (0)
+
+
+#define FP_RND_NEAREST 0
+#define FP_RND_ZERO 0xc00000
+#define FP_RND_PINF 0x400000
+#define FP_RND_MINF 0x800000
+
+#define _FP_DECL_EX \
+ unsigned long int _fpcr __attribute__ ((unused)) = FP_RND_NEAREST
+
+#define FP_INIT_ROUNDMODE \
+ do { \
+ __asm__ __volatile__ ("mrs %0, fpcr" \
+ : "=r" (_fpcr)); \
+ } while (0)
+
+#define FP_ROUNDMODE (_fpcr & 0xc00000)
+
+#define __LITTLE_ENDIAN 1234
+#define __BIG_ENDIAN 4321
+
+#if defined __AARCH64EB__
+# define __BYTE_ORDER __BIG_ENDIAN
+#else
+# define __BYTE_ORDER __LITTLE_ENDIAN
+#endif
+
+
+/* Define ALIASNAME as a strong alias for NAME. */
+# define strong_alias(name, aliasname) _strong_alias(name, aliasname)
+# define _strong_alias(name, aliasname) \
+ extern __typeof (name) aliasname __attribute__ ((alias (#name)));
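[Editorial note, not part of the patch] FP_ROUNDMODE masks out the FPCR RMode field, bits [23:22], and the four FP_RND_* values above are exactly its encodings. A hedged stand-alone decode; the fpcr value here is a made-up example, not read from hardware:

    /* Decode an FPCR RMode field (sketch).  */
    #include <stdio.h>

    int main (void)
    {
      unsigned long fpcr = 0x800000;   /* hypothetical FPCR snapshot */
      switch (fpcr & 0xc00000)
        {
        case 0x000000: puts ("nearest"); break;      /* FP_RND_NEAREST */
        case 0x400000: puts ("toward +inf"); break;  /* FP_RND_PINF */
        case 0x800000: puts ("toward -inf"); break;  /* FP_RND_MINF */
        default:       puts ("toward zero"); break;  /* FP_RND_ZERO */
        }
      return 0;
    }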
diff --git a/libgcc/config/aarch64/sync-cache.c b/libgcc/config/aarch64/sync-cache.c
new file mode 100644
index 00000000000..d7b621ee6d8
--- /dev/null
+++ b/libgcc/config/aarch64/sync-cache.c
@@ -0,0 +1,57 @@
+/* Machine description for AArch64 architecture.
+ Copyright (C) 2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+void
+__aarch64_sync_cache_range (const void *base, const void *end)
+{
+ unsigned icache_lsize;
+ unsigned dcache_lsize;
+ static unsigned int cache_info = 0;
+ const char *address;
+
+ if (! cache_info)
+ /* CTR_EL0 [3:0] contains log2 of icache line size in words.
+ CTR_EL0 [19:16] contains log2 of dcache line size in words. */
+ asm volatile ("mrs\t%0, ctr_el0":"=r" (cache_info));
+
+ icache_lsize = 4 << (cache_info & 0xF);
+ dcache_lsize = 4 << ((cache_info >> 16) & 0xF);
+
+ /* Loop over the address range, cleaning one cache line at a time.
+ The data cache must be flushed to the point of unification first,
+ so that the instruction cache fetches the updated data. 'end' is
+ exclusive, as per the GNU definition of __clear_cache. */
+
+ for (address = base; address < (const char *) end; address += dcache_lsize)
+ asm volatile ("dc\tcvau, %0"
+ :
+ : "r" (address)
+ : "memory");
+
+ asm volatile ("dsb\tish" : : : "memory");
+
+ for (address = base; address < (const char *) end; address += icache_lsize)
+ asm volatile ("ic\tivau, %0"
+ :
+ : "r" (address)
+ : "memory");
+
+ asm volatile ("dsb\tish; isb" : : : "memory");
+}
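[Editorial note, not part of the patch] __aarch64_sync_cache_range is the cache-maintenance helper a JIT-style caller ultimately needs after writing instructions; portable code would normally reach it through __builtin___clear_cache rather than calling it directly. A hypothetical sketch (publish_code and its arguments are illustrative):

    /* Hypothetical use: make freshly written code executable.  */
    #include <string.h>

    void publish_code (void *buf, const void *insns, unsigned len)
    {
      memcpy (buf, insns, len);                  /* write the code bytes */
      __builtin___clear_cache ((char *) buf,     /* D-clean + I-invalidate */
                               (char *) buf + len);
    }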
diff --git a/libgcc/config/aarch64/t-aarch64 b/libgcc/config/aarch64/t-aarch64
new file mode 100644
index 00000000000..002cb832902
--- /dev/null
+++ b/libgcc/config/aarch64/t-aarch64
@@ -0,0 +1,21 @@
+# Machine description for AArch64 architecture.
+# Copyright (C) 2012 Free Software Foundation, Inc.
+# Contributed by ARM Ltd.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+LIB2ADD += $(srcdir)/config/aarch64/sync-cache.c
diff --git a/libgcc/config/aarch64/t-softfp b/libgcc/config/aarch64/t-softfp
new file mode 100644
index 00000000000..6500b5243e4
--- /dev/null
+++ b/libgcc/config/aarch64/t-softfp
@@ -0,0 +1,7 @@
+softfp_float_modes := tf
+softfp_int_modes := si di ti
+softfp_extensions := sftf dftf
+softfp_truncations := tfsf tfdf
+softfp_exclude_libgcc2 := n
+
+TARGET_LIBGCC2_CFLAGS += -Wno-missing-prototypes
diff --git a/libgomp/ChangeLog.aarch64 b/libgomp/ChangeLog.aarch64
new file mode 100644
index 00000000000..4855758eaa4
--- /dev/null
+++ b/libgomp/ChangeLog.aarch64
@@ -0,0 +1,12 @@
+2012-05-25 Ian Bolton <ian.bolton@arm.com>
+ Jim MacArthur <jim.macarthur@arm.com>
+ Marcus Shawcroft <marcus.shawcroft@arm.com>
+ Nigel Stephens <nigel.stephens@arm.com>
+ Ramana Radhakrishnan <ramana.radhakrishnan@arm.com>
+ Richard Earnshaw <rearnsha@arm.com>
+ Sofiane Naci <sofiane.naci@arm.com>
+ Stephen Thomas <stephen.thomas@arm.com>
+ Tejas Belagod <tejas.belagod@arm.com>
+ Yufeng Zhang <yufeng.zhang@arm.com>
+
+ * configure.tgt: Add AArch64.
diff --git a/libgomp/configure.tgt b/libgomp/configure.tgt
index 210dd5da348..359c28e02e6 100644
--- a/libgomp/configure.tgt
+++ b/libgomp/configure.tgt
@@ -27,6 +27,10 @@ config_path="posix"
if test $enable_linux_futex = yes; then
case "${target}" in
+ aarch64*-*-linux*)
+ config_path="linux posix"
+ ;;
+
alpha*-*-linux*)
config_path="linux/alpha linux posix"
;;
diff --git a/libstdc++-v3/ChangeLog.aarch64 b/libstdc++-v3/ChangeLog.aarch64
new file mode 100644
index 00000000000..ff224185db0
--- /dev/null
+++ b/libstdc++-v3/ChangeLog.aarch64
@@ -0,0 +1,4 @@
+2012-05-25 Yufeng Zhang <yufeng.zhang@arm.com>
+
+ * config/cpu/aarch64/cxxabi_tweaks.h: New file.
+ * configure.host: Enable aarch64.
diff --git a/libstdc++-v3/config/cpu/aarch64/cxxabi_tweaks.h b/libstdc++-v3/config/cpu/aarch64/cxxabi_tweaks.h
new file mode 100644
index 00000000000..31a423f4fd5
--- /dev/null
+++ b/libstdc++-v3/config/cpu/aarch64/cxxabi_tweaks.h
@@ -0,0 +1,60 @@
+// Control various target specific ABI tweaks. AArch64 version.
+
+// Copyright (C) 2004, 2006, 2008, 2009, 2011, 2012
+// Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+/** @file cxxabi_tweaks.h
+ * The header provides a CPU-variable interface to the C++ ABI.
+ */
+
+#ifndef _CXXABI_TWEAKS_H
+#define _CXXABI_TWEAKS_H 1
+
+#ifdef __cplusplus
+namespace __cxxabiv1
+{
+ extern "C"
+ {
+#endif
+
+ // The AArch64 ABI uses the least significant bit of a 64-bit
+ // guard variable.
+#define _GLIBCXX_GUARD_TEST(x) ((*(x) & 1) != 0)
+#define _GLIBCXX_GUARD_SET(x) *(x) = 1
+#define _GLIBCXX_GUARD_BIT 1
+#define _GLIBCXX_GUARD_PENDING_BIT __guard_test_bit (1, 1)
+#define _GLIBCXX_GUARD_WAITING_BIT __guard_test_bit (2, 1)
+ __extension__ typedef int __guard __attribute__((mode (__DI__)));
+
+ // __cxa_vec_ctor has void return type.
+ typedef void __cxa_vec_ctor_return_type;
+#define _GLIBCXX_CXA_VEC_CTOR_RETURN(x) return
+ // Constructors and destructors do not return a value.
+ typedef void __cxa_cdtor_return_type;
+
+#ifdef __cplusplus
+ }
+} // namespace __cxxabiv1
+#endif
+
+#endif
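[Editorial note, not part of the patch] The guard macros above encode the Itanium-style one-time initialization protocol, with AArch64 using bit 0 of a 64-bit guard word. A hedged sketch of the single-threaded fast path emitted around a static local's constructor (names here are illustrative, not the real ABI entry points):

    /* Sketch: one-time init guarded by bit 0 of a 64-bit word.  */
    typedef long long guard_word;        /* stand-in for __guard */

    static void init_once (guard_word *g, void (*ctor) (void))
    {
      if ((*g & 1) == 0)   /* _GLIBCXX_GUARD_TEST */
        {
          ctor ();         /* run the constructor exactly once */
          *g = 1;          /* _GLIBCXX_GUARD_SET */
        }
    }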
diff --git a/libstdc++-v3/configure.host b/libstdc++-v3/configure.host
index ca4d81957b6..731e83222a6 100644
--- a/libstdc++-v3/configure.host
+++ b/libstdc++-v3/configure.host
@@ -94,6 +94,9 @@ error_constants_dir="os/generic"
# variants into the established source config/cpu/* sub-directories.
# THIS TABLE IS SORTED. KEEP IT THAT WAY.
case "${host_cpu}" in
+ aarch64*)
+ try_cpu=aarch64
+ ;;
alpha*)
try_cpu=alpha
;;