author     Christophe Lyon <christophe.lyon@linaro.org>  2014-02-10 11:36:51 +0000
committer  Christophe Lyon <christophe.lyon@linaro.org>  2014-02-10 11:36:51 +0000
commit     7d6ee503a84ef58712d1ec75843c8f95479a20a3 (patch)
tree       e676963825a21a65be5d5e8101a3240999d56ebe
parent     05f4da6835745190ec918f1b6083ee5a4c286ffe (diff)
Backport crypto intrinsics support.
gcc:

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206518
  2014-01-10  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

  * config/arm/arm.c (arm_init_iwmmxt_builtins): Skip non-iwmmxt builtins.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206151
  2013-12-20  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

  * config/arm/neon.ml (crypto_intrinsics): Add vceq_64 and vtst_p64.
  * config/arm/arm_neon.h: Regenerate.
  * config/arm/neon-docgen.ml: Add vceq_p64 and vtst_p64.
  * doc/arm-neon-intrinsics.texi: Regenerate.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206149
  2013-12-20  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

  * config/arm/arm_acle.h: Add underscores before variables.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206132
  2013-12-19  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

  * config/arm/neon-docgen.ml: Add crypto intrinsics documentation.
  * doc/arm-neon-intrinsics.texi: Regenerate.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206131
  2013-12-19  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

  * config/arm/neon-testgen.ml (effective_target): Handle "CRYPTO".

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206130
  2013-12-19  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

  * config/arm/arm.c (enum arm_builtins): Add crypto builtins.
  (arm_init_neon_builtins): Handle crypto builtins.
  (bdesc_2arg): Likewise.
  (bdesc_1arg): Likewise.
  (bdesc_3arg): New table.
  (arm_expand_ternop_builtin): New function.
  (arm_expand_unop_builtin): Handle sha1h explicitly.
  (arm_expand_builtin): Handle ternary builtins.
  * config/arm/arm.h (TARGET_CPU_CPP_BUILTINS): Define __ARM_FEATURE_CRYPTO.
  * config/arm/arm.md: Include crypto.md.
  (is_neon_type): Add crypto types.
  * config/arm/arm_neon_builtins.def: Add TImode reinterprets.
  * config/arm/crypto.def: New.
  * config/arm/crypto.md: Likewise.
  * config/arm/iterators.md (CRYPTO_UNARY): New int iterator.
  (CRYPTO_BINARY): Likewise.
  (CRYPTO_TERNARY): Likewise.
  (CRYPTO_SELECTING): Likewise.
  (crypto_pattern): New int attribute.
  (crypto_size_sfx): Likewise.
  (crypto_mode): Likewise.
  (crypto_type): Likewise.
  * config/arm/neon-gen.ml: Handle poly64_t and poly128_t types. Handle crypto intrinsics.
  * config/arm/neon.ml: Add support for poly64 and polt128 types and intrinsics. Define crypto intrinsics.
  * config/arm/neon.md (neon_vreinterpretti<mode>): New pattern.
  (neon_vreinterpretv16qi<mode>): Use VQXMOV mode iterator.
  (neon_vreinterpretv8hi<mode>): Likewise.
  (neon_vreinterpretv4si<mode>): Likewise.
  (neon_vreinterpretv4sf<mode>): Likewise.
  (neon_vreinterpretv2di<mode>): Likewise.
  * config/arm/unspecs.md (UNSPEC_AESD, UNSPEC_AESE, UNSPEC_AESIMC, UNSPEC_AESMC, UNSPEC_SHA1C, UNSPEC_SHA1M, UNSPEC_SHA1P, UNSPEC_SHA1H, UNSPEC_SHA1SU0, UNSPEC_SHA1SU1, UNSPEC_SHA256H, UNSPEC_SHA256H2, UNSPEC_SHA256SU0, UNSPEC_SHA256SU1, VMULLP64): Define.
  * config/arm/arm_neon.h: Regenerate.

  Modifications needed to backport into linaro-4_8-branch:
  * config/arm/arm.md (attribute neon_type): neon_crypto_aes, neon_crypto_sha1_xor, neon_crypto_sha1_fast, neon_crypto_sha1_slow, neon_crypto_sha256_fast, neon_crypto_sha256_slow, neon_mul_d_long: New.
  instead of:
  * config/arm/arm.md: Include crypto.md.
  (is_neon_type): Add crypto types.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206128
  2013-12-19  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

  * Makefile.in (TEXI_GCC_FILES): Add arm-acle-intrinsics.texi.
  * config.gcc (extra_headers): Add arm_acle.h.
  * config/arm/arm.c (FL_CRC32): Define.
  (arm_have_crc): Likewise.
  (arm_option_override): Set arm_have_crc.
  (arm_builtins): Add CRC32 builtins.
  (bdesc_2arg): Likewise.
  (arm_init_crc32_builtins): New function.
  (arm_init_builtins): Initialise CRC32 builtins.
  (arm_file_start): Handle architecture extensions.
  * config/arm/arm.h (TARGET_CPU_CPP_BUILTINS): Define __ARM_FEATURE_CRC32. Define __ARM_32BIT_STATE.
  (TARGET_CRC32): Define.
  * config/arm/arm-arches.def: Add armv8-a+crc.
  * config/arm/arm-tables.opt: Regenerate.
  * config/arm/arm.md (type): Add crc.
  (<crc_variant>): New insn.
  * config/arm/arm_acle.h: New file.
  * config/arm/iterators.md (CRC): New int iterator.
  (crc_variant, crc_mode): New int attributes.
  * confg/arm/unspecs.md (UNSPEC_CRC32B, UNSPEC_CRC32H, UNSPEC_CRC32W, UNSPEC_CRC32CB, UNSPEC_CRC32CH, UNSPEC_CRC32CW): New unspecs.
  * doc/invoke.texi: Document -march=armv8-a+crc option.
  * doc/extend.texi: Document ACLE intrinsics.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206120
  2013-12-19  Tejas Belagod  <tejas.belagod@arm.com>

  * config/aarch64/aarch64-builtins.c (aarch64_init_simd_builtins): Define builtin types for poly64_t poly128_t.
  (TYPES_BINOPP, aarch64_types_binopp_qualifiers): New.
  * aarch64/aarch64-simd-builtins.def: Update builtins table.
  * config/aarch64/aarch64-simd.md (aarch64_crypto_pmulldi, aarch64_crypto_pmullv2di): New.
  * config/aarch64/aarch64.c (aarch64_simd_mangle_map): Update table for poly64x2_t mangler.
  * config/aarch64/arm_neon.h (poly64x2_t, poly64_t, poly128_t): Define.
  (vmull_p64, vmull_high_p64): New.
  * config/aarch64/iterators.md (UNSPEC_PMULL<2>): New.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206119
  2013-12-19  Tejas Belagod  <tejas.belagod@arm.com>

  * config/aarch64/aarch64-simd-builtins.def: Update builtins table.
  * config/aarch64/aarch64-simd.md (aarch64_crypto_sha256h<sha256_op>v4si, aarch64_crypto_sha256su0v4si, aarch64_crypto_sha256su1v4si): New.
  * config/aarch64/arm_neon.h (vsha256hq_u32, vsha256h2q_u32, vsha256su0q_u32, vsha256su1q_u32): New.
  * config/aarch64/iterators.md (UNSPEC_SHA256H<2>, UNSPEC_SHA256SU<01>): New.
  (CRYPTO_SHA256): New int iterator.
  (sha256_op): New int attribute.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206118
  2013-12-19  Tejas Belagod  <tejas.belagod@arm.com>

  * config/aarch64/aarch64-simd-builtins.def: Update builtins table.
  * config/aarch64/aarch64-builtins.c (aarch64_types_ternopu_qualifiers, TYPES_TERNOPU): New.
  * config/aarch64/aarch64-simd.md (aarch64_crypto_sha1hsi, aarch64_crypto_sha1su1v4si, aarch64_crypto_sha1<sha1_op>v4si, aarch64_crypto_sha1su0v4si): New.
  * config/aarch64/arm_neon.h (vsha1cq_u32, sha1mq_u32, vsha1pq_u32, vsha1h_u32, vsha1su0q_u32, vsha1su1q_u32): New.
  * config/aarch64/iterators.md (UNSPEC_SHA1<CPMH>, UNSPEC_SHA1SU<01>): New.
  (CRYPTO_SHA1): New int iterator.
  (sha1_op): New int attribute.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206117
  2013-12-19  Tejas Belagod  <tejas.belagod@arm.com>

  * config/aarch64/aarch64-simd-builtins.def: Update builtins table.
  * config/aarch64/aarch64-builtins.c (aarch64_types_binopu_qualifiers, TYPES_BINOPU): New.
  * config/aarch64/aarch64-simd.md (aarch64_crypto_aes<aes_op>v16qi, aarch64_crypto_aes<aesmc_op>v16qi): New.
  * config/aarch64/arm_neon.h (vaeseq_u8, vaesdq_u8, vaesmcq_u8, vaesimcq_u8): New.
  * config/aarch64/iterators.md (UNSPEC_AESE, UNSPEC_AESD, UNSPEC_AESMC, UNSPEC_AESIMC): New.
  (CRYPTO_AES, CRYPTO_AESMC): New int iterators.
  (aes_op, aesmc_op): New int attributes.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206115
  2013-12-19  Tejas Belagod  <tejas.belagod@arm.com>

  * config/arm/types.md (neon_mul_d_long, crypto_aes, crypto_sha1_xor, crypto_sha1_fast, crypto_sha1_slow, crypto_sha256_fast, crypto_sha256_slow): New.

  Modifications needed to backport into linaro-4_8-branch:
  * config/aarch64/aarch64-simd.md (attribute simd_type): (simd_mul_d_long, simd_crypto_aes, simd_crypto_sha1_xor, simd_crypto_sha1_fast, simd_crypto_sha1_slow, simd_crypto_sha256_fast, simd_crypto_sha256_slow) : New.
  instead of the above change.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206114
  2013-12-19  Tejas Belagod  <tejas.belagod@arm.com>

  * config/aarch64/aarch64.h (TARGET_CRYPTO): New.
  (__ARM_FEATURE_CRYPTO): Define if TARGET_CRYPTO is true.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r205384.
  2013-11-26  James Greenhalgh  <james.greenhalgh@arm.com>

  * config/aarch64/aarch64-builtins.c (aarch64_type_qualifiers): Add qualifier_poly.
  (aarch64_build_scalar_type): Also build Poly types.
  (aarch64_build_vector_type): Likewise.
  (aarch64_build_type): Likewise.
  (aarch64_build_signed_type): New.
  (aarch64_build_unsigned_type): Likewise.
  (aarch64_build_poly_type): Likewise.
  (aarch64_init_simd_builtins): Also handle Poly types.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r205383.
  2013-11-26  James Greenhalgh  <james.greenhalgh@arm.com>

  * config/aarch64/aarch64-builtins.c (VAR1): Use new naming scheme for aarch64_builtins.
  (aarch64_builtin_vectorized_function): Use new aarch64_builtins names.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r205092.
  2013-11-20  James Greenhalgh  <james.greenhalgh@arm.com>

  * gcc/config/aarch64/aarch64-builtins.c (aarch64_simd_itype): Remove.
  (aarch64_simd_builtin_datum): Remove itype, add qualifiers pointer.
  (VAR1): Use qualifiers.
  (aarch64_build_scalar_type): New.
  (aarch64_build_vector_type): Likewise.
  (aarch64_build_type): Likewise.
  (aarch64_init_simd_builtins): Refactor, remove special cases, consolidate main loop.
  (aarch64_simd_expand_args): Likewise.

gcc/testsuite:

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206519
  2014-01-10  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

  * lib/target-supports.exp (check_effective_target_arm_crypto_ok_nocache): New.
  (check_effective_target_arm_crypto_ok): Use above procedure.
  (add_options_for_arm_crypto): Use et_arm_crypto_flags.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206151
  2013-12-20  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

  * gcc.target/arm/neon-vceq_p64.c: New test.
  * gcc.target/arm/neon-vtst_p64.c: Likewise.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206131
  2013-12-04  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

  * lib/target-supports.exp (check_effective_target_arm_crypto_ok): New procedure.
  (add_options_for_arm_crypto): Likewise.
  * gcc.target/arm/crypto-vaesdq_u8.c: New test.
  * gcc.target/arm/crypto-vaeseq_u8.c: Likewise.
  * gcc.target/arm/crypto-vaesimcq_u8.c: Likewise.
  * gcc.target/arm/crypto-vaesmcq_u8.c: Likewise.
  * gcc.target/arm/crypto-vldrq_p128.c: Likewise.
  * gcc.target/arm/crypto-vmull_high_p64.c: Likewise.
  * gcc.target/arm/crypto-vmullp64.c: Likewise.
  * gcc.target/arm/crypto-vsha1cq_u32.c: Likewise.
  * gcc.target/arm/crypto-vsha1h_u32.c: Likewise.
  * gcc.target/arm/crypto-vsha1mq_u32.c: Likewise.
  * gcc.target/arm/crypto-vsha1pq_u32.c: Likewise.
  * gcc.target/arm/crypto-vsha1su0q_u32.c: Likewise.
  * gcc.target/arm/crypto-vsha1su1q_u32.c: Likewise.
  * gcc.target/arm/crypto-vsha256h2q_u32.c: Likewise.
  * gcc.target/arm/crypto-vsha256hq_u32.c: Likewise.
  * gcc.target/arm/crypto-vsha256su0q_u32.c: Likewise.
  * gcc.target/arm/crypto-vsha256su1q_u32.c: Likewise.
  * gcc.target/arm/crypto-vstrq_p128.c: Likewise.
  * gcc.target/arm/neon/vbslQp64: Generate.
  * gcc.target/arm/neon/vbslp64: Likewise.
  * gcc.target/arm/neon/vcombinep64: Likewise.
  * gcc.target/arm/neon/vcreatep64: Likewise.
  * gcc.target/arm/neon/vdupQ_lanep64: Likewise.
  * gcc.target/arm/neon/vdupQ_np64: Likewise.
  * gcc.target/arm/neon/vdup_lanep64: Likewise.
  * gcc.target/arm/neon/vdup_np64: Likewise.
  * gcc.target/arm/neon/vextQp64: Likewise.
  * gcc.target/arm/neon/vextp64: Likewise.
  * gcc.target/arm/neon/vget_highp64: Likewise.
  * gcc.target/arm/neon/vget_lowp64: Likewise.
  * gcc.target/arm/neon/vld1Q_dupp64: Likewise.
  * gcc.target/arm/neon/vld1Q_lanep64: Likewise.
  * gcc.target/arm/neon/vld1Qp64: Likewise.
  * gcc.target/arm/neon/vld1_dupp64: Likewise.
  * gcc.target/arm/neon/vld1_lanep64: Likewise.
  * gcc.target/arm/neon/vld1p64: Likewise.
  * gcc.target/arm/neon/vld2_dupp64: Likewise.
  * gcc.target/arm/neon/vld2p64: Likewise.
  * gcc.target/arm/neon/vld3_dupp64: Likewise.
  * gcc.target/arm/neon/vld3p64: Likewise.
  * gcc.target/arm/neon/vld4_dupp64: Likewise.
  * gcc.target/arm/neon/vld4p64: Likewise.
  * gcc.target/arm/neon/vreinterpretQf32_p128: Likewise.
  * gcc.target/arm/neon/vreinterpretQf32_p64: Likewise.
  * gcc.target/arm/neon/vreinterpretQp128_f32: Likewise.
  * gcc.target/arm/neon/vreinterpretQp128_p16: Likewise.
  * gcc.target/arm/neon/vreinterpretQp128_p64: Likewise.
  * gcc.target/arm/neon/vreinterpretQp128_p8: Likewise.
  * gcc.target/arm/neon/vreinterpretQp128_s16: Likewise.
  * gcc.target/arm/neon/vreinterpretQp128_s32: Likewise.
  * gcc.target/arm/neon/vreinterpretQp128_s64: Likewise.
  * gcc.target/arm/neon/vreinterpretQp128_s8: Likewise.
  * gcc.target/arm/neon/vreinterpretQp128_u16: Likewise.
  * gcc.target/arm/neon/vreinterpretQp128_u32: Likewise.
  * gcc.target/arm/neon/vreinterpretQp128_u64: Likewise.
  * gcc.target/arm/neon/vreinterpretQp128_u8: Likewise.
  * gcc.target/arm/neon/vreinterpretQp16_p128: Likewise.
  * gcc.target/arm/neon/vreinterpretQp16_p64: Likewise.
  * gcc.target/arm/neon/vreinterpretQp64_f32: Likewise.
  * gcc.target/arm/neon/vreinterpretQp64_p128: Likewise.
  * gcc.target/arm/neon/vreinterpretQp64_p16: Likewise.
  * gcc.target/arm/neon/vreinterpretQp64_p8: Likewise.
  * gcc.target/arm/neon/vreinterpretQp64_s16: Likewise.
  * gcc.target/arm/neon/vreinterpretQp64_s32: Likewise.
  * gcc.target/arm/neon/vreinterpretQp64_s64: Likewise.
  * gcc.target/arm/neon/vreinterpretQp64_s8: Likewise.
  * gcc.target/arm/neon/vreinterpretQp64_u16: Likewise.
  * gcc.target/arm/neon/vreinterpretQp64_u32: Likewise.
  * gcc.target/arm/neon/vreinterpretQp64_u64: Likewise.
  * gcc.target/arm/neon/vreinterpretQp64_u8: Likewise.
  * gcc.target/arm/neon/vreinterpretQp8_p128: Likewise.
  * gcc.target/arm/neon/vreinterpretQp8_p64: Likewise.
  * gcc.target/arm/neon/vreinterpretQs16_p128: Likewise.
  * gcc.target/arm/neon/vreinterpretQs16_p64: Likewise.
  * gcc.target/arm/neon/vreinterpretQs32_p128: Likewise.
  * gcc.target/arm/neon/vreinterpretQs32_p64: Likewise.
  * gcc.target/arm/neon/vreinterpretQs64_p128: Likewise.
  * gcc.target/arm/neon/vreinterpretQs64_p64: Likewise.
  * gcc.target/arm/neon/vreinterpretQs8_p128: Likewise.
  * gcc.target/arm/neon/vreinterpretQs8_p64: Likewise.
  * gcc.target/arm/neon/vreinterpretQu16_p128: Likewise.
  * gcc.target/arm/neon/vreinterpretQu16_p64: Likewise.
  * gcc.target/arm/neon/vreinterpretQu32_p128: Likewise.
  * gcc.target/arm/neon/vreinterpretQu32_p64: Likewise.
  * gcc.target/arm/neon/vreinterpretQu64_p128: Likewise.
  * gcc.target/arm/neon/vreinterpretQu64_p64: Likewise.
  * gcc.target/arm/neon/vreinterpretQu8_p128: Likewise.
  * gcc.target/arm/neon/vreinterpretQu8_p64: Likewise.
  * gcc.target/arm/neon/vreinterpretf32_p64: Likewise.
  * gcc.target/arm/neon/vreinterpretp16_p64: Likewise.
  * gcc.target/arm/neon/vreinterpretp64_f32: Likewise.
  * gcc.target/arm/neon/vreinterpretp64_p16: Likewise.
  * gcc.target/arm/neon/vreinterpretp64_p8: Likewise.
  * gcc.target/arm/neon/vreinterpretp64_s16: Likewise.
  * gcc.target/arm/neon/vreinterpretp64_s32: Likewise.
  * gcc.target/arm/neon/vreinterpretp64_s64: Likewise.
  * gcc.target/arm/neon/vreinterpretp64_s8: Likewise.
  * gcc.target/arm/neon/vreinterpretp64_u16: Likewise.
  * gcc.target/arm/neon/vreinterpretp64_u32: Likewise.
  * gcc.target/arm/neon/vreinterpretp64_u64: Likewise.
  * gcc.target/arm/neon/vreinterpretp64_u8: Likewise.
  * gcc.target/arm/neon/vreinterpretp8_p64: Likewise.
  * gcc.target/arm/neon/vreinterprets16_p64: Likewise.
  * gcc.target/arm/neon/vreinterprets32_p64: Likewise.
  * gcc.target/arm/neon/vreinterprets64_p64: Likewise.
  * gcc.target/arm/neon/vreinterprets8_p64: Likewise.
  * gcc.target/arm/neon/vreinterpretu16_p64: Likewise.
  * gcc.target/arm/neon/vreinterpretu32_p64: Likewise.
  * gcc.target/arm/neon/vreinterpretu64_p64: Likewise.
  * gcc.target/arm/neon/vreinterpretu8_p64: Likewise.
  * gcc.target/arm/neon/vsliQ_np64: Likewise.
  * gcc.target/arm/neon/vsli_np64: Likewise.
  * gcc.target/arm/neon/vsriQ_np64: Likewise.
  * gcc.target/arm/neon/vsri_np64: Likewise.
  * gcc.target/arm/neon/vst1Q_lanep64: Likewise.
  * gcc.target/arm/neon/vst1Qp64: Likewise.
  * gcc.target/arm/neon/vst1_lanep64: Likewise.
  * gcc.target/arm/neon/vst1p64: Likewise.
  * gcc.target/arm/neon/vst2p64: Likewise.
  * gcc.target/arm/neon/vst3p64: Likewise.
  * gcc.target/arm/neon/vst4p64: Likewise.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206128
  2013-12-19  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

  * lib/target-supports.exp (add_options_for_arm_crc): New procedure.
  (check_effective_target_arm_crc_ok_nocache): Likewise.
  (check_effective_target_arm_crc_ok): Likewise.
  * gcc.target/arm/acle/: New directory.
  * gcc.target/arm/acle/acle.exp: New.
  * gcc.target/arm/acle/crc32b.c: New test.
  * gcc.target/arm/acle/crc32h.c: Likewise.
  * gcc.target/arm/acle/crc32w.c: Likewise.
  * gcc.target/arm/acle/crc32d.c: Likewise.
  * gcc.target/arm/acle/crc32cb.c: Likewise.
  * gcc.target/arm/acle/crc32ch.c: Likewise.
  * gcc.target/arm/acle/crc32cw.c: Likewise.
  * gcc.target/arm/acle/crc32cd.c: Likewise.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206120
  2013-12-19  Tejas Belagod  <tejas.belagod@arm.com>

  * gcc.target/aarch64/pmull_1.c: New.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206119
  2013-12-19  Tejas Belagod  <tejas.belagod@arm.com>

  * gcc.target/aarch64/sha256_1.c: New.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206118
  2013-12-19  Tejas Belagod  <tejas.belagod@arm.com>

  * gcc.target/aarch64/sha1_1.c: New.

2014-02-10  Michael Collison  <michael.collison@linaro.org>

  Backport from trunk r206117
  2013-12-19  Tejas Belagod  <tejas.belagod@arm.com>

  * gcc.target/aarch64/aes_1.c: New.
git-svn-id: https://gcc.gnu.org/svn/gcc/branches/linaro/gcc-4_8-branch@207655 138bc75d-0d04-0410-961f-82ee72b054a4
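For context, a minimal sketch of how the AArch32 NEON crypto intrinsics added by this backport are meant to be consumed. The vaeseq_u8/vaesmcq_u8 names and the __ARM_FEATURE_CRYPTO macro come from the regenerated arm_neon.h and arm.h changes in this patch; the wrapper function and the exact -mfpu/-march options needed to enable the feature are assumptions here, not part of the commit.

/* Sketch: one AES encryption round step with the new AArch32 NEON
   crypto intrinsics.  Assumes a crypto-capable FPU setting, e.g.
   something like -mfpu=crypto-neon-fp-armv8 -march=armv8-a.  */
#include <arm_neon.h>

#ifdef __ARM_FEATURE_CRYPTO
uint8x16_t
aes_round_step (uint8x16_t state, uint8x16_t round_key)
{
  /* AESE performs AddRoundKey + SubBytes + ShiftRows;
     AESMC performs MixColumns.  */
  uint8x16_t t = vaeseq_u8 (state, round_key);
  return vaesmcq_u8 (t);
}
#endif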
-rw-r--r--  gcc/ChangeLog.linaro | 257
-rw-r--r--  gcc/Makefile.in | 3
-rw-r--r--  gcc/config.gcc | 2
-rw-r--r--  gcc/config/aarch64/aarch64-builtins.c | 1249
-rw-r--r--  gcc/config/aarch64/aarch64-simd-builtins.def | 23
-rw-r--r--  gcc/config/aarch64/aarch64-simd.md | 131
-rw-r--r--  gcc/config/aarch64/aarch64.c | 1
-rw-r--r--  gcc/config/aarch64/aarch64.h | 4
-rw-r--r--  gcc/config/aarch64/arm_neon.h | 112
-rw-r--r--  gcc/config/aarch64/iterators.md | 31
-rw-r--r--  gcc/config/arm/arm-arches.def | 1
-rw-r--r--  gcc/config/arm/arm-tables.opt | 7
-rw-r--r--  gcc/config/arm/arm.c | 338
-rw-r--r--  gcc/config/arm/arm.h | 11
-rw-r--r--  gcc/config/arm/arm.md | 21
-rw-r--r--  gcc/config/arm/arm_neon.h | 2280
-rw-r--r--  gcc/config/arm/arm_neon_builtins.def | 11
-rw-r--r--  gcc/config/arm/iterators.md | 57
-rw-r--r--  gcc/config/arm/neon-docgen.ml | 80
-rw-r--r--  gcc/config/arm/neon-gen.ml | 99
-rw-r--r--  gcc/config/arm/neon-testgen.ml | 3
-rw-r--r--  gcc/config/arm/neon.md | 20
-rw-r--r--  gcc/config/arm/neon.ml | 424
-rw-r--r--  gcc/config/arm/unspecs.md | 21
-rw-r--r--  gcc/doc/arm-neon-intrinsics.texi | 1049
-rw-r--r--  gcc/doc/extend.texi | 9
-rw-r--r--  gcc/doc/invoke.texi | 5
-rw-r--r--  gcc/testsuite/ChangeLog.linaro | 195
-rw-r--r--  gcc/testsuite/lib/target-supports.exp | 66
29 files changed, 4833 insertions, 1677 deletions
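The new arm_acle.h header and CRC32 builtins listed in the diffstat give C-level access to the ARMv8 CRC instructions. A hedged sketch of their intended use follows; the __crc32b intrinsic name, the __ARM_FEATURE_CRC32 macro, and the -march=armv8-a+crc option are from the patch, while the buffer-walking helper is illustrative only.

/* Sketch: byte-wise CRC32 over a buffer using the new ACLE intrinsics,
   built with something like -march=armv8-a+crc.  */
#include <stddef.h>
#include <stdint.h>
#include <arm_acle.h>

#ifdef __ARM_FEATURE_CRC32
uint32_t
crc32_update (uint32_t crc, const uint8_t *buf, size_t len)
{
  while (len--)
    crc = __crc32b (crc, *buf++);	/* expands to the CRC32B insn  */
  return crc;
}
#endif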
diff --git a/gcc/ChangeLog.linaro b/gcc/ChangeLog.linaro
index e51391ab715..98e36873db7 100644
--- a/gcc/ChangeLog.linaro
+++ b/gcc/ChangeLog.linaro
@@ -1,3 +1,260 @@
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206518
+ 2014-01-10 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * config/arm/arm.c (arm_init_iwmmxt_builtins): Skip
+ non-iwmmxt builtins.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206151
+ 2013-12-20 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * config/arm/neon.ml (crypto_intrinsics): Add vceq_64 and vtst_p64.
+ * config/arm/arm_neon.h: Regenerate.
+ * config/arm/neon-docgen.ml: Add vceq_p64 and vtst_p64.
+ * doc/arm-neon-intrinsics.texi: Regenerate.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206149
+ 2013-12-20 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * config/arm/arm_acle.h: Add underscores before variables.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206132
+ 2013-12-19 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * config/arm/neon-docgen.ml: Add crypto intrinsics documentation.
+ * doc/arm-neon-intrinsics.texi: Regenerate.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206131
+ 2013-12-19 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * config/arm/neon-testgen.ml (effective_target): Handle "CRYPTO".
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206130
+ 2013-12-19 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * config/arm/arm.c (enum arm_builtins): Add crypto builtins.
+ (arm_init_neon_builtins): Handle crypto builtins.
+ (bdesc_2arg): Likewise.
+ (bdesc_1arg): Likewise.
+ (bdesc_3arg): New table.
+ (arm_expand_ternop_builtin): New function.
+ (arm_expand_unop_builtin): Handle sha1h explicitly.
+ (arm_expand_builtin): Handle ternary builtins.
+ * config/arm/arm.h (TARGET_CPU_CPP_BUILTINS):
+ Define __ARM_FEATURE_CRYPTO.
+ * config/arm/arm.md: Include crypto.md.
+ (is_neon_type): Add crypto types.
+ * config/arm/arm_neon_builtins.def: Add TImode reinterprets.
+ * config/arm/crypto.def: New.
+ * config/arm/crypto.md: Likewise.
+ * config/arm/iterators.md (CRYPTO_UNARY): New int iterator.
+ (CRYPTO_BINARY): Likewise.
+ (CRYPTO_TERNARY): Likewise.
+ (CRYPTO_SELECTING): Likewise.
+ (crypto_pattern): New int attribute.
+ (crypto_size_sfx): Likewise.
+ (crypto_mode): Likewise.
+ (crypto_type): Likewise.
+ * config/arm/neon-gen.ml: Handle poly64_t and poly128_t types.
+ Handle crypto intrinsics.
+ * config/arm/neon.ml: Add support for poly64 and polt128 types
+ and intrinsics. Define crypto intrinsics.
+ * config/arm/neon.md (neon_vreinterpretti<mode>): New pattern.
+ (neon_vreinterpretv16qi<mode>): Use VQXMOV mode iterator.
+ (neon_vreinterpretv8hi<mode>): Likewise.
+ (neon_vreinterpretv4si<mode>): Likewise.
+ (neon_vreinterpretv4sf<mode>): Likewise.
+ (neon_vreinterpretv2di<mode>): Likewise.
+ * config/arm/unspecs.md (UNSPEC_AESD, UNSPEC_AESE, UNSPEC_AESIMC,
+ UNSPEC_AESMC, UNSPEC_SHA1C, UNSPEC_SHA1M, UNSPEC_SHA1P, UNSPEC_SHA1H,
+ UNSPEC_SHA1SU0, UNSPEC_SHA1SU1, UNSPEC_SHA256H, UNSPEC_SHA256H2,
+ UNSPEC_SHA256SU0, UNSPEC_SHA256SU1, VMULLP64): Define.
+ * config/arm/arm_neon.h: Regenerate.
+
+ Modifications needed to backport into linaro-4_8-branch:
+ * config/arm/arm.md (attribute neon_type): neon_crypto_aes,
+ neon_crypto_sha1_xor, neon_crypto_sha1_fast,
+ neon_crypto_sha1_slow, neon_crypto_sha256_fast,
+ neon_crypto_sha256_slow, neon_mul_d_long: New.
+ instead of:
+ * config/arm/arm.md: Include crypto.md.
+ (is_neon_type): Add crypto types.
+
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206128
+ 2013-12-19 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * Makefile.in (TEXI_GCC_FILES): Add arm-acle-intrinsics.texi.
+ * config.gcc (extra_headers): Add arm_acle.h.
+ * config/arm/arm.c (FL_CRC32): Define.
+ (arm_have_crc): Likewise.
+ (arm_option_override): Set arm_have_crc.
+ (arm_builtins): Add CRC32 builtins.
+ (bdesc_2arg): Likewise.
+ (arm_init_crc32_builtins): New function.
+ (arm_init_builtins): Initialise CRC32 builtins.
+ (arm_file_start): Handle architecture extensions.
+ * config/arm/arm.h (TARGET_CPU_CPP_BUILTINS): Define __ARM_FEATURE_CRC32.
+ Define __ARM_32BIT_STATE.
+ (TARGET_CRC32): Define.
+ * config/arm/arm-arches.def: Add armv8-a+crc.
+ * config/arm/arm-tables.opt: Regenerate.
+ * config/arm/arm.md (type): Add crc.
+ (<crc_variant>): New insn.
+ * config/arm/arm_acle.h: New file.
+ * config/arm/iterators.md (CRC): New int iterator.
+ (crc_variant, crc_mode): New int attributes.
+ * confg/arm/unspecs.md (UNSPEC_CRC32B, UNSPEC_CRC32H, UNSPEC_CRC32W,
+ UNSPEC_CRC32CB, UNSPEC_CRC32CH, UNSPEC_CRC32CW): New unspecs.
+ * doc/invoke.texi: Document -march=armv8-a+crc option.
+ * doc/extend.texi: Document ACLE intrinsics.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206120
+ 2013-12-19 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/aarch64-builtins.c (aarch64_init_simd_builtins):
+ Define builtin types for poly64_t poly128_t.
+ (TYPES_BINOPP, aarch64_types_binopp_qualifiers): New.
+ * aarch64/aarch64-simd-builtins.def: Update builtins table.
+ * config/aarch64/aarch64-simd.md (aarch64_crypto_pmulldi,
+ aarch64_crypto_pmullv2di): New.
+ * config/aarch64/aarch64.c (aarch64_simd_mangle_map): Update table for
+ poly64x2_t mangler.
+ * config/aarch64/arm_neon.h (poly64x2_t, poly64_t, poly128_t): Define.
+ (vmull_p64, vmull_high_p64): New.
+ * config/aarch64/iterators.md (UNSPEC_PMULL<2>): New.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206119
+ 2013-12-19 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/aarch64-simd-builtins.def: Update builtins table.
+ * config/aarch64/aarch64-simd.md (aarch64_crypto_sha256h<sha256_op>v4si,
+ aarch64_crypto_sha256su0v4si, aarch64_crypto_sha256su1v4si): New.
+ * config/aarch64/arm_neon.h (vsha256hq_u32, vsha256h2q_u32,
+ vsha256su0q_u32, vsha256su1q_u32): New.
+ * config/aarch64/iterators.md (UNSPEC_SHA256H<2>, UNSPEC_SHA256SU<01>):
+ New.
+ (CRYPTO_SHA256): New int iterator.
+ (sha256_op): New int attribute.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206118
+ 2013-12-19 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/aarch64-simd-builtins.def: Update builtins table.
+ * config/aarch64/aarch64-builtins.c (aarch64_types_ternopu_qualifiers,
+ TYPES_TERNOPU): New.
+ * config/aarch64/aarch64-simd.md (aarch64_crypto_sha1hsi,
+ aarch64_crypto_sha1su1v4si, aarch64_crypto_sha1<sha1_op>v4si,
+ aarch64_crypto_sha1su0v4si): New.
+ * config/aarch64/arm_neon.h (vsha1cq_u32, sha1mq_u32, vsha1pq_u32,
+ vsha1h_u32, vsha1su0q_u32, vsha1su1q_u32): New.
+ * config/aarch64/iterators.md (UNSPEC_SHA1<CPMH>, UNSPEC_SHA1SU<01>):
+ New.
+ (CRYPTO_SHA1): New int iterator.
+ (sha1_op): New int attribute.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206117
+ 2013-12-19 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/aarch64-simd-builtins.def: Update builtins table.
+ * config/aarch64/aarch64-builtins.c (aarch64_types_binopu_qualifiers,
+ TYPES_BINOPU): New.
+ * config/aarch64/aarch64-simd.md (aarch64_crypto_aes<aes_op>v16qi,
+ aarch64_crypto_aes<aesmc_op>v16qi): New.
+ * config/aarch64/arm_neon.h (vaeseq_u8, vaesdq_u8, vaesmcq_u8,
+ vaesimcq_u8): New.
+ * config/aarch64/iterators.md (UNSPEC_AESE, UNSPEC_AESD, UNSPEC_AESMC,
+ UNSPEC_AESIMC): New.
+ (CRYPTO_AES, CRYPTO_AESMC): New int iterators.
+ (aes_op, aesmc_op): New int attributes.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206115
+ 2013-12-19 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/arm/types.md (neon_mul_d_long, crypto_aes, crypto_sha1_xor,
+ crypto_sha1_fast, crypto_sha1_slow, crypto_sha256_fast,
+ crypto_sha256_slow): New.
+
+ Modifications needed to backport into linaro-4_8-branch:
+ * config/aarch64/aarch64-simd.md (attribute simd_type):
+ (simd_mul_d_long, simd_crypto_aes, simd_crypto_sha1_xor,
+ simd_crypto_sha1_fast, simd_crypto_sha1_slow, simd_crypto_sha256_fast,
+ simd_crypto_sha256_slow) : New.
+ instead of the above change.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206114
+ 2013-12-19 Tejas Belagod <tejas.belagod@arm.com>
+
+ * config/aarch64/aarch64.h (TARGET_CRYPTO): New.
+ (__ARM_FEATURE_CRYPTO): Define if TARGET_CRYPTO is true.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r205384.
+ 2013-11-26 James Greenhalgh <james.greenhalgh@arm.com>
+
+ * config/aarch64/aarch64-builtins.c
+ (aarch64_type_qualifiers): Add qualifier_poly.
+ (aarch64_build_scalar_type): Also build Poly types.
+ (aarch64_build_vector_type): Likewise.
+ (aarch64_build_type): Likewise.
+ (aarch64_build_signed_type): New.
+ (aarch64_build_unsigned_type): Likewise.
+ (aarch64_build_poly_type): Likewise.
+ (aarch64_init_simd_builtins): Also handle Poly types.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r205383.
+ 2013-11-26 James Greenhalgh <james.greenhalgh@arm.com>
+
+ * config/aarch64/aarch64-builtins.c
+ (VAR1): Use new naming scheme for aarch64_builtins.
+ (aarch64_builtin_vectorized_function): Use new
+ aarch64_builtins names.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r205092.
+ 2013-11-20 James Greenhalgh <james.greenhalgh@arm.com>
+
+ * gcc/config/aarch64/aarch64-builtins.c
+ (aarch64_simd_itype): Remove.
+ (aarch64_simd_builtin_datum): Remove itype, add
+ qualifiers pointer.
+ (VAR1): Use qualifiers.
+ (aarch64_build_scalar_type): New.
+ (aarch64_build_vector_type): Likewise.
+ (aarch64_build_type): Likewise.
+ (aarch64_init_simd_builtins): Refactor, remove special cases,
+ consolidate main loop.
+ (aarch64_simd_expand_args): Likewise.
+
2014-02-01 Christophe Lyon <christophe.lyon@linaro.org>
Backport from trunk r202875,202980.
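The AArch64 ChangeLog entries above add SHA-256 intrinsics to arm_neon.h. A minimal sketch of one compression step, assuming the usual split of the working state into a hash_abcd vector and a hash_efgh vector; the helper name is hypothetical, the intrinsic names (vsha256hq_u32, vsha256h2q_u32) are from the patch.

/* Sketch: one SHA-256 compression step with the new AArch64 intrinsics.  */
#include <arm_neon.h>

#ifdef __ARM_FEATURE_CRYPTO
void
sha256_step (uint32x4_t *abcd, uint32x4_t *efgh, uint32x4_t wk)
{
  uint32x4_t abcd_prev = *abcd;
  *abcd = vsha256hq_u32 (*abcd, *efgh, wk);	 /* SHA256H   */
  *efgh = vsha256h2q_u32 (*efgh, abcd_prev, wk); /* SHA256H2  */
}
#endif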
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 2194dd4c2d1..f17bd31605e 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -4282,7 +4282,8 @@ TEXI_GCC_FILES = gcc.texi gcc-common.texi gcc-vers.texi frontends.texi \
gcov.texi trouble.texi bugreport.texi service.texi \
contribute.texi compat.texi funding.texi gnu.texi gpl_v3.texi \
fdl.texi contrib.texi cppenv.texi cppopts.texi avr-mmcu.texi \
- implement-c.texi implement-cxx.texi arm-neon-intrinsics.texi
+ implement-c.texi implement-cxx.texi arm-neon-intrinsics.texi \
+ arm-acle-intrinsics.texi
# we explicitly use $(srcdir)/doc/tm.texi here to avoid confusion with
# the generated tm.texi; the latter might have a more recent timestamp,
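Besides the documentation wiring above, the patch introduces the poly64_t/poly128_t types and the carry-less multiply intrinsics on AArch64. A minimal sketch under those definitions; the wrapper name is hypothetical, the vmull_p64 intrinsic and type names are from the patch.

/* Sketch: 64x64 -> 128-bit carry-less multiply (the building block of
   GHASH-style authentication) via the new vmull_p64 intrinsic.  */
#include <arm_neon.h>

#ifdef __ARM_FEATURE_CRYPTO
poly128_t
clmul_64x64 (poly64_t a, poly64_t b)
{
  return vmull_p64 (a, b);	/* PMULL  */
}
#endif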
diff --git a/gcc/config.gcc b/gcc/config.gcc
index 064790c3a5a..c1012637b47 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -325,7 +325,7 @@ am33_2.0-*-linux*)
;;
arm*-*-*)
cpu_type=arm
- extra_headers="mmintrin.h arm_neon.h"
+ extra_headers="mmintrin.h arm_neon.h arm_acle.h"
target_type_format_char='%'
c_target_objs="arm-c.o"
cxx_target_objs="arm-c.o"
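config.gcc now installs arm_acle.h alongside arm_neon.h, and the intrinsics in both headers are gated by the predefined feature macros this patch adds. As a final hedged sketch, the SHA-1 usage pattern on AArch64: the vsha1cq_u32/vsha1h_u32 names are from the patch, the helper name is hypothetical, and the rotation of the E word through vsha1h_u32 follows the dataflow the instructions expect.

/* Sketch: one group of SHA-1 "choose" rounds with the new intrinsics.  */
#include <arm_neon.h>

#ifdef __ARM_FEATURE_CRYPTO
void
sha1_c_rounds (uint32x4_t *abcd, uint32_t *e, uint32x4_t wk)
{
  uint32_t e_next = vsha1h_u32 (vgetq_lane_u32 (*abcd, 0)); /* SHA1H  */
  *abcd = vsha1cq_u32 (*abcd, *e, wk);			    /* SHA1C  */
  *e = e_next;
}
#endif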
diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index 6816b9cfdaa..fdec1040f6c 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -80,57 +80,122 @@ enum aarch64_simd_builtin_type_mode
#define UP(X) X##_UP
-typedef enum
+#define SIMD_MAX_BUILTIN_ARGS 5
+
+enum aarch64_type_qualifiers
{
- AARCH64_SIMD_BINOP,
- AARCH64_SIMD_TERNOP,
- AARCH64_SIMD_QUADOP,
- AARCH64_SIMD_UNOP,
- AARCH64_SIMD_GETLANE,
- AARCH64_SIMD_SETLANE,
- AARCH64_SIMD_CREATE,
- AARCH64_SIMD_DUP,
- AARCH64_SIMD_DUPLANE,
- AARCH64_SIMD_COMBINE,
- AARCH64_SIMD_SPLIT,
- AARCH64_SIMD_LANEMUL,
- AARCH64_SIMD_LANEMULL,
- AARCH64_SIMD_LANEMULH,
- AARCH64_SIMD_LANEMAC,
- AARCH64_SIMD_SCALARMUL,
- AARCH64_SIMD_SCALARMULL,
- AARCH64_SIMD_SCALARMULH,
- AARCH64_SIMD_SCALARMAC,
- AARCH64_SIMD_CONVERT,
- AARCH64_SIMD_FIXCONV,
- AARCH64_SIMD_SELECT,
- AARCH64_SIMD_RESULTPAIR,
- AARCH64_SIMD_REINTERP,
- AARCH64_SIMD_VTBL,
- AARCH64_SIMD_VTBX,
- AARCH64_SIMD_LOAD1,
- AARCH64_SIMD_LOAD1LANE,
- AARCH64_SIMD_STORE1,
- AARCH64_SIMD_STORE1LANE,
- AARCH64_SIMD_LOADSTRUCT,
- AARCH64_SIMD_LOADSTRUCTLANE,
- AARCH64_SIMD_STORESTRUCT,
- AARCH64_SIMD_STORESTRUCTLANE,
- AARCH64_SIMD_LOGICBINOP,
- AARCH64_SIMD_SHIFTINSERT,
- AARCH64_SIMD_SHIFTIMM,
- AARCH64_SIMD_SHIFTACC
-} aarch64_simd_itype;
+ /* T foo. */
+ qualifier_none = 0x0,
+ /* unsigned T foo. */
+ qualifier_unsigned = 0x1, /* 1 << 0 */
+ /* const T foo. */
+ qualifier_const = 0x2, /* 1 << 1 */
+ /* T *foo. */
+ qualifier_pointer = 0x4, /* 1 << 2 */
+ /* const T *foo. */
+ qualifier_const_pointer = 0x6, /* qualifier_const | qualifier_pointer */
+ /* Used when expanding arguments if an operand could
+ be an immediate. */
+ qualifier_immediate = 0x8, /* 1 << 3 */
+ qualifier_maybe_immediate = 0x10, /* 1 << 4 */
+ /* void foo (...). */
+ qualifier_void = 0x20, /* 1 << 5 */
+ /* Some patterns may have internal operands, this qualifier is an
+ instruction to the initialisation code to skip this operand. */
+ qualifier_internal = 0x40, /* 1 << 6 */
+ /* Some builtins should use the T_*mode* encoded in a simd_builtin_datum
+ rather than using the type of the operand. */
+ qualifier_map_mode = 0x80, /* 1 << 7 */
+ /* qualifier_pointer | qualifier_map_mode */
+ qualifier_pointer_map_mode = 0x84,
+ /* qualifier_const_pointer | qualifier_map_mode */
+ qualifier_const_pointer_map_mode = 0x86,
+ /* Polynomial types. */
+ qualifier_poly = 0x100
+};
typedef struct
{
const char *name;
- const aarch64_simd_itype itype;
enum aarch64_simd_builtin_type_mode mode;
const enum insn_code code;
unsigned int fcode;
+ enum aarch64_type_qualifiers *qualifiers;
} aarch64_simd_builtin_datum;
+static enum aarch64_type_qualifiers
+aarch64_types_unop_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none };
+#define TYPES_UNOP (aarch64_types_unop_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_unopu_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_unsigned };
+#define TYPES_UNOPU (aarch64_types_unopu_qualifiers)
+#define TYPES_CREATE (aarch64_types_unop_qualifiers)
+#define TYPES_REINTERP (aarch64_types_unop_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_binop_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_maybe_immediate };
+#define TYPES_BINOP (aarch64_types_binop_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_binopu_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_unsigned, qualifier_unsigned };
+#define TYPES_BINOPU (aarch64_types_binopu_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_binopp_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_poly, qualifier_poly, qualifier_poly };
+#define TYPES_BINOPP (aarch64_types_binopp_qualifiers)
+
+static enum aarch64_type_qualifiers
+aarch64_types_ternop_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_none, qualifier_none };
+#define TYPES_TERNOP (aarch64_types_ternop_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_ternopu_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_unsigned,
+ qualifier_unsigned, qualifier_unsigned };
+#define TYPES_TERNOPU (aarch64_types_ternopu_qualifiers)
+
+static enum aarch64_type_qualifiers
+aarch64_types_quadop_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_none,
+ qualifier_none, qualifier_none };
+#define TYPES_QUADOP (aarch64_types_quadop_qualifiers)
+
+static enum aarch64_type_qualifiers
+aarch64_types_getlane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_immediate };
+#define TYPES_GETLANE (aarch64_types_getlane_qualifiers)
+#define TYPES_SHIFTIMM (aarch64_types_getlane_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_setlane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_none, qualifier_immediate };
+#define TYPES_SETLANE (aarch64_types_setlane_qualifiers)
+#define TYPES_SHIFTINSERT (aarch64_types_setlane_qualifiers)
+#define TYPES_SHIFTACC (aarch64_types_setlane_qualifiers)
+
+static enum aarch64_type_qualifiers
+aarch64_types_combine_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_none };
+#define TYPES_COMBINE (aarch64_types_combine_qualifiers)
+
+static enum aarch64_type_qualifiers
+aarch64_types_load1_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_const_pointer_map_mode };
+#define TYPES_LOAD1 (aarch64_types_load1_qualifiers)
+#define TYPES_LOADSTRUCT (aarch64_types_load1_qualifiers)
+
+/* The first argument (return type) of a store should be void type,
+ which we represent with qualifier_void. Their first operand will be
+ a DImode pointer to the location to store to, so we must use
+ qualifier_map_mode | qualifier_pointer to build a pointer to the
+ element type of the vector. */
+static enum aarch64_type_qualifiers
+aarch64_types_store1_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_void, qualifier_pointer_map_mode, qualifier_none };
+#define TYPES_STORE1 (aarch64_types_store1_qualifiers)
+#define TYPES_STORESTRUCT (aarch64_types_store1_qualifiers)
+
#define CF0(N, X) CODE_FOR_aarch64_##N##X
#define CF1(N, X) CODE_FOR_##N##X##1
#define CF2(N, X) CODE_FOR_##N##X##2
@@ -139,7 +204,7 @@ typedef struct
#define CF10(N, X) CODE_FOR_##N##X
#define VAR1(T, N, MAP, A) \
- {#N, AARCH64_SIMD_##T, UP (A), CF##MAP (N, A), 0},
+ {#N, UP (A), CF##MAP (N, A), 0, TYPES_##T},
#define VAR2(T, N, MAP, A, B) \
VAR1 (T, N, MAP, A) \
VAR1 (T, N, MAP, B)
@@ -261,7 +326,7 @@ static aarch64_simd_builtin_datum aarch64_simd_builtin_data[] = {
#undef VAR1
#define VAR1(T, N, MAP, A) \
- AARCH64_SIMD_BUILTIN_##N##A,
+ AARCH64_SIMD_BUILTIN_##T##_##N##A,
enum aarch64_builtins
{
@@ -278,118 +343,212 @@ static GTY(()) tree aarch64_builtin_decls[AARCH64_BUILTIN_MAX];
#define NUM_DREG_TYPES 6
#define NUM_QREG_TYPES 6
+/* Return a tree for a signed or unsigned argument of either
+ the mode specified by MODE, or the inner mode of MODE. */
+tree
+aarch64_build_scalar_type (enum machine_mode mode,
+ bool unsigned_p,
+ bool poly_p)
+{
+#undef INT_TYPES
+#define INT_TYPES \
+ AARCH64_TYPE_BUILDER (QI) \
+ AARCH64_TYPE_BUILDER (HI) \
+ AARCH64_TYPE_BUILDER (SI) \
+ AARCH64_TYPE_BUILDER (DI) \
+ AARCH64_TYPE_BUILDER (EI) \
+ AARCH64_TYPE_BUILDER (OI) \
+ AARCH64_TYPE_BUILDER (CI) \
+ AARCH64_TYPE_BUILDER (XI) \
+ AARCH64_TYPE_BUILDER (TI) \
+
+/* Statically declare all the possible types we might need. */
+#undef AARCH64_TYPE_BUILDER
+#define AARCH64_TYPE_BUILDER(X) \
+ static tree X##_aarch64_type_node_p = NULL; \
+ static tree X##_aarch64_type_node_s = NULL; \
+ static tree X##_aarch64_type_node_u = NULL;
+
+ INT_TYPES
+
+ static tree float_aarch64_type_node = NULL;
+ static tree double_aarch64_type_node = NULL;
+
+ gcc_assert (!VECTOR_MODE_P (mode));
+
+/* If we've already initialised this type, don't initialise it again,
+ otherwise ask for a new type of the correct size. */
+#undef AARCH64_TYPE_BUILDER
+#define AARCH64_TYPE_BUILDER(X) \
+ case X##mode: \
+ if (unsigned_p) \
+ return (X##_aarch64_type_node_u \
+ ? X##_aarch64_type_node_u \
+ : X##_aarch64_type_node_u \
+ = make_unsigned_type (GET_MODE_PRECISION (mode))); \
+ else if (poly_p) \
+ return (X##_aarch64_type_node_p \
+ ? X##_aarch64_type_node_p \
+ : X##_aarch64_type_node_p \
+ = make_unsigned_type (GET_MODE_PRECISION (mode))); \
+ else \
+ return (X##_aarch64_type_node_s \
+ ? X##_aarch64_type_node_s \
+ : X##_aarch64_type_node_s \
+ = make_signed_type (GET_MODE_PRECISION (mode))); \
+ break;
+
+ switch (mode)
+ {
+ INT_TYPES
+ case SFmode:
+ if (!float_aarch64_type_node)
+ {
+ float_aarch64_type_node = make_node (REAL_TYPE);
+ TYPE_PRECISION (float_aarch64_type_node) = FLOAT_TYPE_SIZE;
+ layout_type (float_aarch64_type_node);
+ }
+ return float_aarch64_type_node;
+ break;
+ case DFmode:
+ if (!double_aarch64_type_node)
+ {
+ double_aarch64_type_node = make_node (REAL_TYPE);
+ TYPE_PRECISION (double_aarch64_type_node) = DOUBLE_TYPE_SIZE;
+ layout_type (double_aarch64_type_node);
+ }
+ return double_aarch64_type_node;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+tree
+aarch64_build_vector_type (enum machine_mode mode,
+ bool unsigned_p,
+ bool poly_p)
+{
+ tree eltype;
+
+#define VECTOR_TYPES \
+ AARCH64_TYPE_BUILDER (V16QI) \
+ AARCH64_TYPE_BUILDER (V8HI) \
+ AARCH64_TYPE_BUILDER (V4SI) \
+ AARCH64_TYPE_BUILDER (V2DI) \
+ AARCH64_TYPE_BUILDER (V8QI) \
+ AARCH64_TYPE_BUILDER (V4HI) \
+ AARCH64_TYPE_BUILDER (V2SI) \
+ \
+ AARCH64_TYPE_BUILDER (V4SF) \
+ AARCH64_TYPE_BUILDER (V2DF) \
+ AARCH64_TYPE_BUILDER (V2SF) \
+/* Declare our "cache" of values. */
+#undef AARCH64_TYPE_BUILDER
+#define AARCH64_TYPE_BUILDER(X) \
+ static tree X##_aarch64_type_node_s = NULL; \
+ static tree X##_aarch64_type_node_u = NULL; \
+ static tree X##_aarch64_type_node_p = NULL;
+
+ VECTOR_TYPES
+
+ gcc_assert (VECTOR_MODE_P (mode));
+
+#undef AARCH64_TYPE_BUILDER
+#define AARCH64_TYPE_BUILDER(X) \
+ case X##mode: \
+ if (unsigned_p) \
+ return X##_aarch64_type_node_u \
+ ? X##_aarch64_type_node_u \
+ : X##_aarch64_type_node_u \
+ = build_vector_type_for_mode (aarch64_build_scalar_type \
+ (GET_MODE_INNER (mode), \
+ unsigned_p, poly_p), mode); \
+ else if (poly_p) \
+ return X##_aarch64_type_node_p \
+ ? X##_aarch64_type_node_p \
+ : X##_aarch64_type_node_p \
+ = build_vector_type_for_mode (aarch64_build_scalar_type \
+ (GET_MODE_INNER (mode), \
+ unsigned_p, poly_p), mode); \
+ else \
+ return X##_aarch64_type_node_s \
+ ? X##_aarch64_type_node_s \
+ : X##_aarch64_type_node_s \
+ = build_vector_type_for_mode (aarch64_build_scalar_type \
+ (GET_MODE_INNER (mode), \
+ unsigned_p, poly_p), mode); \
+ break;
+
+ switch (mode)
+ {
+ default:
+ eltype = aarch64_build_scalar_type (GET_MODE_INNER (mode),
+ unsigned_p, poly_p);
+ return build_vector_type_for_mode (eltype, mode);
+ break;
+ VECTOR_TYPES
+ }
+}
+
+tree
+aarch64_build_type (enum machine_mode mode, bool unsigned_p, bool poly_p)
+{
+ if (VECTOR_MODE_P (mode))
+ return aarch64_build_vector_type (mode, unsigned_p, poly_p);
+ else
+ return aarch64_build_scalar_type (mode, unsigned_p, poly_p);
+}
+
+tree
+aarch64_build_signed_type (enum machine_mode mode)
+{
+ return aarch64_build_type (mode, false, false);
+}
+
+tree
+aarch64_build_unsigned_type (enum machine_mode mode)
+{
+ return aarch64_build_type (mode, true, false);
+}
+
+tree
+aarch64_build_poly_type (enum machine_mode mode)
+{
+ return aarch64_build_type (mode, false, true);
+}
+
static void
aarch64_init_simd_builtins (void)
{
unsigned int i, fcode = AARCH64_SIMD_BUILTIN_BASE + 1;
- /* Scalar type nodes. */
- tree aarch64_simd_intQI_type_node;
- tree aarch64_simd_intHI_type_node;
- tree aarch64_simd_polyQI_type_node;
- tree aarch64_simd_polyHI_type_node;
- tree aarch64_simd_intSI_type_node;
- tree aarch64_simd_intDI_type_node;
- tree aarch64_simd_float_type_node;
- tree aarch64_simd_double_type_node;
-
- /* Pointer to scalar type nodes. */
- tree intQI_pointer_node;
- tree intHI_pointer_node;
- tree intSI_pointer_node;
- tree intDI_pointer_node;
- tree float_pointer_node;
- tree double_pointer_node;
-
- /* Const scalar type nodes. */
- tree const_intQI_node;
- tree const_intHI_node;
- tree const_intSI_node;
- tree const_intDI_node;
- tree const_float_node;
- tree const_double_node;
-
- /* Pointer to const scalar type nodes. */
- tree const_intQI_pointer_node;
- tree const_intHI_pointer_node;
- tree const_intSI_pointer_node;
- tree const_intDI_pointer_node;
- tree const_float_pointer_node;
- tree const_double_pointer_node;
-
- /* Vector type nodes. */
- tree V8QI_type_node;
- tree V4HI_type_node;
- tree V2SI_type_node;
- tree V2SF_type_node;
- tree V16QI_type_node;
- tree V8HI_type_node;
- tree V4SI_type_node;
- tree V4SF_type_node;
- tree V2DI_type_node;
- tree V2DF_type_node;
-
- /* Scalar unsigned type nodes. */
- tree intUQI_type_node;
- tree intUHI_type_node;
- tree intUSI_type_node;
- tree intUDI_type_node;
-
- /* Opaque integer types for structures of vectors. */
- tree intEI_type_node;
- tree intOI_type_node;
- tree intCI_type_node;
- tree intXI_type_node;
-
- /* Pointer to vector type nodes. */
- tree V8QI_pointer_node;
- tree V4HI_pointer_node;
- tree V2SI_pointer_node;
- tree V2SF_pointer_node;
- tree V16QI_pointer_node;
- tree V8HI_pointer_node;
- tree V4SI_pointer_node;
- tree V4SF_pointer_node;
- tree V2DI_pointer_node;
- tree V2DF_pointer_node;
-
- /* Operations which return results as pairs. */
- tree void_ftype_pv8qi_v8qi_v8qi;
- tree void_ftype_pv4hi_v4hi_v4hi;
- tree void_ftype_pv2si_v2si_v2si;
- tree void_ftype_pv2sf_v2sf_v2sf;
- tree void_ftype_pdi_di_di;
- tree void_ftype_pv16qi_v16qi_v16qi;
- tree void_ftype_pv8hi_v8hi_v8hi;
- tree void_ftype_pv4si_v4si_v4si;
- tree void_ftype_pv4sf_v4sf_v4sf;
- tree void_ftype_pv2di_v2di_v2di;
- tree void_ftype_pv2df_v2df_v2df;
-
- tree reinterp_ftype_dreg[NUM_DREG_TYPES][NUM_DREG_TYPES];
- tree reinterp_ftype_qreg[NUM_QREG_TYPES][NUM_QREG_TYPES];
- tree dreg_types[NUM_DREG_TYPES], qreg_types[NUM_QREG_TYPES];
-
- /* Create distinguished type nodes for AARCH64_SIMD vector element types,
- and pointers to values of such types, so we can detect them later. */
- aarch64_simd_intQI_type_node =
- make_signed_type (GET_MODE_PRECISION (QImode));
- aarch64_simd_intHI_type_node =
- make_signed_type (GET_MODE_PRECISION (HImode));
- aarch64_simd_polyQI_type_node =
- make_signed_type (GET_MODE_PRECISION (QImode));
- aarch64_simd_polyHI_type_node =
- make_signed_type (GET_MODE_PRECISION (HImode));
- aarch64_simd_intSI_type_node =
- make_signed_type (GET_MODE_PRECISION (SImode));
- aarch64_simd_intDI_type_node =
- make_signed_type (GET_MODE_PRECISION (DImode));
- aarch64_simd_float_type_node = make_node (REAL_TYPE);
- aarch64_simd_double_type_node = make_node (REAL_TYPE);
- TYPE_PRECISION (aarch64_simd_float_type_node) = FLOAT_TYPE_SIZE;
- TYPE_PRECISION (aarch64_simd_double_type_node) = DOUBLE_TYPE_SIZE;
- layout_type (aarch64_simd_float_type_node);
- layout_type (aarch64_simd_double_type_node);
+ /* Signed scalar type nodes. */
+ tree aarch64_simd_intQI_type_node = aarch64_build_signed_type (QImode);
+ tree aarch64_simd_intHI_type_node = aarch64_build_signed_type (HImode);
+ tree aarch64_simd_intSI_type_node = aarch64_build_signed_type (SImode);
+ tree aarch64_simd_intDI_type_node = aarch64_build_signed_type (DImode);
+ tree aarch64_simd_intTI_type_node = aarch64_build_signed_type (TImode);
+ tree aarch64_simd_intEI_type_node = aarch64_build_signed_type (EImode);
+ tree aarch64_simd_intOI_type_node = aarch64_build_signed_type (OImode);
+ tree aarch64_simd_intCI_type_node = aarch64_build_signed_type (CImode);
+ tree aarch64_simd_intXI_type_node = aarch64_build_signed_type (XImode);
+
+ /* Unsigned scalar type nodes. */
+ tree aarch64_simd_intUQI_type_node = aarch64_build_unsigned_type (QImode);
+ tree aarch64_simd_intUHI_type_node = aarch64_build_unsigned_type (HImode);
+ tree aarch64_simd_intUSI_type_node = aarch64_build_unsigned_type (SImode);
+ tree aarch64_simd_intUDI_type_node = aarch64_build_unsigned_type (DImode);
+
+ /* Poly scalar type nodes. */
+ tree aarch64_simd_polyQI_type_node = aarch64_build_poly_type (QImode);
+ tree aarch64_simd_polyHI_type_node = aarch64_build_poly_type (HImode);
+ tree aarch64_simd_polyDI_type_node = aarch64_build_poly_type (DImode);
+ tree aarch64_simd_polyTI_type_node = aarch64_build_poly_type (TImode);
+
+ /* Float type nodes. */
+ tree aarch64_simd_float_type_node = aarch64_build_signed_type (SFmode);
+ tree aarch64_simd_double_type_node = aarch64_build_signed_type (DFmode);
/* Define typedefs which exactly correspond to the modes we are basing vector
types on. If you change these names you'll need to change
@@ -410,518 +569,139 @@ aarch64_init_simd_builtins (void)
"__builtin_aarch64_simd_poly8");
(*lang_hooks.types.register_builtin_type) (aarch64_simd_polyHI_type_node,
"__builtin_aarch64_simd_poly16");
-
- intQI_pointer_node = build_pointer_type (aarch64_simd_intQI_type_node);
- intHI_pointer_node = build_pointer_type (aarch64_simd_intHI_type_node);
- intSI_pointer_node = build_pointer_type (aarch64_simd_intSI_type_node);
- intDI_pointer_node = build_pointer_type (aarch64_simd_intDI_type_node);
- float_pointer_node = build_pointer_type (aarch64_simd_float_type_node);
- double_pointer_node = build_pointer_type (aarch64_simd_double_type_node);
-
- /* Next create constant-qualified versions of the above types. */
- const_intQI_node = build_qualified_type (aarch64_simd_intQI_type_node,
- TYPE_QUAL_CONST);
- const_intHI_node = build_qualified_type (aarch64_simd_intHI_type_node,
- TYPE_QUAL_CONST);
- const_intSI_node = build_qualified_type (aarch64_simd_intSI_type_node,
- TYPE_QUAL_CONST);
- const_intDI_node = build_qualified_type (aarch64_simd_intDI_type_node,
- TYPE_QUAL_CONST);
- const_float_node = build_qualified_type (aarch64_simd_float_type_node,
- TYPE_QUAL_CONST);
- const_double_node = build_qualified_type (aarch64_simd_double_type_node,
- TYPE_QUAL_CONST);
-
- const_intQI_pointer_node = build_pointer_type (const_intQI_node);
- const_intHI_pointer_node = build_pointer_type (const_intHI_node);
- const_intSI_pointer_node = build_pointer_type (const_intSI_node);
- const_intDI_pointer_node = build_pointer_type (const_intDI_node);
- const_float_pointer_node = build_pointer_type (const_float_node);
- const_double_pointer_node = build_pointer_type (const_double_node);
-
- /* Now create vector types based on our AARCH64 SIMD element types. */
- /* 64-bit vectors. */
- V8QI_type_node =
- build_vector_type_for_mode (aarch64_simd_intQI_type_node, V8QImode);
- V4HI_type_node =
- build_vector_type_for_mode (aarch64_simd_intHI_type_node, V4HImode);
- V2SI_type_node =
- build_vector_type_for_mode (aarch64_simd_intSI_type_node, V2SImode);
- V2SF_type_node =
- build_vector_type_for_mode (aarch64_simd_float_type_node, V2SFmode);
- /* 128-bit vectors. */
- V16QI_type_node =
- build_vector_type_for_mode (aarch64_simd_intQI_type_node, V16QImode);
- V8HI_type_node =
- build_vector_type_for_mode (aarch64_simd_intHI_type_node, V8HImode);
- V4SI_type_node =
- build_vector_type_for_mode (aarch64_simd_intSI_type_node, V4SImode);
- V4SF_type_node =
- build_vector_type_for_mode (aarch64_simd_float_type_node, V4SFmode);
- V2DI_type_node =
- build_vector_type_for_mode (aarch64_simd_intDI_type_node, V2DImode);
- V2DF_type_node =
- build_vector_type_for_mode (aarch64_simd_double_type_node, V2DFmode);
-
- /* Unsigned integer types for various mode sizes. */
- intUQI_type_node = make_unsigned_type (GET_MODE_PRECISION (QImode));
- intUHI_type_node = make_unsigned_type (GET_MODE_PRECISION (HImode));
- intUSI_type_node = make_unsigned_type (GET_MODE_PRECISION (SImode));
- intUDI_type_node = make_unsigned_type (GET_MODE_PRECISION (DImode));
-
- (*lang_hooks.types.register_builtin_type) (intUQI_type_node,
- "__builtin_aarch64_simd_uqi");
- (*lang_hooks.types.register_builtin_type) (intUHI_type_node,
- "__builtin_aarch64_simd_uhi");
- (*lang_hooks.types.register_builtin_type) (intUSI_type_node,
- "__builtin_aarch64_simd_usi");
- (*lang_hooks.types.register_builtin_type) (intUDI_type_node,
- "__builtin_aarch64_simd_udi");
-
- /* Opaque integer types for structures of vectors. */
- intEI_type_node = make_signed_type (GET_MODE_PRECISION (EImode));
- intOI_type_node = make_signed_type (GET_MODE_PRECISION (OImode));
- intCI_type_node = make_signed_type (GET_MODE_PRECISION (CImode));
- intXI_type_node = make_signed_type (GET_MODE_PRECISION (XImode));
-
- (*lang_hooks.types.register_builtin_type) (intTI_type_node,
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyDI_type_node,
+ "__builtin_aarch64_simd_poly64");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyTI_type_node,
+ "__builtin_aarch64_simd_poly128");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intTI_type_node,
"__builtin_aarch64_simd_ti");
- (*lang_hooks.types.register_builtin_type) (intEI_type_node,
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intEI_type_node,
"__builtin_aarch64_simd_ei");
- (*lang_hooks.types.register_builtin_type) (intOI_type_node,
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intOI_type_node,
"__builtin_aarch64_simd_oi");
- (*lang_hooks.types.register_builtin_type) (intCI_type_node,
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intCI_type_node,
"__builtin_aarch64_simd_ci");
- (*lang_hooks.types.register_builtin_type) (intXI_type_node,
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intXI_type_node,
"__builtin_aarch64_simd_xi");
- /* Pointers to vector types. */
- V8QI_pointer_node = build_pointer_type (V8QI_type_node);
- V4HI_pointer_node = build_pointer_type (V4HI_type_node);
- V2SI_pointer_node = build_pointer_type (V2SI_type_node);
- V2SF_pointer_node = build_pointer_type (V2SF_type_node);
- V16QI_pointer_node = build_pointer_type (V16QI_type_node);
- V8HI_pointer_node = build_pointer_type (V8HI_type_node);
- V4SI_pointer_node = build_pointer_type (V4SI_type_node);
- V4SF_pointer_node = build_pointer_type (V4SF_type_node);
- V2DI_pointer_node = build_pointer_type (V2DI_type_node);
- V2DF_pointer_node = build_pointer_type (V2DF_type_node);
-
- /* Operations which return results as pairs. */
- void_ftype_pv8qi_v8qi_v8qi =
- build_function_type_list (void_type_node, V8QI_pointer_node,
- V8QI_type_node, V8QI_type_node, NULL);
- void_ftype_pv4hi_v4hi_v4hi =
- build_function_type_list (void_type_node, V4HI_pointer_node,
- V4HI_type_node, V4HI_type_node, NULL);
- void_ftype_pv2si_v2si_v2si =
- build_function_type_list (void_type_node, V2SI_pointer_node,
- V2SI_type_node, V2SI_type_node, NULL);
- void_ftype_pv2sf_v2sf_v2sf =
- build_function_type_list (void_type_node, V2SF_pointer_node,
- V2SF_type_node, V2SF_type_node, NULL);
- void_ftype_pdi_di_di =
- build_function_type_list (void_type_node, intDI_pointer_node,
- aarch64_simd_intDI_type_node,
- aarch64_simd_intDI_type_node, NULL);
- void_ftype_pv16qi_v16qi_v16qi =
- build_function_type_list (void_type_node, V16QI_pointer_node,
- V16QI_type_node, V16QI_type_node, NULL);
- void_ftype_pv8hi_v8hi_v8hi =
- build_function_type_list (void_type_node, V8HI_pointer_node,
- V8HI_type_node, V8HI_type_node, NULL);
- void_ftype_pv4si_v4si_v4si =
- build_function_type_list (void_type_node, V4SI_pointer_node,
- V4SI_type_node, V4SI_type_node, NULL);
- void_ftype_pv4sf_v4sf_v4sf =
- build_function_type_list (void_type_node, V4SF_pointer_node,
- V4SF_type_node, V4SF_type_node, NULL);
- void_ftype_pv2di_v2di_v2di =
- build_function_type_list (void_type_node, V2DI_pointer_node,
- V2DI_type_node, V2DI_type_node, NULL);
- void_ftype_pv2df_v2df_v2df =
- build_function_type_list (void_type_node, V2DF_pointer_node,
- V2DF_type_node, V2DF_type_node, NULL);
-
- dreg_types[0] = V8QI_type_node;
- dreg_types[1] = V4HI_type_node;
- dreg_types[2] = V2SI_type_node;
- dreg_types[3] = V2SF_type_node;
- dreg_types[4] = aarch64_simd_intDI_type_node;
- dreg_types[5] = aarch64_simd_double_type_node;
-
- qreg_types[0] = V16QI_type_node;
- qreg_types[1] = V8HI_type_node;
- qreg_types[2] = V4SI_type_node;
- qreg_types[3] = V4SF_type_node;
- qreg_types[4] = V2DI_type_node;
- qreg_types[5] = V2DF_type_node;
-
- /* If NUM_DREG_TYPES != NUM_QREG_TYPES, we will need separate nested loops
- for qreg and dreg reinterp inits. */
- for (i = 0; i < NUM_DREG_TYPES; i++)
- {
- int j;
- for (j = 0; j < NUM_DREG_TYPES; j++)
- {
- reinterp_ftype_dreg[i][j]
- = build_function_type_list (dreg_types[i], dreg_types[j], NULL);
- reinterp_ftype_qreg[i][j]
- = build_function_type_list (qreg_types[i], qreg_types[j], NULL);
- }
- }
+ /* Unsigned integer types for various mode sizes. */
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUQI_type_node,
+ "__builtin_aarch64_simd_uqi");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUHI_type_node,
+ "__builtin_aarch64_simd_uhi");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUSI_type_node,
+ "__builtin_aarch64_simd_usi");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUDI_type_node,
+ "__builtin_aarch64_simd_udi");
for (i = 0; i < ARRAY_SIZE (aarch64_simd_builtin_data); i++, fcode++)
{
+ bool print_type_signature_p = false;
+ char type_signature[SIMD_MAX_BUILTIN_ARGS] = { 0 };
aarch64_simd_builtin_datum *d = &aarch64_simd_builtin_data[i];
const char *const modenames[] =
- {
- "v8qi", "v4hi", "v2si", "v2sf", "di", "df",
- "v16qi", "v8hi", "v4si", "v4sf", "v2di", "v2df",
- "ti", "ei", "oi", "xi", "si", "sf", "hi", "qi"
- };
+ {
+ "v8qi", "v4hi", "v2si", "v2sf", "di", "df",
+ "v16qi", "v8hi", "v4si", "v4sf", "v2di", "v2df",
+ "ti", "ei", "oi", "xi", "si", "sf", "hi", "qi"
+ };
+ const enum machine_mode modes[] =
+ {
+ V8QImode, V4HImode, V2SImode, V2SFmode, DImode, DFmode,
+ V16QImode, V8HImode, V4SImode, V4SFmode, V2DImode,
+ V2DFmode, TImode, EImode, OImode, XImode, SImode,
+ SFmode, HImode, QImode
+ };
char namebuf[60];
tree ftype = NULL;
tree fndecl = NULL;
- int is_load = 0;
- int is_store = 0;
gcc_assert (ARRAY_SIZE (modenames) == T_MAX);
d->fcode = fcode;
- switch (d->itype)
+ /* We must track two variables here. op_num is
+ the operand number as in the RTL pattern. This is
+ required to access the mode (e.g. V4SF mode) of the
+ argument, from which the base type can be derived.
+ arg_num is an index in to the qualifiers data, which
+ gives qualifiers to the type (e.g. const unsigned).
+ The reason these two variables may differ by one is the
+ void return type. While all return types take the 0th entry
+ in the qualifiers array, there is no operand for them in the
+ RTL pattern. */
+ int op_num = insn_data[d->code].n_operands - 1;
+ int arg_num = d->qualifiers[0] & qualifier_void
+ ? op_num + 1
+ : op_num;
+ tree return_type = void_type_node, args = void_list_node;
+ tree eltype;
+
+ /* Build a function type directly from the insn_data for this
+ builtin. The build_function_type () function takes care of
+ removing duplicates for us. */
+ for (; op_num >= 0; arg_num--, op_num--)
{
- case AARCH64_SIMD_LOAD1:
- case AARCH64_SIMD_LOAD1LANE:
- case AARCH64_SIMD_LOADSTRUCT:
- case AARCH64_SIMD_LOADSTRUCTLANE:
- is_load = 1;
- /* Fall through. */
- case AARCH64_SIMD_STORE1:
- case AARCH64_SIMD_STORE1LANE:
- case AARCH64_SIMD_STORESTRUCT:
- case AARCH64_SIMD_STORESTRUCTLANE:
- if (!is_load)
- is_store = 1;
- /* Fall through. */
- case AARCH64_SIMD_UNOP:
- case AARCH64_SIMD_BINOP:
- case AARCH64_SIMD_TERNOP:
- case AARCH64_SIMD_QUADOP:
- case AARCH64_SIMD_COMBINE:
- case AARCH64_SIMD_CONVERT:
- case AARCH64_SIMD_CREATE:
- case AARCH64_SIMD_DUP:
- case AARCH64_SIMD_DUPLANE:
- case AARCH64_SIMD_FIXCONV:
- case AARCH64_SIMD_GETLANE:
- case AARCH64_SIMD_LANEMAC:
- case AARCH64_SIMD_LANEMUL:
- case AARCH64_SIMD_LANEMULH:
- case AARCH64_SIMD_LANEMULL:
- case AARCH64_SIMD_LOGICBINOP:
- case AARCH64_SIMD_SCALARMAC:
- case AARCH64_SIMD_SCALARMUL:
- case AARCH64_SIMD_SCALARMULH:
- case AARCH64_SIMD_SCALARMULL:
- case AARCH64_SIMD_SELECT:
- case AARCH64_SIMD_SETLANE:
- case AARCH64_SIMD_SHIFTACC:
- case AARCH64_SIMD_SHIFTIMM:
- case AARCH64_SIMD_SHIFTINSERT:
- case AARCH64_SIMD_SPLIT:
- case AARCH64_SIMD_VTBL:
- case AARCH64_SIMD_VTBX:
- {
- int k;
- tree return_type = void_type_node, args = void_list_node;
- tree eltype;
- /* Build a function type directly from the insn_data for this
- builtin. The build_function_type () function takes care of
- removing duplicates for us. */
-
- for (k = insn_data[d->code].n_operands -1; k >= 0; k--)
- {
- /* Skip an internal operand for vget_{low, high}. */
- if (k == 2 && d->itype == AARCH64_SIMD_SPLIT)
- continue;
-
- if (is_load && k == 1)
- {
- /* AdvSIMD load patterns always have the memory operand
- (a DImode pointer) in the operand 1 position. We
- want a const pointer to the element type in that
- position. */
- gcc_assert (insn_data[d->code].operand[k].mode == DImode);
-
- switch (d->mode)
- {
- case T_V8QI:
- case T_V16QI:
- eltype = const_intQI_pointer_node;
- break;
-
- case T_V4HI:
- case T_V8HI:
- eltype = const_intHI_pointer_node;
- break;
-
- case T_V2SI:
- case T_V4SI:
- eltype = const_intSI_pointer_node;
- break;
-
- case T_V2SF:
- case T_V4SF:
- eltype = const_float_pointer_node;
- break;
-
- case T_DI:
- case T_V2DI:
- eltype = const_intDI_pointer_node;
- break;
-
- case T_DF:
- case T_V2DF:
- eltype = const_double_pointer_node;
- break;
-
- default:
- gcc_unreachable ();
- }
- }
- else if (is_store && k == 0)
- {
- /* Similarly, AdvSIMD store patterns use operand 0 as
- the memory location to store to (a DImode pointer).
- Use a pointer to the element type of the store in
- that position. */
- gcc_assert (insn_data[d->code].operand[k].mode == DImode);
-
- switch (d->mode)
- {
- case T_V8QI:
- case T_V16QI:
- eltype = intQI_pointer_node;
- break;
-
- case T_V4HI:
- case T_V8HI:
- eltype = intHI_pointer_node;
- break;
-
- case T_V2SI:
- case T_V4SI:
- eltype = intSI_pointer_node;
- break;
-
- case T_V2SF:
- case T_V4SF:
- eltype = float_pointer_node;
- break;
-
- case T_DI:
- case T_V2DI:
- eltype = intDI_pointer_node;
- break;
-
- case T_DF:
- case T_V2DF:
- eltype = double_pointer_node;
- break;
-
- default:
- gcc_unreachable ();
- }
- }
- else
- {
- switch (insn_data[d->code].operand[k].mode)
- {
- case VOIDmode:
- eltype = void_type_node;
- break;
- /* Scalars. */
- case QImode:
- eltype = aarch64_simd_intQI_type_node;
- break;
- case HImode:
- eltype = aarch64_simd_intHI_type_node;
- break;
- case SImode:
- eltype = aarch64_simd_intSI_type_node;
- break;
- case SFmode:
- eltype = aarch64_simd_float_type_node;
- break;
- case DFmode:
- eltype = aarch64_simd_double_type_node;
- break;
- case DImode:
- eltype = aarch64_simd_intDI_type_node;
- break;
- case TImode:
- eltype = intTI_type_node;
- break;
- case EImode:
- eltype = intEI_type_node;
- break;
- case OImode:
- eltype = intOI_type_node;
- break;
- case CImode:
- eltype = intCI_type_node;
- break;
- case XImode:
- eltype = intXI_type_node;
- break;
- /* 64-bit vectors. */
- case V8QImode:
- eltype = V8QI_type_node;
- break;
- case V4HImode:
- eltype = V4HI_type_node;
- break;
- case V2SImode:
- eltype = V2SI_type_node;
- break;
- case V2SFmode:
- eltype = V2SF_type_node;
- break;
- /* 128-bit vectors. */
- case V16QImode:
- eltype = V16QI_type_node;
- break;
- case V8HImode:
- eltype = V8HI_type_node;
- break;
- case V4SImode:
- eltype = V4SI_type_node;
- break;
- case V4SFmode:
- eltype = V4SF_type_node;
- break;
- case V2DImode:
- eltype = V2DI_type_node;
- break;
- case V2DFmode:
- eltype = V2DF_type_node;
- break;
- default:
- gcc_unreachable ();
- }
- }
-
- if (k == 0 && !is_store)
- return_type = eltype;
- else
- args = tree_cons (NULL_TREE, eltype, args);
- }
- ftype = build_function_type (return_type, args);
- }
- break;
+ enum machine_mode op_mode = insn_data[d->code].operand[op_num].mode;
+ enum aarch64_type_qualifiers qualifiers = d->qualifiers[arg_num];
- case AARCH64_SIMD_RESULTPAIR:
- {
- switch (insn_data[d->code].operand[1].mode)
- {
- case V8QImode:
- ftype = void_ftype_pv8qi_v8qi_v8qi;
- break;
- case V4HImode:
- ftype = void_ftype_pv4hi_v4hi_v4hi;
- break;
- case V2SImode:
- ftype = void_ftype_pv2si_v2si_v2si;
- break;
- case V2SFmode:
- ftype = void_ftype_pv2sf_v2sf_v2sf;
- break;
- case DImode:
- ftype = void_ftype_pdi_di_di;
- break;
- case V16QImode:
- ftype = void_ftype_pv16qi_v16qi_v16qi;
- break;
- case V8HImode:
- ftype = void_ftype_pv8hi_v8hi_v8hi;
- break;
- case V4SImode:
- ftype = void_ftype_pv4si_v4si_v4si;
- break;
- case V4SFmode:
- ftype = void_ftype_pv4sf_v4sf_v4sf;
- break;
- case V2DImode:
- ftype = void_ftype_pv2di_v2di_v2di;
- break;
- case V2DFmode:
- ftype = void_ftype_pv2df_v2df_v2df;
- break;
- default:
- gcc_unreachable ();
- }
- }
- break;
+ if (qualifiers & qualifier_unsigned)
+ {
+ type_signature[arg_num] = 'u';
+ print_type_signature_p = true;
+ }
+ else if (qualifiers & qualifier_poly)
+ {
+ type_signature[arg_num] = 'p';
+ print_type_signature_p = true;
+ }
+ else
+ type_signature[arg_num] = 's';
+
+ /* Skip an internal operand for vget_{low, high}. */
+ if (qualifiers & qualifier_internal)
+ continue;
+
+ /* Some builtins have different user-facing types
+ for certain arguments, encoded in d->mode. */
+ if (qualifiers & qualifier_map_mode)
+ op_mode = modes[d->mode];
+
+ /* For pointers, we want a pointer to the basic type
+ of the vector. */
+ if (qualifiers & qualifier_pointer && VECTOR_MODE_P (op_mode))
+ op_mode = GET_MODE_INNER (op_mode);
+
+ eltype = aarch64_build_type (op_mode,
+ qualifiers & qualifier_unsigned,
+ qualifiers & qualifier_poly);
+
+ /* Add qualifiers. */
+ if (qualifiers & qualifier_const)
+ eltype = build_qualified_type (eltype, TYPE_QUAL_CONST);
+
+ if (qualifiers & qualifier_pointer)
+ eltype = build_pointer_type (eltype);
+
+ /* If we have reached arg_num == 0, we are at a non-void
+ return type. Otherwise, we are still processing
+ arguments. */
+ if (arg_num == 0)
+ return_type = eltype;
+ else
+ args = tree_cons (NULL_TREE, eltype, args);
+ }
- case AARCH64_SIMD_REINTERP:
- {
- /* We iterate over 6 doubleword types, then 6 quadword
- types. */
- int rhs_d = d->mode % NUM_DREG_TYPES;
- int rhs_q = (d->mode - NUM_DREG_TYPES) % NUM_QREG_TYPES;
- switch (insn_data[d->code].operand[0].mode)
- {
- case V8QImode:
- ftype = reinterp_ftype_dreg[0][rhs_d];
- break;
- case V4HImode:
- ftype = reinterp_ftype_dreg[1][rhs_d];
- break;
- case V2SImode:
- ftype = reinterp_ftype_dreg[2][rhs_d];
- break;
- case V2SFmode:
- ftype = reinterp_ftype_dreg[3][rhs_d];
- break;
- case DImode:
- ftype = reinterp_ftype_dreg[4][rhs_d];
- break;
- case DFmode:
- ftype = reinterp_ftype_dreg[5][rhs_d];
- break;
- case V16QImode:
- ftype = reinterp_ftype_qreg[0][rhs_q];
- break;
- case V8HImode:
- ftype = reinterp_ftype_qreg[1][rhs_q];
- break;
- case V4SImode:
- ftype = reinterp_ftype_qreg[2][rhs_q];
- break;
- case V4SFmode:
- ftype = reinterp_ftype_qreg[3][rhs_q];
- break;
- case V2DImode:
- ftype = reinterp_ftype_qreg[4][rhs_q];
- break;
- case V2DFmode:
- ftype = reinterp_ftype_qreg[5][rhs_q];
- break;
- default:
- gcc_unreachable ();
- }
- }
- break;
+ ftype = build_function_type (return_type, args);
- default:
- gcc_unreachable ();
- }
gcc_assert (ftype != NULL);
- snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s%s",
- d->name, modenames[d->mode]);
+ if (print_type_signature_p)
+ snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s%s_%s",
+ d->name, modenames[d->mode], type_signature);
+ else
+ snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s%s",
+ d->name, modenames[d->mode]);
fndecl = add_builtin_function (namebuf, ftype, fcode, BUILT_IN_MD,
NULL, NULL_TREE);
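For reference, a minimal stand-alone sketch (illustrative only, not part of the patch) of the naming scheme the snprintf calls above implement: base name, mode suffix, then one character per return value and argument ('u' unsigned, 'p' poly, 's' signed). The resulting name matches the wrappers added to arm_neon.h later in this patch.

#include <stdio.h>

int
main (void)
{
  const char *name = "crypto_aese";
  const char *mode = "v16qi";
  const char *sig  = "uuu";   /* unsigned return plus two unsigned arguments */
  char namebuf[60];

  snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s%s_%s",
            name, mode, sig);
  printf ("%s\n", namebuf);   /* prints __builtin_aarch64_crypto_aesev16qi_uuu */
  return 0;
}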
@@ -952,8 +732,6 @@ typedef enum
SIMD_ARG_STOP
} builtin_simd_arg;
-#define SIMD_MAX_BUILTIN_ARGS 5
-
static rtx
aarch64_simd_expand_args (rtx target, int icode, int have_retval,
tree exp, ...)
@@ -1079,99 +857,58 @@ aarch64_simd_expand_builtin (int fcode, tree exp, rtx target)
{
aarch64_simd_builtin_datum *d =
&aarch64_simd_builtin_data[fcode - (AARCH64_SIMD_BUILTIN_BASE + 1)];
- aarch64_simd_itype itype = d->itype;
enum insn_code icode = d->code;
+ builtin_simd_arg args[SIMD_MAX_BUILTIN_ARGS];
+ int num_args = insn_data[d->code].n_operands;
+ int is_void = 0;
+ int k;
- switch (itype)
- {
- case AARCH64_SIMD_UNOP:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_STOP);
+ is_void = !!(d->qualifiers[0] & qualifier_void);
- case AARCH64_SIMD_BINOP:
- {
- rtx arg2 = expand_normal (CALL_EXPR_ARG (exp, 1));
- /* Handle constants only if the predicate allows it. */
- bool op1_const_int_p =
- (CONST_INT_P (arg2)
- && (*insn_data[icode].operand[2].predicate)
- (arg2, insn_data[icode].operand[2].mode));
- return aarch64_simd_expand_args
- (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- op1_const_int_p ? SIMD_ARG_CONSTANT : SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_STOP);
- }
+ num_args += is_void;
+
+ for (k = 1; k < num_args; k++)
+ {
+ /* We have four arrays of data, each indexed in a different fashion.
+ qualifiers - element 0 always describes the function return type.
+ operands - element 0 is either the operand for return value (if
+ the function has a non-void return type) or the operand for the
+ first argument.
+ expr_args - element 0 always holds the first argument.
+ args - element 0 is always used for the return type. */
+ int qualifiers_k = k;
+ int operands_k = k - is_void;
+ int expr_args_k = k - 1;
+
+ if (d->qualifiers[qualifiers_k] & qualifier_immediate)
+ args[k] = SIMD_ARG_CONSTANT;
+ else if (d->qualifiers[qualifiers_k] & qualifier_maybe_immediate)
+ {
+ rtx arg
+ = expand_normal (CALL_EXPR_ARG (exp,
+ (expr_args_k)));
+ /* Handle constants only if the predicate allows it. */
+ bool op_const_int_p =
+ (CONST_INT_P (arg)
+ && (*insn_data[icode].operand[operands_k].predicate)
+ (arg, insn_data[icode].operand[operands_k].mode));
+ args[k] = op_const_int_p ? SIMD_ARG_CONSTANT : SIMD_ARG_COPY_TO_REG;
+ }
+ else
+ args[k] = SIMD_ARG_COPY_TO_REG;
- case AARCH64_SIMD_TERNOP:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_STOP);
-
- case AARCH64_SIMD_QUADOP:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_STOP);
- case AARCH64_SIMD_LOAD1:
- case AARCH64_SIMD_LOADSTRUCT:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
-
- case AARCH64_SIMD_STORE1:
- case AARCH64_SIMD_STORESTRUCT:
- return aarch64_simd_expand_args (target, icode, 0, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
-
- case AARCH64_SIMD_REINTERP:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
-
- case AARCH64_SIMD_CREATE:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
-
- case AARCH64_SIMD_COMBINE:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
-
- case AARCH64_SIMD_GETLANE:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_CONSTANT,
- SIMD_ARG_STOP);
-
- case AARCH64_SIMD_SETLANE:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_CONSTANT,
- SIMD_ARG_STOP);
-
- case AARCH64_SIMD_SHIFTIMM:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_CONSTANT,
- SIMD_ARG_STOP);
-
- case AARCH64_SIMD_SHIFTACC:
- case AARCH64_SIMD_SHIFTINSERT:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_CONSTANT,
- SIMD_ARG_STOP);
-
- default:
- gcc_unreachable ();
}
+ args[k] = SIMD_ARG_STOP;
+
+ /* The interface to aarch64_simd_expand_args expects a 0 if
+ the function is void, and a 1 if it is not. */
+ return aarch64_simd_expand_args
+ (target, icode, !is_void, exp,
+ args[1],
+ args[2],
+ args[3],
+ args[4],
+ SIMD_ARG_STOP);
}
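A compact stand-alone sketch (hypothetical names, not the GCC API) of the per-argument decision made in the loop above: an argument marked qualifier_immediate must be a constant, one marked qualifier_maybe_immediate is a constant only when the insn predicate accepts it, and everything else is copied to a register.

enum simd_arg_kind { ARG_COPY_TO_REG, ARG_CONSTANT };

enum simd_arg_kind
classify_arg (int is_immediate, int maybe_immediate, int const_accepted)
{
  if (is_immediate)
    return ARG_CONSTANT;            /* qualifier_immediate */
  if (maybe_immediate && const_accepted)
    return ARG_CONSTANT;            /* constant allowed by the predicate */
  return ARG_COPY_TO_REG;           /* default: force the value into a register */
}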
/* Expand an expression EXP that calls a built-in function,
@@ -1211,11 +948,11 @@ aarch64_builtin_vectorized_function (tree fndecl, tree type_out, tree type_in)
#define AARCH64_CHECK_BUILTIN_MODE(C, N) 1
#define AARCH64_FIND_FRINT_VARIANT(N) \
(AARCH64_CHECK_BUILTIN_MODE (2, D) \
- ? aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_##N##v2df] \
+ ? aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_UNOP_##N##v2df] \
: (AARCH64_CHECK_BUILTIN_MODE (4, S) \
- ? aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_##N##v4sf] \
+ ? aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_UNOP_##N##v4sf] \
: (AARCH64_CHECK_BUILTIN_MODE (2, S) \
- ? aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_##N##v2sf] \
+ ? aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_UNOP_##N##v2sf] \
: NULL_TREE)))
if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
{
@@ -1251,7 +988,7 @@ aarch64_builtin_vectorized_function (tree fndecl, tree type_out, tree type_in)
case BUILT_IN_CLZ:
{
if (AARCH64_CHECK_BUILTIN_MODE (4, S))
- return aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_clzv4si];
+ return aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_UNOP_clzv4si];
return NULL_TREE;
}
#undef AARCH64_CHECK_BUILTIN_MODE
@@ -1261,47 +998,47 @@ aarch64_builtin_vectorized_function (tree fndecl, tree type_out, tree type_in)
case BUILT_IN_LFLOOR:
case BUILT_IN_IFLOORF:
{
- tree new_tree = NULL_TREE;
+ enum aarch64_builtins builtin;
if (AARCH64_CHECK_BUILTIN_MODE (2, D))
- new_tree =
- aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_lfloorv2dfv2di];
+ builtin = AARCH64_SIMD_BUILTIN_UNOP_lfloorv2dfv2di;
else if (AARCH64_CHECK_BUILTIN_MODE (4, S))
- new_tree =
- aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_lfloorv4sfv4si];
+ builtin = AARCH64_SIMD_BUILTIN_UNOP_lfloorv4sfv4si;
else if (AARCH64_CHECK_BUILTIN_MODE (2, S))
- new_tree =
- aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_lfloorv2sfv2si];
- return new_tree;
+ builtin = AARCH64_SIMD_BUILTIN_UNOP_lfloorv2sfv2si;
+ else
+ return NULL_TREE;
+
+ return aarch64_builtin_decls[builtin];
}
case BUILT_IN_LCEIL:
case BUILT_IN_ICEILF:
{
- tree new_tree = NULL_TREE;
+ enum aarch64_builtins builtin;
if (AARCH64_CHECK_BUILTIN_MODE (2, D))
- new_tree =
- aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_lceilv2dfv2di];
+ builtin = AARCH64_SIMD_BUILTIN_UNOP_lceilv2dfv2di;
else if (AARCH64_CHECK_BUILTIN_MODE (4, S))
- new_tree =
- aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_lceilv4sfv4si];
+ builtin = AARCH64_SIMD_BUILTIN_UNOP_lceilv4sfv4si;
else if (AARCH64_CHECK_BUILTIN_MODE (2, S))
- new_tree =
- aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_lceilv2sfv2si];
- return new_tree;
+ builtin = AARCH64_SIMD_BUILTIN_UNOP_lceilv2sfv2si;
+ else
+ return NULL_TREE;
+
+ return aarch64_builtin_decls[builtin];
}
case BUILT_IN_LROUND:
case BUILT_IN_IROUNDF:
{
- tree new_tree = NULL_TREE;
+ enum aarch64_builtins builtin;
if (AARCH64_CHECK_BUILTIN_MODE (2, D))
- new_tree =
- aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_lroundv2dfv2di];
+ builtin = AARCH64_SIMD_BUILTIN_UNOP_lroundv2dfv2di;
else if (AARCH64_CHECK_BUILTIN_MODE (4, S))
- new_tree =
- aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_lroundv4sfv4si];
+ builtin = AARCH64_SIMD_BUILTIN_UNOP_lroundv4sfv4si;
else if (AARCH64_CHECK_BUILTIN_MODE (2, S))
- new_tree =
- aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_lroundv2sfv2si];
- return new_tree;
+ builtin = AARCH64_SIMD_BUILTIN_UNOP_lroundv2sfv2si;
+ else
+ return NULL_TREE;
+
+ return aarch64_builtin_decls[builtin];
}
default:
@@ -1314,7 +1051,7 @@ aarch64_builtin_vectorized_function (tree fndecl, tree type_out, tree type_in)
#undef VAR1
#define VAR1(T, N, MAP, A) \
- case AARCH64_SIMD_BUILTIN_##N##A:
+ case AARCH64_SIMD_BUILTIN_##T##_##N##A:
tree
aarch64_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *args,
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 4046d7a7001..a7abfe24bcd 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -359,3 +359,26 @@
/* Implemented by aarch64_st1<VALL:mode>. */
BUILTIN_VALL (STORE1, st1, 0)
+ /* Implemented by aarch64_crypto_aes<op><mode>. */
+ VAR1 (BINOPU, crypto_aese, 0, v16qi)
+ VAR1 (BINOPU, crypto_aesd, 0, v16qi)
+ VAR1 (UNOPU, crypto_aesmc, 0, v16qi)
+ VAR1 (UNOPU, crypto_aesimc, 0, v16qi)
+
+ /* Implemented by aarch64_crypto_sha1<op><mode>. */
+ VAR1 (UNOPU, crypto_sha1h, 0, si)
+ VAR1 (BINOPU, crypto_sha1su1, 0, v4si)
+ VAR1 (TERNOPU, crypto_sha1c, 0, v4si)
+ VAR1 (TERNOPU, crypto_sha1m, 0, v4si)
+ VAR1 (TERNOPU, crypto_sha1p, 0, v4si)
+ VAR1 (TERNOPU, crypto_sha1su0, 0, v4si)
+
+ /* Implemented by aarch64_crypto_sha256<op><mode>. */
+ VAR1 (TERNOPU, crypto_sha256h, 0, v4si)
+ VAR1 (TERNOPU, crypto_sha256h2, 0, v4si)
+ VAR1 (BINOPU, crypto_sha256su0, 0, v4si)
+ VAR1 (TERNOPU, crypto_sha256su1, 0, v4si)
+
+ /* Implemented by aarch64_crypto_pmull<mode>. */
+ VAR1 (BINOPP, crypto_pmull, 0, di)
+ VAR1 (BINOPP, crypto_pmull, 0, v2di)
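The class name in each entry (UNOPU, BINOPU, TERNOPU, BINOPP) selects the qualifier set and is spliced into the enumerator via the AARCH64_SIMD_BUILTIN_##T##_##N##A pattern updated earlier in this patch. A stand-alone illustration of the expansion for two of the entries above:

#define VAR1(T, N, MAP, A) AARCH64_SIMD_BUILTIN_##T##_##N##A,
enum demo_builtins
{
  VAR1 (TERNOPU, crypto_sha1c, 0, v4si) /* AARCH64_SIMD_BUILTIN_TERNOPU_crypto_sha1cv4si */
  VAR1 (BINOPP, crypto_pmull, 0, di)    /* AARCH64_SIMD_BUILTIN_BINOPP_crypto_pmulldi */
};
#undef VAR1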
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 7a27f6c7644..6caac8f351c 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -21,7 +21,7 @@
; Main data types used by the instructions
-(define_attr "simd_mode" "unknown,none,V8QI,V16QI,V4HI,V8HI,V2SI,V4SI,V2DI,V2SF,V4SF,V2DF,OI,CI,XI,DI,DF,SI,SF,HI,QI"
+(define_attr "simd_mode" "unknown,none,V8QI,V16QI,V4HI,V8HI,V2SI,V4SI,V2DI,V2SF,V4SF,V2DF,OI,CI,XI,TI,DI,DF,SI,SF,HI,QI"
(const_string "unknown"))
@@ -195,6 +195,7 @@
simd_move,\
simd_move_imm,\
simd_mul,\
+ simd_mul_d_long,\
simd_mul_elt,\
simd_mull,\
simd_mull_elt,\
@@ -235,6 +236,12 @@
simd_trn,\
simd_uzp,\
simd_zip,\
+ simd_crypto_aes,\
+ simd_crypto_sha1_xor,\
+ simd_crypto_sha1_fast,\
+ simd_crypto_sha1_slow,\
+ simd_crypto_sha256_fast,\
+ simd_crypto_sha256_slow,\
none"
(const_string "none"))
@@ -4174,3 +4181,125 @@
(set_attr "simd_mode" "<MODE>")]
)
+;; aes
+
+(define_insn "aarch64_crypto_aes<aes_op>v16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=w")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "0")
+ (match_operand:V16QI 2 "register_operand" "w")]
+ CRYPTO_AES))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "aes<aes_op>\\t%0.16b, %2.16b"
+ [(set_attr "simd_type" "simd_crypto_aes")
+ (set_attr "simd_mode" "V16QI")])
+
+(define_insn "aarch64_crypto_aes<aesmc_op>v16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=w")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "w")]
+ CRYPTO_AESMC))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "aes<aesmc_op>\\t%0.16b, %1.16b"
+ [(set_attr "simd_type" "simd_crypto_aes")
+ (set_attr "simd_mode" "V16QI")])
+
+;; sha1
+
+(define_insn "aarch64_crypto_sha1hsi"
+ [(set (match_operand:SI 0 "register_operand" "=w")
+ (unspec:SI [(match_operand:SI 1
+ "register_operand" "w")]
+ UNSPEC_SHA1H))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "sha1h\\t%s0, %s1"
+ [(set_attr "simd_type" "simd_crypto_sha1_fast")
+ (set_attr "simd_mode" "SI")])
+
+(define_insn "aarch64_crypto_sha1su1v4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=w")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:V4SI 2 "register_operand" "w")]
+ UNSPEC_SHA1SU1))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "sha1su1\\t%0.4s, %2.4s"
+ [(set_attr "simd_type" "simd_crypto_sha1_fast")
+ (set_attr "simd_mode" "V4SI")])
+
+(define_insn "aarch64_crypto_sha1<sha1_op>v4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=w")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "w")
+ (match_operand:V4SI 3 "register_operand" "w")]
+ CRYPTO_SHA1))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "sha1<sha1_op>\\t%q0, %s2, %3.4s"
+ [(set_attr "simd_type" "simd_crypto_sha1_slow")
+ (set_attr "simd_mode" "V4SI")])
+
+(define_insn "aarch64_crypto_sha1su0v4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=w")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:V4SI 2 "register_operand" "w")
+ (match_operand:V4SI 3 "register_operand" "w")]
+ UNSPEC_SHA1SU0))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "sha1su0\\t%0.4s, %2.4s, %3.4s"
+ [(set_attr "simd_type" "simd_crypto_sha1_xor")
+ (set_attr "simd_mode" "V4SI")])
+
+
+;; sha256
+
+(define_insn "aarch64_crypto_sha256h<sha256_op>v4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=w")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:V4SI 2 "register_operand" "w")
+ (match_operand:V4SI 3 "register_operand" "w")]
+ CRYPTO_SHA256))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "sha256h<sha256_op>\\t%q0, %q2, %3.4s"
+ [(set_attr "simd_type" "simd_crypto_sha256_slow")
+ (set_attr "simd_mode" "V4SI")])
+
+(define_insn "aarch64_crypto_sha256su0v4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=w")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:V4SI 2 "register_operand" "w")]
+ UNSPEC_SHA256SU0))]
+ "TARGET_SIMD &&TARGET_CRYPTO"
+ "sha256su0\\t%0.4s, %2.4s"
+ [(set_attr "simd_type" "simd_crypto_sha256_fast")
+ (set_attr "simd_mode" "V4SI")])
+
+(define_insn "aarch64_crypto_sha256su1v4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=w")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:V4SI 2 "register_operand" "w")
+ (match_operand:V4SI 3 "register_operand" "w")]
+ UNSPEC_SHA256SU1))]
+ "TARGET_SIMD &&TARGET_CRYPTO"
+ "sha256su1\\t%0.4s, %2.4s, %3.4s"
+ [(set_attr "simd_type""simd_crypto_sha256_slow")
+ (set_attr "simd_mode" "V4SI")])
+
+
+;; pmull
+
+(define_insn "aarch64_crypto_pmulldi"
+ [(set (match_operand:TI 0 "register_operand" "=w")
+ (unspec:TI [(match_operand:DI 1 "register_operand" "w")
+ (match_operand:DI 2 "register_operand" "w")]
+ UNSPEC_PMULL))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "pmull\\t%0.1q, %1.1d, %2.1d"
+ [(set_attr "simd_type" "simd_mul_d_long")
+ (set_attr "simd_mode" "TI")])
+
+(define_insn "aarch64_crypto_pmullv2di"
+ [(set (match_operand:TI 0 "register_operand" "=w")
+ (unspec:TI [(match_operand:V2DI 1 "register_operand" "w")
+ (match_operand:V2DI 2 "register_operand" "w")]
+ UNSPEC_PMULL2))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "pmull2\\t%0.1q, %1.2d, %2.2d"
+ [(set_attr "simd_type" "simd_mul_d_long")
+ (set_attr "simd_mode" "TI")]) \ No newline at end of file
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 0e0203f8250..31bb3f8dfcb 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -6247,6 +6247,7 @@ static aarch64_simd_mangle_map_entry aarch64_simd_mangle_map[] = {
{ V2DFmode, "__builtin_aarch64_simd_df", "13__Float64x2_t" },
{ V16QImode, "__builtin_aarch64_simd_poly8", "12__Poly8x16_t" },
{ V8HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x8_t" },
+ { V2DImode, "__builtin_aarch64_simd_poly64", "12__Poly64x2_t" },
{ VOIDmode, NULL, NULL }
};
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index 1317b18f18b..99fcce5662d 100644
--- a/gcc/config/aarch64/aarch64.h
+++ b/gcc/config/aarch64/aarch64.h
@@ -49,6 +49,8 @@
break; \
} \
\
+ if (TARGET_CRYPTO) \
+ builtin_define ("__ARM_FEATURE_CRYPTO"); \
} while (0)
@@ -173,6 +175,8 @@ extern unsigned long aarch64_isa_flags;
extern unsigned long aarch64_tune_flags;
#define AARCH64_TUNE_SLOWMUL (aarch64_tune_flags & AARCH64_FL_SLOWMUL)
+/* Crypto is an optional feature. */
+#define TARGET_CRYPTO AARCH64_ISA_CRYPTO
/* Standard register usage. */
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index e630f081fca..60f23e74e34 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -75,6 +75,8 @@ typedef __builtin_aarch64_simd_poly8 poly8x16_t
__attribute__ ((__vector_size__ (16)));
typedef __builtin_aarch64_simd_poly16 poly16x8_t
__attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_poly64 poly64x2_t
+ __attribute__ ((__vector_size__ (16)));
typedef __builtin_aarch64_simd_uqi uint8x16_t
__attribute__ ((__vector_size__ (16)));
typedef __builtin_aarch64_simd_uhi uint16x8_t
@@ -88,6 +90,8 @@ typedef float float32_t;
typedef double float64_t;
typedef __builtin_aarch64_simd_poly8 poly8_t;
typedef __builtin_aarch64_simd_poly16 poly16_t;
+typedef __builtin_aarch64_simd_poly64 poly64_t;
+typedef __builtin_aarch64_simd_poly128 poly128_t;
typedef struct int8x8x2_t
{
@@ -17540,6 +17544,37 @@ vaddvq_f64 (float64x2_t __a)
return vgetq_lane_f64 (t, 0);
}
+#ifdef __ARM_FEATURE_CRYPTO
+
+/* vaes */
+
+static __inline uint8x16_t
+vaeseq_u8 (uint8x16_t data, uint8x16_t key)
+{
+ return __builtin_aarch64_crypto_aesev16qi_uuu (data, key);
+}
+
+static __inline uint8x16_t
+__attribute__ ((__always_inline__))
+vaesdq_u8 (uint8x16_t data, uint8x16_t key)
+{
+ return __builtin_aarch64_crypto_aesdv16qi_uuu (data, key);
+}
+
+static __inline uint8x16_t
+vaesmcq_u8 (uint8x16_t data)
+{
+ return __builtin_aarch64_crypto_aesmcv16qi_uu (data);
+}
+
+static __inline uint8x16_t
+vaesimcq_u8 (uint8x16_t data)
+{
+ return __builtin_aarch64_crypto_aesimcv16qi_uu (data);
+}
+
+#endif
+
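A hedged usage sketch of the wrappers above (illustrative only, not part of the patch): in the ARMv8 formulation, one AES encryption round is AESE (AddRoundKey, SubBytes, ShiftRows) followed by AESMC (MixColumns).

#include <arm_neon.h>

#ifdef __ARM_FEATURE_CRYPTO
static uint8x16_t
aes_round (uint8x16_t state, uint8x16_t round_key)
{
  return vaesmcq_u8 (vaeseq_u8 (state, round_key));
}
#endif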
/* vcage */
__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
@@ -23373,6 +23408,83 @@ vrsrad_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
return (uint64x1_t) __builtin_aarch64_ursra_ndi (__a, __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+
+/* vsha1 */
+
+static __inline uint32x4_t
+vsha1cq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk)
+{
+ return __builtin_aarch64_crypto_sha1cv4si_uuuu (hash_abcd, hash_e, wk);
+}
+static __inline uint32x4_t
+vsha1mq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk)
+{
+ return __builtin_aarch64_crypto_sha1mv4si_uuuu (hash_abcd, hash_e, wk);
+}
+static __inline uint32x4_t
+vsha1pq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk)
+{
+ return __builtin_aarch64_crypto_sha1pv4si_uuuu (hash_abcd, hash_e, wk);
+}
+
+static __inline uint32_t
+vsha1h_u32 (uint32_t hash_e)
+{
+ return __builtin_aarch64_crypto_sha1hsi_uu (hash_e);
+}
+
+static __inline uint32x4_t
+vsha1su0q_u32 (uint32x4_t w0_3, uint32x4_t w4_7, uint32x4_t w8_11)
+{
+ return __builtin_aarch64_crypto_sha1su0v4si_uuuu (w0_3, w4_7, w8_11);
+}
+
+static __inline uint32x4_t
+vsha1su1q_u32 (uint32x4_t tw0_3, uint32x4_t w12_15)
+{
+ return __builtin_aarch64_crypto_sha1su1v4si_uuu (tw0_3, w12_15);
+}
+
+static __inline uint32x4_t
+vsha256hq_u32 (uint32x4_t hash_abcd, uint32x4_t hash_efgh, uint32x4_t wk)
+{
+ return __builtin_aarch64_crypto_sha256hv4si_uuuu (hash_abcd, hash_efgh, wk);
+}
+
+static __inline uint32x4_t
+vsha256h2q_u32 (uint32x4_t hash_efgh, uint32x4_t hash_abcd, uint32x4_t wk)
+{
+ return __builtin_aarch64_crypto_sha256h2v4si_uuuu (hash_efgh, hash_abcd, wk);
+}
+
+static __inline uint32x4_t
+vsha256su0q_u32 (uint32x4_t w0_3, uint32x4_t w4_7)
+{
+ return __builtin_aarch64_crypto_sha256su0v4si_uuu (w0_3, w4_7);
+}
+
+static __inline uint32x4_t
+vsha256su1q_u32 (uint32x4_t tw0_3, uint32x4_t w8_11, uint32x4_t w12_15)
+{
+ return __builtin_aarch64_crypto_sha256su1v4si_uuuu (tw0_3, w8_11, w12_15);
+}
+
+static __inline poly128_t
+vmull_p64 (poly64_t a, poly64_t b)
+{
+ return
+ __builtin_aarch64_crypto_pmulldi_ppp (a, b);
+}
+
+static __inline poly128_t
+vmull_high_p64 (poly64x2_t a, poly64x2_t b)
+{
+ return __builtin_aarch64_crypto_pmullv2di_ppp (a, b);
+}
+
+#endif
+
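A hedged sketch of the usual SHA-256 hash-update pairing with the intrinsics above (illustrative only): sha256h advances the ABCD half of the state while sha256h2 advances EFGH using the previous ABCD value.

#include <arm_neon.h>

#ifdef __ARM_FEATURE_CRYPTO
static void
sha256_step (uint32x4_t *abcd, uint32x4_t *efgh, uint32x4_t wk)
{
  uint32x4_t abcd_prev = *abcd;

  *abcd = vsha256hq_u32 (abcd_prev, *efgh, wk);
  *efgh = vsha256h2q_u32 (*efgh, abcd_prev, wk);
}
#endif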
/* vshl */
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 9431c2ba421..737ed68cbb6 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -245,6 +245,22 @@
UNSPEC_UZP2 ; Used in vector permute patterns.
UNSPEC_TRN1 ; Used in vector permute patterns.
UNSPEC_TRN2 ; Used in vector permute patterns.
+ UNSPEC_AESE ; Used in aarch64-simd.md.
+ UNSPEC_AESD ; Used in aarch64-simd.md.
+ UNSPEC_AESMC ; Used in aarch64-simd.md.
+ UNSPEC_AESIMC ; Used in aarch64-simd.md.
+ UNSPEC_SHA1C ; Used in aarch64-simd.md.
+ UNSPEC_SHA1M ; Used in aarch64-simd.md.
+ UNSPEC_SHA1P ; Used in aarch64-simd.md.
+ UNSPEC_SHA1H ; Used in aarch64-simd.md.
+ UNSPEC_SHA1SU0 ; Used in aarch64-simd.md.
+ UNSPEC_SHA1SU1 ; Used in aarch64-simd.md.
+ UNSPEC_SHA256H ; Used in aarch64-simd.md.
+ UNSPEC_SHA256H2 ; Used in aarch64-simd.md.
+ UNSPEC_SHA256SU0 ; Used in aarch64-simd.md.
+ UNSPEC_SHA256SU1 ; Used in aarch64-simd.md.
+ UNSPEC_PMULL ; Used in aarch64-simd.md.
+ UNSPEC_PMULL2 ; Used in aarch64-simd.md.
])
;; -------------------------------------------------------------------
@@ -764,6 +780,13 @@
(define_int_iterator FRECP [UNSPEC_FRECPE UNSPEC_FRECPX])
+(define_int_iterator CRYPTO_AES [UNSPEC_AESE UNSPEC_AESD])
+(define_int_iterator CRYPTO_AESMC [UNSPEC_AESMC UNSPEC_AESIMC])
+
+(define_int_iterator CRYPTO_SHA1 [UNSPEC_SHA1C UNSPEC_SHA1M UNSPEC_SHA1P])
+
+(define_int_iterator CRYPTO_SHA256 [UNSPEC_SHA256H UNSPEC_SHA256H2])
+
;; -------------------------------------------------------------------
;; Int Iterators Attributes.
;; -------------------------------------------------------------------
@@ -880,3 +903,11 @@
(UNSPEC_UZP1 "1") (UNSPEC_UZP2 "2")])
(define_int_attr frecp_suffix [(UNSPEC_FRECPE "e") (UNSPEC_FRECPX "x")])
+
+(define_int_attr aes_op [(UNSPEC_AESE "e") (UNSPEC_AESD "d")])
+(define_int_attr aesmc_op [(UNSPEC_AESMC "mc") (UNSPEC_AESIMC "imc")])
+
+(define_int_attr sha1_op [(UNSPEC_SHA1C "c") (UNSPEC_SHA1P "p")
+ (UNSPEC_SHA1M "m")])
+
+(define_int_attr sha256_op [(UNSPEC_SHA256H "") (UNSPEC_SHA256H2 "2")])
diff --git a/gcc/config/arm/arm-arches.def b/gcc/config/arm/arm-arches.def
index fcf34012262..9b7d20c2e23 100644
--- a/gcc/config/arm/arm-arches.def
+++ b/gcc/config/arm/arm-arches.def
@@ -54,5 +54,6 @@ ARM_ARCH("armv7-r", cortexr4, 7R, FL_CO_PROC | FL_FOR_ARCH7R)
ARM_ARCH("armv7-m", cortexm3, 7M, FL_CO_PROC | FL_FOR_ARCH7M)
ARM_ARCH("armv7e-m", cortexm4, 7EM, FL_CO_PROC | FL_FOR_ARCH7EM)
ARM_ARCH("armv8-a", cortexa53, 8A, FL_CO_PROC | FL_FOR_ARCH8A)
+ARM_ARCH("armv8-a+crc",cortexa53, 8A,FL_CO_PROC | FL_CRC32 | FL_FOR_ARCH8A)
ARM_ARCH("iwmmxt", iwmmxt, 5TE, FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT)
ARM_ARCH("iwmmxt2", iwmmxt2, 5TE, FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT | FL_IWMMXT2)
diff --git a/gcc/config/arm/arm-tables.opt b/gcc/config/arm/arm-tables.opt
index bf206959569..4cf2417fccf 100644
--- a/gcc/config/arm/arm-tables.opt
+++ b/gcc/config/arm/arm-tables.opt
@@ -359,10 +359,13 @@ EnumValue
Enum(arm_arch) String(armv8-a) Value(23)
EnumValue
-Enum(arm_arch) String(iwmmxt) Value(24)
+Enum(arm_arch) String(armv8-a+crc) Value(24)
EnumValue
-Enum(arm_arch) String(iwmmxt2) Value(25)
+Enum(arm_arch) String(iwmmxt) Value(25)
+
+EnumValue
+Enum(arm_arch) String(iwmmxt2) Value(26)
Enum
Name(arm_fpu) Type(int)
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index e90df15400a..36defc0428b 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -726,6 +726,7 @@ static int thumb_call_reg_needed;
#define FL_ARCH7 (1 << 22) /* Architecture 7. */
#define FL_ARM_DIV (1 << 23) /* Hardware divide (ARM mode). */
#define FL_ARCH8 (1 << 24) /* Architecture 8. */
+#define FL_CRC32 (1 << 25) /* ARMv8 CRC32 instructions. */
#define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
#define FL_IWMMXT2 (1 << 30) /* "Intel Wireless MMX2 technology". */
@@ -888,6 +889,9 @@ int arm_condexec_mask = 0;
/* The number of bits used in arm_condexec_mask. */
int arm_condexec_masklen = 0;
+/* Nonzero if chip supports the ARMv8 CRC instructions. */
+int arm_arch_crc = 0;
+
/* The condition codes of the ARM, and the inverse function. */
static const char * const arm_condition_codes[] =
{
@@ -1874,6 +1878,7 @@ arm_option_override (void)
arm_arch_thumb_hwdiv = (insn_flags & FL_THUMB_DIV) != 0;
arm_arch_arm_hwdiv = (insn_flags & FL_ARM_DIV) != 0;
arm_tune_cortex_a9 = (arm_tune == cortexa9) != 0;
+ arm_arch_crc = (insn_flags & FL_CRC32) != 0;
if (arm_restrict_it == 2)
arm_restrict_it = arm_arch8 && TARGET_THUMB2;
@@ -20688,6 +20693,30 @@ enum arm_builtins
ARM_BUILTIN_WMERGE,
+ ARM_BUILTIN_CRC32B,
+ ARM_BUILTIN_CRC32H,
+ ARM_BUILTIN_CRC32W,
+ ARM_BUILTIN_CRC32CB,
+ ARM_BUILTIN_CRC32CH,
+ ARM_BUILTIN_CRC32CW,
+
+#undef CRYPTO1
+#undef CRYPTO2
+#undef CRYPTO3
+
+#define CRYPTO1(L, U, M1, M2) \
+ ARM_BUILTIN_CRYPTO_##U,
+#define CRYPTO2(L, U, M1, M2, M3) \
+ ARM_BUILTIN_CRYPTO_##U,
+#define CRYPTO3(L, U, M1, M2, M3, M4) \
+ ARM_BUILTIN_CRYPTO_##U,
+
+#include "crypto.def"
+
+#undef CRYPTO1
+#undef CRYPTO2
+#undef CRYPTO3
+
#include "arm_neon_builtins.def"
,ARM_BUILTIN_MAX
@@ -20709,6 +20738,9 @@ enum arm_builtins
static GTY(()) tree arm_builtin_decls[ARM_BUILTIN_MAX];
+#define NUM_DREG_TYPES 5
+#define NUM_QREG_TYPES 6
+
static void
arm_init_neon_builtins (void)
{
@@ -20722,6 +20754,7 @@ arm_init_neon_builtins (void)
tree neon_polyHI_type_node;
tree neon_intSI_type_node;
tree neon_intDI_type_node;
+ tree neon_intUTI_type_node;
tree neon_float_type_node;
tree intQI_pointer_node;
@@ -20784,9 +20817,9 @@ arm_init_neon_builtins (void)
tree void_ftype_pv4sf_v4sf_v4sf;
tree void_ftype_pv2di_v2di_v2di;
- tree reinterp_ftype_dreg[5][5];
- tree reinterp_ftype_qreg[5][5];
- tree dreg_types[5], qreg_types[5];
+ tree reinterp_ftype_dreg[NUM_DREG_TYPES][NUM_DREG_TYPES];
+ tree reinterp_ftype_qreg[NUM_QREG_TYPES][NUM_QREG_TYPES];
+ tree dreg_types[NUM_DREG_TYPES], qreg_types[NUM_QREG_TYPES];
/* Create distinguished type nodes for NEON vector element types,
and pointers to values of such types, so we can detect them later. */
@@ -20876,6 +20909,8 @@ arm_init_neon_builtins (void)
intUHI_type_node = make_unsigned_type (GET_MODE_PRECISION (HImode));
intUSI_type_node = make_unsigned_type (GET_MODE_PRECISION (SImode));
intUDI_type_node = make_unsigned_type (GET_MODE_PRECISION (DImode));
+ neon_intUTI_type_node = make_unsigned_type (GET_MODE_PRECISION (TImode));
+
(*lang_hooks.types.register_builtin_type) (intUQI_type_node,
"__builtin_neon_uqi");
@@ -20885,6 +20920,10 @@ arm_init_neon_builtins (void)
"__builtin_neon_usi");
(*lang_hooks.types.register_builtin_type) (intUDI_type_node,
"__builtin_neon_udi");
+ (*lang_hooks.types.register_builtin_type) (intUDI_type_node,
+ "__builtin_neon_poly64");
+ (*lang_hooks.types.register_builtin_type) (neon_intUTI_type_node,
+ "__builtin_neon_poly128");
/* Opaque integer types for structures of vectors. */
intEI_type_node = make_signed_type (GET_MODE_PRECISION (EImode));
@@ -20946,6 +20985,80 @@ arm_init_neon_builtins (void)
build_function_type_list (void_type_node, V2DI_pointer_node, V2DI_type_node,
V2DI_type_node, NULL);
+ if (TARGET_CRYPTO && TARGET_HARD_FLOAT)
+ {
+ tree V4USI_type_node =
+ build_vector_type_for_mode (intUSI_type_node, V4SImode);
+
+ tree V16UQI_type_node =
+ build_vector_type_for_mode (intUQI_type_node, V16QImode);
+
+ tree v16uqi_ftype_v16uqi
+ = build_function_type_list (V16UQI_type_node, V16UQI_type_node, NULL_TREE);
+
+ tree v16uqi_ftype_v16uqi_v16uqi
+ = build_function_type_list (V16UQI_type_node, V16UQI_type_node,
+ V16UQI_type_node, NULL_TREE);
+
+ tree v4usi_ftype_v4usi
+ = build_function_type_list (V4USI_type_node, V4USI_type_node, NULL_TREE);
+
+ tree v4usi_ftype_v4usi_v4usi
+ = build_function_type_list (V4USI_type_node, V4USI_type_node,
+ V4USI_type_node, NULL_TREE);
+
+ tree v4usi_ftype_v4usi_v4usi_v4usi
+ = build_function_type_list (V4USI_type_node, V4USI_type_node,
+ V4USI_type_node, V4USI_type_node, NULL_TREE);
+
+ tree uti_ftype_udi_udi
+ = build_function_type_list (neon_intUTI_type_node, intUDI_type_node,
+ intUDI_type_node, NULL_TREE);
+
+ #undef CRYPTO1
+ #undef CRYPTO2
+ #undef CRYPTO3
+ #undef C
+ #undef N
+ #undef CF
+ #undef FT1
+ #undef FT2
+ #undef FT3
+
+ #define C(U) \
+ ARM_BUILTIN_CRYPTO_##U
+ #define N(L) \
+ "__builtin_arm_crypto_"#L
+ #define FT1(R, A) \
+ R##_ftype_##A
+ #define FT2(R, A1, A2) \
+ R##_ftype_##A1##_##A2
+ #define FT3(R, A1, A2, A3) \
+ R##_ftype_##A1##_##A2##_##A3
+ #define CRYPTO1(L, U, R, A) \
+ arm_builtin_decls[C (U)] = add_builtin_function (N (L), FT1 (R, A), \
+ C (U), BUILT_IN_MD, \
+ NULL, NULL_TREE);
+ #define CRYPTO2(L, U, R, A1, A2) \
+ arm_builtin_decls[C (U)] = add_builtin_function (N (L), FT2 (R, A1, A2), \
+ C (U), BUILT_IN_MD, \
+ NULL, NULL_TREE);
+
+ #define CRYPTO3(L, U, R, A1, A2, A3) \
+ arm_builtin_decls[C (U)] = add_builtin_function (N (L), FT3 (R, A1, A2, A3), \
+ C (U), BUILT_IN_MD, \
+ NULL, NULL_TREE);
+ #include "crypto.def"
+
+ #undef CRYPTO1
+ #undef CRYPTO2
+ #undef CRYPTO3
+ #undef C
+ #undef N
+ #undef FT1
+ #undef FT2
+ #undef FT3
+ }
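To make the registration macros above concrete: assuming config/arm/crypto.def (added by this patch but not shown here) contains an entry along the lines of CRYPTO2 (aesd, AESD, v16uqi, v16uqi, v16uqi), it expands to roughly the following call (illustration of the expansion, not stand-alone code).

arm_builtin_decls[ARM_BUILTIN_CRYPTO_AESD]
  = add_builtin_function ("__builtin_arm_crypto_aesd",
                          v16uqi_ftype_v16uqi_v16uqi,
                          ARM_BUILTIN_CRYPTO_AESD, BUILT_IN_MD,
                          NULL, NULL_TREE);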
dreg_types[0] = V8QI_type_node;
dreg_types[1] = V4HI_type_node;
dreg_types[2] = V2SI_type_node;
@@ -20957,14 +21070,17 @@ arm_init_neon_builtins (void)
qreg_types[2] = V4SI_type_node;
qreg_types[3] = V4SF_type_node;
qreg_types[4] = V2DI_type_node;
+ qreg_types[5] = neon_intUTI_type_node;
- for (i = 0; i < 5; i++)
+ for (i = 0; i < NUM_QREG_TYPES; i++)
{
int j;
- for (j = 0; j < 5; j++)
+ for (j = 0; j < NUM_QREG_TYPES; j++)
{
- reinterp_ftype_dreg[i][j]
- = build_function_type_list (dreg_types[i], dreg_types[j], NULL);
+ if (i < NUM_DREG_TYPES && j < NUM_DREG_TYPES)
+ reinterp_ftype_dreg[i][j]
+ = build_function_type_list (dreg_types[i], dreg_types[j], NULL);
+
reinterp_ftype_qreg[i][j]
= build_function_type_list (qreg_types[i], qreg_types[j], NULL);
}
@@ -21179,10 +21295,14 @@ arm_init_neon_builtins (void)
case NEON_REINTERP:
{
- /* We iterate over 5 doubleword types, then 5 quadword
- types. V4HF is not a type used in reinterpret, so we translate
+ /* We iterate over NUM_DREG_TYPES doubleword types,
+ then NUM_QREG_TYPES quadword types.
+ V4HF is not a type used in reinterpret, so we translate
d->mode to the correct index in reinterp_ftype_dreg. */
- int rhs = (d->mode - ((d->mode > T_V4HF) ? 1 : 0)) % 5;
+ bool qreg_p
+ = GET_MODE_SIZE (insn_data[d->code].operand[0].mode) > 8;
+ int rhs = (d->mode - ((!qreg_p && (d->mode > T_V4HF)) ? 1 : 0))
+ % NUM_QREG_TYPES;
switch (insn_data[d->code].operand[0].mode)
{
case V8QImode: ftype = reinterp_ftype_dreg[0][rhs]; break;
@@ -21195,6 +21315,7 @@ arm_init_neon_builtins (void)
case V4SImode: ftype = reinterp_ftype_qreg[2][rhs]; break;
case V4SFmode: ftype = reinterp_ftype_qreg[3][rhs]; break;
case V2DImode: ftype = reinterp_ftype_qreg[4][rhs]; break;
+ case TImode: ftype = reinterp_ftype_qreg[5][rhs]; break;
default: gcc_unreachable ();
}
}
@@ -21245,6 +21366,9 @@ arm_init_neon_builtins (void)
}
}
+#undef NUM_DREG_TYPES
+#undef NUM_QREG_TYPES
+
#define def_mbuiltin(MASK, NAME, TYPE, CODE) \
do \
{ \
@@ -21267,7 +21391,7 @@ struct builtin_description
const enum rtx_code comparison;
const unsigned int flag;
};
-
+
static const struct builtin_description bdesc_2arg[] =
{
#define IWMMXT_BUILTIN(code, string, builtin) \
@@ -21373,6 +21497,33 @@ static const struct builtin_description bdesc_2arg[] =
IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
+
+#define CRC32_BUILTIN(L, U) \
+ {0, CODE_FOR_##L, "__builtin_arm_"#L, ARM_BUILTIN_##U, \
+ UNKNOWN, 0},
+ CRC32_BUILTIN (crc32b, CRC32B)
+ CRC32_BUILTIN (crc32h, CRC32H)
+ CRC32_BUILTIN (crc32w, CRC32W)
+ CRC32_BUILTIN (crc32cb, CRC32CB)
+ CRC32_BUILTIN (crc32ch, CRC32CH)
+ CRC32_BUILTIN (crc32cw, CRC32CW)
+#undef CRC32_BUILTIN
+
+
+#define CRYPTO_BUILTIN(L, U) \
+ {0, CODE_FOR_crypto_##L, "__builtin_arm_crypto_"#L, ARM_BUILTIN_CRYPTO_##U, \
+ UNKNOWN, 0},
+#undef CRYPTO1
+#undef CRYPTO2
+#undef CRYPTO3
+#define CRYPTO2(L, U, R, A1, A2) CRYPTO_BUILTIN (L, U)
+#define CRYPTO1(L, U, R, A)
+#define CRYPTO3(L, U, R, A1, A2, A3)
+#include "crypto.def"
+#undef CRYPTO1
+#undef CRYPTO2
+#undef CRYPTO3
+
};
static const struct builtin_description bdesc_1arg[] =
@@ -21401,8 +21552,28 @@ static const struct builtin_description bdesc_1arg[] =
IWMMXT_BUILTIN (tbcstv8qi, "tbcstb", TBCSTB)
IWMMXT_BUILTIN (tbcstv4hi, "tbcsth", TBCSTH)
IWMMXT_BUILTIN (tbcstv2si, "tbcstw", TBCSTW)
+
+#define CRYPTO1(L, U, R, A) CRYPTO_BUILTIN (L, U)
+#define CRYPTO2(L, U, R, A1, A2)
+#define CRYPTO3(L, U, R, A1, A2, A3)
+#include "crypto.def"
+#undef CRYPTO1
+#undef CRYPTO2
+#undef CRYPTO3
};
+static const struct builtin_description bdesc_3arg[] =
+{
+#define CRYPTO3(L, U, R, A1, A2, A3) CRYPTO_BUILTIN (L, U)
+#define CRYPTO1(L, U, R, A)
+#define CRYPTO2(L, U, R, A1, A2)
+#include "crypto.def"
+#undef CRYPTO1
+#undef CRYPTO2
+#undef CRYPTO3
+ };
+#undef CRYPTO_BUILTIN
+
/* Set up all the iWMMXt builtins. This is not called if
TARGET_IWMMXT is zero. */
@@ -21597,7 +21768,7 @@ arm_init_iwmmxt_builtins (void)
enum machine_mode mode;
tree type;
- if (d->name == 0)
+ if (d->name == 0 || !(d->mask == FL_IWMMXT || d->mask == FL_IWMMXT2))
continue;
mode = insn_data[d->icode].operand[1].mode;
@@ -21792,6 +21963,42 @@ arm_init_fp16_builtins (void)
}
static void
+arm_init_crc32_builtins ()
+{
+ tree si_ftype_si_qi
+ = build_function_type_list (unsigned_intSI_type_node,
+ unsigned_intSI_type_node,
+ unsigned_intQI_type_node, NULL_TREE);
+ tree si_ftype_si_hi
+ = build_function_type_list (unsigned_intSI_type_node,
+ unsigned_intSI_type_node,
+ unsigned_intHI_type_node, NULL_TREE);
+ tree si_ftype_si_si
+ = build_function_type_list (unsigned_intSI_type_node,
+ unsigned_intSI_type_node,
+ unsigned_intSI_type_node, NULL_TREE);
+
+ arm_builtin_decls[ARM_BUILTIN_CRC32B]
+ = add_builtin_function ("__builtin_arm_crc32b", si_ftype_si_qi,
+ ARM_BUILTIN_CRC32B, BUILT_IN_MD, NULL, NULL_TREE);
+ arm_builtin_decls[ARM_BUILTIN_CRC32H]
+ = add_builtin_function ("__builtin_arm_crc32h", si_ftype_si_hi,
+ ARM_BUILTIN_CRC32H, BUILT_IN_MD, NULL, NULL_TREE);
+ arm_builtin_decls[ARM_BUILTIN_CRC32W]
+ = add_builtin_function ("__builtin_arm_crc32w", si_ftype_si_si,
+ ARM_BUILTIN_CRC32W, BUILT_IN_MD, NULL, NULL_TREE);
+ arm_builtin_decls[ARM_BUILTIN_CRC32CB]
+ = add_builtin_function ("__builtin_arm_crc32cb", si_ftype_si_qi,
+ ARM_BUILTIN_CRC32CB, BUILT_IN_MD, NULL, NULL_TREE);
+ arm_builtin_decls[ARM_BUILTIN_CRC32CH]
+ = add_builtin_function ("__builtin_arm_crc32ch", si_ftype_si_hi,
+ ARM_BUILTIN_CRC32CH, BUILT_IN_MD, NULL, NULL_TREE);
+ arm_builtin_decls[ARM_BUILTIN_CRC32CW]
+ = add_builtin_function ("__builtin_arm_crc32cw", si_ftype_si_si,
+ ARM_BUILTIN_CRC32CW, BUILT_IN_MD, NULL, NULL_TREE);
+}
+
+static void
arm_init_builtins (void)
{
if (TARGET_REALLY_IWMMXT)
@@ -21802,6 +22009,9 @@ arm_init_builtins (void)
if (arm_fp16_format)
arm_init_fp16_builtins ();
+
+ if (TARGET_CRC32)
+ arm_init_crc32_builtins ();
}
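A hedged usage sketch of the CRC32 builtins registered above (illustrative only, assuming a CRC-capable target such as -march=armv8-a+crc):

#ifdef __ARM_FEATURE_CRC32
unsigned int
crc32_update_byte (unsigned int crc, unsigned char data)
{
  /* Maps onto the crc32b instruction via the new define_insn in arm.md.  */
  return __builtin_arm_crc32b (crc, data);
}
#endif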
/* Return the ARM builtin for CODE. */
@@ -21895,6 +22105,73 @@ safe_vector_operand (rtx x, enum machine_mode mode)
return x;
}
+/* Function to expand ternary builtins. */
+static rtx
+arm_expand_ternop_builtin (enum insn_code icode,
+ tree exp, rtx target)
+{
+ rtx pat;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
+ tree arg2 = CALL_EXPR_ARG (exp, 2);
+
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
+ rtx op3 = NULL_RTX;
+
+ /* The sha1c, sha1p, sha1m crypto builtins require a different vec_select
+ lane operand depending on endianness. */
+ bool builtin_sha1cpm_p = false;
+
+ if (insn_data[icode].n_operands == 5)
+ {
+ gcc_assert (icode == CODE_FOR_crypto_sha1c
+ || icode == CODE_FOR_crypto_sha1p
+ || icode == CODE_FOR_crypto_sha1m);
+ builtin_sha1cpm_p = true;
+ }
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+ enum machine_mode mode2 = insn_data[icode].operand[3].mode;
+
+
+ if (VECTOR_MODE_P (mode0))
+ op0 = safe_vector_operand (op0, mode0);
+ if (VECTOR_MODE_P (mode1))
+ op1 = safe_vector_operand (op1, mode1);
+ if (VECTOR_MODE_P (mode2))
+ op2 = safe_vector_operand (op2, mode2);
+
+ if (! target
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
+ && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode)
+ && (GET_MODE (op2) == mode2 || GET_MODE (op2) == VOIDmode));
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+ if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
+ op2 = copy_to_mode_reg (mode2, op2);
+ if (builtin_sha1cpm_p)
+ op3 = GEN_INT (TARGET_BIG_END ? 1 : 0);
+
+ if (builtin_sha1cpm_p)
+ pat = GEN_FCN (icode) (target, op0, op1, op2, op3);
+ else
+ pat = GEN_FCN (icode) (target, op0, op1, op2);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+}
+
/* Subroutine of arm_expand_builtin to take care of binop insns. */
static rtx
@@ -21944,8 +22221,16 @@ arm_expand_unop_builtin (enum insn_code icode,
rtx pat;
tree arg0 = CALL_EXPR_ARG (exp, 0);
rtx op0 = expand_normal (arg0);
+ rtx op1 = NULL_RTX;
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ bool builtin_sha1h_p = false;
+
+ if (insn_data[icode].n_operands == 3)
+ {
+ gcc_assert (icode == CODE_FOR_crypto_sha1h);
+ builtin_sha1h_p = true;
+ }
if (! target
|| GET_MODE (target) != tmode
@@ -21961,8 +22246,13 @@ arm_expand_unop_builtin (enum insn_code icode,
if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
op0 = copy_to_mode_reg (mode0, op0);
}
+ if (builtin_sha1h_p)
+ op1 = GEN_INT (TARGET_BIG_END ? 1 : 0);
- pat = GEN_FCN (icode) (target, op0);
+ if (builtin_sha1h_p)
+ pat = GEN_FCN (icode) (target, op0, op1);
+ else
+ pat = GEN_FCN (icode) (target, op0);
if (! pat)
return 0;
emit_insn (pat);
@@ -22927,6 +23217,10 @@ arm_expand_builtin (tree exp,
if (d->code == (const enum arm_builtins) fcode)
return arm_expand_unop_builtin (d->icode, exp, target, 0);
+ for (i = 0, d = bdesc_3arg; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
+ if (d->code == (const enum arm_builtins) fcode)
+ return arm_expand_ternop_builtin (d->icode, exp, target);
+
/* @@@ Should really do something sensible here. */
return NULL_RTX;
}
@@ -25060,7 +25354,22 @@ arm_file_start (void)
{
const char *fpu_name;
if (arm_selected_arch)
- asm_fprintf (asm_out_file, "\t.arch %s\n", arm_selected_arch->name);
+ {
+ const char* pos = strchr (arm_selected_arch->name, '+');
+ if (pos)
+ {
+ char buf[15];
+ gcc_assert (strlen (arm_selected_arch->name)
+ <= sizeof (buf) / sizeof (*pos));
+ strncpy (buf, arm_selected_arch->name,
+ (pos - arm_selected_arch->name) * sizeof (*pos));
+ buf[pos - arm_selected_arch->name] = '\0';
+ asm_fprintf (asm_out_file, "\t.arch %s\n", buf);
+ asm_fprintf (asm_out_file, "\t.arch_extension %s\n", pos + 1);
+ }
+ else
+ asm_fprintf (asm_out_file, "\t.arch %s\n", arm_selected_arch->name);
+ }
else if (strncmp (arm_selected_cpu->name, "generic", 7) == 0)
asm_fprintf (asm_out_file, "\t.arch %s\n", arm_selected_cpu->name + 8);
else
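A stand-alone sketch of the name splitting performed above for an architecture string such as "armv8-a+crc" (same buffer size as the patch; the expected assembler directives are shown in comments).

#include <stdio.h>
#include <string.h>

int
main (void)
{
  const char *arch = "armv8-a+crc";
  const char *pos = strchr (arch, '+');

  if (pos)
    {
      char buf[15];
      size_t len = (size_t) (pos - arch);

      memcpy (buf, arch, len);
      buf[len] = '\0';
      printf ("\t.arch %s\n", buf);                /* .arch armv8-a */
      printf ("\t.arch_extension %s\n", pos + 1);  /* .arch_extension crc */
    }
  return 0;
}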
@@ -26774,6 +27083,7 @@ static arm_mangle_map_entry arm_mangle_map[] = {
{ V2SFmode, "__builtin_neon_sf", "18__simd64_float32_t" },
{ V8QImode, "__builtin_neon_poly8", "16__simd64_poly8_t" },
{ V4HImode, "__builtin_neon_poly16", "17__simd64_poly16_t" },
+
/* 128-bit containerized types. */
{ V16QImode, "__builtin_neon_qi", "16__simd128_int8_t" },
{ V16QImode, "__builtin_neon_uqi", "17__simd128_uint8_t" },
diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h
index 418846ef7f4..aeb95481c51 100644
--- a/gcc/config/arm/arm.h
+++ b/gcc/config/arm/arm.h
@@ -49,8 +49,14 @@ extern char arm_arch_name[];
builtin_define ("__ARM_FEATURE_QBIT"); \
if (TARGET_ARM_SAT) \
builtin_define ("__ARM_FEATURE_SAT"); \
+ if (TARGET_CRYPTO) \
+ builtin_define ("__ARM_FEATURE_CRYPTO"); \
if (unaligned_access) \
builtin_define ("__ARM_FEATURE_UNALIGNED"); \
+ if (TARGET_CRC32) \
+ builtin_define ("__ARM_FEATURE_CRC32"); \
+ if (TARGET_32BIT) \
+ builtin_define ("__ARM_32BIT_STATE"); \
if (TARGET_ARM_FEATURE_LDREX) \
builtin_define_with_int_value ( \
"__ARM_FEATURE_LDREX", TARGET_ARM_FEATURE_LDREX); \
@@ -274,6 +280,8 @@ extern void (*arm_lang_output_object_attributes_hook)(void);
#define TARGET_LDRD (arm_arch5e && ARM_DOUBLEWORD_ALIGN \
&& !TARGET_THUMB1)
+#define TARGET_CRC32 (arm_arch_crc)
+
/* The following two macros concern the ability to execute coprocessor
instructions for VFPv3 or NEON. TARGET_VFP3/TARGET_VFPD32 are currently
only ever tested when we know we are generating for VFP hardware; we need
@@ -554,6 +562,9 @@ extern int arm_arch_thumb_hwdiv;
than core registers. */
extern int prefer_neon_for_64bits;
+/* Nonzero if chip supports the ARMv8 CRC instructions. */
+extern int arm_arch_crc;
+
#ifndef TARGET_DEFAULT
#define TARGET_DEFAULT (MASK_APCS_FRAME)
#endif
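A hedged sketch (illustrative only) of compile-time feature selection with the predefines added above:

#ifdef __ARM_FEATURE_CRC32
#  define HAVE_HW_CRC32 1
#else
#  define HAVE_HW_CRC32 0
#endif

#ifdef __ARM_32BIT_STATE
/* Code specific to 32-bit execution state can be guarded here.  */
#endif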
diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index 102e4e375c6..fbf285e150a 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -396,6 +396,7 @@
branch,\
call,\
clz,\
+ crc,\
extend,\
f_2_r,\
f_cvt,\
@@ -628,6 +629,13 @@
neon_mrrc,\
neon_ldm_2,\
neon_stm_2,\
+ neon_crypto_aes,\
+ neon_crypto_sha1_xor,\
+ neon_crypto_sha1_fast,\
+ neon_crypto_sha1_slow,\
+ neon_crypto_sha256_fast,\
+ neon_crypto_sha256_slow,\
+ neon_mul_d_long,\
none"
(const_string "none"))
@@ -12808,6 +12816,17 @@
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
+;; ARMv8 CRC32 instructions.
+(define_insn "<crc_variant>"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:<crc_mode> 2 "s_register_operand" "r")]
+ CRC))]
+ "TARGET_CRC32"
+ "<crc_variant>\\t%0, %1, %2"
+ [(set_attr "type" "crc")
+ (set_attr "conds" "unconditional")]
+)
;; Load the load/store double peephole optimizations.
(include "ldrdstrd.md")
@@ -12845,6 +12864,8 @@
(include "thumb2.md")
;; Neon patterns
(include "neon.md")
+;; Crypto patterns
+(include "crypto.md")
;; Synchronization Primitives
(include "sync.md")
;; Fixed-point patterns
diff --git a/gcc/config/arm/arm_neon.h b/gcc/config/arm/arm_neon.h
index e23d03b9d10..1abbba2256c 100644
--- a/gcc/config/arm/arm_neon.h
+++ b/gcc/config/arm/arm_neon.h
@@ -42,10 +42,13 @@ typedef __builtin_neon_qi int8x8_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_hi int16x4_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_si int32x2_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_di int64x1_t;
-typedef __builtin_neon_sf float32x2_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_hf float16x4_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_sf float32x2_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_poly8 poly8x8_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_poly16 poly16x4_t __attribute__ ((__vector_size__ (8)));
+#ifdef __ARM_FEATURE_CRYPTO
+typedef __builtin_neon_poly64 poly64x1_t;
+#endif
typedef __builtin_neon_uqi uint8x8_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_uhi uint16x4_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_usi uint32x2_t __attribute__ ((__vector_size__ (8)));
@@ -57,6 +60,9 @@ typedef __builtin_neon_di int64x2_t __attribute__ ((__vector_size__ (16)));
typedef __builtin_neon_sf float32x4_t __attribute__ ((__vector_size__ (16)));
typedef __builtin_neon_poly8 poly8x16_t __attribute__ ((__vector_size__ (16)));
typedef __builtin_neon_poly16 poly16x8_t __attribute__ ((__vector_size__ (16)));
+#ifdef __ARM_FEATURE_CRYPTO
+typedef __builtin_neon_poly64 poly64x2_t __attribute__ ((__vector_size__ (16)));
+#endif
typedef __builtin_neon_uqi uint8x16_t __attribute__ ((__vector_size__ (16)));
typedef __builtin_neon_uhi uint16x8_t __attribute__ ((__vector_size__ (16)));
typedef __builtin_neon_usi uint32x4_t __attribute__ ((__vector_size__ (16)));
@@ -65,6 +71,10 @@ typedef __builtin_neon_udi uint64x2_t __attribute__ ((__vector_size__ (16)));
typedef float float32_t;
typedef __builtin_neon_poly8 poly8_t;
typedef __builtin_neon_poly16 poly16_t;
+#ifdef __ARM_FEATURE_CRYPTO
+typedef __builtin_neon_poly64 poly64_t;
+typedef __builtin_neon_poly128 poly128_t;
+#endif
typedef struct int8x8x2_t
{
@@ -176,6 +186,22 @@ typedef struct poly16x8x2_t
poly16x8_t val[2];
} poly16x8x2_t;
+#ifdef __ARM_FEATURE_CRYPTO
+typedef struct poly64x1x2_t
+{
+ poly64x1_t val[2];
+} poly64x1x2_t;
+#endif
+
+
+#ifdef __ARM_FEATURE_CRYPTO
+typedef struct poly64x2x2_t
+{
+ poly64x2_t val[2];
+} poly64x2x2_t;
+#endif
+
+
typedef struct int8x8x3_t
{
int8x8_t val[3];
@@ -286,6 +312,22 @@ typedef struct poly16x8x3_t
poly16x8_t val[3];
} poly16x8x3_t;
+#ifdef __ARM_FEATURE_CRYPTO
+typedef struct poly64x1x3_t
+{
+ poly64x1_t val[3];
+} poly64x1x3_t;
+#endif
+
+
+#ifdef __ARM_FEATURE_CRYPTO
+typedef struct poly64x2x3_t
+{
+ poly64x2_t val[3];
+} poly64x2x3_t;
+#endif
+
+
typedef struct int8x8x4_t
{
int8x8_t val[4];
@@ -396,6 +438,22 @@ typedef struct poly16x8x4_t
poly16x8_t val[4];
} poly16x8x4_t;
+#ifdef __ARM_FEATURE_CRYPTO
+typedef struct poly64x1x4_t
+{
+ poly64x1_t val[4];
+} poly64x1x4_t;
+#endif
+
+
+#ifdef __ARM_FEATURE_CRYPTO
+typedef struct poly64x2x4_t
+{
+ poly64x2_t val[4];
+} poly64x2x4_t;
+#endif
+
+
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vadd_s8 (int8x8_t __a, int8x8_t __b)
@@ -4361,6 +4419,14 @@ vrsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
return (uint64x2_t)__builtin_neon_vsra_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c, 4);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vsri_n_p64 (poly64x1_t __a, poly64x1_t __b, const int __c)
+{
+ return (poly64x1_t)__builtin_neon_vsri_ndi (__a, __b, __c);
+}
+
+#endif
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vsri_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
{
@@ -4421,6 +4487,14 @@ vsri_n_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
return (poly16x4_t)__builtin_neon_vsri_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vsriq_n_p64 (poly64x2_t __a, poly64x2_t __b, const int __c)
+{
+ return (poly64x2_t)__builtin_neon_vsri_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+#endif
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
{
@@ -4481,6 +4555,14 @@ vsriq_n_p16 (poly16x8_t __a, poly16x8_t __b, const int __c)
return (poly16x8_t)__builtin_neon_vsri_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vsli_n_p64 (poly64x1_t __a, poly64x1_t __b, const int __c)
+{
+ return (poly64x1_t)__builtin_neon_vsli_ndi (__a, __b, __c);
+}
+
+#endif
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vsli_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
{
@@ -4541,6 +4623,14 @@ vsli_n_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
return (poly16x4_t)__builtin_neon_vsli_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vsliq_n_p64 (poly64x2_t __a, poly64x2_t __b, const int __c)
+{
+ return (poly64x2_t)__builtin_neon_vsli_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+#endif
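/* Editorial illustration, not part of the patch: shift-right-insert on the
   new 64-bit polynomial lanes, guarded as above (hypothetical helper name).  */
#ifdef __ARM_FEATURE_CRYPTO
static inline poly64x1_t
example_sri_p64 (poly64x1_t hi, poly64x1_t lo)
{
  /* Keep the top 32 bits of hi; fill the rest with lo shifted right by 32.  */
  return vsri_n_p64 (hi, lo, 32);
}
#endif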
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
{
@@ -5309,6 +5399,14 @@ vsetq_lane_u64 (uint64_t __a, uint64x2_t __b, const int __c)
return (uint64x2_t)__builtin_neon_vset_lanev2di ((__builtin_neon_di) __a, (int64x2_t) __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vcreate_p64 (uint64_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vcreatedi ((__builtin_neon_di) __a);
+}
+
+#endif
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vcreate_s8 (uint64_t __a)
{
@@ -5429,6 +5527,14 @@ vdup_n_p16 (poly16_t __a)
return (poly16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vdup_n_p64 (poly64_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a);
+}
+
+#endif
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vdup_n_s64 (int64_t __a)
{
@@ -5441,6 +5547,14 @@ vdup_n_u64 (uint64_t __a)
return (uint64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vdupq_n_p64 (poly64_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a);
+}
+
+#endif
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vdupq_n_s8 (int8_t __a)
{
@@ -5693,6 +5807,14 @@ vdup_lane_p16 (poly16x4_t __a, const int __b)
return (poly16x4_t)__builtin_neon_vdup_lanev4hi ((int16x4_t) __a, __b);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vdup_lane_p64 (poly64x1_t __a, const int __b)
+{
+ return (poly64x1_t)__builtin_neon_vdup_lanedi (__a, __b);
+}
+
+#endif
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vdup_lane_s64 (int64x1_t __a, const int __b)
{
@@ -5759,6 +5881,14 @@ vdupq_lane_p16 (poly16x4_t __a, const int __b)
return (poly16x8_t)__builtin_neon_vdup_lanev8hi ((int16x4_t) __a, __b);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vdupq_lane_p64 (poly64x1_t __a, const int __b)
+{
+ return (poly64x2_t)__builtin_neon_vdup_lanev2di (__a, __b);
+}
+
+#endif
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vdupq_lane_s64 (int64x1_t __a, const int __b)
{
@@ -5771,6 +5901,14 @@ vdupq_lane_u64 (uint64x1_t __a, const int __b)
return (uint64x2_t)__builtin_neon_vdup_lanev2di ((int64x1_t) __a, __b);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vcombine_p64 (poly64x1_t __a, poly64x1_t __b)
+{
+ return (poly64x2_t)__builtin_neon_vcombinedi (__a, __b);
+}
+
+#endif
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vcombine_s8 (int8x8_t __a, int8x8_t __b)
{
@@ -5837,6 +5975,14 @@ vcombine_p16 (poly16x4_t __a, poly16x4_t __b)
return (poly16x8_t)__builtin_neon_vcombinev4hi ((int16x4_t) __a, (int16x4_t) __b);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vget_high_p64 (poly64x2_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vget_highv2di ((int64x2_t) __a);
+}
+
+#endif
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vget_high_s8 (int8x16_t __a)
{
@@ -5957,6 +6103,14 @@ vget_low_p16 (poly16x8_t __a)
return (poly16x4_t)__builtin_neon_vget_lowv8hi ((int16x8_t) __a);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vget_low_p64 (poly64x2_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vget_lowv2di ((int64x2_t) __a);
+}
+
+#endif
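/* Editorial illustration, not part of the patch: building and splitting
   poly64 vectors with the intrinsics added above (hypothetical helper name).  */
#ifdef __ARM_FEATURE_CRYPTO
static inline poly64x2_t
example_build_p64q (uint64_t x, poly64_t y)
{
  poly64x1_t a = vcreate_p64 (x);      /* reinterpret a 64-bit pattern as poly64 */
  poly64x1_t b = vdup_n_p64 (y);       /* broadcast a poly64 scalar into one lane */
  poly64x2_t q = vcombine_p64 (a, b);  /* pack both D registers into a Q register */
  /* vget_low_p64/vget_high_p64 recover the two halves; this round-trips q.  */
  return vcombine_p64 (vget_low_p64 (q), vget_high_p64 (q));
}
#endif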
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vget_low_s64 (int64x2_t __a)
{
@@ -7041,6 +7195,14 @@ vqdmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
return (int64x2_t)__builtin_neon_vqdmlsl_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vext_p64 (poly64x1_t __a, poly64x1_t __b, const int __c)
+{
+ return (poly64x1_t)__builtin_neon_vextdi (__a, __b, __c);
+}
+
+#endif
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vext_s8 (int8x8_t __a, int8x8_t __b, const int __c)
{
@@ -7107,6 +7269,14 @@ vext_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
return (poly16x4_t)__builtin_neon_vextv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vextq_p64 (poly64x2_t __a, poly64x2_t __b, const int __c)
+{
+ return (poly64x2_t)__builtin_neon_vextv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+#endif
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vextq_s8 (int8x16_t __a, int8x16_t __b, const int __c)
{
@@ -7389,6 +7559,14 @@ vrev16q_p8 (poly8x16_t __a)
return (poly8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vbsl_p64 (uint64x1_t __a, poly64x1_t __b, poly64x1_t __c)
+{
+ return (poly64x1_t)__builtin_neon_vbsldi ((int64x1_t) __a, __b, __c);
+}
+
+#endif
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vbsl_s8 (uint8x8_t __a, int8x8_t __b, int8x8_t __c)
{
@@ -7455,6 +7633,14 @@ vbsl_p16 (uint16x4_t __a, poly16x4_t __b, poly16x4_t __c)
return (poly16x4_t)__builtin_neon_vbslv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vbslq_p64 (uint64x2_t __a, poly64x2_t __b, poly64x2_t __c)
+{
+ return (poly64x2_t)__builtin_neon_vbslv2di ((int64x2_t) __a, (int64x2_t) __b, (int64x2_t) __c);
+}
+
+#endif
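/* Editorial illustration, not part of the patch: bitwise select on poly64
   lanes; set mask bits take bits from the second operand, clear bits from the
   third (hypothetical helper name).  */
#ifdef __ARM_FEATURE_CRYPTO
static inline poly64x1_t
example_bsl_p64 (poly64x1_t a, poly64x1_t b)
{
  uint64x1_t mask = vdup_n_u64 (0xffffffff00000000ULL);
  return vbsl_p64 (mask, a, b);   /* high word from a, low word from b */
}
#endif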
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vbslq_s8 (uint8x16_t __a, int8x16_t __b, int8x16_t __c)
{
@@ -8007,6 +8193,14 @@ vuzpq_p16 (poly16x8_t __a, poly16x8_t __b)
return __rv;
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vld1_p64 (const poly64_t * __a)
+{
+ return (poly64x1_t)__builtin_neon_vld1di ((const __builtin_neon_di *) __a);
+}
+
+#endif
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vld1_s8 (const int8_t * __a)
{
@@ -8073,6 +8267,14 @@ vld1_p16 (const poly16_t * __a)
return (poly16x4_t)__builtin_neon_vld1v4hi ((const __builtin_neon_hi *) __a);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vld1q_p64 (const poly64_t * __a)
+{
+ return (poly64x2_t)__builtin_neon_vld1v2di ((const __builtin_neon_di *) __a);
+}
+
+#endif
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vld1q_s8 (const int8_t * __a)
{
@@ -8193,6 +8395,14 @@ vld1_lane_p16 (const poly16_t * __a, poly16x4_t __b, const int __c)
return (poly16x4_t)__builtin_neon_vld1_lanev4hi ((const __builtin_neon_hi *) __a, (int16x4_t) __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vld1_lane_p64 (const poly64_t * __a, poly64x1_t __b, const int __c)
+{
+ return (poly64x1_t)__builtin_neon_vld1_lanedi ((const __builtin_neon_di *) __a, __b, __c);
+}
+
+#endif
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vld1_lane_s64 (const int64_t * __a, int64x1_t __b, const int __c)
{
@@ -8259,6 +8469,14 @@ vld1q_lane_p16 (const poly16_t * __a, poly16x8_t __b, const int __c)
return (poly16x8_t)__builtin_neon_vld1_lanev8hi ((const __builtin_neon_hi *) __a, (int16x8_t) __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vld1q_lane_p64 (const poly64_t * __a, poly64x2_t __b, const int __c)
+{
+ return (poly64x2_t)__builtin_neon_vld1_lanev2di ((const __builtin_neon_di *) __a, (int64x2_t) __b, __c);
+}
+
+#endif
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vld1q_lane_s64 (const int64_t * __a, int64x2_t __b, const int __c)
{
@@ -8325,6 +8543,14 @@ vld1_dup_p16 (const poly16_t * __a)
return (poly16x4_t)__builtin_neon_vld1_dupv4hi ((const __builtin_neon_hi *) __a);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vld1_dup_p64 (const poly64_t * __a)
+{
+ return (poly64x1_t)__builtin_neon_vld1_dupdi ((const __builtin_neon_di *) __a);
+}
+
+#endif
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vld1_dup_s64 (const int64_t * __a)
{
@@ -8391,6 +8617,14 @@ vld1q_dup_p16 (const poly16_t * __a)
return (poly16x8_t)__builtin_neon_vld1_dupv8hi ((const __builtin_neon_hi *) __a);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vld1q_dup_p64 (const poly64_t * __a)
+{
+ return (poly64x2_t)__builtin_neon_vld1_dupv2di ((const __builtin_neon_di *) __a);
+}
+
+#endif
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vld1q_dup_s64 (const int64_t * __a)
{
@@ -8403,6 +8637,14 @@ vld1q_dup_u64 (const uint64_t * __a)
return (uint64x2_t)__builtin_neon_vld1_dupv2di ((const __builtin_neon_di *) __a);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_p64 (poly64_t * __a, poly64x1_t __b)
+{
+ __builtin_neon_vst1di ((__builtin_neon_di *) __a, __b);
+}
+
+#endif
__extension__ static __inline void __attribute__ ((__always_inline__))
vst1_s8 (int8_t * __a, int8x8_t __b)
{
@@ -8469,6 +8711,14 @@ vst1_p16 (poly16_t * __a, poly16x4_t __b)
__builtin_neon_vst1v4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_p64 (poly64_t * __a, poly64x2_t __b)
+{
+ __builtin_neon_vst1v2di ((__builtin_neon_di *) __a, (int64x2_t) __b);
+}
+
+#endif
__extension__ static __inline void __attribute__ ((__always_inline__))
vst1q_s8 (int8_t * __a, int8x16_t __b)
{
@@ -8589,6 +8839,14 @@ vst1_lane_p16 (poly16_t * __a, poly16x4_t __b, const int __c)
__builtin_neon_vst1_lanev4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_p64 (poly64_t * __a, poly64x1_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanedi ((__builtin_neon_di *) __a, __b, __c);
+}
+
+#endif
__extension__ static __inline void __attribute__ ((__always_inline__))
vst1_lane_s64 (int64_t * __a, int64x1_t __b, const int __c)
{
@@ -8655,6 +8913,14 @@ vst1q_lane_p16 (poly16_t * __a, poly16x8_t __b, const int __c)
__builtin_neon_vst1_lanev8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_p64 (poly64_t * __a, poly64x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2di ((__builtin_neon_di *) __a, (int64x2_t) __b, __c);
+}
+
+#endif
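/* Editorial illustration, not part of the patch: plain load/store of poly64
   data with the new vld1/vst1 variants (hypothetical helper name).  */
#ifdef __ARM_FEATURE_CRYPTO
static inline void
example_copy_p64 (poly64_t *dst, const poly64_t *src)
{
  poly64x2_t v = vld1q_p64 (src);   /* load two consecutive poly64 elements */
  vst1q_p64 (dst, v);               /* store them back out unchanged */
}
#endif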
__extension__ static __inline void __attribute__ ((__always_inline__))
vst1q_lane_s64 (int64_t * __a, int64x2_t __b, const int __c)
{
@@ -8739,6 +9005,16 @@ vld2_p16 (const poly16_t * __a)
return __rv.__i;
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1x2_t __attribute__ ((__always_inline__))
+vld2_p64 (const poly64_t * __a)
+{
+ union { poly64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#endif
__extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__))
vld2_s64 (const int64_t * __a)
{
@@ -9034,6 +9310,16 @@ vld2_dup_p16 (const poly16_t * __a)
return __rv.__i;
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1x2_t __attribute__ ((__always_inline__))
+vld2_dup_p64 (const poly64_t * __a)
+{
+ union { poly64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#endif
__extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__))
vld2_dup_s64 (const int64_t * __a)
{
@@ -9113,6 +9399,15 @@ vst2_p16 (poly16_t * __a, poly16x4x2_t __b)
__builtin_neon_vst2v4hi ((__builtin_neon_hi *) __a, __bu.__o);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_p64 (poly64_t * __a, poly64x1x2_t __b)
+{
+ union { poly64x1x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+#endif
__extension__ static __inline void __attribute__ ((__always_inline__))
vst2_s64 (int64_t * __a, int64x1x2_t __b)
{
@@ -9367,6 +9662,16 @@ vld3_p16 (const poly16_t * __a)
return __rv.__i;
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1x3_t __attribute__ ((__always_inline__))
+vld3_p64 (const poly64_t * __a)
+{
+ union { poly64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#endif
__extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__))
vld3_s64 (const int64_t * __a)
{
@@ -9662,6 +9967,16 @@ vld3_dup_p16 (const poly16_t * __a)
return __rv.__i;
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1x3_t __attribute__ ((__always_inline__))
+vld3_dup_p64 (const poly64_t * __a)
+{
+ union { poly64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#endif
__extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__))
vld3_dup_s64 (const int64_t * __a)
{
@@ -9741,6 +10056,15 @@ vst3_p16 (poly16_t * __a, poly16x4x3_t __b)
__builtin_neon_vst3v4hi ((__builtin_neon_hi *) __a, __bu.__o);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_p64 (poly64_t * __a, poly64x1x3_t __b)
+{
+ union { poly64x1x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+#endif
__extension__ static __inline void __attribute__ ((__always_inline__))
vst3_s64 (int64_t * __a, int64x1x3_t __b)
{
@@ -9995,6 +10319,16 @@ vld4_p16 (const poly16_t * __a)
return __rv.__i;
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1x4_t __attribute__ ((__always_inline__))
+vld4_p64 (const poly64_t * __a)
+{
+ union { poly64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#endif
__extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__))
vld4_s64 (const int64_t * __a)
{
@@ -10290,6 +10624,16 @@ vld4_dup_p16 (const poly16_t * __a)
return __rv.__i;
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1x4_t __attribute__ ((__always_inline__))
+vld4_dup_p64 (const poly64_t * __a)
+{
+ union { poly64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#endif
__extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__))
vld4_dup_s64 (const int64_t * __a)
{
@@ -10369,6 +10713,15 @@ vst4_p16 (poly16_t * __a, poly16x4x4_t __b)
__builtin_neon_vst4v4hi ((__builtin_neon_hi *) __a, __bu.__o);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_p64 (poly64_t * __a, poly64x1x4_t __b)
+{
+ union { poly64x1x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+#endif
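/* Editorial illustration, not part of the patch: the structure load/store
   variants move poly64x1xN aggregates in one call (hypothetical helper name).  */
#ifdef __ARM_FEATURE_CRYPTO
static inline void
example_copy_pair_p64 (poly64_t *dst, const poly64_t *src)
{
  poly64x1x2_t pair = vld2_p64 (src);   /* load two poly64 values as a pair */
  vst2_p64 (dst, pair);                 /* store the pair back */
}
#endif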
__extension__ static __inline void __attribute__ ((__always_inline__))
vst4_s64 (int64_t * __a, int64x1x4_t __b)
{
@@ -11033,23 +11386,25 @@ vornq_u64 (uint64x2_t __a, uint64x2_t __b)
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_s8 (int8x8_t __a)
+vreinterpret_p8_p16 (poly16x4_t __a)
{
- return (poly8x8_t)__builtin_neon_vreinterpretv8qiv8qi (__a);
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_s16 (int16x4_t __a)
+vreinterpret_p8_f32 (float32x2_t __a)
{
- return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
}
+#ifdef __ARM_FEATURE_CRYPTO
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_s32 (int32x2_t __a)
+vreinterpret_p8_p64 (poly64x1_t __a)
{
- return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
}
+#endif
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
vreinterpret_p8_s64 (int64x1_t __a)
{
@@ -11057,99 +11412,77 @@ vreinterpret_p8_s64 (int64x1_t __a)
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_f32 (float32x2_t __a)
+vreinterpret_p8_u64 (uint64x1_t __a)
{
- return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_u8 (uint8x8_t __a)
+vreinterpret_p8_s8 (int8x8_t __a)
{
- return (poly8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv8qi (__a);
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_u16 (uint16x4_t __a)
+vreinterpret_p8_s16 (int16x4_t __a)
{
- return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_u32 (uint32x2_t __a)
+vreinterpret_p8_s32 (int32x2_t __a)
{
- return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_u64 (uint64x1_t __a)
+vreinterpret_p8_u8 (uint8x8_t __a)
{
- return (poly8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_p16 (poly16x4_t __a)
+vreinterpret_p8_u16 (uint16x4_t __a)
{
return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_s8 (int8x16_t __a)
-{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv16qi (__a);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_s16 (int16x8_t __a)
-{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_s32 (int32x4_t __a)
-{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_s64 (int64x2_t __a)
-{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_f32 (float32x4_t __a)
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u32 (uint32x2_t __a)
{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_u8 (uint8x16_t __a)
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_p8 (poly8x8_t __a)
{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_u16 (uint16x8_t __a)
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_f32 (float32x2_t __a)
{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_u32 (uint32x4_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_p64 (poly64x1_t __a)
{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_u64 (uint64x2_t __a)
+#endif
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s64 (int64x1_t __a)
{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_p16 (poly16x8_t __a)
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u64 (uint64x1_t __a)
{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
}
__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
@@ -11171,18 +11504,6 @@ vreinterpret_p16_s32 (int32x2_t __a)
}
__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vreinterpret_p16_s64 (int64x1_t __a)
-{
- return (poly16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vreinterpret_p16_f32 (float32x2_t __a)
-{
- return (poly16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
vreinterpret_p16_u8 (uint8x8_t __a)
{
return (poly16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
@@ -11200,76 +11521,36 @@ vreinterpret_p16_u32 (uint32x2_t __a)
return (poly16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vreinterpret_p16_u64 (uint64x1_t __a)
-{
- return (poly16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vreinterpret_p16_p8 (poly8x8_t __a)
-{
- return (poly16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_s8 (int8x16_t __a)
-{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi (__a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_s16 (int16x8_t __a)
-{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv8hi (__a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_s32 (int32x4_t __a)
-{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4si (__a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_s64 (int64x2_t __a)
-{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_f32 (float32x4_t __a)
-{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_u8 (uint8x16_t __a)
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p8 (poly8x8_t __a)
{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv8qi ((int8x8_t) __a);
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_u16 (uint16x8_t __a)
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p16 (poly16x4_t __a)
{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv4hi ((int16x4_t) __a);
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_u32 (uint32x4_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p64 (poly64x1_t __a)
{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfdi (__a);
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_u64 (uint64x2_t __a)
+#endif
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s64 (int64x1_t __a)
{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfdi (__a);
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_p8 (poly8x16_t __a)
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u64 (uint64x1_t __a)
{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfdi ((int64x1_t) __a);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
@@ -11291,12 +11572,6 @@ vreinterpret_f32_s32 (int32x2_t __a)
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vreinterpret_f32_s64 (int64x1_t __a)
-{
- return (float32x2_t)__builtin_neon_vreinterpretv2sfdi (__a);
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vreinterpret_f32_u8 (uint8x8_t __a)
{
return (float32x2_t)__builtin_neon_vreinterpretv2sfv8qi ((int8x8_t) __a);
@@ -11314,82 +11589,124 @@ vreinterpret_f32_u32 (uint32x2_t __a)
return (float32x2_t)__builtin_neon_vreinterpretv2sfv2si ((int32x2_t) __a);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vreinterpret_f32_u64 (uint64x1_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_p8 (poly8x8_t __a)
{
- return (float32x2_t)__builtin_neon_vreinterpretv2sfdi ((int64x1_t) __a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vreinterpret_f32_p8 (poly8x8_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_p16 (poly16x4_t __a)
{
- return (float32x2_t)__builtin_neon_vreinterpretv2sfv8qi ((int8x8_t) __a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vreinterpret_f32_p16 (poly16x4_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_f32 (float32x2_t __a)
{
- return (float32x2_t)__builtin_neon_vreinterpretv2sfv4hi ((int16x4_t) __a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_s8 (int8x16_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_s64 (int64x1_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi (__a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdidi (__a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_s16 (int16x8_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_u64 (uint64x1_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi (__a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdidi ((int64x1_t) __a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_s32 (int32x4_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_s8 (int8x8_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv4si (__a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv8qi (__a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_s64 (int64x2_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_s16 (int16x4_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv2di (__a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv4hi (__a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_u8 (uint8x16_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_s32 (int32x2_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi ((int8x16_t) __a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv2si (__a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_u16 (uint16x8_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_u8 (uint8x8_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi ((int16x8_t) __a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_u32 (uint32x4_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_u16 (uint16x4_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv4si ((int32x4_t) __a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_u64 (uint64x2_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_u32 (uint32x2_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv2di ((int64x2_t) __a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv2si ((int32x2_t) __a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_p8 (poly8x16_t __a)
+#endif
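/* Editorial illustration, not part of the patch: the vreinterpret_p64_* forms
   added above (and the matching *_p64 forms) are pure bit-pattern casts
   between D-register types (hypothetical helper name).  */
#ifdef __ARM_FEATURE_CRYPTO
static inline poly64x1_t
example_bytes_to_p64 (uint8x8_t bytes)
{
  return vreinterpret_p64_u8 (bytes);   /* same 64 bits, viewed as one poly64 lane */
}
#endif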
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p8 (poly8x8_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi ((int8x16_t) __a);
+ return (int64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_p16 (poly16x8_t __a)
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p16 (poly16x4_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi ((int16x8_t) __a);
+ return (int64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_f32 (float32x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p64 (poly64x1_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdidi (__a);
+}
+
+#endif
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u64 (uint64x1_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdidi ((int64x1_t) __a);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
@@ -11411,12 +11728,6 @@ vreinterpret_s64_s32 (int32x2_t __a)
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vreinterpret_s64_f32 (float32x2_t __a)
-{
- return (int64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vreinterpret_s64_u8 (uint8x8_t __a)
{
return (int64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
@@ -11434,550 +11745,1204 @@ vreinterpret_s64_u32 (uint32x2_t __a)
return (int64x1_t)__builtin_neon_vreinterpretdiv2si ((int32x2_t) __a);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vreinterpret_s64_u64 (uint64x1_t __a)
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p8 (poly8x8_t __a)
{
- return (int64x1_t)__builtin_neon_vreinterpretdidi ((int64x1_t) __a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vreinterpret_s64_p8 (poly8x8_t __a)
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p16 (poly16x4_t __a)
{
- return (int64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vreinterpret_s64_p16 (poly16x4_t __a)
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_f32 (float32x2_t __a)
{
- return (int64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_s8 (int8x16_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p64 (poly64x1_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div16qi (__a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdidi (__a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_s16 (int16x8_t __a)
+#endif
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s64 (int64x1_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div8hi (__a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdidi (__a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_s32 (int32x4_t __a)
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s8 (int8x8_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div4si (__a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi (__a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_f32 (float32x4_t __a)
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s16 (int16x4_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div4sf (__a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi (__a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_u8 (uint8x16_t __a)
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s32 (int32x2_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv2si (__a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_u16 (uint16x8_t __a)
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u8 (uint8x8_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_u32 (uint32x4_t __a)
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u16 (uint16x4_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div4si ((int32x4_t) __a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_u64 (uint64x2_t __a)
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u32 (uint32x2_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div2di ((int64x2_t) __a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv2si ((int32x2_t) __a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_p8 (poly8x16_t __a)
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p8 (poly8x8_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_p16 (poly16x8_t __a)
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p16 (poly16x4_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_s8 (int8x8_t __a)
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_f32 (float32x2_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi (__a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_s16 (int16x4_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p64 (poly64x1_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi (__a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_s32 (int32x2_t __a)
+#endif
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s64 (int64x1_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdiv2si (__a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_s64 (int64x1_t __a)
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u64 (uint64x1_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdidi (__a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_f32 (float32x2_t __a)
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s16 (int16x4_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_u8 (uint8x8_t __a)
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s32 (int32x2_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_u16 (uint16x4_t __a)
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u8 (uint8x8_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_u32 (uint32x2_t __a)
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u16 (uint16x4_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdiv2si ((int32x2_t) __a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_p8 (poly8x8_t __a)
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u32 (uint32x2_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_p16 (poly16x4_t __a)
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p8 (poly8x8_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_s8 (int8x16_t __a)
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p16 (poly16x4_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi (__a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_s16 (int16x8_t __a)
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_f32 (float32x2_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi (__a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_s32 (int32x4_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p64 (poly64x1_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div4si (__a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_s64 (int64x2_t __a)
+#endif
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s64 (int64x1_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div2di (__a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_f32 (float32x4_t __a)
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u64 (uint64x1_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div4sf (__a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_u8 (uint8x16_t __a)
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s8 (int8x8_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_u16 (uint16x8_t __a)
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s32 (int32x2_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_u32 (uint32x4_t __a)
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u8 (uint8x8_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div4si ((int32x4_t) __a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_p8 (poly8x16_t __a)
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u16 (uint16x4_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_p16 (poly16x8_t __a)
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u32 (uint32x2_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_s16 (int16x4_t __a)
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p8 (poly8x8_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_s32 (int32x2_t __a)
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p16 (poly16x4_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_s64 (int64x1_t __a)
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_f32 (float32x2_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv2sf (__a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_f32 (float32x2_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p64 (poly64x1_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_u8 (uint8x8_t __a)
+#endif
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s64 (int64x1_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_u16 (uint16x4_t __a)
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u64 (uint64x1_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2sidi ((int64x1_t) __a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_u32 (uint32x2_t __a)
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s8 (int8x8_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi (__a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_u64 (uint64x1_t __a)
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s16 (int16x4_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi (__a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_p8 (poly8x8_t __a)
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u8 (uint8x8_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_p16 (poly16x4_t __a)
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u16 (uint16x4_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_s16 (int16x8_t __a)
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u32 (uint32x2_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv2si ((int32x2_t) __a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_s32 (int32x4_t __a)
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p8 (poly8x8_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_s64 (int64x2_t __a)
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p16 (poly16x4_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_f32 (float32x4_t __a)
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_f32 (float32x2_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_u8 (uint8x16_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p64 (poly64x1_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_u16 (uint16x8_t __a)
+#endif
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s64 (int64x1_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_u32 (uint32x4_t __a)
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u64 (uint64x1_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_u64 (uint64x2_t __a)
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s8 (int8x8_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv8qi (__a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_p8 (poly8x16_t __a)
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s16 (int16x4_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_p16 (poly16x8_t __a)
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s32 (int32x2_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_s8 (int8x8_t __a)
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u16 (uint16x4_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_s32 (int32x2_t __a)
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u32 (uint32x2_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_s64 (int64x1_t __a)
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p8 (poly8x8_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_f32 (float32x2_t __a)
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p16 (poly16x4_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_u8 (uint8x8_t __a)
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_f32 (float32x2_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_u16 (uint16x4_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p64 (poly64x1_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_u32 (uint32x2_t __a)
+#endif
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s64 (int64x1_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_u64 (uint64x1_t __a)
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u64 (uint64x1_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_p8 (poly8x8_t __a)
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s8 (int8x8_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_p16 (poly16x4_t __a)
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s16 (int16x4_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv4hi (__a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_s8 (int8x16_t __a)
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s32 (int32x2_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi (__a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_s32 (int32x4_t __a)
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u8 (uint8x8_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv4si (__a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_s64 (int64x2_t __a)
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u32 (uint32x2_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_f32 (float32x4_t __a)
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p8 (poly8x8_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_u8 (uint8x16_t __a)
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p16 (poly16x4_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_u16 (uint16x8_t __a)
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_f32 (float32x2_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv2sf (__a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_u32 (uint32x4_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p64 (poly64x1_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_u64 (uint64x2_t __a)
+#endif
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s64 (int64x1_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_p8 (poly8x16_t __a)
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u64 (uint64x1_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2sidi ((int64x1_t) __a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_p16 (poly16x8_t __a)
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s8 (int8x8_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_s8 (int8x8_t __a)
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s16 (int16x4_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi (__a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_s16 (int16x4_t __a)
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s32 (int32x2_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi (__a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv2si (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_s64 (int64x1_t __a)
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u8 (uint8x8_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_f32 (float32x2_t __a)
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u16 (uint16x4_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2siv2sf (__a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_u8 (uint8x8_t __a)
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_p16 (poly16x8_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_u16 (uint16x4_t __a)
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_f32 (float32x4_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_u32 (uint32x2_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_p64 (poly64x2_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2siv2si ((int32x2_t) __a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_u64 (uint64x1_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_p128 (poly128_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2sidi ((int64x1_t) __a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiti ((__builtin_neon_ti) __a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_p8 (poly8x8_t __a)
+#endif
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s64 (int64x2_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_p16 (poly16x4_t __a)
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u64 (uint64x2_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_s8 (int8x16_t __a)
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s8 (int8x16_t __a)
{
- return (int32x4_t)__builtin_neon_vreinterpretv4siv16qi (__a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv16qi (__a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_s16 (int16x8_t __a)
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s16 (int16x8_t __a)
{
- return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi (__a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_s64 (int64x2_t __a)
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s32 (int32x4_t __a)
{
- return (int32x4_t)__builtin_neon_vreinterpretv4siv2di (__a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_f32 (float32x4_t __a)
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u8 (uint8x16_t __a)
{
- return (int32x4_t)__builtin_neon_vreinterpretv4siv4sf (__a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_u8 (uint8x16_t __a)
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u16 (uint16x8_t __a)
{
- return (int32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_u16 (uint16x8_t __a)
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u32 (uint32x4_t __a)
{
- return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_u32 (uint32x4_t __a)
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_p8 (poly8x16_t __a)
{
- return (int32x4_t)__builtin_neon_vreinterpretv4siv4si ((int32x4_t) __a);
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_u64 (uint64x2_t __a)
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_f32 (float32x4_t __a)
{
- return (int32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_p64 (poly64x2_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_p128 (poly128_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s64 (int64x2_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u64 (uint64x2_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s8 (int8x16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s16 (int16x8_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv8hi (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s32 (int32x4_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u8 (uint8x16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u16 (uint16x8_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u32 (uint32x4_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p8 (poly8x16_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p16 (poly16x8_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi ((int16x8_t) __a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p64 (poly64x2_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p128 (poly128_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s64 (int64x2_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv2di (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u64 (uint64x2_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s8 (int8x16_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s16 (int16x8_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s32 (int32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv4si (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u8 (uint8x16_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u16 (uint16x8_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u32 (uint32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv4si ((int32x4_t) __a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_p8 (poly8x16_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_p16 (poly16x8_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_f32 (float32x4_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div4sf (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_p128 (poly128_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2diti ((__builtin_neon_ti) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_s64 (int64x2_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div2di (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_u64 (uint64x2_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_s8 (int8x16_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div16qi (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_s16 (int16x8_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div8hi (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_s32 (int32x4_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div4si (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_u8 (uint8x16_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_u16 (uint16x8_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_u32 (uint32x4_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div4si ((int32x4_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_p8 (poly8x16_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv16qi ((int8x16_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_p16 (poly16x8_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv8hi ((int16x8_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_f32 (float32x4_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv4sf (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_p64 (poly64x2_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_s64 (int64x2_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv2di (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_u64 (uint64x2_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_s8 (int8x16_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv16qi (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_s16 (int16x8_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv8hi (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_s32 (int32x4_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv4si (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_u8 (uint8x16_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv16qi ((int8x16_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_u16 (uint16x8_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv8hi ((int16x8_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_u32 (uint32x4_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv4si ((int32x4_t) __a);
+}
+
+#endif
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p8 (poly8x16_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p16 (poly16x8_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_f32 (float32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p64 (poly64x2_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p128 (poly128_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2diti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u64 (uint64x2_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s8 (int8x16_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div16qi (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s16 (int16x8_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div8hi (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s32 (int32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div4si (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u8 (uint8x16_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u16 (uint16x8_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u32 (uint32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p8 (poly8x16_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p16 (poly16x8_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_f32 (float32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p64 (poly64x2_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p128 (poly128_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2diti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s64 (int64x2_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div2di (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s8 (int8x16_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s16 (int16x8_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s32 (int32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div4si (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u8 (uint8x16_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u16 (uint16x8_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u32 (uint32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p8 (poly8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p16 (poly16x8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_f32 (float32x4_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p64 (poly64x2_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p128 (poly128_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s64 (int64x2_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u64 (uint64x2_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s16 (int16x8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s32 (int32x4_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u8 (uint8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u16 (uint16x8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u32 (uint32x4_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p8 (poly8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p16 (poly16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_f32 (float32x4_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p64 (poly64x2_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p128 (poly128_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s64 (int64x2_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u64 (uint64x2_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s8 (int8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s32 (int32x4_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u8 (uint8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u16 (uint16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u32 (uint32x4_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
@@ -11992,106 +12957,108 @@ vreinterpretq_s32_p16 (poly16x8_t __a)
return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_s8 (int8x8_t __a)
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_f32 (float32x4_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qiv8qi (__a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv4sf (__a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_s16 (int16x4_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_p64 (poly64x2_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_s32 (int32x2_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_p128 (poly128_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siti ((__builtin_neon_ti) __a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_s64 (int64x1_t __a)
+#endif
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s64 (int64x2_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv2di (__a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_f32 (float32x2_t __a)
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u64 (uint64x2_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_u16 (uint16x4_t __a)
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s8 (int8x16_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv16qi (__a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_u32 (uint32x2_t __a)
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s16 (int16x8_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi (__a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_u64 (uint64x1_t __a)
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u8 (uint8x16_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_p8 (poly8x8_t __a)
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u16 (uint16x8_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_p16 (poly16x4_t __a)
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u32 (uint32x4_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv4si ((int32x4_t) __a);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_s8 (int8x16_t __a)
+vreinterpretq_u8_p8 (poly8x16_t __a)
{
- return (uint8x16_t)__builtin_neon_vreinterpretv16qiv16qi (__a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_s16 (int16x8_t __a)
+vreinterpretq_u8_p16 (poly16x8_t __a)
{
- return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_s32 (int32x4_t __a)
+vreinterpretq_u8_f32 (float32x4_t __a)
{
- return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
}
+#ifdef __ARM_FEATURE_CRYPTO
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_s64 (int64x2_t __a)
+vreinterpretq_u8_p64 (poly64x2_t __a)
{
- return (uint8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_f32 (float32x4_t __a)
-{
- return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
}
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_u16 (uint16x8_t __a)
+vreinterpretq_u8_p128 (poly128_t __a)
{
- return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiti ((__builtin_neon_ti) __a);
}
+#endif
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_u32 (uint32x4_t __a)
+vreinterpretq_u8_s64 (int64x2_t __a)
{
- return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
@@ -12101,75 +13068,79 @@ vreinterpretq_u8_u64 (uint64x2_t __a)
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_p8 (poly8x16_t __a)
+vreinterpretq_u8_s8 (int8x16_t __a)
{
- return (uint8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv16qi (__a);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_p16 (poly16x8_t __a)
+vreinterpretq_u8_s16 (int16x8_t __a)
{
- return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_s8 (int8x8_t __a)
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s32 (int32x4_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_s16 (int16x4_t __a)
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u16 (uint16x8_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hiv4hi (__a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_s32 (int32x2_t __a)
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u32 (uint32x4_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_s64 (int64x1_t __a)
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p8 (poly8x16_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_f32 (float32x2_t __a)
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p16 (poly16x8_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_u8 (uint8x8_t __a)
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_f32 (float32x4_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_u32 (uint32x2_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p64 (poly64x2_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_u64 (uint64x1_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p128 (poly128_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiti ((__builtin_neon_ti) __a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_p8 (poly8x8_t __a)
+#endif
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s64 (int64x2_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_p16 (poly16x4_t __a)
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u64 (uint64x2_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
@@ -12191,167 +13162,266 @@ vreinterpretq_u16_s32 (int32x4_t __a)
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_s64 (int64x2_t __a)
+vreinterpretq_u16_u8 (uint8x16_t __a)
{
- return (uint16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_f32 (float32x4_t __a)
+vreinterpretq_u16_u32 (uint32x4_t __a)
{
- return (uint16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_u8 (uint8x16_t __a)
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p8 (poly8x16_t __a)
{
- return (uint16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_u32 (uint32x4_t __a)
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p16 (poly16x8_t __a)
{
- return (uint16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_u64 (uint64x2_t __a)
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_f32 (float32x4_t __a)
{
- return (uint16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv4sf (__a);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_p8 (poly8x16_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p64 (poly64x2_t __a)
{
- return (uint16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_p16 (poly16x8_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p128 (poly128_t __a)
{
- return (uint16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siti ((__builtin_neon_ti) __a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_s8 (int8x8_t __a)
+#endif
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s64 (int64x2_t __a)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi (__a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv2di (__a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_s16 (int16x4_t __a)
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u64 (uint64x2_t __a)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi (__a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_s32 (int32x2_t __a)
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s8 (int8x16_t __a)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2siv2si (__a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi (__a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_s64 (int64x1_t __a)
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s16 (int16x8_t __a)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi (__a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_f32 (float32x2_t __a)
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s32 (int32x4_t __a)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2siv2sf (__a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv4si (__a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_u8 (uint8x8_t __a)
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u8 (uint8x16_t __a)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_u16 (uint16x4_t __a)
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u16 (uint16x8_t __a)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_u64 (uint64x1_t __a)
+
+#ifdef __ARM_FEATURE_CRYPTO
+
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vldrq_p128 (poly128_t const * __ptr)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2sidi ((int64x1_t) __a);
+#ifdef __ARM_BIG_ENDIAN
+ poly64_t* __ptmp = (poly64_t*) __ptr;
+ poly64_t __d0 = vld1_p64 (__ptmp);
+ poly64_t __d1 = vld1_p64 (__ptmp + 1);
+ return vreinterpretq_p128_p64 (vcombine_p64 (__d1, __d0));
+#else
+ return vreinterpretq_p128_p64 (vld1q_p64 ((poly64_t*) __ptr));
+#endif
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_p8 (poly8x8_t __a)
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vstrq_p128 (poly128_t * __ptr, poly128_t __val)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+#ifdef __ARM_BIG_ENDIAN
+ poly64x2_t __tmp = vreinterpretq_p64_p128 (__val);
+ poly64_t __d0 = vget_high_p64 (__tmp);
+ poly64_t __d1 = vget_low_p64 (__tmp);
+ vst1q_p64 ((poly64_t*) __ptr, vcombine_p64 (__d0, __d1));
+#else
+ vst1q_p64 ((poly64_t*) __ptr, vreinterpretq_p64_p128 (__val));
+#endif
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_p16 (poly16x4_t __a)
+/* The vceq_p64 intrinsic does not map to a single instruction.
+ Instead we emulate it by performing a 32-bit variant of the vceq
+ and applying a pairwise min reduction to the result.
+ vceq_u32 will produce two 32-bit halves, each of which will contain either
+ all ones or all zeros depending on whether the corresponding 32-bit
+ halves of the poly64_t were equal. The whole poly64_t values are equal
+ if and only if both halves are equal, i.e. vceq_u32 returns all ones.
+ If the result is all zeroes for either half then the whole result is zeroes.
+ This is what the pairwise min reduction achieves. */
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceq_p64 (poly64x1_t __a, poly64x1_t __b)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+ uint32x2_t __t_a = vreinterpret_u32_p64 (__a);
+ uint32x2_t __t_b = vreinterpret_u32_p64 (__b);
+ uint32x2_t __c = vceq_u32 (__t_a, __t_b);
+ uint32x2_t __m = vpmin_u32 (__c, __c);
+ return vreinterpret_u64_u32 (__m);
}
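For illustration only (not part of this patch), a minimal sketch of the vceq_p64 behaviour described in the comment above, assuming a crypto-enabled arm_neon.h that also provides vcreate_p64:

    #include <arm_neon.h>

    /* Hypothetical example: vceq_p64 returns an all-ones 64-bit mask when the
       two poly64_t operands are bit-identical, and all zeros otherwise.  */
    static void
    example_vceq_p64 (void)
    {
      poly64x1_t a = vcreate_p64 (0x0123456789abcdefULL);
      poly64x1_t b = vcreate_p64 (0x0123456789abcdefULL);
      uint64x1_t eq = vceq_p64 (a, b);                      /* all ones  */
      uint64x1_t ne = vceq_p64 (a, vcreate_p64 (0ULL));     /* all zeros */
      (void) eq;
      (void) ne;
    }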
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_s8 (int8x16_t __a)
+/* The vtst_p64 intrinsic does not map to a single instruction.
+ We emulate it in a way similar to vceq_p64 above, but here we do
+ a reduction with max, since if any corresponding pair of bits in
+ the two poly64_t values are both set, the whole result must be all ones. */
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vtst_p64 (poly64x1_t __a, poly64x1_t __b)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi (__a);
+ uint32x2_t __t_a = vreinterpret_u32_p64 (__a);
+ uint32x2_t __t_b = vreinterpret_u32_p64 (__b);
+ uint32x2_t __c = vtst_u32 (__t_a, __t_b);
+ uint32x2_t __m = vpmax_u32 (__c, __c);
+ return vreinterpret_u64_u32 (__m);
+}
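Similarly, an illustrative sketch (not part of the patch) of the vtst_p64 behaviour described above; vcreate_p64 is again assumed to be available from the same header:

    /* Hypothetical example: vtst_p64 returns all ones when the operands share
       at least one set bit, and all zeros when their AND is zero.  */
    static void
    example_vtst_p64 (void)
    {
      poly64x1_t a = vcreate_p64 (0xff00ULL);
      poly64x1_t b = vcreate_p64 (0x0ff0ULL);                  /* overlaps a */
      uint64x1_t any  = vtst_p64 (a, b);                       /* all ones  */
      uint64x1_t none = vtst_p64 (a, vcreate_p64 (0x00ffULL)); /* all zeros */
      (void) any;
      (void) none;
    }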
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaeseq_u8 (uint8x16_t __data, uint8x16_t __key)
+{
+ return __builtin_arm_crypto_aese (__data, __key);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaesdq_u8 (uint8x16_t __data, uint8x16_t __key)
+{
+ return __builtin_arm_crypto_aesd (__data, __key);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaesmcq_u8 (uint8x16_t __data)
+{
+ return __builtin_arm_crypto_aesmc (__data);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaesimcq_u8 (uint8x16_t __data)
+{
+ return __builtin_arm_crypto_aesimc (__data);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vsha1h_u32 (uint32_t __hash_e)
+{
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ __t = __builtin_arm_crypto_sha1h (__t);
+ return vgetq_lane_u32 (__t, 0);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_s16 (int16x8_t __a)
+vsha1cq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi (__a);
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ return __builtin_arm_crypto_sha1c (__hash_abcd, __t, __wk);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_s32 (int32x4_t __a)
+vsha1pq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv4si (__a);
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ return __builtin_arm_crypto_sha1p (__hash_abcd, __t, __wk);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_s64 (int64x2_t __a)
+vsha1mq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv2di (__a);
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ return __builtin_arm_crypto_sha1m (__hash_abcd, __t, __wk);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_f32 (float32x4_t __a)
+vsha1su0q_u32 (uint32x4_t __w0_3, uint32x4_t __w4_7, uint32x4_t __w8_11)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv4sf (__a);
+ return __builtin_arm_crypto_sha1su0 (__w0_3, __w4_7, __w8_11);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_u8 (uint8x16_t __a)
+vsha1su1q_u32 (uint32x4_t __tw0_3, uint32x4_t __w12_15)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
+ return __builtin_arm_crypto_sha1su1 (__tw0_3, __w12_15);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_u16 (uint16x8_t __a)
+vsha256hq_u32 (uint32x4_t __hash_abcd, uint32x4_t __hash_efgh, uint32x4_t __wk)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
+ return __builtin_arm_crypto_sha256h (__hash_abcd, __hash_efgh, __wk);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_u64 (uint64x2_t __a)
+vsha256h2q_u32 (uint32x4_t __hash_abcd, uint32x4_t __hash_efgh, uint32x4_t __wk)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
+ return __builtin_arm_crypto_sha256h2 (__hash_abcd, __hash_efgh, __wk);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_p8 (poly8x16_t __a)
+vsha256su0q_u32 (uint32x4_t __w0_3, uint32x4_t __w4_7)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
+ return __builtin_arm_crypto_sha256su0 (__w0_3, __w4_7);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_p16 (poly16x8_t __a)
+vsha256su1q_u32 (uint32x4_t __tw0_3, uint32x4_t __w8_11, uint32x4_t __w12_15)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
+ return __builtin_arm_crypto_sha256su1 (__tw0_3, __w8_11, __w12_15);
+}
+
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vmull_p64 (poly64_t __a, poly64_t __b)
+{
+ return (poly128_t) __builtin_arm_crypto_vmullp64 ((uint64_t) __a, (uint64_t) __b);
}
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vmull_high_p64 (poly64x2_t __a, poly64x2_t __b)
+{
+ poly64_t __t1 = vget_high_p64 (__a);
+ poly64_t __t2 = vget_high_p64 (__b);
+
+ return (poly128_t) __builtin_arm_crypto_vmullp64 ((uint64_t) __t1, (uint64_t) __t2);
+}
+
+#endif
#ifdef __cplusplus
}
#endif
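For context, a hedged usage sketch of the SHA-256 intrinsics defined by the header above. The round-constant array K0_3, the state layout and the helper name are illustrative assumptions, not part of the patch; each vsha256hq_u32/vsha256h2q_u32 pair advances the compression function by four rounds:

    #include <arm_neon.h>

    /* Assumed for illustration: the first four SHA-256 round constants.  */
    static const uint32_t K0_3[4] = {
      0x428a2f98u, 0x71374491u, 0xb5c0fbcfu, 0xe9b5dba5u
    };

    /* Four rounds of SHA-256.  state0 holds {a,b,c,d}, state1 holds {e,f,g,h};
       msg0..msg3 hold the current sixteen message-schedule words.  */
    static void
    sha256_four_rounds (uint32x4_t *state0, uint32x4_t *state1,
                        uint32x4_t *msg0, uint32x4_t msg1,
                        uint32x4_t msg2, uint32x4_t msg3)
    {
      uint32x4_t wk = vaddq_u32 (*msg0, vld1q_u32 (K0_3));
      uint32x4_t abcd = *state0;
      *state0 = vsha256hq_u32 (*state0, *state1, wk);
      *state1 = vsha256h2q_u32 (*state1, abcd, wk);
      /* Extend the message schedule for the following rounds.  */
      *msg0 = vsha256su1q_u32 (vsha256su0q_u32 (*msg0, msg1), msg2, msg3);
    }

Such code only compiles when __ARM_FEATURE_CRYPTO is defined, i.e. when a crypto-capable FPU such as -mfpu=crypto-neon-fp-armv8 is selected.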
diff --git a/gcc/config/arm/arm_neon_builtins.def b/gcc/config/arm/arm_neon_builtins.def
index 92f1d7ad1c4..5fa17bd0bf1 100644
--- a/gcc/config/arm/arm_neon_builtins.def
+++ b/gcc/config/arm/arm_neon_builtins.def
@@ -158,11 +158,12 @@ VAR5 (REINTERP, vreinterpretv4hi, v8qi, v4hi, v2si, v2sf, di),
VAR5 (REINTERP, vreinterpretv2si, v8qi, v4hi, v2si, v2sf, di),
VAR5 (REINTERP, vreinterpretv2sf, v8qi, v4hi, v2si, v2sf, di),
VAR5 (REINTERP, vreinterpretdi, v8qi, v4hi, v2si, v2sf, di),
-VAR5 (REINTERP, vreinterpretv16qi, v16qi, v8hi, v4si, v4sf, v2di),
-VAR5 (REINTERP, vreinterpretv8hi, v16qi, v8hi, v4si, v4sf, v2di),
-VAR5 (REINTERP, vreinterpretv4si, v16qi, v8hi, v4si, v4sf, v2di),
-VAR5 (REINTERP, vreinterpretv4sf, v16qi, v8hi, v4si, v4sf, v2di),
-VAR5 (REINTERP, vreinterpretv2di, v16qi, v8hi, v4si, v4sf, v2di),
+VAR6 (REINTERP, vreinterpretv16qi, v16qi, v8hi, v4si, v4sf, v2di, ti),
+VAR6 (REINTERP, vreinterpretv8hi, v16qi, v8hi, v4si, v4sf, v2di, ti),
+VAR6 (REINTERP, vreinterpretv4si, v16qi, v8hi, v4si, v4sf, v2di, ti),
+VAR6 (REINTERP, vreinterpretv4sf, v16qi, v8hi, v4si, v4sf, v2di, ti),
+VAR6 (REINTERP, vreinterpretv2di, v16qi, v8hi, v4si, v4sf, v2di, ti),
+VAR6 (REINTERP, vreinterpretti, v16qi, v8hi, v4si, v4sf, v2di, ti),
VAR10 (LOAD1, vld1,
v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di),
VAR10 (LOAD1LANE, vld1_lane,
diff --git a/gcc/config/arm/iterators.md b/gcc/config/arm/iterators.md
index 172efe74d0e..ffe4ceb7b76 100644
--- a/gcc/config/arm/iterators.md
+++ b/gcc/config/arm/iterators.md
@@ -201,6 +201,20 @@
(define_int_iterator NEON_VRINT [UNSPEC_NVRINTP UNSPEC_NVRINTZ UNSPEC_NVRINTM
UNSPEC_NVRINTX UNSPEC_NVRINTA UNSPEC_NVRINTN])
+(define_int_iterator CRC [UNSPEC_CRC32B UNSPEC_CRC32H UNSPEC_CRC32W
+ UNSPEC_CRC32CB UNSPEC_CRC32CH UNSPEC_CRC32CW])
+
+(define_int_iterator CRYPTO_UNARY [UNSPEC_AESMC UNSPEC_AESIMC])
+
+(define_int_iterator CRYPTO_BINARY [UNSPEC_AESD UNSPEC_AESE
+ UNSPEC_SHA1SU1 UNSPEC_SHA256SU0])
+
+(define_int_iterator CRYPTO_TERNARY [UNSPEC_SHA1SU0 UNSPEC_SHA256H
+ UNSPEC_SHA256H2 UNSPEC_SHA256SU1])
+
+(define_int_iterator CRYPTO_SELECTING [UNSPEC_SHA1C UNSPEC_SHA1M
+ UNSPEC_SHA1P])
+
;;----------------------------------------------------------------------------
;; Mode attributes
;;----------------------------------------------------------------------------
@@ -500,6 +514,49 @@
(define_int_attr nvrint_variant [(UNSPEC_NVRINTZ "z") (UNSPEC_NVRINTP "p")
(UNSPEC_NVRINTA "a") (UNSPEC_NVRINTM "m")
(UNSPEC_NVRINTX "x") (UNSPEC_NVRINTN "n")])
+
+(define_int_attr crc_variant [(UNSPEC_CRC32B "crc32b") (UNSPEC_CRC32H "crc32h")
+ (UNSPEC_CRC32W "crc32w") (UNSPEC_CRC32CB "crc32cb")
+ (UNSPEC_CRC32CH "crc32ch") (UNSPEC_CRC32CW "crc32cw")])
+
+(define_int_attr crc_mode [(UNSPEC_CRC32B "QI") (UNSPEC_CRC32H "HI")
+ (UNSPEC_CRC32W "SI") (UNSPEC_CRC32CB "QI")
+ (UNSPEC_CRC32CH "HI") (UNSPEC_CRC32CW "SI")])
+
+(define_int_attr crypto_pattern [(UNSPEC_SHA1H "sha1h") (UNSPEC_AESMC "aesmc")
+ (UNSPEC_AESIMC "aesimc") (UNSPEC_AESD "aesd")
+ (UNSPEC_AESE "aese") (UNSPEC_SHA1SU1 "sha1su1")
+ (UNSPEC_SHA256SU0 "sha256su0") (UNSPEC_SHA1C "sha1c")
+ (UNSPEC_SHA1M "sha1m") (UNSPEC_SHA1P "sha1p")
+ (UNSPEC_SHA1SU0 "sha1su0") (UNSPEC_SHA256H "sha256h")
+ (UNSPEC_SHA256H2 "sha256h2")
+ (UNSPEC_SHA256SU1 "sha256su1")])
+
+(define_int_attr crypto_type
+ [(UNSPEC_AESE "neon_crypto_aes") (UNSPEC_AESD "neon_crypto_aes")
+ (UNSPEC_AESMC "neon_crypto_aes") (UNSPEC_AESIMC "neon_crypto_aes")
+ (UNSPEC_SHA1C "neon_crypto_sha1_slow") (UNSPEC_SHA1P "neon_crypto_sha1_slow")
+ (UNSPEC_SHA1M "neon_crypto_sha1_slow") (UNSPEC_SHA1SU1 "neon_crypto_sha1_fast")
+ (UNSPEC_SHA1SU0 "neon_crypto_sha1_xor") (UNSPEC_SHA256H "neon_crypto_sha256_slow")
+ (UNSPEC_SHA256H2 "neon_crypto_sha256_slow") (UNSPEC_SHA256SU0 "neon_crypto_sha256_fast")
+ (UNSPEC_SHA256SU1 "neon_crypto_sha256_slow")])
+
+(define_int_attr crypto_size_sfx [(UNSPEC_SHA1H "32") (UNSPEC_AESMC "8")
+ (UNSPEC_AESIMC "8") (UNSPEC_AESD "8")
+ (UNSPEC_AESE "8") (UNSPEC_SHA1SU1 "32")
+ (UNSPEC_SHA256SU0 "32") (UNSPEC_SHA1C "32")
+ (UNSPEC_SHA1M "32") (UNSPEC_SHA1P "32")
+ (UNSPEC_SHA1SU0 "32") (UNSPEC_SHA256H "32")
+ (UNSPEC_SHA256H2 "32") (UNSPEC_SHA256SU1 "32")])
+
+(define_int_attr crypto_mode [(UNSPEC_SHA1H "V4SI") (UNSPEC_AESMC "V16QI")
+ (UNSPEC_AESIMC "V16QI") (UNSPEC_AESD "V16QI")
+ (UNSPEC_AESE "V16QI") (UNSPEC_SHA1SU1 "V4SI")
+ (UNSPEC_SHA256SU0 "V4SI") (UNSPEC_SHA1C "V4SI")
+ (UNSPEC_SHA1M "V4SI") (UNSPEC_SHA1P "V4SI")
+ (UNSPEC_SHA1SU0 "V4SI") (UNSPEC_SHA256H "V4SI")
+ (UNSPEC_SHA256H2 "V4SI") (UNSPEC_SHA256SU1 "V4SI")])
+
;; Both kinds of return insn.
(define_code_iterator returns [return simple_return])
(define_code_attr return_str [(return "") (simple_return "simple_")])
diff --git a/gcc/config/arm/neon-docgen.ml b/gcc/config/arm/neon-docgen.ml
index f17314f2ab3..46cae14fdc2 100644
--- a/gcc/config/arm/neon-docgen.ml
+++ b/gcc/config/arm/neon-docgen.ml
@@ -329,6 +329,85 @@ let gnu_header chan =
"@c This file is generated automatically using gcc/config/arm/neon-docgen.ml";
"@c Please do not edit manually."]
+let crypto_doc =
+"
+@itemize @bullet
+@item poly128_t vldrq_p128(poly128_t const *)
+@end itemize
+
+@itemize @bullet
+@item void vstrq_p128(poly128_t *, poly128_t)
+@end itemize
+
+@itemize @bullet
+@item uint64x1_t vceq_p64 (poly64x1_t, poly64x1_t)
+@end itemize
+
+@itemize @bullet
+@item uint64x1_t vtst_p64 (poly64x1_t, poly64x1_t)
+@end itemize
+
+@itemize @bullet
+@item uint32_t vsha1h_u32 (uint32_t)
+@*@emph{Form of expected instruction(s):} @code{sha1h.32 @var{q0}, @var{q1}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha1cq_u32 (uint32x4_t, uint32_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha1c.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha1pq_u32 (uint32x4_t, uint32_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha1p.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha1mq_u32 (uint32x4_t, uint32_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha1m.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha1su0q_u32 (uint32x4_t, uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha1su0.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha1su1q_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha1su1.32 @var{q0}, @var{q1}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha256hq_u32 (uint32x4_t, uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha256h.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha256h2q_u32 (uint32x4_t, uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha256h2.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha256su0q_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha256su0.32 @var{q0}, @var{q1}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha256su1q_u32 (uint32x4_t, uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha256su1.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
+
+@itemize @bullet
+@item poly128_t vmull_p64 (poly64_t a, poly64_t b)
+@*@emph{Form of expected instruction(s):} @code{vmull.p64 @var{q0}, @var{d1}, @var{d2}}
+@end itemize
+
+@itemize @bullet
+@item poly128_t vmull_high_p64 (poly64x2_t a, poly64x2_t b)
+@*@emph{Form of expected instruction(s):} @code{vmull.p64 @var{q0}, @var{d1}, @var{d2}}
+@end itemize
+"
+
(* Program entry point. *)
let _ =
if Array.length Sys.argv <> 2 then
@@ -339,6 +418,7 @@ let _ =
let chan = open_out file in
gnu_header chan;
List.iter (document_group chan) intrinsic_groups;
+ Printf.fprintf chan "%s\n" crypto_doc;
close_out chan
with Sys_error sys ->
failwith ("Could not create output file " ^ file ^ ": " ^ sys)
diff --git a/gcc/config/arm/neon-gen.ml b/gcc/config/arm/neon-gen.ml
index 948b162ccfa..e5da658687f 100644
--- a/gcc/config/arm/neon-gen.ml
+++ b/gcc/config/arm/neon-gen.ml
@@ -114,6 +114,7 @@ let rec signed_ctype = function
| T_uint32x4 -> T_int32x4
| T_uint64x1 -> T_int64x1
| T_uint64x2 -> T_int64x2
+ | T_poly64x2 -> T_int64x2
(* Cast to types defined by mode in arm.c, not random types pulled in from
the <stdint.h> header in use. This fixes incompatible pointer errors when
compiling with C++. *)
@@ -125,6 +126,8 @@ let rec signed_ctype = function
| T_float32 -> T_floatSF
| T_poly8 -> T_intQI
| T_poly16 -> T_intHI
+ | T_poly64 -> T_intDI
+ | T_poly128 -> T_intTI
| T_arrayof (n, elt) -> T_arrayof (n, signed_ctype elt)
| T_ptrto elt -> T_ptrto (signed_ctype elt)
| T_const elt -> T_const (signed_ctype elt)
@@ -362,80 +365,96 @@ let print_ops ops =
abase : "ARM" base name for the type (i.e. int in int8x8_t).
esize : element size.
enum : element count.
+ alevel: architecture level at which available.
*)
+type fpulevel = CRYPTO | ALL
+
let deftypes () =
let typeinfo = [
(* Doubleword vector types. *)
- "__builtin_neon_qi", "int", 8, 8;
- "__builtin_neon_hi", "int", 16, 4;
- "__builtin_neon_si", "int", 32, 2;
- "__builtin_neon_di", "int", 64, 1;
- "__builtin_neon_hf", "float", 16, 4;
- "__builtin_neon_sf", "float", 32, 2;
- "__builtin_neon_poly8", "poly", 8, 8;
- "__builtin_neon_poly16", "poly", 16, 4;
- "__builtin_neon_uqi", "uint", 8, 8;
- "__builtin_neon_uhi", "uint", 16, 4;
- "__builtin_neon_usi", "uint", 32, 2;
- "__builtin_neon_udi", "uint", 64, 1;
+ "__builtin_neon_qi", "int", 8, 8, ALL;
+ "__builtin_neon_hi", "int", 16, 4, ALL;
+ "__builtin_neon_si", "int", 32, 2, ALL;
+ "__builtin_neon_di", "int", 64, 1, ALL;
+ "__builtin_neon_hf", "float", 16, 4, ALL;
+ "__builtin_neon_sf", "float", 32, 2, ALL;
+ "__builtin_neon_poly8", "poly", 8, 8, ALL;
+ "__builtin_neon_poly16", "poly", 16, 4, ALL;
+ "__builtin_neon_poly64", "poly", 64, 1, CRYPTO;
+ "__builtin_neon_uqi", "uint", 8, 8, ALL;
+ "__builtin_neon_uhi", "uint", 16, 4, ALL;
+ "__builtin_neon_usi", "uint", 32, 2, ALL;
+ "__builtin_neon_udi", "uint", 64, 1, ALL;
(* Quadword vector types. *)
- "__builtin_neon_qi", "int", 8, 16;
- "__builtin_neon_hi", "int", 16, 8;
- "__builtin_neon_si", "int", 32, 4;
- "__builtin_neon_di", "int", 64, 2;
- "__builtin_neon_sf", "float", 32, 4;
- "__builtin_neon_poly8", "poly", 8, 16;
- "__builtin_neon_poly16", "poly", 16, 8;
- "__builtin_neon_uqi", "uint", 8, 16;
- "__builtin_neon_uhi", "uint", 16, 8;
- "__builtin_neon_usi", "uint", 32, 4;
- "__builtin_neon_udi", "uint", 64, 2
+ "__builtin_neon_qi", "int", 8, 16, ALL;
+ "__builtin_neon_hi", "int", 16, 8, ALL;
+ "__builtin_neon_si", "int", 32, 4, ALL;
+ "__builtin_neon_di", "int", 64, 2, ALL;
+ "__builtin_neon_sf", "float", 32, 4, ALL;
+ "__builtin_neon_poly8", "poly", 8, 16, ALL;
+ "__builtin_neon_poly16", "poly", 16, 8, ALL;
+ "__builtin_neon_poly64", "poly", 64, 2, CRYPTO;
+ "__builtin_neon_uqi", "uint", 8, 16, ALL;
+ "__builtin_neon_uhi", "uint", 16, 8, ALL;
+ "__builtin_neon_usi", "uint", 32, 4, ALL;
+ "__builtin_neon_udi", "uint", 64, 2, ALL
] in
List.iter
- (fun (cbase, abase, esize, enum) ->
+ (fun (cbase, abase, esize, enum, fpulevel) ->
let attr =
match enum with
1 -> ""
| _ -> Printf.sprintf "\t__attribute__ ((__vector_size__ (%d)))"
(esize * enum / 8) in
- Format.printf "typedef %s %s%dx%d_t%s;@\n" cbase abase esize enum attr)
+ if fpulevel == CRYPTO then
+ Format.printf "#ifdef __ARM_FEATURE_CRYPTO\n";
+ Format.printf "typedef %s %s%dx%d_t%s;@\n" cbase abase esize enum attr;
+ if fpulevel == CRYPTO then
+ Format.printf "#endif\n";)
typeinfo;
Format.print_newline ();
(* Extra types not in <stdint.h>. *)
Format.printf "typedef float float32_t;\n";
Format.printf "typedef __builtin_neon_poly8 poly8_t;\n";
- Format.printf "typedef __builtin_neon_poly16 poly16_t;\n"
+ Format.printf "typedef __builtin_neon_poly16 poly16_t;\n";
+ Format.printf "#ifdef __ARM_FEATURE_CRYPTO\n";
+ Format.printf "typedef __builtin_neon_poly64 poly64_t;\n";
+ Format.printf "typedef __builtin_neon_poly128 poly128_t;\n";
+ Format.printf "#endif\n"
-(* Output structs containing arrays, for load & store instructions etc. *)
+(* Output structs containing arrays, for load & store instructions etc.
+ poly128_t is deliberately not included here because it has no array types
+ defined for it. *)
let arrtypes () =
let typeinfo = [
- "int", 8; "int", 16;
- "int", 32; "int", 64;
- "uint", 8; "uint", 16;
- "uint", 32; "uint", 64;
- "float", 32; "poly", 8;
- "poly", 16
+ "int", 8, ALL; "int", 16, ALL;
+ "int", 32, ALL; "int", 64, ALL;
+ "uint", 8, ALL; "uint", 16, ALL;
+ "uint", 32, ALL; "uint", 64, ALL;
+ "float", 32, ALL; "poly", 8, ALL;
+ "poly", 16, ALL; "poly", 64, CRYPTO
] in
- let writestruct elname elsize regsize arrsize =
+ let writestruct elname elsize regsize arrsize fpulevel =
let elnum = regsize / elsize in
let structname =
Printf.sprintf "%s%dx%dx%d_t" elname elsize elnum arrsize in
let sfmt = start_function () in
- Format.printf "typedef struct %s" structname;
+ Format.printf "%stypedef struct %s"
+ (if fpulevel == CRYPTO then "#ifdef __ARM_FEATURE_CRYPTO\n" else "") structname;
open_braceblock sfmt;
Format.printf "%s%dx%d_t val[%d];" elname elsize elnum arrsize;
close_braceblock sfmt;
- Format.printf " %s;" structname;
+ Format.printf " %s;%s" structname (if fpulevel == CRYPTO then "\n#endif\n" else "");
end_function sfmt;
in
for n = 2 to 4 do
List.iter
- (fun (elname, elsize) ->
- writestruct elname elsize 64 n;
- writestruct elname elsize 128 n)
+ (fun (elname, elsize, alevel) ->
+ writestruct elname elsize 64 n alevel;
+ writestruct elname elsize 128 n alevel)
typeinfo
done
@@ -491,6 +510,8 @@ let _ =
print_ops ops;
Format.print_newline ();
print_ops reinterp;
+ print_ops reinterpq;
+ Format.printf "%s" crypto_intrinsics;
print_lines [
"#ifdef __cplusplus";
"}";
diff --git a/gcc/config/arm/neon-testgen.ml b/gcc/config/arm/neon-testgen.ml
index 543318bfcc6..e1e4e250787 100644
--- a/gcc/config/arm/neon-testgen.ml
+++ b/gcc/config/arm/neon-testgen.ml
@@ -167,6 +167,7 @@ let effective_target features =
| _ -> false)
features with
Requires_feature "FMA" -> "arm_neonv2"
+ | Requires_feature "CRYPTO" -> "arm_crypto"
| Requires_arch 8 -> "arm_v8_neon"
| Requires_FP_bit 1 -> "arm_neon_fp16"
| _ -> assert false
@@ -300,5 +301,5 @@ let test_intrinsic_group dir (opcode, features, shape, name, munge, types) =
(* Program entry point. *)
let _ =
let directory = if Array.length Sys.argv <> 1 then Sys.argv.(1) else "." in
- List.iter (test_intrinsic_group directory) (reinterp @ ops)
+ List.iter (test_intrinsic_group directory) (reinterp @ reinterpq @ ops)
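
With "CRYPTO" mapped to the arm_crypto effective target, the tests generated for the new poly64/poly128 variants get a dejagnu prologue along the following lines (the prologue-emitting code is not part of this hunk, so the exact directives here are only an illustration):

/* Test the `vcreate_p64' ARM Neon intrinsic.  */
/* This file was autogenerated by neon-testgen.  */

/* { dg-do assemble } */
/* { dg-require-effective-target arm_crypto_ok } */
/* { dg-options "-save-temps -O0" } */
/* { dg-add-options arm_crypto } */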
diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index d4445b57124..7442cabe16d 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -4500,9 +4500,19 @@
DONE;
})
+(define_expand "neon_vreinterpretti<mode>"
+ [(match_operand:TI 0 "s_register_operand" "")
+ (match_operand:VQXMOV 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ neon_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+
(define_expand "neon_vreinterpretv16qi<mode>"
[(match_operand:V16QI 0 "s_register_operand" "")
- (match_operand:VQX 1 "s_register_operand" "")]
+ (match_operand:VQXMOV 1 "s_register_operand" "")]
"TARGET_NEON"
{
neon_reinterpret (operands[0], operands[1]);
@@ -4511,7 +4521,7 @@
(define_expand "neon_vreinterpretv8hi<mode>"
[(match_operand:V8HI 0 "s_register_operand" "")
- (match_operand:VQX 1 "s_register_operand" "")]
+ (match_operand:VQXMOV 1 "s_register_operand" "")]
"TARGET_NEON"
{
neon_reinterpret (operands[0], operands[1]);
@@ -4520,7 +4530,7 @@
(define_expand "neon_vreinterpretv4si<mode>"
[(match_operand:V4SI 0 "s_register_operand" "")
- (match_operand:VQX 1 "s_register_operand" "")]
+ (match_operand:VQXMOV 1 "s_register_operand" "")]
"TARGET_NEON"
{
neon_reinterpret (operands[0], operands[1]);
@@ -4529,7 +4539,7 @@
(define_expand "neon_vreinterpretv4sf<mode>"
[(match_operand:V4SF 0 "s_register_operand" "")
- (match_operand:VQX 1 "s_register_operand" "")]
+ (match_operand:VQXMOV 1 "s_register_operand" "")]
"TARGET_NEON"
{
neon_reinterpret (operands[0], operands[1]);
@@ -4538,7 +4548,7 @@
(define_expand "neon_vreinterpretv2di<mode>"
[(match_operand:V2DI 0 "s_register_operand" "")
- (match_operand:VQX 1 "s_register_operand" "")]
+ (match_operand:VQXMOV 1 "s_register_operand" "")]
"TARGET_NEON"
{
neon_reinterpret (operands[0], operands[1]);
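
At the intrinsic level, the TImode expander and the VQXMOV-widened patterns above are what back the new poly128_t reinterprets; a small sketch of what they enable (function names invented for the example):

#include <arm_neon.h>

#ifdef __ARM_FEATURE_CRYPTO
/* Move a poly128_t value to and from a byte vector without going through
   memory; both directions expand via the reinterpret patterns above.  */
uint8x16_t
p128_as_bytes (poly128_t x)
{
  return vreinterpretq_u8_p128 (x);
}

poly128_t
bytes_as_p128 (uint8x16_t b)
{
  return vreinterpretq_p128_u8 (b);
}
#endif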
diff --git a/gcc/config/arm/neon.ml b/gcc/config/arm/neon.ml
index ca9a4c06aa6..738ee066bb0 100644
--- a/gcc/config/arm/neon.ml
+++ b/gcc/config/arm/neon.ml
@@ -22,7 +22,7 @@
(* Shorthand types for vector elements. *)
type elts = S8 | S16 | S32 | S64 | F16 | F32 | U8 | U16 | U32 | U64 | P8 | P16
- | I8 | I16 | I32 | I64 | B8 | B16 | B32 | B64 | Conv of elts * elts
+ | P64 | P128 | I8 | I16 | I32 | I64 | B8 | B16 | B32 | B64 | Conv of elts * elts
| Cast of elts * elts | NoElts
type eltclass = Signed | Unsigned | Float | Poly | Int | Bits
@@ -47,13 +47,15 @@ type vectype = T_int8x8 | T_int8x16
| T_uint8 | T_uint16
| T_uint32 | T_uint64
| T_poly8 | T_poly16
+ | T_poly64 | T_poly64x1
+ | T_poly64x2 | T_poly128
| T_float16 | T_float32
| T_arrayof of int * vectype
| T_ptrto of vectype | T_const of vectype
| T_void | T_intQI
| T_intHI | T_intSI
- | T_intDI | T_floatHF
- | T_floatSF
+ | T_intDI | T_intTI
+ | T_floatHF | T_floatSF
(* The meanings of the following are:
TImode : "Tetra", two registers (four words).
@@ -96,7 +98,7 @@ type arity = Arity0 of vectype
| Arity4 of vectype * vectype * vectype * vectype * vectype
type vecmode = V8QI | V4HI | V4HF |V2SI | V2SF | DI
- | V16QI | V8HI | V4SI | V4SF | V2DI
+ | V16QI | V8HI | V4SI | V4SF | V2DI | TI
| QI | HI | SI | SF
type opcode =
@@ -299,7 +301,8 @@ let rec elt_width = function
S8 | U8 | P8 | I8 | B8 -> 8
| S16 | U16 | P16 | I16 | B16 | F16 -> 16
| S32 | F32 | U32 | I32 | B32 -> 32
- | S64 | U64 | I64 | B64 -> 64
+ | S64 | U64 | P64 | I64 | B64 -> 64
+ | P128 -> 128
| Conv (a, b) ->
let wa = elt_width a and wb = elt_width b in
if wa = wb then wa else raise (MixedMode (a, b))
@@ -309,7 +312,7 @@ let rec elt_width = function
let rec elt_class = function
S8 | S16 | S32 | S64 -> Signed
| U8 | U16 | U32 | U64 -> Unsigned
- | P8 | P16 -> Poly
+ | P8 | P16 | P64 | P128 -> Poly
| F16 | F32 -> Float
| I8 | I16 | I32 | I64 -> Int
| B8 | B16 | B32 | B64 -> Bits
@@ -330,6 +333,8 @@ let elt_of_class_width c w =
| Unsigned, 64 -> U64
| Poly, 8 -> P8
| Poly, 16 -> P16
+ | Poly, 64 -> P64
+ | Poly, 128 -> P128
| Int, 8 -> I8
| Int, 16 -> I16
| Int, 32 -> I32
@@ -402,7 +407,7 @@ let rec mode_of_elt ?argpos elt shape =
Float | ConvClass(_, Float) -> true | _ -> false in
let idx =
match elt_width elt with
- 8 -> 0 | 16 -> 1 | 32 -> 2 | 64 -> 3
+ 8 -> 0 | 16 -> 1 | 32 -> 2 | 64 -> 3 | 128 -> 4
| _ -> failwith "Bad element width"
in match shape with
All (_, Dreg) | By_scalar Dreg | Pair_result Dreg | Unary_scalar Dreg
@@ -413,7 +418,7 @@ let rec mode_of_elt ?argpos elt shape =
[| V8QI; V4HI; V2SI; DI |].(idx)
| All (_, Qreg) | By_scalar Qreg | Pair_result Qreg | Unary_scalar Qreg
| Binary_imm Qreg | Long_noreg Qreg | Wide_noreg Qreg ->
- [| V16QI; V8HI; if flt then V4SF else V4SI; V2DI |].(idx)
+ [| V16QI; V8HI; if flt then V4SF else V4SI; V2DI; TI|].(idx)
| All (_, (Corereg | PtrTo _ | CstPtrTo _)) ->
[| QI; HI; if flt then SF else SI; DI |].(idx)
| Long | Wide | Wide_lane | Wide_scalar
@@ -474,6 +479,8 @@ let type_for_elt shape elt no =
| U16 -> T_uint16x4
| U32 -> T_uint32x2
| U64 -> T_uint64x1
+ | P64 -> T_poly64x1
+ | P128 -> T_poly128
| F16 -> T_float16x4
| F32 -> T_float32x2
| P8 -> T_poly8x8
@@ -493,6 +500,8 @@ let type_for_elt shape elt no =
| F32 -> T_float32x4
| P8 -> T_poly8x16
| P16 -> T_poly16x8
+ | P64 -> T_poly64x2
+ | P128 -> T_poly128
| _ -> failwith "Bad elt type for Qreg"
end
| Corereg ->
@@ -507,6 +516,8 @@ let type_for_elt shape elt no =
| U64 -> T_uint64
| P8 -> T_poly8
| P16 -> T_poly16
+ | P64 -> T_poly64
+ | P128 -> T_poly128
| F32 -> T_float32
| _ -> failwith "Bad elt type for Corereg"
end
@@ -527,10 +538,10 @@ let type_for_elt shape elt no =
let vectype_size = function
T_int8x8 | T_int16x4 | T_int32x2 | T_int64x1
| T_uint8x8 | T_uint16x4 | T_uint32x2 | T_uint64x1
- | T_float32x2 | T_poly8x8 | T_poly16x4 | T_float16x4 -> 64
+ | T_float32x2 | T_poly8x8 | T_poly64x1 | T_poly16x4 | T_float16x4 -> 64
| T_int8x16 | T_int16x8 | T_int32x4 | T_int64x2
| T_uint8x16 | T_uint16x8 | T_uint32x4 | T_uint64x2
- | T_float32x4 | T_poly8x16 | T_poly16x8 -> 128
+ | T_float32x4 | T_poly8x16 | T_poly64x2 | T_poly16x8 -> 128
| _ -> raise Not_found
let inttype_for_array num elttype =
@@ -1041,14 +1052,22 @@ let ops =
"vRsraQ_n", shift_right_acc, su_8_64;
(* Vector shift right and insert. *)
+ Vsri, [Requires_feature "CRYPTO"], Use_operands [| Dreg; Dreg; Immed |], "vsri_n", shift_insert,
+ [P64];
Vsri, [], Use_operands [| Dreg; Dreg; Immed |], "vsri_n", shift_insert,
P8 :: P16 :: su_8_64;
+ Vsri, [Requires_feature "CRYPTO"], Use_operands [| Qreg; Qreg; Immed |], "vsriQ_n", shift_insert,
+ [P64];
Vsri, [], Use_operands [| Qreg; Qreg; Immed |], "vsriQ_n", shift_insert,
P8 :: P16 :: su_8_64;
(* Vector shift left and insert. *)
+ Vsli, [Requires_feature "CRYPTO"], Use_operands [| Dreg; Dreg; Immed |], "vsli_n", shift_insert,
+ [P64];
Vsli, [], Use_operands [| Dreg; Dreg; Immed |], "vsli_n", shift_insert,
P8 :: P16 :: su_8_64;
+ Vsli, [Requires_feature "CRYPTO"], Use_operands [| Qreg; Qreg; Immed |], "vsliQ_n", shift_insert,
+ [P64];
Vsli, [], Use_operands [| Qreg; Qreg; Immed |], "vsliQ_n", shift_insert,
P8 :: P16 :: su_8_64;
@@ -1135,6 +1154,11 @@ let ops =
(* Create vector from literal bit pattern. *)
Vcreate,
+ [Requires_feature "CRYPTO"; No_op], (* Not really, but it can yield various things that are too
+ hard for the test generator at this time. *)
+ Use_operands [| Dreg; Corereg |], "vcreate", create_vector,
+ [P64];
+ Vcreate,
[No_op], (* Not really, but it can yield various things that are too
hard for the test generator at this time. *)
Use_operands [| Dreg; Corereg |], "vcreate", create_vector,
@@ -1148,12 +1172,25 @@ let ops =
Use_operands [| Dreg; Corereg |], "vdup_n", bits_1,
pf_su_8_32;
Vdup_n,
+ [No_op; Requires_feature "CRYPTO";
+ Instruction_name ["vmov"];
+ Disassembles_as [Use_operands [| Dreg; Corereg; Corereg |]]],
+ Use_operands [| Dreg; Corereg |], "vdup_n", notype_1,
+ [P64];
+ Vdup_n,
[No_op;
Instruction_name ["vmov"];
Disassembles_as [Use_operands [| Dreg; Corereg; Corereg |]]],
Use_operands [| Dreg; Corereg |], "vdup_n", notype_1,
[S64; U64];
Vdup_n,
+ [No_op; Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| Qreg;
+ Alternatives [ Corereg;
+ Element_of_dreg ] |]]],
+ Use_operands [| Qreg; Corereg |], "vdupQ_n", bits_1,
+ [P64];
+ Vdup_n,
[Disassembles_as [Use_operands [| Qreg;
Alternatives [ Corereg;
Element_of_dreg ] |]]],
@@ -1206,21 +1243,33 @@ let ops =
[Disassembles_as [Use_operands [| Dreg; Element_of_dreg |]]],
Unary_scalar Dreg, "vdup_lane", bits_2, pf_su_8_32;
Vdup_lane,
+ [No_op; Requires_feature "CRYPTO"; Const_valuator (fun _ -> 0)],
+ Unary_scalar Dreg, "vdup_lane", bits_2, [P64];
+ Vdup_lane,
[No_op; Const_valuator (fun _ -> 0)],
Unary_scalar Dreg, "vdup_lane", bits_2, [S64; U64];
Vdup_lane,
[Disassembles_as [Use_operands [| Qreg; Element_of_dreg |]]],
Unary_scalar Qreg, "vdupQ_lane", bits_2, pf_su_8_32;
Vdup_lane,
+ [No_op; Requires_feature "CRYPTO"; Const_valuator (fun _ -> 0)],
+ Unary_scalar Qreg, "vdupQ_lane", bits_2, [P64];
+ Vdup_lane,
[No_op; Const_valuator (fun _ -> 0)],
Unary_scalar Qreg, "vdupQ_lane", bits_2, [S64; U64];
(* Combining vectors. *)
+ Vcombine, [Requires_feature "CRYPTO"; No_op],
+ Use_operands [| Qreg; Dreg; Dreg |], "vcombine", notype_2,
+ [P64];
Vcombine, [No_op],
Use_operands [| Qreg; Dreg; Dreg |], "vcombine", notype_2,
pf_su_8_64;
(* Splitting vectors. *)
+ Vget_high, [Requires_feature "CRYPTO"; No_op],
+ Use_operands [| Dreg; Qreg |], "vget_high",
+ notype_1, [P64];
Vget_high, [No_op],
Use_operands [| Dreg; Qreg |], "vget_high",
notype_1, pf_su_8_64;
@@ -1229,7 +1278,10 @@ let ops =
Fixed_vector_reg],
Use_operands [| Dreg; Qreg |], "vget_low",
notype_1, pf_su_8_32;
- Vget_low, [No_op],
+ Vget_low, [Requires_feature "CRYPTO"; No_op],
+ Use_operands [| Dreg; Qreg |], "vget_low",
+ notype_1, [P64];
+ Vget_low, [No_op],
Use_operands [| Dreg; Qreg |], "vget_low",
notype_1, [S64; U64];
@@ -1412,9 +1464,15 @@ let ops =
[S16; S32];
(* Vector extract. *)
+ Vext, [Requires_feature "CRYPTO"; Const_valuator (fun _ -> 0)],
+ Use_operands [| Dreg; Dreg; Dreg; Immed |], "vext", extend,
+ [P64];
Vext, [Const_valuator (fun _ -> 0)],
Use_operands [| Dreg; Dreg; Dreg; Immed |], "vext", extend,
pf_su_8_64;
+ Vext, [Requires_feature "CRYPTO"; Const_valuator (fun _ -> 0)],
+ Use_operands [| Qreg; Qreg; Qreg; Immed |], "vextQ", extend,
+ [P64];
Vext, [Const_valuator (fun _ -> 0)],
Use_operands [| Qreg; Qreg; Qreg; Immed |], "vextQ", extend,
pf_su_8_64;
@@ -1435,11 +1493,21 @@ let ops =
(* Bit selection. *)
Vbsl,
+ [Requires_feature "CRYPTO"; Instruction_name ["vbsl"; "vbit"; "vbif"];
+ Disassembles_as [Use_operands [| Dreg; Dreg; Dreg |]]],
+ Use_operands [| Dreg; Dreg; Dreg; Dreg |], "vbsl", bit_select,
+ [P64];
+ Vbsl,
[Instruction_name ["vbsl"; "vbit"; "vbif"];
Disassembles_as [Use_operands [| Dreg; Dreg; Dreg |]]],
Use_operands [| Dreg; Dreg; Dreg; Dreg |], "vbsl", bit_select,
pf_su_8_64;
Vbsl,
+ [Requires_feature "CRYPTO"; Instruction_name ["vbsl"; "vbit"; "vbif"];
+ Disassembles_as [Use_operands [| Qreg; Qreg; Qreg |]]],
+ Use_operands [| Qreg; Qreg; Qreg; Qreg |], "vbslQ", bit_select,
+ [P64];
+ Vbsl,
[Instruction_name ["vbsl"; "vbit"; "vbif"];
Disassembles_as [Use_operands [| Qreg; Qreg; Qreg |]]],
Use_operands [| Qreg; Qreg; Qreg; Qreg |], "vbslQ", bit_select,
@@ -1461,10 +1529,21 @@ let ops =
(* Element/structure loads. VLD1 variants. *)
Vldx 1,
+ [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Dreg; CstPtrTo Corereg |], "vld1", bits_1,
+ [P64];
+ Vldx 1,
[Disassembles_as [Use_operands [| VecArray (1, Dreg);
CstPtrTo Corereg |]]],
Use_operands [| Dreg; CstPtrTo Corereg |], "vld1", bits_1,
pf_su_8_64;
+ Vldx 1, [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (2, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Qreg; CstPtrTo Corereg |], "vld1Q", bits_1,
+ [P64];
Vldx 1, [Disassembles_as [Use_operands [| VecArray (2, Dreg);
CstPtrTo Corereg |]]],
Use_operands [| Qreg; CstPtrTo Corereg |], "vld1Q", bits_1,
@@ -1476,6 +1555,13 @@ let ops =
Use_operands [| Dreg; CstPtrTo Corereg; Dreg; Immed |],
"vld1_lane", bits_3, pf_su_8_32;
Vldx_lane 1,
+ [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]];
+ Const_valuator (fun _ -> 0)],
+ Use_operands [| Dreg; CstPtrTo Corereg; Dreg; Immed |],
+ "vld1_lane", bits_3, [P64];
+ Vldx_lane 1,
[Disassembles_as [Use_operands [| VecArray (1, Dreg);
CstPtrTo Corereg |]];
Const_valuator (fun _ -> 0)],
@@ -1487,6 +1573,12 @@ let ops =
Use_operands [| Qreg; CstPtrTo Corereg; Qreg; Immed |],
"vld1Q_lane", bits_3, pf_su_8_32;
Vldx_lane 1,
+ [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Qreg; CstPtrTo Corereg; Qreg; Immed |],
+ "vld1Q_lane", bits_3, [P64];
+ Vldx_lane 1,
[Disassembles_as [Use_operands [| VecArray (1, Dreg);
CstPtrTo Corereg |]]],
Use_operands [| Qreg; CstPtrTo Corereg; Qreg; Immed |],
@@ -1498,6 +1590,12 @@ let ops =
Use_operands [| Dreg; CstPtrTo Corereg |], "vld1_dup",
bits_1, pf_su_8_32;
Vldx_dup 1,
+ [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Dreg; CstPtrTo Corereg |], "vld1_dup",
+ bits_1, [P64];
+ Vldx_dup 1,
[Disassembles_as [Use_operands [| VecArray (1, Dreg);
CstPtrTo Corereg |]]],
Use_operands [| Dreg; CstPtrTo Corereg |], "vld1_dup",
@@ -1510,16 +1608,32 @@ let ops =
(* Treated identically to vld1_dup above as we now
do a single load followed by a duplicate. *)
Vldx_dup 1,
+ [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Qreg; CstPtrTo Corereg |], "vld1Q_dup",
+ bits_1, [P64];
+ Vldx_dup 1,
[Disassembles_as [Use_operands [| VecArray (1, Dreg);
CstPtrTo Corereg |]]],
Use_operands [| Qreg; CstPtrTo Corereg |], "vld1Q_dup",
bits_1, [S64; U64];
(* VST1 variants. *)
+ Vstx 1, [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ PtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; Dreg |], "vst1",
+ store_1, [P64];
Vstx 1, [Disassembles_as [Use_operands [| VecArray (1, Dreg);
PtrTo Corereg |]]],
Use_operands [| PtrTo Corereg; Dreg |], "vst1",
store_1, pf_su_8_64;
+ Vstx 1, [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (2, Dreg);
+ PtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; Qreg |], "vst1Q",
+ store_1, [P64];
Vstx 1, [Disassembles_as [Use_operands [| VecArray (2, Dreg);
PtrTo Corereg |]]],
Use_operands [| PtrTo Corereg; Qreg |], "vst1Q",
@@ -1531,6 +1645,13 @@ let ops =
Use_operands [| PtrTo Corereg; Dreg; Immed |],
"vst1_lane", store_3, pf_su_8_32;
Vstx_lane 1,
+ [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]];
+ Const_valuator (fun _ -> 0)],
+ Use_operands [| PtrTo Corereg; Dreg; Immed |],
+ "vst1_lane", store_3, [P64];
+ Vstx_lane 1,
[Disassembles_as [Use_operands [| VecArray (1, Dreg);
CstPtrTo Corereg |]];
Const_valuator (fun _ -> 0)],
@@ -1542,6 +1663,12 @@ let ops =
Use_operands [| PtrTo Corereg; Qreg; Immed |],
"vst1Q_lane", store_3, pf_su_8_32;
Vstx_lane 1,
+ [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; Qreg; Immed |],
+ "vst1Q_lane", store_3, [P64];
+ Vstx_lane 1,
[Disassembles_as [Use_operands [| VecArray (1, Dreg);
CstPtrTo Corereg |]]],
Use_operands [| PtrTo Corereg; Qreg; Immed |],
@@ -1550,6 +1677,9 @@ let ops =
(* VLD2 variants. *)
Vldx 2, [], Use_operands [| VecArray (2, Dreg); CstPtrTo Corereg |],
"vld2", bits_1, pf_su_8_32;
+ Vldx 2, [Requires_feature "CRYPTO"; Instruction_name ["vld1"]],
+ Use_operands [| VecArray (2, Dreg); CstPtrTo Corereg |],
+ "vld2", bits_1, [P64];
Vldx 2, [Instruction_name ["vld1"]],
Use_operands [| VecArray (2, Dreg); CstPtrTo Corereg |],
"vld2", bits_1, [S64; U64];
@@ -1581,6 +1711,12 @@ let ops =
Use_operands [| VecArray (2, Dreg); CstPtrTo Corereg |],
"vld2_dup", bits_1, pf_su_8_32;
Vldx_dup 2,
+ [Requires_feature "CRYPTO";
+ Instruction_name ["vld1"]; Disassembles_as [Use_operands
+ [| VecArray (2, Dreg); CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (2, Dreg); CstPtrTo Corereg |],
+ "vld2_dup", bits_1, [P64];
+ Vldx_dup 2,
[Instruction_name ["vld1"]; Disassembles_as [Use_operands
[| VecArray (2, Dreg); CstPtrTo Corereg |]]],
Use_operands [| VecArray (2, Dreg); CstPtrTo Corereg |],
@@ -1591,6 +1727,12 @@ let ops =
PtrTo Corereg |]]],
Use_operands [| PtrTo Corereg; VecArray (2, Dreg) |], "vst2",
store_1, pf_su_8_32;
+ Vstx 2, [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (2, Dreg);
+ PtrTo Corereg |]];
+ Instruction_name ["vst1"]],
+ Use_operands [| PtrTo Corereg; VecArray (2, Dreg) |], "vst2",
+ store_1, [P64];
Vstx 2, [Disassembles_as [Use_operands [| VecArray (2, Dreg);
PtrTo Corereg |]];
Instruction_name ["vst1"]],
@@ -1619,6 +1761,9 @@ let ops =
(* VLD3 variants. *)
Vldx 3, [], Use_operands [| VecArray (3, Dreg); CstPtrTo Corereg |],
"vld3", bits_1, pf_su_8_32;
+ Vldx 3, [Requires_feature "CRYPTO"; Instruction_name ["vld1"]],
+ Use_operands [| VecArray (3, Dreg); CstPtrTo Corereg |],
+ "vld3", bits_1, [P64];
Vldx 3, [Instruction_name ["vld1"]],
Use_operands [| VecArray (3, Dreg); CstPtrTo Corereg |],
"vld3", bits_1, [S64; U64];
@@ -1650,6 +1795,12 @@ let ops =
Use_operands [| VecArray (3, Dreg); CstPtrTo Corereg |],
"vld3_dup", bits_1, pf_su_8_32;
Vldx_dup 3,
+ [Requires_feature "CRYPTO";
+ Instruction_name ["vld1"]; Disassembles_as [Use_operands
+ [| VecArray (3, Dreg); CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (3, Dreg); CstPtrTo Corereg |],
+ "vld3_dup", bits_1, [P64];
+ Vldx_dup 3,
[Instruction_name ["vld1"]; Disassembles_as [Use_operands
[| VecArray (3, Dreg); CstPtrTo Corereg |]]],
Use_operands [| VecArray (3, Dreg); CstPtrTo Corereg |],
@@ -1660,6 +1811,12 @@ let ops =
PtrTo Corereg |]]],
Use_operands [| PtrTo Corereg; VecArray (3, Dreg) |], "vst3",
store_1, pf_su_8_32;
+ Vstx 3, [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (4, Dreg);
+ PtrTo Corereg |]];
+ Instruction_name ["vst1"]],
+ Use_operands [| PtrTo Corereg; VecArray (3, Dreg) |], "vst3",
+ store_1, [P64];
Vstx 3, [Disassembles_as [Use_operands [| VecArray (4, Dreg);
PtrTo Corereg |]];
Instruction_name ["vst1"]],
@@ -1688,6 +1845,9 @@ let ops =
(* VLD4/VST4 variants. *)
Vldx 4, [], Use_operands [| VecArray (4, Dreg); CstPtrTo Corereg |],
"vld4", bits_1, pf_su_8_32;
+ Vldx 4, [Requires_feature "CRYPTO"; Instruction_name ["vld1"]],
+ Use_operands [| VecArray (4, Dreg); CstPtrTo Corereg |],
+ "vld4", bits_1, [P64];
Vldx 4, [Instruction_name ["vld1"]],
Use_operands [| VecArray (4, Dreg); CstPtrTo Corereg |],
"vld4", bits_1, [S64; U64];
@@ -1719,6 +1879,12 @@ let ops =
Use_operands [| VecArray (4, Dreg); CstPtrTo Corereg |],
"vld4_dup", bits_1, pf_su_8_32;
Vldx_dup 4,
+ [Requires_feature "CRYPTO";
+ Instruction_name ["vld1"]; Disassembles_as [Use_operands
+ [| VecArray (4, Dreg); CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (4, Dreg); CstPtrTo Corereg |],
+ "vld4_dup", bits_1, [P64];
+ Vldx_dup 4,
[Instruction_name ["vld1"]; Disassembles_as [Use_operands
[| VecArray (4, Dreg); CstPtrTo Corereg |]]],
Use_operands [| VecArray (4, Dreg); CstPtrTo Corereg |],
@@ -1728,6 +1894,12 @@ let ops =
PtrTo Corereg |]]],
Use_operands [| PtrTo Corereg; VecArray (4, Dreg) |], "vst4",
store_1, pf_su_8_32;
+ Vstx 4, [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (4, Dreg);
+ PtrTo Corereg |]];
+ Instruction_name ["vst1"]],
+ Use_operands [| PtrTo Corereg; VecArray (4, Dreg) |], "vst4",
+ store_1, [P64];
Vstx 4, [Disassembles_as [Use_operands [| VecArray (4, Dreg);
PtrTo Corereg |]];
Instruction_name ["vst1"]],
@@ -1779,26 +1951,32 @@ let ops =
Vorn, [], All (3, Qreg), "vornQ", notype_2, su_8_64;
]
+let type_in_crypto_only t
+ = (t == P64) or (t == P128)
+
+let cross_product s1 s2
+ = List.filter (fun (e, e') -> e <> e')
+ (List.concat (List.map (fun e1 -> List.map (fun e2 -> (e1,e2)) s1) s2))
+
let reinterp =
- let elems = P8 :: P16 :: F32 :: su_8_64 in
- List.fold_right
- (fun convto acc ->
- let types = List.fold_right
- (fun convfrom acc ->
- if convfrom <> convto then
- Cast (convto, convfrom) :: acc
- else
- acc)
- elems
- []
- in
- let dconv = Vreinterp, [No_op], Use_operands [| Dreg; Dreg |],
- "vreinterpret", conv_1, types
- and qconv = Vreinterp, [No_op], Use_operands [| Qreg; Qreg |],
- "vreinterpretQ", conv_1, types in
- dconv :: qconv :: acc)
- elems
- []
+ let elems = P8 :: P16 :: F32 :: P64 :: su_8_64 in
+ let casts = cross_product elems elems in
+ List.map
+ (fun (convto, convfrom) ->
+ Vreinterp, (if (type_in_crypto_only convto) or (type_in_crypto_only convfrom)
+ then [Requires_feature "CRYPTO"] else []) @ [No_op], Use_operands [| Dreg; Dreg |],
+ "vreinterpret", conv_1, [Cast (convto, convfrom)])
+ casts
+
+let reinterpq =
+ let elems = P8 :: P16 :: F32 :: P64 :: P128 :: su_8_64 in
+ let casts = cross_product elems elems in
+ List.map
+ (fun (convto, convfrom) ->
+ Vreinterp, (if (type_in_crypto_only convto) or (type_in_crypto_only convfrom)
+ then [Requires_feature "CRYPTO"] else []) @ [No_op], Use_operands [| Qreg; Qreg |],
+ "vreinterpretQ", conv_1, [Cast (convto, convfrom)])
+ casts
(* Output routines. *)
@@ -1808,6 +1986,7 @@ let rec string_of_elt = function
| I8 -> "i8" | I16 -> "i16" | I32 -> "i32" | I64 -> "i64"
| B8 -> "8" | B16 -> "16" | B32 -> "32" | B64 -> "64"
| F16 -> "f16" | F32 -> "f32" | P8 -> "p8" | P16 -> "p16"
+ | P64 -> "p64" | P128 -> "p128"
| Conv (a, b) | Cast (a, b) -> string_of_elt a ^ "_" ^ string_of_elt b
| NoElts -> failwith "No elts"
@@ -1851,6 +2030,10 @@ let string_of_vectype vt =
| T_uint64 -> affix "uint64"
| T_poly8 -> affix "poly8"
| T_poly16 -> affix "poly16"
+ | T_poly64 -> affix "poly64"
+ | T_poly64x1 -> affix "poly64x1"
+ | T_poly64x2 -> affix "poly64x2"
+ | T_poly128 -> affix "poly128"
| T_float16 -> affix "float16"
| T_float32 -> affix "float32"
| T_immediate _ -> "const int"
@@ -1859,6 +2042,7 @@ let string_of_vectype vt =
| T_intHI -> "__builtin_neon_hi"
| T_intSI -> "__builtin_neon_si"
| T_intDI -> "__builtin_neon_di"
+ | T_intTI -> "__builtin_neon_ti"
| T_floatHF -> "__builtin_neon_hf"
| T_floatSF -> "__builtin_neon_sf"
| T_arrayof (num, base) ->
@@ -1884,7 +2068,7 @@ let string_of_mode = function
V8QI -> "v8qi" | V4HI -> "v4hi" | V4HF -> "v4hf" | V2SI -> "v2si"
| V2SF -> "v2sf" | DI -> "di" | V16QI -> "v16qi" | V8HI -> "v8hi"
| V4SI -> "v4si" | V4SF -> "v4sf" | V2DI -> "v2di" | QI -> "qi"
- | HI -> "hi" | SI -> "si" | SF -> "sf"
+ | HI -> "hi" | SI -> "si" | SF -> "sf" | TI -> "ti"
(* Use uppercase chars for letters which form part of the intrinsic name, but
should be omitted from the builtin name (the info is passed in an extra
@@ -1991,3 +2175,181 @@ let analyze_all_shapes features shape f =
| _ -> assert false
with Not_found -> [f shape]
+(* The crypto intrinsics have unconventional shapes and are not numerous
+   enough to be worth the trouble of encoding generically, so we implement
+   them explicitly here. *)
+let crypto_intrinsics =
+"
+#ifdef __ARM_FEATURE_CRYPTO
+
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vldrq_p128 (poly128_t const * __ptr)
+{
+#ifdef __ARM_BIG_ENDIAN
+ poly64_t* __ptmp = (poly64_t*) __ptr;
+ poly64_t __d0 = vld1_p64 (__ptmp);
+ poly64_t __d1 = vld1_p64 (__ptmp + 1);
+ return vreinterpretq_p128_p64 (vcombine_p64 (__d1, __d0));
+#else
+ return vreinterpretq_p128_p64 (vld1q_p64 ((poly64_t*) __ptr));
+#endif
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vstrq_p128 (poly128_t * __ptr, poly128_t __val)
+{
+#ifdef __ARM_BIG_ENDIAN
+ poly64x2_t __tmp = vreinterpretq_p64_p128 (__val);
+ poly64_t __d0 = vget_high_p64 (__tmp);
+ poly64_t __d1 = vget_low_p64 (__tmp);
+ vst1q_p64 ((poly64_t*) __ptr, vcombine_p64 (__d0, __d1));
+#else
+ vst1q_p64 ((poly64_t*) __ptr, vreinterpretq_p64_p128 (__val));
+#endif
+}
+
+/* The vceq_p64 intrinsic does not map to a single instruction.
+ Instead we emulate it by performing a 32-bit variant of the vceq
+ and applying a pairwise min reduction to the result.
+ vceq_u32 will produce two 32-bit halves, each of which will contain either
+ all ones or all zeros depending on whether the corresponding 32-bit
+ halves of the poly64_t were equal. The whole poly64_t values are equal
+ if and only if both halves are equal, i.e. vceq_u32 returns all ones.
+ If the result is all zeroes for any half then the whole result is zeroes.
+ This is what the pairwise min reduction achieves. */
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceq_p64 (poly64x1_t __a, poly64x1_t __b)
+{
+ uint32x2_t __t_a = vreinterpret_u32_p64 (__a);
+ uint32x2_t __t_b = vreinterpret_u32_p64 (__b);
+ uint32x2_t __c = vceq_u32 (__t_a, __t_b);
+ uint32x2_t __m = vpmin_u32 (__c, __c);
+ return vreinterpret_u64_u32 (__m);
+}
+
+/* The vtst_p64 intrinsic does not map to a single instruction.
+   We emulate it in a way similar to vceq_p64 above, but here we do
+   a reduction with max, since if any bit is set in both of the two
+   poly64_t arguments then the whole result must be all ones. */
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vtst_p64 (poly64x1_t __a, poly64x1_t __b)
+{
+ uint32x2_t __t_a = vreinterpret_u32_p64 (__a);
+ uint32x2_t __t_b = vreinterpret_u32_p64 (__b);
+ uint32x2_t __c = vtst_u32 (__t_a, __t_b);
+ uint32x2_t __m = vpmax_u32 (__c, __c);
+ return vreinterpret_u64_u32 (__m);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaeseq_u8 (uint8x16_t __data, uint8x16_t __key)
+{
+ return __builtin_arm_crypto_aese (__data, __key);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaesdq_u8 (uint8x16_t __data, uint8x16_t __key)
+{
+ return __builtin_arm_crypto_aesd (__data, __key);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaesmcq_u8 (uint8x16_t __data)
+{
+ return __builtin_arm_crypto_aesmc (__data);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaesimcq_u8 (uint8x16_t __data)
+{
+ return __builtin_arm_crypto_aesimc (__data);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vsha1h_u32 (uint32_t __hash_e)
+{
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ __t = __builtin_arm_crypto_sha1h (__t);
+ return vgetq_lane_u32 (__t, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha1cq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
+{
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ return __builtin_arm_crypto_sha1c (__hash_abcd, __t, __wk);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha1pq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
+{
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ return __builtin_arm_crypto_sha1p (__hash_abcd, __t, __wk);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha1mq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
+{
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ return __builtin_arm_crypto_sha1m (__hash_abcd, __t, __wk);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha1su0q_u32 (uint32x4_t __w0_3, uint32x4_t __w4_7, uint32x4_t __w8_11)
+{
+ return __builtin_arm_crypto_sha1su0 (__w0_3, __w4_7, __w8_11);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha1su1q_u32 (uint32x4_t __tw0_3, uint32x4_t __w12_15)
+{
+ return __builtin_arm_crypto_sha1su1 (__tw0_3, __w12_15);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha256hq_u32 (uint32x4_t __hash_abcd, uint32x4_t __hash_efgh, uint32x4_t __wk)
+{
+ return __builtin_arm_crypto_sha256h (__hash_abcd, __hash_efgh, __wk);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha256h2q_u32 (uint32x4_t __hash_abcd, uint32x4_t __hash_efgh, uint32x4_t __wk)
+{
+ return __builtin_arm_crypto_sha256h2 (__hash_abcd, __hash_efgh, __wk);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha256su0q_u32 (uint32x4_t __w0_3, uint32x4_t __w4_7)
+{
+ return __builtin_arm_crypto_sha256su0 (__w0_3, __w4_7);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha256su1q_u32 (uint32x4_t __tw0_3, uint32x4_t __w8_11, uint32x4_t __w12_15)
+{
+ return __builtin_arm_crypto_sha256su1 (__tw0_3, __w8_11, __w12_15);
+}
+
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vmull_p64 (poly64_t __a, poly64_t __b)
+{
+ return (poly128_t) __builtin_arm_crypto_vmullp64 ((uint64_t) __a, (uint64_t) __b);
+}
+
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vmull_high_p64 (poly64x2_t __a, poly64x2_t __b)
+{
+ poly64_t __t1 = vget_high_p64 (__a);
+ poly64_t __t2 = vget_high_p64 (__b);
+
+ return (poly128_t) __builtin_arm_crypto_vmullp64 ((uint64_t) __t1, (uint64_t) __t2);
+}
+
+#endif
+"
diff --git a/gcc/config/arm/unspecs.md b/gcc/config/arm/unspecs.md
index 508603cf6c8..af4b832e88b 100644
--- a/gcc/config/arm/unspecs.md
+++ b/gcc/config/arm/unspecs.md
@@ -149,6 +149,27 @@
(define_c_enum "unspec" [
UNSPEC_ASHIFT_SIGNED
UNSPEC_ASHIFT_UNSIGNED
+ UNSPEC_CRC32B
+ UNSPEC_CRC32H
+ UNSPEC_CRC32W
+ UNSPEC_CRC32CB
+ UNSPEC_CRC32CH
+ UNSPEC_CRC32CW
+ UNSPEC_AESD
+ UNSPEC_AESE
+ UNSPEC_AESIMC
+ UNSPEC_AESMC
+ UNSPEC_SHA1C
+ UNSPEC_SHA1M
+ UNSPEC_SHA1P
+ UNSPEC_SHA1H
+ UNSPEC_SHA1SU0
+ UNSPEC_SHA1SU1
+ UNSPEC_SHA256H
+ UNSPEC_SHA256H2
+ UNSPEC_SHA256SU0
+ UNSPEC_SHA256SU1
+ UNSPEC_VMULLP64
UNSPEC_LOAD_COUNT
UNSPEC_VABD
UNSPEC_VABDL
diff --git a/gcc/doc/arm-neon-intrinsics.texi b/gcc/doc/arm-neon-intrinsics.texi
index fcd6c0f5305..b1468683f83 100644
--- a/gcc/doc/arm-neon-intrinsics.texi
+++ b/gcc/doc/arm-neon-intrinsics.texi
@@ -4079,6 +4079,12 @@
@subsubsection Vector shift right and insert
@itemize @bullet
+@item poly64x1_t vsri_n_p64 (poly64x1_t, poly64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.64 @var{d0}, @var{d0}, #@var{0}}
+@end itemize
+
+
+@itemize @bullet
@item uint32x2_t vsri_n_u32 (uint32x2_t, uint32x2_t, const int)
@*@emph{Form of expected instruction(s):} @code{vsri.32 @var{d0}, @var{d0}, #@var{0}}
@end itemize
@@ -4139,6 +4145,12 @@
@itemize @bullet
+@item poly64x2_t vsriq_n_p64 (poly64x2_t, poly64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.64 @var{q0}, @var{q0}, #@var{0}}
+@end itemize
+
+
+@itemize @bullet
@item uint32x4_t vsriq_n_u32 (uint32x4_t, uint32x4_t, const int)
@*@emph{Form of expected instruction(s):} @code{vsri.32 @var{q0}, @var{q0}, #@var{0}}
@end itemize
@@ -4203,6 +4215,12 @@
@subsubsection Vector shift left and insert
@itemize @bullet
+@item poly64x1_t vsli_n_p64 (poly64x1_t, poly64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.64 @var{d0}, @var{d0}, #@var{0}}
+@end itemize
+
+
+@itemize @bullet
@item uint32x2_t vsli_n_u32 (uint32x2_t, uint32x2_t, const int)
@*@emph{Form of expected instruction(s):} @code{vsli.32 @var{d0}, @var{d0}, #@var{0}}
@end itemize
@@ -4263,6 +4281,12 @@
@itemize @bullet
+@item poly64x2_t vsliq_n_p64 (poly64x2_t, poly64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.64 @var{q0}, @var{q0}, #@var{0}}
+@end itemize
+
+
+@itemize @bullet
@item uint32x4_t vsliq_n_u32 (uint32x4_t, uint32x4_t, const int)
@*@emph{Form of expected instruction(s):} @code{vsli.32 @var{q0}, @var{q0}, #@var{0}}
@end itemize
@@ -5071,6 +5095,11 @@
@subsubsection Create vector from literal bit pattern
@itemize @bullet
+@item poly64x1_t vcreate_p64 (uint64_t)
+@end itemize
+
+
+@itemize @bullet
@item uint32x2_t vcreate_u32 (uint64_t)
@end itemize
@@ -5184,6 +5213,11 @@
@itemize @bullet
+@item poly64x1_t vdup_n_p64 (poly64_t)
+@end itemize
+
+
+@itemize @bullet
@item uint64x1_t vdup_n_u64 (uint64_t)
@end itemize
@@ -5194,6 +5228,11 @@
@itemize @bullet
+@item poly64x2_t vdupq_n_p64 (poly64_t)
+@end itemize
+
+
+@itemize @bullet
@item uint32x4_t vdupq_n_u32 (uint32_t)
@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{q0}, @var{r0}}
@end itemize
@@ -5440,6 +5479,11 @@
@itemize @bullet
+@item poly64x1_t vdup_lane_p64 (poly64x1_t, const int)
+@end itemize
+
+
+@itemize @bullet
@item uint64x1_t vdup_lane_u64 (uint64x1_t, const int)
@end itemize
@@ -5504,6 +5548,11 @@
@itemize @bullet
+@item poly64x2_t vdupq_lane_p64 (poly64x1_t, const int)
+@end itemize
+
+
+@itemize @bullet
@item uint64x2_t vdupq_lane_u64 (uint64x1_t, const int)
@end itemize
@@ -5518,6 +5567,11 @@
@subsubsection Combining vectors
@itemize @bullet
+@item poly64x2_t vcombine_p64 (poly64x1_t, poly64x1_t)
+@end itemize
+
+
+@itemize @bullet
@item uint32x4_t vcombine_u32 (uint32x2_t, uint32x2_t)
@end itemize
@@ -5577,6 +5631,11 @@
@subsubsection Splitting vectors
@itemize @bullet
+@item poly64x1_t vget_high_p64 (poly64x2_t)
+@end itemize
+
+
+@itemize @bullet
@item uint32x2_t vget_high_u32 (uint32x4_t)
@end itemize
@@ -5686,6 +5745,11 @@
@itemize @bullet
+@item poly64x1_t vget_low_p64 (poly64x2_t)
+@end itemize
+
+
+@itemize @bullet
@item uint64x1_t vget_low_u64 (uint64x2_t)
@end itemize
@@ -6818,6 +6882,12 @@
@subsubsection Vector extract
@itemize @bullet
+@item poly64x1_t vext_p64 (poly64x1_t, poly64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.64 @var{d0}, @var{d0}, @var{d0}, #@var{0}}
+@end itemize
+
+
+@itemize @bullet
@item uint32x2_t vext_u32 (uint32x2_t, uint32x2_t, const int)
@*@emph{Form of expected instruction(s):} @code{vext.32 @var{d0}, @var{d0}, @var{d0}, #@var{0}}
@end itemize
@@ -6884,6 +6954,12 @@
@itemize @bullet
+@item poly64x2_t vextq_p64 (poly64x2_t, poly64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.64 @var{q0}, @var{q0}, @var{q0}, #@var{0}}
+@end itemize
+
+
+@itemize @bullet
@item uint32x4_t vextq_u32 (uint32x4_t, uint32x4_t, const int)
@*@emph{Form of expected instruction(s):} @code{vext.32 @var{q0}, @var{q0}, @var{q0}, #@var{0}}
@end itemize
@@ -7174,6 +7250,12 @@
@subsubsection Bit selection
@itemize @bullet
+@item poly64x1_t vbsl_p64 (uint64x1_t, poly64x1_t, poly64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbit @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbif @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
@item uint32x2_t vbsl_u32 (uint32x2_t, uint32x2_t, uint32x2_t)
@*@emph{Form of expected instruction(s):} @code{vbsl @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbit @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbif @var{d0}, @var{d0}, @var{d0}}
@end itemize
@@ -7240,6 +7322,12 @@
@itemize @bullet
+@item poly64x2_t vbslq_p64 (uint64x2_t, poly64x2_t, poly64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbit @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbif @var{q0}, @var{q0}, @var{q0}}
+@end itemize
+
+
+@itemize @bullet
@item uint32x4_t vbslq_u32 (uint32x4_t, uint32x4_t, uint32x4_t)
@*@emph{Form of expected instruction(s):} @code{vbsl @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbit @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbif @var{q0}, @var{q0}, @var{q0}}
@end itemize
@@ -7646,6 +7734,12 @@
@subsubsection Element/structure loads, VLD1 variants
@itemize @bullet
+@item poly64x1_t vld1_p64 (const poly64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item uint32x2_t vld1_u32 (const uint32_t *)
@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}@}, [@var{r0}]}
@end itemize
@@ -7712,6 +7806,12 @@
@itemize @bullet
+@item poly64x2_t vld1q_p64 (const poly64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item uint32x4_t vld1q_u32 (const uint32_t *)
@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
@end itemize
@@ -7832,6 +7932,12 @@
@itemize @bullet
+@item poly64x1_t vld1_lane_p64 (const poly64_t *, poly64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item uint64x1_t vld1_lane_u64 (const uint64_t *, uint64x1_t, const int)
@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
@end itemize
@@ -7898,6 +8004,12 @@
@itemize @bullet
+@item poly64x2_t vld1q_lane_p64 (const poly64_t *, poly64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item uint64x2_t vld1q_lane_u64 (const uint64_t *, uint64x2_t, const int)
@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
@end itemize
@@ -7964,6 +8076,12 @@
@itemize @bullet
+@item poly64x1_t vld1_dup_p64 (const poly64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item uint64x1_t vld1_dup_u64 (const uint64_t *)
@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
@end itemize
@@ -8030,6 +8148,12 @@
@itemize @bullet
+@item poly64x2_t vld1q_dup_p64 (const poly64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item uint64x2_t vld1q_dup_u64 (const uint64_t *)
@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
@end itemize
@@ -8046,6 +8170,12 @@
@subsubsection Element/structure stores, VST1 variants
@itemize @bullet
+@item void vst1_p64 (poly64_t *, poly64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item void vst1_u32 (uint32_t *, uint32x2_t)
@*@emph{Form of expected instruction(s):} @code{vst1.32 @{@var{d0}@}, [@var{r0}]}
@end itemize
@@ -8112,6 +8242,12 @@
@itemize @bullet
+@item void vst1q_p64 (poly64_t *, poly64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item void vst1q_u32 (uint32_t *, uint32x4_t)
@*@emph{Form of expected instruction(s):} @code{vst1.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
@end itemize
@@ -8232,6 +8368,12 @@
@itemize @bullet
+@item void vst1_lane_p64 (poly64_t *, poly64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item void vst1_lane_s64 (int64_t *, int64x1_t, const int)
@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}@}, [@var{r0}]}
@end itemize
@@ -8298,6 +8440,12 @@
@itemize @bullet
+@item void vst1q_lane_p64 (poly64_t *, poly64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item void vst1q_lane_s64 (int64_t *, int64x2_t, const int)
@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}@}, [@var{r0}]}
@end itemize
@@ -8368,6 +8516,12 @@
@itemize @bullet
+@item poly64x1x2_t vld2_p64 (const poly64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item uint64x1x2_t vld2_u64 (const uint64_t *)
@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
@end itemize
@@ -8578,6 +8732,12 @@
@itemize @bullet
+@item poly64x1x2_t vld2_dup_p64 (const poly64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item uint64x1x2_t vld2_dup_u64 (const uint64_t *)
@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
@end itemize
@@ -8648,6 +8808,12 @@
@itemize @bullet
+@item void vst2_p64 (poly64_t *, poly64x1x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item void vst2_u64 (uint64_t *, uint64x1x2_t)
@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
@end itemize
@@ -8862,6 +9028,12 @@
@itemize @bullet
+@item poly64x1x3_t vld3_p64 (const poly64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item uint64x1x3_t vld3_u64 (const uint64_t *)
@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
@end itemize
@@ -9072,6 +9244,12 @@
@itemize @bullet
+@item poly64x1x3_t vld3_dup_p64 (const poly64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item uint64x1x3_t vld3_dup_u64 (const uint64_t *)
@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
@end itemize
@@ -9142,6 +9320,12 @@
@itemize @bullet
+@item void vst3_p64 (poly64_t *, poly64x1x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item void vst3_u64 (uint64_t *, uint64x1x3_t)
@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
@end itemize
@@ -9356,6 +9540,12 @@
@itemize @bullet
+@item poly64x1x4_t vld4_p64 (const poly64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item uint64x1x4_t vld4_u64 (const uint64_t *)
@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
@end itemize
@@ -9566,6 +9756,12 @@
@itemize @bullet
+@item poly64x1x4_t vld4_dup_p64 (const poly64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item uint64x1x4_t vld4_dup_u64 (const uint64_t *)
@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
@end itemize
@@ -9636,6 +9832,12 @@
@itemize @bullet
+@item void vst4_p64 (poly64_t *, poly64x1x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+@end itemize
+
+
+@itemize @bullet
@item void vst4_u64 (uint64_t *, uint64x1x4_t)
@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
@end itemize
@@ -10286,27 +10488,27 @@
@subsubsection Reinterpret casts
@itemize @bullet
-@item poly8x8_t vreinterpret_p8_u32 (uint32x2_t)
+@item poly8x8_t vreinterpret_p8_p16 (poly16x4_t)
@end itemize
@itemize @bullet
-@item poly8x8_t vreinterpret_p8_u16 (uint16x4_t)
+@item poly8x8_t vreinterpret_p8_f32 (float32x2_t)
@end itemize
@itemize @bullet
-@item poly8x8_t vreinterpret_p8_u8 (uint8x8_t)
+@item poly8x8_t vreinterpret_p8_p64 (poly64x1_t)
@end itemize
@itemize @bullet
-@item poly8x8_t vreinterpret_p8_s32 (int32x2_t)
+@item poly8x8_t vreinterpret_p8_s64 (int64x1_t)
@end itemize
@itemize @bullet
-@item poly8x8_t vreinterpret_p8_s16 (int16x4_t)
+@item poly8x8_t vreinterpret_p8_u64 (uint64x1_t)
@end itemize
@@ -10316,72 +10518,77 @@
@itemize @bullet
-@item poly8x8_t vreinterpret_p8_u64 (uint64x1_t)
+@item poly8x8_t vreinterpret_p8_s16 (int16x4_t)
@end itemize
@itemize @bullet
-@item poly8x8_t vreinterpret_p8_s64 (int64x1_t)
+@item poly8x8_t vreinterpret_p8_s32 (int32x2_t)
@end itemize
@itemize @bullet
-@item poly8x8_t vreinterpret_p8_f32 (float32x2_t)
+@item poly8x8_t vreinterpret_p8_u8 (uint8x8_t)
@end itemize
@itemize @bullet
-@item poly8x8_t vreinterpret_p8_p16 (poly16x4_t)
+@item poly8x8_t vreinterpret_p8_u16 (uint16x4_t)
@end itemize
@itemize @bullet
-@item poly8x16_t vreinterpretq_p8_u32 (uint32x4_t)
+@item poly8x8_t vreinterpret_p8_u32 (uint32x2_t)
@end itemize
@itemize @bullet
-@item poly8x16_t vreinterpretq_p8_u16 (uint16x8_t)
+@item poly16x4_t vreinterpret_p16_p8 (poly8x8_t)
@end itemize
@itemize @bullet
-@item poly8x16_t vreinterpretq_p8_u8 (uint8x16_t)
+@item poly16x4_t vreinterpret_p16_f32 (float32x2_t)
@end itemize
@itemize @bullet
-@item poly8x16_t vreinterpretq_p8_s32 (int32x4_t)
+@item poly16x4_t vreinterpret_p16_p64 (poly64x1_t)
@end itemize
@itemize @bullet
-@item poly8x16_t vreinterpretq_p8_s16 (int16x8_t)
+@item poly16x4_t vreinterpret_p16_s64 (int64x1_t)
@end itemize
@itemize @bullet
-@item poly8x16_t vreinterpretq_p8_s8 (int8x16_t)
+@item poly16x4_t vreinterpret_p16_u64 (uint64x1_t)
@end itemize
@itemize @bullet
-@item poly8x16_t vreinterpretq_p8_u64 (uint64x2_t)
+@item poly16x4_t vreinterpret_p16_s8 (int8x8_t)
@end itemize
@itemize @bullet
-@item poly8x16_t vreinterpretq_p8_s64 (int64x2_t)
+@item poly16x4_t vreinterpret_p16_s16 (int16x4_t)
@end itemize
@itemize @bullet
-@item poly8x16_t vreinterpretq_p8_f32 (float32x4_t)
+@item poly16x4_t vreinterpret_p16_s32 (int32x2_t)
@end itemize
@itemize @bullet
-@item poly8x16_t vreinterpretq_p8_p16 (poly16x8_t)
+@item poly16x4_t vreinterpret_p16_u8 (uint8x8_t)
+@end itemize
+
+
+@itemize @bullet
+@item poly16x4_t vreinterpret_p16_u16 (uint16x4_t)
@end itemize
@@ -10391,792 +10598,1152 @@
@itemize @bullet
-@item poly16x4_t vreinterpret_p16_u16 (uint16x4_t)
+@item float32x2_t vreinterpret_f32_p8 (poly8x8_t)
@end itemize
@itemize @bullet
-@item poly16x4_t vreinterpret_p16_u8 (uint8x8_t)
+@item float32x2_t vreinterpret_f32_p16 (poly16x4_t)
@end itemize
@itemize @bullet
-@item poly16x4_t vreinterpret_p16_s32 (int32x2_t)
+@item float32x2_t vreinterpret_f32_p64 (poly64x1_t)
@end itemize
@itemize @bullet
-@item poly16x4_t vreinterpret_p16_s16 (int16x4_t)
+@item float32x2_t vreinterpret_f32_s64 (int64x1_t)
@end itemize
@itemize @bullet
-@item poly16x4_t vreinterpret_p16_s8 (int8x8_t)
+@item float32x2_t vreinterpret_f32_u64 (uint64x1_t)
@end itemize
@itemize @bullet
-@item poly16x4_t vreinterpret_p16_u64 (uint64x1_t)
+@item float32x2_t vreinterpret_f32_s8 (int8x8_t)
@end itemize
@itemize @bullet
-@item poly16x4_t vreinterpret_p16_s64 (int64x1_t)
+@item float32x2_t vreinterpret_f32_s16 (int16x4_t)
@end itemize
@itemize @bullet
-@item poly16x4_t vreinterpret_p16_f32 (float32x2_t)
+@item float32x2_t vreinterpret_f32_s32 (int32x2_t)
@end itemize
@itemize @bullet
-@item poly16x4_t vreinterpret_p16_p8 (poly8x8_t)
+@item float32x2_t vreinterpret_f32_u8 (uint8x8_t)
@end itemize
@itemize @bullet
-@item poly16x8_t vreinterpretq_p16_u32 (uint32x4_t)
+@item float32x2_t vreinterpret_f32_u16 (uint16x4_t)
@end itemize
@itemize @bullet
-@item poly16x8_t vreinterpretq_p16_u16 (uint16x8_t)
+@item float32x2_t vreinterpret_f32_u32 (uint32x2_t)
@end itemize
@itemize @bullet
-@item poly16x8_t vreinterpretq_p16_u8 (uint8x16_t)
+@item poly64x1_t vreinterpret_p64_p8 (poly8x8_t)
@end itemize
@itemize @bullet
-@item poly16x8_t vreinterpretq_p16_s32 (int32x4_t)
+@item poly64x1_t vreinterpret_p64_p16 (poly16x4_t)
@end itemize
@itemize @bullet
-@item poly16x8_t vreinterpretq_p16_s16 (int16x8_t)
+@item poly64x1_t vreinterpret_p64_f32 (float32x2_t)
@end itemize
@itemize @bullet
-@item poly16x8_t vreinterpretq_p16_s8 (int8x16_t)
+@item poly64x1_t vreinterpret_p64_s64 (int64x1_t)
@end itemize
@itemize @bullet
-@item poly16x8_t vreinterpretq_p16_u64 (uint64x2_t)
+@item poly64x1_t vreinterpret_p64_u64 (uint64x1_t)
@end itemize
@itemize @bullet
-@item poly16x8_t vreinterpretq_p16_s64 (int64x2_t)
+@item poly64x1_t vreinterpret_p64_s8 (int8x8_t)
@end itemize
@itemize @bullet
-@item poly16x8_t vreinterpretq_p16_f32 (float32x4_t)
+@item poly64x1_t vreinterpret_p64_s16 (int16x4_t)
@end itemize
@itemize @bullet
-@item poly16x8_t vreinterpretq_p16_p8 (poly8x16_t)
+@item poly64x1_t vreinterpret_p64_s32 (int32x2_t)
@end itemize
@itemize @bullet
-@item float32x2_t vreinterpret_f32_u32 (uint32x2_t)
+@item poly64x1_t vreinterpret_p64_u8 (uint8x8_t)
@end itemize
@itemize @bullet
-@item float32x2_t vreinterpret_f32_u16 (uint16x4_t)
+@item poly64x1_t vreinterpret_p64_u16 (uint16x4_t)
@end itemize
@itemize @bullet
-@item float32x2_t vreinterpret_f32_u8 (uint8x8_t)
+@item poly64x1_t vreinterpret_p64_u32 (uint32x2_t)
@end itemize
@itemize @bullet
-@item float32x2_t vreinterpret_f32_s32 (int32x2_t)
+@item int64x1_t vreinterpret_s64_p8 (poly8x8_t)
@end itemize
@itemize @bullet
-@item float32x2_t vreinterpret_f32_s16 (int16x4_t)
+@item int64x1_t vreinterpret_s64_p16 (poly16x4_t)
@end itemize
@itemize @bullet
-@item float32x2_t vreinterpret_f32_s8 (int8x8_t)
+@item int64x1_t vreinterpret_s64_f32 (float32x2_t)
@end itemize
@itemize @bullet
-@item float32x2_t vreinterpret_f32_u64 (uint64x1_t)
+@item int64x1_t vreinterpret_s64_p64 (poly64x1_t)
@end itemize
@itemize @bullet
-@item float32x2_t vreinterpret_f32_s64 (int64x1_t)
+@item int64x1_t vreinterpret_s64_u64 (uint64x1_t)
@end itemize
@itemize @bullet
-@item float32x2_t vreinterpret_f32_p16 (poly16x4_t)
+@item int64x1_t vreinterpret_s64_s8 (int8x8_t)
@end itemize
@itemize @bullet
-@item float32x2_t vreinterpret_f32_p8 (poly8x8_t)
+@item int64x1_t vreinterpret_s64_s16 (int16x4_t)
@end itemize
@itemize @bullet
-@item float32x4_t vreinterpretq_f32_u32 (uint32x4_t)
+@item int64x1_t vreinterpret_s64_s32 (int32x2_t)
@end itemize
@itemize @bullet
-@item float32x4_t vreinterpretq_f32_u16 (uint16x8_t)
+@item int64x1_t vreinterpret_s64_u8 (uint8x8_t)
@end itemize
@itemize @bullet
-@item float32x4_t vreinterpretq_f32_u8 (uint8x16_t)
+@item int64x1_t vreinterpret_s64_u16 (uint16x4_t)
@end itemize
@itemize @bullet
-@item float32x4_t vreinterpretq_f32_s32 (int32x4_t)
+@item int64x1_t vreinterpret_s64_u32 (uint32x2_t)
@end itemize
@itemize @bullet
-@item float32x4_t vreinterpretq_f32_s16 (int16x8_t)
+@item uint64x1_t vreinterpret_u64_p8 (poly8x8_t)
@end itemize
@itemize @bullet
-@item float32x4_t vreinterpretq_f32_s8 (int8x16_t)
+@item uint64x1_t vreinterpret_u64_p16 (poly16x4_t)
@end itemize
@itemize @bullet
-@item float32x4_t vreinterpretq_f32_u64 (uint64x2_t)
+@item uint64x1_t vreinterpret_u64_f32 (float32x2_t)
@end itemize
@itemize @bullet
-@item float32x4_t vreinterpretq_f32_s64 (int64x2_t)
+@item uint64x1_t vreinterpret_u64_p64 (poly64x1_t)
@end itemize
@itemize @bullet
-@item float32x4_t vreinterpretq_f32_p16 (poly16x8_t)
+@item uint64x1_t vreinterpret_u64_s64 (int64x1_t)
@end itemize
@itemize @bullet
-@item float32x4_t vreinterpretq_f32_p8 (poly8x16_t)
+@item uint64x1_t vreinterpret_u64_s8 (int8x8_t)
@end itemize
@itemize @bullet
-@item int64x1_t vreinterpret_s64_u32 (uint32x2_t)
+@item uint64x1_t vreinterpret_u64_s16 (int16x4_t)
@end itemize
@itemize @bullet
-@item int64x1_t vreinterpret_s64_u16 (uint16x4_t)
+@item uint64x1_t vreinterpret_u64_s32 (int32x2_t)
@end itemize
@itemize @bullet
-@item int64x1_t vreinterpret_s64_u8 (uint8x8_t)
+@item uint64x1_t vreinterpret_u64_u8 (uint8x8_t)
@end itemize
@itemize @bullet
-@item int64x1_t vreinterpret_s64_s32 (int32x2_t)
+@item uint64x1_t vreinterpret_u64_u16 (uint16x4_t)
@end itemize
@itemize @bullet
-@item int64x1_t vreinterpret_s64_s16 (int16x4_t)
+@item uint64x1_t vreinterpret_u64_u32 (uint32x2_t)
@end itemize
@itemize @bullet
-@item int64x1_t vreinterpret_s64_s8 (int8x8_t)
+@item int8x8_t vreinterpret_s8_p8 (poly8x8_t)
@end itemize
@itemize @bullet
-@item int64x1_t vreinterpret_s64_u64 (uint64x1_t)
+@item int8x8_t vreinterpret_s8_p16 (poly16x4_t)
@end itemize
@itemize @bullet
-@item int64x1_t vreinterpret_s64_f32 (float32x2_t)
+@item int8x8_t vreinterpret_s8_f32 (float32x2_t)
@end itemize
@itemize @bullet
-@item int64x1_t vreinterpret_s64_p16 (poly16x4_t)
+@item int8x8_t vreinterpret_s8_p64 (poly64x1_t)
@end itemize
@itemize @bullet
-@item int64x1_t vreinterpret_s64_p8 (poly8x8_t)
+@item int8x8_t vreinterpret_s8_s64 (int64x1_t)
@end itemize
@itemize @bullet
-@item int64x2_t vreinterpretq_s64_u32 (uint32x4_t)
+@item int8x8_t vreinterpret_s8_u64 (uint64x1_t)
@end itemize
@itemize @bullet
-@item int64x2_t vreinterpretq_s64_u16 (uint16x8_t)
+@item int8x8_t vreinterpret_s8_s16 (int16x4_t)
@end itemize
@itemize @bullet
-@item int64x2_t vreinterpretq_s64_u8 (uint8x16_t)
+@item int8x8_t vreinterpret_s8_s32 (int32x2_t)
@end itemize
@itemize @bullet
-@item int64x2_t vreinterpretq_s64_s32 (int32x4_t)
+@item int8x8_t vreinterpret_s8_u8 (uint8x8_t)
@end itemize
@itemize @bullet
-@item int64x2_t vreinterpretq_s64_s16 (int16x8_t)
+@item int8x8_t vreinterpret_s8_u16 (uint16x4_t)
@end itemize
@itemize @bullet
-@item int64x2_t vreinterpretq_s64_s8 (int8x16_t)
+@item int8x8_t vreinterpret_s8_u32 (uint32x2_t)
@end itemize
@itemize @bullet
-@item int64x2_t vreinterpretq_s64_u64 (uint64x2_t)
+@item int16x4_t vreinterpret_s16_p8 (poly8x8_t)
@end itemize
@itemize @bullet
-@item int64x2_t vreinterpretq_s64_f32 (float32x4_t)
+@item int16x4_t vreinterpret_s16_p16 (poly16x4_t)
@end itemize
@itemize @bullet
-@item int64x2_t vreinterpretq_s64_p16 (poly16x8_t)
+@item int16x4_t vreinterpret_s16_f32 (float32x2_t)
@end itemize
@itemize @bullet
-@item int64x2_t vreinterpretq_s64_p8 (poly8x16_t)
+@item int16x4_t vreinterpret_s16_p64 (poly64x1_t)
@end itemize
@itemize @bullet
-@item uint64x1_t vreinterpret_u64_u32 (uint32x2_t)
+@item int16x4_t vreinterpret_s16_s64 (int64x1_t)
@end itemize
@itemize @bullet
-@item uint64x1_t vreinterpret_u64_u16 (uint16x4_t)
+@item int16x4_t vreinterpret_s16_u64 (uint64x1_t)
@end itemize
@itemize @bullet
-@item uint64x1_t vreinterpret_u64_u8 (uint8x8_t)
+@item int16x4_t vreinterpret_s16_s8 (int8x8_t)
@end itemize
@itemize @bullet
-@item uint64x1_t vreinterpret_u64_s32 (int32x2_t)
+@item int16x4_t vreinterpret_s16_s32 (int32x2_t)
@end itemize
@itemize @bullet
-@item uint64x1_t vreinterpret_u64_s16 (int16x4_t)
+@item int16x4_t vreinterpret_s16_u8 (uint8x8_t)
@end itemize
@itemize @bullet
-@item uint64x1_t vreinterpret_u64_s8 (int8x8_t)
+@item int16x4_t vreinterpret_s16_u16 (uint16x4_t)
@end itemize
@itemize @bullet
-@item uint64x1_t vreinterpret_u64_s64 (int64x1_t)
+@item int16x4_t vreinterpret_s16_u32 (uint32x2_t)
@end itemize
@itemize @bullet
-@item uint64x1_t vreinterpret_u64_f32 (float32x2_t)
+@item int32x2_t vreinterpret_s32_p8 (poly8x8_t)
@end itemize
@itemize @bullet
-@item uint64x1_t vreinterpret_u64_p16 (poly16x4_t)
+@item int32x2_t vreinterpret_s32_p16 (poly16x4_t)
@end itemize
@itemize @bullet
-@item uint64x1_t vreinterpret_u64_p8 (poly8x8_t)
+@item int32x2_t vreinterpret_s32_f32 (float32x2_t)
@end itemize
@itemize @bullet
-@item uint64x2_t vreinterpretq_u64_u32 (uint32x4_t)
+@item int32x2_t vreinterpret_s32_p64 (poly64x1_t)
@end itemize
@itemize @bullet
-@item uint64x2_t vreinterpretq_u64_u16 (uint16x8_t)
+@item int32x2_t vreinterpret_s32_s64 (int64x1_t)
@end itemize
@itemize @bullet
-@item uint64x2_t vreinterpretq_u64_u8 (uint8x16_t)
+@item int32x2_t vreinterpret_s32_u64 (uint64x1_t)
@end itemize
@itemize @bullet
-@item uint64x2_t vreinterpretq_u64_s32 (int32x4_t)
+@item int32x2_t vreinterpret_s32_s8 (int8x8_t)
@end itemize
@itemize @bullet
-@item uint64x2_t vreinterpretq_u64_s16 (int16x8_t)
+@item int32x2_t vreinterpret_s32_s16 (int16x4_t)
@end itemize
@itemize @bullet
-@item uint64x2_t vreinterpretq_u64_s8 (int8x16_t)
+@item int32x2_t vreinterpret_s32_u8 (uint8x8_t)
@end itemize
@itemize @bullet
-@item uint64x2_t vreinterpretq_u64_s64 (int64x2_t)
+@item int32x2_t vreinterpret_s32_u16 (uint16x4_t)
@end itemize
@itemize @bullet
-@item uint64x2_t vreinterpretq_u64_f32 (float32x4_t)
+@item int32x2_t vreinterpret_s32_u32 (uint32x2_t)
@end itemize
@itemize @bullet
-@item uint64x2_t vreinterpretq_u64_p16 (poly16x8_t)
+@item uint8x8_t vreinterpret_u8_p8 (poly8x8_t)
@end itemize
@itemize @bullet
-@item uint64x2_t vreinterpretq_u64_p8 (poly8x16_t)
+@item uint8x8_t vreinterpret_u8_p16 (poly16x4_t)
@end itemize
@itemize @bullet
-@item int8x8_t vreinterpret_s8_u32 (uint32x2_t)
+@item uint8x8_t vreinterpret_u8_f32 (float32x2_t)
@end itemize
@itemize @bullet
-@item int8x8_t vreinterpret_s8_u16 (uint16x4_t)
+@item uint8x8_t vreinterpret_u8_p64 (poly64x1_t)
@end itemize
@itemize @bullet
-@item int8x8_t vreinterpret_s8_u8 (uint8x8_t)
+@item uint8x8_t vreinterpret_u8_s64 (int64x1_t)
@end itemize
@itemize @bullet
-@item int8x8_t vreinterpret_s8_s32 (int32x2_t)
+@item uint8x8_t vreinterpret_u8_u64 (uint64x1_t)
@end itemize
@itemize @bullet
-@item int8x8_t vreinterpret_s8_s16 (int16x4_t)
+@item uint8x8_t vreinterpret_u8_s8 (int8x8_t)
@end itemize
@itemize @bullet
-@item int8x8_t vreinterpret_s8_u64 (uint64x1_t)
+@item uint8x8_t vreinterpret_u8_s16 (int16x4_t)
@end itemize
@itemize @bullet
-@item int8x8_t vreinterpret_s8_s64 (int64x1_t)
+@item uint8x8_t vreinterpret_u8_s32 (int32x2_t)
@end itemize
@itemize @bullet
-@item int8x8_t vreinterpret_s8_f32 (float32x2_t)
+@item uint8x8_t vreinterpret_u8_u16 (uint16x4_t)
@end itemize
@itemize @bullet
-@item int8x8_t vreinterpret_s8_p16 (poly16x4_t)
+@item uint8x8_t vreinterpret_u8_u32 (uint32x2_t)
@end itemize
@itemize @bullet
-@item int8x8_t vreinterpret_s8_p8 (poly8x8_t)
+@item uint16x4_t vreinterpret_u16_p8 (poly8x8_t)
@end itemize
@itemize @bullet
-@item int8x16_t vreinterpretq_s8_u32 (uint32x4_t)
+@item uint16x4_t vreinterpret_u16_p16 (poly16x4_t)
@end itemize
@itemize @bullet
-@item int8x16_t vreinterpretq_s8_u16 (uint16x8_t)
+@item uint16x4_t vreinterpret_u16_f32 (float32x2_t)
@end itemize
@itemize @bullet
-@item int8x16_t vreinterpretq_s8_u8 (uint8x16_t)
+@item uint16x4_t vreinterpret_u16_p64 (poly64x1_t)
@end itemize
@itemize @bullet
-@item int8x16_t vreinterpretq_s8_s32 (int32x4_t)
+@item uint16x4_t vreinterpret_u16_s64 (int64x1_t)
@end itemize
@itemize @bullet
-@item int8x16_t vreinterpretq_s8_s16 (int16x8_t)
+@item uint16x4_t vreinterpret_u16_u64 (uint64x1_t)
@end itemize
@itemize @bullet
-@item int8x16_t vreinterpretq_s8_u64 (uint64x2_t)
+@item uint16x4_t vreinterpret_u16_s8 (int8x8_t)
@end itemize
@itemize @bullet
-@item int8x16_t vreinterpretq_s8_s64 (int64x2_t)
+@item uint16x4_t vreinterpret_u16_s16 (int16x4_t)
@end itemize
@itemize @bullet
-@item int8x16_t vreinterpretq_s8_f32 (float32x4_t)
+@item uint16x4_t vreinterpret_u16_s32 (int32x2_t)
@end itemize
@itemize @bullet
-@item int8x16_t vreinterpretq_s8_p16 (poly16x8_t)
+@item uint16x4_t vreinterpret_u16_u8 (uint8x8_t)
@end itemize
@itemize @bullet
-@item int8x16_t vreinterpretq_s8_p8 (poly8x16_t)
+@item uint16x4_t vreinterpret_u16_u32 (uint32x2_t)
@end itemize
@itemize @bullet
-@item int16x4_t vreinterpret_s16_u32 (uint32x2_t)
+@item uint32x2_t vreinterpret_u32_p8 (poly8x8_t)
@end itemize
@itemize @bullet
-@item int16x4_t vreinterpret_s16_u16 (uint16x4_t)
+@item uint32x2_t vreinterpret_u32_p16 (poly16x4_t)
@end itemize
@itemize @bullet
-@item int16x4_t vreinterpret_s16_u8 (uint8x8_t)
+@item uint32x2_t vreinterpret_u32_f32 (float32x2_t)
@end itemize
@itemize @bullet
-@item int16x4_t vreinterpret_s16_s32 (int32x2_t)
+@item uint32x2_t vreinterpret_u32_p64 (poly64x1_t)
@end itemize
@itemize @bullet
-@item int16x4_t vreinterpret_s16_s8 (int8x8_t)
+@item uint32x2_t vreinterpret_u32_s64 (int64x1_t)
@end itemize
@itemize @bullet
-@item int16x4_t vreinterpret_s16_u64 (uint64x1_t)
+@item uint32x2_t vreinterpret_u32_u64 (uint64x1_t)
@end itemize
@itemize @bullet
-@item int16x4_t vreinterpret_s16_s64 (int64x1_t)
+@item uint32x2_t vreinterpret_u32_s8 (int8x8_t)
@end itemize
@itemize @bullet
-@item int16x4_t vreinterpret_s16_f32 (float32x2_t)
+@item uint32x2_t vreinterpret_u32_s16 (int16x4_t)
@end itemize
@itemize @bullet
-@item int16x4_t vreinterpret_s16_p16 (poly16x4_t)
+@item uint32x2_t vreinterpret_u32_s32 (int32x2_t)
@end itemize
@itemize @bullet
-@item int16x4_t vreinterpret_s16_p8 (poly8x8_t)
+@item uint32x2_t vreinterpret_u32_u8 (uint8x8_t)
@end itemize
@itemize @bullet
-@item int16x8_t vreinterpretq_s16_u32 (uint32x4_t)
+@item uint32x2_t vreinterpret_u32_u16 (uint16x4_t)
@end itemize
@itemize @bullet
-@item int16x8_t vreinterpretq_s16_u16 (uint16x8_t)
+@item poly8x16_t vreinterpretq_p8_p16 (poly16x8_t)
@end itemize
@itemize @bullet
-@item int16x8_t vreinterpretq_s16_u8 (uint8x16_t)
+@item poly8x16_t vreinterpretq_p8_f32 (float32x4_t)
@end itemize
@itemize @bullet
-@item int16x8_t vreinterpretq_s16_s32 (int32x4_t)
+@item poly8x16_t vreinterpretq_p8_p64 (poly64x2_t)
@end itemize
@itemize @bullet
-@item int16x8_t vreinterpretq_s16_s8 (int8x16_t)
+@item poly8x16_t vreinterpretq_p8_p128 (poly128_t)
@end itemize
@itemize @bullet
-@item int16x8_t vreinterpretq_s16_u64 (uint64x2_t)
+@item poly8x16_t vreinterpretq_p8_s64 (int64x2_t)
@end itemize
@itemize @bullet
-@item int16x8_t vreinterpretq_s16_s64 (int64x2_t)
+@item poly8x16_t vreinterpretq_p8_u64 (uint64x2_t)
@end itemize
@itemize @bullet
-@item int16x8_t vreinterpretq_s16_f32 (float32x4_t)
+@item poly8x16_t vreinterpretq_p8_s8 (int8x16_t)
@end itemize
@itemize @bullet
-@item int16x8_t vreinterpretq_s16_p16 (poly16x8_t)
+@item poly8x16_t vreinterpretq_p8_s16 (int16x8_t)
@end itemize
@itemize @bullet
-@item int16x8_t vreinterpretq_s16_p8 (poly8x16_t)
+@item poly8x16_t vreinterpretq_p8_s32 (int32x4_t)
@end itemize
@itemize @bullet
-@item int32x2_t vreinterpret_s32_u32 (uint32x2_t)
+@item poly8x16_t vreinterpretq_p8_u8 (uint8x16_t)
@end itemize
@itemize @bullet
-@item int32x2_t vreinterpret_s32_u16 (uint16x4_t)
+@item poly8x16_t vreinterpretq_p8_u16 (uint16x8_t)
@end itemize
@itemize @bullet
-@item int32x2_t vreinterpret_s32_u8 (uint8x8_t)
+@item poly8x16_t vreinterpretq_p8_u32 (uint32x4_t)
@end itemize
@itemize @bullet
-@item int32x2_t vreinterpret_s32_s16 (int16x4_t)
+@item poly16x8_t vreinterpretq_p16_p8 (poly8x16_t)
@end itemize
@itemize @bullet
-@item int32x2_t vreinterpret_s32_s8 (int8x8_t)
+@item poly16x8_t vreinterpretq_p16_f32 (float32x4_t)
@end itemize
@itemize @bullet
-@item int32x2_t vreinterpret_s32_u64 (uint64x1_t)
+@item poly16x8_t vreinterpretq_p16_p64 (poly64x2_t)
@end itemize
@itemize @bullet
-@item int32x2_t vreinterpret_s32_s64 (int64x1_t)
+@item poly16x8_t vreinterpretq_p16_p128 (poly128_t)
@end itemize
@itemize @bullet
-@item int32x2_t vreinterpret_s32_f32 (float32x2_t)
+@item poly16x8_t vreinterpretq_p16_s64 (int64x2_t)
@end itemize
@itemize @bullet
-@item int32x2_t vreinterpret_s32_p16 (poly16x4_t)
+@item poly16x8_t vreinterpretq_p16_u64 (uint64x2_t)
@end itemize
@itemize @bullet
-@item int32x2_t vreinterpret_s32_p8 (poly8x8_t)
+@item poly16x8_t vreinterpretq_p16_s8 (int8x16_t)
@end itemize
@itemize @bullet
-@item int32x4_t vreinterpretq_s32_u32 (uint32x4_t)
+@item poly16x8_t vreinterpretq_p16_s16 (int16x8_t)
@end itemize
@itemize @bullet
-@item int32x4_t vreinterpretq_s32_u16 (uint16x8_t)
+@item poly16x8_t vreinterpretq_p16_s32 (int32x4_t)
@end itemize
@itemize @bullet
-@item int32x4_t vreinterpretq_s32_u8 (uint8x16_t)
+@item poly16x8_t vreinterpretq_p16_u8 (uint8x16_t)
@end itemize
@itemize @bullet
-@item int32x4_t vreinterpretq_s32_s16 (int16x8_t)
+@item poly16x8_t vreinterpretq_p16_u16 (uint16x8_t)
@end itemize
@itemize @bullet
-@item int32x4_t vreinterpretq_s32_s8 (int8x16_t)
+@item poly16x8_t vreinterpretq_p16_u32 (uint32x4_t)
@end itemize
@itemize @bullet
-@item int32x4_t vreinterpretq_s32_u64 (uint64x2_t)
+@item float32x4_t vreinterpretq_f32_p8 (poly8x16_t)
@end itemize
@itemize @bullet
-@item int32x4_t vreinterpretq_s32_s64 (int64x2_t)
+@item float32x4_t vreinterpretq_f32_p16 (poly16x8_t)
@end itemize
@itemize @bullet
-@item int32x4_t vreinterpretq_s32_f32 (float32x4_t)
+@item float32x4_t vreinterpretq_f32_p64 (poly64x2_t)
@end itemize
@itemize @bullet
-@item int32x4_t vreinterpretq_s32_p16 (poly16x8_t)
+@item float32x4_t vreinterpretq_f32_p128 (poly128_t)
@end itemize
@itemize @bullet
-@item int32x4_t vreinterpretq_s32_p8 (poly8x16_t)
+@item float32x4_t vreinterpretq_f32_s64 (int64x2_t)
@end itemize
@itemize @bullet
-@item uint8x8_t vreinterpret_u8_u32 (uint32x2_t)
+@item float32x4_t vreinterpretq_f32_u64 (uint64x2_t)
@end itemize
@itemize @bullet
-@item uint8x8_t vreinterpret_u8_u16 (uint16x4_t)
+@item float32x4_t vreinterpretq_f32_s8 (int8x16_t)
@end itemize
@itemize @bullet
-@item uint8x8_t vreinterpret_u8_s32 (int32x2_t)
+@item float32x4_t vreinterpretq_f32_s16 (int16x8_t)
@end itemize
@itemize @bullet
-@item uint8x8_t vreinterpret_u8_s16 (int16x4_t)
+@item float32x4_t vreinterpretq_f32_s32 (int32x4_t)
@end itemize
@itemize @bullet
-@item uint8x8_t vreinterpret_u8_s8 (int8x8_t)
+@item float32x4_t vreinterpretq_f32_u8 (uint8x16_t)
@end itemize
@itemize @bullet
-@item uint8x8_t vreinterpret_u8_u64 (uint64x1_t)
+@item float32x4_t vreinterpretq_f32_u16 (uint16x8_t)
@end itemize
@itemize @bullet
-@item uint8x8_t vreinterpret_u8_s64 (int64x1_t)
+@item float32x4_t vreinterpretq_f32_u32 (uint32x4_t)
@end itemize
@itemize @bullet
-@item uint8x8_t vreinterpret_u8_f32 (float32x2_t)
+@item poly64x2_t vreinterpretq_p64_p8 (poly8x16_t)
@end itemize
@itemize @bullet
-@item uint8x8_t vreinterpret_u8_p16 (poly16x4_t)
+@item poly64x2_t vreinterpretq_p64_p16 (poly16x8_t)
@end itemize
@itemize @bullet
-@item uint8x8_t vreinterpret_u8_p8 (poly8x8_t)
+@item poly64x2_t vreinterpretq_p64_f32 (float32x4_t)
@end itemize
@itemize @bullet
-@item uint8x16_t vreinterpretq_u8_u32 (uint32x4_t)
+@item poly64x2_t vreinterpretq_p64_p128 (poly128_t)
@end itemize
@itemize @bullet
-@item uint8x16_t vreinterpretq_u8_u16 (uint16x8_t)
+@item poly64x2_t vreinterpretq_p64_s64 (int64x2_t)
@end itemize
@itemize @bullet
-@item uint8x16_t vreinterpretq_u8_s32 (int32x4_t)
+@item poly64x2_t vreinterpretq_p64_u64 (uint64x2_t)
@end itemize
@itemize @bullet
-@item uint8x16_t vreinterpretq_u8_s16 (int16x8_t)
+@item poly64x2_t vreinterpretq_p64_s8 (int8x16_t)
@end itemize
@itemize @bullet
-@item uint8x16_t vreinterpretq_u8_s8 (int8x16_t)
+@item poly64x2_t vreinterpretq_p64_s16 (int16x8_t)
@end itemize
@itemize @bullet
-@item uint8x16_t vreinterpretq_u8_u64 (uint64x2_t)
+@item poly64x2_t vreinterpretq_p64_s32 (int32x4_t)
@end itemize
@itemize @bullet
-@item uint8x16_t vreinterpretq_u8_s64 (int64x2_t)
+@item poly64x2_t vreinterpretq_p64_u8 (uint8x16_t)
@end itemize
@itemize @bullet
-@item uint8x16_t vreinterpretq_u8_f32 (float32x4_t)
+@item poly64x2_t vreinterpretq_p64_u16 (uint16x8_t)
@end itemize
@itemize @bullet
-@item uint8x16_t vreinterpretq_u8_p16 (poly16x8_t)
+@item poly64x2_t vreinterpretq_p64_u32 (uint32x4_t)
+@end itemize
+
+
+@itemize @bullet
+@item poly128_t vreinterpretq_p128_p8 (poly8x16_t)
+@end itemize
+
+
+@itemize @bullet
+@item poly128_t vreinterpretq_p128_p16 (poly16x8_t)
+@end itemize
+
+
+@itemize @bullet
+@item poly128_t vreinterpretq_p128_f32 (float32x4_t)
+@end itemize
+
+
+@itemize @bullet
+@item poly128_t vreinterpretq_p128_p64 (poly64x2_t)
+@end itemize
+
+
+@itemize @bullet
+@item poly128_t vreinterpretq_p128_s64 (int64x2_t)
+@end itemize
+
+
+@itemize @bullet
+@item poly128_t vreinterpretq_p128_u64 (uint64x2_t)
+@end itemize
+
+
+@itemize @bullet
+@item poly128_t vreinterpretq_p128_s8 (int8x16_t)
+@end itemize
+
+
+@itemize @bullet
+@item poly128_t vreinterpretq_p128_s16 (int16x8_t)
+@end itemize
+
+
+@itemize @bullet
+@item poly128_t vreinterpretq_p128_s32 (int32x4_t)
+@end itemize
+
+
+@itemize @bullet
+@item poly128_t vreinterpretq_p128_u8 (uint8x16_t)
+@end itemize
+
+
+@itemize @bullet
+@item poly128_t vreinterpretq_p128_u16 (uint16x8_t)
+@end itemize
+
+
+@itemize @bullet
+@item poly128_t vreinterpretq_p128_u32 (uint32x4_t)
+@end itemize
+
+
+@itemize @bullet
+@item int64x2_t vreinterpretq_s64_p8 (poly8x16_t)
+@end itemize
+
+
+@itemize @bullet
+@item int64x2_t vreinterpretq_s64_p16 (poly16x8_t)
+@end itemize
+
+
+@itemize @bullet
+@item int64x2_t vreinterpretq_s64_f32 (float32x4_t)
+@end itemize
+
+
+@itemize @bullet
+@item int64x2_t vreinterpretq_s64_p64 (poly64x2_t)
+@end itemize
+
+
+@itemize @bullet
+@item int64x2_t vreinterpretq_s64_p128 (poly128_t)
+@end itemize
+
+
+@itemize @bullet
+@item int64x2_t vreinterpretq_s64_u64 (uint64x2_t)
+@end itemize
+
+
+@itemize @bullet
+@item int64x2_t vreinterpretq_s64_s8 (int8x16_t)
+@end itemize
+
+
+@itemize @bullet
+@item int64x2_t vreinterpretq_s64_s16 (int16x8_t)
+@end itemize
+
+
+@itemize @bullet
+@item int64x2_t vreinterpretq_s64_s32 (int32x4_t)
+@end itemize
+
+
+@itemize @bullet
+@item int64x2_t vreinterpretq_s64_u8 (uint8x16_t)
+@end itemize
+
+
+@itemize @bullet
+@item int64x2_t vreinterpretq_s64_u16 (uint16x8_t)
+@end itemize
+
+
+@itemize @bullet
+@item int64x2_t vreinterpretq_s64_u32 (uint32x4_t)
+@end itemize
+
+
+@itemize @bullet
+@item uint64x2_t vreinterpretq_u64_p8 (poly8x16_t)
+@end itemize
+
+
+@itemize @bullet
+@item uint64x2_t vreinterpretq_u64_p16 (poly16x8_t)
+@end itemize
+
+
+@itemize @bullet
+@item uint64x2_t vreinterpretq_u64_f32 (float32x4_t)
+@end itemize
+
+
+@itemize @bullet
+@item uint64x2_t vreinterpretq_u64_p64 (poly64x2_t)
+@end itemize
+
+
+@itemize @bullet
+@item uint64x2_t vreinterpretq_u64_p128 (poly128_t)
+@end itemize
+
+
+@itemize @bullet
+@item uint64x2_t vreinterpretq_u64_s64 (int64x2_t)
+@end itemize
+
+
+@itemize @bullet
+@item uint64x2_t vreinterpretq_u64_s8 (int8x16_t)
+@end itemize
+
+
+@itemize @bullet
+@item uint64x2_t vreinterpretq_u64_s16 (int16x8_t)
+@end itemize
+
+
+@itemize @bullet
+@item uint64x2_t vreinterpretq_u64_s32 (int32x4_t)
+@end itemize
+
+
+@itemize @bullet
+@item uint64x2_t vreinterpretq_u64_u8 (uint8x16_t)
+@end itemize
+
+
+@itemize @bullet
+@item uint64x2_t vreinterpretq_u64_u16 (uint16x8_t)
+@end itemize
+
+
+@itemize @bullet
+@item uint64x2_t vreinterpretq_u64_u32 (uint32x4_t)
+@end itemize
+
+
+@itemize @bullet
+@item int8x16_t vreinterpretq_s8_p8 (poly8x16_t)
+@end itemize
+
+
+@itemize @bullet
+@item int8x16_t vreinterpretq_s8_p16 (poly16x8_t)
+@end itemize
+
+
+@itemize @bullet
+@item int8x16_t vreinterpretq_s8_f32 (float32x4_t)
+@end itemize
+
+
+@itemize @bullet
+@item int8x16_t vreinterpretq_s8_p64 (poly64x2_t)
+@end itemize
+
+
+@itemize @bullet
+@item int8x16_t vreinterpretq_s8_p128 (poly128_t)
+@end itemize
+
+
+@itemize @bullet
+@item int8x16_t vreinterpretq_s8_s64 (int64x2_t)
+@end itemize
+
+
+@itemize @bullet
+@item int8x16_t vreinterpretq_s8_u64 (uint64x2_t)
+@end itemize
+
+
+@itemize @bullet
+@item int8x16_t vreinterpretq_s8_s16 (int16x8_t)
+@end itemize
+
+
+@itemize @bullet
+@item int8x16_t vreinterpretq_s8_s32 (int32x4_t)
+@end itemize
+
+
+@itemize @bullet
+@item int8x16_t vreinterpretq_s8_u8 (uint8x16_t)
+@end itemize
+
+
+@itemize @bullet
+@item int8x16_t vreinterpretq_s8_u16 (uint16x8_t)
+@end itemize
+
+
+@itemize @bullet
+@item int8x16_t vreinterpretq_s8_u32 (uint32x4_t)
+@end itemize
+
+
+@itemize @bullet
+@item int16x8_t vreinterpretq_s16_p8 (poly8x16_t)
+@end itemize
+
+
+@itemize @bullet
+@item int16x8_t vreinterpretq_s16_p16 (poly16x8_t)
+@end itemize
+
+
+@itemize @bullet
+@item int16x8_t vreinterpretq_s16_f32 (float32x4_t)
+@end itemize
+
+
+@itemize @bullet
+@item int16x8_t vreinterpretq_s16_p64 (poly64x2_t)
+@end itemize
+
+
+@itemize @bullet
+@item int16x8_t vreinterpretq_s16_p128 (poly128_t)
+@end itemize
+
+
+@itemize @bullet
+@item int16x8_t vreinterpretq_s16_s64 (int64x2_t)
+@end itemize
+
+
+@itemize @bullet
+@item int16x8_t vreinterpretq_s16_u64 (uint64x2_t)
+@end itemize
+
+
+@itemize @bullet
+@item int16x8_t vreinterpretq_s16_s8 (int8x16_t)
+@end itemize
+
+
+@itemize @bullet
+@item int16x8_t vreinterpretq_s16_s32 (int32x4_t)
+@end itemize
+
+
+@itemize @bullet
+@item int16x8_t vreinterpretq_s16_u8 (uint8x16_t)
+@end itemize
+
+
+@itemize @bullet
+@item int16x8_t vreinterpretq_s16_u16 (uint16x8_t)
+@end itemize
+
+
+@itemize @bullet
+@item int16x8_t vreinterpretq_s16_u32 (uint32x4_t)
+@end itemize
+
+
+@itemize @bullet
+@item int32x4_t vreinterpretq_s32_p8 (poly8x16_t)
+@end itemize
+
+
+@itemize @bullet
+@item int32x4_t vreinterpretq_s32_p16 (poly16x8_t)
+@end itemize
+
+
+@itemize @bullet
+@item int32x4_t vreinterpretq_s32_f32 (float32x4_t)
+@end itemize
+
+
+@itemize @bullet
+@item int32x4_t vreinterpretq_s32_p64 (poly64x2_t)
+@end itemize
+
+
+@itemize @bullet
+@item int32x4_t vreinterpretq_s32_p128 (poly128_t)
+@end itemize
+
+
+@itemize @bullet
+@item int32x4_t vreinterpretq_s32_s64 (int64x2_t)
+@end itemize
+
+
+@itemize @bullet
+@item int32x4_t vreinterpretq_s32_u64 (uint64x2_t)
+@end itemize
+
+
+@itemize @bullet
+@item int32x4_t vreinterpretq_s32_s8 (int8x16_t)
+@end itemize
+
+
+@itemize @bullet
+@item int32x4_t vreinterpretq_s32_s16 (int16x8_t)
+@end itemize
+
+
+@itemize @bullet
+@item int32x4_t vreinterpretq_s32_u8 (uint8x16_t)
+@end itemize
+
+
+@itemize @bullet
+@item int32x4_t vreinterpretq_s32_u16 (uint16x8_t)
+@end itemize
+
+
+@itemize @bullet
+@item int32x4_t vreinterpretq_s32_u32 (uint32x4_t)
@end itemize
@@ -11186,82 +11753,82 @@
@itemize @bullet
-@item uint16x4_t vreinterpret_u16_u32 (uint32x2_t)
+@item uint8x16_t vreinterpretq_u8_p16 (poly16x8_t)
@end itemize
@itemize @bullet
-@item uint16x4_t vreinterpret_u16_u8 (uint8x8_t)
+@item uint8x16_t vreinterpretq_u8_f32 (float32x4_t)
@end itemize
@itemize @bullet
-@item uint16x4_t vreinterpret_u16_s32 (int32x2_t)
+@item uint8x16_t vreinterpretq_u8_p64 (poly64x2_t)
@end itemize
@itemize @bullet
-@item uint16x4_t vreinterpret_u16_s16 (int16x4_t)
+@item uint8x16_t vreinterpretq_u8_p128 (poly128_t)
@end itemize
@itemize @bullet
-@item uint16x4_t vreinterpret_u16_s8 (int8x8_t)
+@item uint8x16_t vreinterpretq_u8_s64 (int64x2_t)
@end itemize
@itemize @bullet
-@item uint16x4_t vreinterpret_u16_u64 (uint64x1_t)
+@item uint8x16_t vreinterpretq_u8_u64 (uint64x2_t)
@end itemize
@itemize @bullet
-@item uint16x4_t vreinterpret_u16_s64 (int64x1_t)
+@item uint8x16_t vreinterpretq_u8_s8 (int8x16_t)
@end itemize
@itemize @bullet
-@item uint16x4_t vreinterpret_u16_f32 (float32x2_t)
+@item uint8x16_t vreinterpretq_u8_s16 (int16x8_t)
@end itemize
@itemize @bullet
-@item uint16x4_t vreinterpret_u16_p16 (poly16x4_t)
+@item uint8x16_t vreinterpretq_u8_s32 (int32x4_t)
@end itemize
@itemize @bullet
-@item uint16x4_t vreinterpret_u16_p8 (poly8x8_t)
+@item uint8x16_t vreinterpretq_u8_u16 (uint16x8_t)
@end itemize
@itemize @bullet
-@item uint16x8_t vreinterpretq_u16_u32 (uint32x4_t)
+@item uint8x16_t vreinterpretq_u8_u32 (uint32x4_t)
@end itemize
@itemize @bullet
-@item uint16x8_t vreinterpretq_u16_u8 (uint8x16_t)
+@item uint16x8_t vreinterpretq_u16_p8 (poly8x16_t)
@end itemize
@itemize @bullet
-@item uint16x8_t vreinterpretq_u16_s32 (int32x4_t)
+@item uint16x8_t vreinterpretq_u16_p16 (poly16x8_t)
@end itemize
@itemize @bullet
-@item uint16x8_t vreinterpretq_u16_s16 (int16x8_t)
+@item uint16x8_t vreinterpretq_u16_f32 (float32x4_t)
@end itemize
@itemize @bullet
-@item uint16x8_t vreinterpretq_u16_s8 (int8x16_t)
+@item uint16x8_t vreinterpretq_u16_p64 (poly64x2_t)
@end itemize
@itemize @bullet
-@item uint16x8_t vreinterpretq_u16_u64 (uint64x2_t)
+@item uint16x8_t vreinterpretq_u16_p128 (poly128_t)
@end itemize
@@ -11271,77 +11838,77 @@
@itemize @bullet
-@item uint16x8_t vreinterpretq_u16_f32 (float32x4_t)
+@item uint16x8_t vreinterpretq_u16_u64 (uint64x2_t)
@end itemize
@itemize @bullet
-@item uint16x8_t vreinterpretq_u16_p16 (poly16x8_t)
+@item uint16x8_t vreinterpretq_u16_s8 (int8x16_t)
@end itemize
@itemize @bullet
-@item uint16x8_t vreinterpretq_u16_p8 (poly8x16_t)
+@item uint16x8_t vreinterpretq_u16_s16 (int16x8_t)
@end itemize
@itemize @bullet
-@item uint32x2_t vreinterpret_u32_u16 (uint16x4_t)
+@item uint16x8_t vreinterpretq_u16_s32 (int32x4_t)
@end itemize
@itemize @bullet
-@item uint32x2_t vreinterpret_u32_u8 (uint8x8_t)
+@item uint16x8_t vreinterpretq_u16_u8 (uint8x16_t)
@end itemize
@itemize @bullet
-@item uint32x2_t vreinterpret_u32_s32 (int32x2_t)
+@item uint16x8_t vreinterpretq_u16_u32 (uint32x4_t)
@end itemize
@itemize @bullet
-@item uint32x2_t vreinterpret_u32_s16 (int16x4_t)
+@item uint32x4_t vreinterpretq_u32_p8 (poly8x16_t)
@end itemize
@itemize @bullet
-@item uint32x2_t vreinterpret_u32_s8 (int8x8_t)
+@item uint32x4_t vreinterpretq_u32_p16 (poly16x8_t)
@end itemize
@itemize @bullet
-@item uint32x2_t vreinterpret_u32_u64 (uint64x1_t)
+@item uint32x4_t vreinterpretq_u32_f32 (float32x4_t)
@end itemize
@itemize @bullet
-@item uint32x2_t vreinterpret_u32_s64 (int64x1_t)
+@item uint32x4_t vreinterpretq_u32_p64 (poly64x2_t)
@end itemize
@itemize @bullet
-@item uint32x2_t vreinterpret_u32_f32 (float32x2_t)
+@item uint32x4_t vreinterpretq_u32_p128 (poly128_t)
@end itemize
@itemize @bullet
-@item uint32x2_t vreinterpret_u32_p16 (poly16x4_t)
+@item uint32x4_t vreinterpretq_u32_s64 (int64x2_t)
@end itemize
@itemize @bullet
-@item uint32x2_t vreinterpret_u32_p8 (poly8x8_t)
+@item uint32x4_t vreinterpretq_u32_u64 (uint64x2_t)
@end itemize
@itemize @bullet
-@item uint32x4_t vreinterpretq_u32_u16 (uint16x8_t)
+@item uint32x4_t vreinterpretq_u32_s8 (int8x16_t)
@end itemize
@itemize @bullet
-@item uint32x4_t vreinterpretq_u32_u8 (uint8x16_t)
+@item uint32x4_t vreinterpretq_u32_s16 (int16x8_t)
@end itemize
@@ -11351,39 +11918,91 @@
@itemize @bullet
-@item uint32x4_t vreinterpretq_u32_s16 (int16x8_t)
+@item uint32x4_t vreinterpretq_u32_u8 (uint8x16_t)
@end itemize
@itemize @bullet
-@item uint32x4_t vreinterpretq_u32_s8 (int8x16_t)
+@item uint32x4_t vreinterpretq_u32_u16 (uint16x8_t)
@end itemize
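
The reinterpret listing above gains casts to and from the new poly64x1_t, poly64x2_t and poly128_t types. A minimal sketch of how the quad-width casts compose, using only intrinsics named in the listing (the helper names are illustrative, not part of the patch):

#include <arm_neon.h>

/* Round-trip a 128-bit quantity through poly128_t using the casts
   documented above; needs a crypto-enabled FPU such as
   -mfpu=crypto-neon-fp-armv8.  */
poly128_t
bytes_to_p128 (uint8x16_t bytes)
{
  return vreinterpretq_p128_u8 (bytes);
}

uint8x16_t
p128_to_bytes (poly128_t wide)
{
  return vreinterpretq_u8_p128 (wide);
}
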
+
+
+
@itemize @bullet
-@item uint32x4_t vreinterpretq_u32_u64 (uint64x2_t)
+@item poly128_t vldrq_p128(poly128_t const *)
@end itemize
+@itemize @bullet
+@item void vstrq_p128(poly128_t *, poly128_t)
+@end itemize
@itemize @bullet
-@item uint32x4_t vreinterpretq_u32_s64 (int64x2_t)
+@item uint64x1_t vceq_p64 (poly64x1_t, poly64x1_t)
@end itemize
+@itemize @bullet
+@item uint64x1_t vtst_p64 (poly64x1_t, poly64x1_t)
+@end itemize
@itemize @bullet
-@item uint32x4_t vreinterpretq_u32_f32 (float32x4_t)
+@item uint32_t vsha1h_u32 (uint32_t)
+@*@emph{Form of expected instruction(s):} @code{sha1h.32 @var{q0}, @var{q1}}
@end itemize
+@itemize @bullet
+@item uint32x4_t vsha1cq_u32 (uint32x4_t, uint32_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha1c.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
@itemize @bullet
-@item uint32x4_t vreinterpretq_u32_p16 (poly16x8_t)
+@item uint32x4_t vsha1pq_u32 (uint32x4_t, uint32_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha1p.32 @var{q0}, @var{q1}, @var{q2}}
@end itemize
+@itemize @bullet
+@item uint32x4_t vsha1mq_u32 (uint32x4_t, uint32_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha1m.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
@itemize @bullet
-@item uint32x4_t vreinterpretq_u32_p8 (poly8x16_t)
+@item uint32x4_t vsha1su0q_u32 (uint32x4_t, uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha1su0.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha1su1q_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha1su1.32 @var{q0}, @var{q1}, @var{q2}}
@end itemize
+@itemize @bullet
+@item uint32x4_t vsha256hq_u32 (uint32x4_t, uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha256h.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha256h2q_u32 (uint32x4_t, uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha256h2.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha256su0q_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha256su0.32 @var{q0}, @var{q1}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha256su1q_u32 (uint32x4_t, uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha256su1.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
+@itemize @bullet
+@item poly128_t vmull_p64 (poly64_t a, poly64_t b)
+@*@emph{Form of expected instruction(s):} @code{vmull.p64 @var{q0}, @var{d1}, @var{d2}}
+@end itemize
+@itemize @bullet
+@item poly128_t vmull_high_p64 (poly64x2_t a, poly64x2_t b)
+@*@emph{Form of expected instruction(s):} @code{vmull.p64 @var{q0}, @var{d1}, @var{d2}}
+@end itemize
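
Taken together, the crypto entries above cover AES, the SHA-1/SHA-256 round operations and the 64-bit carry-less multiply. A hedged sketch of one SHA-256 quad-round built only from the intrinsics listed (the function name and register comments are illustrative):

#include <arm_neon.h>

/* One SHA-256 quad-round plus message-schedule update; a sketch only,
   compiled with a crypto-enabled FPU (e.g. -mfpu=crypto-neon-fp-armv8).  */
uint32x4_t
sha256_quad_round (uint32x4_t *abcd, uint32x4_t *efgh,
                   uint32x4_t w0, uint32x4_t w1,
                   uint32x4_t w2, uint32x4_t w3,
                   uint32x4_t k)
{
  uint32x4_t wk = vaddq_u32 (w0, k);            /* W + K for four rounds  */
  uint32x4_t abcd_in = *abcd;
  *abcd = vsha256hq_u32 (*abcd, *efgh, wk);     /* sha256h.32             */
  *efgh = vsha256h2q_u32 (*efgh, abcd_in, wk);  /* sha256h2.32            */
  /* Extend the schedule for the next four rounds.  */
  return vsha256su1q_u32 (vsha256su0q_u32 (w0, w1), w2, w3);
}

vmull_p64 and vmull_high_p64 likewise return a poly128_t carry-less product, which the reinterpret casts shown earlier can convert back into a byte vector.
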
diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
index 3885993997d..9f3820d7aa0 100644
--- a/gcc/doc/extend.texi
+++ b/gcc/doc/extend.texi
@@ -8792,6 +8792,7 @@ instructions, but allow the compiler to schedule those calls.
* Alpha Built-in Functions::
* ARM iWMMXt Built-in Functions::
* ARM NEON Intrinsics::
+* ARM ACLE Intrinsics::
* AVR Built-in Functions::
* Blackfin Built-in Functions::
* FR-V Built-in Functions::
@@ -9057,6 +9058,14 @@ when the @option{-mfpu=neon} switch is used:
@include arm-neon-intrinsics.texi
+@node ARM ACLE Intrinsics
+@subsection ARM ACLE Intrinsics
+
+These built-in intrinsics for the ARMv8-A CRC32 extension are available when
+the @option{-march=armv8-a+crc} switch is used:
+
+@include arm-acle-intrinsics.texi
+
@node AVR Built-in Functions
@subsection AVR Built-in Functions
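
For the new ACLE node, a hedged example of what a CRC32 user looks like (the function name is illustrative; the __crc32b intrinsic comes from arm_acle.h and requires -march=armv8-a+crc):

#include <arm_acle.h>
#include <stdint.h>
#include <stddef.h>

/* Byte-wise CRC-32 over a buffer; a sketch built on the ACLE __crc32b
   intrinsic, compiled with -march=armv8-a+crc.  */
uint32_t
crc32_update (uint32_t crc, const uint8_t *buf, size_t len)
{
  while (len--)
    crc = __crc32b (crc, *buf++);
  return crc;
}
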
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index e0ed853720c..ded1cbae6a0 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -11332,9 +11332,12 @@ of the @option{-mcpu=} option. Permissible names are: @samp{armv2},
@samp{armv6}, @samp{armv6j},
@samp{armv6t2}, @samp{armv6z}, @samp{armv6zk}, @samp{armv6-m},
@samp{armv7}, @samp{armv7-a}, @samp{armv7-r}, @samp{armv7-m},
-@samp{armv8-a},
+@samp{armv8-a}, @samp{armv8-a+crc},
@samp{iwmmxt}, @samp{iwmmxt2}, @samp{ep9312}.
+@option{-march=armv8-a+crc} enables code generation for the ARMv8-A
+architecture together with the optional CRC32 extensions.
+
@option{-march=native} causes the compiler to auto-detect the architecture
of the build computer. At present, this feature is only supported on
Linux, and not all architectures are recognized. If the auto-detect is
diff --git a/gcc/testsuite/ChangeLog.linaro b/gcc/testsuite/ChangeLog.linaro
index f698577bec0..96ec0816474 100644
--- a/gcc/testsuite/ChangeLog.linaro
+++ b/gcc/testsuite/ChangeLog.linaro
@@ -1,3 +1,198 @@
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206519
+ 2014-01-10 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * lib/target-supports.exp
+ (check_effective_target_arm_crypto_ok_nocache): New.
+ (check_effective_target_arm_crypto_ok): Use above procedure.
+ (add_options_for_arm_crypto): Use et_arm_crypto_flags.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206151
+ 2013-12-20 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * gcc.target/arm/neon-vceq_p64.c: New test.
+ * gcc.target/arm/neon-vtst_p64.c: Likewise.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206131
+ 2013-12-04 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * lib/target-supports.exp (check_effective_target_arm_crypto_ok):
+ New procedure.
+ (add_options_for_arm_crypto): Likewise.
+ * gcc.target/arm/crypto-vaesdq_u8.c: New test.
+ * gcc.target/arm/crypto-vaeseq_u8.c: Likewise.
+ * gcc.target/arm/crypto-vaesimcq_u8.c: Likewise.
+ * gcc.target/arm/crypto-vaesmcq_u8.c: Likewise.
+ * gcc.target/arm/crypto-vldrq_p128.c: Likewise.
+ * gcc.target/arm/crypto-vmull_high_p64.c: Likewise.
+ * gcc.target/arm/crypto-vmullp64.c: Likewise.
+ * gcc.target/arm/crypto-vsha1cq_u32.c: Likewise.
+ * gcc.target/arm/crypto-vsha1h_u32.c: Likewise.
+ * gcc.target/arm/crypto-vsha1mq_u32.c: Likewise.
+ * gcc.target/arm/crypto-vsha1pq_u32.c: Likewise.
+ * gcc.target/arm/crypto-vsha1su0q_u32.c: Likewise.
+ * gcc.target/arm/crypto-vsha1su1q_u32.c: Likewise.
+ * gcc.target/arm/crypto-vsha256h2q_u32.c: Likewise.
+ * gcc.target/arm/crypto-vsha256hq_u32.c: Likewise.
+ * gcc.target/arm/crypto-vsha256su0q_u32.c: Likewise.
+ * gcc.target/arm/crypto-vsha256su1q_u32.c: Likewise.
+ * gcc.target/arm/crypto-vstrq_p128.c: Likewise.
+ * gcc.target/arm/neon/vbslQp64: Generate.
+ * gcc.target/arm/neon/vbslp64: Likewise.
+ * gcc.target/arm/neon/vcombinep64: Likewise.
+ * gcc.target/arm/neon/vcreatep64: Likewise.
+ * gcc.target/arm/neon/vdupQ_lanep64: Likewise.
+ * gcc.target/arm/neon/vdupQ_np64: Likewise.
+ * gcc.target/arm/neon/vdup_lanep64: Likewise.
+ * gcc.target/arm/neon/vdup_np64: Likewise.
+ * gcc.target/arm/neon/vextQp64: Likewise.
+ * gcc.target/arm/neon/vextp64: Likewise.
+ * gcc.target/arm/neon/vget_highp64: Likewise.
+ * gcc.target/arm/neon/vget_lowp64: Likewise.
+ * gcc.target/arm/neon/vld1Q_dupp64: Likewise.
+ * gcc.target/arm/neon/vld1Q_lanep64: Likewise.
+ * gcc.target/arm/neon/vld1Qp64: Likewise.
+ * gcc.target/arm/neon/vld1_dupp64: Likewise.
+ * gcc.target/arm/neon/vld1_lanep64: Likewise.
+ * gcc.target/arm/neon/vld1p64: Likewise.
+ * gcc.target/arm/neon/vld2_dupp64: Likewise.
+ * gcc.target/arm/neon/vld2p64: Likewise.
+ * gcc.target/arm/neon/vld3_dupp64: Likewise.
+ * gcc.target/arm/neon/vld3p64: Likewise.
+ * gcc.target/arm/neon/vld4_dupp64: Likewise.
+ * gcc.target/arm/neon/vld4p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretQf32_p128: Likewise.
+ * gcc.target/arm/neon/vreinterpretQf32_p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp128_f32: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp128_p16: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp128_p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp128_p8: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp128_s16: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp128_s32: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp128_s64: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp128_s8: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp128_u16: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp128_u32: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp128_u64: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp128_u8: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp16_p128: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp16_p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp64_f32: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp64_p128: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp64_p16: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp64_p8: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp64_s16: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp64_s32: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp64_s64: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp64_s8: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp64_u16: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp64_u32: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp64_u64: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp64_u8: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp8_p128: Likewise.
+ * gcc.target/arm/neon/vreinterpretQp8_p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretQs16_p128: Likewise.
+ * gcc.target/arm/neon/vreinterpretQs16_p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretQs32_p128: Likewise.
+ * gcc.target/arm/neon/vreinterpretQs32_p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretQs64_p128: Likewise.
+ * gcc.target/arm/neon/vreinterpretQs64_p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretQs8_p128: Likewise.
+ * gcc.target/arm/neon/vreinterpretQs8_p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretQu16_p128: Likewise.
+ * gcc.target/arm/neon/vreinterpretQu16_p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretQu32_p128: Likewise.
+ * gcc.target/arm/neon/vreinterpretQu32_p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretQu64_p128: Likewise.
+ * gcc.target/arm/neon/vreinterpretQu64_p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretQu8_p128: Likewise.
+ * gcc.target/arm/neon/vreinterpretQu8_p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretf32_p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretp16_p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretp64_f32: Likewise.
+ * gcc.target/arm/neon/vreinterpretp64_p16: Likewise.
+ * gcc.target/arm/neon/vreinterpretp64_p8: Likewise.
+ * gcc.target/arm/neon/vreinterpretp64_s16: Likewise.
+ * gcc.target/arm/neon/vreinterpretp64_s32: Likewise.
+ * gcc.target/arm/neon/vreinterpretp64_s64: Likewise.
+ * gcc.target/arm/neon/vreinterpretp64_s8: Likewise.
+ * gcc.target/arm/neon/vreinterpretp64_u16: Likewise.
+ * gcc.target/arm/neon/vreinterpretp64_u32: Likewise.
+ * gcc.target/arm/neon/vreinterpretp64_u64: Likewise.
+ * gcc.target/arm/neon/vreinterpretp64_u8: Likewise.
+ * gcc.target/arm/neon/vreinterpretp8_p64: Likewise.
+ * gcc.target/arm/neon/vreinterprets16_p64: Likewise.
+ * gcc.target/arm/neon/vreinterprets32_p64: Likewise.
+ * gcc.target/arm/neon/vreinterprets64_p64: Likewise.
+ * gcc.target/arm/neon/vreinterprets8_p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretu16_p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretu32_p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretu64_p64: Likewise.
+ * gcc.target/arm/neon/vreinterpretu8_p64: Likewise.
+ * gcc.target/arm/neon/vsliQ_np64: Likewise.
+ * gcc.target/arm/neon/vsli_np64: Likewise.
+ * gcc.target/arm/neon/vsriQ_np64: Likewise.
+ * gcc.target/arm/neon/vsri_np64: Likewise.
+ * gcc.target/arm/neon/vst1Q_lanep64: Likewise.
+ * gcc.target/arm/neon/vst1Qp64: Likewise.
+ * gcc.target/arm/neon/vst1_lanep64: Likewise.
+ * gcc.target/arm/neon/vst1p64: Likewise.
+ * gcc.target/arm/neon/vst2p64: Likewise.
+ * gcc.target/arm/neon/vst3p64: Likewise.
+ * gcc.target/arm/neon/vst4p64: Likewise.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206128
+ 2013-12-19 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * lib/target-supports.exp (add_options_for_arm_crc): New procedure.
+ (check_effective_target_arm_crc_ok_nocache): Likewise.
+ (check_effective_target_arm_crc_ok): Likewise.
+ * gcc.target/arm/acle/: New directory.
+ * gcc.target/arm/acle/acle.exp: New.
+ * gcc.target/arm/acle/crc32b.c: New test.
+ * gcc.target/arm/acle/crc32h.c: Likewise.
+ * gcc.target/arm/acle/crc32w.c: Likewise.
+ * gcc.target/arm/acle/crc32d.c: Likewise.
+ * gcc.target/arm/acle/crc32cb.c: Likewise.
+ * gcc.target/arm/acle/crc32ch.c: Likewise.
+ * gcc.target/arm/acle/crc32cw.c: Likewise.
+ * gcc.target/arm/acle/crc32cd.c: Likewise.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206120
+ 2013-12-19 Tejas Belagod <tejas.belagod@arm.com>
+
+ * gcc.target/aarch64/pmull_1.c: New.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206119
+ 2013-12-19 Tejas Belagod <tejas.belagod@arm.com>
+
+ * gcc.target/aarch64/sha256_1.c: New.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206118
+ 2013-12-19 Tejas Belagod <tejas.belagod@arm.com>
+
+ * gcc.target/aarch64/sha1_1.c: New.
+
+2014-02-10 Michael Collison <michael.collison@linaro.org>
+
+ Backport from trunk r206117
+ 2013-12-19 Tejas Belagod <tejas.belagod@arm.com>
+
+ * gcc.target/aarch64/aes_1.c: New.
+
2014-02-01 Christophe Lyon <christophe.lyon@linaro.org>
Backport from trunk r203057.
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index d33b7a10109..8ee3d76fde0 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -2189,6 +2189,49 @@ proc check_effective_target_arm_unaligned { } {
}]
}
+# Return 1 if this is an ARM target supporting -mfpu=crypto-neon-fp-armv8
+# -mfloat-abi=softfp or equivalent options. Some multilibs may be
+# incompatible with these options. Also set et_arm_crypto_flags to the
+# best options to add.
+
+proc check_effective_target_arm_crypto_ok_nocache { } {
+ global et_arm_crypto_flags
+ set et_arm_crypto_flags ""
+ if { [check_effective_target_arm32] } {
+ foreach flags {"" "-mfloat-abi=softfp" "-mfpu=crypto-neon-fp-armv8" "-mfpu=crypto-neon-fp-armv8 -mfloat-abi=softfp"} {
+ if { [check_no_compiler_messages_nocache arm_crypto_ok object {
+ #include "arm_neon.h"
+ uint8x16_t
+ foo (uint8x16_t a, uint8x16_t b)
+ {
+ return vaeseq_u8 (a, b);
+ }
+ } "$flags"] } {
+ set et_arm_crypto_flags $flags
+ return 1
+ }
+ }
+ }
+
+ return 0
+}
+
+# Return 1 if this is an ARM target supporting -mfpu=crypto-neon-fp-armv8
+
+proc check_effective_target_arm_crypto_ok { } {
+ return [check_cached_effective_target arm_crypto_ok \
+ check_effective_target_arm_crypto_ok_nocache]
+}
+
+# Add options for crypto extensions.
+proc add_options_for_arm_crypto { flags } {
+ if { ! [check_effective_target_arm_crypto_ok] } {
+ return "$flags"
+ }
+ global et_arm_crypto_flags
+ return "$flags $et_arm_crypto_flags"
+}
+
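
With the two procedures above in place, a crypto test in the style of the ones added by this patch gates and flags itself like this (sketch; the scan-assembler pattern is illustrative):

/* { dg-do compile } */
/* { dg-require-effective-target arm_crypto_ok } */
/* { dg-add-options arm_crypto } */

#include "arm_neon.h"

uint8x16_t
test_vaeseq_u8 (uint8x16_t a, uint8x16_t b)
{
  return vaeseq_u8 (a, b);
}

/* Illustrative check that the AES instruction was emitted.  */
/* { dg-final { scan-assembler "aese.8" } } */
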
# Add the options needed for NEON. We need either -mfloat-abi=softfp
# or -mfloat-abi=hard, but if one is already specified by the
# multilib, use it. Similarly, if a -mfpu option already enables
@@ -2217,6 +2260,14 @@ proc add_options_for_arm_v8_neon { flags } {
return "$flags $et_arm_v8_neon_flags -march=armv8-a"
}
+proc add_options_for_arm_crc { flags } {
+ if { ! [check_effective_target_arm_crc_ok] } {
+ return "$flags"
+ }
+ global et_arm_crc_flags
+ return "$flags $et_arm_crc_flags"
+}
+
# Add the options needed for NEON. We need either -mfloat-abi=softfp
# or -mfloat-abi=hard, but if one is already specified by the
# multilib, use it. Similarly, if a -mfpu option already enables
@@ -2258,6 +2309,21 @@ proc check_effective_target_arm_neon_ok { } {
check_effective_target_arm_neon_ok_nocache]
}
+proc check_effective_target_arm_crc_ok_nocache { } {
+ global et_arm_crc_flags
+ set et_arm_crc_flags "-march=armv8-a+crc"
+ return [check_no_compiler_messages_nocache arm_crc_ok object {
+ #if !defined (__ARM_FEATURE_CRC32)
+ #error FOO
+ #endif
+ } "$et_arm_crc_flags"]
+}
+
+proc check_effective_target_arm_crc_ok { } {
+ return [check_cached_effective_target arm_crc_ok \
+ check_effective_target_arm_crc_ok_nocache]
+}
+
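
The CRC effective-target and option procedures pair up the same way; a sketch of a test using them, mirroring the crc32w.c entry in the ChangeLog above (the scan pattern is illustrative):

/* { dg-do compile } */
/* { dg-require-effective-target arm_crc_ok } */
/* { dg-add-options arm_crc } */

#include <arm_acle.h>

uint32_t
test_crc32w (uint32_t crc, uint32_t data)
{
  return __crc32w (crc, data);
}

/* Illustrative check for the word-sized CRC32 instruction.  */
/* { dg-final { scan-assembler "crc32w" } } */
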
# Return 1 if this is an ARM target supporting -mfpu=neon-fp16
# -mfloat-abi=softfp or equivalent options. Some multilibs may be
# incompatible with these options. Also set et_arm_neon_flags to the