aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--gcc/ChangeLog.arm807
-rw-r--r--gcc/builtins.c1
-rw-r--r--gcc/c-family/ChangeLog.arm6
-rw-r--r--gcc/c-family/c-common.c1
-rw-r--r--gcc/common/config/arm/arm-common.c43
-rw-r--r--gcc/config.gcc67
-rw-r--r--gcc/config.host2
-rw-r--r--gcc/config.in6
-rw-r--r--gcc/config/aarch64/aarch64.c1
-rw-r--r--gcc/config/alpha/alpha.c1
-rw-r--r--gcc/config/arm/arm-arches.def6
-rw-r--r--gcc/config/arm/arm-builtins.c612
-rw-r--r--gcc/config/arm/arm-c.c8
-rw-r--r--gcc/config/arm/arm-cores.def2
-rw-r--r--gcc/config/arm/arm-flags.h209
-rw-r--r--gcc/config/arm/arm-opts.h2
-rw-r--r--gcc/config/arm/arm-protos.h186
-rw-r--r--gcc/config/arm/arm-tables.opt19
-rw-r--r--gcc/config/arm/arm-tune.md9
-rw-r--r--gcc/config/arm/arm.c1419
-rw-r--r--gcc/config/arm/arm.h58
-rw-r--r--gcc/config/arm/arm.md276
-rw-r--r--gcc/config/arm/arm.opt8
-rw-r--r--gcc/config/arm/arm_acle.h141
-rw-r--r--gcc/config/arm/arm_acle_builtins.def44
-rw-r--r--gcc/config/arm/arm_cmse.h199
-rw-r--r--gcc/config/arm/bpabi.h6
-rw-r--r--gcc/config/arm/constraints.md21
-rw-r--r--gcc/config/arm/elf.h19
-rw-r--r--gcc/config/arm/iterators.md47
-rw-r--r--gcc/config/arm/neon.md28
-rw-r--r--gcc/config/arm/predicates.md6
-rw-r--r--gcc/config/arm/sync.md283
-rw-r--r--gcc/config/arm/t-rmprofile176
-rw-r--r--gcc/config/arm/thumb1.md203
-rw-r--r--gcc/config/arm/thumb2.md49
-rw-r--r--gcc/config/arm/types.md7
-rw-r--r--gcc/config/arm/unspecs.md20
-rw-r--r--gcc/config/arm/vfp.md37
-rw-r--r--gcc/config/i386/driver-mingw32.c26
-rw-r--r--gcc/config/i386/i386.c1
-rw-r--r--gcc/config/i386/x-mingw323
-rw-r--r--gcc/config/ia64/ia64.c1
-rw-r--r--gcc/config/mips/mips.c1
-rw-r--r--gcc/config/rs6000/rs6000.c1
-rw-r--r--gcc/config/sparc/sparc.c1
-rwxr-xr-xgcc/configure24
-rw-r--r--gcc/configure.ac12
-rw-r--r--gcc/doc/extend.texi35
-rw-r--r--gcc/doc/install.texi72
-rw-r--r--gcc/doc/invoke.texi30
-rw-r--r--gcc/doc/sourcebuild.texi27
-rw-r--r--gcc/doc/tm.texi12
-rw-r--r--gcc/doc/tm.texi.in2
-rw-r--r--gcc/genconditions.c1
-rw-r--r--gcc/genemit.c1
-rw-r--r--gcc/genoutput.c1
-rw-r--r--gcc/genpeep.c1
-rw-r--r--gcc/genpreds.c1
-rw-r--r--gcc/genrecog.c1
-rw-r--r--gcc/hooks.c9
-rw-r--r--gcc/hooks.h1
-rw-r--r--gcc/memmodel.h86
-rw-r--r--gcc/optabs.c1
-rw-r--r--gcc/target.def16
-rw-r--r--gcc/testsuite/ChangeLog.arm365
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/acle.exp19
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/cdp.c14
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/cdp2.c14
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/ldc.c18
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/ldc2.c18
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/ldc2l.c18
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/ldcl.c18
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/mcr.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/mcr2.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/mcrr.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/mcrr2.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/mrc.c14
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/mrc2.c14
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/mrrc.c14
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/mrrc2.c14
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/stc.c18
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/stc2.c18
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/stc2l.c18
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/stcl.c18
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-comp-swap-release-acquire-1.c (renamed from gcc/testsuite/gcc.target/arm/atomic-comp-swap-release-acquire.c)0
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-comp-swap-release-acquire-2.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-comp-swap-release-acquire-3.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-acq_rel-1.c (renamed from gcc/testsuite/gcc.target/arm/atomic-op-acq_rel.c)0
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-acq_rel-2.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-acq_rel-3.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-acquire-1.c (renamed from gcc/testsuite/gcc.target/arm/atomic-op-acquire.c)0
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-acquire-2.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-acquire-3.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-char-1.c (renamed from gcc/testsuite/gcc.target/arm/atomic-op-char.c)0
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-char-2.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-char-3.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-consume-1.c (renamed from gcc/testsuite/gcc.target/arm/atomic-op-consume.c)0
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-consume-2.c11
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-consume-3.c11
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-int-1.c (renamed from gcc/testsuite/gcc.target/arm/atomic-op-int.c)0
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-int-2.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-int-3.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-relaxed-1.c (renamed from gcc/testsuite/gcc.target/arm/atomic-op-relaxed.c)0
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-relaxed-2.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-relaxed-3.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-release-1.c (renamed from gcc/testsuite/gcc.target/arm/atomic-op-release.c)0
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-release-2.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-release-3.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-seq_cst-1.c (renamed from gcc/testsuite/gcc.target/arm/atomic-op-seq_cst.c)0
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-seq_cst-2.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-seq_cst-3.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-short-1.c (renamed from gcc/testsuite/gcc.target/arm/atomic-op-short.c)0
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-short-2.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/atomic-op-short-3.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/cbz.c12
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-4.c57
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-5.c53
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-6.c63
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-7.c54
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-8.c57
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-9.c56
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-and-union-1.c96
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-11.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-13.c25
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-2.c19
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-6.c21
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/softfp.c29
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/union-1.c71
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/baseline/union-2.c86
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/bitfield-1.c39
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/bitfield-2.c36
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/bitfield-3.c37
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/cmse-1.c106
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/cmse-10.c9
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/cmse-12.c14
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/cmse-14.c13
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/cmse-15.c72
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/cmse-3.c45
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/cmse-4.c34
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/cmse-9.c20
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/cmse.exp72
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-4.c55
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-5.c51
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-6.c61
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-7.c52
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-8.c55
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-9.c54
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-and-union-1.c94
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-13.c43
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-5.c45
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-7.c42
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-8.c41
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-13.c38
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-5.c38
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-7.c34
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-8.c33
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-13.c27
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-5.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-7.c27
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-8.c26
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-5.c46
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-7.c26
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-8.c25
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-13.c25
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-5.c38
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-7.c26
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-8.c25
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/union-1.c69
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/mainline/union-2.c84
-rw-r--r--gcc/testsuite/gcc.target/arm/cmse/struct-1.c33
-rw-r--r--gcc/testsuite/gcc.target/arm/movdi_movw.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/movhi_movw.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/movsi_movw.c10
-rw-r--r--gcc/testsuite/gcc.target/arm/optional_thumb-1.c7
-rw-r--r--gcc/testsuite/gcc.target/arm/optional_thumb-2.c7
-rw-r--r--gcc/testsuite/gcc.target/arm/optional_thumb-3.c9
-rw-r--r--gcc/testsuite/gcc.target/arm/pr42574.c2
-rw-r--r--gcc/testsuite/gcc.target/arm/pure-code/ffunction-sections.c17
-rw-r--r--gcc/testsuite/gcc.target/arm/pure-code/no-casesi.c29
-rw-r--r--gcc/testsuite/gcc.target/arm/pure-code/no-literal-pool.c68
-rw-r--r--gcc/testsuite/gcc.target/arm/pure-code/pure-code.exp58
-rw-r--r--gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-1.c (renamed from gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data.c)0
-rw-r--r--gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-2.c27
-rw-r--r--gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-3.c26
-rw-r--r--gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-4.c26
-rw-r--r--gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-5.c15
-rw-r--r--gcc/testsuite/lib/target-supports.exp163
-rw-r--r--gcc/tree.h63
-rw-r--r--gcc/tsan.c1
-rw-r--r--gcc/varasm.c48
-rw-r--r--libgcc/ChangeLog.arm69
-rw-r--r--libgcc/config/arm/bpabi-v6m.S3
-rw-r--r--libgcc/config/arm/cmse.c108
-rw-r--r--libgcc/config/arm/cmse_nonsecure_call.S131
-rw-r--r--libgcc/config/arm/lib1funcs.S293
-rw-r--r--libgcc/config/arm/libunwind.S6
-rw-r--r--libgcc/config/arm/t-arm14
-rw-r--r--libgcc/config/arm/t-softfp2
199 files changed, 9070 insertions, 887 deletions
diff --git a/gcc/ChangeLog.arm b/gcc/ChangeLog.arm
new file mode 100644
index 00000000000..cb587fff910
--- /dev/null
+++ b/gcc/ChangeLog.arm
@@ -0,0 +1,807 @@
+2017-01-31 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * configure.ac (--enable-mingw-wildcard): Add new configurable feature.
+ * configure: Regenerate.
+ * config.in: Regenerate.
+ * config/i386/driver-mingw32.c: new file.
+ * config/i386/x-mingw32: Add rule to build driver-mingw32.o.
+ * config.host: Link driver-mingw32.o on MinGW host.
+ * doc/install.texi: Document new --enable-mingw-wildcard configure
+ option.
+
+2017-01-24 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * config/arm/arm.md (*ldcstc): Split into ...
+ (*ldc): ... this and ...
+ (*stc): ... this.
+ (ldcstc): Split into ...
+ (ldc): ... this and ...
+ (stc): ... this.
+ (cdp,*ldc,*stc,mrc,mcr,mrrc,mcrr): Add operand constraints.
+ (mrc, mrrc): Add source mode to coprocessor pattern SETs.
+ * config/arm/arm.c (arm_coproc_builtin_available): Put function name on
+ new line and fix availability of MCRR2 and MRRC2 builtins.
+ (arm_coproc_ldc_stc_legitimate_address): Put function name on new line.
+ * config/arm/arm-builtins.c (arm_type_qualifiers): Style fix.
+ * config/arm/arm_acle.h: Fix availability of __arm_mcrr2 and
+ __arm_mrrc2 intrinsics.
+ * config/arm/constraints.md (Uz): Finish sentence explaining the
+ constraint.
+ * config/arm/iterators.md (LDCSTCI,LDCSTC,ldcstc): Split into ...
+ (LDCI,LDC,ldc): ... this and ...
+ (STCI,STC,stc): ... this.
+ * gcc/doc/sourcebuild.texi (arm_coproc2_ok,arm_coproc3_ok): Fix
+ language.
+ (arm_coproc4_ok): New.
+
+2016-12-14 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * config/arm/arm.c (TARGET_USE_BLOCKS_FOR_CONSTANT_P): Remove.
+ (arm_use_blocks_for_constant_p): Remove.
+
+2016-12-13 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm-cores.def (cortex-m23): Add FL2_CMSE flag.
+ (cortex-m33): Likewise.
+
+2016-12-09 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-09 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ PR rtl-optimization/78255
+ * gcc/postreload.c (reload_cse_simplify): Do not CSE a function if
+ NO_FUNCTION_CSE is true.
+
+2016-12-09 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm-opts.h: Move struct arm_arch_core_flag and
+ arm_arch_core_flags to ...
+ * common/config/arm/arm-common.c: There.
+
+2016-12-07 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-12-07 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ PR rtl-optimization/78617
+ * lra-remat.c (do_remat): Initialize live_hard_regs from live in
+ registers, also setting hard registers mapped to pseudo registers.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ PR target/71607
+ * config/arm/arm.md (use_literal_pool): Remove.
+ (64-bit immediate split): No longer takes cost into consideration
+ if 'arm_disable_literal_pool' is enabled.
+ * config/arm/arm.c (arm_use_blocks_for_constant_p): New.
+ (TARGET_USE_BLOCKS_FOR_CONSTANT_P): Define.
+ (arm_max_const_double_inline_cost): Remove use of
+ arm_disable_literal_pool.
+ * config/arm/vfp.md (no_literal_pool_df_immediate): New.
+ (no_literal_pool_sf_immediate): New.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * config/arm/arm.md (<mcrr>): New.
+ (<mrrc>): New.
+ * config/arm/arm.c (arm_arch5te): New.
+ (arm_option_override): Set arm_arch5te.
+ (arm_coproc_builtin_available): Add support for mcrr, mcrr2, mrrc
+ and mrrc2.
+ * config/arm/arm-builtins.c (MCRR_QUALIFIERS): Define to...
+ (arm_mcrr_qualifiers): ... this. New.
+ (MRRC_QUALIFIERS): Define to...
+ (arm_mrrc_qualifiers): ... this. New.
+ * config/arm/arm_acle.h (__arm_mcrr, __arm_mcrr2, __arm_mrrc,
+ __arm_mrrc2): New.
+ * config/arm/arm_acle_builtins.def (mcrr, mcrr2, mrrc, mrrc2): New.
+ * config/arm/iterators.md (MCRRI, mcrr, MCRR): New.
+ (MRRCI, mrrc, MRRC): New.
+ * config/arm/unspecs.md (VUNSPEC_MCRR, VUNSPEC_MCRR2, VUNSPEC_MRRC,
+ VUNSPEC_MRRC2): New.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * config/arm/arm.md (<mcr>): New.
+ (<mrc>): New.
+ * config/arm/arm.c (arm_coproc_builtin_available): Add
+ support for mcr, mrc, mcr2 and mrc2.
+ * config/arm/arm-builtins.c (MCR_QUALIFIERS): Define to...
+ (arm_mcr_qualifiers): ... this. New.
+ (MRC_QUALIFIERS): Define to ...
+ (arm_mrc_qualifiers): ... this. New.
+ (MCR_QUALIFIERS): Define to ...
+ (arm_mcr_qualifiers): ... this. New.
+ * config/arm/arm_acle.h (__arm_mcr, __arm_mrc, __arm_mcr2,
+ __arm_mrc2): New.
+ * config/arm/arm_acle_builtins.def (mcr, mcr2, mrc, mrc2): New.
+ * config/arm/iterators.md (MCRI, mcr, MCR, MRCI, mrc, MRC): New.
+ * config/arm/unspecs.md (VUNSPEC_MCR, VUNSPEC_MCR2, VUNSPEC_MRC,
+ VUNSPEC_MRC2): New.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * config/arm/arm.md (*ldcstc): New.
+ (<ldcstc>): New.
+ * config/arm/arm.c (arm_coproc_builtin_available): Add
+ support for ldc,ldcl,stc,stcl,ldc2,ldc2l,stc2 and stc2l.
+ (arm_coproc_ldc_stc_legitimate_address): New.
+ * config/arm/arm-builtins.c (arm_type_qualifiers): Add
+ 'qualifier_const_pointer'.
+ (LDC_QUALIFIERS): Define to...
+ (arm_ldc_qualifiers): ... this. New.
+ (STC_QUALIFIERS): Define to...
+ (arm_stc_qualifiers): ... this. New.
+ * config/arm/arm-protos.h
+ (arm_coproc_ldc_stc_legitimate_address): New.
+ * config/arm/arm_acle.h (__arm_ldc, __arm_ldcl, __arm_stc,
+ __arm_stcl, __arm_ldc2, __arm_ldc2l, __arm_stc2, __arm_stc2l): New.
+ * config/arm/arm_acle_builtins.def (ldc, ldc2, ldcl, ldc2l, stc,
+ stc2, stcl, stc2l): New.
+ * config/arm/constraints.md (Uz): New.
+ * config/arm/iterators.md (LDCSTCI, ldcstc, LDCSTC): New.
+ * config/arm/unspecs.md (VUNSPEC_LDC, VUNSPEC_LDC2, VUNSPEC_LDCL,
+ VUNSPEC_LDC2L, VUNSPEC_STC, VUNSPEC_STC2, VUNSPEC_STCL,
+ VUNSPEC_STC2L): New.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * config/arm/arm.md (<cdp>): New.
+ * config/arm/arm.c (neon_const_bounds): Rename this ...
+ (arm_const_bounds): ... this.
+ (arm_coproc_builtin_available): New.
+ * config/arm/arm-builtins.c (SIMD_MAX_BUILTIN_ARGS): Increase.
+ (arm_type_qualifiers): Add 'qualifier_unsigned_immediate'.
+ (CDP_QUALIFIERS): Define to...
+ (arm_cdp_qualifiers): ... this. New.
+ (void_UP): Define.
+ (arm_expand_builtin_args): Add case for 6 arguments.
+ * config/arm/arm-protos.h (neon_const_bounds): Rename this ...
+ (arm_const_bounds): ... this.
+ (arm_coproc_builtin_available): New.
+ * config/arm/arm_acle.h (__arm_cdp): New.
+ (__arm_cdp2): New.
+ * config/arm/arm_acle_builtins.def (cdp): New.
+ (cdp2): New.
+ * config/arm/iterators.md (CDPI,CDP,cdp): New.
+ * config/arm/neon.md: Rename all 'neon_const_bounds' to
+ 'arm_const_bounds'.
+ * config/arm/types.md (coproc): New.
+ * config/arm/unspecs.md (VUNSPEC_CDP, VUNSPEC_CDP2): New.
+ * gcc/doc/extend.texi (ACLE): Add a mention of Coprocessor intrinsics.
+ * gcc/doc/sourcebuild.texi
+ (arm_coproc1_ok, arm_coproc2_ok, arm_coproc3_ok): New.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * config/arm/arm-builtins.c (arm_unsigned_binop_qualifiers): New.
+ (UBINOP_QUALIFIERS): New.
+ (si_UP): Define.
+ (acle_builtin_data): New. Change comment.
+ (arm_builtins): Remove ARM_BUILTIN_CRC32B, ARM_BUILTIN_CRC32H,
+ ARM_BUILTIN_CRC32W, ARM_BUILTIN_CRC32CB, ARM_BUILTIN_CRC32CH,
+ ARM_BUILTIN_CRC32CW. Add ARM_BUILTIN_ACLE_BASE and include
+ arm_acle_builtins.def.
+ (ARM_BUILTIN_ACLE_PATTERN_START): Define.
+ (arm_init_acle_builtins): New.
+ (CRC32_BUILTIN): Remove.
+ (bdesc_2arg): Remove entries for crc32b, crc32h, crc32w,
+ crc32cb, crc32ch and crc32cw.
+ (arm_init_crc32_builtins): Remove.
+ (arm_init_builtins): Use arm_init_acle_builtins rather
+ than arm_init_crc32_builtins.
+ (arm_expand_acle_builtin): New.
+ (arm_expand_builtin): Use 'arm_expand_acle_builtin'.
+ (si_UP): New define.
+ * config/arm/arm_acle_builtins.def: New.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * config/arm/arm-builtins.c (neon_builtin_datum): Rename to ...
+ (arm_builtin_datum): ... this.
+ (arm_init_neon_builtin): Rename to ...
+ (arm_init_builtin): ... this. Add a new parameter PREFIX.
+ (NEON_MAX_BUILTIN_ARGS): Remove, it was unused.
+ (arm_init_neon_builtins): Replace 'arm_init_neon_builtin' with
+ 'arm_init_builtin'. Replace type 'neon_builtin_datum' with
+ 'arm_builtin_datum'.
+ (builtin_arg): Rename enum's replacing 'NEON_ARG' with
+ 'ARG_BUILTIN' and add an 'ARG_BUILTIN_NEON_MEMORY'.
+ (arm_expand_neon_args): Rename to ...
+ (arm_expand_builtin_args): ... this. Rename builtin_arg
+ enum values and differentiate between ARG_BUILTIN_MEMORY
+ and ARG_BUILTIN_NEON_MEMORY.
+ (arm_expand_neon_builtin_1): Rename to ...
+ (arm_expand_builtin_1): ... this. Rename builtin_arg enum
+ values, arm_expand_builtin_args.
+ (arm_expand_neon_builtin): Use arm_expand_builtin_1.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-09-23 Matthew Wahab <matthew.wahab@arm.com>
+
+ * config/arm/arm-builtins.c (arm_init_neon_builtin): New.
+ (arm_init_builtins): Move body of a loop to the standalone
+ function arm_init_neon_builtin.
+ (arm_expand_neon_builtin_1): New. Update comment. Function body
+ moved from arm_neon_builtin with some white-space fixes.
+ (arm_expand_neon_builtin): Move code into the standalone function
+ arm_expand_neon_builtin_1.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-01 Jeff Law <law@redhat.com>
+
+ * config/arm/arm.c (arm_handle_cmse_nonsecure_call): Remove unused
+ variable main_variant.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-02 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm-builtins.c (arm_builtins): Define
+ ARM_BUILTIN_CMSE_NONSECURE_CALLER.
+ (bdesc_2arg): Add line for cmse_nonsecure_caller.
+ (arm_init_builtins): Handle cmse_nonsecure_caller.
+ (arm_expand_builtin): Likewise.
+ * config/arm/arm_cmse.h (cmse_nonsecure_caller): New.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-02 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm.c (detect_cmse_nonsecure_call): New.
+ (cmse_nonsecure_call_clear_caller_saved): New.
+ (arm_reorg): Use cmse_nonsecure_call_clear_caller_saved.
+ (arm_function_ok_for_sibcall): Disable sibcalls for
+ cmse_nonsecure_call.
+ * config/arm/arm-protos.h (detect_cmse_nonsecure_call): New.
+ * config/arm/arm.md (call): Handle cmse_nonsecure_entry.
+ (call_value): Likewise.
+ (nonsecure_call_internal): New.
+ (nonsecure_call_value_internal): New.
+ * config/arm/thumb1.md (*nonsecure_call_reg_thumb1_v5): New.
+ (*nonsecure_call_value_reg_thumb1_v5): New.
+ * config/arm/thumb2.md (*nonsecure_call_reg_thumb2): New.
+ (*nonsecure_call_value_reg_thumb2): New.
+ * config/arm/unspecs.md (UNSPEC_NONSECURE_MEM): New.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-02 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm.c (gimplify.h): New include.
+ (arm_handle_cmse_nonsecure_call): New.
+ (arm_attribute_table): Added cmse_nonsecure_call.
+ (arm_comp_type_attributes): Deny compatibility of function types
+ with and without the cmse_nonsecure_call attribute.
+ * doc/extend.texi (ARM ARMv8-M Security Extensions): New attribute.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-02 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm.c (output_return_instruction): Clear
+ registers.
+ (thumb2_expand_return): Likewise.
+ (thumb1_expand_epilogue): Likewise.
+ (thumb_exit): Likewise.
+ (arm_expand_epilogue): Likewise.
+ (cmse_nonsecure_entry_clear_before_return): New.
+ (comp_not_to_clear_mask_str_un): New.
+ (compute_not_to_clear_mask): New.
+ * config/arm/thumb1.md (*epilogue_insns): Change length attribute.
+ * config/arm/thumb2.md (*thumb2_return): Disable for
+ cmse_nonsecure_entry functions.
+ (*thumb2_cmse_entry_return): Duplicate thumb2_return pattern for
+ cmse_nonsecure_entry functions.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-02 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm.c (use_return_insn): Change to return with bxns
+ when cmse_nonsecure_entry.
+ (output_return_instruction): Likewise.
+ (arm_output_function_prologue): Likewise.
+ (thumb_pop): Likewise.
+ (thumb_exit): Likewise.
+ (thumb2_expand_return): Assert that entry functions always have simple
+ returns.
+ (arm_expand_epilogue): Handle entry functions.
+ (arm_function_ok_for_sibcall): Disable sibcall for entry functions.
+ (arm_asm_declare_function_name): New.
+ * config/arm/arm-protos.h (arm_asm_declare_function_name): New.
+ * config/arm/elf.h (ASM_DECLARE_FUNCTION_NAME): Redefine to
+ use arm_asm_declare_function_name.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-02 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm.c (arm_handle_cmse_nonsecure_entry): New.
+ (arm_attribute_table): Added cmse_nonsecure_entry.
+ (arm_compute_func_type): Handle cmse_nonsecure_entry.
+ (cmse_func_args_or_return_in_stack): New.
+ (arm_handle_cmse_nonsecure_entry): New.
+ * config/arm/arm.h (ARM_FT_CMSE_ENTRY): New macro define.
+ (IS_CMSE_ENTRY): Likewise.
+ * doc/extend.texi (ARM ARMv8-M Security Extensions): New attribute.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-02 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config.gcc (extra_headers): Added arm_cmse.h.
+ * config/arm/arm-arches.def (ARM_ARCH):
+ (armv8-m): Add FL2_CMSE.
+ (armv8-m.main): Likewise.
+ (armv8-m.main+dsp): Likewise.
+ * config/arm/arm-c.c
+ (arm_cpu_builtins): Added __ARM_FEATURE_CMSE macro.
+ * config/arm/arm-flags.h: Define FL2_CMSE.
+ * config/arm/arm.c (arm_arch_cmse): New.
+ (arm_option_override): New error for unsupported cmse target.
+ * config/arm/arm.h (arm_arch_cmse): New.
+ * config/arm/arm.opt (mcmse): New.
+ * config/arm/arm_cmse.h: New file.
+ * doc/invoke.texi (ARM Options): Add -mcmse.
+ * doc/sourcebuild.texi (arm_cmse_ok): Add new effective target.
+ * doc/extend.texi: Add ARMv8-M Security Extensions entry.
+
+2016-12-01 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-11-30 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/t-rmprofile: Add mappings for Cortex-M23 and Cortex-M33.
+
+2016-11-22 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-11-22 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config.gcc: Allow new rmprofile value for configure option
+ --with-multilib-list.
+ * config/arm/t-rmprofile: New file.
+ * doc/install.texi (--with-multilib-list): Document new rmprofile value
+ for ARM.
+
+2016-11-22 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-05-04 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config.gcc: Error out when conflicting multilib is detected. Do not
+ loop over multilibs since no combination is legal.
+
+2016-11-22 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-11-22 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ PR target/77904
+ * config/arm/arm.c (thumb1_compute_save_reg_mask): Mark frame pointer
+ in save register mask if it is needed.
+
+2016-11-18 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-11-18 Terry Guo <terry.guo@arm.com>
+ Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * common/config/arm/arm-common.c (arm_target_thumb_only): New function.
+ * config/arm/arm-opts.h: Include arm-flags.h.
+ (struct arm_arch_core_flag): Define.
+ (arm_arch_core_flags): Define.
+ * config/arm/arm-protos.h: Include arm-flags.h.
+ (FL_NONE, FL_ANY, FL_CO_PROC, FL_ARCH3M, FL_MODE26, FL_MODE32,
+ FL_ARCH4, FL_ARCH5, FL_THUMB, FL_LDSCHED, FL_STRONG, FL_ARCH5E,
+ FL_XSCALE, FL_ARCH6, FL_VFPV2, FL_WBUF, FL_ARCH6K, FL_THUMB2, FL_NOTM,
+ FL_THUMB_DIV, FL_VFPV3, FL_NEON, FL_ARCH7EM, FL_ARCH7, FL_ARM_DIV,
+ FL_ARCH8, FL_CRC32, FL_SMALLMUL, FL_NO_VOLATILE_CE, FL_IWMMXT,
+ FL_IWMMXT2, FL_ARCH6KZ, FL2_ARCH8_1, FL2_ARCH8_2, FL2_FP16INST,
+ FL_TUNE, FL_FOR_ARCH2, FL_FOR_ARCH3, FL_FOR_ARCH3M, FL_FOR_ARCH4,
+ FL_FOR_ARCH4T, FL_FOR_ARCH5, FL_FOR_ARCH5T, FL_FOR_ARCH5E,
+ FL_FOR_ARCH5TE, FL_FOR_ARCH5TEJ, FL_FOR_ARCH6, FL_FOR_ARCH6J,
+ FL_FOR_ARCH6K, FL_FOR_ARCH6Z, FL_FOR_ARCH6ZK, FL_FOR_ARCH6KZ,
+ FL_FOR_ARCH6T2, FL_FOR_ARCH6M, FL_FOR_ARCH7, FL_FOR_ARCH7A,
+ FL_FOR_ARCH7VE, FL_FOR_ARCH7R, FL_FOR_ARCH7M, FL_FOR_ARCH7EM,
+ FL_FOR_ARCH8A, FL2_FOR_ARCH8_1A, FL2_FOR_ARCH8_2A, FL_FOR_ARCH8M_BASE,
+ FL_FOR_ARCH8M_MAIN, arm_feature_set, ARM_FSET_MAKE,
+ ARM_FSET_MAKE_CPU1, ARM_FSET_MAKE_CPU2, ARM_FSET_CPU1, ARM_FSET_CPU2,
+ ARM_FSET_EMPTY, ARM_FSET_ANY, ARM_FSET_HAS_CPU1, ARM_FSET_HAS_CPU2,
+ ARM_FSET_HAS_CPU, ARM_FSET_ADD_CPU1, ARM_FSET_ADD_CPU2,
+ ARM_FSET_DEL_CPU1, ARM_FSET_DEL_CPU2, ARM_FSET_UNION, ARM_FSET_INTER,
+ ARM_FSET_XOR, ARM_FSET_EXCLUDE, ARM_FSET_IS_EMPTY,
+ ARM_FSET_CPU_SUBSET): Move to ...
+ * config/arm/arm-flags.h: This new file.
+ * config/arm/arm.h (TARGET_MODE_SPEC_FUNCTIONS): Define.
+ (EXTRA_SPEC_FUNCTIONS): Add TARGET_MODE_SPEC_FUNCTIONS to its value.
+ (TARGET_MODE_SPECS): Define.
+ (DRIVER_SELF_SPECS): Add TARGET_MODE_SPECS to its value.
+
+2016-11-18 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-11-18 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm-protos.h (FL_NONE, FL_ANY, FL_CO_PROC, FL_ARCH3M,
+ FL_MODE26, FL_MODE32, FL_ARCH4, FL_ARCH5, FL_THUMB, FL_LDSCHED,
+ FL_STRONG, FL_ARCH5E, FL_XSCALE, FL_ARCH6, FL_VFPV2, FL_WBUF,
+ FL_ARCH6K, FL_THUMB2, FL_NOTM, FL_THUMB_DIV, FL_VFPV3, FL_NEON,
+ FL_ARCH7EM, FL_ARCH7, FL_ARM_DIV, FL_ARCH8, FL_CRC32, FL_SMALLMUL,
+ FL_NO_VOLATILE_CE, FL_IWMMXT, FL_IWMMXT2, FL_ARCH6KZ, FL2_ARCH8_1,
+ FL2_ARCH8_2, FL2_FP16INST): Reindent comment, add final dot when
+ missing and make value unsigned.
+ (arm_feature_set): Use unsigned entries instead of unsigned long.
+
+2016-11-08 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-11-08 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ PR target/77933
+ * config/arm/arm.c (thumb1_expand_prologue): Distinguish between lr
+ being live in the function and lr needing to be saved. Distinguish
+ between already saved pushable registers and registers to push.
+ Check for LR being an available pushable register.
+
+2016-11-17 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-11-16 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm.md (arm_addsi3): Add alternative for addition of
+ general register with general register or ARM constant into SP
+ register.
+
+2016-11-09 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-11-04 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm-arches.def (armv8-m.main+dsp): Set Cortex-M33 as
+ representative core for this architecture.
+ * config/arm/arm-cores.def (cortex-m33): Define new processor.
+ * config/arm/arm-tables.opt: Regenerate.
+ * config/arm/arm-tune.md: Likewise.
+ * config/arm/bpabi.h (BE8_LINK_SPEC): Add Cortex-M33 to the list of
+ valid -mcpu options.
+ * doc/invoke.texi (ARM Options): Document new Cortex-M33 processor.
+
+2016-11-09 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-11-04 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm-arches.def (armv8-m.base): Set Cortex-M23 as
+ representative core for this architecture.
+ * config/arm/arm-cores.def (cortex-m23): Define new processor.
+ * config/arm/arm-tables.opt: Regenerate.
+ * config/arm/arm-tune.md: Likewise.
+ * config/arm/arm.c (arm_v6m_tune): Add Cortex-M23 to the list of cores
+ this tuning parameters apply to in the comment.
+ * config/arm/bpabi.h (BE8_LINK_SPEC): Add Cortex-M23 to the list of
+ valid -mcpu options.
+ * doc/invoke.texi (ARM Options): Document new Cortex-M23 processor.
+
+2016-11-08 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-09-23 Uros Bizjak <ubizjak@gmail.com>
+ Jakub Jelinek <jakub@redhat.com>
+
+ * hooks.h (hook_uint_uintp_false): Rename to...
+ (hook_bool_uint_uintp_false): ... this.
+ * hooks.c (hook_uint_uintp_false): Rename to...
+ (hook_bool_uint_uintp_false): ... this.
+ * target.def (elf_flags_numeric): Use hook_bool_uint_uintp_false
+ instead of hook_uint_uintp_false.
+
+ 2016-09-23 Richard Biener <rguenther@suse.de>
+
+ * hooks.h (hook_uint_uintp_false): Declare.
+
+ 2016-09-22 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Terry Guo <terry.guo@arm.com>
+
+ * target.def (elf_flags_numeric): New target hook.
+ * targhooks.h (default_asm_elf_flags_numeric): New.
+ * varasm.c (default_asm_elf_flags_numeric): New.
+ (default_elf_asm_named_section): Use new target hook.
+ * config/arm/arm.opt (mpure-code): New.
+ * config/arm/arm.h (SECTION_ARM_PURECODE): New.
+ * config/arm/arm.c (arm_asm_init_sections): Add section
+ attribute to default text section if -mpure-code.
+ (arm_option_check_internal): Diagnose use of option with
+ non supported targets and/or options.
+ (arm_asm_elf_flags_numeric): New.
+ (arm_function_section): New.
+ (arm_elf_section_type_flags): New.
+ * config/arm/elf.h (JUMP_TABLES_IN_TEXT_SECTION): Disable
+ for -mpure-code.
+	* doc/tm.texi (TARGET_ASM_ELF_FLAGS_NUMERIC): New.
+	* doc/tm.texi.in (TARGET_ASM_ELF_FLAGS_NUMERIC): Likewise.
+
+2016-11-08 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-05-27 Ulrich Weigand <uweigand@de.ibm.com>
+
+ * configure.ac: Treat a --with-headers option without argument
+ the same as the default (i.e. consult sys-include directory).
+ * configure: Regenerate.
+
+2016-10-27 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-10-27 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm.h (TARGET_HAVE_LDREX): Define for ARMv8-M Baseline.
+ (TARGET_HAVE_LDREXBH): Likewise.
+ (TARGET_HAVE_LDACQ): Likewise.
+
+2016-10-27 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-10-27 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm.c (arm_split_atomic_op): Add function comment. Add
+	logic to decide whether to copy over old value to register for new
+ value.
+	* config/arm/sync.md: Add comments explaining why mode and code
+ attribute are not defined in iterators.md
+ (thumb1_atomic_op_str): New code attribute.
+ (thumb1_atomic_newop_str): Likewise.
+ (thumb1_atomic_fetch_op_str): Likewise.
+ (thumb1_atomic_fetch_newop_str): Likewise.
+ (thumb1_atomic_fetch_oldop_str): Likewise.
+ (atomic_exchange<mode>): Add new ARMv8-M Baseline only alternatives to
+ mirror the more restrictive constraints of the Thumb-1 insns after
+ split compared to Thumb-2 counterpart insns.
+ (atomic_<sync_optab><mode>): Likewise. Add comment to keep constraints
+ in sync with non atomic version.
+ (atomic_nand<mode>): Likewise.
+ (atomic_fetch_<sync_optab><mode>): Likewise.
+ (atomic_fetch_nand<mode>): Likewise.
+ (atomic_<sync_optab>_fetch<mode>): Likewise.
+ (atomic_nand_fetch<mode>): Likewise.
+	* config/arm/thumb1.md (thumb1_addsi3): Add comment to keep constraint
+ in sync with atomic version.
+ (thumb1_subsi3_insn): Likewise.
+ (thumb1_andsi3_insn): Likewise.
+ (thumb1_iorsi3_insn): Likewise.
+ (thumb1_xorsi3_insn): Likewise.
+
+2016-10-26 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-10-26 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/sync.md (atomic_compare_and_swap<mode>_1): Add new ARMv8-M
+ Baseline only alternatives to (i) hold store atomic success value in a
+ return register rather than a scratch register, (ii) use a low register
+ for it and to (iii) ensure the cbranchsi insn generated by the split
+ respect the constraints of Thumb-1 cbranchsi4_insn and
+ cbranchsi4_scratch.
+ * config/arm/thumb1.md (cbranchsi4_insn): Add comment to indicate
+ constraints must match those in atomic_compare_and_swap.
+ (cbranchsi4_scratch): Likewise.
+
+2016-10-26 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-10-26 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm.c (arm_expand_compare_and_swap): Add new bdst local
+ variable. Add the new parameter to the insn generator. Set that
+ parameter to be CC flag for 32-bit targets, bval otherwise. Set the
+ return value from the negation of that parameter for Thumb-1, keeping
+ the logic unchanged otherwise except for using bdst as the destination
+ register of the compare_and_swap insn.
+	(arm_split_compare_and_swap): Add explanation to the function comment
+	about how the value is returned. Rename scratch variable to
+ neg_bval. Adapt initialization of variables holding operands to the
+ new operand numbers. Use return register to hold result of store
+ exclusive for Thumb-1, scratch register otherwise. Construct the
+ appropriate cbranch for Thumb-1 targets, keeping the logic unchanged
+ for 32-bit targets. Guard Z flag setting to restrict to 32bit targets.
+ Use gen_cbranchsi4 rather than hand-written conditional branch to loop
+ for strongly ordered compare_and_swap.
+ * config/arm/predicates.md (cc_register_operand): New predicate.
+ * config/arm/sync.md (atomic_compare_and_swap<mode>_1): Use a
+ match_operand with the new predicate to accept either the CC flag or a
+ destination register for the boolean return value, restricting it to
+ CC flag only via constraint. Adapt operand numbers accordingly.
+
+2016-10-26 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-10-25 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/constraints.md (Q constraint): Document its use for
+ Thumb-1.
+ (Pf constraint): New constraint for relaxed, consume or relaxed memory
+ models.
+ * config/arm/sync.md (atomic_load<mode>): Add new ARMv8-M Baseline only
+ alternatives to allow any register when memory model matches Pf and
+ thus lda is used, but only low registers otherwise. Use unpredicated
+ output template for Thumb-1 targets.
+ (atomic_store<mode>): Likewise for stl.
+ (arm_load_exclusive<mode>): Add new ARMv8-M Baseline only alternative
+ whose output template does not have predication.
+ (arm_load_acquire_exclusive<mode>): Likewise.
+ (arm_load_exclusivesi): Likewise.
+ (arm_load_acquire_exclusivesi): Likewise.
+ (arm_store_release_exclusive<mode>): Likewise.
+ (arm_store_exclusive<mode>): Use unpredicated output template for
+ Thumb-1 targets.
+
+2016-10-25 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-09-26 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * tree.h (memmodel_from_int, memmodel_base, is_mm_relaxed,
+ is_mm_consume, is_mm_acquire, is_mm_release, is_mm_acq_rel,
+ is_mm_seq_cst, is_mm_sync): Move to ...
+ * memmodel.h: This. New file.
+ * builtins.c: Include memmodel.h.
+ * optabs.c: Likewise.
+ * tsan.c: Likewise.
+ * config/aarch64/aarch64.c: Likewise.
+ * config/alpha/alpha.c: Likewise.
+ * config/arm/arm.c: Likewise.
+ * config/i386/i386.c: Likewise.
+ * config/ia64/ia64.c: Likewise.
+ * config/mips/mips.c: Likewise.
+ * config/rs6000/rs6000.c: Likewise.
+ * config/sparc/sparc.c: Likewise.
+ * genconditions.c: Include memmodel.h in generated file.
+ * genemit.c: Likewise.
+ * genoutput.c: Likewise.
+ * genpeep.c: Likewise.
+ * genpreds.c: Likewise.
+ * genrecog.c: Likewise.
+
+2016-09-01 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-07-14 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm.h (TARGET_HAVE_LDACQ): Enable for ARMv8-M Mainline.
+ (TARGET_HAVE_LDACQD): New macro.
+ * config/arm/sync.md (atomic_loaddi): Use TARGET_HAVE_LDACQD rather
+ than TARGET_HAVE_LDACQ.
+ (arm_load_acquire_exclusivedi): Likewise.
+ (arm_store_release_exclusivedi): Likewise.
+
+2016-07-13 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-07-13 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm.h (TARGET_HAVE_CBZ): Define.
+ (TARGET_IDIV): Set for all Thumb targets provided they have hardware
+ divide feature.
+ * config/arm/arm.md (divsi3): New unpredicable alternative for ARMv8-M
+ Baseline. Make initial alternative TARGET_32BIT only.
+ (udivsi3): Likewise.
+ * config/arm/thumb1.md (thumb1_cbz): New define_insn.
+ * doc/sourcebuild.texi (arm_thumb1_cbz_ok): Document new effective
+ target.
+
+2016-07-13 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-07-13 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm.h (TARGET_HAVE_MOVT): Include ARMv8-M as having MOVT.
+ * config/arm/arm.c (arm_arch_name): (const_ok_for_op): Check MOVT/MOVW
+ availability with TARGET_HAVE_MOVT.
+ (thumb_legitimate_constant_p): Strip the high part of a label_ref.
+ (thumb1_rtx_costs): Also return 0 if setting a half word constant and
+ MOVW is available and replace (unsigned HOST_WIDE_INT) INTVAL by
+ UINTVAL.
+ (thumb1_size_rtx_costs): Make set of half word constant also cost 1
+ extra instruction if MOVW is available. Use a cost variable
+ incremented by COSTS_N_INSNS (1) when the condition match rather than
+ returning an arithmetic expression based on COSTS_N_INSNS. Make
+ constant with bottom half word zero cost 2 instruction if MOVW is
+ available.
+ * config/arm/arm.md (define_attr "arch"): Add v8mb.
+ (define_attr "arch_enabled"): Set to yes if arch value is v8mb and
+ target is ARMv8-M Baseline.
+ (arm_movt): New unpredicable alternative for ARMv8-M Baseline.
+ (arm_movtas_ze): Likewise.
+ * config/arm/thumb1.md (thumb1_movdi_insn): Add ARMv8-M Baseline only
+ alternative for constants satisfying j constraint.
+ (thumb1_movsi_insn): Likewise.
+ (movsi splitter for K alternative): Tighten condition to not trigger
+ if movt is available and j constraint is satisfied.
+ (Pe immediate splitter): Likewise.
+ (thumb1_movhi_insn): Add ARMv8-M Baseline only alternative for
+ constant fitting in an halfword to use MOVW.
+ * doc/sourcebuild.texi (arm_thumb1_movt_ok): Document new ARM
+ effective target.
+
+2016-07-11 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-07-07 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm.h (TARGET_USE_MOVT): Check MOVT/MOVW availability
+ with TARGET_HAVE_MOVT.
+ (TARGET_HAVE_MOVT): Define.
+ * config/arm/arm.c (const_ok_for_op): Check MOVT/MOVW
+ availability with TARGET_HAVE_MOVT.
+ * config/arm/arm.md (arm_movt): Use TARGET_HAVE_MOVT to check MOVT
+ availability.
+ (addsi splitter): Use TARGET_THUMB && TARGET_HAVE_MOVT rather than
+ TARGET_THUMB2.
+ (symbol_refs movsi splitter): Remove TARGET_32BIT check.
+ (arm_movtas_ze): Use TARGET_HAVE_MOVT to check MOVT availability.
+ * config/arm/constraints.md (define_constraint "j"): Use
+ TARGET_HAVE_MOVT to check MOVT availability.
+
+2016-07-11 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-07-07 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm-protos.h: Reindent FL_FOR_* macro definitions.
+
+2016-07-11 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-07-07 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/arm-arches.def (armv8-m.base): Define new architecture.
+ (armv8-m.main): Likewise.
+ (armv8-m.main+dsp): Likewise.
+ * config/arm/arm-protos.h (FL_FOR_ARCH8M_BASE): Define.
+ (FL_FOR_ARCH8M_MAIN): Likewise.
+ * config/arm/arm-tables.opt: Regenerate.
+ * config/arm/bpabi.h: Add armv8-m.base, armv8-m.main and
+ armv8-m.main+dsp to BE8_LINK_SPEC.
+ * config/arm/arm.h (TARGET_HAVE_LDACQ): Exclude ARMv8-M.
+ (enum base_architecture): Add BASE_ARCH_8M_BASE and BASE_ARCH_8M_MAIN.
+ * config/arm/arm.c (arm_arch_name): Increase size to work with ARMv8-M
+ Baseline and Mainline.
+ (arm_option_override_internal): Also disable arm_restrict_it when
+ !arm_arch_notm. Update comment for -munaligned-access to also cover
+ ARMv8-M Baseline.
+ (arm_file_start): Increase buffer size for printing architecture name.
+ * doc/invoke.texi: Document architectures armv8-m.base, armv8-m.main
+ and armv8-m.main+dsp.
+ (mno-unaligned-access): Clarify that this is disabled by default for
+ ARMv8-M Baseline architectures as well.
+
+2016-07-11 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-07-07 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/elf.h: Use __ARM_ARCH_ISA_THUMB and __ARM_ARCH_ISA_ARM to
+ decide whether to prevent some libgcc routines being included for some
+ multilibs rather than __ARM_ARCH_6M__ and add comment to indicate the
+ link between this condition and the one in
+	libgcc/config/arm/lib1funcs.S.
diff --git a/gcc/builtins.c b/gcc/builtins.c
index bb9ee35cd8c..bf663279872 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -28,6 +28,7 @@ along with GCC; see the file COPYING3. If not see
#include "target.h"
#include "rtl.h"
#include "tree.h"
+#include "memmodel.h"
#include "gimple.h"
#include "predict.h"
#include "tm_p.h"
diff --git a/gcc/c-family/ChangeLog.arm b/gcc/c-family/ChangeLog.arm
new file mode 100644
index 00000000000..195184fa29e
--- /dev/null
+++ b/gcc/c-family/ChangeLog.arm
@@ -0,0 +1,6 @@
+2016-10-25 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-09-26 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * c-common.c: Include memmodel.h.
diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c
index d2e3ad46a01..c6eecaf60b7 100644
--- a/gcc/c-family/c-common.c
+++ b/gcc/c-family/c-common.c
@@ -25,6 +25,7 @@ along with GCC; see the file COPYING3. If not see
#include "target.h"
#include "function.h"
#include "tree.h"
+#include "memmodel.h"
#include "c-common.h"
#include "gimple-expr.h"
#include "tm_p.h"
diff --git a/gcc/common/config/arm/arm-common.c b/gcc/common/config/arm/arm-common.c
index a9abd6b026e..1cb59aae5ed 100644
--- a/gcc/common/config/arm/arm-common.c
+++ b/gcc/common/config/arm/arm-common.c
@@ -97,6 +97,49 @@ arm_rewrite_mcpu (int argc, const char **argv)
return arm_rewrite_selected_cpu (argv[argc - 1]);
}
+struct arm_arch_core_flag
+{
+ const char *const name;
+ const arm_feature_set flags;
+};
+
+static const struct arm_arch_core_flag arm_arch_core_flags[] =
+{
+#undef ARM_CORE
+#define ARM_CORE(NAME, X, IDENT, ARCH, FLAGS, COSTS) \
+ {NAME, FLAGS},
+#include "config/arm/arm-cores.def"
+#undef ARM_CORE
+#undef ARM_ARCH
+#define ARM_ARCH(NAME, CORE, ARCH, FLAGS) \
+ {NAME, FLAGS},
+#include "config/arm/arm-arches.def"
+#undef ARM_ARCH
+};
+
+/* Called by the driver to check whether the target denoted by current
+ command line options is a Thumb-only target. ARGV is an array of
+ -march and -mcpu values (ie. it contains the rhs after the equal
+ sign) and we use the last one of them to make a decision. The
+ number of elements in ARGV is given in ARGC. */
+const char *
+arm_target_thumb_only (int argc, const char **argv)
+{
+ unsigned int opt;
+
+ if (argc)
+ {
+ for (opt = 0; opt < (ARRAY_SIZE (arm_arch_core_flags)); opt++)
+ if ((strcmp (argv[argc - 1], arm_arch_core_flags[opt].name) == 0)
+ && !ARM_FSET_HAS_CPU1(arm_arch_core_flags[opt].flags, FL_NOTM))
+ return "-mthumb";
+
+ return NULL;
+ }
+ else
+ return NULL;
+}
+
#undef ARM_CPU_NAME_LENGTH
diff --git a/gcc/config.gcc b/gcc/config.gcc
index 858b878d4b3..9550d035ff9 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -327,7 +327,7 @@ arc*-*-*)
arm*-*-*)
cpu_type=arm
extra_objs="arm-builtins.o aarch-common.o"
- extra_headers="mmintrin.h arm_neon.h arm_acle.h"
+ extra_headers="mmintrin.h arm_neon.h arm_acle.h arm_cmse.h"
target_type_format_char='%'
c_target_objs="arm-c.o"
cxx_target_objs="arm-c.o"
@@ -3811,38 +3811,51 @@ case "${target}" in
# Add extra multilibs
if test "x$with_multilib_list" != x; then
arm_multilibs=`echo $with_multilib_list | sed -e 's/,/ /g'`
- for arm_multilib in ${arm_multilibs}; do
- case ${arm_multilib} in
- aprofile)
+ case ${arm_multilibs} in
+ aprofile)
# Note that arm/t-aprofile is a
# stand-alone make file fragment to be
# used only with itself. We do not
# specifically use the
# TM_MULTILIB_OPTION framework because
# this shorthand is more
- # pragmatic. Additionally it is only
- # designed to work without any
- # with-cpu, with-arch with-mode
- # with-fpu or with-float options.
- if test "x$with_arch" != x \
- || test "x$with_cpu" != x \
- || test "x$with_float" != x \
- || test "x$with_fpu" != x \
- || test "x$with_mode" != x ; then
- echo "Error: You cannot use any of --with-arch/cpu/fpu/float/mode with --with-multilib-list=aprofile" 1>&2
- exit 1
- fi
- tmake_file="${tmake_file} arm/t-aprofile"
- break
- ;;
- default)
- ;;
- *)
- echo "Error: --with-multilib-list=${with_multilib_list} not supported." 1>&2
- exit 1
- ;;
- esac
- done
+ # pragmatic.
+ tmake_profile_file="arm/t-aprofile"
+ ;;
+ rmprofile)
+ # Note that arm/t-rmprofile is a
+ # stand-alone make file fragment to be
+ # used only with itself. We do not
+ # specifically use the
+ # TM_MULTILIB_OPTION framework because
+ # this shorthand is more
+ # pragmatic.
+ tmake_profile_file="arm/t-rmprofile"
+ ;;
+ default)
+ ;;
+ *)
+ echo "Error: --with-multilib-list=${with_multilib_list} not supported." 1>&2
+ exit 1
+ ;;
+ esac
+
+ if test "x${tmake_profile_file}" != x ; then
+ # arm/t-aprofile and arm/t-rmprofile are only
+ # designed to work without any with-cpu,
+ # with-arch, with-mode, with-fpu or with-float
+ # options.
+ if test "x$with_arch" != x \
+ || test "x$with_cpu" != x \
+ || test "x$with_float" != x \
+ || test "x$with_fpu" != x \
+ || test "x$with_mode" != x ; then
+ echo "Error: You cannot use any of --with-arch/cpu/fpu/float/mode with --with-multilib-list=${with_multilib_list}" 1>&2
+ exit 1
+ fi
+
+ tmake_file="${tmake_file} ${tmake_profile_file}"
+ fi
fi
;;
diff --git a/gcc/config.host b/gcc/config.host
index 5b3c75714ee..bd0b1bdf905 100644
--- a/gcc/config.host
+++ b/gcc/config.host
@@ -239,6 +239,7 @@ case ${host} in
host_xmake_file="${host_xmake_file} i386/x-mingw32"
host_exeext=.exe
out_host_hook_obj=host-mingw32.o
+ host_extra_gcc_objs="${host_extra_gcc_objs} driver-mingw32.o"
host_lto_plugin_soname=liblto_plugin-0.dll
;;
x86_64-*-mingw*)
@@ -247,6 +248,7 @@ case ${host} in
host_xmake_file="${host_xmake_file} i386/x-mingw32"
host_exeext=.exe
out_host_hook_obj=host-mingw32.o
+ host_extra_gcc_objs="${host_extra_gcc_objs} driver-mingw32.o"
host_lto_plugin_soname=liblto_plugin-0.dll
;;
i[34567]86-*-darwin* | x86_64-*-darwin*)
diff --git a/gcc/config.in b/gcc/config.in
index 4a70ddfa315..dcff614cb92 100644
--- a/gcc/config.in
+++ b/gcc/config.in
@@ -1977,6 +1977,12 @@
#endif
+/* Value to set MinGW's _dowildcard to. */
+#ifndef USED_FOR_TARGET
+#undef MINGW_DOWILDCARD
+#endif
+
+
/* Define if host mkdir takes a single argument. */
#ifndef USED_FOR_TARGET
#undef MKDIR_TAKES_ONE_ARG
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index f80afb92c45..4b58fc6ab8d 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -26,6 +26,7 @@
#include "target.h"
#include "rtl.h"
#include "tree.h"
+#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
diff --git a/gcc/config/alpha/alpha.c b/gcc/config/alpha/alpha.c
index e023d3bc278..1efe8879b82 100644
--- a/gcc/config/alpha/alpha.c
+++ b/gcc/config/alpha/alpha.c
@@ -26,6 +26,7 @@ along with GCC; see the file COPYING3. If not see
#include "target.h"
#include "rtl.h"
#include "tree.h"
+#include "memmodel.h"
#include "gimple.h"
#include "df.h"
#include "tm_p.h"
diff --git a/gcc/config/arm/arm-arches.def b/gcc/config/arm/arm-arches.def
index fd02b18db01..1d244297054 100644
--- a/gcc/config/arm/arm-arches.def
+++ b/gcc/config/arm/arm-arches.def
@@ -62,6 +62,12 @@ ARM_ARCH("armv8.1-a", cortexa53, 8A,
ARM_ARCH("armv8.1-a+crc",cortexa53, 8A,
ARM_FSET_MAKE (FL_CO_PROC | FL_CRC32 | FL_FOR_ARCH8A,
FL2_FOR_ARCH8_1A))
+ARM_ARCH("armv8-m.base", cortexm23, 8M_BASE,
+ ARM_FSET_MAKE (FL_FOR_ARCH8M_BASE, FL2_CMSE))
+ARM_ARCH("armv8-m.main", cortexm7, 8M_MAIN,
+ ARM_FSET_MAKE (FL_CO_PROC | FL_FOR_ARCH8M_MAIN, FL2_CMSE))
+ARM_ARCH("armv8-m.main+dsp", cortexm33, 8M_MAIN,
+ ARM_FSET_MAKE (FL_CO_PROC | FL_ARCH7EM | FL_FOR_ARCH8M_MAIN, FL2_CMSE))
ARM_ARCH("iwmmxt", iwmmxt, 5TE, ARM_FSET_MAKE_CPU1 (FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT))
ARM_ARCH("iwmmxt2", iwmmxt2, 5TE, ARM_FSET_MAKE_CPU1 (FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT | FL_IWMMXT2))
diff --git a/gcc/config/arm/arm-builtins.c b/gcc/config/arm/arm-builtins.c
index 68b2839879f..ca622519b7d 100644
--- a/gcc/config/arm/arm-builtins.c
+++ b/gcc/config/arm/arm-builtins.c
@@ -37,7 +37,7 @@
#include "langhooks.h"
#include "case-cfn-macros.h"
-#define SIMD_MAX_BUILTIN_ARGS 5
+#define SIMD_MAX_BUILTIN_ARGS 7
enum arm_type_qualifiers
{
@@ -49,9 +49,12 @@ enum arm_type_qualifiers
qualifier_const = 0x2, /* 1 << 1 */
/* T *foo. */
qualifier_pointer = 0x4, /* 1 << 2 */
+ /* const T * foo. */
+ qualifier_const_pointer = 0x6,
/* Used when expanding arguments if an operand could
be an immediate. */
qualifier_immediate = 0x8, /* 1 << 3 */
+ qualifier_unsigned_immediate = 0x9,
qualifier_maybe_immediate = 0x10, /* 1 << 4 */
/* void foo (...). */
qualifier_void = 0x20, /* 1 << 5 */
@@ -156,6 +159,80 @@ arm_load1_lane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
qualifier_none, qualifier_struct_load_store_lane_index };
#define LOAD1LANE_QUALIFIERS (arm_load1_lane_qualifiers)
+/* unsigned T (unsigned T, unsigned T, unsigned T). */
+static enum arm_type_qualifiers
+arm_unsigned_binop_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_unsigned, qualifier_unsigned,
+ qualifier_unsigned };
+#define UBINOP_QUALIFIERS (arm_unsigned_binop_qualifiers)
+
+/* void (unsigned immediate, unsigned immediate, unsigned immediate,
+ unsigned immediate, unsigned immediate, unsigned immediate). */
+static enum arm_type_qualifiers
+arm_cdp_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_void, qualifier_unsigned_immediate,
+ qualifier_unsigned_immediate,
+ qualifier_unsigned_immediate,
+ qualifier_unsigned_immediate,
+ qualifier_unsigned_immediate,
+ qualifier_unsigned_immediate };
+#define CDP_QUALIFIERS \
+ (arm_cdp_qualifiers)
+
+/* void (unsigned immediate, unsigned immediate, const void *). */
+static enum arm_type_qualifiers
+arm_ldc_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_void, qualifier_unsigned_immediate,
+ qualifier_unsigned_immediate, qualifier_const_pointer };
+#define LDC_QUALIFIERS \
+ (arm_ldc_qualifiers)
+
+/* void (unsigned immediate, unsigned immediate, void *). */
+static enum arm_type_qualifiers
+arm_stc_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_void, qualifier_unsigned_immediate,
+ qualifier_unsigned_immediate, qualifier_pointer };
+#define STC_QUALIFIERS \
+ (arm_stc_qualifiers)
+
+/* void (unsigned immediate, unsigned immediate, T, unsigned immediate,
+ unsigned immediate, unsigned immediate). */
+static enum arm_type_qualifiers
+arm_mcr_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_void, qualifier_unsigned_immediate,
+ qualifier_unsigned_immediate, qualifier_none,
+ qualifier_unsigned_immediate, qualifier_unsigned_immediate,
+ qualifier_unsigned_immediate };
+#define MCR_QUALIFIERS \
+ (arm_mcr_qualifiers)
+
+/* T (unsigned immediate, unsigned immediate, unsigned immediate,
+ unsigned immediate, unsigned immediate). */
+static enum arm_type_qualifiers
+arm_mrc_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_unsigned_immediate,
+ qualifier_unsigned_immediate, qualifier_unsigned_immediate,
+ qualifier_unsigned_immediate, qualifier_unsigned_immediate };
+#define MRC_QUALIFIERS \
+ (arm_mrc_qualifiers)
+
+/* void (unsigned immediate, unsigned immediate, T, unsigned immediate). */
+static enum arm_type_qualifiers
+arm_mcrr_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_void, qualifier_unsigned_immediate,
+ qualifier_unsigned_immediate, qualifier_none,
+ qualifier_unsigned_immediate };
+#define MCRR_QUALIFIERS \
+ (arm_mcrr_qualifiers)
+
+/* T (unsigned immediate, unsigned immediate, unsigned immediate). */
+static enum arm_type_qualifiers
+arm_mrrc_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_unsigned_immediate,
+ qualifier_unsigned_immediate, qualifier_unsigned_immediate };
+#define MRRC_QUALIFIERS \
+ (arm_mrrc_qualifiers)
+
/* The first argument (return type) of a store should be void type,
which we represent with qualifier_void. Their first operand will be
a DImode pointer to the location to store to, so we must use
@@ -190,6 +267,8 @@ arm_storestruct_lane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
#define ti_UP TImode
#define ei_UP EImode
#define oi_UP OImode
+#define si_UP SImode
+#define void_UP VOIDmode
#define UP(X) X##_UP
@@ -199,7 +278,7 @@ typedef struct {
const enum insn_code code;
unsigned int fcode;
enum arm_type_qualifiers *qualifiers;
-} neon_builtin_datum;
+} arm_builtin_datum;
#define CF(N,X) CODE_FOR_neon_##N##X
@@ -239,20 +318,30 @@ typedef struct {
VAR11 (T, N, A, B, C, D, E, F, G, H, I, J, K) \
VAR1 (T, N, L)
-/* The NEON builtin data can be found in arm_neon_builtins.def.
+/* The builtin data can be found in arm_neon_builtins.def and
+ arm_acle_builtins.def.
The mode entries in the following table correspond to the "key" type of the
instruction variant, i.e. equivalent to that which would be specified after
the assembler mnemonic, which usually refers to the last vector operand.
The modes listed per instruction should be the same as those defined for
- that instruction's pattern in neon.md. */
+ that instruction's pattern, for instance in neon.md. */
-static neon_builtin_datum neon_builtin_data[] =
+static arm_builtin_datum neon_builtin_data[] =
{
#include "arm_neon_builtins.def"
};
#undef CF
#undef VAR1
+#define VAR1(T, N, A) \
+ {#N, UP (A), CODE_FOR_##N, 0, T##_QUALIFIERS},
+
+static arm_builtin_datum acle_builtin_data[] =
+{
+#include "arm_acle_builtins.def"
+};
+
+#undef VAR1
#define VAR1(T, N, X) \
ARM_BUILTIN_NEON_##N##X,
@@ -505,16 +594,11 @@ enum arm_builtins
ARM_BUILTIN_WMERGE,
- ARM_BUILTIN_CRC32B,
- ARM_BUILTIN_CRC32H,
- ARM_BUILTIN_CRC32W,
- ARM_BUILTIN_CRC32CB,
- ARM_BUILTIN_CRC32CH,
- ARM_BUILTIN_CRC32CW,
-
ARM_BUILTIN_GET_FPSCR,
ARM_BUILTIN_SET_FPSCR,
+ ARM_BUILTIN_CMSE_NONSECURE_CALLER,
+
#undef CRYPTO1
#undef CRYPTO2
#undef CRYPTO3
@@ -539,11 +623,22 @@ enum arm_builtins
#include "arm_neon_builtins.def"
+#undef VAR1
+#define VAR1(T, N, X) \
+ ARM_BUILTIN_##N,
+
+ ARM_BUILTIN_ACLE_BASE,
+
+#include "arm_acle_builtins.def"
+
ARM_BUILTIN_MAX
};
#define ARM_BUILTIN_NEON_PATTERN_START \
- (ARM_BUILTIN_MAX - ARRAY_SIZE (neon_builtin_data))
+ (ARM_BUILTIN_NEON_BASE + 1)
+
+#define ARM_BUILTIN_ACLE_PATTERN_START \
+ (ARM_BUILTIN_ACLE_BASE + 1)
#undef CF
#undef VAR1
@@ -895,6 +990,132 @@ arm_init_simd_builtin_scalar_types (void)
"__builtin_neon_uti");
}
+/* Set up a builtin. It will use information stored in the argument struct D to
+ derive the builtin's type signature and name. It will append the name in D
+ to the PREFIX passed and use these to create a builtin declaration that is
+ then stored in 'arm_builtin_decls' under index FCODE. This FCODE is also
+ written back to D for future use. */
+
+static void
+arm_init_builtin (unsigned int fcode, arm_builtin_datum *d,
+ const char * prefix)
+{
+ bool print_type_signature_p = false;
+ char type_signature[SIMD_MAX_BUILTIN_ARGS] = { 0 };
+ char namebuf[60];
+ tree ftype = NULL;
+ tree fndecl = NULL;
+
+ d->fcode = fcode;
+
+ /* We must track two variables here. op_num is
+ the operand number as in the RTL pattern. This is
+ required to access the mode (e.g. V4SF mode) of the
+ argument, from which the base type can be derived.
+ arg_num is an index in to the qualifiers data, which
+ gives qualifiers to the type (e.g. const unsigned).
+ The reason these two variables may differ by one is the
+ void return type. While all return types take the 0th entry
+ in the qualifiers array, there is no operand for them in the
+ RTL pattern. */
+ int op_num = insn_data[d->code].n_operands - 1;
+ int arg_num = d->qualifiers[0] & qualifier_void
+ ? op_num + 1
+ : op_num;
+ tree return_type = void_type_node, args = void_list_node;
+ tree eltype;
+
+ /* Build a function type directly from the insn_data for this
+ builtin. The build_function_type () function takes care of
+ removing duplicates for us. */
+ for (; op_num >= 0; arg_num--, op_num--)
+ {
+ machine_mode op_mode = insn_data[d->code].operand[op_num].mode;
+ enum arm_type_qualifiers qualifiers = d->qualifiers[arg_num];
+
+ if (qualifiers & qualifier_unsigned)
+ {
+ type_signature[arg_num] = 'u';
+ print_type_signature_p = true;
+ }
+ else if (qualifiers & qualifier_poly)
+ {
+ type_signature[arg_num] = 'p';
+ print_type_signature_p = true;
+ }
+ else
+ type_signature[arg_num] = 's';
+
+ /* Skip an internal operand for vget_{low, high}. */
+ if (qualifiers & qualifier_internal)
+ continue;
+
+ /* Some builtins have different user-facing types
+ for certain arguments, encoded in d->mode. */
+ if (qualifiers & qualifier_map_mode)
+ op_mode = d->mode;
+
+ /* For pointers, we want a pointer to the basic type
+ of the vector. */
+ if (qualifiers & qualifier_pointer && VECTOR_MODE_P (op_mode))
+ op_mode = GET_MODE_INNER (op_mode);
+
+ eltype = arm_simd_builtin_type
+ (op_mode,
+ (qualifiers & qualifier_unsigned) != 0,
+ (qualifiers & qualifier_poly) != 0);
+ gcc_assert (eltype != NULL);
+
+ /* Add qualifiers. */
+ if (qualifiers & qualifier_const)
+ eltype = build_qualified_type (eltype, TYPE_QUAL_CONST);
+
+ if (qualifiers & qualifier_pointer)
+ eltype = build_pointer_type (eltype);
+
+ /* If we have reached arg_num == 0, we are at a non-void
+ return type. Otherwise, we are still processing
+ arguments. */
+ if (arg_num == 0)
+ return_type = eltype;
+ else
+ args = tree_cons (NULL_TREE, eltype, args);
+ }
+
+ ftype = build_function_type (return_type, args);
+
+ gcc_assert (ftype != NULL);
+
+ if (print_type_signature_p
+ && IN_RANGE (fcode, ARM_BUILTIN_NEON_BASE, ARM_BUILTIN_ACLE_BASE - 1))
+ snprintf (namebuf, sizeof (namebuf), "%s_%s_%s",
+ prefix, d->name, type_signature);
+ else
+ snprintf (namebuf, sizeof (namebuf), "%s_%s",
+ prefix, d->name);
+
+ fndecl = add_builtin_function (namebuf, ftype, fcode, BUILT_IN_MD,
+ NULL, NULL_TREE);
+ arm_builtin_decls[fcode] = fndecl;
+}
+
+/* Set up ACLE builtins, even builtins for instructions that are not
+ in the current target ISA to allow the user to compile particular modules
+ with different target specific options that differ from the command line
+ options. Such builtins will be rejected in arm_expand_builtin. */
+
+static void
+arm_init_acle_builtins (void)
+{
+ unsigned int i, fcode = ARM_BUILTIN_ACLE_PATTERN_START;
+
+ for (i = 0; i < ARRAY_SIZE (acle_builtin_data); i++, fcode++)
+ {
+ arm_builtin_datum *d = &acle_builtin_data[i];
+ arm_init_builtin (fcode, d, "__builtin_arm");
+ }
+}
+
/* Set up all the NEON builtins, even builtins for instructions that are not
in the current target ISA to allow the user to compile particular modules
with different target specific options that differ from the command line
@@ -924,103 +1145,8 @@ arm_init_neon_builtins (void)
for (i = 0; i < ARRAY_SIZE (neon_builtin_data); i++, fcode++)
{
- bool print_type_signature_p = false;
- char type_signature[SIMD_MAX_BUILTIN_ARGS] = { 0 };
- neon_builtin_datum *d = &neon_builtin_data[i];
- char namebuf[60];
- tree ftype = NULL;
- tree fndecl = NULL;
-
- d->fcode = fcode;
-
- /* We must track two variables here. op_num is
- the operand number as in the RTL pattern. This is
- required to access the mode (e.g. V4SF mode) of the
- argument, from which the base type can be derived.
- arg_num is an index in to the qualifiers data, which
- gives qualifiers to the type (e.g. const unsigned).
- The reason these two variables may differ by one is the
- void return type. While all return types take the 0th entry
- in the qualifiers array, there is no operand for them in the
- RTL pattern. */
- int op_num = insn_data[d->code].n_operands - 1;
- int arg_num = d->qualifiers[0] & qualifier_void
- ? op_num + 1
- : op_num;
- tree return_type = void_type_node, args = void_list_node;
- tree eltype;
-
- /* Build a function type directly from the insn_data for this
- builtin. The build_function_type () function takes care of
- removing duplicates for us. */
- for (; op_num >= 0; arg_num--, op_num--)
- {
- machine_mode op_mode = insn_data[d->code].operand[op_num].mode;
- enum arm_type_qualifiers qualifiers = d->qualifiers[arg_num];
-
- if (qualifiers & qualifier_unsigned)
- {
- type_signature[arg_num] = 'u';
- print_type_signature_p = true;
- }
- else if (qualifiers & qualifier_poly)
- {
- type_signature[arg_num] = 'p';
- print_type_signature_p = true;
- }
- else
- type_signature[arg_num] = 's';
-
- /* Skip an internal operand for vget_{low, high}. */
- if (qualifiers & qualifier_internal)
- continue;
-
- /* Some builtins have different user-facing types
- for certain arguments, encoded in d->mode. */
- if (qualifiers & qualifier_map_mode)
- op_mode = d->mode;
-
- /* For pointers, we want a pointer to the basic type
- of the vector. */
- if (qualifiers & qualifier_pointer && VECTOR_MODE_P (op_mode))
- op_mode = GET_MODE_INNER (op_mode);
-
- eltype = arm_simd_builtin_type
- (op_mode,
- (qualifiers & qualifier_unsigned) != 0,
- (qualifiers & qualifier_poly) != 0);
- gcc_assert (eltype != NULL);
-
- /* Add qualifiers. */
- if (qualifiers & qualifier_const)
- eltype = build_qualified_type (eltype, TYPE_QUAL_CONST);
-
- if (qualifiers & qualifier_pointer)
- eltype = build_pointer_type (eltype);
-
- /* If we have reached arg_num == 0, we are at a non-void
- return type. Otherwise, we are still processing
- arguments. */
- if (arg_num == 0)
- return_type = eltype;
- else
- args = tree_cons (NULL_TREE, eltype, args);
- }
-
- ftype = build_function_type (return_type, args);
-
- gcc_assert (ftype != NULL);
-
- if (print_type_signature_p)
- snprintf (namebuf, sizeof (namebuf), "__builtin_neon_%s_%s",
- d->name, type_signature);
- else
- snprintf (namebuf, sizeof (namebuf), "__builtin_neon_%s",
- d->name);
-
- fndecl = add_builtin_function (namebuf, ftype, fcode, BUILT_IN_MD,
- NULL, NULL_TREE);
- arm_builtin_decls[fcode] = fndecl;
+ arm_builtin_datum *d = &neon_builtin_data[i];
+ arm_init_builtin (fcode, d, "__builtin_neon");
}
}
@@ -1251,18 +1377,6 @@ static const struct builtin_description bdesc_2arg[] =
FP_BUILTIN (set_fpscr, SET_FPSCR)
#undef FP_BUILTIN
-#define CRC32_BUILTIN(L, U) \
- {ARM_FSET_EMPTY, CODE_FOR_##L, "__builtin_arm_"#L, \
- ARM_BUILTIN_##U, UNKNOWN, 0},
- CRC32_BUILTIN (crc32b, CRC32B)
- CRC32_BUILTIN (crc32h, CRC32H)
- CRC32_BUILTIN (crc32w, CRC32W)
- CRC32_BUILTIN (crc32cb, CRC32CB)
- CRC32_BUILTIN (crc32ch, CRC32CH)
- CRC32_BUILTIN (crc32cw, CRC32CW)
-#undef CRC32_BUILTIN
-
-
#define CRYPTO_BUILTIN(L, U) \
{ARM_FSET_EMPTY, CODE_FOR_crypto_##L, "__builtin_arm_crypto_"#L, \
ARM_BUILTIN_CRYPTO_##U, UNKNOWN, 0},
@@ -1719,42 +1833,6 @@ arm_init_fp16_builtins (void)
"__fp16");
}
-static void
-arm_init_crc32_builtins ()
-{
- tree si_ftype_si_qi
- = build_function_type_list (unsigned_intSI_type_node,
- unsigned_intSI_type_node,
- unsigned_intQI_type_node, NULL_TREE);
- tree si_ftype_si_hi
- = build_function_type_list (unsigned_intSI_type_node,
- unsigned_intSI_type_node,
- unsigned_intHI_type_node, NULL_TREE);
- tree si_ftype_si_si
- = build_function_type_list (unsigned_intSI_type_node,
- unsigned_intSI_type_node,
- unsigned_intSI_type_node, NULL_TREE);
-
- arm_builtin_decls[ARM_BUILTIN_CRC32B]
- = add_builtin_function ("__builtin_arm_crc32b", si_ftype_si_qi,
- ARM_BUILTIN_CRC32B, BUILT_IN_MD, NULL, NULL_TREE);
- arm_builtin_decls[ARM_BUILTIN_CRC32H]
- = add_builtin_function ("__builtin_arm_crc32h", si_ftype_si_hi,
- ARM_BUILTIN_CRC32H, BUILT_IN_MD, NULL, NULL_TREE);
- arm_builtin_decls[ARM_BUILTIN_CRC32W]
- = add_builtin_function ("__builtin_arm_crc32w", si_ftype_si_si,
- ARM_BUILTIN_CRC32W, BUILT_IN_MD, NULL, NULL_TREE);
- arm_builtin_decls[ARM_BUILTIN_CRC32CB]
- = add_builtin_function ("__builtin_arm_crc32cb", si_ftype_si_qi,
- ARM_BUILTIN_CRC32CB, BUILT_IN_MD, NULL, NULL_TREE);
- arm_builtin_decls[ARM_BUILTIN_CRC32CH]
- = add_builtin_function ("__builtin_arm_crc32ch", si_ftype_si_hi,
- ARM_BUILTIN_CRC32CH, BUILT_IN_MD, NULL, NULL_TREE);
- arm_builtin_decls[ARM_BUILTIN_CRC32CW]
- = add_builtin_function ("__builtin_arm_crc32cw", si_ftype_si_si,
- ARM_BUILTIN_CRC32CW, BUILT_IN_MD, NULL, NULL_TREE);
-}
-
void
arm_init_builtins (void)
{
@@ -1772,8 +1850,7 @@ arm_init_builtins (void)
arm_init_crypto_builtins ();
}
- if (TARGET_CRC32)
- arm_init_crc32_builtins ();
+ arm_init_acle_builtins ();
if (TARGET_VFP && TARGET_HARD_FLOAT)
{
@@ -1789,6 +1866,17 @@ arm_init_builtins (void)
= add_builtin_function ("__builtin_arm_stfscr", ftype_set_fpscr,
ARM_BUILTIN_SET_FPSCR, BUILT_IN_MD, NULL, NULL_TREE);
}
+
+ if (use_cmse)
+ {
+ tree ftype_cmse_nonsecure_caller
+ = build_function_type_list (unsigned_type_node, NULL);
+ arm_builtin_decls[ARM_BUILTIN_CMSE_NONSECURE_CALLER]
+ = add_builtin_function ("__builtin_arm_cmse_nonsecure_caller",
+ ftype_cmse_nonsecure_caller,
+ ARM_BUILTIN_CMSE_NONSECURE_CALLER, BUILT_IN_MD,
+ NULL, NULL_TREE);
+ }
}
/* Return the ARM builtin for CODE. */
@@ -1973,15 +2061,15 @@ arm_expand_unop_builtin (enum insn_code icode,
}
typedef enum {
- NEON_ARG_COPY_TO_REG,
- NEON_ARG_CONSTANT,
- NEON_ARG_LANE_INDEX,
- NEON_ARG_STRUCT_LOAD_STORE_LANE_INDEX,
- NEON_ARG_MEMORY,
- NEON_ARG_STOP
+ ARG_BUILTIN_COPY_TO_REG,
+ ARG_BUILTIN_CONSTANT,
+ ARG_BUILTIN_LANE_INDEX,
+ ARG_BUILTIN_STRUCT_LOAD_STORE_LANE_INDEX,
+ ARG_BUILTIN_NEON_MEMORY,
+ ARG_BUILTIN_MEMORY,
+ ARG_BUILTIN_STOP
} builtin_arg;
-#define NEON_MAX_BUILTIN_ARGS 5
/* EXP is a pointer argument to a Neon load or store intrinsic. Derive
and return an expression for the accessed memory.
@@ -2031,9 +2119,9 @@ neon_dereference_pointer (tree exp, tree type, machine_mode mem_mode,
build_int_cst (build_pointer_type (array_type), 0));
}
-/* Expand a Neon builtin. */
+/* Expand a builtin. */
static rtx
-arm_expand_neon_args (rtx target, machine_mode map_mode, int fcode,
+arm_expand_builtin_args (rtx target, machine_mode map_mode, int fcode,
int icode, int have_retval, tree exp,
builtin_arg *args)
{
@@ -2044,6 +2132,7 @@ arm_expand_neon_args (rtx target, machine_mode map_mode, int fcode,
machine_mode mode[SIMD_MAX_BUILTIN_ARGS];
tree formals;
int argc = 0;
+ rtx_insn * insn;
if (have_retval
&& (!target
@@ -2057,14 +2146,14 @@ arm_expand_neon_args (rtx target, machine_mode map_mode, int fcode,
{
builtin_arg thisarg = args[argc];
- if (thisarg == NEON_ARG_STOP)
+ if (thisarg == ARG_BUILTIN_STOP)
break;
else
{
int opno = argc + have_retval;
arg[argc] = CALL_EXPR_ARG (exp, argc);
mode[argc] = insn_data[icode].operand[opno].mode;
- if (thisarg == NEON_ARG_MEMORY)
+ if (thisarg == ARG_BUILTIN_NEON_MEMORY)
{
machine_mode other_mode
= insn_data[icode].operand[1 - opno].mode;
@@ -2074,15 +2163,17 @@ arm_expand_neon_args (rtx target, machine_mode map_mode, int fcode,
map_mode);
}
- /* Use EXPAND_MEMORY for NEON_ARG_MEMORY to ensure a MEM_P
- be returned. */
+ /* Use EXPAND_MEMORY for ARG_BUILTIN_MEMORY and
+ ARG_BUILTIN_NEON_MEMORY to ensure a MEM_P is returned. */
op[argc] = expand_expr (arg[argc], NULL_RTX, VOIDmode,
- (thisarg == NEON_ARG_MEMORY
+ ((thisarg == ARG_BUILTIN_MEMORY
+ || thisarg == ARG_BUILTIN_NEON_MEMORY)
? EXPAND_MEMORY : EXPAND_NORMAL));
switch (thisarg)
{
- case NEON_ARG_COPY_TO_REG:
+ case ARG_BUILTIN_MEMORY:
+ case ARG_BUILTIN_COPY_TO_REG:
if (POINTER_TYPE_P (TREE_TYPE (arg[argc])))
op[argc] = convert_memory_address (Pmode, op[argc]);
/*gcc_assert (GET_MODE (op[argc]) == mode[argc]); */
@@ -2091,7 +2182,7 @@ arm_expand_neon_args (rtx target, machine_mode map_mode, int fcode,
op[argc] = copy_to_mode_reg (mode[argc], op[argc]);
break;
- case NEON_ARG_STRUCT_LOAD_STORE_LANE_INDEX:
+ case ARG_BUILTIN_STRUCT_LOAD_STORE_LANE_INDEX:
gcc_assert (argc > 1);
if (CONST_INT_P (op[argc]))
{
@@ -2103,7 +2194,7 @@ arm_expand_neon_args (rtx target, machine_mode map_mode, int fcode,
}
goto constant_arg;
- case NEON_ARG_LANE_INDEX:
+ case ARG_BUILTIN_LANE_INDEX:
/* Previous argument must be a vector, which this indexes. */
gcc_assert (argc > 0);
if (CONST_INT_P (op[argc]))
@@ -2111,10 +2202,9 @@ arm_expand_neon_args (rtx target, machine_mode map_mode, int fcode,
enum machine_mode vmode = mode[argc - 1];
neon_lane_bounds (op[argc], 0, GET_MODE_NUNITS (vmode), exp);
}
- /* Fall through - if the lane index isn't a constant then
- the next case will error. */
-
- case NEON_ARG_CONSTANT:
+ /* Fall through - if the lane index isn't a constant then the next
+ case will error. */
+ case ARG_BUILTIN_CONSTANT:
constant_arg:
if (!(*insn_data[icode].operand[opno].predicate)
(op[argc], mode[argc]))
@@ -2125,7 +2215,7 @@ constant_arg:
}
break;
- case NEON_ARG_MEMORY:
+ case ARG_BUILTIN_NEON_MEMORY:
/* Check if expand failed. */
if (op[argc] == const0_rtx)
return 0;
@@ -2142,7 +2232,7 @@ constant_arg:
copy_to_mode_reg (Pmode, XEXP (op[argc], 0))));
break;
- case NEON_ARG_STOP:
+ case ARG_BUILTIN_STOP:
gcc_unreachable ();
}
@@ -2173,6 +2263,10 @@ constant_arg:
pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
break;
+ case 6:
+ pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
+ break;
+
default:
gcc_unreachable ();
}
@@ -2199,6 +2293,10 @@ constant_arg:
pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
break;
+ case 6:
+ pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
+ break;
+
default:
gcc_unreachable ();
}
@@ -2206,50 +2304,39 @@ constant_arg:
if (!pat)
return 0;
+ /* Check whether our current target implements the pattern chosen for this
+ builtin and error out if not. */
+ start_sequence ();
emit_insn (pat);
+ insn = get_insns ();
+ end_sequence ();
+
+ if (recog_memoized (insn) < 0)
+ error ("this builtin is not supported for this target");
+ else
+ emit_insn (insn);
return target;
}
-/* Expand a Neon builtin, i.e. those registered only if TARGET_NEON holds.
- Most of these are "special" because they don't have symbolic
- constants defined per-instruction or per instruction-variant. Instead, the
- required info is looked up in the table neon_builtin_data. */
+/* Expand a builtin. These builtins are "special" because they don't have
+ symbolic constants defined per-instruction or per instruction-variant.
+ Instead, the required info is looked up in the ARM_BUILTIN_DATA record that
+ is passed into the function. */
+
static rtx
-arm_expand_neon_builtin (int fcode, tree exp, rtx target)
+arm_expand_builtin_1 (int fcode, tree exp, rtx target,
+ arm_builtin_datum *d)
{
- /* Check in the context of the function making the call whether the
- builtin is supported. */
- if (! TARGET_NEON)
- {
- fatal_error (input_location,
- "You must enable NEON instructions (e.g. -mfloat-abi=softfp -mfpu=neon) to use these intrinsics.");
- return const0_rtx;
- }
-
- if (fcode == ARM_BUILTIN_NEON_LANE_CHECK)
- {
- /* Builtin is only to check bounds of the lane passed to some intrinsics
- that are implemented with gcc vector extensions in arm_neon.h. */
-
- tree nlanes = CALL_EXPR_ARG (exp, 0);
- gcc_assert (TREE_CODE (nlanes) == INTEGER_CST);
- rtx lane_idx = expand_normal (CALL_EXPR_ARG (exp, 1));
- if (CONST_INT_P (lane_idx))
- neon_lane_bounds (lane_idx, 0, TREE_INT_CST_LOW (nlanes), exp);
- else
- error ("%Klane index must be a constant immediate", exp);
- /* Don't generate any RTL. */
- return const0_rtx;
- }
-
- neon_builtin_datum *d =
- &neon_builtin_data[fcode - ARM_BUILTIN_NEON_PATTERN_START];
enum insn_code icode = d->code;
builtin_arg args[SIMD_MAX_BUILTIN_ARGS + 1];
int num_args = insn_data[d->code].n_operands;
int is_void = 0;
int k;
+ bool neon = false;
+
+ if (IN_RANGE (fcode, ARM_BUILTIN_NEON_BASE, ARM_BUILTIN_ACLE_BASE - 1))
+ neon = true;
is_void = !!(d->qualifiers[0] & qualifier_void);
@@ -2260,8 +2347,8 @@ arm_expand_neon_builtin (int fcode, tree exp, rtx target)
/* We have four arrays of data, each indexed in a different fashion.
qualifiers - element 0 always describes the function return type.
operands - element 0 is either the operand for return value (if
- the function has a non-void return type) or the operand for the
- first argument.
+ the function has a non-void return type) or the operand for the
+ first argument.
expr_args - element 0 always holds the first argument.
args - element 0 is always used for the return type. */
int qualifiers_k = k;
@@ -2269,11 +2356,11 @@ arm_expand_neon_builtin (int fcode, tree exp, rtx target)
int expr_args_k = k - 1;
if (d->qualifiers[qualifiers_k] & qualifier_lane_index)
- args[k] = NEON_ARG_LANE_INDEX;
+ args[k] = ARG_BUILTIN_LANE_INDEX;
else if (d->qualifiers[qualifiers_k] & qualifier_struct_load_store_lane_index)
- args[k] = NEON_ARG_STRUCT_LOAD_STORE_LANE_INDEX;
+ args[k] = ARG_BUILTIN_STRUCT_LOAD_STORE_LANE_INDEX;
else if (d->qualifiers[qualifiers_k] & qualifier_immediate)
- args[k] = NEON_ARG_CONSTANT;
+ args[k] = ARG_BUILTIN_CONSTANT;
else if (d->qualifiers[qualifiers_k] & qualifier_maybe_immediate)
{
rtx arg
@@ -2283,21 +2370,79 @@ arm_expand_neon_builtin (int fcode, tree exp, rtx target)
bool op_const_int_p =
(CONST_INT_P (arg)
&& (*insn_data[icode].operand[operands_k].predicate)
- (arg, insn_data[icode].operand[operands_k].mode));
- args[k] = op_const_int_p ? NEON_ARG_CONSTANT : NEON_ARG_COPY_TO_REG;
+ (arg, insn_data[icode].operand[operands_k].mode));
+ args[k] = op_const_int_p ? ARG_BUILTIN_CONSTANT : ARG_BUILTIN_COPY_TO_REG;
}
else if (d->qualifiers[qualifiers_k] & qualifier_pointer)
- args[k] = NEON_ARG_MEMORY;
+ {
+ if (neon)
+ args[k] = ARG_BUILTIN_NEON_MEMORY;
+ else
+ args[k] = ARG_BUILTIN_MEMORY;
+ }
else
- args[k] = NEON_ARG_COPY_TO_REG;
+ args[k] = ARG_BUILTIN_COPY_TO_REG;
}
- args[k] = NEON_ARG_STOP;
+ args[k] = ARG_BUILTIN_STOP;
- /* The interface to arm_expand_neon_args expects a 0 if
+ /* The interface to arm_expand_builtin_args expects a 0 if
the function is void, and a 1 if it is not. */
- return arm_expand_neon_args
- (target, d->mode, fcode, icode, !is_void, exp,
- &args[1]);
+ return arm_expand_builtin_args
+ (target, d->mode, fcode, icode, !is_void, exp,
+ &args[1]);
+}
+
+/* Expand an ACLE builtin, i.e. those registered only if their respective
+ target constraints are met. This check happens within
+ arm_expand_builtin_args. */
+
+static rtx
+arm_expand_acle_builtin (int fcode, tree exp, rtx target)
+{
+
+ arm_builtin_datum *d
+ = &acle_builtin_data[fcode - ARM_BUILTIN_ACLE_PATTERN_START];
+
+ return arm_expand_builtin_1 (fcode, exp, target, d);
+}
+
+/* Expand a Neon builtin, i.e. those registered only if TARGET_NEON holds.
+ Most of these are "special" because they don't have symbolic
+ constants defined per-instruction or per instruction-variant. Instead, the
+ required info is looked up in the table neon_builtin_data. */
+
+static rtx
+arm_expand_neon_builtin (int fcode, tree exp, rtx target)
+{
+ if (fcode >= ARM_BUILTIN_NEON_BASE && ! TARGET_NEON)
+ {
+ fatal_error (input_location,
+ "You must enable NEON instructions"
+ " (e.g. -mfloat-abi=softfp -mfpu=neon)"
+ " to use these intrinsics.");
+ return const0_rtx;
+ }
+
+ if (fcode == ARM_BUILTIN_NEON_LANE_CHECK)
+ {
+ /* Builtin is only to check bounds of the lane passed to some intrinsics
+ that are implemented with gcc vector extensions in arm_neon.h. */
+
+ tree nlanes = CALL_EXPR_ARG (exp, 0);
+ gcc_assert (TREE_CODE (nlanes) == INTEGER_CST);
+ rtx lane_idx = expand_normal (CALL_EXPR_ARG (exp, 1));
+ if (CONST_INT_P (lane_idx))
+ neon_lane_bounds (lane_idx, 0, TREE_INT_CST_LOW (nlanes), exp);
+ else
+ error ("%Klane index must be a constant immediate", exp);
+ /* Don't generate any RTL. */
+ return const0_rtx;
+ }
+
+ arm_builtin_datum *d
+ = &neon_builtin_data[fcode - ARM_BUILTIN_NEON_PATTERN_START];
+
+ return arm_expand_builtin_1 (fcode, exp, target, d);
}
/* Expand an expression EXP that calls a built-in function,
@@ -2334,6 +2479,9 @@ arm_expand_builtin (tree exp,
int mask;
int imm;
+ if (fcode >= ARM_BUILTIN_ACLE_BASE)
+ return arm_expand_acle_builtin (fcode, exp, target);
+
if (fcode >= ARM_BUILTIN_NEON_BASE)
return arm_expand_neon_builtin (fcode, exp, target);
@@ -2368,6 +2516,12 @@ arm_expand_builtin (tree exp,
emit_insn (pat);
return target;
+ case ARM_BUILTIN_CMSE_NONSECURE_CALLER:
+ target = gen_reg_rtx (SImode);
+ op0 = arm_return_addr (0, NULL_RTX);
+ emit_insn (gen_addsi3 (target, op0, const1_rtx));
+ return target;
+
case ARM_BUILTIN_TEXTRMSB:
case ARM_BUILTIN_TEXTRMUB:
case ARM_BUILTIN_TEXTRMSH:
diff --git a/gcc/config/arm/arm-c.c b/gcc/config/arm/arm-c.c
index 4fbdfc50d03..c8ab1cb2377 100644
--- a/gcc/config/arm/arm-c.c
+++ b/gcc/config/arm/arm-c.c
@@ -76,6 +76,14 @@ arm_cpu_builtins (struct cpp_reader* pfile)
def_or_undef_macro (pfile, "__ARM_32BIT_STATE", TARGET_32BIT);
+ if (arm_arch8 && !arm_arch_notm)
+ {
+ if (arm_arch_cmse && use_cmse)
+ builtin_define_with_int_value ("__ARM_FEATURE_CMSE", 3);
+ else
+ builtin_define ("__ARM_FEATURE_CMSE");
+ }
+
if (TARGET_ARM_FEATURE_LDREX)
builtin_define_with_int_value ("__ARM_FEATURE_LDREX",
TARGET_ARM_FEATURE_LDREX);
diff --git a/gcc/config/arm/arm-cores.def b/gcc/config/arm/arm-cores.def
index 829b839c420..add6b21ee52 100644
--- a/gcc/config/arm/arm-cores.def
+++ b/gcc/config/arm/arm-cores.def
@@ -166,7 +166,9 @@ ARM_CORE("cortex-a15.cortex-a7", cortexa15cortexa7, cortexa7, 7A, ARM_FSET_MAKE_
ARM_CORE("cortex-a17.cortex-a7", cortexa17cortexa7, cortexa7, 7A, ARM_FSET_MAKE_CPU1 (FL_LDSCHED | FL_THUMB_DIV | FL_ARM_DIV | FL_FOR_ARCH7A), cortex_a12)
/* V8 Architecture Processors */
+ARM_CORE("cortex-m23", cortexm23, cortexm23, 8M_BASE, ARM_FSET_MAKE (FL_LDSCHED | FL_FOR_ARCH8M_BASE, FL2_CMSE), v6m)
ARM_CORE("cortex-a32", cortexa32, cortexa53, 8A, ARM_FSET_MAKE_CPU1 (FL_LDSCHED | FL_CRC32 | FL_FOR_ARCH8A), cortex_a35)
+ARM_CORE("cortex-m33", cortexm33, cortexm33, 8M_MAIN, ARM_FSET_MAKE (FL_LDSCHED | FL_ARCH7EM | FL_FOR_ARCH8M_MAIN, FL2_CMSE), v7m)
ARM_CORE("cortex-a35", cortexa35, cortexa53, 8A, ARM_FSET_MAKE_CPU1 (FL_LDSCHED | FL_CRC32 | FL_FOR_ARCH8A), cortex_a35)
ARM_CORE("cortex-a53", cortexa53, cortexa53, 8A, ARM_FSET_MAKE_CPU1 (FL_LDSCHED | FL_CRC32 | FL_FOR_ARCH8A), cortex_a53)
ARM_CORE("cortex-a57", cortexa57, cortexa57, 8A, ARM_FSET_MAKE_CPU1 (FL_LDSCHED | FL_CRC32 | FL_FOR_ARCH8A), cortex_a57)
diff --git a/gcc/config/arm/arm-flags.h b/gcc/config/arm/arm-flags.h
new file mode 100644
index 00000000000..d6709af5daa
--- /dev/null
+++ b/gcc/config/arm/arm-flags.h
@@ -0,0 +1,209 @@
+/* Flags used to identify the presence of processor capabilities.
+
+ Copyright (C) 2016 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ARM_FLAGS_H
+#define GCC_ARM_FLAGS_H
+
+/* Flags used to identify the presence of processor capabilities. */
+
+/* Bit values used to identify processor capabilities. */
+#define FL_NONE (0U) /* No flags. */
+#define FL_ANY (0xffffffffU) /* All flags. */
+#define FL_CO_PROC (1U << 0) /* Has external co-processor bus. */
+#define FL_ARCH3M (1U << 1) /* Extended multiply. */
+#define FL_MODE26 (1U << 2) /* 26-bit mode support. */
+#define FL_MODE32 (1U << 3) /* 32-bit mode support. */
+#define FL_ARCH4 (1U << 4) /* Architecture rel 4. */
+#define FL_ARCH5 (1U << 5) /* Architecture rel 5. */
+#define FL_THUMB (1U << 6) /* Thumb aware. */
+#define FL_LDSCHED (1U << 7) /* Load scheduling necessary. */
+#define FL_STRONG (1U << 8) /* StrongARM. */
+#define FL_ARCH5E (1U << 9) /* DSP extensions to v5. */
+#define FL_XSCALE (1U << 10) /* XScale. */
+/* spare (1U << 11) */
+#define FL_ARCH6 (1U << 12) /* Architecture rel 6. Adds
+ media instructions. */
+#define FL_VFPV2 (1U << 13) /* Vector Floating Point V2. */
+#define FL_WBUF (1U << 14) /* Schedule for write buffer ops.
+ Note: ARM6 & 7 derivatives only. */
+#define FL_ARCH6K (1U << 15) /* Architecture rel 6 K extensions. */
+#define FL_THUMB2 (1U << 16) /* Thumb-2. */
+#define FL_NOTM (1U << 17) /* Instructions not present in the 'M'
+ profile. */
+#define FL_THUMB_DIV (1U << 18) /* Hardware divide (Thumb mode). */
+#define FL_VFPV3 (1U << 19) /* Vector Floating Point V3. */
+#define FL_NEON (1U << 20) /* Neon instructions. */
+#define FL_ARCH7EM (1U << 21) /* Instructions present in the ARMv7E-M
+ architecture. */
+#define FL_ARCH7 (1U << 22) /* Architecture 7. */
+#define FL_ARM_DIV (1U << 23) /* Hardware divide (ARM mode). */
+#define FL_ARCH8 (1U << 24) /* Architecture 8. */
+#define FL_CRC32 (1U << 25) /* ARMv8 CRC32 instructions. */
+#define FL_SMALLMUL (1U << 26) /* Small multiply supported. */
+#define FL_NO_VOLATILE_CE (1U << 27) /* No volatile memory in IT block. */
+
+#define FL_IWMMXT (1U << 29) /* XScale v2 or "Intel Wireless MMX
+ technology". */
+#define FL_IWMMXT2 (1U << 30) /* "Intel Wireless MMX2
+ technology". */
+#define FL_ARCH6KZ (1U << 31) /* ARMv6KZ architecture. */
+
+#define FL2_ARCH8_1 (1U << 0) /* Architecture 8.1. */
+#define FL2_CMSE (1U << 3) /* ARMv8-M Security Extensions. */
+
+/* Flags that only affect tuning, not available instructions. */
+#define FL_TUNE (FL_WBUF | FL_VFPV2 | FL_STRONG | FL_LDSCHED \
+ | FL_CO_PROC)
+
+#define FL_FOR_ARCH2 FL_NOTM
+#define FL_FOR_ARCH3 (FL_FOR_ARCH2 | FL_MODE32)
+#define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M)
+#define FL_FOR_ARCH4 (FL_FOR_ARCH3M | FL_ARCH4)
+#define FL_FOR_ARCH4T (FL_FOR_ARCH4 | FL_THUMB)
+#define FL_FOR_ARCH5 (FL_FOR_ARCH4 | FL_ARCH5)
+#define FL_FOR_ARCH5T (FL_FOR_ARCH5 | FL_THUMB)
+#define FL_FOR_ARCH5E (FL_FOR_ARCH5 | FL_ARCH5E)
+#define FL_FOR_ARCH5TE (FL_FOR_ARCH5E | FL_THUMB)
+#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
+#define FL_FOR_ARCH6 (FL_FOR_ARCH5TE | FL_ARCH6)
+#define FL_FOR_ARCH6J FL_FOR_ARCH6
+#define FL_FOR_ARCH6K (FL_FOR_ARCH6 | FL_ARCH6K)
+#define FL_FOR_ARCH6Z FL_FOR_ARCH6
+#define FL_FOR_ARCH6ZK FL_FOR_ARCH6K
+#define FL_FOR_ARCH6KZ (FL_FOR_ARCH6K | FL_ARCH6KZ)
+#define FL_FOR_ARCH6T2 (FL_FOR_ARCH6 | FL_THUMB2)
+#define FL_FOR_ARCH6M (FL_FOR_ARCH6 & ~FL_NOTM)
+#define FL_FOR_ARCH7 ((FL_FOR_ARCH6T2 & ~FL_NOTM) | FL_ARCH7)
+#define FL_FOR_ARCH7A (FL_FOR_ARCH7 | FL_NOTM | FL_ARCH6K)
+#define FL_FOR_ARCH7VE (FL_FOR_ARCH7A | FL_THUMB_DIV | FL_ARM_DIV)
+#define FL_FOR_ARCH7R (FL_FOR_ARCH7A | FL_THUMB_DIV)
+#define FL_FOR_ARCH7M (FL_FOR_ARCH7 | FL_THUMB_DIV)
+#define FL_FOR_ARCH7EM (FL_FOR_ARCH7M | FL_ARCH7EM)
+#define FL_FOR_ARCH8A (FL_FOR_ARCH7VE | FL_ARCH8)
+#define FL2_FOR_ARCH8_1A FL2_ARCH8_1
+#define FL2_FOR_ARCH8_2A (FL2_FOR_ARCH8_1A | FL2_ARCH8_2)
+#define FL_FOR_ARCH8M_BASE (FL_FOR_ARCH6M | FL_ARCH8 | FL_THUMB_DIV)
+#define FL_FOR_ARCH8M_MAIN (FL_FOR_ARCH7M | FL_ARCH8)
+
+/* There are too many feature bits to fit in a single word so the set of cpu and
+ fpu capabilities is a structure. A feature set is created and manipulated
+ with the ARM_FSET macros. */
+
+typedef struct
+{
+ unsigned cpu[2];
+} arm_feature_set;
+
+
+/* Initialize a feature set. */
+
+#define ARM_FSET_MAKE(CPU1,CPU2) { { (CPU1), (CPU2) } }
+
+#define ARM_FSET_MAKE_CPU1(CPU1) ARM_FSET_MAKE ((CPU1), (FL_NONE))
+#define ARM_FSET_MAKE_CPU2(CPU2) ARM_FSET_MAKE ((FL_NONE), (CPU2))
+
+/* Accessors. */
+
+#define ARM_FSET_CPU1(S) ((S).cpu[0])
+#define ARM_FSET_CPU2(S) ((S).cpu[1])
+
+/* Useful combinations. */
+
+#define ARM_FSET_EMPTY ARM_FSET_MAKE (FL_NONE, FL_NONE)
+#define ARM_FSET_ANY ARM_FSET_MAKE (FL_ANY, FL_ANY)
+
+/* Tests for a specific CPU feature. */
+
+#define ARM_FSET_HAS_CPU1(A, F) \
+ (((A).cpu[0] & ((unsigned long)(F))) == ((unsigned long)(F)))
+#define ARM_FSET_HAS_CPU2(A, F) \
+ (((A).cpu[1] & ((unsigned long)(F))) == ((unsigned long)(F)))
+#define ARM_FSET_HAS_CPU(A, F1, F2) \
+ (ARM_FSET_HAS_CPU1 ((A), (F1)) && ARM_FSET_HAS_CPU2 ((A), (F2)))
+
+/* Add a feature to a feature set. */
+
+#define ARM_FSET_ADD_CPU1(DST, F) \
+ do { \
+ (DST).cpu[0] |= (F); \
+ } while (0)
+
+#define ARM_FSET_ADD_CPU2(DST, F) \
+ do { \
+ (DST).cpu[1] |= (F); \
+ } while (0)
+
+/* Remove a feature from a feature set. */
+
+#define ARM_FSET_DEL_CPU1(DST, F) \
+ do { \
+ (DST).cpu[0] &= ~(F); \
+ } while (0)
+
+#define ARM_FSET_DEL_CPU2(DST, F) \
+ do { \
+ (DST).cpu[1] &= ~(F); \
+ } while (0)
+
+/* Union of feature sets. */
+
+#define ARM_FSET_UNION(DST,F1,F2) \
+ do { \
+ (DST).cpu[0] = (F1).cpu[0] | (F2).cpu[0]; \
+ (DST).cpu[1] = (F1).cpu[1] | (F2).cpu[1]; \
+ } while (0)
+
+/* Intersection of feature sets. */
+
+#define ARM_FSET_INTER(DST,F1,F2) \
+ do { \
+ (DST).cpu[0] = (F1).cpu[0] & (F2).cpu[0]; \
+ (DST).cpu[1] = (F1).cpu[1] & (F2).cpu[1]; \
+ } while (0)
+
+/* Exclusive disjunction. */
+
+#define ARM_FSET_XOR(DST,F1,F2) \
+ do { \
+ (DST).cpu[0] = (F1).cpu[0] ^ (F2).cpu[0]; \
+ (DST).cpu[1] = (F1).cpu[1] ^ (F2).cpu[1]; \
+ } while (0)
+
+/* Difference of feature sets: F1 excluding the elements of F2. */
+
+#define ARM_FSET_EXCLUDE(DST,F1,F2) \
+ do { \
+ (DST).cpu[0] = (F1).cpu[0] & ~(F2).cpu[0]; \
+ (DST).cpu[1] = (F1).cpu[1] & ~(F2).cpu[1]; \
+ } while (0)
+
+/* Test for an empty feature set. */
+
+#define ARM_FSET_IS_EMPTY(A) \
+ (!((A).cpu[0]) && !((A).cpu[1]))
+
+/* Tests whether the cpu features of A are a subset of B. */
+
+#define ARM_FSET_CPU_SUBSET(A,B) \
+ ((((A).cpu[0] & (B).cpu[0]) == (A).cpu[0]) \
+ && (((A).cpu[1] & (B).cpu[1]) == (A).cpu[1]))
+
+#endif /* GCC_ARM_FLAGS_H */
diff --git a/gcc/config/arm/arm-opts.h b/gcc/config/arm/arm-opts.h
index a649ba59e47..8483e44b510 100644
--- a/gcc/config/arm/arm-opts.h
+++ b/gcc/config/arm/arm-opts.h
@@ -25,6 +25,8 @@
#ifndef ARM_OPTS_H
#define ARM_OPTS_H
+#include "arm-flags.h"
+
/* The various ARM cores. */
enum processor_type
{
diff --git a/gcc/config/arm/arm-protos.h b/gcc/config/arm/arm-protos.h
index 0083673b161..293df44c320 100644
--- a/gcc/config/arm/arm-protos.h
+++ b/gcc/config/arm/arm-protos.h
@@ -22,6 +22,8 @@
#ifndef GCC_ARM_PROTOS_H
#define GCC_ARM_PROTOS_H
+#include "arm-flags.h"
+
extern enum unwind_info_type arm_except_unwind_info (struct gcc_options *);
extern int use_return_insn (int, rtx);
extern bool use_simple_return_p (void);
@@ -31,6 +33,7 @@ extern int arm_volatile_func (void);
extern void arm_expand_prologue (void);
extern void arm_expand_epilogue (bool);
extern void arm_declare_function_name (FILE *, const char *, tree);
+extern void arm_asm_declare_function_name (FILE *, const char *, tree);
extern void thumb2_expand_return (bool);
extern const char *arm_strip_name_encoding (const char *);
extern void arm_asm_output_labelref (FILE *, const char *);
@@ -87,7 +90,7 @@ extern rtx neon_make_constant (rtx);
extern tree arm_builtin_vectorized_function (unsigned int, tree, tree);
extern void neon_expand_vector_init (rtx, rtx);
extern void neon_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT, const_tree);
-extern void neon_const_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
+extern void arm_const_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
extern HOST_WIDE_INT neon_element_bits (machine_mode);
extern void neon_emit_pair_result_insn (machine_mode,
rtx (*) (rtx, rtx, rtx, rtx),
@@ -130,6 +133,7 @@ extern int arm_const_double_inline_cost (rtx);
extern bool arm_const_double_by_parts (rtx);
extern bool arm_const_double_by_immediates (rtx);
extern void arm_emit_call_insn (rtx, rtx, bool);
+bool detect_cmse_nonsecure_call (tree);
extern const char *output_call (rtx *);
void arm_emit_movpair (rtx, rtx);
extern const char *output_mov_long_double_arm_from_arm (rtx *);
@@ -165,6 +169,8 @@ extern void arm_expand_compare_and_swap (rtx op[]);
extern void arm_split_compare_and_swap (rtx op[]);
extern void arm_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx, rtx);
extern rtx arm_load_tp (rtx);
+extern bool arm_coproc_builtin_available (enum unspecv);
+extern bool arm_coproc_ldc_stc_legitimate_address (rtx);
#if defined TREE_CODE
extern void arm_init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, tree);
@@ -344,184 +350,6 @@ extern void arm_cpu_cpp_builtins (struct cpp_reader *);
extern bool arm_is_constant_pool_ref (rtx);
-/* Flags used to identify the presence of processor capabilities. */
-
-/* Bit values used to identify processor capabilities. */
-#define FL_NONE (0) /* No flags. */
-#define FL_ANY (0xffffffff) /* All flags. */
-#define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
-#define FL_ARCH3M (1 << 1) /* Extended multiply */
-#define FL_MODE26 (1 << 2) /* 26-bit mode support */
-#define FL_MODE32 (1 << 3) /* 32-bit mode support */
-#define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
-#define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
-#define FL_THUMB (1 << 6) /* Thumb aware */
-#define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
-#define FL_STRONG (1 << 8) /* StrongARM */
-#define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
-#define FL_XSCALE (1 << 10) /* XScale */
-/* spare (1 << 11) */
-#define FL_ARCH6 (1 << 12) /* Architecture rel 6. Adds
- media instructions. */
-#define FL_VFPV2 (1 << 13) /* Vector Floating Point V2. */
-#define FL_WBUF (1 << 14) /* Schedule for write buffer ops.
- Note: ARM6 & 7 derivatives only. */
-#define FL_ARCH6K (1 << 15) /* Architecture rel 6 K extensions. */
-#define FL_THUMB2 (1 << 16) /* Thumb-2. */
-#define FL_NOTM (1 << 17) /* Instructions not present in the 'M'
- profile. */
-#define FL_THUMB_DIV (1 << 18) /* Hardware divide (Thumb mode). */
-#define FL_VFPV3 (1 << 19) /* Vector Floating Point V3. */
-#define FL_NEON (1 << 20) /* Neon instructions. */
-#define FL_ARCH7EM (1 << 21) /* Instructions present in the ARMv7E-M
- architecture. */
-#define FL_ARCH7 (1 << 22) /* Architecture 7. */
-#define FL_ARM_DIV (1 << 23) /* Hardware divide (ARM mode). */
-#define FL_ARCH8 (1 << 24) /* Architecture 8. */
-#define FL_CRC32 (1 << 25) /* ARMv8 CRC32 instructions. */
-
-#define FL_SMALLMUL (1 << 26) /* Small multiply supported. */
-#define FL_NO_VOLATILE_CE (1 << 27) /* No volatile memory in IT block. */
-
-#define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
-#define FL_IWMMXT2 (1 << 30) /* "Intel Wireless MMX2 technology". */
-#define FL_ARCH6KZ (1 << 31) /* ARMv6KZ architecture. */
-
-#define FL2_ARCH8_1 (1 << 0) /* Architecture 8.1. */
-
-/* Flags that only effect tuning, not available instructions. */
-#define FL_TUNE (FL_WBUF | FL_VFPV2 | FL_STRONG | FL_LDSCHED \
- | FL_CO_PROC)
-
-#define FL_FOR_ARCH2 FL_NOTM
-#define FL_FOR_ARCH3 (FL_FOR_ARCH2 | FL_MODE32)
-#define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M)
-#define FL_FOR_ARCH4 (FL_FOR_ARCH3M | FL_ARCH4)
-#define FL_FOR_ARCH4T (FL_FOR_ARCH4 | FL_THUMB)
-#define FL_FOR_ARCH5 (FL_FOR_ARCH4 | FL_ARCH5)
-#define FL_FOR_ARCH5T (FL_FOR_ARCH5 | FL_THUMB)
-#define FL_FOR_ARCH5E (FL_FOR_ARCH5 | FL_ARCH5E)
-#define FL_FOR_ARCH5TE (FL_FOR_ARCH5E | FL_THUMB)
-#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
-#define FL_FOR_ARCH6 (FL_FOR_ARCH5TE | FL_ARCH6)
-#define FL_FOR_ARCH6J FL_FOR_ARCH6
-#define FL_FOR_ARCH6K (FL_FOR_ARCH6 | FL_ARCH6K)
-#define FL_FOR_ARCH6Z FL_FOR_ARCH6
-#define FL_FOR_ARCH6KZ (FL_FOR_ARCH6K | FL_ARCH6KZ)
-#define FL_FOR_ARCH6T2 (FL_FOR_ARCH6 | FL_THUMB2)
-#define FL_FOR_ARCH6M (FL_FOR_ARCH6 & ~FL_NOTM)
-#define FL_FOR_ARCH7 ((FL_FOR_ARCH6T2 & ~FL_NOTM) | FL_ARCH7)
-#define FL_FOR_ARCH7A (FL_FOR_ARCH7 | FL_NOTM | FL_ARCH6K)
-#define FL_FOR_ARCH7VE (FL_FOR_ARCH7A | FL_THUMB_DIV | FL_ARM_DIV)
-#define FL_FOR_ARCH7R (FL_FOR_ARCH7A | FL_THUMB_DIV)
-#define FL_FOR_ARCH7M (FL_FOR_ARCH7 | FL_THUMB_DIV)
-#define FL_FOR_ARCH7EM (FL_FOR_ARCH7M | FL_ARCH7EM)
-#define FL_FOR_ARCH8A (FL_FOR_ARCH7VE | FL_ARCH8)
-#define FL2_FOR_ARCH8_1A FL2_ARCH8_1
-
-/* There are too many feature bits to fit in a single word so the set of cpu and
- fpu capabilities is a structure. A feature set is created and manipulated
- with the ARM_FSET macros. */
-
-typedef struct
-{
- unsigned long cpu[2];
-} arm_feature_set;
-
-
-/* Initialize a feature set. */
-
-#define ARM_FSET_MAKE(CPU1,CPU2) { { (CPU1), (CPU2) } }
-
-#define ARM_FSET_MAKE_CPU1(CPU1) ARM_FSET_MAKE ((CPU1), (FL_NONE))
-#define ARM_FSET_MAKE_CPU2(CPU2) ARM_FSET_MAKE ((FL_NONE), (CPU2))
-
-/* Accessors. */
-
-#define ARM_FSET_CPU1(S) ((S).cpu[0])
-#define ARM_FSET_CPU2(S) ((S).cpu[1])
-
-/* Useful combinations. */
-
-#define ARM_FSET_EMPTY ARM_FSET_MAKE (FL_NONE, FL_NONE)
-#define ARM_FSET_ANY ARM_FSET_MAKE (FL_ANY, FL_ANY)
-
-/* Tests for a specific CPU feature. */
-
-#define ARM_FSET_HAS_CPU1(A, F) \
- (((A).cpu[0] & ((unsigned long)(F))) == ((unsigned long)(F)))
-#define ARM_FSET_HAS_CPU2(A, F) \
- (((A).cpu[1] & ((unsigned long)(F))) == ((unsigned long)(F)))
-#define ARM_FSET_HAS_CPU(A, F1, F2) \
- (ARM_FSET_HAS_CPU1 ((A), (F1)) && ARM_FSET_HAS_CPU2 ((A), (F2)))
-
-/* Add a feature to a feature set. */
-
-#define ARM_FSET_ADD_CPU1(DST, F) \
- do { \
- (DST).cpu[0] |= (F); \
- } while (0)
-
-#define ARM_FSET_ADD_CPU2(DST, F) \
- do { \
- (DST).cpu[1] |= (F); \
- } while (0)
-
-/* Remove a feature from a feature set. */
-
-#define ARM_FSET_DEL_CPU1(DST, F) \
- do { \
- (DST).cpu[0] &= ~(F); \
- } while (0)
-
-#define ARM_FSET_DEL_CPU2(DST, F) \
- do { \
- (DST).cpu[1] &= ~(F); \
- } while (0)
-
-/* Union of feature sets. */
-
-#define ARM_FSET_UNION(DST,F1,F2) \
- do { \
- (DST).cpu[0] = (F1).cpu[0] | (F2).cpu[0]; \
- (DST).cpu[1] = (F1).cpu[1] | (F2).cpu[1]; \
- } while (0)
-
-/* Intersection of feature sets. */
-
-#define ARM_FSET_INTER(DST,F1,F2) \
- do { \
- (DST).cpu[0] = (F1).cpu[0] & (F2).cpu[0]; \
- (DST).cpu[1] = (F1).cpu[1] & (F2).cpu[1]; \
- } while (0)
-
-/* Exclusive disjunction. */
-
-#define ARM_FSET_XOR(DST,F1,F2) \
- do { \
- (DST).cpu[0] = (F1).cpu[0] ^ (F2).cpu[0]; \
- (DST).cpu[1] = (F1).cpu[1] ^ (F2).cpu[1]; \
- } while (0)
-
-/* Difference of feature sets: F1 excluding the elements of F2. */
-
-#define ARM_FSET_EXCLUDE(DST,F1,F2) \
- do { \
- (DST).cpu[0] = (F1).cpu[0] & ~(F2).cpu[0]; \
- (DST).cpu[1] = (F1).cpu[1] & ~(F2).cpu[1]; \
- } while (0)
-
-/* Test for an empty feature set. */
-
-#define ARM_FSET_IS_EMPTY(A) \
- (!((A).cpu[0]) && !((A).cpu[1]))
-
-/* Tests whether the cpu features of A are a subset of B. */
-
-#define ARM_FSET_CPU_SUBSET(A,B) \
- ((((A).cpu[0] & (B).cpu[0]) == (A).cpu[0]) \
- && (((A).cpu[1] & (B).cpu[1]) == (A).cpu[1]))
-
/* The bits in this mask specify which
instructions we are allowed to generate. */
extern arm_feature_set insn_flags;
diff --git a/gcc/config/arm/arm-tables.opt b/gcc/config/arm/arm-tables.opt
index adec6c95367..16b8d13cd7a 100644
--- a/gcc/config/arm/arm-tables.opt
+++ b/gcc/config/arm/arm-tables.opt
@@ -307,9 +307,15 @@ EnumValue
Enum(processor_type) String(cortex-a17.cortex-a7) Value(cortexa17cortexa7)
EnumValue
+Enum(processor_type) String(cortex-m23) Value(cortexm23)
+
+EnumValue
Enum(processor_type) String(cortex-a32) Value(cortexa32)
EnumValue
+Enum(processor_type) String(cortex-m33) Value(cortexm33)
+
+EnumValue
Enum(processor_type) String(cortex-a35) Value(cortexa35)
EnumValue
@@ -428,10 +434,19 @@ EnumValue
Enum(arm_arch) String(armv8.1-a+crc) Value(28)
EnumValue
-Enum(arm_arch) String(iwmmxt) Value(29)
+Enum(arm_arch) String(armv8-m.base) Value(29)
+
+EnumValue
+Enum(arm_arch) String(armv8-m.main) Value(30)
+
+EnumValue
+Enum(arm_arch) String(armv8-m.main+dsp) Value(31)
+
+EnumValue
+Enum(arm_arch) String(iwmmxt) Value(32)
EnumValue
-Enum(arm_arch) String(iwmmxt2) Value(30)
+Enum(arm_arch) String(iwmmxt2) Value(33)
Enum
Name(arm_fpu) Type(int)
diff --git a/gcc/config/arm/arm-tune.md b/gcc/config/arm/arm-tune.md
index d9f02a177e3..da9aa4cff84 100644
--- a/gcc/config/arm/arm-tune.md
+++ b/gcc/config/arm/arm-tune.md
@@ -32,8 +32,9 @@
cortexr4f,cortexr5,cortexr7,
cortexr8,cortexm7,cortexm4,
cortexm3,marvell_pj4,cortexa15cortexa7,
- cortexa17cortexa7,cortexa32,cortexa35,
- cortexa53,cortexa57,cortexa72,
- exynosm1,qdf24xx,xgene1,
- cortexa57cortexa53,cortexa72cortexa53"
+ cortexa17cortexa7,cortexm23,cortexa32,
+ cortexm33,cortexa35,cortexa53,
+ cortexa57,cortexa72,exynosm1,
+ qdf24xx,xgene1,cortexa57cortexa53,
+ cortexa72cortexa53"
(const (symbol_ref "((enum attr_tune) arm_tune)")))
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index c3c89b86635..d62c245f55c 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -27,6 +27,7 @@
#include "target.h"
#include "rtl.h"
#include "tree.h"
+#include "memmodel.h"
#include "cfghooks.h"
#include "df.h"
#include "tm_p.h"
@@ -61,6 +62,7 @@
#include "builtins.h"
#include "tm-constrs.h"
#include "rtl-iter.h"
+#include "gimplify.h"
/* This file should be included last. */
#include "target-def.h"
@@ -135,6 +137,8 @@ static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
#endif
+static tree arm_handle_cmse_nonsecure_entry (tree *, tree, tree, int, bool *);
+static tree arm_handle_cmse_nonsecure_call (tree *, tree, tree, int, bool *);
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (const_tree, const_tree);
@@ -215,8 +219,8 @@ static bool arm_return_in_memory (const_tree, const_tree);
static void arm_unwind_emit (FILE *, rtx_insn *);
static bool arm_output_ttype (rtx);
static void arm_asm_emit_except_personality (rtx);
-static void arm_asm_init_sections (void);
#endif
+static void arm_asm_init_sections (void);
static rtx arm_dwarf_register_span (rtx);
static tree arm_cxx_guard_type (void);
@@ -300,6 +304,11 @@ static void arm_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
static unsigned HOST_WIDE_INT arm_asan_shadow_offset (void);
static void arm_sched_fusion_priority (rtx_insn *, int, int *, int*);
+static section *arm_function_section (tree, enum node_frequency, bool, bool);
+static bool arm_asm_elf_flags_numeric (unsigned int flags, unsigned int *num);
+static unsigned int arm_elf_section_type_flags (tree decl, const char *name,
+ int reloc);
+
/* Table of machine attributes. */
static const struct attribute_spec arm_attribute_table[] =
@@ -343,6 +352,11 @@ static const struct attribute_spec arm_attribute_table[] =
{ "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute,
false },
#endif
+ /* ARMv8-M Security Extensions support. */
+ { "cmse_nonsecure_entry", 0, 0, true, false, false,
+ arm_handle_cmse_nonsecure_entry, false },
+ { "cmse_nonsecure_call", 0, 0, true, false, false,
+ arm_handle_cmse_nonsecure_call, true },
{ NULL, 0, 0, false, false, false, NULL, false }
};
@@ -587,8 +601,8 @@ static const struct attribute_spec arm_attribute_table[] =
#define TARGET_ASM_EMIT_EXCEPT_PERSONALITY arm_asm_emit_except_personality
#undef TARGET_ASM_INIT_SECTIONS
-#define TARGET_ASM_INIT_SECTIONS arm_asm_init_sections
#endif /* ARM_UNWIND_INFO */
+#define TARGET_ASM_INIT_SECTIONS arm_asm_init_sections
#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN arm_dwarf_register_span
@@ -735,6 +749,15 @@ static const struct attribute_spec arm_attribute_table[] =
#undef TARGET_SCHED_FUSION_PRIORITY
#define TARGET_SCHED_FUSION_PRIORITY arm_sched_fusion_priority
+#undef TARGET_ASM_FUNCTION_SECTION
+#define TARGET_ASM_FUNCTION_SECTION arm_function_section
+
+#undef TARGET_ASM_ELF_FLAGS_NUMERIC
+#define TARGET_ASM_ELF_FLAGS_NUMERIC arm_asm_elf_flags_numeric
+
+#undef TARGET_SECTION_TYPE_FLAGS
+#define TARGET_SECTION_TYPE_FLAGS arm_elf_section_type_flags
+
struct gcc_target targetm = TARGET_INITIALIZER;
/* Obstack for minipool constant handling. */
@@ -793,6 +816,9 @@ int arm_arch5 = 0;
/* Nonzero if this chip supports the ARM Architecture 5E extensions. */
int arm_arch5e = 0;
+/* Nonzero if this chip supports the ARM Architecture 5TE extensions. */
+int arm_arch5te = 0;
+
/* Nonzero if this chip supports the ARM Architecture 6 extensions. */
int arm_arch6 = 0;
@@ -892,6 +918,9 @@ int arm_condexec_masklen = 0;
/* Nonzero if chip supports the ARMv8 CRC instructions. */
int arm_arch_crc = 0;
+/* Nonzero if chip supports the ARMv8-M security extensions. */
+int arm_arch_cmse = 0;
+
/* Nonzero if the core has a very small, high-latency, multiply unit. */
int arm_m_profile_small_mul = 0;
@@ -2183,7 +2212,8 @@ const struct tune_params arm_cortex_m7_tune =
};
/* The arm_v6m_tune is duplicated from arm_cortex_tune, rather than
- arm_v6t2_tune. It is used for cortex-m0, cortex-m1 and cortex-m0plus. */
+ arm_v6t2_tune. It is used for cortex-m0, cortex-m1, cortex-m0plus and
+ cortex-m23. */
const struct tune_params arm_v6m_tune =
{
arm_9e_rtx_costs,
@@ -2264,9 +2294,11 @@ static const struct processors *arm_selected_arch;
static const struct processors *arm_selected_cpu;
static const struct processors *arm_selected_tune;
-/* The name of the preprocessor macro to define for this architecture. */
+/* The name of the preprocessor macro to define for this architecture.  PROFILE
+   is replaced by the architecture name (e.g. 8A) in arm_option_override () and
+   is thus chosen to be big enough to hold the longest architecture name.  */
-char arm_arch_name[] = "__ARM_ARCH_0UNK__";
+char arm_arch_name[] = "__ARM_ARCH_PROFILE__";
/* Available values for -mfpu=. */
@@ -2795,6 +2827,12 @@ arm_option_check_internal (struct gcc_options *opts)
&& ((!(arm_arch7 && !arm_arch_notm) && !arm_arch7em)
|| (TARGET_THUMB1_P (flags) || flag_pic || TARGET_NEON)))
error ("-mslow-flash-data only supports non-pic code on armv7-m targets");
+
+ /* We only support pure-code on Thumb-2 M-profile targets. */
+ if (target_pure_code
+ && (!arm_arch_thumb2 || arm_arch_notm || flag_pic || TARGET_NEON))
+ error ("-mpure-code only supports non-pic code on armv7-m targets");
+
}
/* Recompute the global settings depending on target attribute options. */
@@ -2907,7 +2945,8 @@ arm_option_override_internal (struct gcc_options *opts,
if (! opts_set->x_arm_restrict_it)
opts->x_arm_restrict_it = arm_arch8;
- if (!TARGET_THUMB2_P (opts->x_target_flags))
+ /* ARM execution state and M profile don't have [restrict] IT. */
+ if (!TARGET_THUMB2_P (opts->x_target_flags) || !arm_arch_notm)
opts->x_arm_restrict_it = 0;
/* Enable -munaligned-access by default for
@@ -2918,7 +2957,8 @@ arm_option_override_internal (struct gcc_options *opts,
Disable -munaligned-access by default for
- all pre-ARMv6 architecture-based processors
- - ARMv6-M architecture-based processors. */
+ - ARMv6-M architecture-based processors
+ - ARMv8-M Baseline processors. */
if (! opts_set->x_unaligned_access)
{
@@ -3161,6 +3201,7 @@ arm_option_override (void)
arm_arch4t = arm_arch4 && (ARM_FSET_HAS_CPU1 (insn_flags, FL_THUMB));
arm_arch5 = ARM_FSET_HAS_CPU1 (insn_flags, FL_ARCH5);
arm_arch5e = ARM_FSET_HAS_CPU1 (insn_flags, FL_ARCH5E);
+ arm_arch5te = arm_arch5e && ARM_FSET_HAS_CPU1 (insn_flags, FL_THUMB);
arm_arch6 = ARM_FSET_HAS_CPU1 (insn_flags, FL_ARCH6);
arm_arch6k = ARM_FSET_HAS_CPU1 (insn_flags, FL_ARCH6K);
arm_arch6kz = arm_arch6k && ARM_FSET_HAS_CPU1 (insn_flags, FL_ARCH6KZ);
@@ -3184,6 +3225,7 @@ arm_option_override (void)
arm_arch_no_volatile_ce = ARM_FSET_HAS_CPU1 (insn_flags, FL_NO_VOLATILE_CE);
arm_tune_cortex_a9 = (arm_tune == cortexa9) != 0;
arm_arch_crc = ARM_FSET_HAS_CPU1 (insn_flags, FL_CRC32);
+ arm_arch_cmse = ARM_FSET_HAS_CPU2 (insn_flags, FL2_CMSE);
arm_m_profile_small_mul = ARM_FSET_HAS_CPU1 (insn_flags, FL_SMALLMUL);
/* V5 code we generate is completely interworking capable, so we turn off
@@ -3432,10 +3474,14 @@ arm_option_override (void)
global_options.x_param_values,
global_options_set.x_param_values);
- /* Currently, for slow flash data, we just disable literal pools. */
- if (target_slow_flash_data)
+ /* Currently, for slow flash data, we just disable literal pools. We also
+ disable it for pure-code. */
+ if (target_slow_flash_data || target_pure_code)
arm_disable_literal_pool = true;
+ if (use_cmse && !arm_arch_cmse)
+ error ("target CPU does not support ARMv8-M Security Extensions");
+
/* Disable scheduling fusion by default if it's not armv7 processor
or doesn't prefer ldrd/strd. */
if (flag_schedule_fusion == 2
@@ -3568,6 +3614,9 @@ arm_compute_func_type (void)
else
type |= arm_isr_value (TREE_VALUE (a));
+ if (lookup_attribute ("cmse_nonsecure_entry", attr))
+ type |= ARM_FT_CMSE_ENTRY;
+
return type;
}
@@ -3794,6 +3843,11 @@ use_return_insn (int iscond, rtx sibling)
return 0;
}
+ /* ARMv8-M nonsecure entry functions need to use bxns to return and thus need
+ several instructions if anything needs to be popped. */
+ if (saved_int_regs && IS_CMSE_ENTRY (func_type))
+ return 0;
+
/* If there are saved registers but the LR isn't saved, then we need
two instructions for the return. */
if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
@@ -3899,7 +3953,7 @@ const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
case SET:
/* See if we can use movw. */
- if (arm_arch_thumb2 && (i & 0xffff0000) == 0)
+ if (TARGET_HAVE_MOVT && (i & 0xffff0000) == 0)
return 1;
else
/* Otherwise, try mvn. */
@@ -6561,6 +6615,185 @@ arm_handle_notshared_attribute (tree *node,
}
#endif
+/* This function returns true if a function with declaration FNDECL and type
+ FNTYPE uses the stack to pass arguments or return variables and false
+ otherwise. This is used for functions with the attributes
+ 'cmse_nonsecure_call' or 'cmse_nonsecure_entry' and this function will issue
+ diagnostic messages if the stack is used. NAME is the name of the attribute
+ used. */
+
+static bool
+cmse_func_args_or_return_in_stack (tree fndecl, tree name, tree fntype)
+{
+ function_args_iterator args_iter;
+ CUMULATIVE_ARGS args_so_far_v;
+ cumulative_args_t args_so_far;
+ bool first_param = true;
+ tree arg_type, prev_arg_type = NULL_TREE, ret_type;
+
+ /* Error out if any argument is passed on the stack. */
+ arm_init_cumulative_args (&args_so_far_v, fntype, NULL_RTX, fndecl);
+ args_so_far = pack_cumulative_args (&args_so_far_v);
+ FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
+ {
+ rtx arg_rtx;
+ machine_mode arg_mode = TYPE_MODE (arg_type);
+
+ prev_arg_type = arg_type;
+ if (VOID_TYPE_P (arg_type))
+ continue;
+
+ if (!first_param)
+ arm_function_arg_advance (args_so_far, arg_mode, arg_type, true);
+ arg_rtx = arm_function_arg (args_so_far, arg_mode, arg_type, true);
+ if (!arg_rtx
+ || arm_arg_partial_bytes (args_so_far, arg_mode, arg_type, true))
+ {
+ error ("%qE attribute not available to functions with arguments "
+ "passed on the stack", name);
+ return true;
+ }
+ first_param = false;
+ }
+
+ /* Error out for variadic functions since we cannot control how many
+ arguments will be passed and thus stack could be used. stdarg_p () is not
+ used for the checking to avoid browsing arguments twice. */
+ if (prev_arg_type != NULL_TREE && !VOID_TYPE_P (prev_arg_type))
+ {
+ error ("%qE attribute not available to functions with variable number "
+ "of arguments", name);
+ return true;
+ }
+
+ /* Error out if return value is passed on the stack. */
+ ret_type = TREE_TYPE (fntype);
+ if (arm_return_in_memory (ret_type, fntype))
+ {
+ error ("%qE attribute not available to functions that return value on "
+ "the stack", name);
+ return true;
+ }
+ return false;
+}
+
+/* Called upon detection of the use of the cmse_nonsecure_entry attribute, this
+ function will check whether the attribute is allowed here and will add the
+ attribute to the function declaration tree or otherwise issue a warning. */
+
+static tree
+arm_handle_cmse_nonsecure_entry (tree *node, tree name,
+ tree /* args */,
+ int /* flags */,
+ bool *no_add_attrs)
+{
+ tree fndecl;
+
+ if (!use_cmse)
+ {
+ *no_add_attrs = true;
+ warning (OPT_Wattributes, "%qE attribute ignored without -mcmse option.",
+ name);
+ return NULL_TREE;
+ }
+
+ /* Ignore attribute for function types. */
+ if (TREE_CODE (*node) != FUNCTION_DECL)
+ {
+ warning (OPT_Wattributes, "%qE attribute only applies to functions",
+ name);
+ *no_add_attrs = true;
+ return NULL_TREE;
+ }
+
+ fndecl = *node;
+
+ /* Warn for static linkage functions. */
+ if (!TREE_PUBLIC (fndecl))
+ {
+ warning (OPT_Wattributes, "%qE attribute has no effect on functions "
+ "with static linkage", name);
+ *no_add_attrs = true;
+ return NULL_TREE;
+ }
+
+ *no_add_attrs |= cmse_func_args_or_return_in_stack (fndecl, name,
+ TREE_TYPE (fndecl));
+ return NULL_TREE;
+}
+
+
+/* Called upon detection of the use of the cmse_nonsecure_call attribute, this
+ function will check whether the attribute is allowed here and will add the
+ attribute to the function type tree or otherwise issue a diagnostic. The
+ reason we check this at declaration time is to only allow the use of the
+ attribute with declarations of function pointers and not function
+ declarations. This function checks NODE is of the expected type and issues
+ diagnostics otherwise using NAME. If it is not of the expected type
+ *NO_ADD_ATTRS will be set to true. */
+
+static tree
+arm_handle_cmse_nonsecure_call (tree *node, tree name,
+ tree /* args */,
+ int /* flags */,
+ bool *no_add_attrs)
+{
+ tree decl = NULL_TREE, fntype = NULL_TREE;
+ tree type;
+
+ if (!use_cmse)
+ {
+ *no_add_attrs = true;
+ warning (OPT_Wattributes, "%qE attribute ignored without -mcmse option.",
+ name);
+ return NULL_TREE;
+ }
+
+ if (TREE_CODE (*node) == VAR_DECL || TREE_CODE (*node) == TYPE_DECL)
+ {
+ decl = *node;
+ fntype = TREE_TYPE (decl);
+ }
+
+ while (fntype != NULL_TREE && TREE_CODE (fntype) == POINTER_TYPE)
+ fntype = TREE_TYPE (fntype);
+
+ if (!decl || TREE_CODE (fntype) != FUNCTION_TYPE)
+ {
+ warning (OPT_Wattributes, "%qE attribute only applies to base type of a "
+ "function pointer", name);
+ *no_add_attrs = true;
+ return NULL_TREE;
+ }
+
+ *no_add_attrs |= cmse_func_args_or_return_in_stack (NULL, name, fntype);
+
+ if (*no_add_attrs)
+ return NULL_TREE;
+
+ /* Prevent trees being shared among function types with and without
+ cmse_nonsecure_call attribute. */
+ type = TREE_TYPE (decl);
+
+ type = build_distinct_type_copy (type);
+ TREE_TYPE (decl) = type;
+ fntype = type;
+
+ while (TREE_CODE (fntype) != FUNCTION_TYPE)
+ {
+ type = fntype;
+ fntype = TREE_TYPE (fntype);
+ fntype = build_distinct_type_copy (fntype);
+ TREE_TYPE (type) = fntype;
+ }
+
+ /* Construct a type attribute and add it to the function type. */
+ tree attrs = tree_cons (get_identifier ("cmse_nonsecure_call"), NULL_TREE,
+ TYPE_ATTRIBUTES (fntype));
+ TYPE_ATTRIBUTES (fntype) = attrs;
+ return NULL_TREE;
+}
+
/* Return 0 if the attributes for two types are incompatible, 1 if they
are compatible, and 2 if they are nearly compatible (which causes a
warning to be generated). */
@@ -6601,6 +6834,14 @@ arm_comp_type_attributes (const_tree type1, const_tree type2)
if (l1 != l2)
return 0;
+ l1 = lookup_attribute ("cmse_nonsecure_call",
+ TYPE_ATTRIBUTES (type1)) != NULL;
+ l2 = lookup_attribute ("cmse_nonsecure_call",
+ TYPE_ATTRIBUTES (type2)) != NULL;
+
+ if (l1 != l2)
+ return 0;
+
return 1;
}
@@ -6727,6 +6968,20 @@ arm_function_ok_for_sibcall (tree decl, tree exp)
if (IS_INTERRUPT (func_type))
return false;
+ /* ARMv8-M non-secure entry functions need to return with bxns which is only
+ generated for entry functions themselves. */
+ if (IS_CMSE_ENTRY (arm_current_func_type ()))
+ return false;
+
+ /* We do not allow ARMv8-M non-secure calls to be turned into sibling calls,
+ this would complicate matters for later code generation. */
+ if (TREE_CODE (exp) == CALL_EXPR)
+ {
+ tree fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
+ if (lookup_attribute ("cmse_nonsecure_call", TYPE_ATTRIBUTES (fntype)))
+ return false;
+ }
+
if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
{
/* Check that the return value locations are the same. For
@@ -8226,6 +8481,12 @@ arm_legitimate_constant_p_1 (machine_mode, rtx x)
static bool
thumb_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
+ /* Splitters for TARGET_USE_MOVT call arm_emit_movpair which creates high
+ RTX. These RTX must therefore be allowed for Thumb-1 so that when run
+ for ARMv8-M Baseline or later the result is valid. */
+ if (TARGET_HAVE_MOVT && GET_CODE (x) == HIGH)
+ x = XEXP (x, 0);
+
return (CONST_INT_P (x)
|| CONST_DOUBLE_P (x)
|| CONSTANT_ADDRESS_P (x)
@@ -8312,7 +8573,9 @@ thumb1_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
case CONST_INT:
if (outer == SET)
{
- if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
+ if (UINTVAL (x) < 256
+ /* 16-bit constant. */
+ || (TARGET_HAVE_MOVT && !(INTVAL (x) & 0xffff0000)))
return 0;
if (thumb_shiftable_const (INTVAL (x)))
return COSTS_N_INSNS (2);
@@ -9015,7 +9278,7 @@ static inline int
thumb1_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
{
machine_mode mode = GET_MODE (x);
- int words;
+ int words, cost;
switch (code)
{
@@ -9061,17 +9324,26 @@ thumb1_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
/* A SET doesn't have a mode, so let's look at the SET_DEST to get
the mode. */
words = ARM_NUM_INTS (GET_MODE_SIZE (GET_MODE (SET_DEST (x))));
- return COSTS_N_INSNS (words)
- + COSTS_N_INSNS (1) * (satisfies_constraint_J (SET_SRC (x))
- || satisfies_constraint_K (SET_SRC (x))
- /* thumb1_movdi_insn. */
- || ((words > 1) && MEM_P (SET_SRC (x))));
+ cost = COSTS_N_INSNS (words);
+ if (satisfies_constraint_J (SET_SRC (x))
+ || satisfies_constraint_K (SET_SRC (x))
+ /* Too big an immediate for a 2-byte mov, using MOVT. */
+ || (UINTVAL (SET_SRC (x)) >= 256
+ && TARGET_HAVE_MOVT
+ && satisfies_constraint_j (SET_SRC (x)))
+ /* thumb1_movdi_insn. */
+ || ((words > 1) && MEM_P (SET_SRC (x))))
+ cost += COSTS_N_INSNS (1);
+ return cost;
case CONST_INT:
if (outer == SET)
{
if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
return COSTS_N_INSNS (1);
+ /* movw is 4 bytes long. */
+ if (TARGET_HAVE_MOVT && !(INTVAL (x) & 0xffff0000))
+ return COSTS_N_INSNS (2);
/* See split "TARGET_THUMB1 && satisfies_constraint_J". */
if (INTVAL (x) >= -255 && INTVAL (x) <= -1)
return COSTS_N_INSNS (2);
@@ -12935,7 +13207,7 @@ neon_lane_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high,
/* Bounds-check constants. */
void
-neon_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
+arm_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
{
bounds_check (operand, low, high, NULL_TREE, "constant");
}
@@ -17097,10 +17369,6 @@ push_minipool_fix (rtx_insn *insn, HOST_WIDE_INT address, rtx *loc,
int
arm_max_const_double_inline_cost ()
{
- /* Let the value get synthesized to avoid the use of literal pools. */
- if (arm_disable_literal_pool)
- return 99;
-
return ((optimize_size || arm_ld_sched) ? 3 : 4);
}
@@ -17269,6 +17537,470 @@ note_invalid_constants (rtx_insn *insn, HOST_WIDE_INT address, int do_pushes)
return;
}
+/* This function computes the clear mask and PADDING_BITS_TO_CLEAR for structs
+ and unions in the context of ARMv8-M Security Extensions. It is used as a
+ helper function for both 'cmse_nonsecure_call' and 'cmse_nonsecure_entry'
+ functions. The PADDING_BITS_TO_CLEAR pointer can be the base to either one
+ or four masks, depending on whether it is being computed for a
+ 'cmse_nonsecure_entry' return value or a 'cmse_nonsecure_call' argument
+ respectively. The tree for the type of the argument or a field within an
+ argument is passed in ARG_TYPE, the current register this argument or field
+ starts in is kept in the pointer REGNO and updated accordingly, the bit this
+ argument or field starts at is passed in STARTING_BIT and the last used bit
+ is kept in LAST_USED_BIT which is also updated accordingly. */
+
+static unsigned HOST_WIDE_INT
+comp_not_to_clear_mask_str_un (tree arg_type, int * regno,
+ uint32_t * padding_bits_to_clear,
+ unsigned starting_bit, int * last_used_bit)
+
+{
+ unsigned HOST_WIDE_INT not_to_clear_reg_mask = 0;
+
+ if (TREE_CODE (arg_type) == RECORD_TYPE)
+ {
+ unsigned current_bit = starting_bit;
+ tree field;
+ long int offset, size;
+
+
+ field = TYPE_FIELDS (arg_type);
+ while (field)
+ {
+ /* The offset within a structure is always an offset from
+ the start of that structure. Make sure we take that into the
+ calculation of the register based offset that we use here. */
+ offset = starting_bit;
+ offset += TREE_INT_CST_ELT (DECL_FIELD_BIT_OFFSET (field), 0);
+ offset %= 32;
+
+ /* This is the actual size of the field, for bitfields this is the
+ bitfield width and not the container size. */
+ size = TREE_INT_CST_ELT (DECL_SIZE (field), 0);
+
+ if (*last_used_bit != offset)
+ {
+ if (offset < *last_used_bit)
+ {
+ /* This field's offset is before the 'last_used_bit', that
+ means this field goes on the next register. So we need to
+ pad the rest of the current register and increase the
+ register number. */
+ uint32_t mask;
+ /* Mask of bits *last_used_bit..31, i.e. the unused
+ remainder of the current register. */
+ mask = ((uint32_t)-1) - ((uint32_t) 1 << *last_used_bit);
+ mask++;
+
+ padding_bits_to_clear[*regno] |= mask;
+ not_to_clear_reg_mask |= HOST_WIDE_INT_1U << *regno;
+ (*regno)++;
+ }
+ else
+ {
+ /* Otherwise we pad the bits between the last field's end and
+ the start of the new field. */
+ uint32_t mask;
+
+ mask = ((uint32_t)-1) >> (32 - offset);
+ mask -= ((uint32_t) 1 << *last_used_bit) - 1;
+ padding_bits_to_clear[*regno] |= mask;
+ }
+ current_bit = offset;
+ }
+
+ /* Calculate further padding bits for inner structs/unions too. */
+ if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (field)))
+ {
+ *last_used_bit = current_bit;
+ not_to_clear_reg_mask
+ |= comp_not_to_clear_mask_str_un (TREE_TYPE (field), regno,
+ padding_bits_to_clear, offset,
+ last_used_bit);
+ }
+ else
+ {
+ /* Update 'current_bit' with this field's size. If the
+ 'current_bit' lies in a subsequent register, update 'regno' and
+ reset 'current_bit' to point to the current bit in that new
+ register. */
+ current_bit += size;
+ while (current_bit >= 32)
+ {
+ current_bit-=32;
+ not_to_clear_reg_mask |= HOST_WIDE_INT_1U << *regno;
+ (*regno)++;
+ }
+ *last_used_bit = current_bit;
+ }
+
+ field = TREE_CHAIN (field);
+ }
+ not_to_clear_reg_mask |= HOST_WIDE_INT_1U << *regno;
+ }
+ else if (TREE_CODE (arg_type) == UNION_TYPE)
+ {
+ tree field, field_t;
+ int i, regno_t, field_size;
+ int max_reg = -1;
+ int max_bit = -1;
+ uint32_t mask;
+ /* Start from all-ones: the loop below ANDs in each member's mask, so
+ only bits that are padding in every member of the union survive. */
+ uint32_t padding_bits_to_clear_res[NUM_ARG_REGS]
+ = {-1, -1, -1, -1};
+
+ /* To compute the padding bits in a union we only consider bits as
+ padding bits if they are always either a padding bit or fall outside a
+ field's size for all fields in the union. */
+ field = TYPE_FIELDS (arg_type);
+ while (field)
+ {
+ uint32_t padding_bits_to_clear_t[NUM_ARG_REGS]
+ = {0U, 0U, 0U, 0U};
+ int last_used_bit_t = *last_used_bit;
+ regno_t = *regno;
+ field_t = TREE_TYPE (field);
+
+ /* If the field's type is either a record or a union make sure to
+ compute their padding bits too. */
+ if (RECORD_OR_UNION_TYPE_P (field_t))
+ not_to_clear_reg_mask
+ |= comp_not_to_clear_mask_str_un (field_t, &regno_t,
+ &padding_bits_to_clear_t[0],
+ starting_bit, &last_used_bit_t);
+ else
+ {
+ field_size = TREE_INT_CST_ELT (DECL_SIZE (field), 0);
+ regno_t = (field_size / 32) + *regno;
+ last_used_bit_t = (starting_bit + field_size) % 32;
+ }
+
+ for (i = *regno; i < regno_t; i++)
+ {
+ /* For all but the last register used by this field only keep the
+ padding bits that were padding bits in this field. */
+ padding_bits_to_clear_res[i] &= padding_bits_to_clear_t[i];
+ }
+
+ /* For the last register, keep all padding bits that were padding
+ bits in this field and any padding bits that are still valid
+ as padding bits but fall outside of this field's size. */
+ mask = (((uint32_t) -1) - ((uint32_t) 1 << last_used_bit_t)) + 1;
+ padding_bits_to_clear_res[regno_t]
+ &= padding_bits_to_clear_t[regno_t] | mask;
+
+ /* Update the maximum size of the fields in terms of registers used
+ ('max_reg') and the 'last_used_bit' in said register. */
+ if (max_reg < regno_t)
+ {
+ max_reg = regno_t;
+ max_bit = last_used_bit_t;
+ }
+ else if (max_reg == regno_t && max_bit < last_used_bit_t)
+ max_bit = last_used_bit_t;
+
+ field = TREE_CHAIN (field);
+ }
+
+ /* Update the current padding_bits_to_clear using the intersection of the
+ padding bits of all the fields. */
+ for (i=*regno; i < max_reg; i++)
+ padding_bits_to_clear[i] |= padding_bits_to_clear_res[i];
+
+ /* Do not keep trailing padding bits, we do not know yet whether this
+ is the end of the argument. */
+ mask = ((uint32_t) 1 << max_bit) - 1;
+ padding_bits_to_clear[max_reg]
+ |= padding_bits_to_clear_res[max_reg] & mask;
+
+ *regno = max_reg;
+ *last_used_bit = max_bit;
+ }
+ else
+ /* This function should only be used for structs and unions. */
+ gcc_unreachable ();
+
+ return not_to_clear_reg_mask;
+}
+
+/* In the context of ARMv8-M Security Extensions, this function is used for both
+ 'cmse_nonsecure_call' and 'cmse_nonsecure_entry' functions to compute what
+ registers are used when returning or passing arguments, which is then
+ returned as a mask. It will also compute a mask to indicate padding/unused
+ bits for each of these registers, and passes this through the
+ PADDING_BITS_TO_CLEAR pointer. The tree of the argument type is passed in
+ ARG_TYPE, the rtl representation of the argument is passed in ARG_RTX and
+ the starting register used to pass this argument or return value is passed
+ in REGNO. It makes use of 'comp_not_to_clear_mask_str_un' to compute these
+ for struct and union types. */
+
+static unsigned HOST_WIDE_INT
+compute_not_to_clear_mask (tree arg_type, rtx arg_rtx, int regno,
+ uint32_t * padding_bits_to_clear)
+
+{
+ int last_used_bit = 0;
+ /* Bit N set in the result means register N carries live argument/return
+ data and must not be cleared. */
+ unsigned HOST_WIDE_INT not_to_clear_mask;
+
+ if (RECORD_OR_UNION_TYPE_P (arg_type))
+ {
+ not_to_clear_mask
+ = comp_not_to_clear_mask_str_un (arg_type, &regno,
+ padding_bits_to_clear, 0,
+ &last_used_bit);
+
+
+ /* If the 'last_used_bit' is not zero, that means we are still using a
+ part of the last 'regno'. In such cases we must clear the trailing
+ bits. Otherwise we are not using regno and we should mark it as to
+ clear. */
+ if (last_used_bit != 0)
+ padding_bits_to_clear[regno]
+ |= ((uint32_t)-1) - ((uint32_t) 1 << last_used_bit) + 1;
+ else
+ not_to_clear_mask &= ~(HOST_WIDE_INT_1U << regno);
+ }
+ else
+ {
+ not_to_clear_mask = 0;
+ /* We are not dealing with structs nor unions. So these arguments may be
+ passed in floating point registers too. In some cases a BLKmode is
+ used when returning or passing arguments in multiple VFP registers. */
+ if (GET_MODE (arg_rtx) == BLKmode)
+ {
+ int i, arg_regs;
+ rtx reg;
+
+ /* This should really only occur when dealing with the hard-float
+ ABI. */
+ gcc_assert (TARGET_HARD_FLOAT_ABI);
+
+ for (i = 0; i < XVECLEN (arg_rtx, 0); i++)
+ {
+ reg = XEXP (XVECEXP (arg_rtx, 0, i), 0);
+ gcc_assert (REG_P (reg));
+
+ not_to_clear_mask |= HOST_WIDE_INT_1U << REGNO (reg);
+
+ /* If we are dealing with DF mode, make sure we don't
+ clear either of the registers it addresses. */
+ arg_regs = ARM_NUM_REGS (GET_MODE (reg));
+ if (arg_regs > 1)
+ {
+ unsigned HOST_WIDE_INT mask;
+ mask = HOST_WIDE_INT_1U << (REGNO (reg) + arg_regs);
+ mask -= HOST_WIDE_INT_1U << REGNO (reg);
+ not_to_clear_mask |= mask;
+ }
+ }
+ }
+ else
+ {
+ /* Otherwise we can rely on the MODE to determine how many registers
+ are being used by this argument. */
+ int arg_regs = ARM_NUM_REGS (GET_MODE (arg_rtx));
+ not_to_clear_mask |= HOST_WIDE_INT_1U << REGNO (arg_rtx);
+ if (arg_regs > 1)
+ {
+ unsigned HOST_WIDE_INT
+ mask = HOST_WIDE_INT_1U << (REGNO (arg_rtx) + arg_regs);
+ mask -= HOST_WIDE_INT_1U << REGNO (arg_rtx);
+ not_to_clear_mask |= mask;
+ }
+ }
+ }
+
+ return not_to_clear_mask;
+}
+
+/* Saves callee saved registers, clears callee saved registers and caller saved
+ registers not used to pass arguments before a cmse_nonsecure_call. And
+ restores the callee saved registers after. */
+
+static void
+cmse_nonsecure_call_clear_caller_saved (void)
+{
+ basic_block bb;
+
+ FOR_EACH_BB_FN (bb, cfun)
+ {
+ rtx_insn *insn;
+
+ FOR_BB_INSNS (bb, insn)
+ {
+ uint64_t to_clear_mask, float_mask;
+ rtx_insn *seq;
+ rtx pat, call, unspec, reg, cleared_reg, tmp;
+ unsigned int regno, maxregno;
+ rtx address;
+ CUMULATIVE_ARGS args_so_far_v;
+ cumulative_args_t args_so_far;
+ tree arg_type, fntype;
+ bool using_r4, first_param = true;
+ function_args_iterator args_iter;
+ uint32_t padding_bits_to_clear[4] = {0U, 0U, 0U, 0U};
+ uint32_t * padding_bits_to_clear_ptr = &padding_bits_to_clear[0];
+
+ if (!NONDEBUG_INSN_P (insn))
+ continue;
+
+ if (!CALL_P (insn))
+ continue;
+
+ pat = PATTERN (insn);
+ gcc_assert (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 0);
+ call = XVECEXP (pat, 0, 0);
+
+ /* Get the real call RTX if the insn sets a value, ie. returns. */
+ if (GET_CODE (call) == SET)
+ call = SET_SRC (call);
+
+ /* Check if it is a cmse_nonsecure_call. */
+ unspec = XEXP (call, 0);
+ if (GET_CODE (unspec) != UNSPEC
+ || XINT (unspec, 1) != UNSPEC_NONSECURE_MEM)
+ continue;
+
+ /* Determine the caller-saved registers we need to clear. */
+ to_clear_mask = (1LL << (NUM_ARG_REGS)) - 1;
+ maxregno = NUM_ARG_REGS - 1;
+ /* Only look at the caller-saved floating point registers in case of
+ -mfloat-abi=hard. For -mfloat-abi=softfp we will be using the
+ lazy store and loads which clear both caller- and callee-saved
+ registers. */
+ if (TARGET_HARD_FLOAT_ABI)
+ {
+ float_mask = (1LL << (D7_VFP_REGNUM + 1)) - 1;
+ float_mask &= ~((1LL << FIRST_VFP_REGNUM) - 1);
+ to_clear_mask |= float_mask;
+ maxregno = D7_VFP_REGNUM;
+ }
+
+ /* Make sure the register used to hold the function address is not
+ cleared. */
+ address = RTVEC_ELT (XVEC (unspec, 0), 0);
+ gcc_assert (MEM_P (address));
+ gcc_assert (REG_P (XEXP (address, 0)));
+ to_clear_mask &= ~(1LL << REGNO (XEXP (address, 0)));
+
+ /* Set basic block of call insn so that df rescan is performed on
+ insns inserted here. */
+ set_block_for_insn (insn, bb);
+ df_set_flags (DF_DEFER_INSN_RESCAN);
+ start_sequence ();
+
+ /* Make sure the scheduler doesn't schedule other insns beyond
+ here. */
+ emit_insn (gen_blockage ());
+
+ /* Walk through all arguments and clear registers appropriately. */
+ fntype = TREE_TYPE (MEM_EXPR (address));
+ arm_init_cumulative_args (&args_so_far_v, fntype, NULL_RTX,
+ NULL_TREE);
+ args_so_far = pack_cumulative_args (&args_so_far_v);
+ FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
+ {
+ rtx arg_rtx;
+ machine_mode arg_mode = TYPE_MODE (arg_type);
+
+ if (VOID_TYPE_P (arg_type))
+ continue;
+
+ if (!first_param)
+ arm_function_arg_advance (args_so_far, arg_mode, arg_type,
+ true);
+
+ arg_rtx = arm_function_arg (args_so_far, arg_mode, arg_type,
+ true);
+ gcc_assert (REG_P (arg_rtx));
+ to_clear_mask
+ &= ~compute_not_to_clear_mask (arg_type, arg_rtx,
+ REGNO (arg_rtx),
+ padding_bits_to_clear_ptr);
+
+ first_param = false;
+ }
+
+ /* Clear padding bits where needed. */
+ cleared_reg = XEXP (address, 0);
+ reg = gen_rtx_REG (SImode, IP_REGNUM);
+ using_r4 = false;
+ for (regno = R0_REGNUM; regno < NUM_ARG_REGS; regno++)
+ {
+ if (padding_bits_to_clear[regno] == 0)
+ continue;
+
+ /* If this is a Thumb-1 target copy the address of the function
+ we are calling from 'r4' into 'ip' such that we can use r4 to
+ clear the unused bits in the arguments. */
+ if (TARGET_THUMB1 && !using_r4)
+ {
+ using_r4 = true;
+ reg = cleared_reg;
+ emit_move_insn (gen_rtx_REG (SImode, IP_REGNUM),
+ reg);
+ }
+
+ /* Load the low 16 bits of the negated padding mask. */
+ tmp = GEN_INT ((((~padding_bits_to_clear[regno]) << 16u) >> 16u));
+ emit_move_insn (reg, tmp);
+ /* Also fill the top half of the negated
+ padding_bits_to_clear. */
+ if (((~padding_bits_to_clear[regno]) >> 16) > 0)
+ {
+ tmp = GEN_INT ((~padding_bits_to_clear[regno]) >> 16);
+ emit_insn (gen_rtx_SET (gen_rtx_ZERO_EXTRACT (SImode, reg,
+ GEN_INT (16),
+ GEN_INT (16)),
+ tmp));
+ }
+
+ emit_insn (gen_andsi3 (gen_rtx_REG (SImode, regno),
+ gen_rtx_REG (SImode, regno),
+ reg));
+
+ }
+ if (using_r4)
+ emit_move_insn (cleared_reg,
+ gen_rtx_REG (SImode, IP_REGNUM));
+
+ /* We use right shift and left shift to clear the LSB of the address
+ we jump to instead of using bic, to avoid having to use an extra
+ register on Thumb-1. */
+ tmp = gen_rtx_LSHIFTRT (SImode, cleared_reg, const1_rtx);
+ emit_insn (gen_rtx_SET (cleared_reg, tmp));
+ tmp = gen_rtx_ASHIFT (SImode, cleared_reg, const1_rtx);
+ emit_insn (gen_rtx_SET (cleared_reg, tmp));
+
+ /* Clearing all registers that leak before doing a non-secure
+ call. */
+ for (regno = R0_REGNUM; regno <= maxregno; regno++)
+ {
+ if (!(to_clear_mask & (1LL << regno)))
+ continue;
+
+ /* If regno is an even vfp register and its successor is also to
+ be cleared, use vmov. */
+ if (IS_VFP_REGNUM (regno))
+ {
+ if (TARGET_VFP_DOUBLE
+ && VFP_REGNO_OK_FOR_DOUBLE (regno)
+ && to_clear_mask & (1LL << (regno + 1)))
+ emit_move_insn (gen_rtx_REG (DFmode, regno++),
+ CONST0_RTX (DFmode));
+ else
+ emit_move_insn (gen_rtx_REG (SFmode, regno),
+ CONST0_RTX (SFmode));
+ }
+ else
+ emit_move_insn (gen_rtx_REG (SImode, regno), cleared_reg);
+ }
+
+ seq = get_insns ();
+ end_sequence ();
+ emit_insn_before (seq, insn);
+
+ }
+ }
+}
+
/* Rewrite move insn into subtract of 0 if the condition codes will
be useful in next conditional jump insn. */
@@ -17569,6 +18301,8 @@ arm_reorg (void)
HOST_WIDE_INT address = 0;
Mfix * fix;
+ if (use_cmse)
+ cmse_nonsecure_call_clear_caller_saved ();
if (TARGET_THUMB1)
thumb1_reorg ();
else if (TARGET_THUMB2)
@@ -17941,6 +18675,23 @@ vfp_emit_fstmd (int base_reg, int count)
return count * 8;
}
+/* Returns true if -mcmse has been passed and the function pointed to by 'addr'
+ has the cmse_nonsecure_call attribute and returns false otherwise. */
+
+bool
+detect_cmse_nonsecure_call (tree addr)
+{
+ /* Without an address expression we cannot inspect the callee's type. */
+ if (!addr)
+ return FALSE;
+
+ tree fntype = TREE_TYPE (addr);
+ if (use_cmse && lookup_attribute ("cmse_nonsecure_call",
+ TYPE_ATTRIBUTES (fntype)))
+ return TRUE;
+ return FALSE;
+}
+
+
/* Emit a call instruction with pattern PAT. ADDR is the address of
the call target. */
@@ -19563,6 +20314,7 @@ output_return_instruction (rtx operand, bool really_return, bool reverse,
(e.g. interworking) then we can load the return address
directly into the PC. Otherwise we must load it into LR. */
if (really_return
+ && !IS_CMSE_ENTRY (func_type)
&& (IS_INTERRUPT (func_type) || !TARGET_INTERWORK))
return_reg = reg_names[PC_REGNUM];
else
@@ -19703,8 +20455,45 @@ output_return_instruction (rtx operand, bool really_return, bool reverse,
break;
default:
+ if (IS_CMSE_ENTRY (func_type))
+ {
+ /* Check if we have to clear the 'GE bits' which is only used if
+ parallel add and subtraction instructions are available. */
+ if (TARGET_INT_SIMD)
+ snprintf (instr, sizeof (instr),
+ "msr%s\tAPSR_nzcvqg, %%|lr", conditional);
+ else
+ snprintf (instr, sizeof (instr),
+ "msr%s\tAPSR_nzcvq, %%|lr", conditional);
+
+ output_asm_insn (instr, & operand);
+ if (TARGET_HARD_FLOAT && !TARGET_THUMB1)
+ {
+ /* Clear the cumulative exception-status bits (0-4,7) and the
+ condition code bits (28-31) of the FPSCR. We need to
+ remember to clear the first scratch register used (IP) and
+ save and restore the second (r4). */
+ snprintf (instr, sizeof (instr), "push\t{%%|r4}");
+ output_asm_insn (instr, & operand);
+ snprintf (instr, sizeof (instr), "vmrs\t%%|ip, fpscr");
+ output_asm_insn (instr, & operand);
+ snprintf (instr, sizeof (instr), "movw\t%%|r4, #65376");
+ output_asm_insn (instr, & operand);
+ snprintf (instr, sizeof (instr), "movt\t%%|r4, #4095");
+ output_asm_insn (instr, & operand);
+ snprintf (instr, sizeof (instr), "and\t%%|ip, %%|r4");
+ output_asm_insn (instr, & operand);
+ snprintf (instr, sizeof (instr), "vmsr\tfpscr, %%|ip");
+ output_asm_insn (instr, & operand);
+ snprintf (instr, sizeof (instr), "pop\t{%%|r4}");
+ output_asm_insn (instr, & operand);
+ snprintf (instr, sizeof (instr), "mov\t%%|ip, %%|lr");
+ output_asm_insn (instr, & operand);
+ }
+ snprintf (instr, sizeof (instr), "bxns\t%%|lr");
+ }
/* Use bx if it's available. */
- if (arm_arch5 || arm_arch4t)
+ else if (arm_arch5 || arm_arch4t)
sprintf (instr, "bx%s\t%%|lr", conditional);
else
sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
@@ -19717,6 +20506,44 @@ output_return_instruction (rtx operand, bool really_return, bool reverse,
return "";
}
+/* Output in FILE asm statements needed to declare the NAME of the function
+ defined by its DECL node. */
+
+void
+arm_asm_declare_function_name (FILE *file, const char *name, tree decl)
+{
+ size_t cmse_name_len;
+ char *cmse_name = 0;
+ char cmse_prefix[] = "__acle_se_";
+
+ /* When compiling with ARMv8-M Security Extensions enabled, we should print an
+ extra function label for each function with the 'cmse_nonsecure_entry'
+ attribute. This extra function label should be prepended with
+ '__acle_se_', telling the linker that it needs to create secure gateway
+ veneers for this function. */
+ if (use_cmse && lookup_attribute ("cmse_nonsecure_entry",
+ DECL_ATTRIBUTES (decl)))
+ {
+ /* sizeof (cmse_prefix) includes the terminating NUL, so this covers
+ prefix + name + NUL. */
+ cmse_name_len = sizeof (cmse_prefix) + strlen (name);
+ cmse_name = XALLOCAVEC (char, cmse_name_len);
+ snprintf (cmse_name, cmse_name_len, "%s%s", cmse_prefix, name);
+ targetm.asm_out.globalize_label (file, cmse_name);
+
+ ARM_DECLARE_FUNCTION_NAME (file, cmse_name, decl);
+ ASM_OUTPUT_TYPE_DIRECTIVE (file, cmse_name, "function");
+ }
+
+ ARM_DECLARE_FUNCTION_NAME (file, name, decl);
+ ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
+ ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
+ ASM_OUTPUT_LABEL (file, name);
+
+ if (cmse_name)
+ ASM_OUTPUT_LABEL (file, cmse_name);
+
+ ARM_OUTPUT_FN_UNWIND (file, TRUE);
+}
+
/* Write the function name into the code section, directly preceding
the function prologue.
@@ -19766,10 +20593,6 @@ arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
{
unsigned long func_type;
- /* ??? Do we want to print some of the below anyway? */
- if (TARGET_THUMB1)
- return;
-
/* Sanity check. */
gcc_assert (!arm_ccfsm_state && !arm_target_insn);
@@ -19804,6 +20627,8 @@ arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
if (IS_STACKALIGN (func_type))
asm_fprintf (f, "\t%@ Stack Align: May be called with mis-aligned SP.\n");
+ if (IS_CMSE_ENTRY (func_type))
+ asm_fprintf (f, "\t%@ Non-secure entry function: called from non-secure code.\n");
asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
crtl->args.size,
@@ -23885,8 +24710,8 @@ thumb_pop (FILE *f, unsigned long mask)
if (mask & (1 << PC_REGNUM))
{
/* Catch popping the PC. */
- if (TARGET_INTERWORK || TARGET_BACKTRACE
- || crtl->calls_eh_return)
+ if (TARGET_INTERWORK || TARGET_BACKTRACE || crtl->calls_eh_return
+ || IS_CMSE_ENTRY (arm_current_func_type ()))
{
/* The PC is never popped directly, instead
it is popped into r3 and then BX is used. */
@@ -23947,7 +24772,14 @@ thumb_exit (FILE *f, int reg_containing_return_addr)
if (crtl->calls_eh_return)
asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
- asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
+ if (IS_CMSE_ENTRY (arm_current_func_type ()))
+ {
+ asm_fprintf (f, "\tmsr\tAPSR_nzcvq, %r\n",
+ reg_containing_return_addr);
+ asm_fprintf (f, "\tbxns\t%r\n", reg_containing_return_addr);
+ }
+ else
+ asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
return;
}
/* Otherwise if we are not supporting interworking and we have not created
@@ -23956,7 +24788,8 @@ thumb_exit (FILE *f, int reg_containing_return_addr)
else if (!TARGET_INTERWORK
&& !TARGET_BACKTRACE
&& !is_called_in_ARM_mode (current_function_decl)
- && !crtl->calls_eh_return)
+ && !crtl->calls_eh_return
+ && !IS_CMSE_ENTRY (arm_current_func_type ()))
{
asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
return;
@@ -24179,7 +25012,21 @@ thumb_exit (FILE *f, int reg_containing_return_addr)
asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
/* Return to caller. */
- asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
+ if (IS_CMSE_ENTRY (arm_current_func_type ()))
+ {
+ /* This is for the cases where LR is not being used to contain the return
+ address. It may therefore contain information that we might not want
+ to leak, hence it must be cleared. The value in R0 will never be a
+ secret at this point, so it is safe to use it, see the clearing code
+ in 'cmse_nonsecure_entry_clear_before_return'. */
+ if (reg_containing_return_addr != LR_REGNUM)
+ asm_fprintf (f, "\tmov\tlr, r0\n");
+
+ asm_fprintf (f, "\tmsr\tAPSR_nzcvq, %r\n", reg_containing_return_addr);
+ asm_fprintf (f, "\tbxns\t%r\n", reg_containing_return_addr);
+ }
+ else
+ asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
}
/* Scan INSN just before assembler is output for it.
@@ -25044,6 +25891,149 @@ thumb1_expand_prologue (void)
cfun->machine->lr_save_eliminated = 0;
}
+/* Clear caller saved registers not used to pass return values and leaked
+ condition flags before exiting a cmse_nonsecure_entry function. */
+
+void
+cmse_nonsecure_entry_clear_before_return (void)
+{
+ uint64_t to_clear_mask[2];
+ uint32_t padding_bits_to_clear = 0;
+ uint32_t * padding_bits_to_clear_ptr = &padding_bits_to_clear;
+ int regno, maxregno = IP_REGNUM;
+ tree result_type;
+ rtx result_rtl;
+
+ to_clear_mask[0] = (1ULL << (NUM_ARG_REGS)) - 1;
+ to_clear_mask[0] |= (1ULL << IP_REGNUM);
+
+ /* If we are not dealing with -mfloat-abi=soft we will need to clear VFP
+ registers. We also check that TARGET_HARD_FLOAT and !TARGET_THUMB1 hold
+ to make sure the instructions used to clear them are present. */
+ if (TARGET_HARD_FLOAT && !TARGET_THUMB1)
+ {
+ uint64_t float_mask = (1ULL << (D7_VFP_REGNUM + 1)) - 1;
+ maxregno = LAST_VFP_REGNUM;
+
+ float_mask &= ~((1ULL << FIRST_VFP_REGNUM) - 1);
+ to_clear_mask[0] |= float_mask;
+
+ /* Registers 64 and above are tracked in the second mask word. */
+ float_mask = (1ULL << (maxregno - 63)) - 1;
+ to_clear_mask[1] = float_mask;
+
+ /* Make sure we don't clear the two scratch registers used to clear the
+ relevant FPSCR bits in output_return_instruction. */
+ emit_use (gen_rtx_REG (SImode, IP_REGNUM));
+ to_clear_mask[0] &= ~(1ULL << IP_REGNUM);
+ emit_use (gen_rtx_REG (SImode, 4));
+ to_clear_mask[0] &= ~(1ULL << 4);
+ }
+
+ /* If the user has defined registers to be caller saved, these are no longer
+ restored by the function before returning and must thus be cleared for
+ security purposes. */
+ for (regno = NUM_ARG_REGS; regno < LAST_VFP_REGNUM; regno++)
+ {
+ /* We do not touch registers that can be used to pass arguments as per
+ the AAPCS, since these should never be made callee-saved by user
+ options. */
+ if (IN_RANGE (regno, FIRST_VFP_REGNUM, D7_VFP_REGNUM))
+ continue;
+ if (IN_RANGE (regno, IP_REGNUM, PC_REGNUM))
+ continue;
+ if (call_used_regs[regno])
+ to_clear_mask[regno / 64] |= (1ULL << (regno % 64));
+ }
+
+ /* Make sure we do not clear the registers used to return the result in. */
+ result_type = TREE_TYPE (DECL_RESULT (current_function_decl));
+ if (!VOID_TYPE_P (result_type))
+ {
+ result_rtl = arm_function_value (result_type, current_function_decl, 0);
+
+ /* No need to check that we return in registers, because we don't
+ support returning on stack yet. */
+ to_clear_mask[0]
+ &= ~compute_not_to_clear_mask (result_type, result_rtl, 0,
+ padding_bits_to_clear_ptr);
+ }
+
+ if (padding_bits_to_clear != 0)
+ {
+ rtx reg_rtx;
+ /* Padding bits to clear is not 0 so we know we are dealing with
+ returning a composite type, which only uses r0. Let's make sure that
+ r1-r3 is cleared too, we will use r1 as a scratch register. */
+ gcc_assert ((to_clear_mask[0] & 0xe) == 0xe);
+
+ reg_rtx = gen_rtx_REG (SImode, R1_REGNUM);
+
+ /* Fill the lower half of the negated padding_bits_to_clear. */
+ emit_move_insn (reg_rtx,
+ GEN_INT ((((~padding_bits_to_clear) << 16u) >> 16u)));
+
+ /* Also fill the top half of the negated padding_bits_to_clear. */
+ if (((~padding_bits_to_clear) >> 16) > 0)
+ emit_insn (gen_rtx_SET (gen_rtx_ZERO_EXTRACT (SImode, reg_rtx,
+ GEN_INT (16),
+ GEN_INT (16)),
+ GEN_INT ((~padding_bits_to_clear) >> 16)));
+
+ emit_insn (gen_andsi3 (gen_rtx_REG (SImode, R0_REGNUM),
+ gen_rtx_REG (SImode, R0_REGNUM),
+ reg_rtx));
+ }
+
+ for (regno = R0_REGNUM; regno <= maxregno; regno++)
+ {
+ if (!(to_clear_mask[regno / 64] & (1ULL << (regno % 64))))
+ continue;
+
+ if (IS_VFP_REGNUM (regno))
+ {
+ /* NOTE(review): VFP registers are overwritten with CONST1_RTX
+ rather than zero; any constant wipes the previous contents --
+ confirm this is the intended "clear" value. */
+ /* If regno is an even vfp register and its successor is also to
+ be cleared, use vmov. */
+ if (TARGET_VFP_DOUBLE
+ && VFP_REGNO_OK_FOR_DOUBLE (regno)
+ && to_clear_mask[regno / 64] & (1ULL << ((regno % 64) + 1)))
+ {
+ emit_move_insn (gen_rtx_REG (DFmode, regno),
+ CONST1_RTX (DFmode));
+ emit_use (gen_rtx_REG (DFmode, regno));
+ regno++;
+ }
+ else
+ {
+ emit_move_insn (gen_rtx_REG (SFmode, regno),
+ CONST1_RTX (SFmode));
+ emit_use (gen_rtx_REG (SFmode, regno));
+ }
+ }
+ else
+ {
+ if (TARGET_THUMB1)
+ {
+ if (regno == R0_REGNUM)
+ emit_move_insn (gen_rtx_REG (SImode, regno),
+ const0_rtx);
+ else
+ /* R0 has either been cleared before, see code above, or it
+ holds a return value, either way it is not secret
+ information. */
+ emit_move_insn (gen_rtx_REG (SImode, regno),
+ gen_rtx_REG (SImode, R0_REGNUM));
+ emit_use (gen_rtx_REG (SImode, regno));
+ }
+ else
+ {
+ emit_move_insn (gen_rtx_REG (SImode, regno),
+ gen_rtx_REG (SImode, LR_REGNUM));
+ emit_use (gen_rtx_REG (SImode, regno));
+ }
+ }
+ }
+}
+
/* Generate pattern *pop_multiple_with_stack_update_and_return if single
POP instruction can be generated. LR should be replaced by PC. All
the checks required are already done by USE_RETURN_INSN (). Hence,
@@ -25065,6 +26055,12 @@ thumb2_expand_return (bool simple_return)
if (!simple_return && saved_regs_mask)
{
+ /* TODO: Verify that this path is never taken for cmse_nonsecure_entry
+ functions or adapt code to handle according to ACLE. This path should
+ not be reachable for cmse_nonsecure_entry functions though we prefer
+ to assert it for now to ensure that future code changes do not silently
+ change this behavior. */
+ gcc_assert (!IS_CMSE_ENTRY (arm_current_func_type ()));
if (num_regs == 1)
{
rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
@@ -25087,6 +26083,8 @@ thumb2_expand_return (bool simple_return)
}
else
{
+ if (IS_CMSE_ENTRY (arm_current_func_type ()))
+ cmse_nonsecure_entry_clear_before_return ();
emit_jump_insn (simple_return_rtx);
}
}
@@ -25145,6 +26143,10 @@ thumb1_expand_epilogue (void)
if (! df_regs_ever_live_p (LR_REGNUM))
emit_use (gen_rtx_REG (SImode, LR_REGNUM));
+
+ /* Clear all caller-saved regs that are not used to return. */
+ if (IS_CMSE_ENTRY (arm_current_func_type ()))
+ cmse_nonsecure_entry_clear_before_return ();
}
/* Epilogue code for APCS frame. */
@@ -25482,6 +26484,7 @@ arm_expand_epilogue (bool really_return)
if (ARM_FUNC_TYPE (func_type) != ARM_FT_INTERWORKED
&& (TARGET_ARM || ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL)
+ && !IS_CMSE_ENTRY (func_type)
&& !IS_STACKALIGN (func_type)
&& really_return
&& crtl->args.pretend_args_size == 0
@@ -25578,6 +26581,14 @@ arm_expand_epilogue (bool really_return)
stack_pointer_rtx, stack_pointer_rtx);
}
+ /* Clear all caller-saved regs that are not used to return. */
+ if (IS_CMSE_ENTRY (arm_current_func_type ()))
+ {
+ /* CMSE_ENTRY always returns. */
+ gcc_assert (really_return);
+ cmse_nonsecure_entry_clear_before_return ();
+ }
+
if (!really_return)
return;
@@ -26018,7 +27029,7 @@ arm_file_start (void)
const char* pos = strchr (arm_selected_arch->name, '+');
if (pos)
{
- char buf[15];
+ char buf[32];
gcc_assert (strlen (arm_selected_arch->name)
<= sizeof (buf) / sizeof (*pos));
strncpy (buf, arm_selected_arch->name,
@@ -27168,17 +28179,24 @@ arm_asm_emit_except_personality (rtx personality)
output_addr_const (asm_out_file, personality);
fputc ('\n', asm_out_file);
}
+#endif /* ARM_UNWIND_INFO */
/* Implement TARGET_ASM_INITIALIZE_SECTIONS. */
static void
arm_asm_init_sections (void)
{
+#if ARM_UNWIND_INFO
exception_section = get_unnamed_section (0, output_section_asm_op,
"\t.handlerdata");
-}
#endif /* ARM_UNWIND_INFO */
+#ifdef OBJECT_FORMAT_ELF
+ if (target_pure_code)
+ text_section->unnamed.data = "\t.section .text,\"0x20000006\",%progbits";
+#endif
+}
+
/* Output unwind directives for the start/end of a function. */
void
@@ -27960,9 +28978,9 @@ emit_unlikely_jump (rtx insn)
void
arm_expand_compare_and_swap (rtx operands[])
{
- rtx bval, rval, mem, oldval, newval, is_weak, mod_s, mod_f, x;
+ rtx bval, bdst, rval, mem, oldval, newval, is_weak, mod_s, mod_f, x;
machine_mode mode;
- rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+ rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
bval = operands[0];
rval = operands[1];
@@ -28019,43 +29037,54 @@ arm_expand_compare_and_swap (rtx operands[])
gcc_unreachable ();
}
- emit_insn (gen (rval, mem, oldval, newval, is_weak, mod_s, mod_f));
+ bdst = TARGET_THUMB1 ? bval : gen_rtx_REG (CCmode, CC_REGNUM);
+ emit_insn (gen (bdst, rval, mem, oldval, newval, is_weak, mod_s, mod_f));
if (mode == QImode || mode == HImode)
emit_move_insn (operands[1], gen_lowpart (mode, rval));
/* In all cases, we arrange for success to be signaled by Z set.
This arrangement allows for the boolean result to be used directly
- in a subsequent branch, post optimization. */
- x = gen_rtx_REG (CCmode, CC_REGNUM);
- x = gen_rtx_EQ (SImode, x, const0_rtx);
- emit_insn (gen_rtx_SET (bval, x));
+ in a subsequent branch, post optimization. For Thumb-1 targets, the
+ boolean negation of the result is also stored in bval because Thumb-1
+ backend lacks dependency tracking for CC flag due to flag-setting not
+ being represented at RTL level. */
+ if (TARGET_THUMB1)
+ emit_insn (gen_cstoresi_eq0_thumb1 (bval, bdst));
+ else
+ {
+ x = gen_rtx_EQ (SImode, bdst, const0_rtx);
+ emit_insn (gen_rtx_SET (bval, x));
+ }
}
/* Split a compare and swap pattern. It is IMPLEMENTATION DEFINED whether
another memory store between the load-exclusive and store-exclusive can
reset the monitor from Exclusive to Open state. This means we must wait
until after reload to split the pattern, lest we get a register spill in
- the middle of the atomic sequence. */
+ the middle of the atomic sequence. Success of the compare and swap is
+ indicated by the Z flag set for 32bit targets and by neg_bval being zero
+ for Thumb-1 targets (ie. negation of the boolean value returned by
+ atomic_compare_and_swapmode standard pattern in operand 0). */
void
arm_split_compare_and_swap (rtx operands[])
{
- rtx rval, mem, oldval, newval, scratch;
+ rtx rval, mem, oldval, newval, neg_bval;
machine_mode mode;
enum memmodel mod_s, mod_f;
bool is_weak;
rtx_code_label *label1, *label2;
rtx x, cond;
- rval = operands[0];
- mem = operands[1];
- oldval = operands[2];
- newval = operands[3];
- is_weak = (operands[4] != const0_rtx);
- mod_s = memmodel_from_int (INTVAL (operands[5]));
- mod_f = memmodel_from_int (INTVAL (operands[6]));
- scratch = operands[7];
+ rval = operands[1];
+ mem = operands[2];
+ oldval = operands[3];
+ newval = operands[4];
+ is_weak = (operands[5] != const0_rtx);
+ mod_s = memmodel_from_int (INTVAL (operands[6]));
+ mod_f = memmodel_from_int (INTVAL (operands[7]));
+ neg_bval = TARGET_THUMB1 ? operands[0] : operands[8];
mode = GET_MODE (mem);
bool is_armv8_sync = arm_arch8 && is_mm_sync (mod_s);
@@ -28087,26 +29116,44 @@ arm_split_compare_and_swap (rtx operands[])
arm_emit_load_exclusive (mode, rval, mem, use_acquire);
- cond = arm_gen_compare_reg (NE, rval, oldval, scratch);
- x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
- x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
- gen_rtx_LABEL_REF (Pmode, label2), pc_rtx);
- emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
+ /* Z is set to 0 for 32bit targets (resp. rval set to 1) if oldval != rval,
+ as required to communicate with arm_expand_compare_and_swap. */
+ if (TARGET_32BIT)
+ {
+ cond = arm_gen_compare_reg (NE, rval, oldval, neg_bval);
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
+ gen_rtx_LABEL_REF (Pmode, label2), pc_rtx);
+ emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
+ }
+ else
+ {
+ emit_move_insn (neg_bval, const1_rtx);
+ cond = gen_rtx_NE (VOIDmode, rval, oldval);
+ if (thumb1_cmpneg_operand (oldval, SImode))
+ emit_unlikely_jump (gen_cbranchsi4_scratch (neg_bval, rval, oldval,
+ label2, cond));
+ else
+ emit_unlikely_jump (gen_cbranchsi4_insn (cond, rval, oldval, label2));
+ }
- arm_emit_store_exclusive (mode, scratch, mem, newval, use_release);
+ arm_emit_store_exclusive (mode, neg_bval, mem, newval, use_release);
/* Weak or strong, we want EQ to be true for success, so that we
match the flags that we got from the compare above. */
- cond = gen_rtx_REG (CCmode, CC_REGNUM);
- x = gen_rtx_COMPARE (CCmode, scratch, const0_rtx);
- emit_insn (gen_rtx_SET (cond, x));
+ if (TARGET_32BIT)
+ {
+ cond = gen_rtx_REG (CCmode, CC_REGNUM);
+ x = gen_rtx_COMPARE (CCmode, neg_bval, const0_rtx);
+ emit_insn (gen_rtx_SET (cond, x));
+ }
if (!is_weak)
{
- x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
- x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
- gen_rtx_LABEL_REF (Pmode, label1), pc_rtx);
- emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
+ /* Z is set to boolean value of !neg_bval, as required to communicate
+ with arm_expand_compare_and_swap. */
+ x = gen_rtx_NE (VOIDmode, neg_bval, const0_rtx);
+ emit_unlikely_jump (gen_cbranchsi4 (x, neg_bval, const0_rtx, label1));
}
if (!is_mm_relaxed (mod_f))
@@ -28121,6 +29168,15 @@ arm_split_compare_and_swap (rtx operands[])
emit_label (label2);
}
+/* Split an atomic operation pattern. Operation is given by CODE and is one
+ of PLUS, MINUS, IOR, XOR, SET (for an exchange operation) or NOT (for a nand
+ operation). Operation is performed on the content at MEM and on VALUE
+ following the memory model MODEL_RTX. The content at MEM before and after
+ the operation is returned in OLD_OUT and NEW_OUT respectively while the
+ success of the operation is returned in COND. Using a scratch register or
+ an operand register for these determines what result is returned for that
+ pattern. */
+
void
arm_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
rtx value, rtx model_rtx, rtx cond)
@@ -28129,6 +29185,7 @@ arm_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
machine_mode mode = GET_MODE (mem);
machine_mode wmode = (mode == DImode ? DImode : SImode);
rtx_code_label *label;
+ bool all_low_regs, bind_old_new;
rtx x;
bool is_armv8_sync = arm_arch8 && is_mm_sync (model);
@@ -28163,6 +29220,28 @@ arm_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
arm_emit_load_exclusive (mode, old_out, mem, use_acquire);
+ /* Does the operation require destination and first operand to use the same
+ register? This is decided by register constraints of relevant insn
+ patterns in thumb1.md. */
+ gcc_assert (!new_out || REG_P (new_out));
+ all_low_regs = REG_P (value) && REGNO_REG_CLASS (REGNO (value)) == LO_REGS
+ && new_out && REGNO_REG_CLASS (REGNO (new_out)) == LO_REGS
+ && REGNO_REG_CLASS (REGNO (old_out)) == LO_REGS;
+ bind_old_new =
+ (TARGET_THUMB1
+ && code != SET
+ && code != MINUS
+ && (code != PLUS || (!all_low_regs && !satisfies_constraint_L (value))));
+
+ /* We want to return the old value while putting the result of the operation
+ in the same register as the old value so copy the old value over to the
+ destination register and use that register for the operation. */
+ if (old_out && bind_old_new)
+ {
+ emit_move_insn (new_out, old_out);
+ old_out = new_out;
+ }
+
switch (code)
{
case SET:
@@ -30333,4 +31412,216 @@ arm_sched_fusion_priority (rtx_insn *insn, int max_pri,
return;
}
+/* Implement the TARGET_ASM_ELF_FLAGS_NUMERIC hook.
+
+ For pure-code sections there is no letter code for this attribute, so
+ output all the section flags numerically when this is needed. */
+
+static bool
+arm_asm_elf_flags_numeric (unsigned int flags, unsigned int *num)
+{
+
+ if (flags & SECTION_ARM_PURECODE)
+ {
+ *num = 0x20000000;
+
+ if (!(flags & SECTION_DEBUG))
+ *num |= 0x2;
+ if (flags & SECTION_EXCLUDE)
+ *num |= 0x80000000;
+ if (flags & SECTION_WRITE)
+ *num |= 0x1;
+ if (flags & SECTION_CODE)
+ *num |= 0x4;
+ if (flags & SECTION_MERGE)
+ *num |= 0x10;
+ if (flags & SECTION_STRINGS)
+ *num |= 0x20;
+ if (flags & SECTION_TLS)
+ *num |= 0x400;
+ if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
+ *num |= 0x200;
+
+ return true;
+ }
+
+ return false;
+}
+
+/* Implement the TARGET_ASM_FUNCTION_SECTION hook.
+
+ If pure-code is passed as an option, make sure all functions are in
+ sections that have the SHF_ARM_PURECODE attribute. */
+
+static section *
+arm_function_section (tree decl, enum node_frequency freq,
+ bool startup, bool exit)
+{
+ const char * section_name;
+ section * sec;
+
+ if (!decl || TREE_CODE (decl) != FUNCTION_DECL)
+ return default_function_section (decl, freq, startup, exit);
+
+ if (!target_pure_code)
+ return default_function_section (decl, freq, startup, exit);
+
+
+ section_name = DECL_SECTION_NAME (decl);
+
+ /* If a function is not in a named section then it falls under the 'default'
+ text section, also known as '.text'. We can preserve previous behavior as
+ the default text section already has the SHF_ARM_PURECODE section
+ attribute. */
+ if (!section_name)
+ {
+ section *default_sec = default_function_section (decl, freq, startup,
+ exit);
+
+ /* If default_sec is not null, then it must be a special section like for
+ example .text.startup. We set the pure-code attribute and return the
+ same section to preserve existing behavior. */
+ if (default_sec)
+ default_sec->common.flags |= SECTION_ARM_PURECODE;
+ return default_sec;
+ }
+
+ /* Otherwise look whether a section has already been created with
+ 'section_name'. */
+ sec = get_named_section (decl, section_name, 0);
+ if (!sec)
+ /* If that is not the case passing NULL as the section's name to
+ 'get_named_section' will create a section with the declaration's
+ section name. */
+ sec = get_named_section (decl, NULL, 0);
+
+ /* Set the SHF_ARM_PURECODE attribute. */
+ sec->common.flags |= SECTION_ARM_PURECODE;
+
+ return sec;
+}
+
+/* Implements the TARGET_SECTION_TYPE_FLAGS hook.
+
+ If DECL is a function declaration and pure-code is passed as an option
+ then add the SHF_ARM_PURECODE attribute to the section flags. NAME is the
+ section's name and RELOC indicates whether the declaration's initializer may
+ contain runtime relocations. */
+
+static unsigned int
+arm_elf_section_type_flags (tree decl, const char *name, int reloc)
+{
+ unsigned int flags = default_section_type_flags (decl, name, reloc);
+
+ if (decl && TREE_CODE (decl) == FUNCTION_DECL && target_pure_code)
+ flags |= SECTION_ARM_PURECODE;
+
+ return flags;
+}
+
+/* This function checks for the availability of the coprocessor builtin passed
+ in BUILTIN for the current target. Returns true if it is available and
+ false otherwise. If a BUILTIN is passed for which this function has not
+ been implemented it will cause an internal compiler error. */
+
+bool
+arm_coproc_builtin_available (enum unspecv builtin)
+{
+ /* None of these builtins are available in Thumb mode if the target only
+ supports Thumb-1. */
+ if (TARGET_THUMB1)
+ return false;
+
+ switch (builtin)
+ {
+ case VUNSPEC_CDP:
+ case VUNSPEC_LDC:
+ case VUNSPEC_LDCL:
+ case VUNSPEC_STC:
+ case VUNSPEC_STCL:
+ case VUNSPEC_MCR:
+ case VUNSPEC_MRC:
+ if (arm_arch4)
+ return true;
+ break;
+ case VUNSPEC_CDP2:
+ case VUNSPEC_LDC2:
+ case VUNSPEC_LDC2L:
+ case VUNSPEC_STC2:
+ case VUNSPEC_STC2L:
+ case VUNSPEC_MCR2:
+ case VUNSPEC_MRC2:
+ /* Only present in ARMv5*, ARMv6 (but not ARMv6-M), ARMv7* and
+ ARMv8-{A,M}. */
+ if (arm_arch5)
+ return true;
+ break;
+ case VUNSPEC_MCRR:
+ case VUNSPEC_MRRC:
+ /* Only present in ARMv5TE, ARMv6 (but not ARMv6-M), ARMv7* and
+ ARMv8-{A,M}. */
+ if (arm_arch6 || arm_arch5te)
+ return true;
+ break;
+ case VUNSPEC_MCRR2:
+ case VUNSPEC_MRRC2:
+ if (arm_arch6)
+ return true;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ return false;
+}
+
+/* This function returns true if OP is a valid memory operand for the ldc and
+ stc coprocessor instructions and false otherwise. */
+
+bool
+arm_coproc_ldc_stc_legitimate_address (rtx op)
+{
+ int range;
+ /* Has to be a memory operand. */
+ if (!MEM_P (op))
+ return false;
+
+ op = XEXP (op, 0);
+
+ /* We accept registers. */
+ if (REG_P (op))
+ return true;
+
+ switch GET_CODE (op)
+ {
+ case PLUS:
+ {
+ /* Or registers with an offset. */
+ if (!REG_P (XEXP (op, 0)))
+ return false;
+
+ op = XEXP (op, 1);
+
+ /* The offset must be an immediate though. */
+ if (!CONST_INT_P (op))
+ return false;
+
+ range = INTVAL (op);
+
+ /* Within the range of [-1020,1020]. */
+ if (!IN_RANGE (range, -1020, 1020))
+ return false;
+
+ /* And a multiple of 4. */
+ return (range % 4) == 0;
+ }
+ case PRE_INC:
+ case POST_INC:
+ case PRE_DEC:
+ case POST_DEC:
+ return REG_P (XEXP (op, 0));
+ default:
+ gcc_unreachable ();
+ }
+ return false;
+}
#include "gt-arm.h"
diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h
index ad123dde991..4e5e477bd2b 100644
--- a/gcc/config/arm/arm.h
+++ b/gcc/config/arm/arm.h
@@ -236,7 +236,7 @@ extern void (*arm_lang_output_object_attributes_hook)(void);
/* Should MOVW/MOVT be used in preference to a constant pool. */
#define TARGET_USE_MOVT \
- (arm_arch_thumb2 \
+ (TARGET_HAVE_MOVT \
&& (arm_disable_literal_pool \
|| (!optimize_size && !current_tune->prefer_constant_pool)))
@@ -251,14 +251,18 @@ extern void (*arm_lang_output_object_attributes_hook)(void);
#define TARGET_HAVE_MEMORY_BARRIER (TARGET_HAVE_DMB || TARGET_HAVE_DMB_MCR)
/* Nonzero if this chip supports ldrex and strex */
-#define TARGET_HAVE_LDREX ((arm_arch6 && TARGET_ARM) || arm_arch7)
+#define TARGET_HAVE_LDREX ((arm_arch6 && TARGET_ARM) \
+ || arm_arch7 \
+ || (arm_arch8 && !arm_arch_notm))
/* Nonzero if this chip supports LPAE. */
#define TARGET_HAVE_LPAE \
(arm_arch7 && ARM_FSET_HAS_CPU1 (insn_flags, FL_FOR_ARCH7VE))
/* Nonzero if this chip supports ldrex{bh} and strex{bh}. */
-#define TARGET_HAVE_LDREXBH ((arm_arch6k && TARGET_ARM) || arm_arch7)
+#define TARGET_HAVE_LDREXBH ((arm_arch6k && TARGET_ARM) \
+ || arm_arch7 \
+ || (arm_arch8 && !arm_arch_notm))
/* Nonzero if this chip supports ldrexd and strexd. */
#define TARGET_HAVE_LDREXD (((arm_arch6k && TARGET_ARM) \
@@ -267,9 +271,20 @@ extern void (*arm_lang_output_object_attributes_hook)(void);
/* Nonzero if this chip supports load-acquire and store-release. */
#define TARGET_HAVE_LDACQ (TARGET_ARM_ARCH >= 8)
+/* Nonzero if this chip supports LDAEXD and STLEXD. */
+#define TARGET_HAVE_LDACQEXD (TARGET_ARM_ARCH >= 8 \
+ && TARGET_32BIT \
+ && arm_arch_notm)
+
+/* Nonzero if this chip provides the MOVW and MOVT instructions. */
+#define TARGET_HAVE_MOVT (arm_arch_thumb2 || arm_arch8)
+
+/* Nonzero if this chip provides the CBZ and CBNZ instructions. */
+#define TARGET_HAVE_CBZ (arm_arch_thumb2 || arm_arch8)
+
/* Nonzero if integer division instructions supported. */
#define TARGET_IDIV ((TARGET_ARM && arm_arch_arm_hwdiv) \
- || (TARGET_THUMB2 && arm_arch_thumb_hwdiv))
+ || (TARGET_THUMB && arm_arch_thumb_hwdiv))
/* Nonzero if disallow volatile memory access in IT block. */
#define TARGET_NO_VOLATILE_CE (arm_arch_no_volatile_ce)
@@ -402,7 +417,9 @@ enum base_architecture
BASE_ARCH_7R = 7,
BASE_ARCH_7M = 7,
BASE_ARCH_7EM = 7,
- BASE_ARCH_8A = 8
+ BASE_ARCH_8A = 8,
+ BASE_ARCH_8M_BASE = 8,
+ BASE_ARCH_8M_MAIN = 8
};
/* The major revision number of the ARM Architecture implemented by the target. */
@@ -502,6 +519,9 @@ extern bool arm_disable_literal_pool;
/* Nonzero if chip supports the ARMv8 CRC instructions. */
extern int arm_arch_crc;
+/* Nonzero if chip supports the ARMv8-M Security Extensions. */
+extern int arm_arch_cmse;
+
#ifndef TARGET_DEFAULT
#define TARGET_DEFAULT (MASK_APCS_FRAME)
#endif
@@ -1363,6 +1383,7 @@ enum reg_class
#define ARM_FT_VOLATILE (1 << 4) /* Does not return. */
#define ARM_FT_NESTED (1 << 5) /* Embedded inside another func. */
#define ARM_FT_STACKALIGN (1 << 6) /* Called with misaligned stack. */
+#define ARM_FT_CMSE_ENTRY (1 << 7) /* ARMv8-M non-secure entry function. */
/* Some macros to test these flags. */
#define ARM_FUNC_TYPE(t) (t & ARM_FT_TYPE_MASK)
@@ -1371,6 +1392,7 @@ enum reg_class
#define IS_NAKED(t) (t & ARM_FT_NAKED)
#define IS_NESTED(t) (t & ARM_FT_NESTED)
#define IS_STACKALIGN(t) (t & ARM_FT_STACKALIGN)
+#define IS_CMSE_ENTRY(t) (t & ARM_FT_CMSE_ENTRY)
/* Structure used to hold the function stack frame layout. Offsets are
@@ -2245,13 +2267,18 @@ extern const char *arm_rewrite_mcpu (int argc, const char **argv);
" :%{march=*:-march=%*}}" \
BIG_LITTLE_SPEC
+extern const char *arm_target_thumb_only (int argc, const char **argv);
+#define TARGET_MODE_SPEC_FUNCTIONS \
+ { "target_mode_check", arm_target_thumb_only },
+
/* -mcpu=native handling only makes sense with compiler running on
an ARM chip. */
#if defined(__arm__)
extern const char *host_detect_local_cpu (int argc, const char **argv);
# define EXTRA_SPEC_FUNCTIONS \
{ "local_cpu_detect", host_detect_local_cpu }, \
- BIG_LITTLE_CPU_SPEC_FUNCTIONS
+ BIG_LITTLE_CPU_SPEC_FUNCTIONS \
+ TARGET_MODE_SPEC_FUNCTIONS
# define MCPU_MTUNE_NATIVE_SPECS \
" %{march=native:%<march=native %:local_cpu_detect(arch)}" \
@@ -2259,13 +2286,28 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
" %{mtune=native:%<mtune=native %:local_cpu_detect(tune)}"
#else
# define MCPU_MTUNE_NATIVE_SPECS ""
-# define EXTRA_SPEC_FUNCTIONS BIG_LITTLE_CPU_SPEC_FUNCTIONS
+# define EXTRA_SPEC_FUNCTIONS \
+ BIG_LITTLE_CPU_SPEC_FUNCTIONS \
+ TARGET_MODE_SPEC_FUNCTIONS
#endif
-#define DRIVER_SELF_SPECS MCPU_MTUNE_NATIVE_SPECS
+/* Automatically add -mthumb for Thumb-only targets if mode isn't specified
+ via the configuration option --with-mode or via the command line. The
+ function target_mode_check is called to do the check with either:
+ - an array of -march values if any is given;
+ - an array of -mcpu values if any is given;
+ - an empty array. */
+#define TARGET_MODE_SPECS \
+ " %{!marm:%{!mthumb:%:target_mode_check(%{march=*:%*;mcpu=*:%*;:})}}"
+
+#define DRIVER_SELF_SPECS MCPU_MTUNE_NATIVE_SPECS TARGET_MODE_SPECS
#define TARGET_SUPPORTS_WIDE_INT 1
/* For switching between functions with different target attributes. */
#define SWITCHABLE_TARGET 1
+/* Define SECTION_ARM_PURECODE as the ARM specific section attribute
+ representation for SHF_ARM_PURECODE in GCC. */
+#define SECTION_ARM_PURECODE SECTION_MACH_DEP
+
#endif /* ! GCC_ARM_H */
diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index 86df1c0366b..3ff77f2bf09 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -118,10 +118,10 @@
; This can be "a" for ARM, "t" for either of the Thumbs, "32" for
; TARGET_32BIT, "t1" or "t2" to specify a specific Thumb mode. "v6"
; for ARM or Thumb-2 with arm_arch6, and nov6 for ARM without
-; arm_arch6. "v6t2" for Thumb-2 with arm_arch6. This attribute is
-; used to compute attribute "enabled", use type "any" to enable an
-; alternative in all cases.
-(define_attr "arch" "any,a,t,32,t1,t2,v6,nov6,v6t2,neon_for_64bits,avoid_neon_for_64bits,iwmmxt,iwmmxt2,armv6_or_vfpv3"
+; arm_arch6. "v6t2" for Thumb-2 with arm_arch6 and "v8mb" for ARMv8-M
+; Baseline. This attribute is used to compute attribute "enabled",
+; use type "any" to enable an alternative in all cases.
+(define_attr "arch" "any,a,t,32,t1,t2,v6,nov6,v6t2,v8mb,neon_for_64bits,avoid_neon_for_64bits,iwmmxt,iwmmxt2,armv6_or_vfpv3"
(const_string "any"))
(define_attr "arch_enabled" "no,yes"
@@ -160,6 +160,10 @@
(match_test "TARGET_32BIT && arm_arch6 && arm_arch_thumb2"))
(const_string "yes")
+ (and (eq_attr "arch" "v8mb")
+ (match_test "TARGET_THUMB1 && arm_arch8"))
+ (const_string "yes")
+
(and (eq_attr "arch" "avoid_neon_for_64bits")
(match_test "TARGET_NEON")
(not (match_test "TARGET_PREFER_NEON_64BITS")))
@@ -225,10 +229,6 @@
(match_test "arm_restrict_it"))
(const_string "no")
- (and (eq_attr "use_literal_pool" "yes")
- (match_test "arm_disable_literal_pool"))
- (const_string "no")
-
(eq_attr "arch_enabled" "no")
(const_string "no")]
(const_string "yes")))
@@ -4326,23 +4326,29 @@
;; Division instructions
(define_insn "divsi3"
- [(set (match_operand:SI 0 "s_register_operand" "=r")
- (div:SI (match_operand:SI 1 "s_register_operand" "r")
- (match_operand:SI 2 "s_register_operand" "r")))]
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (div:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "s_register_operand" "r,r")))]
"TARGET_IDIV"
- "sdiv%?\t%0, %1, %2"
- [(set_attr "predicable" "yes")
+ "@
+ sdiv%?\t%0, %1, %2
+ sdiv\t%0, %1, %2"
+ [(set_attr "arch" "32,v8mb")
+ (set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")
(set_attr "type" "sdiv")]
)
(define_insn "udivsi3"
- [(set (match_operand:SI 0 "s_register_operand" "=r")
- (udiv:SI (match_operand:SI 1 "s_register_operand" "r")
- (match_operand:SI 2 "s_register_operand" "r")))]
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (udiv:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "s_register_operand" "r,r")))]
"TARGET_IDIV"
- "udiv%?\t%0, %1, %2"
- [(set_attr "predicable" "yes")
+ "@
+ udiv%?\t%0, %1, %2
+ udiv\t%0, %1, %2"
+ [(set_attr "arch" "32,v8mb")
+ (set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")
(set_attr "type" "udiv")]
)
@@ -5518,8 +5524,9 @@
(match_operand:ANY64 1 "immediate_operand" ""))]
"TARGET_32BIT
&& reload_completed
- && (arm_const_double_inline_cost (operands[1])
- <= arm_max_const_double_inline_cost ())"
+ && (arm_disable_literal_pool
+ || (arm_const_double_inline_cost (operands[1])
+ <= arm_max_const_double_inline_cost ()))"
[(const_int 0)]
"
arm_split_constant (SET, SImode, curr_insn,
@@ -5699,12 +5706,15 @@
;; LO_SUM adds in the high bits. Fortunately these are opaque operations
;; so this does not matter.
(define_insn "*arm_movt"
- [(set (match_operand:SI 0 "nonimmediate_operand" "=r")
- (lo_sum:SI (match_operand:SI 1 "nonimmediate_operand" "0")
- (match_operand:SI 2 "general_operand" "i")))]
- "arm_arch_thumb2 && arm_valid_symbolic_address_p (operands[2])"
- "movt%?\t%0, #:upper16:%c2"
- [(set_attr "predicable" "yes")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r")
+ (lo_sum:SI (match_operand:SI 1 "nonimmediate_operand" "0,0")
+ (match_operand:SI 2 "general_operand" "i,i")))]
+ "TARGET_HAVE_MOVT && arm_valid_symbolic_address_p (operands[2])"
+ "@
+ movt%?\t%0, #:upper16:%c2
+ movt\t%0, #:upper16:%c2"
+ [(set_attr "arch" "32,v8mb")
+ (set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")
(set_attr "length" "4")
(set_attr "type" "alu_sreg")]
@@ -5762,7 +5772,8 @@
[(set (match_operand:SI 0 "arm_general_register_operand" "")
(const:SI (plus:SI (match_operand:SI 1 "general_operand" "")
(match_operand:SI 2 "const_int_operand" ""))))]
- "TARGET_THUMB2
+ "TARGET_THUMB
+ && TARGET_HAVE_MOVT
&& arm_disable_literal_pool
&& reload_completed
&& GET_CODE (operands[1]) == SYMBOL_REF"
@@ -5793,8 +5804,7 @@
(define_split
[(set (match_operand:SI 0 "arm_general_register_operand" "")
(match_operand:SI 1 "general_operand" ""))]
- "TARGET_32BIT
- && TARGET_USE_MOVT && GET_CODE (operands[1]) == SYMBOL_REF
+ "TARGET_USE_MOVT && GET_CODE (operands[1]) == SYMBOL_REF
&& !flag_pic && !target_word_relocations
&& !arm_tls_referenced_p (operands[1])"
[(clobber (const_int 0))]
@@ -7627,6 +7637,7 @@
"
{
rtx callee, pat;
+ tree addr = MEM_EXPR (operands[0]);
/* In an untyped call, we can get NULL for operand 2. */
if (operands[2] == NULL_RTX)
@@ -7641,8 +7652,17 @@
: !REG_P (callee))
XEXP (operands[0], 0) = force_reg (Pmode, callee);
- pat = gen_call_internal (operands[0], operands[1], operands[2]);
- arm_emit_call_insn (pat, XEXP (operands[0], 0), false);
+ if (detect_cmse_nonsecure_call (addr))
+ {
+ pat = gen_nonsecure_call_internal (operands[0], operands[1],
+ operands[2]);
+ emit_call_insn (pat);
+ }
+ else
+ {
+ pat = gen_call_internal (operands[0], operands[1], operands[2]);
+ arm_emit_call_insn (pat, XEXP (operands[0], 0), false);
+ }
DONE;
}"
)
@@ -7653,6 +7673,24 @@
(use (match_operand 2 "" ""))
(clobber (reg:SI LR_REGNUM))])])
+(define_expand "nonsecure_call_internal"
+ [(parallel [(call (unspec:SI [(match_operand 0 "memory_operand" "")]
+ UNSPEC_NONSECURE_MEM)
+ (match_operand 1 "general_operand" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI LR_REGNUM))
+ (clobber (reg:SI 4))])]
+ "use_cmse"
+ "
+ {
+ rtx tmp;
+ tmp = copy_to_suggested_reg (XEXP (operands[0], 0),
+ gen_rtx_REG (SImode, 4),
+ SImode);
+
+ operands[0] = replace_equiv_address (operands[0], tmp);
+ }")
+
(define_insn "*call_reg_armv5"
[(call (mem:SI (match_operand:SI 0 "s_register_operand" "r"))
(match_operand 1 "" ""))
@@ -7688,6 +7726,7 @@
"
{
rtx pat, callee;
+ tree addr = MEM_EXPR (operands[1]);
/* In an untyped call, we can get NULL for operand 2. */
if (operands[3] == 0)
@@ -7702,9 +7741,18 @@
: !REG_P (callee))
XEXP (operands[1], 0) = force_reg (Pmode, callee);
- pat = gen_call_value_internal (operands[0], operands[1],
- operands[2], operands[3]);
- arm_emit_call_insn (pat, XEXP (operands[1], 0), false);
+ if (detect_cmse_nonsecure_call (addr))
+ {
+ pat = gen_nonsecure_call_value_internal (operands[0], operands[1],
+ operands[2], operands[3]);
+ emit_call_insn (pat);
+ }
+ else
+ {
+ pat = gen_call_value_internal (operands[0], operands[1],
+ operands[2], operands[3]);
+ arm_emit_call_insn (pat, XEXP (operands[1], 0), false);
+ }
DONE;
}"
)
@@ -7716,6 +7764,25 @@
(use (match_operand 3 "" ""))
(clobber (reg:SI LR_REGNUM))])])
+(define_expand "nonsecure_call_value_internal"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (unspec:SI [(match_operand 1 "memory_operand" "")]
+ UNSPEC_NONSECURE_MEM)
+ (match_operand 2 "general_operand" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI LR_REGNUM))
+ (clobber (reg:SI 4))])]
+ "use_cmse"
+ "
+ {
+ rtx tmp;
+ tmp = copy_to_suggested_reg (XEXP (operands[1], 0),
+ gen_rtx_REG (SImode, 4),
+ SImode);
+
+ operands[1] = replace_equiv_address (operands[1], tmp);
+ }")
+
(define_insn "*call_value_reg_armv5"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:SI 1 "s_register_operand" "r"))
@@ -8180,7 +8247,7 @@
(match_operand:SI 2 "const_int_operand" "") ; total range
(match_operand:SI 3 "" "") ; table label
(match_operand:SI 4 "" "")] ; Out of range label
- "TARGET_32BIT || optimize_size || flag_pic"
+ "(TARGET_32BIT || optimize_size || flag_pic) && !target_pure_code"
"
{
enum insn_code code;
@@ -10958,13 +11025,16 @@
;; We only care about the lower 16 bits of the constant
;; being inserted into the upper 16 bits of the register.
(define_insn "*arm_movtas_ze"
- [(set (zero_extract:SI (match_operand:SI 0 "s_register_operand" "+r")
+ [(set (zero_extract:SI (match_operand:SI 0 "s_register_operand" "+r,r")
(const_int 16)
(const_int 16))
(match_operand:SI 1 "const_int_operand" ""))]
- "arm_arch_thumb2"
- "movt%?\t%0, %L1"
- [(set_attr "predicable" "yes")
+ "TARGET_HAVE_MOVT"
+ "@
+ movt%?\t%0, %L1
+ movt\t%0, %L1"
+ [(set_attr "arch" "32,v8mb")
+ (set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")
(set_attr "length" "4")
(set_attr "type" "alu_sreg")]
@@ -11425,6 +11495,134 @@
DONE;
})
+(define_insn "<cdp>"
+ [(unspec_volatile [(match_operand:SI 0 "immediate_operand" "n")
+ (match_operand:SI 1 "immediate_operand" "n")
+ (match_operand:SI 2 "immediate_operand" "n")
+ (match_operand:SI 3 "immediate_operand" "n")
+ (match_operand:SI 4 "immediate_operand" "n")
+ (match_operand:SI 5 "immediate_operand" "n")] CDPI)]
+ "arm_coproc_builtin_available (VUNSPEC_<CDP>)"
+{
+ arm_const_bounds (operands[0], 0, 16);
+ arm_const_bounds (operands[1], 0, 16);
+ arm_const_bounds (operands[2], 0, (1 << 5));
+ arm_const_bounds (operands[3], 0, (1 << 5));
+ arm_const_bounds (operands[4], 0, (1 << 5));
+ arm_const_bounds (operands[5], 0, 8);
+ return "<cdp>\\tp%c0, %1, CR%c2, CR%c3, CR%c4, %5";
+}
+ [(set_attr "length" "4")
+ (set_attr "type" "coproc")])
+
+(define_insn "*stc"
+ [(unspec_volatile [(match_operand:SI 0 "immediate_operand" "n")
+ (match_operand:SI 1 "immediate_operand" "n")
+ (match_operand:SI 2 "memory_operand" "=Uz")] STCI)]
+ "arm_coproc_builtin_available (VUNSPEC_<STC>)"
+{
+ arm_const_bounds (operands[0], 0, 16);
+ arm_const_bounds (operands[1], 0, (1 << 5));
+ return "<stc>\\tp%c0, CR%c1, %2";
+}
+ [(set_attr "length" "4")
+ (set_attr "type" "coproc")])
+
+(define_expand "<stc>"
+ [(unspec_volatile [(match_operand:SI 0 "immediate_operand")
+ (match_operand:SI 1 "immediate_operand")
+ (mem:SI (match_operand:SI 2 "s_register_operand"))] STCI)]
+ "arm_coproc_builtin_available (VUNSPEC_<STC>)")
+
+(define_insn "*ldc"
+ [(unspec_volatile [(match_operand:SI 0 "immediate_operand" "n")
+ (match_operand:SI 1 "immediate_operand" "n")
+ (match_operand:SI 2 "memory_operand" "Uz")] LDCI)]
+ "arm_coproc_builtin_available (VUNSPEC_<LDC>)"
+{
+ arm_const_bounds (operands[0], 0, 16);
+ arm_const_bounds (operands[1], 0, (1 << 5));
+ return "<ldc>\\tp%c0, CR%c1, %2";
+}
+ [(set_attr "length" "4")
+ (set_attr "type" "coproc")])
+
+(define_expand "<ldc>"
+ [(unspec_volatile [(match_operand:SI 0 "immediate_operand")
+ (match_operand:SI 1 "immediate_operand")
+ (mem:SI (match_operand:SI 2 "s_register_operand"))] LDCI)]
+ "arm_coproc_builtin_available (VUNSPEC_<LDC>)")
+
+(define_insn "<mcr>"
+ [(unspec_volatile [(match_operand:SI 0 "immediate_operand" "n")
+ (match_operand:SI 1 "immediate_operand" "n")
+ (match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "immediate_operand" "n")
+ (match_operand:SI 4 "immediate_operand" "n")
+ (match_operand:SI 5 "immediate_operand" "n")] MCRI)
+ (use (match_dup 2))]
+ "arm_coproc_builtin_available (VUNSPEC_<MCR>)"
+{
+ arm_const_bounds (operands[0], 0, 16);
+ arm_const_bounds (operands[1], 0, 8);
+ arm_const_bounds (operands[3], 0, (1 << 5));
+ arm_const_bounds (operands[4], 0, (1 << 5));
+ arm_const_bounds (operands[5], 0, 8);
+ return "<mcr>\\tp%c0, %1, %2, CR%c3, CR%c4, %5";
+}
+ [(set_attr "length" "4")
+ (set_attr "type" "coproc")])
+
+(define_insn "<mrc>"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec_volatile:SI [(match_operand:SI 1 "immediate_operand" "n")
+ (match_operand:SI 2 "immediate_operand" "n")
+ (match_operand:SI 3 "immediate_operand" "n")
+ (match_operand:SI 4 "immediate_operand" "n")
+ (match_operand:SI 5 "immediate_operand" "n")] MRCI))]
+ "arm_coproc_builtin_available (VUNSPEC_<MRC>)"
+{
+ arm_const_bounds (operands[1], 0, 16);
+ arm_const_bounds (operands[2], 0, 8);
+ arm_const_bounds (operands[3], 0, (1 << 5));
+ arm_const_bounds (operands[4], 0, (1 << 5));
+ arm_const_bounds (operands[5], 0, 8);
+ return "<mrc>\\tp%c1, %2, %0, CR%c3, CR%c4, %5";
+}
+ [(set_attr "length" "4")
+ (set_attr "type" "coproc")])
+
+(define_insn "<mcrr>"
+ [(unspec_volatile [(match_operand:SI 0 "immediate_operand" "n")
+ (match_operand:SI 1 "immediate_operand" "n")
+ (match_operand:DI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "immediate_operand" "n")] MCRRI)
+ (use (match_dup 2))]
+ "arm_coproc_builtin_available (VUNSPEC_<MCRR>)"
+{
+ arm_const_bounds (operands[0], 0, 16);
+ arm_const_bounds (operands[1], 0, 8);
+ arm_const_bounds (operands[3], 0, (1 << 5));
+ return "<mcrr>\\tp%c0, %1, %Q2, %R2, CR%c3";
+}
+ [(set_attr "length" "4")
+ (set_attr "type" "coproc")])
+
+(define_insn "<mrrc>"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (unspec_volatile:DI [(match_operand:SI 1 "immediate_operand" "n")
+ (match_operand:SI 2 "immediate_operand" "n")
+ (match_operand:SI 3 "immediate_operand" "n")] MRRCI))]
+ "arm_coproc_builtin_available (VUNSPEC_<MRRC>)"
+{
+ arm_const_bounds (operands[1], 0, 16);
+ arm_const_bounds (operands[2], 0, 8);
+ arm_const_bounds (operands[3], 0, (1 << 5));
+ return "<mrrc>\\tp%c1, %2, %Q0, %R0, CR%c3";
+}
+ [(set_attr "length" "4")
+ (set_attr "type" "coproc")])
+
;; Vector bits common to IWMMXT and Neon
(include "vec-common.md")
;; Load the Intel Wireless Multimedia Extension patterns
diff --git a/gcc/config/arm/arm.opt b/gcc/config/arm/arm.opt
index 0ebe0174390..5fb645df71b 100644
--- a/gcc/config/arm/arm.opt
+++ b/gcc/config/arm/arm.opt
@@ -109,6 +109,10 @@ mfloat-abi=
Target RejectNegative Joined Enum(float_abi_type) Var(arm_float_abi) Init(TARGET_DEFAULT_FLOAT_ABI)
Specify if floating point hardware should be used.
+mcmse
+Target RejectNegative Var(use_cmse)
+Specify that the compiler should target secure code as per ARMv8-M Security Extensions.
+
Enum
Name(float_abi_type) Type(enum float_abi_type)
Known floating-point ABIs (for use with the -mfloat-abi= option):
@@ -281,3 +285,7 @@ Assume loading data from flash is slower than fetching instructions.
masm-syntax-unified
Target Report Var(inline_asm_unified) Init(0) Save
Assume unified syntax for inline assembly code.
+
+mpure-code
+Target Report Var(target_pure_code) Init(0)
+Do not allow constant data to be placed in code sections.
diff --git a/gcc/config/arm/arm_acle.h b/gcc/config/arm/arm_acle.h
index 5d937168e10..1de1e277d18 100644
--- a/gcc/config/arm/arm_acle.h
+++ b/gcc/config/arm/arm_acle.h
@@ -32,6 +32,147 @@
extern "C" {
#endif
+#if (!__thumb__ || __thumb2__) && __ARM_ARCH >= 4
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_cdp (const unsigned int __coproc, const unsigned int __opc1,
+ const unsigned int __CRd, const unsigned int __CRn,
+ const unsigned int __CRm, const unsigned int __opc2)
+{
+ return __builtin_arm_cdp (__coproc, __opc1, __CRd, __CRn, __CRm, __opc2);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_ldc (const unsigned int __coproc, const unsigned int __CRd,
+ const void * __p)
+{
+ return __builtin_arm_ldc (__coproc, __CRd, __p);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_ldcl (const unsigned int __coproc, const unsigned int __CRd,
+ const void * __p)
+{
+ return __builtin_arm_ldcl (__coproc, __CRd, __p);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_stc (const unsigned int __coproc, const unsigned int __CRd,
+ void * __p)
+{
+ return __builtin_arm_stc (__coproc, __CRd, __p);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_stcl (const unsigned int __coproc, const unsigned int __CRd,
+ void * __p)
+{
+ return __builtin_arm_stcl (__coproc, __CRd, __p);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_mcr (const unsigned int __coproc, const unsigned int __opc1,
+ uint32_t __value, const unsigned int __CRn, const unsigned int __CRm,
+ const unsigned int __opc2)
+{
+ return __builtin_arm_mcr (__coproc, __opc1, __value, __CRn, __CRm, __opc2);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__arm_mrc (const unsigned int __coproc, const unsigned int __opc1,
+ const unsigned int __CRn, const unsigned int __CRm,
+ const unsigned int __opc2)
+{
+ return __builtin_arm_mrc (__coproc, __opc1, __CRn, __CRm, __opc2);
+}
+#if __ARM_ARCH >= 5
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_cdp2 (const unsigned int __coproc, const unsigned int __opc1,
+ const unsigned int __CRd, const unsigned int __CRn,
+ const unsigned int __CRm, const unsigned int __opc2)
+{
+ return __builtin_arm_cdp2 (__coproc, __opc1, __CRd, __CRn, __CRm, __opc2);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_ldc2 (const unsigned int __coproc, const unsigned int __CRd,
+ const void * __p)
+{
+ return __builtin_arm_ldc2 (__coproc, __CRd, __p);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_ldc2l (const unsigned int __coproc, const unsigned int __CRd,
+ const void * __p)
+{
+ return __builtin_arm_ldc2l (__coproc, __CRd, __p);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_stc2 (const unsigned int __coproc, const unsigned int __CRd,
+ void * __p)
+{
+ return __builtin_arm_stc2 (__coproc, __CRd, __p);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_stc2l (const unsigned int __coproc, const unsigned int __CRd,
+ void * __p)
+{
+ return __builtin_arm_stc2l (__coproc, __CRd, __p);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_mcr2 (const unsigned int __coproc, const unsigned int __opc1,
+ uint32_t __value, const unsigned int __CRn,
+ const unsigned int __CRm, const unsigned int __opc2)
+{
+ return __builtin_arm_mcr2 (__coproc, __opc1, __value, __CRn, __CRm, __opc2);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__arm_mrc2 (const unsigned int __coproc, const unsigned int __opc1,
+ const unsigned int __CRn, const unsigned int __CRm,
+ const unsigned int __opc2)
+{
+ return __builtin_arm_mrc2 (__coproc, __opc1, __CRn, __CRm, __opc2);
+}
+
+#if __ARM_ARCH >= 6 || defined (__ARM_ARCH_5TE__)
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_mcrr (const unsigned int __coproc, const unsigned int __opc1,
+ uint64_t __value, const unsigned int __CRm)
+{
+ return __builtin_arm_mcrr (__coproc, __opc1, __value, __CRm);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__arm_mrrc (const unsigned int __coproc, const unsigned int __opc1,
+ const unsigned int __CRm)
+{
+ return __builtin_arm_mrrc (__coproc, __opc1, __CRm);
+}
+
+#if __ARM_ARCH >= 6
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_mcrr2 (const unsigned int __coproc, const unsigned int __opc1,
+ uint64_t __value, const unsigned int __CRm)
+{
+ return __builtin_arm_mcrr2 (__coproc, __opc1, __value, __CRm);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__arm_mrrc2 (const unsigned int __coproc, const unsigned int __opc1,
+ const unsigned int __CRm)
+{
+ return __builtin_arm_mrrc2 (__coproc, __opc1, __CRm);
+}
+#endif /* __ARM_ARCH >= 6. */
+#endif /* __ARM_ARCH >= 6 || defined (__ARM_ARCH_5TE__). */
+#endif /* __ARM_ARCH >= 5. */
+#endif /* (!__thumb__ || __thumb2__) && __ARM_ARCH >= 4. */
+
#ifdef __ARM_FEATURE_CRC32
__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
__crc32b (uint32_t __a, uint8_t __b)
diff --git a/gcc/config/arm/arm_acle_builtins.def b/gcc/config/arm/arm_acle_builtins.def
new file mode 100644
index 00000000000..bd1f66272c9
--- /dev/null
+++ b/gcc/config/arm/arm_acle_builtins.def
@@ -0,0 +1,44 @@
+/* ACLE builtin definitions for ARM.
+ Copyright (C) 2016 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+VAR1 (UBINOP, crc32b, si)
+VAR1 (UBINOP, crc32h, si)
+VAR1 (UBINOP, crc32w, si)
+VAR1 (UBINOP, crc32cb, si)
+VAR1 (UBINOP, crc32ch, si)
+VAR1 (UBINOP, crc32cw, si)
+VAR1 (CDP, cdp, void)
+VAR1 (CDP, cdp2, void)
+VAR1 (LDC, ldc, void)
+VAR1 (LDC, ldc2, void)
+VAR1 (LDC, ldcl, void)
+VAR1 (LDC, ldc2l, void)
+VAR1 (STC, stc, void)
+VAR1 (STC, stc2, void)
+VAR1 (STC, stcl, void)
+VAR1 (STC, stc2l, void)
+VAR1 (MCR, mcr, void)
+VAR1 (MCR, mcr2, void)
+VAR1 (MRC, mrc, si)
+VAR1 (MRC, mrc2, si)
+VAR1 (MCRR, mcrr, void)
+VAR1 (MCRR, mcrr2, void)
+VAR1 (MRRC, mrrc, di)
+VAR1 (MRRC, mrrc2, di)
diff --git a/gcc/config/arm/arm_cmse.h b/gcc/config/arm/arm_cmse.h
new file mode 100644
index 00000000000..82b58b1c4f4
--- /dev/null
+++ b/gcc/config/arm/arm_cmse.h
@@ -0,0 +1,199 @@
+/* ARMv8-M Secure Extensions intrinsics include file.
+
+ Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+
+#ifndef _GCC_ARM_CMSE_H
+#define _GCC_ARM_CMSE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if __ARM_FEATURE_CMSE & 1
+
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef __ARM_BIG_ENDIAN
+
+typedef union {
+ struct cmse_address_info {
+#if __ARM_FEATURE_CMSE & 2
+ unsigned idau_region:8;
+ unsigned idau_region_valid:1;
+ unsigned secure:1;
+ unsigned nonsecure_readwrite_ok:1;
+ unsigned nonsecure_read_ok:1;
+#else
+ unsigned :12;
+#endif
+ unsigned readwrite_ok:1;
+ unsigned read_ok:1;
+#if __ARM_FEATURE_CMSE & 2
+ unsigned sau_region_valid:1;
+#else
+ unsigned :1;
+#endif
+ unsigned mpu_region_valid:1;
+#if __ARM_FEATURE_CMSE & 2
+ unsigned sau_region:8;
+#else
+ unsigned :8;
+#endif
+ unsigned mpu_region:8;
+ } flags;
+ unsigned value;
+} cmse_address_info_t;
+
+#else
+
+typedef union {
+ struct cmse_address_info {
+ unsigned mpu_region:8;
+#if __ARM_FEATURE_CMSE & 2
+ unsigned sau_region:8;
+#else
+ unsigned :8;
+#endif
+ unsigned mpu_region_valid:1;
+#if __ARM_FEATURE_CMSE & 2
+ unsigned sau_region_valid:1;
+#else
+ unsigned :1;
+#endif
+ unsigned read_ok:1;
+ unsigned readwrite_ok:1;
+#if __ARM_FEATURE_CMSE & 2
+ unsigned nonsecure_read_ok:1;
+ unsigned nonsecure_readwrite_ok:1;
+ unsigned secure:1;
+ unsigned idau_region_valid:1;
+ unsigned idau_region:8;
+#else
+ unsigned :12;
+#endif
+ } flags;
+ unsigned value;
+} cmse_address_info_t;
+
+#endif /* __ARM_BIG_ENDIAN */
+
+#define cmse_TT_fptr(p) (__cmse_TT_fptr ((__cmse_fptr)(p)))
+
+typedef void (*__cmse_fptr)(void);
+
+#define __CMSE_TT_ASM(flags) \
+{ \
+ cmse_address_info_t __result; \
+ __asm__ ("tt" # flags " %0,%1" \
+ : "=r"(__result) \
+ : "r"(__p) \
+ : "memory"); \
+ return __result; \
+}
+
+__extension__ static __inline __attribute__ ((__always_inline__))
+cmse_address_info_t
+__cmse_TT_fptr (__cmse_fptr __p)
+__CMSE_TT_ASM ()
+
+__extension__ static __inline __attribute__ ((__always_inline__))
+cmse_address_info_t
+cmse_TT (void *__p)
+__CMSE_TT_ASM ()
+
+#define cmse_TTT_fptr(p) (__cmse_TTT_fptr ((__cmse_fptr)(p)))
+
+__extension__ static __inline __attribute__ ((__always_inline__))
+cmse_address_info_t
+__cmse_TTT_fptr (__cmse_fptr __p)
+__CMSE_TT_ASM (t)
+
+__extension__ static __inline __attribute__ ((__always_inline__))
+cmse_address_info_t
+cmse_TTT (void *__p)
+__CMSE_TT_ASM (t)
+
+#if __ARM_FEATURE_CMSE & 2
+
+#define cmse_TTA_fptr(p) (__cmse_TTA_fptr ((__cmse_fptr)(p)))
+
+__extension__ static __inline __attribute__ ((__always_inline__))
+cmse_address_info_t
+__cmse_TTA_fptr (__cmse_fptr __p)
+__CMSE_TT_ASM (a)
+
+__extension__ static __inline __attribute__ ((__always_inline__))
+cmse_address_info_t
+cmse_TTA (void *__p)
+__CMSE_TT_ASM (a)
+
+#define cmse_TTAT_fptr(p) (__cmse_TTAT_fptr ((__cmse_fptr)(p)))
+
+__extension__ static __inline cmse_address_info_t
+__attribute__ ((__always_inline__))
+__cmse_TTAT_fptr (__cmse_fptr __p)
+__CMSE_TT_ASM (at)
+
+__extension__ static __inline cmse_address_info_t
+__attribute__ ((__always_inline__))
+cmse_TTAT (void *__p)
+__CMSE_TT_ASM (at)
+
+/* FIXME: diagnose use outside cmse_nonsecure_entry functions. */
+__extension__ static __inline int __attribute__ ((__always_inline__))
+cmse_nonsecure_caller (void)
+{
+ return __builtin_arm_cmse_nonsecure_caller ();
+}
+
+#define CMSE_AU_NONSECURE 2
+#define CMSE_MPU_NONSECURE 16
+#define CMSE_NONSECURE 18
+
+#define cmse_nsfptr_create(p) ((typeof ((p))) ((intptr_t) (p) & ~1))
+
+#define cmse_is_nsfptr(p) (!((intptr_t) (p) & 1))
+
+#endif /* __ARM_FEATURE_CMSE & 2 */
+
+#define CMSE_MPU_UNPRIV 4
+#define CMSE_MPU_READWRITE 1
+#define CMSE_MPU_READ 8
+
+__extension__ void *
+cmse_check_address_range (void *, size_t, int);
+
+#define cmse_check_pointed_object(p, f) \
+ ((typeof ((p))) cmse_check_address_range ((p), sizeof (*(p)), (f)))
+
+#endif /* __ARM_FEATURE_CMSE & 1 */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _GCC_ARM_CMSE_H */
diff --git a/gcc/config/arm/bpabi.h b/gcc/config/arm/bpabi.h
index 06488baed31..9b68fa11a49 100644
--- a/gcc/config/arm/bpabi.h
+++ b/gcc/config/arm/bpabi.h
@@ -90,6 +90,9 @@
|march=armv8-a+crc \
|march=armv8.1-a \
|march=armv8.1-a+crc \
+ |march=armv8-m.base|mcpu=cortex-m23 \
+ |march=armv8-m.main \
+ |march=armv8-m.main+dsp|mcpu=cortex-m33 \
:%{!r:--be8}}}"
#else
#define BE8_LINK_SPEC \
@@ -121,6 +124,9 @@
|march=armv8-a+crc \
|march=armv8.1-a \
|march=armv8.1-a+crc \
+ |march=armv8-m.base|mcpu=cortex-m23 \
+ |march=armv8-m.main \
+ |march=armv8-m.main+dsp|mcpu=cortex-m33 \
:%{!r:--be8}}}"
#endif
diff --git a/gcc/config/arm/constraints.md b/gcc/config/arm/constraints.md
index 3b71c4a5270..b04c35c755c 100644
--- a/gcc/config/arm/constraints.md
+++ b/gcc/config/arm/constraints.md
@@ -34,11 +34,13 @@
;; in ARM/Thumb-2 state: Da, Db, Dc, Dd, Dn, Dl, DL, Do, Dv, Dy, Di, Dt, Dp, Dz
;; in Thumb-1 state: Pa, Pb, Pc, Pd, Pe
;; in Thumb-2 state: Pj, PJ, Ps, Pt, Pu, Pv, Pw, Px, Py
+;; in all states: Pf
;; The following memory constraints have been used:
-;; in ARM/Thumb-2 state: Q, Uh, Ut, Uv, Uy, Un, Um, Us
+;; in ARM/Thumb-2 state: Uh, Ut, Uv, Uy, Un, Um, Us
;; in ARM state: Uq
;; in Thumb state: Uu, Uw
+;; in all states: Q
(define_register_constraint "t" "TARGET_32BIT ? VFP_LO_REGS : NO_REGS"
@@ -66,7 +68,7 @@
(define_constraint "j"
"A constant suitable for a MOVW instruction. (ARM/Thumb-2)"
- (and (match_test "TARGET_32BIT && arm_arch_thumb2")
+ (and (match_test "TARGET_HAVE_MOVT")
(ior (and (match_code "high")
(match_test "arm_valid_symbolic_address_p (XEXP (op, 0))"))
(and (match_code "const_int")
@@ -180,6 +182,13 @@
(and (match_code "const_int")
(match_test "TARGET_THUMB1 && ival >= 256 && ival <= 510")))
+(define_constraint "Pf"
+ "Memory models except relaxed, consume or release ones."
+ (and (match_code "const_int")
+ (match_test "!is_mm_relaxed (memmodel_from_int (ival))
+ && !is_mm_consume (memmodel_from_int (ival))
+ && !is_mm_release (memmodel_from_int (ival))")))
+
(define_constraint "Ps"
"@internal In Thumb-2 state a constant in the range -255 to +255"
(and (match_code "const_int")
@@ -407,7 +416,7 @@
(define_memory_constraint "Q"
"@internal
- In ARM/Thumb-2 state an address that is a single base register."
+ An address that is a single base register."
(and (match_code "mem")
(match_test "REG_P (XEXP (op, 0))")))
@@ -438,6 +447,12 @@
(match_code "symbol_ref")
)
+(define_memory_constraint "Uz"
+ "@internal
+ A memory access that is accessible as an LDC/STC operand"
+ (and (match_code "mem")
+ (match_test "arm_coproc_ldc_stc_legitimate_address (op)")))
+
;; We used to have constraint letters for S and R in ARM state, but
;; all uses of these now appear to have been removed.
diff --git a/gcc/config/arm/elf.h b/gcc/config/arm/elf.h
index 77f30554d52..03931eee739 100644
--- a/gcc/config/arm/elf.h
+++ b/gcc/config/arm/elf.h
@@ -75,16 +75,7 @@
/* We might need a ARM specific header to function declarations. */
#undef ASM_DECLARE_FUNCTION_NAME
-#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
- do \
- { \
- ARM_DECLARE_FUNCTION_NAME (FILE, NAME, DECL); \
- ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "function"); \
- ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
- ASM_OUTPUT_LABEL(FILE, NAME); \
- ARM_OUTPUT_FN_UNWIND (FILE, TRUE); \
- } \
- while (0)
+#define ASM_DECLARE_FUNCTION_NAME arm_asm_declare_function_name
/* We might need an ARM specific trailer for function declarations. */
#undef ASM_DECLARE_FUNCTION_SIZE
@@ -104,7 +95,8 @@
the code more efficient, but for Thumb-1 it's better to put them out of
band unless we are generating compressed tables. */
#define JUMP_TABLES_IN_TEXT_SECTION \
- (TARGET_32BIT || (TARGET_THUMB && (optimize_size || flag_pic)))
+ ((TARGET_32BIT || (TARGET_THUMB && (optimize_size || flag_pic))) \
+ && !target_pure_code)
#ifndef LINK_SPEC
#define LINK_SPEC "%{mbig-endian:-EB} %{mlittle-endian:-EL} -X"
@@ -148,8 +140,9 @@
while (0)
/* Horrible hack: We want to prevent some libgcc routines being included
- for some multilibs. */
-#ifndef __ARM_ARCH_6M__
+ for some multilibs. The condition should match the one in
+ libgcc/config/arm/lib1funcs.S. */
+#if __ARM_ARCH_ISA_ARM || __ARM_ARCH_ISA_THUMB != 1
#undef L_fixdfsi
#undef L_fixunsdfsi
#undef L_truncdfsf2
diff --git a/gcc/config/arm/iterators.md b/gcc/config/arm/iterators.md
index aba1023cdd0..572affc3fdc 100644
--- a/gcc/config/arm/iterators.md
+++ b/gcc/config/arm/iterators.md
@@ -847,3 +847,50 @@
;; Attributes for VQRDMLAH/VQRDMLSH
(define_int_attr neon_rdma_as [(UNSPEC_VQRDMLAH "a") (UNSPEC_VQRDMLSH "s")])
+
+;; An iterator for the CDP coprocessor instructions
+(define_int_iterator CDPI [VUNSPEC_CDP VUNSPEC_CDP2])
+(define_int_attr cdp [(VUNSPEC_CDP "cdp") (VUNSPEC_CDP2 "cdp2")])
+(define_int_attr CDP [(VUNSPEC_CDP "CDP") (VUNSPEC_CDP2 "CDP2")])
+
+;; An iterator for the LDC coprocessor instructions
+(define_int_iterator LDCI [VUNSPEC_LDC VUNSPEC_LDC2
+ VUNSPEC_LDCL VUNSPEC_LDC2L])
+
+(define_int_attr ldc [(VUNSPEC_LDC "ldc") (VUNSPEC_LDC2 "ldc2")
+ (VUNSPEC_LDCL "ldcl") (VUNSPEC_LDC2L "ldc2l")])
+(define_int_attr LDC [(VUNSPEC_LDC "LDC") (VUNSPEC_LDC2 "LDC2")
+ (VUNSPEC_LDCL "LDCL") (VUNSPEC_LDC2L "LDC2L")])
+
+;; An iterator for the STC coprocessor instructions
+(define_int_iterator STCI [VUNSPEC_STC VUNSPEC_STC2
+ VUNSPEC_STCL VUNSPEC_STC2L])
+
+(define_int_attr stc [(VUNSPEC_STC "stc") (VUNSPEC_STC2 "stc2")
+ (VUNSPEC_STCL "stcl") (VUNSPEC_STC2L "stc2l")])
+(define_int_attr STC [(VUNSPEC_STC "STC") (VUNSPEC_STC2 "STC2")
+ (VUNSPEC_STCL "STCL") (VUNSPEC_STC2L "STC2L")])
+
+;; An iterator for the MCR coprocessor instructions
+(define_int_iterator MCRI [VUNSPEC_MCR VUNSPEC_MCR2])
+
+(define_int_attr mcr [(VUNSPEC_MCR "mcr") (VUNSPEC_MCR2 "mcr2")])
+(define_int_attr MCR [(VUNSPEC_MCR "MCR") (VUNSPEC_MCR2 "MCR2")])
+
+;; An iterator for the MRC coprocessor instructions
+(define_int_iterator MRCI [VUNSPEC_MRC VUNSPEC_MRC2])
+
+(define_int_attr mrc [(VUNSPEC_MRC "mrc") (VUNSPEC_MRC2 "mrc2")])
+(define_int_attr MRC [(VUNSPEC_MRC "MRC") (VUNSPEC_MRC2 "MRC2")])
+
+;; An iterator for the MCRR coprocessor instructions
+(define_int_iterator MCRRI [VUNSPEC_MCRR VUNSPEC_MCRR2])
+
+(define_int_attr mcrr [(VUNSPEC_MCRR "mcrr") (VUNSPEC_MCRR2 "mcrr2")])
+(define_int_attr MCRR [(VUNSPEC_MCRR "MCRR") (VUNSPEC_MCRR2 "MCRR2")])
+
+;; An iterator for the MRRC coprocessor instructions
+(define_int_iterator MRRCI [VUNSPEC_MRRC VUNSPEC_MRRC2])
+
+(define_int_attr mrrc [(VUNSPEC_MRRC "mrrc") (VUNSPEC_MRRC2 "mrrc2")])
+(define_int_attr MRRC [(VUNSPEC_MRRC "MRRC") (VUNSPEC_MRRC2 "MRRC2")])
diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index 1d51c4045a1..20ae62bfb2b 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -3104,7 +3104,7 @@ if (BYTES_BIG_ENDIAN)
VCVT_US_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[2], 1, 33);
+ arm_const_bounds (operands[2], 1, 33);
return "vcvt.<sup>%#32.f32\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_fp_to_int_<V_elem_ch><q>")]
@@ -3117,7 +3117,7 @@ if (BYTES_BIG_ENDIAN)
VCVT_US_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[2], 1, 33);
+ arm_const_bounds (operands[2], 1, 33);
return "vcvt.f32.<sup>%#32\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_int_to_fp_<V_elem_ch><q>")]
@@ -3686,7 +3686,7 @@ if (BYTES_BIG_ENDIAN)
UNSPEC_VEXT))]
"TARGET_NEON"
{
- neon_const_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode));
+ arm_const_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode));
return "vext.<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2, %3";
}
[(set_attr "type" "neon_ext<q>")]
@@ -3783,7 +3783,7 @@ if (BYTES_BIG_ENDIAN)
VSHR_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) + 1);
+ arm_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) + 1);
return "v<shift_op>.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_shift_imm<q>")]
@@ -3797,7 +3797,7 @@ if (BYTES_BIG_ENDIAN)
VSHRN_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) / 2 + 1);
+ arm_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) / 2 + 1);
return "v<shift_op>.<V_if_elem>\t%P0, %q1, %2";
}
[(set_attr "type" "neon_shift_imm_narrow_q")]
@@ -3811,7 +3811,7 @@ if (BYTES_BIG_ENDIAN)
VQSHRN_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) / 2 + 1);
+ arm_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) / 2 + 1);
return "v<shift_op>.<sup>%#<V_sz_elem>\t%P0, %q1, %2";
}
[(set_attr "type" "neon_sat_shift_imm_narrow_q")]
@@ -3825,7 +3825,7 @@ if (BYTES_BIG_ENDIAN)
VQSHRUN_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) / 2 + 1);
+ arm_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) / 2 + 1);
return "v<shift_op>.<V_s_elem>\t%P0, %q1, %2";
}
[(set_attr "type" "neon_sat_shift_imm_narrow_q")]
@@ -3838,7 +3838,7 @@ if (BYTES_BIG_ENDIAN)
UNSPEC_VSHL_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode));
+ arm_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode));
return "vshl.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_shift_imm<q>")]
@@ -3851,7 +3851,7 @@ if (BYTES_BIG_ENDIAN)
VQSHL_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode));
+ arm_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode));
return "vqshl.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_sat_shift_imm<q>")]
@@ -3864,7 +3864,7 @@ if (BYTES_BIG_ENDIAN)
UNSPEC_VQSHLU_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode));
+ arm_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode));
return "vqshlu.<V_s_elem>\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_sat_shift_imm<q>")]
@@ -3878,7 +3878,7 @@ if (BYTES_BIG_ENDIAN)
"TARGET_NEON"
{
/* The boundaries are: 0 < imm <= size. */
- neon_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode) + 1);
+ arm_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode) + 1);
return "vshll.<sup>%#<V_sz_elem>\t%q0, %P1, %2";
}
[(set_attr "type" "neon_shift_imm_long")]
@@ -3893,7 +3893,7 @@ if (BYTES_BIG_ENDIAN)
VSRA_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[3], 1, neon_element_bits (<MODE>mode) + 1);
+ arm_const_bounds (operands[3], 1, neon_element_bits (<MODE>mode) + 1);
return "v<shift_op>.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>2, %3";
}
[(set_attr "type" "neon_shift_acc<q>")]
@@ -3907,7 +3907,7 @@ if (BYTES_BIG_ENDIAN)
UNSPEC_VSRI))]
"TARGET_NEON"
{
- neon_const_bounds (operands[3], 1, neon_element_bits (<MODE>mode) + 1);
+ arm_const_bounds (operands[3], 1, neon_element_bits (<MODE>mode) + 1);
return "vsri.<V_sz_elem>\t%<V_reg>0, %<V_reg>2, %3";
}
[(set_attr "type" "neon_shift_reg<q>")]
@@ -3921,7 +3921,7 @@ if (BYTES_BIG_ENDIAN)
UNSPEC_VSLI))]
"TARGET_NEON"
{
- neon_const_bounds (operands[3], 0, neon_element_bits (<MODE>mode));
+ arm_const_bounds (operands[3], 0, neon_element_bits (<MODE>mode));
return "vsli.<V_sz_elem>\t%<V_reg>0, %<V_reg>2, %3";
}
[(set_attr "type" "neon_shift_reg<q>")]
diff --git a/gcc/config/arm/predicates.md b/gcc/config/arm/predicates.md
index b1cd556211a..73d3e521cc4 100644
--- a/gcc/config/arm/predicates.md
+++ b/gcc/config/arm/predicates.md
@@ -398,6 +398,12 @@
|| mode == CC_DGTUmode));
})
+;; Any register, including CC
+(define_predicate "cc_register_operand"
+ (and (match_code "reg")
+ (ior (match_operand 0 "s_register_operand")
+ (match_operand 0 "cc_register"))))
+
(define_special_predicate "arm_extendqisi_mem_op"
(and (match_operand 0 "memory_operand")
(match_test "TARGET_ARM ? arm_legitimate_address_outer_p (mode,
diff --git a/gcc/config/arm/sync.md b/gcc/config/arm/sync.md
index 0589e4d8905..64a12d76211 100644
--- a/gcc/config/arm/sync.md
+++ b/gcc/config/arm/sync.md
@@ -63,37 +63,59 @@
(set_attr "predicable" "no")])
(define_insn "atomic_load<mode>"
- [(set (match_operand:QHSI 0 "register_operand" "=r")
+ [(set (match_operand:QHSI 0 "register_operand" "=r,r,l")
(unspec_volatile:QHSI
- [(match_operand:QHSI 1 "arm_sync_memory_operand" "Q")
- (match_operand:SI 2 "const_int_operand")] ;; model
+ [(match_operand:QHSI 1 "arm_sync_memory_operand" "Q,Q,Q")
+ (match_operand:SI 2 "const_int_operand" "n,Pf,n")] ;; model
VUNSPEC_LDA))]
"TARGET_HAVE_LDACQ"
{
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
- return \"ldr<sync_sfx>%?\\t%0, %1\";
+ {
+ if (TARGET_THUMB1)
+ return \"ldr<sync_sfx>\\t%0, %1\";
+ else
+ return \"ldr<sync_sfx>%?\\t%0, %1\";
+ }
else
- return \"lda<sync_sfx>%?\\t%0, %1\";
+ {
+ if (TARGET_THUMB1)
+ return \"lda<sync_sfx>\\t%0, %1\";
+ else
+ return \"lda<sync_sfx>%?\\t%0, %1\";
+ }
}
- [(set_attr "predicable" "yes")
+ [(set_attr "arch" "32,v8mb,any")
+ (set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(define_insn "atomic_store<mode>"
- [(set (match_operand:QHSI 0 "memory_operand" "=Q")
+ [(set (match_operand:QHSI 0 "memory_operand" "=Q,Q,Q")
(unspec_volatile:QHSI
- [(match_operand:QHSI 1 "general_operand" "r")
- (match_operand:SI 2 "const_int_operand")] ;; model
+ [(match_operand:QHSI 1 "general_operand" "r,r,l")
+ (match_operand:SI 2 "const_int_operand" "n,Pf,n")] ;; model
VUNSPEC_STL))]
"TARGET_HAVE_LDACQ"
{
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
- return \"str<sync_sfx>%?\t%1, %0\";
+ {
+ if (TARGET_THUMB1)
+ return \"str<sync_sfx>\t%1, %0\";
+ else
+ return \"str<sync_sfx>%?\t%1, %0\";
+ }
else
- return \"stl<sync_sfx>%?\t%1, %0\";
+ {
+ if (TARGET_THUMB1)
+ return \"stl<sync_sfx>\t%1, %0\";
+ else
+ return \"stl<sync_sfx>%?\t%1, %0\";
+ }
}
- [(set_attr "predicable" "yes")
+ [(set_attr "arch" "32,v8mb,any")
+ (set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
;; An LDRD instruction usable by the atomic_loaddi expander on LPAE targets
@@ -117,7 +139,7 @@
[(match_operand:DI 0 "s_register_operand") ;; val out
(match_operand:DI 1 "mem_noofs_operand") ;; memory
(match_operand:SI 2 "const_int_operand")] ;; model
- "(TARGET_HAVE_LDREXD || TARGET_HAVE_LPAE || TARGET_HAVE_LDACQ)
+ "(TARGET_HAVE_LDREXD || TARGET_HAVE_LPAE || TARGET_HAVE_LDACQEXD)
&& ARM_DOUBLEWORD_ALIGN"
{
memmodel model = memmodel_from_int (INTVAL (operands[2]));
@@ -125,7 +147,7 @@
/* For ARMv8-A we can use an LDAEXD to atomically load two 32-bit registers
when acquire or stronger semantics are needed. When the relaxed model is
used this can be relaxed to a normal LDRD. */
- if (TARGET_HAVE_LDACQ)
+ if (TARGET_HAVE_LDACQEXD)
{
if (is_mm_relaxed (model))
emit_insn (gen_arm_atomic_loaddi2_ldrd (operands[0], operands[1]));
@@ -167,21 +189,23 @@
DONE;
})
+;; Constraints of this pattern must be at least as strict as those of the
+;; cbranchsi operations in thumb1.md and aim to be as permissive.
(define_insn_and_split "atomic_compare_and_swap<mode>_1"
- [(set (reg:CC_Z CC_REGNUM) ;; bool out
+ [(set (match_operand 0 "cc_register_operand" "=&c,&l,&l,&l") ;; bool out
(unspec_volatile:CC_Z [(const_int 0)] VUNSPEC_ATOMIC_CAS))
- (set (match_operand:SI 0 "s_register_operand" "=&r") ;; val out
+ (set (match_operand:SI 1 "s_register_operand" "=&r,&l,&0,&l*h") ;; val out
(zero_extend:SI
- (match_operand:NARROW 1 "mem_noofs_operand" "+Ua"))) ;; memory
- (set (match_dup 1)
+ (match_operand:NARROW 2 "mem_noofs_operand" "+Ua,Ua,Ua,Ua"))) ;; memory
+ (set (match_dup 2)
(unspec_volatile:NARROW
- [(match_operand:SI 2 "arm_add_operand" "rIL") ;; expected
- (match_operand:NARROW 3 "s_register_operand" "r") ;; desired
- (match_operand:SI 4 "const_int_operand") ;; is_weak
- (match_operand:SI 5 "const_int_operand") ;; mod_s
- (match_operand:SI 6 "const_int_operand")] ;; mod_f
+ [(match_operand:SI 3 "arm_add_operand" "rIL,lIL*h,J,*r") ;; expected
+ (match_operand:NARROW 4 "s_register_operand" "r,r,r,r") ;; desired
+ (match_operand:SI 5 "const_int_operand") ;; is_weak
+ (match_operand:SI 6 "const_int_operand") ;; mod_s
+ (match_operand:SI 7 "const_int_operand")] ;; mod_f
VUNSPEC_ATOMIC_CAS))
- (clobber (match_scratch:SI 7 "=&r"))]
+ (clobber (match_scratch:SI 8 "=&r,X,X,X"))]
"<sync_predtab>"
"#"
"&& reload_completed"
@@ -189,27 +213,30 @@
{
arm_split_compare_and_swap (operands);
DONE;
- })
+ }
+ [(set_attr "arch" "32,v8mb,v8mb,v8mb")])
(define_mode_attr cas_cmp_operand
[(SI "arm_add_operand") (DI "cmpdi_operand")])
(define_mode_attr cas_cmp_str
[(SI "rIL") (DI "rDi")])
+;; Constraints of this pattern must be at least as strict as those of the
+;; cbranchsi operations in thumb1.md and aim to be as permissive.
(define_insn_and_split "atomic_compare_and_swap<mode>_1"
- [(set (reg:CC_Z CC_REGNUM) ;; bool out
+ [(set (match_operand 0 "cc_register_operand" "=&c,&l,&l,&l") ;; bool out
(unspec_volatile:CC_Z [(const_int 0)] VUNSPEC_ATOMIC_CAS))
- (set (match_operand:SIDI 0 "s_register_operand" "=&r") ;; val out
- (match_operand:SIDI 1 "mem_noofs_operand" "+Ua")) ;; memory
- (set (match_dup 1)
+ (set (match_operand:SIDI 1 "s_register_operand" "=&r,&l,&0,&l*h") ;; val out
+ (match_operand:SIDI 2 "mem_noofs_operand" "+Ua,Ua,Ua,Ua")) ;; memory
+ (set (match_dup 2)
(unspec_volatile:SIDI
- [(match_operand:SIDI 2 "<cas_cmp_operand>" "<cas_cmp_str>") ;; expect
- (match_operand:SIDI 3 "s_register_operand" "r") ;; desired
- (match_operand:SI 4 "const_int_operand") ;; is_weak
- (match_operand:SI 5 "const_int_operand") ;; mod_s
- (match_operand:SI 6 "const_int_operand")] ;; mod_f
+ [(match_operand:SIDI 3 "<cas_cmp_operand>" "<cas_cmp_str>,lIL*h,J,*r") ;; expect
+ (match_operand:SIDI 4 "s_register_operand" "r,r,r,r") ;; desired
+ (match_operand:SI 5 "const_int_operand") ;; is_weak
+ (match_operand:SI 6 "const_int_operand") ;; mod_s
+ (match_operand:SI 7 "const_int_operand")] ;; mod_f
VUNSPEC_ATOMIC_CAS))
- (clobber (match_scratch:SI 7 "=&r"))]
+ (clobber (match_scratch:SI 8 "=&r,X,X,X"))]
"<sync_predtab>"
"#"
"&& reload_completed"
@@ -217,18 +244,19 @@
{
arm_split_compare_and_swap (operands);
DONE;
- })
+ }
+ [(set_attr "arch" "32,v8mb,v8mb,v8mb")])
(define_insn_and_split "atomic_exchange<mode>"
- [(set (match_operand:QHSD 0 "s_register_operand" "=&r") ;; output
- (match_operand:QHSD 1 "mem_noofs_operand" "+Ua")) ;; memory
+ [(set (match_operand:QHSD 0 "s_register_operand" "=&r,&r") ;; output
+ (match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua")) ;; memory
(set (match_dup 1)
(unspec_volatile:QHSD
- [(match_operand:QHSD 2 "s_register_operand" "r") ;; input
+ [(match_operand:QHSD 2 "s_register_operand" "r,r") ;; input
(match_operand:SI 3 "const_int_operand" "")] ;; model
VUNSPEC_ATOMIC_XCHG))
(clobber (reg:CC CC_REGNUM))
- (clobber (match_scratch:SI 4 "=&r"))]
+ (clobber (match_scratch:SI 4 "=&r,&l"))]
"<sync_predtab>"
"#"
"&& reload_completed"
@@ -237,7 +265,11 @@
arm_split_atomic_op (SET, operands[0], NULL, operands[1],
operands[2], operands[3], operands[4]);
DONE;
- })
+ }
+ [(set_attr "arch" "32,v8mb")])
+
+;; The following mode and code attribute are defined here because they are
+;; specific to atomics and are not needed anywhere else.
(define_mode_attr atomic_op_operand
[(QI "reg_or_int_operand")
@@ -248,16 +280,24 @@
(define_mode_attr atomic_op_str
[(QI "rn") (HI "rn") (SI "rn") (DI "r")])
+(define_code_attr thumb1_atomic_op_str
+ [(ior "l,l") (xor "l,l") (and "l,l") (plus "lIJL,r") (minus "lPd,lPd")])
+
+(define_code_attr thumb1_atomic_newop_str
+ [(ior "&l,&l") (xor "&l,&l") (and "&l,&l") (plus "&l,&r") (minus "&l,&l")])
+
+;; Constraints of this pattern must be at least as strict as those of the non
+;; atomic operations in thumb1.md and aim to be as permissive.
(define_insn_and_split "atomic_<sync_optab><mode>"
- [(set (match_operand:QHSD 0 "mem_noofs_operand" "+Ua")
+ [(set (match_operand:QHSD 0 "mem_noofs_operand" "+Ua,Ua,Ua")
(unspec_volatile:QHSD
[(syncop:QHSD (match_dup 0)
- (match_operand:QHSD 1 "<atomic_op_operand>" "<atomic_op_str>"))
+ (match_operand:QHSD 1 "<atomic_op_operand>" "<atomic_op_str>,<thumb1_atomic_op_str>"))
(match_operand:SI 2 "const_int_operand")] ;; model
VUNSPEC_ATOMIC_OP))
(clobber (reg:CC CC_REGNUM))
- (clobber (match_scratch:QHSD 3 "=&r"))
- (clobber (match_scratch:SI 4 "=&r"))]
+ (clobber (match_scratch:QHSD 3 "=&r,<thumb1_atomic_newop_str>"))
+ (clobber (match_scratch:SI 4 "=&r,&l,&l"))]
"<sync_predtab>"
"#"
"&& reload_completed"
@@ -266,19 +306,22 @@
arm_split_atomic_op (<CODE>, NULL, operands[3], operands[0],
operands[1], operands[2], operands[4]);
DONE;
- })
+ }
+ [(set_attr "arch" "32,v8mb,v8mb")])
+;; Constraints of this pattern must be at least as strict as those of the non
+;; atomic NANDs in thumb1.md and aim to be as permissive.
(define_insn_and_split "atomic_nand<mode>"
- [(set (match_operand:QHSD 0 "mem_noofs_operand" "+Ua")
+ [(set (match_operand:QHSD 0 "mem_noofs_operand" "+Ua,Ua")
(unspec_volatile:QHSD
[(not:QHSD
(and:QHSD (match_dup 0)
- (match_operand:QHSD 1 "<atomic_op_operand>" "<atomic_op_str>")))
+ (match_operand:QHSD 1 "<atomic_op_operand>" "<atomic_op_str>,l")))
(match_operand:SI 2 "const_int_operand")] ;; model
VUNSPEC_ATOMIC_OP))
(clobber (reg:CC CC_REGNUM))
- (clobber (match_scratch:QHSD 3 "=&r"))
- (clobber (match_scratch:SI 4 "=&r"))]
+ (clobber (match_scratch:QHSD 3 "=&r,&l"))
+ (clobber (match_scratch:SI 4 "=&r,&l"))]
"<sync_predtab>"
"#"
"&& reload_completed"
@@ -287,20 +330,38 @@
arm_split_atomic_op (NOT, NULL, operands[3], operands[0],
operands[1], operands[2], operands[4]);
DONE;
- })
+ }
+ [(set_attr "arch" "32,v8mb")])
+
+;; 3 alternatives are needed to represent constraints after split from
+;; thumb1_addsi3: (i) case where operand1 and destination can be in different
+;; registers, (ii) case where they are in the same low register and (iii) case
+;; when they are in the same register without restriction on the register. We
+;; disparage slightly alternatives that require copying the old value into the
+;; register for the new value (see bind_old_new in arm_split_atomic_op).
+(define_code_attr thumb1_atomic_fetch_op_str
+ [(ior "l,l,l") (xor "l,l,l") (and "l,l,l") (plus "lL,?IJ,?r") (minus "lPd,lPd,lPd")])
+
+(define_code_attr thumb1_atomic_fetch_newop_str
+ [(ior "&l,&l,&l") (xor "&l,&l,&l") (and "&l,&l,&l") (plus "&l,&l,&r") (minus "&l,&l,&l")])
+(define_code_attr thumb1_atomic_fetch_oldop_str
+ [(ior "&r,&r,&r") (xor "&r,&r,&r") (and "&r,&r,&r") (plus "&l,&r,&r") (minus "&l,&l,&l")])
+
+;; Constraints of this pattern must be at least as strict as those of the non
+;; atomic operations in thumb1.md and aim to be as permissive.
(define_insn_and_split "atomic_fetch_<sync_optab><mode>"
- [(set (match_operand:QHSD 0 "s_register_operand" "=&r")
- (match_operand:QHSD 1 "mem_noofs_operand" "+Ua"))
+ [(set (match_operand:QHSD 0 "s_register_operand" "=&r,<thumb1_atomic_fetch_oldop_str>")
+ (match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua,Ua,Ua"))
(set (match_dup 1)
(unspec_volatile:QHSD
[(syncop:QHSD (match_dup 1)
- (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>"))
+ (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>,<thumb1_atomic_fetch_op_str>"))
(match_operand:SI 3 "const_int_operand")] ;; model
VUNSPEC_ATOMIC_OP))
(clobber (reg:CC CC_REGNUM))
- (clobber (match_scratch:QHSD 4 "=&r"))
- (clobber (match_scratch:SI 5 "=&r"))]
+ (clobber (match_scratch:QHSD 4 "=&r,<thumb1_atomic_fetch_newop_str>"))
+ (clobber (match_scratch:SI 5 "=&r,&l,&l,&l"))]
"<sync_predtab>"
"#"
"&& reload_completed"
@@ -309,21 +370,24 @@
arm_split_atomic_op (<CODE>, operands[0], operands[4], operands[1],
operands[2], operands[3], operands[5]);
DONE;
- })
+ }
+ [(set_attr "arch" "32,v8mb,v8mb,v8mb")])
+;; Constraints of this pattern must be at least as strict as those of the non
+;; atomic NANDs in thumb1.md and aim to be as permissive.
(define_insn_and_split "atomic_fetch_nand<mode>"
- [(set (match_operand:QHSD 0 "s_register_operand" "=&r")
- (match_operand:QHSD 1 "mem_noofs_operand" "+Ua"))
+ [(set (match_operand:QHSD 0 "s_register_operand" "=&r,&r")
+ (match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua"))
(set (match_dup 1)
(unspec_volatile:QHSD
[(not:QHSD
(and:QHSD (match_dup 1)
- (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>")))
+ (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>,l")))
(match_operand:SI 3 "const_int_operand")] ;; model
VUNSPEC_ATOMIC_OP))
(clobber (reg:CC CC_REGNUM))
- (clobber (match_scratch:QHSD 4 "=&r"))
- (clobber (match_scratch:SI 5 "=&r"))]
+ (clobber (match_scratch:QHSD 4 "=&r,&l"))
+ (clobber (match_scratch:SI 5 "=&r,&l"))]
"<sync_predtab>"
"#"
"&& reload_completed"
@@ -332,20 +396,23 @@
arm_split_atomic_op (NOT, operands[0], operands[4], operands[1],
operands[2], operands[3], operands[5]);
DONE;
- })
+ }
+ [(set_attr "arch" "32,v8mb")])
+;; Constraints of this pattern must be at least as strict as those of the non
+;; atomic operations in thumb1.md and aim to be as permissive.
(define_insn_and_split "atomic_<sync_optab>_fetch<mode>"
- [(set (match_operand:QHSD 0 "s_register_operand" "=&r")
+ [(set (match_operand:QHSD 0 "s_register_operand" "=&r,<thumb1_atomic_newop_str>")
(syncop:QHSD
- (match_operand:QHSD 1 "mem_noofs_operand" "+Ua")
- (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>")))
+ (match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua,Ua")
+ (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>,<thumb1_atomic_op_str>")))
(set (match_dup 1)
(unspec_volatile:QHSD
[(match_dup 1) (match_dup 2)
(match_operand:SI 3 "const_int_operand")] ;; model
VUNSPEC_ATOMIC_OP))
(clobber (reg:CC CC_REGNUM))
- (clobber (match_scratch:SI 4 "=&r"))]
+ (clobber (match_scratch:SI 4 "=&r,&l,&l"))]
"<sync_predtab>"
"#"
"&& reload_completed"
@@ -354,21 +421,24 @@
arm_split_atomic_op (<CODE>, NULL, operands[0], operands[1],
operands[2], operands[3], operands[4]);
DONE;
- })
+ }
+ [(set_attr "arch" "32,v8mb,v8mb")])
+;; Constraints of this pattern must be at least as strict as those of the non
+;; atomic NANDs in thumb1.md and aim to be as permissive.
(define_insn_and_split "atomic_nand_fetch<mode>"
- [(set (match_operand:QHSD 0 "s_register_operand" "=&r")
+ [(set (match_operand:QHSD 0 "s_register_operand" "=&r,&l")
(not:QHSD
(and:QHSD
- (match_operand:QHSD 1 "mem_noofs_operand" "+Ua")
- (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>"))))
+ (match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua")
+ (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>,l"))))
(set (match_dup 1)
(unspec_volatile:QHSD
[(match_dup 1) (match_dup 2)
(match_operand:SI 3 "const_int_operand")] ;; model
VUNSPEC_ATOMIC_OP))
(clobber (reg:CC CC_REGNUM))
- (clobber (match_scratch:SI 4 "=&r"))]
+ (clobber (match_scratch:SI 4 "=&r,&l"))]
"<sync_predtab>"
"#"
"&& reload_completed"
@@ -377,48 +447,61 @@
arm_split_atomic_op (NOT, NULL, operands[0], operands[1],
operands[2], operands[3], operands[4]);
DONE;
- })
+ }
+ [(set_attr "arch" "32,v8mb")])
(define_insn "arm_load_exclusive<mode>"
- [(set (match_operand:SI 0 "s_register_operand" "=r")
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
(zero_extend:SI
(unspec_volatile:NARROW
- [(match_operand:NARROW 1 "mem_noofs_operand" "Ua")]
+ [(match_operand:NARROW 1 "mem_noofs_operand" "Ua,Ua")]
VUNSPEC_LL)))]
"TARGET_HAVE_LDREXBH"
- "ldrex<sync_sfx>%?\t%0, %C1"
- [(set_attr "predicable" "yes")
+ "@
+ ldrex<sync_sfx>%?\t%0, %C1
+ ldrex<sync_sfx>\t%0, %C1"
+ [(set_attr "arch" "32,v8mb")
+ (set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(define_insn "arm_load_acquire_exclusive<mode>"
- [(set (match_operand:SI 0 "s_register_operand" "=r")
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
(zero_extend:SI
(unspec_volatile:NARROW
- [(match_operand:NARROW 1 "mem_noofs_operand" "Ua")]
+ [(match_operand:NARROW 1 "mem_noofs_operand" "Ua,Ua")]
VUNSPEC_LAX)))]
"TARGET_HAVE_LDACQ"
- "ldaex<sync_sfx>%?\\t%0, %C1"
- [(set_attr "predicable" "yes")
+ "@
+ ldaex<sync_sfx>%?\\t%0, %C1
+ ldaex<sync_sfx>\\t%0, %C1"
+ [(set_attr "arch" "32,v8mb")
+ (set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(define_insn "arm_load_exclusivesi"
- [(set (match_operand:SI 0 "s_register_operand" "=r")
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
(unspec_volatile:SI
- [(match_operand:SI 1 "mem_noofs_operand" "Ua")]
+ [(match_operand:SI 1 "mem_noofs_operand" "Ua,Ua")]
VUNSPEC_LL))]
"TARGET_HAVE_LDREX"
- "ldrex%?\t%0, %C1"
- [(set_attr "predicable" "yes")
+ "@
+ ldrex%?\t%0, %C1
+ ldrex\t%0, %C1"
+ [(set_attr "arch" "32,v8mb")
+ (set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(define_insn "arm_load_acquire_exclusivesi"
- [(set (match_operand:SI 0 "s_register_operand" "=r")
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
(unspec_volatile:SI
- [(match_operand:SI 1 "mem_noofs_operand" "Ua")]
+ [(match_operand:SI 1 "mem_noofs_operand" "Ua,Ua")]
VUNSPEC_LAX))]
"TARGET_HAVE_LDACQ"
- "ldaex%?\t%0, %C1"
- [(set_attr "predicable" "yes")
+ "@
+ ldaex%?\t%0, %C1
+ ldaex\t%0, %C1"
+ [(set_attr "arch" "32,v8mb")
+ (set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(define_insn "arm_load_exclusivedi"
@@ -436,7 +519,7 @@
(unspec_volatile:DI
[(match_operand:DI 1 "mem_noofs_operand" "Ua")]
VUNSPEC_LAX))]
- "TARGET_HAVE_LDACQ && ARM_DOUBLEWORD_ALIGN"
+ "TARGET_HAVE_LDACQEXD && ARM_DOUBLEWORD_ALIGN"
"ldaexd%?\t%0, %H0, %C1"
[(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
@@ -461,7 +544,10 @@
operands[3] = gen_rtx_REG (SImode, REGNO (value) + 1);
return "strexd%?\t%0, %2, %3, %C1";
}
- return "strex<sync_sfx>%?\t%0, %2, %C1";
+ if (TARGET_THUMB1)
+ return "strex<sync_sfx>\t%0, %2, %C1";
+ else
+ return "strex<sync_sfx>%?\t%0, %2, %C1";
}
[(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
@@ -473,7 +559,7 @@
(unspec_volatile:DI
[(match_operand:DI 2 "s_register_operand" "r")]
VUNSPEC_SLX))]
- "TARGET_HAVE_LDACQ && ARM_DOUBLEWORD_ALIGN"
+ "TARGET_HAVE_LDACQEXD && ARM_DOUBLEWORD_ALIGN"
{
rtx value = operands[2];
/* See comment in arm_store_exclusive<mode> above. */
@@ -485,13 +571,16 @@
(set_attr "predicable_short_it" "no")])
(define_insn "arm_store_release_exclusive<mode>"
- [(set (match_operand:SI 0 "s_register_operand" "=&r")
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
(unspec_volatile:SI [(const_int 0)] VUNSPEC_SLX))
- (set (match_operand:QHSI 1 "mem_noofs_operand" "=Ua")
+ (set (match_operand:QHSI 1 "mem_noofs_operand" "=Ua,Ua")
(unspec_volatile:QHSI
- [(match_operand:QHSI 2 "s_register_operand" "r")]
+ [(match_operand:QHSI 2 "s_register_operand" "r,r")]
VUNSPEC_SLX))]
"TARGET_HAVE_LDACQ"
- "stlex<sync_sfx>%?\t%0, %2, %C1"
- [(set_attr "predicable" "yes")
+ "@
+ stlex<sync_sfx>%?\t%0, %2, %C1
+ stlex<sync_sfx>\t%0, %2, %C1"
+ [(set_attr "arch" "32,v8mb")
+ (set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
diff --git a/gcc/config/arm/t-rmprofile b/gcc/config/arm/t-rmprofile
new file mode 100644
index 00000000000..93aa909b4d9
--- /dev/null
+++ b/gcc/config/arm/t-rmprofile
@@ -0,0 +1,176 @@
+# Copyright (C) 2016 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# This is a target makefile fragment that attempts to get
+# multilibs built for the range of CPU's, FPU's and ABI's that
+# are relevant for the ARM architecture. It should not be used in
+# conjunction with another make file fragment and assumes --with-arch,
+# --with-cpu, --with-fpu, --with-float, --with-mode have their default
+# values during the configure step. We enforce this during the
+# top-level configury.
+
+MULTILIB_OPTIONS =
+MULTILIB_DIRNAMES =
+MULTILIB_EXCEPTIONS =
+MULTILIB_MATCHES =
+MULTILIB_REUSE =
+
+# We have the following hierachy:
+# ISA: A32 (.) or T16/T32 (thumb).
+# Architecture: ARMv6S-M (v6-m), ARMv7-M (v7-m), ARMv7E-M (v7e-m),
+# ARMv8-M Baseline (v8-m.base) or ARMv8-M Mainline (v8-m.main).
+# FPU: VFPv3-D16 (fpv3), FPV4-SP-D16 (fpv4-sp), FPV5-SP-D16 (fpv5-sp),
+# VFPv5-D16 (fpv5), or None (.).
+# Float-abi: Soft (.), softfp (softfp), or hard (hardfp).
+
+# Options to build libraries with
+
+MULTILIB_OPTIONS += mthumb
+MULTILIB_DIRNAMES += thumb
+
+MULTILIB_OPTIONS += march=armv6s-m/march=armv7-m/march=armv7e-m/march=armv7/march=armv8-m.base/march=armv8-m.main
+MULTILIB_DIRNAMES += v6-m v7-m v7e-m v7-ar v8-m.base v8-m.main
+
+MULTILIB_OPTIONS += mfpu=vfpv3-d16/mfpu=fpv4-sp-d16/mfpu=fpv5-sp-d16/mfpu=fpv5-d16
+MULTILIB_DIRNAMES += fpv3 fpv4-sp fpv5-sp fpv5
+
+MULTILIB_OPTIONS += mfloat-abi=softfp/mfloat-abi=hard
+MULTILIB_DIRNAMES += softfp hard
+
+
+# Option combinations to build library with
+
+# Default CPU/Arch
+MULTILIB_REQUIRED += mthumb
+MULTILIB_REQUIRED += mfloat-abi=hard
+
+# ARMv6-M
+MULTILIB_REQUIRED += mthumb/march=armv6s-m
+
+# ARMv8-M Baseline
+MULTILIB_REQUIRED += mthumb/march=armv8-m.base
+
+# ARMv7-M
+MULTILIB_REQUIRED += mthumb/march=armv7-m
+
+# ARMv7E-M
+MULTILIB_REQUIRED += mthumb/march=armv7e-m
+MULTILIB_REQUIRED += mthumb/march=armv7e-m/mfpu=fpv4-sp-d16/mfloat-abi=softfp
+MULTILIB_REQUIRED += mthumb/march=armv7e-m/mfpu=fpv4-sp-d16/mfloat-abi=hard
+MULTILIB_REQUIRED += mthumb/march=armv7e-m/mfpu=fpv5-d16/mfloat-abi=softfp
+MULTILIB_REQUIRED += mthumb/march=armv7e-m/mfpu=fpv5-d16/mfloat-abi=hard
+MULTILIB_REQUIRED += mthumb/march=armv7e-m/mfpu=fpv5-sp-d16/mfloat-abi=softfp
+MULTILIB_REQUIRED += mthumb/march=armv7e-m/mfpu=fpv5-sp-d16/mfloat-abi=hard
+
+# ARMv8-M Mainline
+MULTILIB_REQUIRED += mthumb/march=armv8-m.main
+MULTILIB_REQUIRED += mthumb/march=armv8-m.main/mfpu=fpv5-d16/mfloat-abi=softfp
+MULTILIB_REQUIRED += mthumb/march=armv8-m.main/mfpu=fpv5-d16/mfloat-abi=hard
+MULTILIB_REQUIRED += mthumb/march=armv8-m.main/mfpu=fpv5-sp-d16/mfloat-abi=softfp
+MULTILIB_REQUIRED += mthumb/march=armv8-m.main/mfpu=fpv5-sp-d16/mfloat-abi=hard
+
+# ARMv7-R as well as ARMv7-A and ARMv8-A if aprofile was not specified
+MULTILIB_REQUIRED += mthumb/march=armv7
+MULTILIB_REQUIRED += mthumb/march=armv7/mfpu=vfpv3-d16/mfloat-abi=softfp
+MULTILIB_REQUIRED += mthumb/march=armv7/mfpu=vfpv3-d16/mfloat-abi=hard
+
+
+# Matches
+
+# CPU Matches
+MULTILIB_MATCHES += march?armv6s-m=mcpu?cortex-m0
+MULTILIB_MATCHES += march?armv6s-m=mcpu?cortex-m0.small-multiply
+MULTILIB_MATCHES += march?armv6s-m=mcpu?cortex-m0plus
+MULTILIB_MATCHES += march?armv6s-m=mcpu?cortex-m0plus.small-multiply
+MULTILIB_MATCHES += march?armv6s-m=mcpu?cortex-m1
+MULTILIB_MATCHES += march?armv6s-m=mcpu?cortex-m1.small-multiply
+MULTILIB_MATCHES += march?armv7-m=mcpu?cortex-m3
+MULTILIB_MATCHES += march?armv7e-m=mcpu?cortex-m4
+MULTILIB_MATCHES += march?armv7e-m=mcpu?cortex-m7
+MULTILIB_MATCHES += march?armv8-m.base=mcpu?cortex-m23
+MULTILIB_MATCHES += march?armv8-m.main=mcpu?cortex-m33
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-r4
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-r4f
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-r5
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-r7
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-r8
+MULTILIB_MATCHES += march?armv7=mcpu?marvell-pj4
+MULTILIB_MATCHES += march?armv7=mcpu?generic-armv7-a
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a8
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a9
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a5
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a7
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a15
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a12
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a17
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a15.cortex-a7
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a17.cortex-a7
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a32
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a35
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a53
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a57
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a57.cortex-a53
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a72
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a72.cortex-a53
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a73
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a73.cortex-a35
+MULTILIB_MATCHES += march?armv7=mcpu?cortex-a73.cortex-a53
+MULTILIB_MATCHES += march?armv7=mcpu?exynos-m1
+MULTILIB_MATCHES += march?armv7=mcpu?qdf24xx
+MULTILIB_MATCHES += march?armv7=mcpu?xgene1
+
+# Arch Matches
+MULTILIB_MATCHES += march?armv6s-m=march?armv6-m
+MULTILIB_MATCHES += march?armv8-m.main=march?armv8-m.main+dsp
+MULTILIB_MATCHES += march?armv7=march?armv7-r
+ifeq (,$(HAS_APROFILE))
+MULTILIB_MATCHES += march?armv7=march?armv7-a
+MULTILIB_MATCHES += march?armv7=march?armv7ve
+MULTILIB_MATCHES += march?armv7=march?armv8-a
+MULTILIB_MATCHES += march?armv7=march?armv8-a+crc
+MULTILIB_MATCHES += march?armv7=march?armv8.1-a
+MULTILIB_MATCHES += march?armv7=march?armv8.1-a+crc
+MULTILIB_MATCHES += march?armv7=march?armv8.2-a
+MULTILIB_MATCHES += march?armv7=march?armv8.2-a+fp16
+endif
+
+# FPU matches
+ifeq (,$(HAS_APROFILE))
+MULTILIB_MATCHES += mfpu?vfpv3-d16=mfpu?vfpv3
+MULTILIB_MATCHES += mfpu?vfpv3-d16=mfpu?vfpv3-fp16
+MULTILIB_MATCHES += mfpu?vfpv3-d16=mfpu?vfpv3-d16-fp16
+MULTILIB_MATCHES += mfpu?vfpv3-d16=mfpu?neon
+MULTILIB_MATCHES += mfpu?vfpv3-d16=mfpu?neon-fp16
+MULTILIB_MATCHES += mfpu?vfpv3-d16=mfpu?vfpv4
+MULTILIB_MATCHES += mfpu?vfpv3-d16=mfpu?vfpv4-d16
+MULTILIB_MATCHES += mfpu?vfpv3-d16=mfpu?neon-vfpv4
+MULTILIB_MATCHES += mfpu?fpv5-d16=mfpu?fp-armv8
+MULTILIB_MATCHES += mfpu?fpv5-d16=mfpu?neon-fp-armv8
+MULTILIB_MATCHES += mfpu?fpv5-d16=mfpu?crypto-neon-fp-armv8
+endif
+
+
+# We map all requests for ARMv7-R or ARMv7-A in ARM mode to Thumb mode and
+# any FPU to VFPv3-d16 if possible.
+MULTILIB_REUSE += mthumb/march.armv7=march.armv7
+MULTILIB_REUSE += mthumb/march.armv7/mfpu.vfpv3-d16/mfloat-abi.softfp=march.armv7/mfpu.vfpv3-d16/mfloat-abi.softfp
+MULTILIB_REUSE += mthumb/march.armv7/mfpu.vfpv3-d16/mfloat-abi.hard=march.armv7/mfpu.vfpv3-d16/mfloat-abi.hard
+MULTILIB_REUSE += mthumb/march.armv7/mfpu.vfpv3-d16/mfloat-abi.softfp=march.armv7/mfpu.fpv5-d16/mfloat-abi.softfp
+MULTILIB_REUSE += mthumb/march.armv7/mfpu.vfpv3-d16/mfloat-abi.hard=march.armv7/mfpu.fpv5-d16/mfloat-abi.hard
+MULTILIB_REUSE += mthumb/march.armv7/mfpu.vfpv3-d16/mfloat-abi.softfp=mthumb/march.armv7/mfpu.fpv5-d16/mfloat-abi.softfp
+MULTILIB_REUSE += mthumb/march.armv7/mfpu.vfpv3-d16/mfloat-abi.hard=mthumb/march.armv7/mfpu.fpv5-d16/mfloat-abi.hard
diff --git a/gcc/config/arm/thumb1.md b/gcc/config/arm/thumb1.md
index 072ed4da47a..af8a9f8e0a9 100644
--- a/gcc/config/arm/thumb1.md
+++ b/gcc/config/arm/thumb1.md
@@ -55,6 +55,10 @@
(set_attr "type" "multiple")]
)
+;; Changes to the constraints of this pattern must be propagated to those of
+;; atomic additions in sync.md and to the logic for bind_old_new in
+;; arm_split_atomic_op in arm.c. These must be at least as strict as the
+;; constraints here and aim to be as permissive.
(define_insn_and_split "*thumb1_addsi3"
[(set (match_operand:SI 0 "register_operand" "=l,l,l,*rk,*hk,l,k,l,l,l")
(plus:SI (match_operand:SI 1 "register_operand" "%0,0,l,*0,*0,k,k,0,l,k")
@@ -131,6 +135,10 @@
(set_attr "type" "multiple")]
)
+;; Changes to the constraints of this pattern must be propagated to those of
+;; atomic subtractions in sync.md and to the logic for bind_old_new in
+;; arm_split_atomic_op in arm.c. These must be at least as strict as the
+;; constraints here and aim to be as permissive.
(define_insn "thumb1_subsi3_insn"
[(set (match_operand:SI 0 "register_operand" "=l")
(minus:SI (match_operand:SI 1 "register_operand" "l")
@@ -173,6 +181,10 @@
(set_attr "type" "muls")]
)
+;; Changes to the constraints of this pattern must be propagated to those of
+;; atomic bitwise ANDs and NANDs in sync.md and to the logic for bind_old_new
+;; in arm_split_atomic_op in arm.c. These must be at least as strict as the
+;; constraints here and aim to be as permissive.
(define_insn "*thumb1_andsi3_insn"
[(set (match_operand:SI 0 "register_operand" "=l")
(and:SI (match_operand:SI 1 "register_operand" "%0")
@@ -227,6 +239,10 @@
(set_attr "type" "logics_reg")]
)
+;; Changes to the constraints of this pattern must be propagated to those of
+;; atomic inclusive ORs in sync.md and to the logic for bind_old_new in
+;; arm_split_atomic_op in arm.c. These must be at least as strict as the
+;; constraints here and aim to be as permissive.
(define_insn "*thumb1_iorsi3_insn"
[(set (match_operand:SI 0 "register_operand" "=l")
(ior:SI (match_operand:SI 1 "register_operand" "%0")
@@ -237,6 +253,10 @@
(set_attr "conds" "set")
(set_attr "type" "logics_reg")])
+;; Changes to the constraints of this pattern must be propagated to those of
+;; atomic exclusive ORs in sync.md and to the logic for bind_old_new in
+;; arm_split_atomic_op in arm.c. These must be at least as strict as the
+;; constraints here and aim to be as permissive.
(define_insn "*thumb1_xorsi3_insn"
[(set (match_operand:SI 0 "register_operand" "=l")
(xor:SI (match_operand:SI 1 "register_operand" "%0")
@@ -590,8 +610,8 @@
;;; ??? The 'i' constraint looks funny, but it should always be replaced by
;;; thumb_reorg with a memory reference.
(define_insn "*thumb1_movdi_insn"
- [(set (match_operand:DI 0 "nonimmediate_operand" "=l,l,l,l,>,l, m,*r")
- (match_operand:DI 1 "general_operand" "l, I,J,>,l,mi,l,*r"))]
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=l,l,l,r,l,>,l, m,*r")
+ (match_operand:DI 1 "general_operand" "l, I,J,j,>,l,mi,l,*r"))]
"TARGET_THUMB1
&& ( register_operand (operands[0], DImode)
|| register_operand (operands[1], DImode))"
@@ -610,36 +630,41 @@
operands[1] = GEN_INT (- INTVAL (operands[1]));
return \"movs\\t%Q0, %1\;rsbs\\t%Q0, %Q0, #0\;asrs\\t%R0, %Q0, #31\";
case 3:
- return \"ldmia\\t%1, {%0, %H0}\";
+ gcc_assert (TARGET_HAVE_MOVT);
+ return \"movw\\t%Q0, %L1\;movs\\tR0, #0\";
case 4:
- return \"stmia\\t%0, {%1, %H1}\";
+ return \"ldmia\\t%1, {%0, %H0}\";
case 5:
- return thumb_load_double_from_address (operands);
+ return \"stmia\\t%0, {%1, %H1}\";
case 6:
+ return thumb_load_double_from_address (operands);
+ case 7:
operands[2] = gen_rtx_MEM (SImode,
plus_constant (Pmode, XEXP (operands[0], 0), 4));
output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
return \"\";
- case 7:
+ case 8:
if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
}
}"
- [(set_attr "length" "4,4,6,2,2,6,4,4")
- (set_attr "type" "multiple,multiple,multiple,load2,store2,load2,store2,multiple")
- (set_attr "pool_range" "*,*,*,*,*,1018,*,*")]
+ [(set_attr "length" "4,4,6,6,2,2,6,4,4")
+ (set_attr "type" "multiple,multiple,multiple,multiple,load2,store2,load2,store2,multiple")
+ (set_attr "arch" "t1,t1,t1,v8mb,t1,t1,t1,t1,t1")
+ (set_attr "pool_range" "*,*,*,*,*,*,1018,*,*")]
)
(define_insn "*thumb1_movsi_insn"
- [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l, m,*l*h*k")
- (match_operand:SI 1 "general_operand" "l, I,J,K,>,l,mi,l,*l*h*k"))]
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,r,l,l,l,>,l, m,*l*h*k")
+ (match_operand:SI 1 "general_operand" "l, I,j,J,K,>,l,mi,l,*l*h*k"))]
"TARGET_THUMB1
&& ( register_operand (operands[0], SImode)
|| register_operand (operands[1], SImode))"
"@
movs %0, %1
movs %0, %1
+ movw %0, %1
#
#
ldmia\\t%1, {%0}
@@ -647,10 +672,11 @@
ldr\\t%0, %1
str\\t%1, %0
mov\\t%0, %1"
- [(set_attr "length" "2,2,4,4,2,2,2,2,2")
- (set_attr "type" "mov_reg,mov_imm,multiple,multiple,load1,store1,load1,store1,mov_reg")
- (set_attr "pool_range" "*,*,*,*,*,*,1018,*,*")
- (set_attr "conds" "set,clob,*,*,nocond,nocond,nocond,nocond,nocond")])
+ [(set_attr "length" "2,2,4,4,4,2,2,2,2,2")
+ (set_attr "type" "mov_reg,mov_imm,mov_imm,multiple,multiple,load1,store1,load1,store1,mov_reg")
+ (set_attr "pool_range" "*,*,*,*,*,*,*,1018,*,*")
+ (set_attr "arch" "t1,t1,v8mb,t1,t1,t1,t1,t1,t1,t1")
+ (set_attr "conds" "set,clob,nocond,*,*,nocond,nocond,nocond,nocond,nocond")])
; Split the load of 64-bit constant into two loads for high and low 32-bit parts respectively
; to see if we can load them in fewer instructions or fewer cycles.
@@ -687,7 +713,8 @@
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(match_operand:SI 1 "const_int_operand" ""))]
- "TARGET_THUMB1 && satisfies_constraint_K (operands[1])"
+ "TARGET_THUMB1 && satisfies_constraint_K (operands[1])
+ && !(TARGET_HAVE_MOVT && satisfies_constraint_j (operands[1]))"
[(set (match_dup 2) (match_dup 1))
(set (match_dup 0) (ashift:SI (match_dup 2) (match_dup 3)))]
"
@@ -714,7 +741,8 @@
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(match_operand:SI 1 "const_int_operand" ""))]
- "TARGET_THUMB1 && satisfies_constraint_Pe (operands[1])"
+ "TARGET_THUMB1 && satisfies_constraint_Pe (operands[1])
+ && !(TARGET_HAVE_MOVT && satisfies_constraint_j (operands[1]))"
[(set (match_dup 2) (match_dup 1))
(set (match_dup 0) (plus:SI (match_dup 2) (match_dup 3)))]
"
@@ -726,8 +754,8 @@
)
(define_insn "*thumb1_movhi_insn"
- [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,l*r,*h,l")
- (match_operand:HI 1 "general_operand" "l,m,l,k*h,*r,I"))]
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,l*r,*h,l,r")
+ (match_operand:HI 1 "general_operand" "l,m,l,k*h,*r,I,n"))]
"TARGET_THUMB1
&& ( register_operand (operands[0], HImode)
|| register_operand (operands[1], HImode))"
@@ -739,6 +767,8 @@
case 3: return \"mov %0, %1\";
case 4: return \"mov %0, %1\";
case 5: return \"movs %0, %1\";
+ case 6: gcc_assert (TARGET_HAVE_MOVT);
+ return \"movw %0, %L1\";
default: gcc_unreachable ();
case 1:
/* The stack pointer can end up being taken as an index register.
@@ -758,9 +788,10 @@
}
return \"ldrh %0, %1\";
}"
- [(set_attr "length" "2,4,2,2,2,2")
- (set_attr "type" "alus_imm,load1,store1,mov_reg,mov_reg,mov_imm")
- (set_attr "conds" "clob,nocond,nocond,nocond,nocond,clob")])
+ [(set_attr "length" "2,4,2,2,2,2,4")
+ (set_attr "type" "alus_imm,load1,store1,mov_reg,mov_reg,mov_imm,mov_imm")
+ (set_attr "arch" "t1,t1,t1,t1,t1,t1,v8mb")
+ (set_attr "conds" "clob,nocond,nocond,nocond,nocond,clob,nocond")])
(define_expand "thumb_movhi_clobber"
[(set (match_operand:HI 0 "memory_operand" "")
@@ -963,6 +994,94 @@
DONE;
})
+;; A pattern for the CB(N)Z instruction added in ARMv8-M Baseline profile,
+;; adapted from cbranchsi4_insn. Modifying cbranchsi4_insn instead leads to
+;; code generation difference for ARMv6-M because the minimum length of the
+;; instruction becomes 2 even for ARMv6-M due to a limitation in genattrtab's
+;; handling of PC in the length condition.
+(define_insn "thumb1_cbz"
+ [(set (pc) (if_then_else
+ (match_operator 0 "equality_operator"
+ [(match_operand:SI 1 "s_register_operand" "l")
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ "TARGET_THUMB1 && TARGET_HAVE_CBZ"
+{
+ if (get_attr_length (insn) == 2)
+ {
+ if (GET_CODE (operands[0]) == EQ)
+ return "cbz\t%1, %l2";
+ else
+ return "cbnz\t%1, %l2";
+ }
+ else
+ {
+ rtx t = cfun->machine->thumb1_cc_insn;
+ if (t != NULL_RTX)
+ {
+ if (!rtx_equal_p (cfun->machine->thumb1_cc_op0, operands[1])
+ || !rtx_equal_p (cfun->machine->thumb1_cc_op1, operands[2]))
+ t = NULL_RTX;
+ if (cfun->machine->thumb1_cc_mode == CC_NOOVmode)
+ {
+ if (!noov_comparison_operator (operands[0], VOIDmode))
+ t = NULL_RTX;
+ }
+ else if (cfun->machine->thumb1_cc_mode != CCmode)
+ t = NULL_RTX;
+ }
+ if (t == NULL_RTX)
+ {
+ output_asm_insn ("cmp\t%1, #0", operands);
+ cfun->machine->thumb1_cc_insn = insn;
+ cfun->machine->thumb1_cc_op0 = operands[1];
+ cfun->machine->thumb1_cc_op1 = operands[2];
+ cfun->machine->thumb1_cc_mode = CCmode;
+ }
+ else
+ /* Ensure we emit the right type of condition code on the jump. */
+ XEXP (operands[0], 0) = gen_rtx_REG (cfun->machine->thumb1_cc_mode,
+ CC_REGNUM);
+
+ switch (get_attr_length (insn))
+ {
+ case 4: return "b%d0\t%l2";
+ case 6: return "b%D0\t.LCB%=;b\t%l2\t%@long jump\n.LCB%=:";
+ case 8: return "b%D0\t.LCB%=;bl\t%l2\t%@far jump\n.LCB%=:";
+ default: gcc_unreachable ();
+ }
+ }
+}
+ [(set (attr "far_jump")
+ (if_then_else
+ (eq_attr "length" "8")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 2) (pc)) (const_int 2))
+ (le (minus (match_dup 2) (pc)) (const_int 128)))
+ (const_int 2)
+ (if_then_else
+ (and (ge (minus (match_dup 2) (pc)) (const_int -250))
+ (le (minus (match_dup 2) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 2) (pc)) (const_int -2040))
+ (le (minus (match_dup 2) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8)))))
+ (set (attr "type")
+ (if_then_else
+ (eq_attr "length" "2")
+ (const_string "branch")
+ (const_string "multiple")))]
+)
+
+;; Changes to the constraints of this pattern must be propagated to those of
+;; atomic compare_and_swap splitters in sync.md. These must be at least as
+;; strict as the constraints here and aim to be as permissive.
(define_insn "cbranchsi4_insn"
[(set (pc) (if_then_else
(match_operator 0 "arm_comparison_operator"
@@ -1024,6 +1143,9 @@
(set_attr "type" "multiple")]
)
+;; Changes to the constraints of this pattern must be propagated to those of
+;; atomic compare_and_swap splitters in sync.md. These must be at least as
+;; strict as the constraints here and aim to be as permissive.
(define_insn "cbranchsi4_scratch"
[(set (pc) (if_then_else
(match_operator 4 "arm_comparison_operator"
@@ -1609,6 +1731,19 @@
(set_attr "type" "call")]
)
+(define_insn "*nonsecure_call_reg_thumb1_v5"
+ [(call (unspec:SI [(mem:SI (match_operand:SI 0 "register_operand" "l*r"))]
+ UNSPEC_NONSECURE_MEM)
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI LR_REGNUM))
+ (clobber (match_dup 0))]
+ "TARGET_THUMB1 && use_cmse && !SIBLING_CALL_P (insn)"
+ "bl\\t__gnu_cmse_nonsecure_call"
+ [(set_attr "length" "4")
+ (set_attr "type" "call")]
+)
+
(define_insn "*call_reg_thumb1"
[(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
(match_operand 1 "" ""))
@@ -1641,6 +1776,21 @@
(set_attr "type" "call")]
)
+(define_insn "*nonsecure_call_value_reg_thumb1_v5"
+ [(set (match_operand 0 "" "")
+ (call (unspec:SI
+ [(mem:SI (match_operand:SI 1 "register_operand" "l*r"))]
+ UNSPEC_NONSECURE_MEM)
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI LR_REGNUM))
+ (clobber (match_dup 1))]
+ "TARGET_THUMB1 && use_cmse"
+ "bl\\t__gnu_cmse_nonsecure_call"
+ [(set_attr "length" "4")
+ (set_attr "type" "call")]
+)
+
(define_insn "*call_value_reg_thumb1"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
@@ -1747,8 +1897,13 @@
"*
return thumb1_unexpanded_epilogue ();
"
- ; Length is absolute worst case
- [(set_attr "length" "44")
+ ; Length is absolute worst case, when using CMSE and if this is an entry
+ ; function an extra 4 (MSR) bytes will be added.
+ [(set (attr "length")
+ (if_then_else
+ (match_test "IS_CMSE_ENTRY (arm_current_func_type ())")
+ (const_int 48)
+ (const_int 44)))
(set_attr "type" "block")
;; We don't clobber the conditions, but the potential length of this
;; operation is sufficient to make conditionalizing the sequence
diff --git a/gcc/config/arm/thumb2.md b/gcc/config/arm/thumb2.md
index ab08288413c..ad4fee1cdcd 100644
--- a/gcc/config/arm/thumb2.md
+++ b/gcc/config/arm/thumb2.md
@@ -581,6 +581,19 @@
[(set_attr "type" "call")]
)
+(define_insn "*nonsecure_call_reg_thumb2"
+ [(call (unspec:SI [(mem:SI (match_operand:SI 0 "s_register_operand" "r"))]
+ UNSPEC_NONSECURE_MEM)
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI LR_REGNUM))
+ (clobber (match_dup 0))]
+ "TARGET_THUMB2 && use_cmse"
+ "bl\\t__gnu_cmse_nonsecure_call"
+ [(set_attr "length" "4")
+ (set_attr "type" "call")]
+)
+
(define_insn "*call_value_reg_thumb2"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
@@ -592,6 +605,21 @@
[(set_attr "type" "call")]
)
+(define_insn "*nonsecure_call_value_reg_thumb2"
+ [(set (match_operand 0 "" "")
+ (call
+ (unspec:SI [(mem:SI (match_operand:SI 1 "register_operand" "l*r"))]
+ UNSPEC_NONSECURE_MEM)
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI LR_REGNUM))
+ (clobber (match_dup 1))]
+ "TARGET_THUMB2 && use_cmse"
+ "bl\t__gnu_cmse_nonsecure_call"
+ [(set_attr "length" "4")
+ (set_attr "type" "call")]
+)
+
(define_insn "*thumb2_indirect_jump"
[(set (pc)
(match_operand:SI 0 "register_operand" "l*r"))]
@@ -1115,12 +1143,31 @@
(define_insn "*thumb2_return"
[(simple_return)]
- "TARGET_THUMB2"
+ "TARGET_THUMB2 && !IS_CMSE_ENTRY (arm_current_func_type ())"
"* return output_return_instruction (const_true_rtx, true, false, true);"
[(set_attr "type" "branch")
(set_attr "length" "4")]
)
+(define_insn "*thumb2_cmse_entry_return"
+ [(simple_return)]
+ "TARGET_THUMB2 && IS_CMSE_ENTRY (arm_current_func_type ())"
+ "* return output_return_instruction (const_true_rtx, true, false, true);"
+ [(set_attr "type" "branch")
+ ; This is a return from a cmse_nonsecure_entry function so code will be
+ ; added to clear the APSR and potentially the FPSCR if VFP is available, so
+ ; we adapt the length accordingly.
+ (set (attr "length")
+ (if_then_else (match_test "TARGET_HARD_FLOAT")
+ (const_int 12)
+ (const_int 8)))
+ ; We do not support predicate execution of returns from cmse_nonsecure_entry
+ ; functions because we need to clear the APSR. Since predicable has to be
+ ; a constant, we had to duplicate the thumb2_return pattern for CMSE entry
+ ; functions.
+ (set_attr "predicable" "no")]
+)
+
(define_insn_and_split "thumb2_eh_return"
[(unspec_volatile [(match_operand:SI 0 "s_register_operand" "r")]
VUNSPEC_EH_RETURN)
diff --git a/gcc/config/arm/types.md b/gcc/config/arm/types.md
index 25f79b4d010..14d6429ceab 100644
--- a/gcc/config/arm/types.md
+++ b/gcc/config/arm/types.md
@@ -538,6 +538,10 @@
; crypto_sha1_slow
; crypto_sha256_fast
; crypto_sha256_slow
+;
+; The classification below is for coprocessor instructions
+;
+; coproc
(define_attr "type"
"adc_imm,\
@@ -1071,7 +1075,8 @@
crypto_sha1_fast,\
crypto_sha1_slow,\
crypto_sha256_fast,\
- crypto_sha256_slow"
+ crypto_sha256_slow,\
+ coproc"
(const_string "untyped"))
; Is this an (integer side) multiply with a 32-bit (or smaller) result?
diff --git a/gcc/config/arm/unspecs.md b/gcc/config/arm/unspecs.md
index 5744c62cf5a..ce725f23e4a 100644
--- a/gcc/config/arm/unspecs.md
+++ b/gcc/config/arm/unspecs.md
@@ -84,6 +84,8 @@
UNSPEC_VRINTA ; Represent a float to integral float rounding
; towards nearest, ties away from zero.
UNSPEC_PROBE_STACK ; Probe stack memory reference
+ UNSPEC_NONSECURE_MEM ; Represent non-secure memory in ARMv8-M with
+ ; security extension
])
(define_c_enum "unspec" [
@@ -148,6 +150,24 @@
VUNSPEC_GET_FPSCR ; Represent fetch of FPSCR content.
VUNSPEC_SET_FPSCR ; Represent assign of FPSCR content.
VUNSPEC_PROBE_STACK_RANGE ; Represent stack range probing.
+ VUNSPEC_CDP ; Represent the coprocessor cdp instruction.
+ VUNSPEC_CDP2 ; Represent the coprocessor cdp2 instruction.
+ VUNSPEC_LDC ; Represent the coprocessor ldc instruction.
+ VUNSPEC_LDC2 ; Represent the coprocessor ldc2 instruction.
+ VUNSPEC_LDCL ; Represent the coprocessor ldcl instruction.
+ VUNSPEC_LDC2L ; Represent the coprocessor ldc2l instruction.
+ VUNSPEC_STC ; Represent the coprocessor stc instruction.
+ VUNSPEC_STC2 ; Represent the coprocessor stc2 instruction.
+ VUNSPEC_STCL ; Represent the coprocessor stcl instruction.
+ VUNSPEC_STC2L ; Represent the coprocessor stc2l instruction.
+ VUNSPEC_MCR ; Represent the coprocessor mcr instruction.
+ VUNSPEC_MCR2 ; Represent the coprocessor mcr2 instruction.
+ VUNSPEC_MRC ; Represent the coprocessor mrc instruction.
+ VUNSPEC_MRC2 ; Represent the coprocessor mrc2 instruction.
+ VUNSPEC_MCRR ; Represent the coprocessor mcrr instruction.
+ VUNSPEC_MCRR2 ; Represent the coprocessor mcrr2 instruction.
+ VUNSPEC_MRRC ; Represent the coprocessor mrrc instruction.
+ VUNSPEC_MRRC2 ; Represent the coprocessor mrrc2 instruction.
])
;; Enumerators for NEON unspecs.
diff --git a/gcc/config/arm/vfp.md b/gcc/config/arm/vfp.md
index ac5f3b862b5..251305e1a56 100644
--- a/gcc/config/arm/vfp.md
+++ b/gcc/config/arm/vfp.md
@@ -1401,3 +1401,40 @@
;; fmdhr et al (VFPv1)
;; Support for xD (single precision only) variants.
;; fmrrs, fmsrr
+
+;; Split an immediate DF move to two immediate SI moves.
+(define_insn_and_split "no_literal_pool_df_immediate"
+ [(set (match_operand 0 "s_register_operand" "")
+ (match_operand:DF 1 "const_double_operand" ""))]
+ "TARGET_THUMB2 && arm_disable_literal_pool
+ && !(TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE
+ && vfp3_const_double_rtx (operands[1]))"
+ "#"
+ "&& !reload_completed"
+ [(set (subreg:SI (match_dup 1) 0) (match_dup 2))
+ (set (subreg:SI (match_dup 1) 4) (match_dup 3))
+ (set (match_dup 0) (match_dup 1))]
+ "
+ long buf[2];
+ real_to_target (buf, CONST_DOUBLE_REAL_VALUE (operands[1]), DFmode);
+ operands[2] = GEN_INT ((int) buf[0]);
+ operands[3] = GEN_INT ((int) buf[1]);
+ operands[1] = gen_reg_rtx (DFmode);
+ ")
+
+;; Split an immediate SF move to one immediate SI move.
+(define_insn_and_split "no_literal_pool_sf_immediate"
+ [(set (match_operand 0 "s_register_operand" "")
+ (match_operand:SF 1 "const_double_operand" ""))]
+ "TARGET_THUMB2 && arm_disable_literal_pool
+ && !(TARGET_HARD_FLOAT && vfp3_const_double_rtx (operands[1]))"
+ "#"
+ "&& !reload_completed"
+ [(set (subreg:SI (match_dup 1) 0) (match_dup 2))
+ (set (match_dup 0) (match_dup 1))]
+ "
+ long buf;
+ real_to_target (&buf, CONST_DOUBLE_REAL_VALUE (operands[1]), SFmode);
+ operands[2] = GEN_INT ((int) buf);
+ operands[1] = gen_reg_rtx (SFmode);
+ ")
diff --git a/gcc/config/i386/driver-mingw32.c b/gcc/config/i386/driver-mingw32.c
new file mode 100644
index 00000000000..b70363ad26a
--- /dev/null
+++ b/gcc/config/i386/driver-mingw32.c
@@ -0,0 +1,26 @@
+/* Host OS specific configuration for the gcc driver.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+
+/* When defined, force the use (if non null) or not (otherwise) of CLI
+ globbing. */
+#ifdef MINGW_DOWILDCARD
+int _dowildcard = MINGW_DOWILDCARD;
+#endif
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 53afcbb6402..6cb5decdadd 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see
#include "backend.h"
#include "rtl.h"
#include "tree.h"
+#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
diff --git a/gcc/config/i386/x-mingw32 b/gcc/config/i386/x-mingw32
index 1d28a702a9d..3d0d9e94566 100644
--- a/gcc/config/i386/x-mingw32
+++ b/gcc/config/i386/x-mingw32
@@ -29,3 +29,6 @@ host-mingw32.o : $(srcdir)/config/i386/host-mingw32.c $(CONFIG_H) $(SYSTEM_H) \
coretypes.h hosthooks.h hosthooks-def.h toplev.h $(DIAGNOSTIC_H) $(HOOKS_H)
$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
$(srcdir)/config/i386/host-mingw32.c
+
+driver-mingw32.o : $(srcdir)/config/i386/driver-mingw32.c $(CONFIG_H)
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c
index 3fcc3b5385d..ea9a778a7ae 100644
--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -26,6 +26,7 @@ along with GCC; see the file COPYING3. If not see
#include "target.h"
#include "rtl.h"
#include "tree.h"
+#include "memmodel.h"
#include "cfghooks.h"
#include "df.h"
#include "tm_p.h"
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index 5af3d1e6505..55cf52fd6be 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -28,6 +28,7 @@ along with GCC; see the file COPYING3. If not see
#include "target.h"
#include "rtl.h"
#include "tree.h"
+#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "df.h"
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index 7591e550d9d..b0b1b3a245d 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -24,6 +24,7 @@
#include "backend.h"
#include "rtl.h"
#include "tree.h"
+#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
diff --git a/gcc/config/sparc/sparc.c b/gcc/config/sparc/sparc.c
index e3b79d8bc9c..f1969fe53d5 100644
--- a/gcc/config/sparc/sparc.c
+++ b/gcc/config/sparc/sparc.c
@@ -27,6 +27,7 @@ along with GCC; see the file COPYING3. If not see
#include "target.h"
#include "rtl.h"
#include "tree.h"
+#include "memmodel.h"
#include "gimple.h"
#include "df.h"
#include "tm_p.h"
diff --git a/gcc/configure b/gcc/configure
index 954673c1c43..bfb5cdb3275 100755
--- a/gcc/configure
+++ b/gcc/configure
@@ -916,6 +916,7 @@ enable_rpath
with_libiconv_prefix
enable_sjlj_exceptions
enable_secureplt
+enable_mingw_wildcard
enable_leading_mingw64_underscores
enable_cld
enable_frame_pointer
@@ -1627,6 +1628,8 @@ Optional Features:
--enable-sjlj-exceptions
arrange to use setjmp/longjmp exception handling
--enable-secureplt enable -msecure-plt by default for PowerPC
+ --enable-mingw-wildcard Set whether to expand wildcard on command-line.
+ Default to platform configuration
--enable-leading-mingw64-underscores
enable leading underscores on 64 bit mingw targets
--enable-cld enable -mcld by default for 32bit x86
@@ -11969,6 +11972,21 @@ if test "${enable_secureplt+set}" = set; then :
fi
+# Check whether --enable-mingw-wildcard was given.
+if test "${enable_mingw_wildcard+set}" = set; then :
+ enableval=$enable_mingw_wildcard;
+else
+ enable_mingw_wildcard=platform
+fi
+
+if test x"$enable_mingw_wildcard" != xplatform ; then :
+
+cat >>confdefs.h <<_ACEOF
+#define MINGW_DOWILDCARD $(test x"$enable_mingw_wildcard" = xno; echo $?)
+_ACEOF
+
+fi
+
# Check whether --enable-leading-mingw64-underscores was given.
if test "${enable_leading_mingw64_underscores+set}" = set; then :
enableval=$enable_leading_mingw64_underscores;
@@ -12270,7 +12288,7 @@ elif test "x$TARGET_SYSTEM_ROOT" != x; then
fi
if test x$host != x$target || test "x$TARGET_SYSTEM_ROOT" != x; then
- if test "x$with_headers" != x; then
+ if test "x$with_headers" != x && test "x$with_headers" != xyes; then
target_header_dir=$with_headers
elif test "x$with_sysroot" = x; then
target_header_dir="${test_exec_prefix}/${target_noncanonical}/sys-include"
@@ -18460,7 +18478,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 18463 "configure"
+#line 18481 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -18566,7 +18584,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 18569 "configure"
+#line 18587 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
diff --git a/gcc/configure.ac b/gcc/configure.ac
index 4c65d441e72..6c6f8436e45 100644
--- a/gcc/configure.ac
+++ b/gcc/configure.ac
@@ -1789,6 +1789,16 @@ AC_ARG_ENABLE(secureplt,
[enable -msecure-plt by default for PowerPC])],
[], [])
+AC_ARG_ENABLE(mingw-wildcard,
+[AS_HELP_STRING([--enable-mingw-wildcard],
+ [Set whether to expand wildcard on command-line.
+ Default to platform configuration])],
+[],[enable_mingw_wildcard=platform])
+AS_IF([test x"$enable_mingw_wildcard" != xplatform ],
+ [AC_DEFINE_UNQUOTED(MINGW_DOWILDCARD,
+ $(test x"$enable_mingw_wildcard" = xno; echo $?),
+ [Value to set MinGW's _dowildcard to.])])
+
AC_ARG_ENABLE(leading-mingw64-underscores,
AS_HELP_STRING([--enable-leading-mingw64-underscores],
[enable leading underscores on 64 bit mingw targets]),
@@ -2026,7 +2036,7 @@ elif test "x$TARGET_SYSTEM_ROOT" != x; then
fi
if test x$host != x$target || test "x$TARGET_SYSTEM_ROOT" != x; then
- if test "x$with_headers" != x; then
+ if test "x$with_headers" != x && test "x$with_headers" != xyes; then
target_header_dir=$with_headers
elif test "x$with_sysroot" = x; then
target_header_dir="${test_exec_prefix}/${target_noncanonical}/sys-include"
diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
index c3a65686be7..c33feb548af 100644
--- a/gcc/doc/extend.texi
+++ b/gcc/doc/extend.texi
@@ -11416,6 +11416,7 @@ instructions, but allow the compiler to schedule those calls.
* ARM iWMMXt Built-in Functions::
* ARM C Language Extensions (ACLE)::
* ARM Floating Point Status and Control Intrinsics::
+* ARM ARMv8-M Security Extensions::
* AVR Built-in Functions::
* Blackfin Built-in Functions::
* FR-V Built-in Functions::
@@ -12241,8 +12242,9 @@ The built-in intrinsics for the Advanced SIMD extension are available when
NEON is enabled.
Currently, ARM and AArch64 back ends do not support ACLE 2.0 fully. Both
-back ends support CRC32 intrinsics from @file{arm_acle.h}. The ARM back end's
-16-bit floating-point Advanced SIMD intrinsics currently comply to ACLE v1.1.
+back ends support CRC32 intrinsics and the ARM back end supports the
+Coprocessor intrinsics, all from @file{arm_acle.h}. The ARM back end's 16-bit
+floating-point Advanced SIMD intrinsics currently comply to ACLE v1.1.
AArch64's back end does not have support for 16-bit floating point Advanced SIMD
intrinsics yet.
@@ -12260,6 +12262,35 @@ unsigned int __builtin_arm_get_fpscr ()
void __builtin_arm_set_fpscr (unsigned int)
@end smallexample
+@node ARM ARMv8-M Security Extensions
+@subsection ARM ARMv8-M Security Extensions
+
+GCC implements the ARMv8-M Security Extensions as described in the ARMv8-M
+Security Extensions: Requirements on Development Tools Engineering
+Specification, which can be found at
+@uref{http://infocenter.arm.com/help/topic/com.arm.doc.ecm0359818/ECM0359818_armv8m_security_extensions_reqs_on_dev_tools_1_0.pdf}.
+
+As part of the Security Extensions GCC implements two new function attributes:
+@code{cmse_nonsecure_entry} and @code{cmse_nonsecure_call}.
+
+As part of the Security Extensions GCC implements the intrinsics below. FPTR
+is used here to mean any function pointer type.
+
+@smallexample
+cmse_address_info_t cmse_TT (void *)
+cmse_address_info_t cmse_TT_fptr (FPTR)
+cmse_address_info_t cmse_TTT (void *)
+cmse_address_info_t cmse_TTT_fptr (FPTR)
+cmse_address_info_t cmse_TTA (void *)
+cmse_address_info_t cmse_TTA_fptr (FPTR)
+cmse_address_info_t cmse_TTAT (void *)
+cmse_address_info_t cmse_TTAT_fptr (FPTR)
+void * cmse_check_address_range (void *, size_t, int)
+typeof(p) cmse_nsfptr_create (FPTR p)
+intptr_t cmse_is_nsfptr (FPTR)
+int cmse_nonsecure_caller (void)
+@end smallexample
+
@node AVR Built-in Functions
@subsection AVR Built-in Functions
diff --git a/gcc/doc/install.texi b/gcc/doc/install.texi
index b60b53a7143..417653d8b27 100644
--- a/gcc/doc/install.texi
+++ b/gcc/doc/install.texi
@@ -1101,19 +1101,59 @@ sysv, aix.
@item --with-multilib-list=@var{list}
@itemx --without-multilib-list
-Specify what multilibs to build.
-Currently only implemented for arm*-*-*, sh*-*-* and x86-64-*-linux*.
+Specify what multilibs to build. @var{list} is a comma separated list of
+values, possibly consisting of a single value. Currently only implemented
+for arm*-*-*, sh*-*-* and x86-64-*-linux*. The accepted values and meaning
+for each target is given below.
@table @code
@item arm*-*-*
-@var{list} is either @code{default} or @code{aprofile}. Specifying
-@code{default} is equivalent to omitting this option while specifying
-@code{aprofile} builds multilibs for each combination of ISA (@code{-marm} or
-@code{-mthumb}), architecture (@code{-march=armv7-a}, @code{-march=armv7ve},
-or @code{-march=armv8-a}), FPU available (none, @code{-mfpu=vfpv3-d16},
-@code{-mfpu=neon}, @code{-mfpu=vfpv4-d16}, @code{-mfpu=neon-vfpv4} or
-@code{-mfpu=neon-fp-armv8} depending on architecture) and floating-point ABI
-(@code{-mfloat-abi=softfp} or @code{-mfloat-abi=hard}).
+@var{list} is one of @code{default}, @code{aprofile} or @code{rmprofile}.
+Specifying @code{default} is equivalent to omitting this option, i.e. only the
+default runtime library will be enabled. Specifying @code{aprofile} or
+@code{rmprofile} builds multilibs for a combination of ISA, architecture,
+FPU available and floating-point ABI.
+
+The table below gives the combination of ISAs, architectures, FPUs and
+floating-point ABIs for which multilibs are built for each accepted value.
+
+@multitable @columnfractions .15 .28 .30
+@item Option @tab aprofile @tab rmprofile
+@item ISAs
+@tab @code{-marm} and @code{-mthumb}
+@tab @code{-mthumb}
+@item Architectures@*@*@*@*@*@*
+@tab default architecture@*
+@code{-march=armv7-a}@*
+@code{-march=armv7ve}@*
+@code{-march=armv8-a}@*@*@*
+@tab default architecture@*
+@code{-march=armv6s-m}@*
+@code{-march=armv7-m}@*
+@code{-march=armv7e-m}@*
+@code{-march=armv8-m.base}@*
+@code{-march=armv8-m.main}@*
+@code{-march=armv7}
+@item FPUs@*@*@*@*@*
+@tab none@*
+@code{-mfpu=vfpv3-d16}@*
+@code{-mfpu=neon}@*
+@code{-mfpu=vfpv4-d16}@*
+@code{-mfpu=neon-vfpv4}@*
+@code{-mfpu=neon-fp-armv8}
+@tab none@*
+@code{-mfpu=vfpv3-d16}@*
+@code{-mfpu=fpv4-sp-d16}@*
+@code{-mfpu=fpv5-sp-d16}@*
+@code{-mfpu=fpv5-d16}@*
+@item floating-point@/ ABIs@*@*
+@tab @code{-mfloat-abi=soft}@*
+@code{-mfloat-abi=softfp}@*
+@code{-mfloat-abi=hard}
+@tab @code{-mfloat-abi=soft}@*
+@code{-mfloat-abi=softfp}@*
+@code{-mfloat-abi=hard}
+@end multitable
@item sh*-*-*
@var{list} is a comma separated list of CPU names. These must be of the
@@ -1668,6 +1708,18 @@ Using the GNU Compiler Collection (GCC)},
See ``i386 and x86-64 Options'' in the main manual
@end ifhtml
+@item --enable-mingw-wildcard
+@itemx --disable-mingw-wildcard
+The @option{--enable-mingw-wildcard} option enables Microsoft Windows-hosted
+GCC to perform wildcard expansion of its arguments, regardless of the default
+configuration of MinGW runtime. Conversely, @option{--disable-mingw-wildcard}
+option disables wildcard expansion. When none of these options is specified,
+wildcard expansion will be decided according to the way the MinGW runtime was
+configured.
+
+Note that this option only affects wildcard expansion for GCC itself. It does
+not affect wildcard expansion of executables built by the resulting GCC.
+
@item --enable-win32-registry
@itemx --enable-win32-registry=@var{key}
@itemx --disable-win32-registry
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index e003ccaa353..63e5e8bb675 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -632,7 +632,9 @@ Objective-C and Objective-C++ Dialects}.
-mneon-for-64bits @gol
-mslow-flash-data @gol
-masm-syntax-unified @gol
--mrestrict-it}
+-mrestrict-it @gol
+-mpure-code @gol
+-mcmse}
@emph{AVR Options}
@gccoptlist{-mmcu=@var{mcu} -maccumulate-args -mbranch-cost=@var{cost} @gol
@@ -13971,7 +13973,8 @@ of the @option{-mcpu=} option. Permissible names are: @samp{armv2},
@samp{armv6t2}, @samp{armv6z}, @samp{armv6kz}, @samp{armv6-m},
@samp{armv7}, @samp{armv7-a}, @samp{armv7-r}, @samp{armv7-m}, @samp{armv7e-m},
@samp{armv7ve}, @samp{armv8-a}, @samp{armv8-a+crc}, @samp{armv8.1-a},
-@samp{armv8.1-a+crc}, @samp{iwmmxt}, @samp{iwmmxt2}, @samp{ep9312}.
+@samp{armv8.1-a+crc}, @samp{armv8-m.base}, @samp{armv8-m.main},
+@samp{armv8-m.main+dsp}, @samp{iwmmxt}, @samp{iwmmxt2}.
Architecture revisions older than @option{armv4t} are deprecated.
@@ -14014,6 +14017,8 @@ Permissible names are: @samp{arm2}, @samp{arm250},
@samp{cortex-a32}, @samp{cortex-a35}, @samp{cortex-a53}, @samp{cortex-a57},
@samp{cortex-a72}, @samp{cortex-r4},
@samp{cortex-r4f}, @samp{cortex-r5}, @samp{cortex-r7}, @samp{cortex-r8},
+@samp{cortex-m33},
+@samp{cortex-m23},
@samp{cortex-m7},
@samp{cortex-m4},
@samp{cortex-m3},
@@ -14276,10 +14281,10 @@ generating these instructions. This option is enabled by default when
@opindex mno-unaligned-access
Enables (or disables) reading and writing of 16- and 32- bit values
from addresses that are not 16- or 32- bit aligned. By default
-unaligned access is disabled for all pre-ARMv6 and all ARMv6-M
-architectures, and enabled for all other architectures. If unaligned
-access is not enabled then words in packed data structures are
-accessed a byte at a time.
+unaligned access is disabled for all pre-ARMv6, all ARMv6-M and for
+ARMv8-M Baseline architectures, and enabled for all other
+architectures. If unaligned access is not enabled then words in packed
+data structures are accessed a byte at a time.
The ARM attribute @code{Tag_CPU_unaligned_access} is set in the
generated object file to either true or false, depending upon the
@@ -14319,6 +14324,19 @@ Print CPU tuning information as comment in assembler file. This is
an option used only for regression testing of the compiler and not
intended for ordinary use in compiling code. This option is disabled
by default.
+
+@item -mpure-code
+@opindex mpure-code
+Do not allow constant data to be placed in code sections.
+Additionally, when compiling for ELF object format give all text sections the
+ELF processor-specific section attribute @code{SHF_ARM_PURECODE}. This option
+is only available when generating non-pic code for ARMv7-M targets.
+
+@item -mcmse
+@opindex mcmse
+Generate secure code as per the "ARMv8-M Security Extensions: Requirements on
+Development Tools Engineering Specification", which can be found on
+@url{http://infocenter.arm.com/help/topic/com.arm.doc.ecm0359818/ECM0359818_armv8m_security_extensions_reqs_on_dev_tools_1_0.pdf}.
@end table
@node AVR Options
diff --git a/gcc/doc/sourcebuild.texi b/gcc/doc/sourcebuild.texi
index c5354cfc8f3..fcedc64fcdd 100644
--- a/gcc/doc/sourcebuild.texi
+++ b/gcc/doc/sourcebuild.texi
@@ -1601,6 +1601,33 @@ arm_v8_1a_neon_ok.
ARM target prefers @code{LDRD} and @code{STRD} instructions over
@code{LDM} and @code{STM} instructions.
+@item arm_thumb1_movt_ok
+ARM target generates Thumb-1 code for @code{-mthumb} with @code{MOVW}
+and @code{MOVT} instructions available.
+
+@item arm_thumb1_cbz_ok
+ARM target generates Thumb-1 code for @code{-mthumb} with
+@code{CBZ} and @code{CBNZ} instructions available.
+
+@item arm_cmse_ok
+ARM target supports ARMv8-M Security Extensions, enabled by the @code{-mcmse}
+option.
+
+@item arm_coproc1_ok
+@anchor{arm_coproc1_ok}
+ARM target supports the following coprocessor instruction: @code{CDP},
+@code{LDC}, @code{STC}, @code{MCR} and @code{MRC}.
+
+@item arm_coproc2_ok
+@anchor{arm_coproc2_ok}
+ARM target supports all the coprocessor instructions also listed as supported
+in @ref{arm_coproc1_ok} in addition to the following: @code{CDP2}, @code{LDC2},
+@code{LDC2l}, @code{STC2}, @code{STC2l}, @code{MCR2} and @code{MRC2}.
+
+@item arm_coproc3_ok
+ARM target supports all the coprocessor instructions also listed as supported
+in @ref{arm_coproc2_ok} in addition to the following: @code{MCRR} and
+@code{MRRC}.
@end table
@subsubsection AArch64-specific attributes
diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
index 745910f9a33..1feb6316289 100644
--- a/gcc/doc/tm.texi
+++ b/gcc/doc/tm.texi
@@ -7510,6 +7510,18 @@ is non-NULL, it is the @code{VAR_DECL} or @code{FUNCTION_DECL} with which
this section is associated.
@end deftypefn
+@deftypefn {Target Hook} bool TARGET_ASM_ELF_FLAGS_NUMERIC (unsigned int @var{flags}, unsigned int *@var{num})
+This hook can be used to encode ELF section flags for which no letter
+code has been defined in the assembler. It is called by
+@code{default_asm_named_section} whenever the section flags need to be
+emitted in the assembler output. If the hook returns true, then the
+numerical value for ELF section flags should be calculated from
+@var{flags} and saved in @var{*num}; the value will be printed out
+instead of the normal sequence of letter codes. If the hook is not
+defined, or if it returns false, then @var{num} will be ignored and the
+traditional letter sequence will be emitted.
+@end deftypefn
+
@deftypefn {Target Hook} {section *} TARGET_ASM_FUNCTION_SECTION (tree @var{decl}, enum node_frequency @var{freq}, bool @var{startup}, bool @var{exit})
Return preferred text (sub)section for function @var{decl}.
Main purpose of this function is to separate cold, normal and hot
diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
index f31c763991c..db0d75fbe89 100644
--- a/gcc/doc/tm.texi.in
+++ b/gcc/doc/tm.texi.in
@@ -5209,6 +5209,8 @@ of the filename using this macro.
@hook TARGET_ASM_NAMED_SECTION
+@hook TARGET_ASM_ELF_FLAGS_NUMERIC
+
@hook TARGET_ASM_FUNCTION_SECTION
@hook TARGET_ASM_FUNCTION_SWITCHED_TEXT_SECTIONS
diff --git a/gcc/genconditions.c b/gcc/genconditions.c
index 8abf1c243a9..0f9c749cd65 100644
--- a/gcc/genconditions.c
+++ b/gcc/genconditions.c
@@ -94,6 +94,7 @@ write_header (void)
#include \"resource.h\"\n\
#include \"diagnostic-core.h\"\n\
#include \"reload.h\"\n\
+#include \"memmodel.h\"\n\
#include \"tm-constrs.h\"\n");
if (saw_eh_return)
diff --git a/gcc/genemit.c b/gcc/genemit.c
index 87f53010926..9e01dc17160 100644
--- a/gcc/genemit.c
+++ b/gcc/genemit.c
@@ -792,6 +792,7 @@ from the machine description file `md'. */\n\n");
printf ("#include \"reload.h\"\n");
printf ("#include \"diagnostic-core.h\"\n");
printf ("#include \"regs.h\"\n");
+ printf ("#include \"memmodel.h\"\n");
printf ("#include \"tm-constrs.h\"\n");
printf ("#include \"ggc.h\"\n");
printf ("#include \"dumpfile.h\"\n");
diff --git a/gcc/genoutput.c b/gcc/genoutput.c
index 6ca1bb89cf7..d5a7319d835 100644
--- a/gcc/genoutput.c
+++ b/gcc/genoutput.c
@@ -231,6 +231,7 @@ output_prologue (void)
printf ("#include \"diagnostic-core.h\"\n");
printf ("#include \"output.h\"\n");
printf ("#include \"target.h\"\n");
+ printf ("#include \"memmodel.h\"\n");
printf ("#include \"tm-constrs.h\"\n");
}
diff --git a/gcc/genpeep.c b/gcc/genpeep.c
index aef9c74145f..2e82c525e3f 100644
--- a/gcc/genpeep.c
+++ b/gcc/genpeep.c
@@ -373,6 +373,7 @@ from the machine description file `md'. */\n\n");
printf ("#include \"except.h\"\n");
printf ("#include \"diagnostic-core.h\"\n");
printf ("#include \"flags.h\"\n");
+ printf ("#include \"memmodel.h\"\n");
printf ("#include \"tm-constrs.h\"\n\n");
printf ("extern rtx peep_operand[];\n\n");
diff --git a/gcc/genpreds.c b/gcc/genpreds.c
index c0d7ce4146c..ac09245e7cd 100644
--- a/gcc/genpreds.c
+++ b/gcc/genpreds.c
@@ -1577,6 +1577,7 @@ write_insn_preds_c (void)
#include \"reload.h\"\n\
#include \"regs.h\"\n\
#include \"emit-rtl.h\"\n\
+#include \"memmodel.h\"\n\
#include \"tm-constrs.h\"\n");
FOR_ALL_PREDICATES (p)
diff --git a/gcc/genrecog.c b/gcc/genrecog.c
index 47e42660fcc..fb878468ced 100644
--- a/gcc/genrecog.c
+++ b/gcc/genrecog.c
@@ -4172,6 +4172,7 @@ write_header (void)
#include \"diagnostic-core.h\"\n\
#include \"reload.h\"\n\
#include \"regs.h\"\n\
+#include \"memmodel.h\"\n\
#include \"tm-constrs.h\"\n\
\n");
diff --git a/gcc/hooks.c b/gcc/hooks.c
index 99ec4014adb..0d18ef699dc 100644
--- a/gcc/hooks.c
+++ b/gcc/hooks.c
@@ -481,3 +481,12 @@ void
hook_void_gcc_optionsp (struct gcc_options *opts ATTRIBUTE_UNUSED)
{
}
+
+/* Generic hook that takes an unsigned int, an unsigned int pointer and
+ returns false. */
+
+bool
+hook_bool_uint_uintp_false (unsigned int, unsigned int *)
+{
+ return false;
+}
diff --git a/gcc/hooks.h b/gcc/hooks.h
index 2dc59baeadd..0cdfc81ca42 100644
--- a/gcc/hooks.h
+++ b/gcc/hooks.h
@@ -76,6 +76,7 @@ extern void hook_void_tree (tree);
extern void hook_void_tree_treeptr (tree, tree *);
extern void hook_void_int_int (int, int);
extern void hook_void_gcc_optionsp (struct gcc_options *);
+extern bool hook_bool_uint_uintp_false (unsigned int, unsigned int *);
extern int hook_int_uint_mode_1 (unsigned int, machine_mode);
extern int hook_int_const_tree_0 (const_tree);
diff --git a/gcc/memmodel.h b/gcc/memmodel.h
new file mode 100644
index 00000000000..d53eb7bc9d9
--- /dev/null
+++ b/gcc/memmodel.h
@@ -0,0 +1,86 @@
+/* Prototypes of memory model helper functions.
+ Copyright (C) 2015-2016 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_MEMMODEL_H
+#define GCC_MEMMODEL_H
+
+/* Return the memory model from a host integer. */
+static inline enum memmodel
+memmodel_from_int (unsigned HOST_WIDE_INT val)
+{
+ return (enum memmodel) (val & MEMMODEL_MASK);
+}
+
+/* Return the base memory model from a host integer. */
+static inline enum memmodel
+memmodel_base (unsigned HOST_WIDE_INT val)
+{
+ return (enum memmodel) (val & MEMMODEL_BASE_MASK);
+}
+
+/* Return TRUE if the memory model is RELAXED. */
+static inline bool
+is_mm_relaxed (enum memmodel model)
+{
+ return (model & MEMMODEL_BASE_MASK) == MEMMODEL_RELAXED;
+}
+
+/* Return TRUE if the memory model is CONSUME. */
+static inline bool
+is_mm_consume (enum memmodel model)
+{
+ return (model & MEMMODEL_BASE_MASK) == MEMMODEL_CONSUME;
+}
+
+/* Return TRUE if the memory model is ACQUIRE. */
+static inline bool
+is_mm_acquire (enum memmodel model)
+{
+ return (model & MEMMODEL_BASE_MASK) == MEMMODEL_ACQUIRE;
+}
+
+/* Return TRUE if the memory model is RELEASE. */
+static inline bool
+is_mm_release (enum memmodel model)
+{
+ return (model & MEMMODEL_BASE_MASK) == MEMMODEL_RELEASE;
+}
+
+/* Return TRUE if the memory model is ACQ_REL. */
+static inline bool
+is_mm_acq_rel (enum memmodel model)
+{
+ return (model & MEMMODEL_BASE_MASK) == MEMMODEL_ACQ_REL;
+}
+
+/* Return TRUE if the memory model is SEQ_CST. */
+static inline bool
+is_mm_seq_cst (enum memmodel model)
+{
+ return (model & MEMMODEL_BASE_MASK) == MEMMODEL_SEQ_CST;
+}
+
+/* Return TRUE if the memory model is a SYNC variant. */
+static inline bool
+is_mm_sync (enum memmodel model)
+{
+ return (model & MEMMODEL_SYNC);
+}
+
+#endif /* GCC_MEMMODEL_H */
diff --git a/gcc/optabs.c b/gcc/optabs.c
index a6d8822b87e..5bfc304689f 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -25,6 +25,7 @@ along with GCC; see the file COPYING3. If not see
#include "target.h"
#include "rtl.h"
#include "tree.h"
+#include "memmodel.h"
#include "predict.h"
#include "tm_p.h"
#include "expmed.h"
diff --git a/gcc/target.def b/gcc/target.def
index 20f2b32da1e..33a671af95b 100644
--- a/gcc/target.def
+++ b/gcc/target.def
@@ -432,6 +432,22 @@ this section is associated.",
void, (const char *name, unsigned int flags, tree decl),
default_no_named_section)
+/* Tell assembler what section attributes to assign this elf section
+ declaration, using their numerical value. */
+DEFHOOK
+(elf_flags_numeric,
+ "This hook can be used to encode ELF section flags for which no letter\n\
+code has been defined in the assembler. It is called by\n\
+@code{default_asm_named_section} whenever the section flags need to be\n\
+emitted in the assembler output. If the hook returns true, then the\n\
+numerical value for ELF section flags should be calculated from\n\
+@var{flags} and saved in @var{*num}; the value will be printed out\n\
+instead of the normal sequence of letter codes. If the hook is not\n\
+defined, or if it returns false, then @var{num} will be ignored and the\n\
+traditional letter sequence will be emitted.",
+ bool, (unsigned int flags, unsigned int *num),
+ hook_bool_uint_uintp_false)
+
/* Return preferred text (sub)section for function DECL.
Main purpose of this function is to separate cold, normal and hot
functions. STARTUP is true when function is known to be used only
diff --git a/gcc/testsuite/ChangeLog.arm b/gcc/testsuite/ChangeLog.arm
new file mode 100644
index 00000000000..2811cf9dea7
--- /dev/null
+++ b/gcc/testsuite/ChangeLog.arm
@@ -0,0 +1,365 @@
+2017-01-24 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+	* lib/target-supports.exp (arm_coproc2_ok, arm_coproc3_ok): Fix language
+ in comments.
+ (arm_coproc4_ok): New.
+
+2016-12-14 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * gcc.target/arm/thumb2-slow-flash-data-3.c: Add extra scan.
+ * gcc.target/arm/thumb2-slow-flash-data-5.c: Likewise.
+
+2016-12-14 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * gcc.target/arm/cmse/mainline/hard/cmse-13.c: Fix testsuite.
+ * gcc.target/arm/cmse/mainline/hard/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/hard/cmse-8.c: Likewise.
+ * gcc.target/arm/cmse/mainline/hard-sp/cmse-13.c: Likewise.
+ * gcc.target/arm/cmse/mainline/hard-sp/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/hard-sp/cmse-8.c: Likewise.
+
+2016-12-09 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-09 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ PR rtl-optimization/78255
+ * gcc.target/aarch64/pr78255.c: New.
+ * gcc.target/arm/pr78255-1.c: New.
+ * gcc.target/arm/pr78255-2.c: New.
+
+2016-12-07 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-12-07 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ PR rtl-optimization/78617
+ * gcc.c-torture/execute/pr78617.c: New test.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ PR target/71607
+ * gcc.target/arm/thumb2-slow-flash-data.c: Renamed to ...
+ * gcc.target/arm/thumb2-slow-flash-data-1.c: ... this.
+ * gcc.target/arm/thumb2-slow-flash-data-2.c: New.
+ * gcc.target/arm/thumb2-slow-flash-data-3.c: New.
+ * gcc.target/arm/thumb2-slow-flash-data-4.c: New.
+ * gcc.target/arm/thumb2-slow-flash-data-5.c: New.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * gcc.target/arm/acle/mcrr: New.
+ * gcc.target/arm/acle/mcrr2: New.
+ * gcc.target/arm/acle/mrrc: New.
+ * gcc.target/arm/acle/mrrc2: New.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * gcc.target/arm/acle/mcr.c: New.
+ * gcc.target/arm/acle/mrc.c: New.
+ * gcc.target/arm/acle/mcr2.c: New.
+ * gcc.target/arm/acle/mrc2.c: New.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * gcc.target/arm/acle/ldc: New.
+ * gcc.target/arm/acle/ldc2: New.
+ * gcc.target/arm/acle/ldcl: New.
+ * gcc.target/arm/acle/ldc2l: New.
+ * gcc.target/arm/acle/stc: New.
+ * gcc.target/arm/acle/stc2: New.
+ * gcc.target/arm/acle/stcl: New.
+ * gcc.target/arm/acle/stc2l: New.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * gcc.target/arm/acle/acle.exp: Run tests for different options
+ and make sure fat-lto-objects is used such that we can still do
+ assemble scans.
+ * gcc.target/arm/acle/cdp.c: New.
+ * gcc.target/arm/acle/cdp2.c: New.
+ * lib/target-supports.exp (check_effective_target_arm_coproc1_ok): New.
+ (check_effective_target_arm_coproc1_ok_nocache): New.
+ (check_effective_target_arm_coproc2_ok): New.
+ (check_effective_target_arm_coproc2_ok_nocache): New.
+ (check_effective_target_arm_coproc3_ok): New.
+ (check_effective_target_arm_coproc3_ok_nocache): New.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-02 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * gcc.target/arm/cmse/cmse-1.c: Add test for
+ cmse_nonsecure_caller.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-02 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * gcc.target/arm/cmse/cmse.exp: Run tests in mainline dir.
+ * gcc.target/arm/cmse/cmse-9.c: Added some extra tests.
+ * gcc.target/arm/cmse/cmse-14.c: New.
+ * gcc.target/arm/cmse/baseline/bitfield-4.c: New.
+ * gcc.target/arm/cmse/baseline/bitfield-5.c: New.
+ * gcc.target/arm/cmse/baseline/bitfield-6.c: New.
+ * gcc.target/arm/cmse/baseline/bitfield-7.c: New.
+ * gcc.target/arm/cmse/baseline/bitfield-8.c: New.
+ * gcc.target/arm/cmse/baseline/bitfield-9.c: New.
+ * gcc.target/arm/cmse/baseline/bitfield-and-union-1.c: New.
+ * gcc.target/arm/cmse/baseline/cmse-11.c: New.
+ * gcc.target/arm/cmse/baseline/cmse-13.c: New.
+ * gcc.target/arm/cmse/baseline/cmse-6.c: New.
+ * gcc.target/arm/cmse/baseline/union-1.c: New.
+ * gcc.target/arm/cmse/baseline/union-2.c: New.
+ * gcc.target/arm/cmse/mainline/bitfield-4.c: New.
+ * gcc.target/arm/cmse/mainline/bitfield-5.c: New.
+ * gcc.target/arm/cmse/mainline/bitfield-6.c: New.
+ * gcc.target/arm/cmse/mainline/bitfield-7.c: New.
+ * gcc.target/arm/cmse/mainline/bitfield-8.c: New.
+ * gcc.target/arm/cmse/mainline/bitfield-9.c: New.
+ * gcc.target/arm/cmse/mainline/bitfield-and-union-1.c: New.
+ * gcc.target/arm/cmse/mainline/union-1.c: New.
+ * gcc.target/arm/cmse/mainline/union-2.c: New.
+ * gcc.target/arm/cmse/mainline/hard-sp/cmse-13.c: New.
+ * gcc.target/arm/cmse/mainline/hard-sp/cmse-7.c: New.
+ * gcc.target/arm/cmse/mainline/hard-sp/cmse-8.c: New.
+ * gcc.target/arm/cmse/mainline/hard/cmse-13.c: New.
+ * gcc.target/arm/cmse/mainline/hard/cmse-7.c: New.
+ * gcc.target/arm/cmse/mainline/hard/cmse-8.c: New.
+ * gcc.target/arm/cmse/mainline/soft/cmse-13.c: New.
+ * gcc.target/arm/cmse/mainline/soft/cmse-7.c: New.
+ * gcc.target/arm/cmse/mainline/soft/cmse-8.c: New.
+ * gcc.target/arm/cmse/mainline/softfp-sp/cmse-7.c: New.
+ * gcc.target/arm/cmse/mainline/softfp-sp/cmse-8.c: New.
+ * gcc.target/arm/cmse/mainline/softfp/cmse-13.c: New.
+ * gcc.target/arm/cmse/mainline/softfp/cmse-7.c: New.
+ * gcc.target/arm/cmse/mainline/softfp/cmse-8.c: New.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-02 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * gcc.target/arm/cmse/cmse-3.c: Add tests.
+ * gcc.target/arm/cmse/cmse-4.c: Add tests.
+ * gcc.target/arm/cmse/cmse-15.c: New.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-02 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+	* gcc.target/arm/cmse/cmse.exp: Test different multilibs separately.
+ * gcc.target/arm/cmse/struct-1.c: New.
+ * gcc.target/arm/cmse/bitfield-1.c: New.
+ * gcc.target/arm/cmse/bitfield-2.c: New.
+ * gcc.target/arm/cmse/bitfield-3.c: New.
+ * gcc.target/arm/cmse/baseline/cmse-2.c: New.
+ * gcc.target/arm/cmse/baseline/softfp.c: New.
+ * gcc.target/arm/cmse/mainline/soft/cmse-5.c: New.
+ * gcc.target/arm/cmse/mainline/hard/cmse-5.c: New.
+ * gcc.target/arm/cmse/mainline/hard-sp/cmse-5.c: New.
+ * gcc.target/arm/cmse/mainline/softfp/cmse-5.c: New.
+ * gcc.target/arm/cmse/mainline/softfp-sp/cmse-5.c: New.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-02 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * gcc.target/arm/cmse/cmse-4.c: New.
+ * gcc.target/arm/cmse/cmse-9.c: New.
+ * gcc.target/arm/cmse/cmse-10.c: New.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-02 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * gcc.target/arm/cmse/cmse-3.c: New.
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-02 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * gcc.target/arm/cmse/cmse.exp: New.
+ * gcc.target/arm/cmse/cmse-1.c: New.
+ * gcc.target/arm/cmse/cmse-12.c: New.
+ * lib/target-supports.exp
+ (check_effective_target_arm_cmse_ok): New.
+
+2016-12-01 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-11-30 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * lib/target-supports.exp (add_options_for_arm_arch_v6m): Add
+ -mfloat-abi=soft option.
+ (add_options_for_arm_arch_v8m_base): Likewise. Reindent containing
+ foreach loop.
+
+2016-11-22 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-11-22 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ PR target/77904
+ * gcc.target/arm/pr77904.c: New test.
+
+2016-11-21 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+	Backport from mainline
+ 2016-11-21 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * gcc.target/arm/empty_fiq_handler.c: Skip if -mthumb is passed in and
+ target is Thumb-only.
+
+2016-11-18 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-11-18 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * gcc.target/arm/optional_thumb-1.c: New test.
+ * gcc.target/arm/optional_thumb-2.c: New test.
+ * gcc.target/arm/optional_thumb-3.c: New test.
+
+2016-11-08 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-11-08 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ PR target/77933
+ * gcc.target/arm/pr77933-1.c: New test.
+ * gcc.target/arm/pr77933-2.c: Likewise.
+
+2016-11-17 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-11-16 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * gcc.target/arm/empty_fiq_handler.c: New test.
+
+2016-11-08 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-10-21 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * gcc.target/arm/pure-code/pure-code.exp: Require arm_cortex_m
+ effective target.
+
+ 2016-09-22 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Terry Guo <terry.guo@arm.com>
+
+ * gcc.target/arm/pure-code/ffunction-sections.c: New.
+ * gcc.target/arm/pure-code/no-literal-pool.c: New.
+ * gcc.target/arm/pure-code/pure-code.exp: New.
+
+2016-10-27 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-10-27 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * gcc.target/arm/atomic-comp-swap-release-acquire-3.c: New test.
+ * gcc.target/arm/atomic-op-acq_rel-3.c: Likewise.
+ * gcc.target/arm/atomic-op-acquire-3.c: Likewise.
+ * gcc.target/arm/atomic-op-char-3.c: Likewise.
+ * gcc.target/arm/atomic-op-consume-3.c: Likewise.
+ * gcc.target/arm/atomic-op-int-3.c: Likewise.
+ * gcc.target/arm/atomic-op-relaxed-3.c: Likewise.
+ * gcc.target/arm/atomic-op-release-3.c: Likewise.
+ * gcc.target/arm/atomic-op-seq_cst-3.c: Likewise.
+ * gcc.target/arm/atomic-op-short-3.c: Likewise.
+
+2016-09-01 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-07-14 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * gcc.target/arm/atomic-comp-swap-release-acquire.c: Rename into ...
+ * gcc.target/arm/atomic-comp-swap-release-acquire-1.c: This.
+ * gcc.target/arm/atomic-op-acq_rel.c: Rename into ...
+ * gcc.target/arm/atomic-op-acq_rel-1.c: This.
+ * gcc.target/arm/atomic-op-acquire.c: Rename into ...
+ * gcc.target/arm/atomic-op-acquire-1.c: This.
+ * gcc.target/arm/atomic-op-char.c: Rename into ...
+ * gcc.target/arm/atomic-op-char-1.c: This.
+ * gcc.target/arm/atomic-op-consume.c: Rename into ...
+ * gcc.target/arm/atomic-op-consume-1.c: This.
+ * gcc.target/arm/atomic-op-int.c: Rename into ...
+ * gcc.target/arm/atomic-op-int-1.c: This.
+ * gcc.target/arm/atomic-op-relaxed.c: Rename into ...
+ * gcc.target/arm/atomic-op-relaxed-1.c: This.
+ * gcc.target/arm/atomic-op-release.c: Rename into ...
+ * gcc.target/arm/atomic-op-release-1.c: This.
+ * gcc.target/arm/atomic-op-seq_cst.c: Rename into ...
+ * gcc.target/arm/atomic-op-seq_cst-1.c: This.
+ * gcc.target/arm/atomic-op-short.c: Rename into ...
+ * gcc.target/arm/atomic-op-short-1.c: This.
+ * gcc.target/arm/atomic-comp-swap-release-acquire-2.c: New test.
+ * gcc.target/arm/atomic-op-acq_rel-2.c: Likewise.
+ * gcc.target/arm/atomic-op-acquire-2.c: Likewise.
+ * gcc.target/arm/atomic-op-char-2.c: Likewise.
+ * gcc.target/arm/atomic-op-consume-2.c: Likewise.
+ * gcc.target/arm/atomic-op-int-2.c: Likewise.
+ * gcc.target/arm/atomic-op-relaxed-2.c: Likewise.
+ * gcc.target/arm/atomic-op-release-2.c: Likewise.
+ * gcc.target/arm/atomic-op-seq_cst-2.c: Likewise.
+ * gcc.target/arm/atomic-op-short-2.c: Likewise.
+
+2016-07-14 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-07-14 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * gcc.target/arm/pr42574.c: Add missing target keyword for the dg-do
+ selector and enclose boolean expression in curly braces.
+
+2016-07-13 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-07-13 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * lib/target-supports.exp (check_effective_target_arm_thumb1_cbz_ok):
+ Add new arm_thumb1_cbz_ok effective target.
+ * gcc.target/arm/cbz.c: New test.
+
+2016-07-13 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-07-13 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * lib/target-supports.exp (check_effective_target_arm_thumb1_movt_ok):
+ Define effective target.
+ * gcc.target/arm/pr42574.c: Require arm_thumb1_ok and
+ !arm_thumb1_movt_ok to exclude ARMv8-M Baseline.
+ * gcc.target/arm/movhi_movw.c: New test.
+ * gcc.target/arm/movsi_movw.c: Likewise.
+ * gcc.target/arm/movdi_movw.c: Likewise.
+
+2016-07-11 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-07-07 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * lib/target-supports.exp: Generate add_options_for_arm_arch_FUNC and
+ check_effective_target_arm_arch_FUNC_multilib for ARMv8-M Baseline and
+ ARMv8-M Mainline architectures.
+
+2016-07-11 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-07-07 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * lib/target-supports.exp (check_effective_target_arm_cortex_m): Use
+ __ARM_ARCH_ISA_ARM to test for Cortex-M devices.
diff --git a/gcc/testsuite/gcc.target/arm/acle/acle.exp b/gcc/testsuite/gcc.target/arm/acle/acle.exp
index 91954bdff2f..f431da67794 100644
--- a/gcc/testsuite/gcc.target/arm/acle/acle.exp
+++ b/gcc/testsuite/gcc.target/arm/acle/acle.exp
@@ -27,9 +27,26 @@ load_lib gcc-dg.exp
# Initialize `dg'.
dg-init
+set saved-dg-do-what-default ${dg-do-what-default}
+set dg-do-what-default "assemble"
+
+set saved-lto_torture_options ${LTO_TORTURE_OPTIONS}
+
+# Add -ffat-lto-objects option to all LTO options such that we can do assembly
+# scans.
+proc add_fat_objects { list } {
+ set res {}
+ foreach el $list {set res [lappend res [concat $el " -ffat-lto-objects"]]}
+ return $res
+};
+set LTO_TORTURE_OPTIONS [add_fat_objects ${LTO_TORTURE_OPTIONS}]
+
# Main loop.
-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \
+gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \
"" ""
+# Restore globals
+set dg-do-what-default ${saved-dg-do-what-default}
+set LTO_TORTURE_OPTIONS ${saved-lto_torture_options}
# All done.
dg-finish
diff --git a/gcc/testsuite/gcc.target/arm/acle/cdp.c b/gcc/testsuite/gcc.target/arm/acle/cdp.c
new file mode 100644
index 00000000000..28b218e7cfc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/acle/cdp.c
@@ -0,0 +1,14 @@
+/* Test the cdp ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc1_ok } */
+
+#include "arm_acle.h"
+
+void test_cdp (void)
+{
+ __arm_cdp (10, 1, 2, 3, 4, 5);
+}
+
+/* { dg-final { scan-assembler "cdp\tp10, #1, CR2, CR3, CR4, #5\n" } } */
diff --git a/gcc/testsuite/gcc.target/arm/acle/cdp2.c b/gcc/testsuite/gcc.target/arm/acle/cdp2.c
new file mode 100644
index 00000000000..00bcd502b56
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/acle/cdp2.c
@@ -0,0 +1,14 @@
+/* Test the cdp2 ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc2_ok } */
+
+#include "arm_acle.h"
+
+void test_cdp2 (void)
+{
+ __arm_cdp2 (10, 4, 3, 2, 1, 0);
+}
+
+/* { dg-final { scan-assembler "cdp2\tp10, #4, CR3, CR2, CR1, #0\n" } } */
diff --git a/gcc/testsuite/gcc.target/arm/acle/ldc.c b/gcc/testsuite/gcc.target/arm/acle/ldc.c
new file mode 100644
index 00000000000..f45f25d8c97
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/acle/ldc.c
@@ -0,0 +1,18 @@
+/* Test the ldc ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc1_ok } */
+
+#include "arm_acle.h"
+
+extern void * p;
+
+void test_ldc (void)
+{
+ __arm_ldc (10, 1, p + 4);
+ __arm_ldc (11, 1, p + 1024);
+}
+
+/* { dg-final { scan-assembler "ldc\tp10, CR1, \[r\[0-9\]+" } } */
+/* { dg-final { scan-assembler "ldc\tp11, CR1, \[r\[0-9\]+\]\n" } } */
diff --git a/gcc/testsuite/gcc.target/arm/acle/ldc2.c b/gcc/testsuite/gcc.target/arm/acle/ldc2.c
new file mode 100644
index 00000000000..433bf8a1204
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/acle/ldc2.c
@@ -0,0 +1,18 @@
+/* Test the ldc2 ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc2_ok } */
+
+#include "arm_acle.h"
+
+extern void * p;
+
+void test_ldc2 (void)
+{
+ __arm_ldc2 (10, 1, p - 120);
+ __arm_ldc2 (11, 1, p - 122);
+}
+
+/* { dg-final { scan-assembler "ldc2\tp10, CR1, \[r\[0-9\]+" } } */
+/* { dg-final { scan-assembler "ldc2\tp11, CR1, \[r\[0-9\]+\]\n" } } */
diff --git a/gcc/testsuite/gcc.target/arm/acle/ldc2l.c b/gcc/testsuite/gcc.target/arm/acle/ldc2l.c
new file mode 100644
index 00000000000..88c8aa44765
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/acle/ldc2l.c
@@ -0,0 +1,18 @@
+/* Test the ldc2l ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc2_ok } */
+
+#include "arm_acle.h"
+
+extern void * p;
+
+void test_ldc2l (void)
+{
+ __arm_ldc2l (10, 1, p - 120);
+ __arm_ldc2l (11, 1, p - 122);
+}
+
+/* { dg-final { scan-assembler "ldc2l\tp10, CR1, \[r\[0-9\]+" } } */
+/* { dg-final { scan-assembler "ldc2l\tp11, CR1, \[r\[0-9\]+\]\n" } } */
diff --git a/gcc/testsuite/gcc.target/arm/acle/ldcl.c b/gcc/testsuite/gcc.target/arm/acle/ldcl.c
new file mode 100644
index 00000000000..72a97f1d7b7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/acle/ldcl.c
@@ -0,0 +1,18 @@
+/* Test the ldcl ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc1_ok } */
+
+#include "arm_acle.h"
+
+extern void * p;
+
+void test_ldcl (void)
+{
+ __arm_ldcl (10, 1, p + 4);
+ __arm_ldcl (11, 1, p + 1024);
+}
+
+/* { dg-final { scan-assembler "ldcl\tp10, CR1, \[r\[0-9\]+" } } */
+/* { dg-final { scan-assembler "ldcl\tp11, CR1, \[r\[0-9\]+\]\n" } } */
diff --git a/gcc/testsuite/gcc.target/arm/acle/mcr.c b/gcc/testsuite/gcc.target/arm/acle/mcr.c
new file mode 100644
index 00000000000..93f977a2bdb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/acle/mcr.c
@@ -0,0 +1,16 @@
+/* Test the mcr ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc1_ok } */
+
+#include "arm_acle.h"
+
+void test_mcr (uint32_t a)
+{
+ a += 77;
+ __arm_mcr (10, 5, a, 3, 4, 7);
+}
+
+/* { dg-final { scan-assembler "add\[^\n\]*#77\n" } } */
+/* { dg-final { scan-assembler "mcr\tp10, #5, r\[r0-9\]*, CR3, CR4, #7\n" } } */
diff --git a/gcc/testsuite/gcc.target/arm/acle/mcr2.c b/gcc/testsuite/gcc.target/arm/acle/mcr2.c
new file mode 100644
index 00000000000..5b60d10ff25
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/acle/mcr2.c
@@ -0,0 +1,16 @@
+/* Test the mcr2 ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc2_ok } */
+
+#include "arm_acle.h"
+
+void test_mcr2 (uint32_t a)
+{
+ a += 77;
+ __arm_mcr2 (10, 5, a, 3, 4, 7);
+}
+
+/* { dg-final { scan-assembler "add\[^\n\]*#77\n" } } */
+/* { dg-final { scan-assembler "mcr2\tp10, #5, r\[r0-9\]*, CR3, CR4, #7\n" } } */
diff --git a/gcc/testsuite/gcc.target/arm/acle/mcrr.c b/gcc/testsuite/gcc.target/arm/acle/mcrr.c
new file mode 100644
index 00000000000..dcc223c713d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/acle/mcrr.c
@@ -0,0 +1,16 @@
+/* Test the mcrr ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc3_ok } */
+
+#include "arm_acle.h"
+
+void test_mcrr (uint64_t a)
+{
+ a += 77;
+ __arm_mcrr (10, 5, a, 3);
+}
+
+/* { dg-final { scan-assembler "add\[^\n\]*#77\n" } } */
+/* { dg-final { scan-assembler "mcrr\tp10, #5, r\[r0-9\]*, r\[r0-9\]*, CR3\n" } } */
diff --git a/gcc/testsuite/gcc.target/arm/acle/mcrr2.c b/gcc/testsuite/gcc.target/arm/acle/mcrr2.c
new file mode 100644
index 00000000000..a341169b123
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/acle/mcrr2.c
@@ -0,0 +1,16 @@
+/* Test the mcrr2 ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc3_ok } */
+
+#include "arm_acle.h"
+
+void test_mcrr2 (uint64_t a)
+{
+ a += 77;
+ __arm_mcrr2 (10, 5, a, 3);
+}
+
+/* { dg-final { scan-assembler "add\[^\n\]*#77\n" } } */
+/* { dg-final { scan-assembler "mcrr2\tp10, #5, r\[r0-9\]*, r\[r0-9\]*, CR3\n" } } */
diff --git a/gcc/testsuite/gcc.target/arm/acle/mrc.c b/gcc/testsuite/gcc.target/arm/acle/mrc.c
new file mode 100644
index 00000000000..34ca6a1638c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/acle/mrc.c
@@ -0,0 +1,14 @@
+/* Test the mrc ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc1_ok } */
+
+#include "arm_acle.h"
+
+uint32_t test_mrc (void)
+{
+ return __arm_mrc (10, 0, 0, 15, 3);
+}
+
+/* { dg-final { scan-assembler "mrc\tp10, #0, r\[r0-9\]*, CR0, CR15, #3\n" } } */
diff --git a/gcc/testsuite/gcc.target/arm/acle/mrc2.c b/gcc/testsuite/gcc.target/arm/acle/mrc2.c
new file mode 100644
index 00000000000..3b72a402224
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/acle/mrc2.c
@@ -0,0 +1,14 @@
+/* Test the mrc2 ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc2_ok } */
+
+#include "arm_acle.h"
+
+uint32_t test_mrc2 (void)
+{
+ return __arm_mrc2 (10, 0, 0, 15, 3);
+}
+
+/* { dg-final { scan-assembler "mrc2\tp10, #0, r\[r0-9\]*, CR0, CR15, #3\n" } } */
diff --git a/gcc/testsuite/gcc.target/arm/acle/mrrc.c b/gcc/testsuite/gcc.target/arm/acle/mrrc.c
new file mode 100644
index 00000000000..28c3b8ea6b5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/acle/mrrc.c
@@ -0,0 +1,14 @@
+/* Test the mrrc ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc3_ok } */
+
+#include "arm_acle.h"
+
+uint64_t test_mrrc (void)
+{
+ return __arm_mrrc (10, 5, 3);
+}
+
+/* { dg-final { scan-assembler "mrrc\tp10, #5, r\[r0-9\]*, r\[r0-9\]*, CR3\n" } } */
diff --git a/gcc/testsuite/gcc.target/arm/acle/mrrc2.c b/gcc/testsuite/gcc.target/arm/acle/mrrc2.c
new file mode 100644
index 00000000000..1e89828e158
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/acle/mrrc2.c
@@ -0,0 +1,14 @@
+/* Test the mrrc2 ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc3_ok } */
+
+#include "arm_acle.h"
+
+uint64_t test_mrrc2 (void)
+{
+ return __arm_mrrc2 (10, 5, 3);
+}
+
+/* { dg-final { scan-assembler "mrrc2\tp10, #5, r\[r0-9\]*, r\[r0-9\]*, CR3\n" } } */
diff --git a/gcc/testsuite/gcc.target/arm/acle/stc.c b/gcc/testsuite/gcc.target/arm/acle/stc.c
new file mode 100644
index 00000000000..7c6e04fe0fe
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/acle/stc.c
@@ -0,0 +1,18 @@
+/* Test the stc ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc1_ok } */
+
+#include "arm_acle.h"
+
+extern void * p;
+
+void test_stc (void)
+{
+ __arm_stc (10, 1, p + 4);
+ __arm_stc (11, 1, p + 1024);
+}
+
+/* { dg-final { scan-assembler "stc\tp10, CR1, \[r\[0-9\]+" } } */
+/* { dg-final { scan-assembler "stc\tp11, CR1, \[r\[0-9\]+\]\n" } } */
diff --git a/gcc/testsuite/gcc.target/arm/acle/stc2.c b/gcc/testsuite/gcc.target/arm/acle/stc2.c
new file mode 100644
index 00000000000..1578f7b1136
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/acle/stc2.c
@@ -0,0 +1,18 @@
+/* Test the stc2 ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc2_ok } */
+
+#include "arm_acle.h"
+
+extern void * p;
+
+void test_stc2 (void)
+{
+ __arm_stc2 (10, 1, p - 120);
+ __arm_stc2 (11, 1, p - 122);
+}
+
+/* { dg-final { scan-assembler "stc2\tp10, CR1, \[r\[0-9\]+" } } */
+/* { dg-final { scan-assembler "stc2\tp11, CR1, \[r\[0-9\]+\]\n" } } */
diff --git a/gcc/testsuite/gcc.target/arm/acle/stc2l.c b/gcc/testsuite/gcc.target/arm/acle/stc2l.c
new file mode 100644
index 00000000000..7adbd60d48a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/acle/stc2l.c
@@ -0,0 +1,18 @@
+/* Test the stc2l ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc2_ok } */
+
+#include "arm_acle.h"
+
+extern void * p;
+
+void test_stc2l (void)
+{
+ __arm_stc2l (10, 1, p - 120);
+ __arm_stc2l (11, 1, p - 122);
+}
+
+/* { dg-final { scan-assembler "stc2l\tp10, CR1, \[r\[0-9\]+" } } */
+/* { dg-final { scan-assembler "stc2l\tp11, CR1, \[r\[0-9\]+\]\n" } } */
diff --git a/gcc/testsuite/gcc.target/arm/acle/stcl.c b/gcc/testsuite/gcc.target/arm/acle/stcl.c
new file mode 100644
index 00000000000..2fd5edd02d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/acle/stcl.c
@@ -0,0 +1,18 @@
+/* Test the stcl ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc1_ok } */
+
+#include "arm_acle.h"
+
+extern void * p;
+
+void test_stcl (void)
+{
+ __arm_stcl (14, 10, p + 4);
+ __arm_stcl (10, 10, p + 1024);
+}
+
+/* { dg-final { scan-assembler "stcl\tp14, CR10, \[r\[0-9\]+" } } */
+/* { dg-final { scan-assembler "stcl\tp10, CR10, \[r\[0-9\]+\]\n" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-comp-swap-release-acquire.c b/gcc/testsuite/gcc.target/arm/atomic-comp-swap-release-acquire-1.c
index aa11ba57e02..aa11ba57e02 100644
--- a/gcc/testsuite/gcc.target/arm/atomic-comp-swap-release-acquire.c
+++ b/gcc/testsuite/gcc.target/arm/atomic-comp-swap-release-acquire-1.c
diff --git a/gcc/testsuite/gcc.target/arm/atomic-comp-swap-release-acquire-2.c b/gcc/testsuite/gcc.target/arm/atomic-comp-swap-release-acquire-2.c
new file mode 100644
index 00000000000..73b140aaae4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-comp-swap-release-acquire-2.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-options "-O2 -fno-ipa-icf" } */
+/* { dg-add-options arm_arch_v8m_main } */
+
+#include "../aarch64/atomic-comp-swap-release-acquire.x"
+
+/* { dg-final { scan-assembler-times "ldaex" 4 } } */
+/* { dg-final { scan-assembler-times "stlex" 4 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-comp-swap-release-acquire-3.c b/gcc/testsuite/gcc.target/arm/atomic-comp-swap-release-acquire-3.c
new file mode 100644
index 00000000000..0191f7af3a4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-comp-swap-release-acquire-3.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_base_ok } */
+/* { dg-options "-O2 -fno-ipa-icf" } */
+/* { dg-add-options arm_arch_v8m_base } */
+
+#include "../aarch64/atomic-comp-swap-release-acquire.x"
+
+/* { dg-final { scan-assembler-times "ldaex" 4 } } */
+/* { dg-final { scan-assembler-times "stlex" 4 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-acq_rel.c b/gcc/testsuite/gcc.target/arm/atomic-op-acq_rel-1.c
index ccfa31c34e3..ccfa31c34e3 100644
--- a/gcc/testsuite/gcc.target/arm/atomic-op-acq_rel.c
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-acq_rel-1.c
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-acq_rel-2.c b/gcc/testsuite/gcc.target/arm/atomic-op-acq_rel-2.c
new file mode 100644
index 00000000000..26f88c8775a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-acq_rel-2.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_arch_v8m_main } */
+
+#include "../aarch64/atomic-op-acq_rel.x"
+
+/* { dg-final { scan-assembler-times "ldaex\tr\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-times "stlex\t...?, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-acq_rel-3.c b/gcc/testsuite/gcc.target/arm/atomic-op-acq_rel-3.c
new file mode 100644
index 00000000000..f2ed32d0197
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-acq_rel-3.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_base_ok } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_arch_v8m_base } */
+
+#include "../aarch64/atomic-op-acq_rel.x"
+
+/* { dg-final { scan-assembler-times "ldaex\tr\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-times "stlex\t...?, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-acquire.c b/gcc/testsuite/gcc.target/arm/atomic-op-acquire-1.c
index 52bcf99e83c..52bcf99e83c 100644
--- a/gcc/testsuite/gcc.target/arm/atomic-op-acquire.c
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-acquire-1.c
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-acquire-2.c b/gcc/testsuite/gcc.target/arm/atomic-op-acquire-2.c
new file mode 100644
index 00000000000..09062eba358
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-acquire-2.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_arch_v8m_main } */
+
+#include "../aarch64/atomic-op-acquire.x"
+
+/* { dg-final { scan-assembler-times "ldaex\tr\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-times "strex\t...?, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-acquire-3.c b/gcc/testsuite/gcc.target/arm/atomic-op-acquire-3.c
new file mode 100644
index 00000000000..bba1c2709e7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-acquire-3.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_base_ok } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_arch_v8m_base } */
+
+#include "../aarch64/atomic-op-acquire.x"
+
+/* { dg-final { scan-assembler-times "ldaex\tr\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-times "strex\t...?, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-char.c b/gcc/testsuite/gcc.target/arm/atomic-op-char-1.c
index 0c30922dbaa..0c30922dbaa 100644
--- a/gcc/testsuite/gcc.target/arm/atomic-op-char.c
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-char-1.c
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-char-2.c b/gcc/testsuite/gcc.target/arm/atomic-op-char-2.c
new file mode 100644
index 00000000000..3f33857cf68
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-char-2.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_arch_v8m_main } */
+
+#include "../aarch64/atomic-op-char.x"
+
+/* { dg-final { scan-assembler-times "ldrexb\tr\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-times "strexb\t...?, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-char-3.c b/gcc/testsuite/gcc.target/arm/atomic-op-char-3.c
new file mode 100644
index 00000000000..17117eebf70
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-char-3.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_base_ok } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_arch_v8m_base } */
+
+#include "../aarch64/atomic-op-char.x"
+
+/* { dg-final { scan-assembler-times "ldrexb\tr\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-times "strexb\t...?, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-consume.c b/gcc/testsuite/gcc.target/arm/atomic-op-consume-1.c
index 6c5f9897261..6c5f9897261 100644
--- a/gcc/testsuite/gcc.target/arm/atomic-op-consume.c
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-consume-1.c
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-consume-2.c b/gcc/testsuite/gcc.target/arm/atomic-op-consume-2.c
new file mode 100644
index 00000000000..9771d817f02
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-consume-2.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_arch_v8m_main } */
+
+#include "../aarch64/atomic-op-consume.x"
+
+/* Scanning for ldaex here is a workaround for the PR59448 consume fix.  */
+/* { dg-final { scan-assembler-times "ldaex\tr\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-times "strex\t...?, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-consume-3.c b/gcc/testsuite/gcc.target/arm/atomic-op-consume-3.c
new file mode 100644
index 00000000000..8352f0c3af8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-consume-3.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_base_ok } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_arch_v8m_base } */
+
+#include "../aarch64/atomic-op-consume.x"
+
+/* Scanning for ldaex here is a workaround for the PR59448 consume fix.  */
+/* { dg-final { scan-assembler-times "ldaex\tr\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-times "strex\t...?, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-int.c b/gcc/testsuite/gcc.target/arm/atomic-op-int-1.c
index 7716994f0d0..7716994f0d0 100644
--- a/gcc/testsuite/gcc.target/arm/atomic-op-int.c
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-int-1.c
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-int-2.c b/gcc/testsuite/gcc.target/arm/atomic-op-int-2.c
new file mode 100644
index 00000000000..ce0fbfcc0cb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-int-2.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_arch_v8m_main } */
+
+#include "../aarch64/atomic-op-int.x"
+
+/* { dg-final { scan-assembler-times "ldrex\tr\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-times "strex\t...?, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-int-3.c b/gcc/testsuite/gcc.target/arm/atomic-op-int-3.c
new file mode 100644
index 00000000000..d4f1db34a1f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-int-3.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_base_ok } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_arch_v8m_base } */
+
+#include "../aarch64/atomic-op-int.x"
+
+/* { dg-final { scan-assembler-times "ldrex\tr\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-times "strex\t...?, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-relaxed.c b/gcc/testsuite/gcc.target/arm/atomic-op-relaxed-1.c
index 4b72fd95bc3..4b72fd95bc3 100644
--- a/gcc/testsuite/gcc.target/arm/atomic-op-relaxed.c
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-relaxed-1.c
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-relaxed-2.c b/gcc/testsuite/gcc.target/arm/atomic-op-relaxed-2.c
new file mode 100644
index 00000000000..207baf77d2a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-relaxed-2.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_arch_v8m_main } */
+
+#include "../aarch64/atomic-op-relaxed.x"
+
+/* { dg-final { scan-assembler-times "ldrex\tr\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-times "strex\t...?, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-relaxed-3.c b/gcc/testsuite/gcc.target/arm/atomic-op-relaxed-3.c
new file mode 100644
index 00000000000..09b5ea9f6d3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-relaxed-3.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_base_ok } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_arch_v8m_base } */
+
+#include "../aarch64/atomic-op-relaxed.x"
+
+/* { dg-final { scan-assembler-times "ldrex\tr\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-times "strex\t...?, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-release.c b/gcc/testsuite/gcc.target/arm/atomic-op-release-1.c
index 8582e4f1d97..8582e4f1d97 100644
--- a/gcc/testsuite/gcc.target/arm/atomic-op-release.c
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-release-1.c
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-release-2.c b/gcc/testsuite/gcc.target/arm/atomic-op-release-2.c
new file mode 100644
index 00000000000..376f8597581
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-release-2.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_arch_v8m_main } */
+
+#include "../aarch64/atomic-op-release.x"
+
+/* { dg-final { scan-assembler-times "ldrex\tr\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-times "stlex\t...?, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-release-3.c b/gcc/testsuite/gcc.target/arm/atomic-op-release-3.c
new file mode 100644
index 00000000000..2b136f5ca2e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-release-3.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_base_ok } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_arch_v8m_base } */
+
+#include "../aarch64/atomic-op-release.x"
+
+/* { dg-final { scan-assembler-times "ldrex\tr\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-times "stlex\t...?, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-seq_cst.c b/gcc/testsuite/gcc.target/arm/atomic-op-seq_cst-1.c
index 70b5b9ebb6c..70b5b9ebb6c 100644
--- a/gcc/testsuite/gcc.target/arm/atomic-op-seq_cst.c
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-seq_cst-1.c
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-seq_cst-2.c b/gcc/testsuite/gcc.target/arm/atomic-op-seq_cst-2.c
new file mode 100644
index 00000000000..4b31083dc42
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-seq_cst-2.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_arch_v8m_main } */
+
+#include "../aarch64/atomic-op-seq_cst.x"
+
+/* { dg-final { scan-assembler-times "ldaex\tr\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-times "stlex\t...?, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-seq_cst-3.c b/gcc/testsuite/gcc.target/arm/atomic-op-seq_cst-3.c
new file mode 100644
index 00000000000..7f38d42fa63
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-seq_cst-3.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_base_ok } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_arch_v8m_base } */
+
+#include "../aarch64/atomic-op-seq_cst.x"
+
+/* { dg-final { scan-assembler-times "ldaex\tr\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-times "stlex\t...?, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-short.c b/gcc/testsuite/gcc.target/arm/atomic-op-short-1.c
index a6f5a6df617..a6f5a6df617 100644
--- a/gcc/testsuite/gcc.target/arm/atomic-op-short.c
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-short-1.c
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-short-2.c b/gcc/testsuite/gcc.target/arm/atomic-op-short-2.c
new file mode 100644
index 00000000000..e9b35f3d0a7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-short-2.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_arch_v8m_main } */
+
+#include "../aarch64/atomic-op-short.x"
+
+/* { dg-final { scan-assembler-times "ldrexh\tr\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-times "strexh\t...?, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/atomic-op-short-3.c b/gcc/testsuite/gcc.target/arm/atomic-op-short-3.c
new file mode 100644
index 00000000000..60ae42ebc34
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/atomic-op-short-3.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_base_ok } */
+/* { dg-options "-O2" } */
+/* { dg-add-options arm_arch_v8m_base } */
+
+#include "../aarch64/atomic-op-short.x"
+
+/* { dg-final { scan-assembler-times "ldrexh\tr\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-times "strexh\t...?, r\[0-9\]+, \\\[r\[0-9\]+\\\]" 6 } } */
+/* { dg-final { scan-assembler-not "dmb" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cbz.c b/gcc/testsuite/gcc.target/arm/cbz.c
new file mode 100644
index 00000000000..5d3de638777
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cbz.c
@@ -0,0 +1,12 @@
+/* { dg-do compile {target { arm_thumb2 || arm_thumb1_cbz_ok } } } */
+/* { dg-options "-O2" } */
+
+int
+foo (int a, int *b)
+{
+ if (a)
+ *b = 1;
+ return 0;
+}
+
+/* { dg-final { scan-assembler-times "cbz\\tr\\d" 1 } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-4.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-4.c
new file mode 100644
index 00000000000..a6c1386c06e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-4.c
@@ -0,0 +1,57 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+typedef struct
+{
+ unsigned char a;
+ unsigned int b:5;
+ unsigned int c:11, :0, d:8;
+ struct { unsigned int ee:2; } e;
+} test_st;
+
+typedef union
+{
+ test_st st;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
+
+extern void foo (test_st st);
+
+int
+main (void)
+{
+ read_st r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+ r.values.v3 = 0xFFFFFFFF;
+ r.values.v4 = 0xFFFFFFFF;
+
+ f (r.st);
+ return 0;
+}
+
+/* { dg-final { scan-assembler "mov\tip, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #65535" } } */
+/* { dg-final { scan-assembler "movt\tr4, 255" } } */
+/* { dg-final { scan-assembler "ands\tr0, r4" } } */
+/* { dg-final { scan-assembler "movs\tr4, #255" } } */
+/* { dg-final { scan-assembler "ands\tr1, r4" } } */
+/* { dg-final { scan-assembler "movs\tr4, #3" } } */
+/* { dg-final { scan-assembler "ands\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr4, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "movs\tr3, r4" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-5.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-5.c
new file mode 100644
index 00000000000..d51ce2d42c0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-5.c
@@ -0,0 +1,53 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+typedef struct
+{
+ unsigned char a;
+ unsigned short b :5;
+ unsigned char c;
+ unsigned short d :11;
+} test_st;
+
+typedef union
+{
+ test_st st;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
+
+int
+main (void)
+{
+ read_st r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+
+ f (r.st);
+ return 0;
+}
+
+/* { dg-final { scan-assembler "mov\tip, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #8191" } } */
+/* { dg-final { scan-assembler "movt\tr4, 255" } } */
+/* { dg-final { scan-assembler "ands\tr0, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #2047" } } */
+/* { dg-final { scan-assembler "ands\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr4, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "movs\tr2, r4" } } */
+/* { dg-final { scan-assembler "movs\tr3, r4" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-6.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-6.c
new file mode 100644
index 00000000000..77e9104b546
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-6.c
@@ -0,0 +1,63 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+typedef struct
+{
+ unsigned char a;
+ unsigned int b : 3;
+ unsigned int c : 14;
+ unsigned int d : 1;
+ struct {
+ unsigned int ee : 2;
+ unsigned short ff : 15;
+ } e;
+ unsigned char g : 1;
+ unsigned char : 4;
+ unsigned char h : 3;
+} test_st;
+
+typedef union
+{
+ test_st st;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
+
+int
+main (void)
+{
+ read_st r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+ r.values.v3 = 0xFFFFFFFF;
+ r.values.v4 = 0xFFFFFFFF;
+
+ f (r.st);
+ return 0;
+}
+
+/* { dg-final { scan-assembler "mov\tip, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #65535" } } */
+/* { dg-final { scan-assembler "movt\tr4, 1023" } } */
+/* { dg-final { scan-assembler "ands\tr0, r4" } } */
+/* { dg-final { scan-assembler "movs\tr4, #3" } } */
+/* { dg-final { scan-assembler "movt\tr4, 32767" } } */
+/* { dg-final { scan-assembler "ands\tr1, r4" } } */
+/* { dg-final { scan-assembler "movs\tr4, #255" } } */
+/* { dg-final { scan-assembler "ands\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr4, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "movs\tr3, r4" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-7.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-7.c
new file mode 100644
index 00000000000..3d8941bbfee
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-7.c
@@ -0,0 +1,54 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+typedef struct
+{
+ unsigned char a;
+ unsigned short b :5;
+ unsigned char c;
+ unsigned short d :11;
+} test_st;
+
+typedef union
+{
+ test_st st;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
+
+int
+main (void)
+{
+ read_st r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+
+ f (r.st);
+ return 0;
+}
+
+
+/* { dg-final { scan-assembler "mov\tip, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #8191" } } */
+/* { dg-final { scan-assembler "movt\tr4, 255" } } */
+/* { dg-final { scan-assembler "ands\tr0, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #2047" } } */
+/* { dg-final { scan-assembler "ands\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr4, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "movs\tr2, r4" } } */
+/* { dg-final { scan-assembler "movs\tr3, r4" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-8.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-8.c
new file mode 100644
index 00000000000..9ffbb718d34
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-8.c
@@ -0,0 +1,57 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+typedef struct
+{
+ unsigned char a;
+ unsigned int :0;
+ unsigned int b :1;
+ unsigned short :0;
+ unsigned short c;
+ unsigned int :0;
+ unsigned int d :21;
+} test_st;
+
+typedef union
+{
+ test_st st;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st;
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
+
+int
+main (void)
+{
+ read_st r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+ r.values.v3 = 0xFFFFFFFF;
+
+ f (r.st);
+ return 0;
+}
+
+/* { dg-final { scan-assembler "mov\tip, r4" } } */
+/* { dg-final { scan-assembler "movs\tr4, #255" } } */
+/* { dg-final { scan-assembler "ands\tr0, r4" } } */
+/* { dg-final { scan-assembler "movs\tr4, #1" } } */
+/* { dg-final { scan-assembler "movt\tr4, 65535" } } */
+/* { dg-final { scan-assembler "ands\tr1, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #65535" } } */
+/* { dg-final { scan-assembler "movt\tr4, 31" } } */
+/* { dg-final { scan-assembler "ands\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr4, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "movs\tr3, r4" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-9.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-9.c
new file mode 100644
index 00000000000..8a614182923
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-9.c
@@ -0,0 +1,56 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+typedef struct
+{
+ char a:3;
+} test_st3;
+
+typedef struct
+{
+ char a:3;
+} test_st2;
+
+typedef struct
+{
+ test_st2 st2;
+ test_st3 st3;
+} test_st;
+
+typedef union
+{
+ test_st st;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st;
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
+
+int
+main (void)
+{
+ read_st r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+
+ f (r.st);
+ return 0;
+}
+
+/* { dg-final { scan-assembler "mov\tip, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #1799" } } */
+/* { dg-final { scan-assembler "ands\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr4, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "movs\tr1, r4" } } */
+/* { dg-final { scan-assembler "movs\tr2, r4" } } */
+/* { dg-final { scan-assembler "movs\tr3, r4" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-and-union-1.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-and-union-1.c
new file mode 100644
index 00000000000..642f4e0346b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/bitfield-and-union-1.c
@@ -0,0 +1,96 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+typedef struct
+{
+ unsigned short a :11;
+} test_st_4;
+
+typedef union
+{
+ char a;
+ test_st_4 st4;
+}test_un_2;
+
+typedef struct
+{
+ unsigned char a;
+ unsigned int :0;
+ unsigned int b :1;
+ unsigned short :0;
+ unsigned short c;
+ unsigned int :0;
+ unsigned int d :21;
+} test_st_3;
+
+typedef struct
+{
+ unsigned char a :3;
+ unsigned int b :13;
+ test_un_2 un2;
+} test_st_2;
+
+typedef union
+{
+ test_st_2 st2;
+ test_st_3 st3;
+}test_un_1;
+
+typedef struct
+{
+ unsigned char a :2;
+ unsigned char :0;
+ unsigned short b :5;
+ unsigned char :0;
+ unsigned char c :4;
+ test_un_1 un1;
+} test_st_1;
+
+typedef union
+{
+ test_st_1 st1;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st_1;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st_1);
+
+int
+main (void)
+{
+ read_st_1 r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+ r.values.v3 = 0xFFFFFFFF;
+ r.values.v4 = 0xFFFFFFFF;
+
+ f (r.st1);
+ return 0;
+}
+
+/* { dg-final { scan-assembler "mov\tip, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #7939" } } */
+/* { dg-final { scan-assembler "movt\tr4, 15" } } */
+/* { dg-final { scan-assembler "ands\tr0, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #65535" } } */
+/* { dg-final { scan-assembler "movt\tr4, 2047" } } */
+/* { dg-final { scan-assembler "ands\tr1, r4" } } */
+/* { dg-final { scan-assembler "movs\tr4, #1" } } */
+/* { dg-final { scan-assembler "movt\tr4, 65535" } } */
+/* { dg-final { scan-assembler "ands\tr2, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #65535" } } */
+/* { dg-final { scan-assembler "movt\tr4, 31" } } */
+/* { dg-final { scan-assembler "ands\tr3, r4" } } */
+/* { dg-final { scan-assembler "mov\tr4, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-11.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-11.c
new file mode 100644
index 00000000000..3007409ad88
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-11.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_base_ok } */
+/* { dg-add-options arm_arch_v8m_base } */
+/* { dg-options "-mcmse" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (int);
+
+int
+foo (int a)
+{
+ return bar (bar (a + 1));
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "movs\tr1, r4" } } */
+/* { dg-final { scan-assembler "movs\tr2, r4" } } */
+/* { dg-final { scan-assembler "movs\tr3, r4" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-13.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-13.c
new file mode 100644
index 00000000000..f2b931be591
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-13.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_base_ok } */
+/* { dg-add-options arm_arch_v8m_base } */
+/* { dg-options "-mcmse" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double);
+
+int
+foo (int a)
+{
+ return bar (1.0f, 2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler-not "movs\tr0, r4" } } */
+/* { dg-final { scan-assembler "\n\tmovs\tr1, r4" } } */
+/* { dg-final { scan-assembler-not "\n\tmovs\tr2, r4\n\tmovs\tr3, r4" } } */
+/* { dg-final { scan-assembler-not "vmov" } } */
+/* { dg-final { scan-assembler-not "vmsr" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-2.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-2.c
new file mode 100644
index 00000000000..814502d4e5d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-2.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_base_ok } */
+/* { dg-add-options arm_arch_v8m_base } */
+/* { dg-options "-mcmse" } */
+
+extern float bar (void);
+
+float __attribute__ ((cmse_nonsecure_entry))
+foo (void)
+{
+ return bar ();
+}
+/* { dg-final { scan-assembler "movs\tr1, r0" } } */
+/* { dg-final { scan-assembler "movs\tr2, r0" } } */
+/* { dg-final { scan-assembler "movs\tr3, r0" } } */
+/* { dg-final { scan-assembler "mov\tip, r0" } } */
+/* { dg-final { scan-assembler "mov\tlr, r0" } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvq," } } */
+/* { dg-final { scan-assembler "bxns" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-6.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-6.c
new file mode 100644
index 00000000000..95da045690a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/cmse-6.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_base_ok } */
+/* { dg-add-options arm_arch_v8m_base } */
+/* { dg-options "-mcmse" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
+
+int
+foo (int a)
+{
+ return bar (2.0) + a + 1;
+}
+
+/* Remember: don't clear r0 and r1, because we are passing the double parameter
+ * for bar in them.  */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "movs\tr2, r4" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/softfp.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/softfp.c
new file mode 100644
index 00000000000..0069fcdaebf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/softfp.c
@@ -0,0 +1,29 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_base_ok } */
+/* { dg-add-options arm_arch_v8m_base } */
+/* { dg-options "-mcmse -mfloat-abi=softfp" } */
+
+double __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double);
+
+double
+foo (double a)
+{
+ return bar (1.0f, 2.0) + a;
+}
+
+float __attribute__ ((cmse_nonsecure_entry))
+baz (float a, double b)
+{
+ return (float) bar (a, b);
+}
+
+/* Make sure we are not using FP instructions, since ARMv8-M Baseline does not
+ support such instructions. */
+/* { dg-final { scan-assembler-not "vmov" } } */
+/* { dg-final { scan-assembler-not "vmsr" } } */
+/* { dg-final { scan-assembler-not "vmrs" } } */
+
+/* Just double checking that we are still doing cmse though. */
+/* { dg-final { scan-assembler-not "vmrs" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/union-1.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/union-1.c
new file mode 100644
index 00000000000..ff18e839b02
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/union-1.c
@@ -0,0 +1,71 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+typedef struct
+{
+ unsigned char a :2;
+ unsigned char :0;
+ unsigned short b :5;
+ unsigned char :0;
+ unsigned short c :3;
+ unsigned char :0;
+ unsigned int d :9;
+} test_st_1;
+
+typedef struct
+{
+ unsigned short a :7;
+ unsigned char :0;
+ unsigned char b :1;
+ unsigned char :0;
+ unsigned short c :6;
+} test_st_2;
+
+typedef union
+{
+ test_st_1 st_1;
+ test_st_2 st_2;
+}test_un;
+
+typedef union
+{
+ test_un un;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_un;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_un);
+
+int
+main (void)
+{
+ read_un r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+
+ f (r.un);
+ return 0;
+}
+
+/* { dg-final { scan-assembler "mov\tip, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #8063" } } */
+/* { dg-final { scan-assembler "movt\tr4, 63" } } */
+/* { dg-final { scan-assembler "ands\tr0, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #511" } } */
+/* { dg-final { scan-assembler "ands\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr4, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "movs\tr2, r4" } } */
+/* { dg-final { scan-assembler "movs\tr3, r4" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/baseline/union-2.c b/gcc/testsuite/gcc.target/arm/cmse/baseline/union-2.c
new file mode 100644
index 00000000000..b2e024b7f07
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/baseline/union-2.c
@@ -0,0 +1,86 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+typedef struct
+{
+ unsigned char a :2;
+ unsigned char :0;
+ unsigned short b :5;
+ unsigned char :0;
+ unsigned short c :3;
+ unsigned char :0;
+ unsigned int d :9;
+} test_st_1;
+
+typedef struct
+{
+ unsigned short a :7;
+ unsigned char :0;
+ unsigned char b :1;
+ unsigned char :0;
+ unsigned short c :6;
+} test_st_2;
+
+typedef struct
+{
+ unsigned char a;
+ unsigned int :0;
+ unsigned int b :1;
+ unsigned short :0;
+ unsigned short c;
+ unsigned int :0;
+ unsigned int d :21;
+} test_st_3;
+
+typedef union
+{
+ test_st_1 st_1;
+ test_st_2 st_2;
+ test_st_3 st_3;
+}test_un;
+
+typedef union
+{
+ test_un un;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_un;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_un);
+
+int
+main (void)
+{
+ read_un r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+ r.values.v3 = 0xFFFFFFFF;
+
+ f (r.un);
+ return 0;
+}
+
+/* { dg-final { scan-assembler "mov\tip, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #8191" } } */
+/* { dg-final { scan-assembler "movt\tr4, 63" } } */
+/* { dg-final { scan-assembler "ands\tr0, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #511" } } */
+/* { dg-final { scan-assembler "movt\tr4, 65535" } } */
+/* { dg-final { scan-assembler "ands\tr1, r4" } } */
+/* { dg-final { scan-assembler "movw\tr4, #65535" } } */
+/* { dg-final { scan-assembler "movt\tr4, 31" } } */
+/* { dg-final { scan-assembler "ands\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr4, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "movs\tr3, r4" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/bitfield-1.c b/gcc/testsuite/gcc.target/arm/cmse/bitfield-1.c
new file mode 100644
index 00000000000..fccc51d5c82
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/bitfield-1.c
@@ -0,0 +1,39 @@
+/* { dg-do run } */
+/* { dg-options "--save-temps -mcmse -Wl,--section-start,.gnu.sgstubs=0x20400000" } */
+
+typedef struct
+{
+ unsigned short a : 6;
+ unsigned char b : 3;
+ unsigned char c;
+ unsigned short d : 8;
+} test_st;
+
+test_st __attribute__ ((cmse_nonsecure_entry)) foo (void)
+{
+ test_st t;
+ t.a = 63u;
+ t.b = 7u;
+ t.c = 255u;
+ t.d = 255u;
+ return t;
+}
+
+int
+main (void)
+{
+ test_st t;
+ t = foo ();
+ if (t.a != 63u
+ || t.b != 7u
+ || t.c != 255u
+ || t.d != 255u)
+ __builtin_abort ();
+ return 0;
+}
+
+/* { dg-final { scan-assembler "movw\tr1, #1855" } } */
+/* { dg-final { scan-assembler "movt\tr1, 65535" } } */
+/* { dg-final { scan-assembler "ands\tr0(, r0)?, r1" } } */
+/* { dg-final { scan-assembler "bxns" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/bitfield-2.c b/gcc/testsuite/gcc.target/arm/cmse/bitfield-2.c
new file mode 100644
index 00000000000..e6aee3c4c02
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/bitfield-2.c
@@ -0,0 +1,36 @@
+/* { dg-do run } */
+/* { dg-options "--save-temps -mcmse -Wl,--section-start,.gnu.sgstubs=0x20400000" } */
+
+typedef struct
+{
+ short a : 7;
+ signed char b : 3;
+ short c : 11;
+} test_st;
+
+test_st __attribute__ ((cmse_nonsecure_entry)) foo (void)
+{
+ test_st t;
+ t.a = -64;
+ t.b = -4 ;
+ t.c = -1024;
+ return t;
+}
+
+int
+main (void)
+{
+ test_st t;
+ t = foo ();
+ if (t.a != -64
+ || t.b != -4
+ || t.c != -1024)
+ __builtin_abort ();
+ return 0;
+}
+
+/* { dg-final { scan-assembler "movw\tr1, #1919" } } */
+/* { dg-final { scan-assembler "movt\tr1, 2047" } } */
+/* { dg-final { scan-assembler "ands\tr0(, r0)?, r1" } } */
+/* { dg-final { scan-assembler "bxns" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/bitfield-3.c b/gcc/testsuite/gcc.target/arm/cmse/bitfield-3.c
new file mode 100644
index 00000000000..285a2b92f64
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/bitfield-3.c
@@ -0,0 +1,37 @@
+/* { dg-do run } */
+/* { dg-options "--save-temps -mcmse -Wl,--section-start,.gnu.sgstubs=0x20400000" } */
+
+typedef struct
+{
+ short a;
+ signed char b : 2;
+ short : 1;
+ signed char c : 3;
+} test_st;
+
+test_st __attribute__ ((cmse_nonsecure_entry)) foo (void)
+{
+ test_st t;
+ t.a = -32768;
+ t.b = -2;
+ t.c = -4;
+ return t;
+}
+
+int
+main (void)
+{
+ test_st t;
+ t = foo ();
+ if (t.a != -32768
+ || t.b != -2
+ || t.c != -4)
+ __builtin_abort ();
+ return 0;
+}
+
+/* { dg-final { scan-assembler "movw\tr1, #65535" } } */
+/* { dg-final { scan-assembler "movt\tr1, 63" } } */
+/* { dg-final { scan-assembler "ands\tr0(, r0)?, r1" } } */
+/* { dg-final { scan-assembler "bxns" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/cmse-1.c b/gcc/testsuite/gcc.target/arm/cmse/cmse-1.c
new file mode 100644
index 00000000000..c13272eed68
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/cmse-1.c
@@ -0,0 +1,106 @@
+/* { dg-do compile } */
+/* { dg-options "-Os -mcmse -fdump-rtl-expand" } */
+
+#include <arm_cmse.h>
+
+extern int a;
+extern int bar (void);
+
+int foo (char * p)
+{
+ cmse_address_info_t cait;
+
+ cait = cmse_TT (&a);
+ if (cait.flags.mpu_region)
+ a++;
+
+ cait = cmse_TT_fptr (&bar);
+ if (cait.flags.mpu_region)
+ a+= bar ();
+
+ cait = cmse_TTA (&a);
+ if (cait.flags.mpu_region)
+ a++;
+
+ cait = cmse_TTA_fptr (&bar);
+ if (cait.flags.mpu_region)
+ a+= bar ();
+
+ cait = cmse_TTT (&a);
+ if (cait.flags.mpu_region)
+ a++;
+
+ cait = cmse_TTT_fptr (&bar);
+ if (cait.flags.mpu_region)
+ a+= bar ();
+
+ cait = cmse_TTAT (&a);
+ if (cait.flags.mpu_region)
+ a++;
+
+ cait = cmse_TTAT_fptr (&bar);
+ if (cait.flags.mpu_region)
+ a+= bar ();
+
+ p = (char *) cmse_check_address_range ((void *) p, sizeof (char), 0);
+ p = (char *) cmse_check_address_range ((void *) p, sizeof (char),
+ CMSE_MPU_UNPRIV);
+ p = (char *) cmse_check_address_range ((void *) p, sizeof (char),
+ CMSE_MPU_READWRITE);
+ p = (char *) cmse_check_address_range ((void *) p, sizeof (char),
+ CMSE_MPU_UNPRIV | CMSE_MPU_READ);
+ p = (char *) cmse_check_address_range ((void *) p, sizeof (char),
+ CMSE_AU_NONSECURE
+ | CMSE_MPU_NONSECURE);
+ p = (char *) cmse_check_address_range ((void *) p, sizeof (char),
+ CMSE_NONSECURE | CMSE_MPU_UNPRIV);
+
+ p = (char *) cmse_check_pointed_object (p, CMSE_NONSECURE | CMSE_MPU_UNPRIV);
+
+ return a;
+}
+/* { dg-final { scan-assembler-times "\ttt " 2 } } */
+/* { dg-final { scan-assembler-times "ttt " 2 } } */
+/* { dg-final { scan-assembler-times "tta " 2 } } */
+/* { dg-final { scan-assembler-times "ttat " 2 } } */
+/* { dg-final { scan-assembler-times "bl.cmse_check_address_range" 7 } } */
+/* { dg-final { scan-assembler-not "cmse_check_pointed_object" } } */
+
+int __attribute__ ((cmse_nonsecure_entry))
+baz (void)
+{
+ return cmse_nonsecure_caller ();
+}
+
+typedef int __attribute__ ((cmse_nonsecure_call)) (int_nsfunc_t) (void);
+
+int default_callback (void)
+{
+ return 0;
+}
+
+int_nsfunc_t * fp = (int_nsfunc_t *) default_callback;
+
+void __attribute__ ((cmse_nonsecure_entry))
+qux (int_nsfunc_t * callback)
+{
+ fp = cmse_nsfptr_create (callback);
+}
+
+int call_callback (void)
+{
+ if (cmse_is_nsfptr (fp))
+ return fp ();
+ else
+ return default_callback ();
+}
+/* { dg-final { scan-assembler "baz:" } } */
+/* { dg-final { scan-assembler "__acle_se_baz:" } } */
+/* { dg-final { scan-assembler "qux:" } } */
+/* { dg-final { scan-assembler "__acle_se_qux:" } } */
+/* { dg-final { scan-assembler-not "\tcmse_nonsecure_caller" } } */
+/* { dg-final { scan-rtl-dump "and.*reg.*const_int 1" expand } } */
+/* { dg-final { scan-assembler "bic" } } */
+/* { dg-final { scan-assembler "push\t\{r4, r5, r6" } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvq" } } */
+/* { dg-final { scan-assembler-times "bl\\s+__gnu_cmse_nonsecure_call" 1 } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/cmse-10.c b/gcc/testsuite/gcc.target/arm/cmse/cmse-10.c
new file mode 100644
index 00000000000..1a91ac39ee3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/cmse-10.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+void
+foo (void) {}
+
+/* { dg-final { scan-assembler-not "bxns" } } */
+/* { dg-final { scan-assembler "foo:" } } */
+/* { dg-final { scan-assembler-not "__acle_se_foo:" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/cmse-12.c b/gcc/testsuite/gcc.target/arm/cmse/cmse-12.c
new file mode 100644
index 00000000000..87a2f1363a4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/cmse-12.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+#include <arm_cmse.h>
+
+char *
+foo (char * p)
+{
+ if (!cmse_is_nsfptr (p))
+ return cmse_nsfptr_create (p);
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler-not "cmse_is_nsfptr" } } */
+/* { dg-final { scan-assembler-not "cmse_nsfptr_create" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/cmse-14.c b/gcc/testsuite/gcc.target/arm/cmse/cmse-14.c
new file mode 100644
index 00000000000..701e9ee7e31
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/cmse-14.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (void);
+
+int foo (void)
+{
+ return bar ();
+}
+
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+/* { dg-final { scan-assembler-not "b\[^ y\n\]*\\s+bar" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/cmse-15.c b/gcc/testsuite/gcc.target/arm/cmse/cmse-15.c
new file mode 100644
index 00000000000..4e9ace1f3f3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/cmse-15.c
@@ -0,0 +1,72 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*ns_foo) (void);
+int (*s_bar) (void);
+int __attribute__ ((cmse_nonsecure_call)) (**ns_foo2) (void);
+int (**s_bar2) (void);
+
+typedef int __attribute__ ((cmse_nonsecure_call)) ns_foo_t (void);
+typedef int s_bar_t (void);
+typedef int __attribute__ ((cmse_nonsecure_call)) (* ns_foo_ptr) (void);
+typedef int (*s_bar_ptr) (void);
+
+int nonsecure0 (ns_foo_t * ns_foo_p)
+{
+ return ns_foo_p ();
+}
+
+int nonsecure1 (ns_foo_t ** ns_foo_p)
+{
+ return (*ns_foo_p) ();
+}
+
+int nonsecure2 (ns_foo_ptr ns_foo_p)
+{
+ return ns_foo_p ();
+}
+int nonsecure3 (ns_foo_ptr * ns_foo_p)
+{
+ return (*ns_foo_p) ();
+}
+
+int secure0 (s_bar_t * s_bar_p)
+{
+ return s_bar_p ();
+}
+
+int secure1 (s_bar_t ** s_bar_p)
+{
+ return (*s_bar_p) ();
+}
+
+int secure2 (s_bar_ptr s_bar_p)
+{
+ return s_bar_p ();
+}
+
+int secure3 (s_bar_ptr * s_bar_p)
+{
+ return (*s_bar_p) ();
+}
+
+int nonsecure4 (void)
+{
+ return ns_foo ();
+}
+
+int nonsecure5 (void)
+{
+ return (*ns_foo2) ();
+}
+
+int secure4 (void)
+{
+ return s_bar ();
+}
+
+int secure5 (void)
+{
+ return (*s_bar2) ();
+}
+/* { dg-final { scan-assembler-times "bl\\s+__gnu_cmse_nonsecure_call" 6 } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/cmse-3.c b/gcc/testsuite/gcc.target/arm/cmse/cmse-3.c
new file mode 100644
index 00000000000..7f92a4c28b3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/cmse-3.c
@@ -0,0 +1,45 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+struct span {
+ int a, b;
+};
+struct span2 {
+ float a, b, c, d;
+};
+
+union test_union
+{
+ long long a;
+ int b;
+ struct span2 c;
+} test_union;
+
+void __attribute__ ((cmse_nonsecure_entry))
+foo (long long a, int b, long long c) {} /* { dg-error "not available to functions with arguments passed on the stack" } */
+
+void __attribute__ ((cmse_nonsecure_entry))
+bar (long long a, int b, struct span c) {} /* { dg-error "not available to functions with arguments passed on the stack" } */
+
+void __attribute__ ((cmse_nonsecure_entry))
+baz (int a, ...) {} /* { dg-error "not available to functions with variable number of arguments" } */
+
+struct span __attribute__ ((cmse_nonsecure_entry))
+qux (void) { /* { dg-error "not available to functions that return value on the stack" } */
+ struct span ret = {0, 0};
+ return ret;
+}
+
+void __attribute__ ((cmse_nonsecure_entry))
+norf (struct span2 a) {}
+
+void __attribute__ ((cmse_nonsecure_entry))
+foo2 (long long a, int b, union test_union c) {} /* { dg-error "not available to functions with arguments passed on the stack" } */
+
+typedef void __attribute__ ((cmse_nonsecure_call)) bar2 (long long a, int b, long long c); /* { dg-error "not available to functions with arguments passed on the stack" } */
+
+typedef void __attribute__ ((cmse_nonsecure_call)) baz2 (long long a, int b, struct span c); /* { dg-error "not available to functions with arguments passed on the stack" } */
+
+typedef struct span __attribute__ ((cmse_nonsecure_call)) qux2 (void); /* { dg-error "not available to functions that return value on the stack" } */
+
+typedef void __attribute__ ((cmse_nonsecure_call)) norf2 (int a, ...); /* { dg-error "not available to functions with variable number of arguments" } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/cmse-4.c b/gcc/testsuite/gcc.target/arm/cmse/cmse-4.c
new file mode 100644
index 00000000000..d0999a4181a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/cmse-4.c
@@ -0,0 +1,34 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+struct span {
+ int a, b;
+};
+
+extern int qux (void);
+
+void __attribute__ ((cmse_nonsecure_entry))
+foo (void) {}
+
+static void __attribute__ ((cmse_nonsecure_entry))
+bar (void) {} /* { dg-warning "has no effect on functions with static linkage" } */
+
+int __attribute__ ((cmse_nonsecure_entry))
+baz (void)
+{
+ return qux ();
+}
+
+void __attribute__ ((cmse_nonsecure_call))
+quux (void) {} /* { dg-warning "attribute only applies to base type of a function pointer" } */
+
+int __attribute__ ((cmse_nonsecure_call)) norf; /* { dg-warning "attribute only applies to base type of a function pointer" } */
+
+/* { dg-final { scan-assembler-times "bxns" 2 } } */
+/* { dg-final { scan-assembler "foo:" } } */
+/* { dg-final { scan-assembler "__acle_se_foo:" } } */
+/* { dg-final { scan-assembler-not "__acle_se_bar:" } } */
+/* { dg-final { scan-assembler "baz:" } } */
+/* { dg-final { scan-assembler "__acle_se_baz:" } } */
+/* { dg-final { scan-assembler-not "__acle_se_quux:" } } */
+/* { dg-final { scan-assembler-not "__acle_se_norf:" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/cmse-9.c b/gcc/testsuite/gcc.target/arm/cmse/cmse-9.c
new file mode 100644
index 00000000000..9e81e30c891
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/cmse-9.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-skip-if "Testing exclusion of -mcmse" { arm-*-* } { "-mcmse" } { "" } } */
+
+
+void __attribute__ ((cmse_nonsecure_call)) (*bar) (int); /* { dg-warning "attribute ignored without -mcmse option" } */
+typedef void __attribute__ ((cmse_nonsecure_call)) baz (int); /* { dg-warning "attribute ignored without -mcmse option" } */
+
+int __attribute__ ((cmse_nonsecure_entry))
+foo (int a, baz b)
+{ /* { dg-warning "attribute ignored without -mcmse option" } */
+ bar (a);
+ b (a);
+ return a + 1;
+}
+
+/* { dg-final { scan-assembler-not "bxns" } } */
+/* { dg-final { scan-assembler-not "blxns" } } */
+/* { dg-final { scan-assembler-not "bl\t__gnu_cmse_nonsecure_call" } } */
+/* { dg-final { scan-assembler "foo:" } } */
+/* { dg-final { scan-assembler-not "__acle_se_foo:" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/cmse.exp b/gcc/testsuite/gcc.target/arm/cmse/cmse.exp
new file mode 100644
index 00000000000..66a8b7da005
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/cmse.exp
@@ -0,0 +1,72 @@
+# Copyright (C) 1997-2016 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# GCC testsuite for ARMv8-M Security Extensions using the `dg.exp' driver.
+
+# Load support procs.
+load_lib gcc-dg.exp
+
+# Exit immediately if the target does not support -mcmse.
+if ![check_effective_target_arm_cmse_ok] then {
+ return
+}
+
+# If a testcase doesn't have special options, use these.
+global DEFAULT_CFLAGS
+if ![info exists DEFAULT_CFLAGS] then {
+ set DEFAULT_CFLAGS " -ansi -pedantic-errors"
+}
+
+# Initialize `dg'.
+dg-init
+
+set saved-dg-do-what-default ${dg-do-what-default}
+set dg-do-what-default "assemble"
+
+set saved-lto_torture_options ${LTO_TORTURE_OPTIONS}
+set LTO_TORTURE_OPTIONS ""
+
+# These are for both baseline and mainline.
+gcc-dg-runtest [lsort [glob $srcdir/$subdir/*.c]] \
+ "" $DEFAULT_CFLAGS
+
+if {[check_effective_target_arm_arch_v8m_base_ok]} then {
+ # Baseline only
+ gcc-dg-runtest [lsort [glob $srcdir/$subdir/baseline/*.c]] \
+ "" $DEFAULT_CFLAGS
+}
+
+if {[check_effective_target_arm_arch_v8m_main_ok]} then {
+ gcc-dg-runtest [lsort [glob $srcdir/$subdir/mainline/*.c]] \
+ "" $DEFAULT_CFLAGS
+ # Mainline -mfloat-abi=soft
+ gcc-dg-runtest [lsort [glob $srcdir/$subdir/mainline/soft/*.c]] \
+ "-mfloat-abi=soft" $DEFAULT_CFLAGS
+ gcc-dg-runtest [lsort [glob $srcdir/$subdir/mainline/softfp/*.c]] \
+ "" $DEFAULT_CFLAGS
+ gcc-dg-runtest [lsort [glob $srcdir/$subdir/mainline/softfp-sp/*.c]] \
+ "" $DEFAULT_CFLAGS
+ gcc-dg-runtest [lsort [glob $srcdir/$subdir/mainline/hard/*.c]] \
+ "" $DEFAULT_CFLAGS
+ gcc-dg-runtest [lsort [glob $srcdir/$subdir/mainline/hard-sp/*.c]] \
+ "" $DEFAULT_CFLAGS
+}
+
+set LTO_TORTURE_OPTIONS ${saved-lto_torture_options}
+set dg-do-what-default ${saved-dg-do-what-default}
+
+# All done.
+dg-finish
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-4.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-4.c
new file mode 100644
index 00000000000..c3b1396d52e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-4.c
@@ -0,0 +1,55 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+typedef struct
+{
+ unsigned char a;
+ unsigned int b:5;
+ unsigned int c:11, :0, d:8;
+ struct { unsigned int ee:2; } e;
+} test_st;
+
+typedef union
+{
+ test_st st;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
+
+extern void foo (test_st st);
+
+int
+main (void)
+{
+ read_st r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+ r.values.v3 = 0xFFFFFFFF;
+ r.values.v4 = 0xFFFFFFFF;
+
+ f (r.st);
+ return 0;
+}
+
+/* { dg-final { scan-assembler "movw\tip, #65535" } } */
+/* { dg-final { scan-assembler "movt\tip, 255" } } */
+/* { dg-final { scan-assembler "and\tr0, r0, ip" } } */
+/* { dg-final { scan-assembler "mov\tip, #255" } } */
+/* { dg-final { scan-assembler "and\tr1, r1, ip" } } */
+/* { dg-final { scan-assembler "mov\tip, #3" } } */
+/* { dg-final { scan-assembler "and\tr2, r2, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-5.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-5.c
new file mode 100644
index 00000000000..0d029044aa9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-5.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+typedef struct
+{
+ unsigned char a;
+ unsigned short b :5;
+ unsigned char c;
+ unsigned short d :11;
+} test_st;
+
+typedef union
+{
+ test_st st;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
+
+int
+main (void)
+{
+ read_st r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+
+ f (r.st);
+ return 0;
+}
+
+/* { dg-final { scan-assembler "movw\tip, #8191" } } */
+/* { dg-final { scan-assembler "movt\tip, 255" } } */
+/* { dg-final { scan-assembler "and\tr0, r0, ip" } } */
+/* { dg-final { scan-assembler "movw\tip, #2047" } } */
+/* { dg-final { scan-assembler "and\tr1, r1, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-6.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-6.c
new file mode 100644
index 00000000000..005515ab9cb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-6.c
@@ -0,0 +1,61 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+typedef struct
+{
+ unsigned char a;
+ unsigned int b : 3;
+ unsigned int c : 14;
+ unsigned int d : 1;
+ struct {
+ unsigned int ee : 2;
+ unsigned short ff : 15;
+ } e;
+ unsigned char g : 1;
+ unsigned char : 4;
+ unsigned char h : 3;
+} test_st;
+
+typedef union
+{
+ test_st st;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
+
+int
+main (void)
+{
+ read_st r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+ r.values.v3 = 0xFFFFFFFF;
+ r.values.v4 = 0xFFFFFFFF;
+
+ f (r.st);
+ return 0;
+}
+
+/* { dg-final { scan-assembler "movw\tip, #65535" } } */
+/* { dg-final { scan-assembler "movt\tip, 1023" } } */
+/* { dg-final { scan-assembler "and\tr0, r0, ip" } } */
+/* { dg-final { scan-assembler "mov\tip, #3" } } */
+/* { dg-final { scan-assembler "movt\tip, 32767" } } */
+/* { dg-final { scan-assembler "and\tr1, r1, ip" } } */
+/* { dg-final { scan-assembler "mov\tip, #255" } } */
+/* { dg-final { scan-assembler "and\tr2, r2, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-7.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-7.c
new file mode 100644
index 00000000000..6dd218e62fd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-7.c
@@ -0,0 +1,52 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+typedef struct
+{
+ unsigned char a;
+ unsigned short b :5;
+ unsigned char c;
+ unsigned short d :11;
+} test_st;
+
+typedef union
+{
+ test_st st;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
+
+int
+main (void)
+{
+ read_st r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+
+ f (r.st);
+ return 0;
+}
+
+
+/* { dg-final { scan-assembler "movw\tip, #8191" } } */
+/* { dg-final { scan-assembler "movt\tip, 255" } } */
+/* { dg-final { scan-assembler "and\tr0, r0, ip" } } */
+/* { dg-final { scan-assembler "movw\tip, #2047" } } */
+/* { dg-final { scan-assembler "and\tr1, r1, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-8.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-8.c
new file mode 100644
index 00000000000..c833bcb0ae9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-8.c
@@ -0,0 +1,55 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+typedef struct
+{
+ unsigned char a;
+ unsigned int :0;
+ unsigned int b :1;
+ unsigned short :0;
+ unsigned short c;
+ unsigned int :0;
+ unsigned int d :21;
+} test_st;
+
+typedef union
+{
+ test_st st;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st;
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
+
+int
+main (void)
+{
+ read_st r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+ r.values.v3 = 0xFFFFFFFF;
+
+ f (r.st);
+ return 0;
+}
+
+/* { dg-final { scan-assembler "mov\tip, #255" } } */
+/* { dg-final { scan-assembler "and\tr0, r0, ip" } } */
+/* { dg-final { scan-assembler "mov\tip, #1" } } */
+/* { dg-final { scan-assembler "movt\tip, 65535" } } */
+/* { dg-final { scan-assembler "and\tr1, r1, ip" } } */
+/* { dg-final { scan-assembler "movw\tip, #65535" } } */
+/* { dg-final { scan-assembler "movt\tip, 31" } } */
+/* { dg-final { scan-assembler "and\tr2, r2, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-9.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-9.c
new file mode 100644
index 00000000000..d6e4cdb8c44
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-9.c
@@ -0,0 +1,54 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+typedef struct
+{
+ char a:3;
+} test_st3;
+
+typedef struct
+{
+ char a:3;
+} test_st2;
+
+typedef struct
+{
+ test_st2 st2;
+ test_st3 st3;
+} test_st;
+
+typedef union
+{
+ test_st st;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st;
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st);
+
+int
+main (void)
+{
+ read_st r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+
+ f (r.st);
+ return 0;
+}
+
+/* { dg-final { scan-assembler "movw\tip, #1799" } } */
+/* { dg-final { scan-assembler "and\tr0, r0, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-and-union-1.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-and-union-1.c
new file mode 100644
index 00000000000..e139ba61af5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/bitfield-and-union-1.c
@@ -0,0 +1,94 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+typedef struct
+{
+ unsigned short a :11;
+} test_st_4;
+
+typedef union
+{
+ char a;
+ test_st_4 st4;
+}test_un_2;
+
+typedef struct
+{
+ unsigned char a;
+ unsigned int :0;
+ unsigned int b :1;
+ unsigned short :0;
+ unsigned short c;
+ unsigned int :0;
+ unsigned int d :21;
+} test_st_3;
+
+typedef struct
+{
+ unsigned char a :3;
+ unsigned int b :13;
+ test_un_2 un2;
+} test_st_2;
+
+typedef union
+{
+ test_st_2 st2;
+ test_st_3 st3;
+}test_un_1;
+
+typedef struct
+{
+ unsigned char a :2;
+ unsigned char :0;
+ unsigned short b :5;
+ unsigned char :0;
+ unsigned char c :4;
+ test_un_1 un1;
+} test_st_1;
+
+typedef union
+{
+ test_st_1 st1;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_st_1;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_st_1);
+
+int
+main (void)
+{
+ read_st_1 r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+ r.values.v3 = 0xFFFFFFFF;
+ r.values.v4 = 0xFFFFFFFF;
+
+ f (r.st1);
+ return 0;
+}
+
+/* { dg-final { scan-assembler "movw\tip, #7939" } } */
+/* { dg-final { scan-assembler "movt\tip, 15" } } */
+/* { dg-final { scan-assembler "and\tr0, r0, ip" } } */
+/* { dg-final { scan-assembler "movw\tip, #65535" } } */
+/* { dg-final { scan-assembler "movt\tip, 2047" } } */
+/* { dg-final { scan-assembler "and\tr1, r1, ip" } } */
+/* { dg-final { scan-assembler "mov\tip, #1" } } */
+/* { dg-final { scan-assembler "movt\tip, 65535" } } */
+/* { dg-final { scan-assembler "and\tr2, r2, ip" } } */
+/* { dg-final { scan-assembler "movw\tip, #65535" } } */
+/* { dg-final { scan-assembler "movt\tip, 31" } } */
+/* { dg-final { scan-assembler "and\tr3, r3, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-13.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-13.c
new file mode 100644
index 00000000000..d90ad811fc1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-13.c
@@ -0,0 +1,43 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */
+/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-sp-d16" } */
+
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double);
+
+int
+foo (int a)
+{
+ return bar (3.0f, 2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler-not "vldr\.32\ts0, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts1, .L" } } */
+/* { dg-final { scan-assembler-not "vldr\.32\ts2, .L" } } */
+/* { dg-final { scan-assembler-not "vldr\.32\ts3, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts4, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts5, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts6, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts7, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts8, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts9, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts10, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts11, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts12, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts13, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts14, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts15, .L" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-5.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-5.c
new file mode 100644
index 00000000000..88dec276281
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-5.c
@@ -0,0 +1,45 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */
+/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-sp-d16" } */
+
+extern float bar (void);
+
+float __attribute__ ((cmse_nonsecure_entry))
+foo (void)
+{
+ return bar ();
+}
+/* { dg-final { scan-assembler "mov\tr0, lr" } } */
+/* { dg-final { scan-assembler "mov\tr1, lr" } } */
+/* { dg-final { scan-assembler "mov\tr2, lr" } } */
+/* { dg-final { scan-assembler "mov\tr3, lr" } } */
+/* { dg-final { scan-assembler-not "vmov\.f32\ts0, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts2, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts3, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts4, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts5, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts6, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts7, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts8, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts9, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts10, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts11, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts12, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { arm_arch_v8m_main_ok && { ! arm_dsp } } } } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target { arm_arch_v8m_main_ok && arm_dsp } } } } */
+/* { dg-final { scan-assembler "push\t{r4}" } } */
+/* { dg-final { scan-assembler "vmrs\tip, fpscr" } } */
+/* { dg-final { scan-assembler "movw\tr4, #65376" } } */
+/* { dg-final { scan-assembler "movt\tr4, #4095" } } */
+/* { dg-final { scan-assembler "and\tip, r4" } } */
+/* { dg-final { scan-assembler "vmsr\tfpscr, ip" } } */
+/* { dg-final { scan-assembler "pop\t{r4}" } } */
+/* { dg-final { scan-assembler "mov\tip, lr" } } */
+/* { dg-final { scan-assembler "bxns" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-7.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-7.c
new file mode 100644
index 00000000000..c047cd51c94
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-7.c
@@ -0,0 +1,42 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */
+/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-sp-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (void);
+
+int
+foo (int a)
+{
+ return bar () + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts0, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts1, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts2, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts3, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts4, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts5, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts6, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts7, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts8, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts9, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts10, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts11, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts12, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts13, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts14, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts15, .L" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-8.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-8.c
new file mode 100644
index 00000000000..20d2d4a8fb1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard-sp/cmse-8.c
@@ -0,0 +1,41 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */
+/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-sp-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
+
+int
+foo (int a)
+{
+ return bar (2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler-not "vldr\.32\ts0, .L" } } */
+/* { dg-final { scan-assembler-not "vldr\.32\ts1, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts2, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts3, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts4, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts5, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts6, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts7, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts8, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts9, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts10, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts11, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts12, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts13, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts14, .L" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts15, .L" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-13.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-13.c
new file mode 100644
index 00000000000..0af586a7fd1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-13.c
@@ -0,0 +1,38 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */
+/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-d16" } */
+
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double);
+
+int
+foo (int a)
+{
+ return bar (3.0f, 2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "vldr\.32\ts1, .L" } } */
+/* { dg-final { scan-assembler-not "vldr\.64\td0, .L" } } */
+/* { dg-final { scan-assembler-not "vldr\.32\ts0, .L" } } */
+/* { dg-final { scan-assembler-not "vldr\.64\td1, .L" } } */
+/* { dg-final { scan-assembler-not "vldr\.32\ts2, .L" } } */
+/* { dg-final { scan-assembler-not "vldr\.32\ts3, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td2, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td3, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td4, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td5, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td6, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td7, .L" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-5.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-5.c
new file mode 100644
index 00000000000..29f60baf521
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-5.c
@@ -0,0 +1,38 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */
+/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-d16" } */
+
+extern float bar (void);
+
+float __attribute__ ((cmse_nonsecure_entry))
+foo (void)
+{
+ return bar ();
+}
+/* { dg-final { scan-assembler "mov\tr0, lr" } } */
+/* { dg-final { scan-assembler "mov\tr1, lr" } } */
+/* { dg-final { scan-assembler "mov\tr2, lr" } } */
+/* { dg-final { scan-assembler "mov\tr3, lr" } } */
+/* { dg-final { scan-assembler-not "vmov\.f32\ts0, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { arm_arch_v8m_main_ok && { ! arm_dsp } } } } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target { arm_arch_v8m_main_ok && arm_dsp } } } } */
+/* { dg-final { scan-assembler "push\t{r4}" } } */
+/* { dg-final { scan-assembler "vmrs\tip, fpscr" } } */
+/* { dg-final { scan-assembler "movw\tr4, #65376" } } */
+/* { dg-final { scan-assembler "movt\tr4, #4095" } } */
+/* { dg-final { scan-assembler "and\tip, r4" } } */
+/* { dg-final { scan-assembler "vmsr\tfpscr, ip" } } */
+/* { dg-final { scan-assembler "pop\t{r4}" } } */
+/* { dg-final { scan-assembler "mov\tip, lr" } } */
+/* { dg-final { scan-assembler "bxns" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-7.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-7.c
new file mode 100644
index 00000000000..a5c64fb06ed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-7.c
@@ -0,0 +1,34 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */
+/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (void);
+
+int
+foo (int a)
+{
+ return bar () + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "vldr\.64\td0, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td1, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td2, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td3, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td4, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td5, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td6, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td7, .L" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-8.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-8.c
new file mode 100644
index 00000000000..5e041b17b0e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/hard/cmse-8.c
@@ -0,0 +1,33 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=softfp } {""} } */
+/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=hard -mfpu=fpv5-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
+
+int
+foo (int a)
+{
+ return bar (2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler-not "vldr\.64\td0, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td1, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td2, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td3, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td4, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td5, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td6, .L" } } */
+/* { dg-final { scan-assembler "vldr\.64\td7, .L" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-13.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-13.c
new file mode 100644
index 00000000000..dbbd262c890
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-13.c
@@ -0,0 +1,27 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=hard" -mfloat-abi=softfp } {""} } */
+/* { dg-options "-mcmse -mfloat-abi=soft" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double);
+
+int
+foo (int a)
+{
+ return bar (1.0f, 2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler-not "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler-not "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler-not "vmov" } } */
+/* { dg-final { scan-assembler-not "vmsr" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-5.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-5.c
new file mode 100644
index 00000000000..a7229ea8eb2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-5.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=hard" -mfloat-abi=softfp } {""} } */
+/* { dg-options "-mcmse -mfloat-abi=soft" } */
+
+extern float bar (void);
+
+float __attribute__ ((cmse_nonsecure_entry))
+foo (void)
+{
+ return bar ();
+}
+
+/* { dg-final { scan-assembler "mov\tr1, lr" } } */
+/* { dg-final { scan-assembler "mov\tr2, lr" } } */
+/* { dg-final { scan-assembler "mov\tr3, lr" } } */
+/* { dg-final { scan-assembler "mov\tip, lr" } } */
+/* { dg-final { scan-assembler-not "vmov" } } */
+/* { dg-final { scan-assembler-not "vmsr" } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { arm_arch_v8m_main_ok && { ! arm_dsp } } } } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target { arm_arch_v8m_main_ok && arm_dsp } } } } */
+/* { dg-final { scan-assembler "bxns" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-7.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-7.c
new file mode 100644
index 00000000000..e33568400ef
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-7.c
@@ -0,0 +1,27 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=hard" -mfloat-abi=softfp } {""} } */
+/* { dg-options "-mcmse -mfloat-abi=soft" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (void);
+
+int
+foo (int a)
+{
+ return bar () + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler-not "vmov" } } */
+/* { dg-final { scan-assembler-not "vmsr" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-8.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-8.c
new file mode 100644
index 00000000000..024a12e0a41
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/soft/cmse-8.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=hard" -mfloat-abi=softfp } {""} } */
+/* { dg-options "-mcmse -mfloat-abi=soft" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
+
+int
+foo (int a)
+{
+ return bar (2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler-not "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler-not "vmov" } } */
+/* { dg-final { scan-assembler-not "vmsr" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-5.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-5.c
new file mode 100644
index 00000000000..7734d77dc38
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-5.c
@@ -0,0 +1,46 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */
+/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-sp-d16" } */
+
+extern float bar (void);
+
+float __attribute__ ((cmse_nonsecure_entry))
+foo (void)
+{
+ return bar ();
+}
+/* { dg-final { scan-assembler "__acle_se_foo:" } } */
+/* { dg-final { scan-assembler-not "mov\tr0, lr" } } */
+/* { dg-final { scan-assembler "mov\tr1, lr" } } */
+/* { dg-final { scan-assembler "mov\tr2, lr" } } */
+/* { dg-final { scan-assembler "mov\tr3, lr" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts0, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts2, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts3, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts4, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts5, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts6, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts7, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts8, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts9, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts10, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts11, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts12, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { arm_arch_v8m_main_ok && { ! arm_dsp } } } } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target { arm_arch_v8m_main_ok && arm_dsp } } } } */
+/* { dg-final { scan-assembler "push\t{r4}" } } */
+/* { dg-final { scan-assembler "vmrs\tip, fpscr" } } */
+/* { dg-final { scan-assembler "movw\tr4, #65376" } } */
+/* { dg-final { scan-assembler "movt\tr4, #4095" } } */
+/* { dg-final { scan-assembler "and\tip, r4" } } */
+/* { dg-final { scan-assembler "vmsr\tfpscr, ip" } } */
+/* { dg-final { scan-assembler "pop\t{r4}" } } */
+/* { dg-final { scan-assembler "mov\tip, lr" } } */
+/* { dg-final { scan-assembler "bxns" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-7.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-7.c
new file mode 100644
index 00000000000..fb195eb58d5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-7.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */
+/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-sp-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (void);
+
+int
+foo (int a)
+{
+ return bar () + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-8.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-8.c
new file mode 100644
index 00000000000..22ed3f8af88
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp-sp/cmse-8.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */
+/* { dg-skip-if "Skip these if testing double precision" {*-*-*} {"-mfpu=fpv[4-5]-d16"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-sp-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
+
+int
+foo (int a)
+{
+ return bar (2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler-not "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-13.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-13.c
new file mode 100644
index 00000000000..9634065e7cb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-13.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */
+/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (float, double);
+
+int
+foo (int a)
+{
+ return bar (1.0f, 2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "\n\tmov\tr1, r4" } } */
+/* { dg-final { scan-assembler-not "\n\tmov\tr2, r4\n\tmov\tr3, r4" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-5.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-5.c
new file mode 100644
index 00000000000..6addaa1a4ed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-5.c
@@ -0,0 +1,38 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */
+/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-d16" } */
+
+extern float bar (void);
+
+float __attribute__ ((cmse_nonsecure_entry))
+foo (void)
+{
+ return bar ();
+}
+/* { dg-final { scan-assembler "__acle_se_foo:" } } */
+/* { dg-final { scan-assembler-not "mov\tr0, lr" } } */
+/* { dg-final { scan-assembler "mov\tr1, lr" } } */
+/* { dg-final { scan-assembler "mov\tr2, lr" } } */
+/* { dg-final { scan-assembler "mov\tr3, lr" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td0, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td3, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td4, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */
+/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { arm_arch_v8m_main_ok && { ! arm_dsp } } } } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target { arm_arch_v8m_main_ok && arm_dsp } } } } */
+/* { dg-final { scan-assembler "push\t{r4}" } } */
+/* { dg-final { scan-assembler "vmrs\tip, fpscr" } } */
+/* { dg-final { scan-assembler "movw\tr4, #65376" } } */
+/* { dg-final { scan-assembler "movt\tr4, #4095" } } */
+/* { dg-final { scan-assembler "and\tip, r4" } } */
+/* { dg-final { scan-assembler "vmsr\tfpscr, ip" } } */
+/* { dg-final { scan-assembler "pop\t{r4}" } } */
+/* { dg-final { scan-assembler "mov\tip, lr" } } */
+/* { dg-final { scan-assembler "bxns" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-7.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-7.c
new file mode 100644
index 00000000000..04f8466cc11
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-7.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */
+/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (void);
+
+int
+foo (int a)
+{
+ return bar () + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-8.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-8.c
new file mode 100644
index 00000000000..ffe94de8541
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/softfp/cmse-8.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arch_v8m_main_ok } */
+/* { dg-add-options arm_arch_v8m_main } */
+/* { dg-skip-if "Do not combine float-abi= hard | soft | softfp" {*-*-*} {"-mfloat-abi=soft" -mfloat-abi=hard } {""} } */
+/* { dg-skip-if "Skip these if testing single precision" {*-*-*} {"-mfpu=*-sp-*"} {""} } */
+/* { dg-options "-mcmse -mfloat-abi=softfp -mfpu=fpv5-d16" } */
+
+int __attribute__ ((cmse_nonsecure_call)) (*bar) (double);
+
+int
+foo (int a)
+{
+ return bar (2.0) + a + 1;
+}
+
+/* Checks for saving and clearing prior to function call. */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
+/* { dg-final { scan-assembler-not "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+
+/* Now we check that we use the correct intrinsic to call. */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/union-1.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/union-1.c
new file mode 100644
index 00000000000..1fc846cd7a5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/union-1.c
@@ -0,0 +1,69 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+typedef struct
+{
+ unsigned char a :2;
+ unsigned char :0;
+ unsigned short b :5;
+ unsigned char :0;
+ unsigned short c :3;
+ unsigned char :0;
+ unsigned int d :9;
+} test_st_1;
+
+typedef struct
+{
+ unsigned short a :7;
+ unsigned char :0;
+ unsigned char b :1;
+ unsigned char :0;
+ unsigned short c :6;
+} test_st_2;
+
+typedef union
+{
+ test_st_1 st_1;
+ test_st_2 st_2;
+}test_un;
+
+typedef union
+{
+ test_un un;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_un;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_un);
+
+int
+main (void)
+{
+ read_un r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+
+ f (r.un);
+ return 0;
+}
+
+/* { dg-final { scan-assembler "movw\tip, #8063" } } */
+/* { dg-final { scan-assembler "movt\tip, 63" } } */
+/* { dg-final { scan-assembler "and\tr0, r0, ip" } } */
+/* { dg-final { scan-assembler "movw\tip, #511" } } */
+/* { dg-final { scan-assembler "and\tr1, r1, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/cmse/mainline/union-2.c b/gcc/testsuite/gcc.target/arm/cmse/mainline/union-2.c
new file mode 100644
index 00000000000..420d0f136ef
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/mainline/union-2.c
@@ -0,0 +1,84 @@
+/* { dg-do compile } */
+/* { dg-options "-mcmse" } */
+
+typedef struct
+{
+ unsigned char a :2;
+ unsigned char :0;
+ unsigned short b :5;
+ unsigned char :0;
+ unsigned short c :3;
+ unsigned char :0;
+ unsigned int d :9;
+} test_st_1;
+
+typedef struct
+{
+ unsigned short a :7;
+ unsigned char :0;
+ unsigned char b :1;
+ unsigned char :0;
+ unsigned short c :6;
+} test_st_2;
+
+typedef struct
+{
+ unsigned char a;
+ unsigned int :0;
+ unsigned int b :1;
+ unsigned short :0;
+ unsigned short c;
+ unsigned int :0;
+ unsigned int d :21;
+} test_st_3;
+
+typedef union
+{
+ test_st_1 st_1;
+ test_st_2 st_2;
+ test_st_3 st_3;
+}test_un;
+
+typedef union
+{
+ test_un un;
+ struct
+ {
+ unsigned int v1;
+ unsigned int v2;
+ unsigned int v3;
+ unsigned int v4;
+ }values;
+} read_un;
+
+
+typedef void __attribute__ ((cmse_nonsecure_call)) (*foo_ns) (test_un);
+
+int
+main (void)
+{
+ read_un r;
+ foo_ns f;
+
+ f = (foo_ns) 0x200000;
+ r.values.v1 = 0xFFFFFFFF;
+ r.values.v2 = 0xFFFFFFFF;
+ r.values.v3 = 0xFFFFFFFF;
+
+ f (r.un);
+ return 0;
+}
+
+/* { dg-final { scan-assembler "movw\tip, #8191" } } */
+/* { dg-final { scan-assembler "movt\tip, 63" } } */
+/* { dg-final { scan-assembler "and\tr0, r0, ip" } } */
+/* { dg-final { scan-assembler "movw\tip, #511" } } */
+/* { dg-final { scan-assembler "movt\tip, 65535" } } */
+/* { dg-final { scan-assembler "and\tr1, r1, ip" } } */
+/* { dg-final { scan-assembler "movw\tip, #65535" } } */
+/* { dg-final { scan-assembler "movt\tip, 31" } } */
+/* { dg-final { scan-assembler "and\tr2, r2, ip" } } */
+/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
+/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
diff --git a/gcc/testsuite/gcc.target/arm/cmse/struct-1.c b/gcc/testsuite/gcc.target/arm/cmse/struct-1.c
new file mode 100644
index 00000000000..2d366a944df
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/cmse/struct-1.c
@@ -0,0 +1,33 @@
+/* { dg-do run } */
+/* { dg-options "--save-temps -mcmse -Wl,--section-start,.gnu.sgstubs=0x20400000" } */
+
+typedef struct
+{
+ unsigned char a;
+ unsigned short b;
+} test_st;
+
+test_st __attribute__ ((cmse_nonsecure_entry)) foo (void)
+{
+ test_st t;
+ t.a = 255u;
+ t.b = 32767u;
+ return t;
+}
+
+int
+main (void)
+{
+ test_st t;
+ t = foo ();
+ if (t.a != 255u || t.b != 32767u)
+ __builtin_abort ();
+ return 0;
+}
+
+/* { dg-final { scan-assembler "movs\tr1, #255" } } */
+/* { dg-final { scan-assembler "movt\tr1, 65535" } } */
+/* { dg-final { scan-assembler "ands\tr0(, r0)?, r1" } } */
+/* { dg-final { scan-assembler "bxns" } } */
+
+
diff --git a/gcc/testsuite/gcc.target/arm/movdi_movw.c b/gcc/testsuite/gcc.target/arm/movdi_movw.c
new file mode 100644
index 00000000000..0f6b839da61
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/movdi_movw.c
@@ -0,0 +1,10 @@
+/* { dg-do compile { target { arm_thumb2 || arm_thumb1_movt_ok } } } */
+/* { dg-options "-O2" } */
+
+long long
+movdi (int a)
+{
+ return 0xF0F0;
+}
+
+/* { dg-final { scan-assembler-times "movw\tr0, #61680" 1 } } */
diff --git a/gcc/testsuite/gcc.target/arm/movhi_movw.c b/gcc/testsuite/gcc.target/arm/movhi_movw.c
new file mode 100644
index 00000000000..b097a8a019c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/movhi_movw.c
@@ -0,0 +1,10 @@
+/* { dg-do compile { target { arm_thumb2 || arm_thumb1_movt_ok } } } */
+/* { dg-options "-O2" } */
+
+short
+movsi (void)
+{
+ return (short) 0x7070;
+}
+
+/* { dg-final { scan-assembler-times "movw\tr0, #28784" 1 } } */
diff --git a/gcc/testsuite/gcc.target/arm/movsi_movw.c b/gcc/testsuite/gcc.target/arm/movsi_movw.c
new file mode 100644
index 00000000000..d50906e76ab
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/movsi_movw.c
@@ -0,0 +1,10 @@
+/* { dg-do compile { target { arm_thumb2 || arm_thumb1_movt_ok } } } */
+/* { dg-options "-O2" } */
+
+int
+movsi (void)
+{
+ return 0xF0F0;
+}
+
+/* { dg-final { scan-assembler-times "movw\tr0, #61680" 1 } } */
diff --git a/gcc/testsuite/gcc.target/arm/optional_thumb-1.c b/gcc/testsuite/gcc.target/arm/optional_thumb-1.c
new file mode 100644
index 00000000000..23df62887ba
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/optional_thumb-1.c
@@ -0,0 +1,7 @@
+/* { dg-do compile } */
+/* { dg-skip-if "-marm/-mthumb/-march/-mcpu given" { *-*-*} { "-marm" "-mthumb" "-march=*" "-mcpu=*" } } */
+/* { dg-options "-march=armv6-m" } */
+
+/* Check that -mthumb is not needed when compiling for a Thumb-only target. */
+
+int foo;
diff --git a/gcc/testsuite/gcc.target/arm/optional_thumb-2.c b/gcc/testsuite/gcc.target/arm/optional_thumb-2.c
new file mode 100644
index 00000000000..4bd53a45eca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/optional_thumb-2.c
@@ -0,0 +1,7 @@
+/* { dg-do compile } */
+/* { dg-skip-if "-marm/-mthumb/-march/-mcpu given" { *-*-*} { "-marm" "-mthumb" "-march=*" "-mcpu=*" } } */
+/* { dg-options "-mcpu=cortex-m4" } */
+
+/* Check that -mthumb is not needed when compiling for a Thumb-only target. */
+
+int foo;
diff --git a/gcc/testsuite/gcc.target/arm/optional_thumb-3.c b/gcc/testsuite/gcc.target/arm/optional_thumb-3.c
new file mode 100644
index 00000000000..f1fd5c8840b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/optional_thumb-3.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_cortex_m } */
+/* { dg-skip-if "-mthumb given" { *-*-*} { "-mthumb" } } */
+/* { dg-options "-marm" } */
+/* { dg-error "target CPU does not support ARM mode" "missing error with -marm on Thumb-only targets" { target *-*-*} 0 } */
+
+/* Check that -marm gives an error when compiling for a Thumb-only target. */
+
+int foo;
diff --git a/gcc/testsuite/gcc.target/arm/pr42574.c b/gcc/testsuite/gcc.target/arm/pr42574.c
index 0ccd05f9922..d3d6b5d32e5 100644
--- a/gcc/testsuite/gcc.target/arm/pr42574.c
+++ b/gcc/testsuite/gcc.target/arm/pr42574.c
@@ -1,5 +1,5 @@
+/* { dg-do compile { target { arm_thumb1_ok && { ! arm_thumb1_movt_ok } } } } */
/* { dg-options "-mthumb -Os -fpic" } */
-/* { dg-require-effective-target arm_thumb1_ok } */
/* { dg-require-effective-target fpic } */
/* Make sure the address of glob.c is calculated only once and using
a logical shift for the offset (200<<1). */
diff --git a/gcc/testsuite/gcc.target/arm/pure-code/ffunction-sections.c b/gcc/testsuite/gcc.target/arm/pure-code/ffunction-sections.c
new file mode 100644
index 00000000000..26fe38c0529
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/pure-code/ffunction-sections.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-skip-if "" { *-*-* } { "-fpic" "-fPIC" } { "" } } */
+/* { dg-options "-ffunction-sections -mpure-code" } */
+#include <limits.h>
+
+char * foo (void)
+{
+ return "foo";
+}
+
+unsigned int bar (unsigned int b)
+{
+ return UINT_MAX - b;
+}
+
+/* { dg-final { scan-assembler {\.section\t\.text\.foo[^\n]*\"0x20000006\"} } } */
+/* { dg-final { scan-assembler {\.section\t\.text\.bar[^\n]*\"0x20000006\"} } } */
diff --git a/gcc/testsuite/gcc.target/arm/pure-code/no-casesi.c b/gcc/testsuite/gcc.target/arm/pure-code/no-casesi.c
new file mode 100644
index 00000000000..ba116a8261b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/pure-code/no-casesi.c
@@ -0,0 +1,29 @@
+/* { dg-do compile } */
+/* { dg-options "-mpure-code" } */
+/* { dg-skip-if "" { *-*-* } { "-g" "-fpic" "-fPIC" } { "" } } */
+
+extern int foo (void);
+extern int bar (void);
+extern int baz (void);
+extern int fooz (void);
+
+int caller (unsigned int reg_type)
+{
+ switch (reg_type)
+ {
+ case 0x80000000:
+ return (int) foo ();
+
+ case 0x80000003:
+ return (int) bar ();
+
+ case 0x80000001:
+ return (int) baz ();
+
+ case 0x80000004:
+ return (int) fooz ();
+ }
+}
+
+/* { dg-final { scan-assembler-not "\\.(float|l\\?double|\d?byte|short|int|long|quad|word)\\s+\[^.\]" } } */
+/* { dg-final { scan-assembler "text,\"0x20000006\"" } } */
diff --git a/gcc/testsuite/gcc.target/arm/pure-code/no-literal-pool.c b/gcc/testsuite/gcc.target/arm/pure-code/no-literal-pool.c
new file mode 100644
index 00000000000..4b893fd32f7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/pure-code/no-literal-pool.c
@@ -0,0 +1,68 @@
+/* { dg-do compile } */
+/* { dg-options "-mpure-code" } */
+/* { dg-skip-if "" { *-*-* } { "-g" "-fpic" "-fPIC" } { "" } } */
+
+float sf;
+double df;
+long long l;
+static char *p = "Hello World";
+
+float
+testsf (float *p)
+{
+ if (*p > 1.1234f)
+ return 2.1234f;
+ else
+ return 3.1234f;
+}
+
+double
+testdf (double *p)
+{
+ if (*p > 4.1234)
+ return 2.1234;
+ else
+ return 3.1234;
+}
+
+long long
+testll (long long *p)
+{
+ if (*p > 0x123456789ABCDEFll)
+ return 0x111111111ll;
+ else
+ return 0x222222222ll;
+}
+
+char *
+testchar ()
+{
+ return p + 4;
+}
+
+int
+foo (int a, int b)
+{
+ int i;
+ volatile int *labelref = &&label1;
+
+ if (a > b)
+ {
+ while (i < b)
+ {
+ a += *labelref;
+ i += 1;
+ }
+ goto *labelref;
+ }
+ else
+ b = b + 3;
+
+ a = a * b;
+
+label1:
+ return a + b;
+}
+
+/* { dg-final { scan-assembler-not "\\.(float|l\\?double|\d?byte|short|int|long|quad|word)\\s+\[^.\]" } } */
+/* { dg-final { scan-assembler "text,\"0x20000006\"" } } */
diff --git a/gcc/testsuite/gcc.target/arm/pure-code/pure-code.exp b/gcc/testsuite/gcc.target/arm/pure-code/pure-code.exp
new file mode 100644
index 00000000000..41894600964
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/pure-code/pure-code.exp
@@ -0,0 +1,58 @@
+# Copyright (C) 1997-2016 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# GCC testsuite for ARM's -mpure-code option, using the `dg.exp' driver.
+
+# Load support procs.
+load_lib gcc-dg.exp
+
+# If a testcase doesn't have special options, use these.
+global DEFAULT_CFLAGS
+if ![info exists DEFAULT_CFLAGS] then {
+ set DEFAULT_CFLAGS " -ansi -pedantic-errors"
+}
+
+# The -mpure-code option is only available for M-profile targets that support
+# thumb2.
+if {[check_effective_target_arm_thumb2_ok]
+ && [check_effective_target_arm_cortex_m]} then {
+# Initialize `dg'.
+dg-init
+
+set saved-dg-do-what-default ${dg-do-what-default}
+set dg-do-what-default "assemble"
+
+set saved-lto_torture_options ${LTO_TORTURE_OPTIONS}
+
+# Add -ffat-lto-objects option to all LTO options such that we can do assembly
+# scans.
+proc add_fat_objects { list } {
+ set res {}
+ foreach el $list {set res [lappend res [concat $el " -ffat-lto-objects"]]}
+ return $res
+};
+set LTO_TORTURE_OPTIONS [add_fat_objects ${LTO_TORTURE_OPTIONS}]
+
+gcc-dg-runtest [lsort [glob $srcdir/$subdir/*.c]] \
+ "" $DEFAULT_CFLAGS
+
+# Restore global values
+set dg-do-what-default ${saved-dg-do-what-default}
+set LTO_TORTURE_OPTIONS ${saved-lto_torture_options}
+
+# All done.
+dg-finish
+}
diff --git a/gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data.c b/gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-1.c
index 089a72b67f3..089a72b67f3 100644
--- a/gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data.c
+++ b/gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-1.c
diff --git a/gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-2.c b/gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-2.c
new file mode 100644
index 00000000000..c52de113809
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-2.c
@@ -0,0 +1,27 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_cortex_m } */
+/* { dg-require-effective-target arm_thumb2_ok } */
+/* { dg-skip-if "do not override -mfloat-abi" { *-*-* } { "-mfloat-abi=*" } { "-mfloat-abi=hard" } } */
+/* { dg-options "-O2 -mthumb -mfloat-abi=hard -mslow-flash-data" } */
+
+float f (float);
+
+const float max = 0.01f;
+
+int
+g (float in)
+{
+ if (f (in) + f (in) < max)
+ return 0;
+ return 1;
+}
+
+double foo (void)
+{
+ return 0xF1EC7A5239123AF;
+}
+
+double bar (void)
+{
+ return 0.0f;
+}
diff --git a/gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-3.c b/gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-3.c
new file mode 100644
index 00000000000..f4c17c9fa85
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-3.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_cortex_m } */
+/* { dg-require-effective-target arm_thumb2_ok } */
+/* { dg-skip-if "do not override -mfloat-abi" { *-*-* } { "-mfloat-abi=*" } { "-mfloat-abi=hard" } } */
+/* { dg-options "-mthumb -mfloat-abi=hard -mslow-flash-data" } */
+
+/* From PR71607 */
+
+float b;
+void fn1 ();
+
+float
+fn2 ()
+{
+ return 1.1f;
+}
+
+void
+fn3 ()
+{
+ float a[2];
+ a[1] = b;
+ fn1 (a);
+}
+
+/* { dg-final { scan-assembler-not "\\.(float|l\\?double|\d?byte|short|int|long|quad|word)\\s+\[^.\]" } } */
diff --git a/gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-4.c b/gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-4.c
new file mode 100644
index 00000000000..dbe129168d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-4.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_cortex_m } */
+/* { dg-require-effective-target arm_thumb2_ok } */
+/* { dg-require-effective-target arm_vfp_ok } */
+/* { dg-skip-if "do not override -mfloat-abi" { *-*-* } { "-mfloat-abi=*" } { "-mfloat-abi=hard" } } */
+/* { dg-options "-O2 -mthumb -mfloat-abi=hard -mslow-flash-data" } */
+
+double __attribute__ ((target ("fpu=fpv5-d16")))
+foo (void)
+{
+ return 1.0f;
+}
+
+float __attribute__ ((target ("fpu=fpv5-d16")))
+bar (void)
+{
+ return 1.0f;
+}
+
+float __attribute__ ((target ("fpu=fpv5-sp-d16")))
+baz (void)
+{
+ return 1.0f;
+}
+
+/* { dg-final { scan-assembler-times "#1\\.0e\\+0" 3 } } */
diff --git a/gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-5.c b/gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-5.c
new file mode 100644
index 00000000000..9cc2539b31d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/thumb2-slow-flash-data-5.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_cortex_m } */
+/* { dg-require-effective-target arm_vfp_ok } */
+/* { dg-require-effective-target arm_thumb2_ok } */
+/* { dg-skip-if "do not override -mfloat-abi" { *-*-* } { "-mfloat-abi=*" } { "-mfloat-abi=hard" } } */
+/* { dg-options "-O2 -mthumb -mfloat-abi=hard -mslow-flash-data" } */
+
+double __attribute__ ((target ("fpu=fpv5-sp-d16")))
+foo (void)
+{
+ return 1.0f;
+}
+
+/* { dg-final { scan-assembler-not "#1\\.0e\\+0" } } */
+/* { dg-final { scan-assembler-not "\\.(float|l\\?double|\d?byte|short|int|long|quad|word)\\s+\[^.\]" } } */
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index f0f5ac4ee48..c123b849a01 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -3209,22 +3209,25 @@ proc check_effective_target_arm_fp16_ok { } {
# Usage: /* { dg-require-effective-target arm_arch_v5_ok } */
# /* { dg-add-options arm_arch_v5 } */
# /* { dg-require-effective-target arm_arch_v5_multilib } */
-foreach { armfunc armflag armdef } { v4 "-march=armv4 -marm" __ARM_ARCH_4__
- v4t "-march=armv4t" __ARM_ARCH_4T__
- v5 "-march=armv5 -marm" __ARM_ARCH_5__
- v5t "-march=armv5t" __ARM_ARCH_5T__
- v5te "-march=armv5te" __ARM_ARCH_5TE__
- v6 "-march=armv6" __ARM_ARCH_6__
- v6k "-march=armv6k" __ARM_ARCH_6K__
- v6t2 "-march=armv6t2" __ARM_ARCH_6T2__
- v6z "-march=armv6z" __ARM_ARCH_6Z__
- v6m "-march=armv6-m -mthumb" __ARM_ARCH_6M__
- v7a "-march=armv7-a" __ARM_ARCH_7A__
- v7r "-march=armv7-r" __ARM_ARCH_7R__
- v7m "-march=armv7-m -mthumb" __ARM_ARCH_7M__
- v7em "-march=armv7e-m -mthumb" __ARM_ARCH_7EM__
- v8a "-march=armv8-a" __ARM_ARCH_8A__
- v8_1a "-march=armv8.1a" __ARM_ARCH_8A__ } {
+foreach { armfunc armflag armdef } {
+ v4 "-march=armv4 -marm" __ARM_ARCH_4__
+ v4t "-march=armv4t" __ARM_ARCH_4T__
+ v5 "-march=armv5 -marm" __ARM_ARCH_5__
+ v5t "-march=armv5t" __ARM_ARCH_5T__
+ v5te "-march=armv5te" __ARM_ARCH_5TE__
+ v6 "-march=armv6" __ARM_ARCH_6__
+ v6k "-march=armv6k" __ARM_ARCH_6K__
+ v6t2 "-march=armv6t2" __ARM_ARCH_6T2__
+ v6z "-march=armv6z" __ARM_ARCH_6Z__
+ v6m "-march=armv6-m -mthumb -mfloat-abi=soft" __ARM_ARCH_6M__
+ v7a "-march=armv7-a" __ARM_ARCH_7A__
+ v7r "-march=armv7-r" __ARM_ARCH_7R__
+ v7m "-march=armv7-m -mthumb" __ARM_ARCH_7M__
+ v7em "-march=armv7e-m -mthumb" __ARM_ARCH_7EM__
+ v8a "-march=armv8-a" __ARM_ARCH_8A__
+ v8_1a "-march=armv8.1a" __ARM_ARCH_8A__
+ v8m_base "-march=armv8-m.base -mthumb -mfloat-abi=soft" __ARM_ARCH_8M_BASE__
+ v8m_main "-march=armv8-m.main -mthumb" __ARM_ARCH_8M_MAIN__ } {
eval [string map [list FUNC $armfunc FLAG $armflag DEF $armdef ] {
proc check_effective_target_arm_arch_FUNC_ok { } {
if { [ string match "*-marm*" "FLAG" ] &&
@@ -3352,15 +3355,60 @@ proc check_effective_target_arm_cortex_m { } {
return 0
}
return [check_no_compiler_messages arm_cortex_m assembly {
- #if !defined(__ARM_ARCH_7M__) \
- && !defined (__ARM_ARCH_7EM__) \
- && !defined (__ARM_ARCH_6M__)
- #error !__ARM_ARCH_7M__ && !__ARM_ARCH_7EM__ && !__ARM_ARCH_6M__
+ #if defined(__ARM_ARCH_ISA_ARM)
+ #error __ARM_ARCH_ISA_ARM is defined
#endif
int i;
} "-mthumb"]
}
+# Return 1 if this is an ARM target where -mthumb causes Thumb-1 to be
+# used and MOVT/MOVW instructions to be available.
+
+proc check_effective_target_arm_thumb1_movt_ok {} {
+ if [check_effective_target_arm_thumb1_ok] {
+ return [check_no_compiler_messages arm_movt object {
+ int
+ foo (void)
+ {
+ asm ("movt r0, #42");
+ }
+ } "-mthumb"]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is an ARM target where -mthumb causes Thumb-1 to be
+# used and CBZ and CBNZ instructions are available.
+
+proc check_effective_target_arm_thumb1_cbz_ok {} {
+ if [check_effective_target_arm_thumb1_ok] {
+ return [check_no_compiler_messages arm_movt object {
+ int
+ foo (void)
+ {
+ asm ("cbz r0, 2f\n2:");
+ }
+ } "-mthumb"]
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if this is an ARM target where ARMv8-M Security Extensions is
+# available.
+
+proc check_effective_target_arm_cmse_ok {} {
+ return [check_no_compiler_messages arm_cmse object {
+ int
+ foo (void)
+ {
+ asm ("bxns r0");
+ }
+ } "-mcmse"];
+}
+
# Return 1 if this compilation turns on string_ops_prefer_neon on.
proc check_effective_target_arm_tune_string_ops_prefer_neon { } {
@@ -7016,3 +7064,78 @@ proc check_effective_target_offload_hsa { } {
int main () {return 0;}
} "-foffload=hsa" ]
}
+
+# Return 1 if the target supports coprocessor instructions: cdp, ldc, stc, mcr and
+# mrc.
+proc check_effective_target_arm_coproc1_ok_nocache { } {
+ if { ![istarget arm*-*-*] } {
+ return 0
+ }
+ return [check_no_compiler_messages_nocache arm_coproc1_ok assembly {
+ #if (__thumb__ && !__thumb2__) || __ARM_ARCH < 4
+ #error FOO
+ #endif
+ }]
+}
+
+proc check_effective_target_arm_coproc1_ok { } {
+ return [check_cached_effective_target arm_coproc1_ok \
+ check_effective_target_arm_coproc1_ok_nocache]
+}
+
+# Return 1 if the target supports all coprocessor instructions checked by
+# check_effective_target_arm_coproc1_ok and the following: cdp2, ldc2, ldc2l,
+# stc2, stc2l, mcr2 and mrc2.
+proc check_effective_target_arm_coproc2_ok_nocache { } {
+ if { ![check_effective_target_arm_coproc1_ok] } {
+ return 0
+ }
+ return [check_no_compiler_messages_nocache arm_coproc2_ok assembly {
+ #if __ARM_ARCH < 5
+ #error FOO
+ #endif
+ }]
+}
+
+proc check_effective_target_arm_coproc2_ok { } {
+ return [check_cached_effective_target arm_coproc2_ok \
+ check_effective_target_arm_coproc2_ok_nocache]
+}
+
+# Return 1 if the target supports all coprocessor instructions checked by
+# check_effective_target_arm_coproc2_ok in addition to the following: mcrr
+# and mrrc.
+proc check_effective_target_arm_coproc3_ok_nocache { } {
+ if { ![check_effective_target_arm_coproc2_ok] } {
+ return 0
+ }
+ return [check_no_compiler_messages_nocache arm_coproc3_ok assembly {
+ #if __ARM_ARCH < 6 && !defined (__ARM_ARCH_5TE__)
+ #error FOO
+ #endif
+ }]
+}
+
+proc check_effective_target_arm_coproc3_ok { } {
+ return [check_cached_effective_target arm_coproc3_ok \
+ check_effective_target_arm_coproc3_ok_nocache]
+}
+
+# Return 1 if the target supports all coprocessor instructions checked by
+# check_effective_target_arm_coproc3_ok in addition to the following: mcrr2
+# and mrrc2.
+proc check_effective_target_arm_coproc4_ok_nocache { } {
+ if { ![check_effective_target_arm_coproc3_ok] } {
+ return 0
+ }
+ return [check_no_compiler_messages_nocache arm_coproc4_ok assembly {
+ #if __ARM_ARCH < 6
+ #error FOO
+ #endif
+ }]
+}
+
+proc check_effective_target_arm_coproc4_ok { } {
+ return [check_cached_effective_target arm_coproc4_ok \
+ check_effective_target_arm_coproc4_ok_nocache]
+}
diff --git a/gcc/tree.h b/gcc/tree.h
index d5d89df4c05..cb687d266c6 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -4628,69 +4628,6 @@ extern void warn_deprecated_use (tree, tree);
extern void cache_integer_cst (tree);
extern const char *combined_fn_name (combined_fn);
-/* Return the memory model from a host integer. */
-static inline enum memmodel
-memmodel_from_int (unsigned HOST_WIDE_INT val)
-{
- return (enum memmodel) (val & MEMMODEL_MASK);
-}
-
-/* Return the base memory model from a host integer. */
-static inline enum memmodel
-memmodel_base (unsigned HOST_WIDE_INT val)
-{
- return (enum memmodel) (val & MEMMODEL_BASE_MASK);
-}
-
-/* Return TRUE if the memory model is RELAXED. */
-static inline bool
-is_mm_relaxed (enum memmodel model)
-{
- return (model & MEMMODEL_BASE_MASK) == MEMMODEL_RELAXED;
-}
-
-/* Return TRUE if the memory model is CONSUME. */
-static inline bool
-is_mm_consume (enum memmodel model)
-{
- return (model & MEMMODEL_BASE_MASK) == MEMMODEL_CONSUME;
-}
-
-/* Return TRUE if the memory model is ACQUIRE. */
-static inline bool
-is_mm_acquire (enum memmodel model)
-{
- return (model & MEMMODEL_BASE_MASK) == MEMMODEL_ACQUIRE;
-}
-
-/* Return TRUE if the memory model is RELEASE. */
-static inline bool
-is_mm_release (enum memmodel model)
-{
- return (model & MEMMODEL_BASE_MASK) == MEMMODEL_RELEASE;
-}
-
-/* Return TRUE if the memory model is ACQ_REL. */
-static inline bool
-is_mm_acq_rel (enum memmodel model)
-{
- return (model & MEMMODEL_BASE_MASK) == MEMMODEL_ACQ_REL;
-}
-
-/* Return TRUE if the memory model is SEQ_CST. */
-static inline bool
-is_mm_seq_cst (enum memmodel model)
-{
- return (model & MEMMODEL_BASE_MASK) == MEMMODEL_SEQ_CST;
-}
-
-/* Return TRUE if the memory model is a SYNC variant. */
-static inline bool
-is_mm_sync (enum memmodel model)
-{
- return (model & MEMMODEL_SYNC);
-}
-
/* Compare and hash for any structure which begins with a canonical
pointer. Assumes all pointers are interchangeable, which is sort
of already assumed by gcc elsewhere IIRC. */
diff --git a/gcc/tsan.c b/gcc/tsan.c
index 449b5066bdf..8bed5f2bd06 100644
--- a/gcc/tsan.c
+++ b/gcc/tsan.c
@@ -25,6 +25,7 @@ along with GCC; see the file COPYING3. If not see
#include "backend.h"
#include "rtl.h"
#include "tree.h"
+#include "memmodel.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
diff --git a/gcc/varasm.c b/gcc/varasm.c
index b65f29c13a4..755fed2868d 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -6242,7 +6242,8 @@ void
default_elf_asm_named_section (const char *name, unsigned int flags,
tree decl)
{
- char flagchars[10], *f = flagchars;
+ char flagchars[11], *f = flagchars;
+ unsigned int numeric_value = 0;
/* If we have already declared this section, we can use an
abbreviated form to switch back to it -- unless this section is
@@ -6255,27 +6256,34 @@ default_elf_asm_named_section (const char *name, unsigned int flags,
return;
}
- if (!(flags & SECTION_DEBUG))
- *f++ = 'a';
+ /* If we have a machine specific flag, then use the numeric value to pass
+ this on to GAS. */
+ if (targetm.asm_out.elf_flags_numeric (flags, &numeric_value))
+ snprintf (f, sizeof (flagchars), "0x%08x", numeric_value);
+ else
+ {
+ if (!(flags & SECTION_DEBUG))
+ *f++ = 'a';
#if defined (HAVE_GAS_SECTION_EXCLUDE) && HAVE_GAS_SECTION_EXCLUDE == 1
- if (flags & SECTION_EXCLUDE)
- *f++ = 'e';
+ if (flags & SECTION_EXCLUDE)
+ *f++ = 'e';
#endif
- if (flags & SECTION_WRITE)
- *f++ = 'w';
- if (flags & SECTION_CODE)
- *f++ = 'x';
- if (flags & SECTION_SMALL)
- *f++ = 's';
- if (flags & SECTION_MERGE)
- *f++ = 'M';
- if (flags & SECTION_STRINGS)
- *f++ = 'S';
- if (flags & SECTION_TLS)
- *f++ = TLS_SECTION_ASM_FLAG;
- if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
- *f++ = 'G';
- *f = '\0';
+ if (flags & SECTION_WRITE)
+ *f++ = 'w';
+ if (flags & SECTION_CODE)
+ *f++ = 'x';
+ if (flags & SECTION_SMALL)
+ *f++ = 's';
+ if (flags & SECTION_MERGE)
+ *f++ = 'M';
+ if (flags & SECTION_STRINGS)
+ *f++ = 'S';
+ if (flags & SECTION_TLS)
+ *f++ = TLS_SECTION_ASM_FLAG;
+ if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
+ *f++ = 'G';
+ *f = '\0';
+ }
fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars);
diff --git a/libgcc/ChangeLog.arm b/libgcc/ChangeLog.arm
new file mode 100644
index 00000000000..481f85b4b92
--- /dev/null
+++ b/libgcc/ChangeLog.arm
@@ -0,0 +1,69 @@
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-02 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/cmse_nonsecure_call.S: New.
+ * config/arm/t-arm: Compile cmse_nonsecure_call.S
+
+2016-12-05 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-12-02 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/t-arm (HAVE_CMSE): New.
+ * config/arm/cmse.c: New.
+
+2016-07-12 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ Backport from mainline
+ 2016-07-11 Hale Wang <hale.wang@arm.com>
+ Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * config/arm/lib1funcs.S: Add new wrapper.
+
+2016-07-11 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-07-07 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/lib1funcs.S (__ARM_ARCH__): Define to 8 for ARMv8-M.
+
+2016-07-11 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-07-07 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/lib1funcs.S (HAVE_ARM_CLZ): Define for ARMv6* or later
+ and ARMv5t* rather than for a fixed list of architectures.
+
+2016-07-11 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ Backport from mainline
+ 2016-07-07 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
+ * config/arm/bpabi-v6m.S: Clarify what architectures is the
+ implementation suitable for.
+ * config/arm/lib1funcs.S (__prefer_thumb__): Define among other cases
+ for all Thumb-1 only targets.
+ (NOT_ISA_TARGET_32BIT): Define for Thumb-1 only targets.
+ (THUMB_LDIV0): Test for NOT_ISA_TARGET_32BIT rather than
+ __ARM_ARCH_6M__.
+ (EQUIV): Likewise.
+ (ARM_FUNC_ALIAS): Likewise.
+ (umodsi3): Add check to __ARM_ARCH_ISA_THUMB != 1 to guard the idiv
+ version.
+ (modsi3): Likewise.
+ (clzsi2): Test for NOT_ISA_TARGET_32BIT rather than __ARM_ARCH_6M__.
+ (clzdi2): Likewise.
+ (ctzsi2): Likewise.
+ (L_interwork_call_via_rX): Test for __ARM_ARCH_ISA_ARM rather than
+ __ARM_ARCH_6M__ in guard for checking whether it is defined.
+ (final includes): Test for NOT_ISA_TARGET_32BIT rather than
+ __ARM_ARCH_6M__ and add comment to indicate the connection between
+ this condition and the one in gcc/config/arm/elf.h.
+ * config/arm/libunwind.S: Test for __ARM_ARCH_ISA_THUMB and
+ __ARM_ARCH_ISA_ARM rather than __ARM_ARCH_6M__.
+ * config/arm/t-softfp: Likewise.
diff --git a/libgcc/config/arm/bpabi-v6m.S b/libgcc/config/arm/bpabi-v6m.S
index 5d35aa6afca..27f33a4e8ce 100644
--- a/libgcc/config/arm/bpabi-v6m.S
+++ b/libgcc/config/arm/bpabi-v6m.S
@@ -1,4 +1,5 @@
-/* Miscellaneous BPABI functions. ARMv6M implementation
+/* Miscellaneous BPABI functions. Thumb-1 implementation, suitable for ARMv4T,
+ ARMv6-M and ARMv8-M Baseline like ISA variants.
Copyright (C) 2006-2016 Free Software Foundation, Inc.
Contributed by CodeSourcery.
diff --git a/libgcc/config/arm/cmse.c b/libgcc/config/arm/cmse.c
new file mode 100644
index 00000000000..fe3a22967c8
--- /dev/null
+++ b/libgcc/config/arm/cmse.c
@@ -0,0 +1,108 @@
+/* ARMv8-M Security Extensions routines.
+ Copyright (C) 2015-2016 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+
+#if __ARM_FEATURE_CMSE & 1
+
+#include <arm_cmse.h>
+
+/* ARM intrinsic function to perform a permission check on a given
+ address range. See ACLE changes for ARMv8-M. */
+
+void *
+cmse_check_address_range (void *p, size_t size, int flags)
+{
+ cmse_address_info_t permb, perme;
+ char *pb = (char *) p, *pe;
+
+ /* Check if the range wraps around. */
+ if (UINTPTR_MAX - (uintptr_t) p < size)
+ return NULL;
+
+ /* Check if an unknown flag is present. */
+ int known = CMSE_MPU_UNPRIV | CMSE_MPU_READWRITE | CMSE_MPU_READ;
+ int known_secure_level = CMSE_MPU_UNPRIV;
+#if __ARM_FEATURE_CMSE & 2
+ known |= CMSE_AU_NONSECURE | CMSE_MPU_NONSECURE;
+ known_secure_level |= CMSE_MPU_NONSECURE;
+#endif
+ if (flags & (~known))
+ return NULL;
+
+ /* Execute the right variant of the TT instructions. */
+ pe = pb + size - 1;
+ const int singleCheck = (((uintptr_t) pb ^ (uintptr_t) pe) < 32);
+ switch (flags & known_secure_level)
+ {
+ case 0:
+ permb = cmse_TT (pb);
+ perme = singleCheck ? permb : cmse_TT (pe);
+ break;
+ case CMSE_MPU_UNPRIV:
+ permb = cmse_TTT (pb);
+ perme = singleCheck ? permb : cmse_TTT (pe);
+ break;
+#if __ARM_FEATURE_CMSE & 2
+ case CMSE_MPU_NONSECURE:
+ permb = cmse_TTA (pb);
+ perme = singleCheck ? permb : cmse_TTA (pe);
+ break;
+ case CMSE_MPU_UNPRIV | CMSE_MPU_NONSECURE:
+ permb = cmse_TTAT (pb);
+ perme = singleCheck ? permb : cmse_TTAT (pe);
+ break;
+#endif
+ default:
+ /* Invalid flag, eg. CMSE_MPU_NONSECURE specified but
+ __ARM_FEATURE_CMSE & 2 == 0. */
+ return NULL;
+ }
+
+ /* Check that the range does not cross MPU, SAU, or IDAU boundaries. */
+ if (permb.value != perme.value)
+ return NULL;
+
+ /* Check the permissions on the range. */
+ switch (flags & (~known_secure_level))
+ {
+#if __ARM_FEATURE_CMSE & 2
+ case CMSE_MPU_READ | CMSE_MPU_READWRITE | CMSE_AU_NONSECURE:
+ case CMSE_MPU_READWRITE | CMSE_AU_NONSECURE:
+ return permb.flags.nonsecure_readwrite_ok ? p : NULL;
+ case CMSE_MPU_READ | CMSE_AU_NONSECURE:
+ return permb.flags.nonsecure_read_ok ? p : NULL;
+ case CMSE_AU_NONSECURE:
+ return permb.flags.secure ? NULL : p;
+#endif
+ case CMSE_MPU_READ | CMSE_MPU_READWRITE:
+ case CMSE_MPU_READWRITE:
+ return permb.flags.readwrite_ok ? p : NULL;
+ case CMSE_MPU_READ:
+ return permb.flags.read_ok ? p : NULL;
+ default:
+ return NULL;
+ }
+}
+
+
+#endif /* __ARM_FEATURE_CMSE & 1. */
diff --git a/libgcc/config/arm/cmse_nonsecure_call.S b/libgcc/config/arm/cmse_nonsecure_call.S
new file mode 100644
index 00000000000..68b6a1cb500
--- /dev/null
+++ b/libgcc/config/arm/cmse_nonsecure_call.S
@@ -0,0 +1,131 @@
+/* CMSE wrapper function used to save, clear and restore callee-saved registers
+ for cmse_nonsecure_call calls.
+
+ Copyright (C) 2016 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+.syntax unified
+.thumb
+.global __gnu_cmse_nonsecure_call
+__gnu_cmse_nonsecure_call:
+#if defined(__ARM_ARCH_8M_MAIN__)
+push {r5-r11,lr}
+mov r7, r4
+mov r8, r4
+mov r9, r4
+mov r10, r4
+mov r11, r4
+mov ip, r4
+
+/* Save and clear callee-saved registers only if we are dealing with hard float
+ ABI. The unused caller-saved registers have already been cleared by GCC
+ generated code. */
+#ifdef __ARM_PCS_VFP
+vpush.f64 {d8-d15}
+mov r5, #0
+vmov d8, r5, r5
+#if __ARM_FP & 0x04
+vmov s18, s19, r5, r5
+vmov s20, s21, r5, r5
+vmov s22, s23, r5, r5
+vmov s24, s25, r5, r5
+vmov s26, s27, r5, r5
+vmov s28, s29, r5, r5
+vmov s30, s31, r5, r5
+#elif __ARM_FP & 0x08
+vmov.f64 d9, d8
+vmov.f64 d10, d8
+vmov.f64 d11, d8
+vmov.f64 d12, d8
+vmov.f64 d13, d8
+vmov.f64 d14, d8
+vmov.f64 d15, d8
+#else
+#error "Half precision implementation not supported."
+#endif
+/* Clear the cumulative exception-status bits (0-4,7) and the
+ condition code bits (28-31) of the FPSCR. */
+vmrs r5, fpscr
+movw r6, #65376
+movt r6, #4095
+ands r5, r6
+vmsr fpscr, r5
+
+/* We are not dealing with hard float ABI, so we can safely use the vlstm and
+ vlldm instructions without needing to preserve the registers used for
+ argument passing. */
+#else
+sub sp, sp, #0x88 /* Reserve stack space to save all floating point
+ registers, including FPSCR. */
+vlstm sp /* Lazy store and clearance of d0-d16 and FPSCR. */
+#endif /* __ARM_PCS_VFP */
+
+/* Make sure to clear the 'GE' bits of the APSR register if 32-bit SIMD
+ instructions are available. */
+#if defined(__ARM_FEATURE_SIMD32)
+msr APSR_nzcvqg, r4
+#else
+msr APSR_nzcvq, r4
+#endif
+
+mov r5, r4
+mov r6, r4
+blxns r4
+
+#ifdef __ARM_PCS_VFP
+vpop.f64 {d8-d15}
+#else
+vlldm sp /* Lazy restore of d0-d16 and FPSCR. */
+add sp, sp, #0x88 /* Free space used to save floating point registers. */
+#endif /* __ARM_PCS_VFP */
+
+pop {r5-r11, pc}
+
+#elif defined (__ARM_ARCH_8M_BASE__)
+push {r5-r7, lr}
+mov r5, r8
+mov r6, r9
+mov r7, r10
+push {r5-r7}
+mov r5, r11
+push {r5}
+mov r5, r4
+mov r6, r4
+mov r7, r4
+mov r8, r4
+mov r9, r4
+mov r10, r4
+mov r11, r4
+mov ip, r4
+msr APSR_nzcvq, r4
+blxns r4
+pop {r5}
+mov r11, r5
+pop {r5-r7}
+mov r10, r7
+mov r9, r6
+mov r8, r5
+pop {r5-r7, pc}
+
+#else
+#error "This should only be used for armv8-m base- and mainline."
+#endif
diff --git a/libgcc/config/arm/lib1funcs.S b/libgcc/config/arm/lib1funcs.S
index 375a5135110..ba52e7b762f 100644
--- a/libgcc/config/arm/lib1funcs.S
+++ b/libgcc/config/arm/lib1funcs.S
@@ -108,7 +108,8 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# define __ARM_ARCH__ 7
#endif
-#if defined(__ARM_ARCH_8A__)
+#if defined(__ARM_ARCH_8A__) || defined(__ARM_ARCH_8M_BASE__) \
+ || defined(__ARM_ARCH_8M_MAIN__)
# define __ARM_ARCH__ 8
#endif
@@ -124,10 +125,14 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
&& !defined(__thumb2__) \
&& (!defined(__THUMB_INTERWORK__) \
|| defined (__OPTIMIZE_SIZE__) \
- || defined(__ARM_ARCH_6M__)))
+ || !__ARM_ARCH_ISA_ARM))
# define __prefer_thumb__
#endif
+#if !__ARM_ARCH_ISA_ARM && __ARM_ARCH_ISA_THUMB == 1
+#define NOT_ISA_TARGET_32BIT 1
+#endif
+
/* How to return from a function call depends on the architecture variant. */
#if (__ARM_ARCH__ > 4) || defined(__ARM_ARCH_4T__)
@@ -305,35 +310,14 @@ LSYM(Lend_fde):
#ifdef __ARM_EABI__
.macro THUMB_LDIV0 name signed
-#if defined(__ARM_ARCH_6M__)
- .ifc \signed, unsigned
- cmp r0, #0
- beq 1f
- mov r0, #0
- mvn r0, r0 @ 0xffffffff
-1:
- .else
- cmp r0, #0
- beq 2f
- blt 3f
+#ifdef NOT_ISA_TARGET_32BIT
+
+ push {r0, lr}
mov r0, #0
- mvn r0, r0
- lsr r0, r0, #1 @ 0x7fffffff
- b 2f
-3: mov r0, #0x80
- lsl r0, r0, #24 @ 0x80000000
-2:
- .endif
- push {r0, r1, r2}
- ldr r0, 4f
- adr r1, 4f
- add r0, r1
- str r0, [sp, #8]
+ bl SYM(__aeabi_idiv0)
@ We know we are not on armv4t, so pop pc is safe.
- pop {r0, r1, pc}
- .align 2
-4:
- .word __aeabi_idiv0 - 4b
+ pop {r1, pc}
+
#elif defined(__thumb2__)
.syntax unified
.ifc \signed, unsigned
@@ -478,7 +462,7 @@ _L__\name:
#else /* !(__INTERWORKING_STUBS__ || __thumb2__) */
-#ifdef __ARM_ARCH_6M__
+#ifdef NOT_ISA_TARGET_32BIT
#define EQUIV .thumb_set
#else
.macro ARM_FUNC_START name sp_section=
@@ -510,7 +494,7 @@ SYM (__\name):
#endif
.endm
-#ifndef __ARM_ARCH_6M__
+#ifndef NOT_ISA_TARGET_32BIT
.macro ARM_FUNC_ALIAS new old
.globl SYM (__\new)
EQUIV SYM (__\new), SYM (__\old)
@@ -945,7 +929,170 @@ LSYM(Lover7):
add dividend, work
.endif
LSYM(Lgot_result):
-.endm
+.endm
+
+/* If performance is preferred, the following functions are provided. */
+#if defined(__prefer_thumb__) && !defined(__OPTIMIZE_SIZE__)
+
+/* Branch to div(n), and jump to label if curbit is lower than divisor. */
+.macro BranchToDiv n, label
+ lsr curbit, dividend, \n
+ cmp curbit, divisor
+ blo \label
+.endm
+
+/* Body of div(n). Compare (dividend >> n) with the divisor; if it is not
+ lower, subtract (divisor << n) from the dividend and set a quotient bit. */
+.macro DoDiv n
+ lsr curbit, dividend, \n
+ cmp curbit, divisor
+ bcc 1f
+ lsl curbit, divisor, \n
+ sub dividend, dividend, curbit
+
+1: adc result, result
+.endm
+
+/* The body of division with positive divisor. Unless the divisor is very
+ big, shift it up in multiples of four bits, since this is the amount of
+ unwinding in the main division loop. Continue shifting until the divisor
+ is larger than the dividend. */
+.macro THUMB1_Div_Positive
+ mov result, #0
+ BranchToDiv #1, LSYM(Lthumb1_div1)
+ BranchToDiv #4, LSYM(Lthumb1_div4)
+ BranchToDiv #8, LSYM(Lthumb1_div8)
+ BranchToDiv #12, LSYM(Lthumb1_div12)
+ BranchToDiv #16, LSYM(Lthumb1_div16)
+LSYM(Lthumb1_div_large_positive):
+ mov result, #0xff
+ lsl divisor, divisor, #8
+ rev result, result
+ lsr curbit, dividend, #16
+ cmp curbit, divisor
+ blo 1f
+ asr result, #8
+ lsl divisor, divisor, #8
+ beq LSYM(Ldivbyzero_waypoint)
+
+1: lsr curbit, dividend, #12
+ cmp curbit, divisor
+ blo LSYM(Lthumb1_div12)
+ b LSYM(Lthumb1_div16)
+LSYM(Lthumb1_div_loop):
+ lsr divisor, divisor, #8
+LSYM(Lthumb1_div16):
+ DoDiv #15
+ DoDiv #14
+ DoDiv #13
+ DoDiv #12
+LSYM(Lthumb1_div12):
+ DoDiv #11
+ DoDiv #10
+ DoDiv #9
+ DoDiv #8
+ bcs LSYM(Lthumb1_div_loop)
+LSYM(Lthumb1_div8):
+ DoDiv #7
+ DoDiv #6
+ DoDiv #5
+LSYM(Lthumb1_div5):
+ DoDiv #4
+LSYM(Lthumb1_div4):
+ DoDiv #3
+LSYM(Lthumb1_div3):
+ DoDiv #2
+LSYM(Lthumb1_div2):
+ DoDiv #1
+LSYM(Lthumb1_div1):
+ sub divisor, dividend, divisor
+ bcs 1f
+ cpy divisor, dividend
+
+1: adc result, result
+ cpy dividend, result
+ RET
+
+LSYM(Ldivbyzero_waypoint):
+ b LSYM(Ldiv0)
+.endm
+
+/* The body of division with negative divisor. Similar to
+ THUMB1_Div_Positive except that the shift steps are in multiples
+ of six bits. */
+.macro THUMB1_Div_Negative
+ lsr result, divisor, #31
+ beq 1f
+ neg divisor, divisor
+
+1: asr curbit, dividend, #32
+ bcc 2f
+ neg dividend, dividend
+
+2: eor curbit, result
+ mov result, #0
+ cpy ip, curbit
+ BranchToDiv #4, LSYM(Lthumb1_div_negative4)
+ BranchToDiv #8, LSYM(Lthumb1_div_negative8)
+LSYM(Lthumb1_div_large):
+ mov result, #0xfc
+ lsl divisor, divisor, #6
+ rev result, result
+ lsr curbit, dividend, #8
+ cmp curbit, divisor
+ blo LSYM(Lthumb1_div_negative8)
+
+ lsl divisor, divisor, #6
+ asr result, result, #6
+ cmp curbit, divisor
+ blo LSYM(Lthumb1_div_negative8)
+
+ lsl divisor, divisor, #6
+ asr result, result, #6
+ cmp curbit, divisor
+ blo LSYM(Lthumb1_div_negative8)
+
+ lsl divisor, divisor, #6
+ beq LSYM(Ldivbyzero_negative)
+ asr result, result, #6
+ b LSYM(Lthumb1_div_negative8)
+LSYM(Lthumb1_div_negative_loop):
+ lsr divisor, divisor, #6
+LSYM(Lthumb1_div_negative8):
+ DoDiv #7
+ DoDiv #6
+ DoDiv #5
+ DoDiv #4
+LSYM(Lthumb1_div_negative4):
+ DoDiv #3
+ DoDiv #2
+ bcs LSYM(Lthumb1_div_negative_loop)
+ DoDiv #1
+ sub divisor, dividend, divisor
+ bcs 1f
+ cpy divisor, dividend
+
+1: cpy curbit, ip
+ adc result, result
+ asr curbit, curbit, #1
+ cpy dividend, result
+ bcc 2f
+ neg dividend, dividend
+ cmp curbit, #0
+
+2: bpl 3f
+ neg divisor, divisor
+
+3: RET
+
+LSYM(Ldivbyzero_negative):
+ cpy curbit, ip
+ asr curbit, curbit, #1
+ bcc LSYM(Ldiv0)
+ neg dividend, dividend
+.endm
+#endif /* ARM Thumb version. */
+
/* ------------------------------------------------------------------------ */
/* Start of the Real Functions */
/* ------------------------------------------------------------------------ */
@@ -955,6 +1102,7 @@ LSYM(Lgot_result):
FUNC_START udivsi3
FUNC_ALIAS aeabi_uidiv udivsi3
+#if defined(__OPTIMIZE_SIZE__)
cmp divisor, #0
beq LSYM(Ldiv0)
@@ -972,6 +1120,14 @@ LSYM(udivsi3_skip_div0_test):
pop { work }
RET
+/* Implementation of aeabi_uidiv for ARMv6-M. This version is only
+ used in ARMv6-M when we need an efficient implementation. */
+#else
+LSYM(udivsi3_skip_div0_test):
+ THUMB1_Div_Positive
+
+#endif /* __OPTIMIZE_SIZE__ */
+
#elif defined(__ARM_ARCH_EXT_IDIV__)
ARM_FUNC_START udivsi3
@@ -1023,12 +1179,21 @@ LSYM(udivsi3_skip_div0_test):
FUNC_START aeabi_uidivmod
cmp r1, #0
beq LSYM(Ldiv0)
+# if defined(__OPTIMIZE_SIZE__)
push {r0, r1, lr}
bl LSYM(udivsi3_skip_div0_test)
POP {r1, r2, r3}
mul r2, r0
sub r1, r1, r2
bx r3
+# else
+ /* Both the quotient and remainder are calculated simultaneously
+ in THUMB1_Div_Positive. There is no need to calculate the
+ remainder again here. */
+ b LSYM(udivsi3_skip_div0_test)
+ RET
+# endif /* __OPTIMIZE_SIZE__ */
+
#elif defined(__ARM_ARCH_EXT_IDIV__)
ARM_FUNC_START aeabi_uidivmod
cmp r1, #0
@@ -1054,7 +1219,7 @@ ARM_FUNC_START aeabi_uidivmod
/* ------------------------------------------------------------------------ */
#ifdef L_umodsi3
-#ifdef __ARM_ARCH_EXT_IDIV__
+#if defined(__ARM_ARCH_EXT_IDIV__) && __ARM_ARCH_ISA_THUMB != 1
ARM_FUNC_START umodsi3
@@ -1084,7 +1249,7 @@ LSYM(Lover10):
RET
#else /* ARM version. */
-
+
FUNC_START umodsi3
subs r2, r1, #1 @ compare divisor with 1
@@ -1109,8 +1274,9 @@ LSYM(Lover10):
#if defined(__prefer_thumb__)
- FUNC_START divsi3
+ FUNC_START divsi3
FUNC_ALIAS aeabi_idiv divsi3
+#if defined(__OPTIMIZE_SIZE__)
cmp divisor, #0
beq LSYM(Ldiv0)
@@ -1133,7 +1299,7 @@ LSYM(Lover11):
blo LSYM(Lgot_result)
THUMB_DIV_MOD_BODY 0
-
+
mov r0, result
mov work, ip
cmp work, #0
@@ -1143,6 +1309,22 @@ LSYM(Lover12):
pop { work }
RET
+/* Implementation of aeabi_idiv for ARMv6-M. This version is only
+ used in ARMv6-M when we need an efficient implementation. */
+#else
+LSYM(divsi3_skip_div0_test):
+ cpy curbit, dividend
+ orr curbit, divisor
+ bmi LSYM(Lthumb1_div_negative)
+
+LSYM(Lthumb1_div_positive):
+ THUMB1_Div_Positive
+
+LSYM(Lthumb1_div_negative):
+ THUMB1_Div_Negative
+
+#endif /* __OPTIMIZE_SIZE__ */
+
#elif defined(__ARM_ARCH_EXT_IDIV__)
ARM_FUNC_START divsi3
@@ -1154,8 +1336,8 @@ LSYM(Lover12):
RET
#else /* ARM/Thumb-2 version. */
-
- ARM_FUNC_START divsi3
+
+ ARM_FUNC_START divsi3
ARM_FUNC_ALIAS aeabi_idiv divsi3
cmp r1, #0
@@ -1209,12 +1391,21 @@ LSYM(divsi3_skip_div0_test):
FUNC_START aeabi_idivmod
cmp r1, #0
beq LSYM(Ldiv0)
+# if defined(__OPTIMIZE_SIZE__)
push {r0, r1, lr}
bl LSYM(divsi3_skip_div0_test)
POP {r1, r2, r3}
mul r2, r0
sub r1, r1, r2
bx r3
+# else
+ /* Both the quotient and remainder are calculated simultaneously
+ in THUMB1_Div_Positive and THUMB1_Div_Negative. There is no
+ need to calculate the remainder again here. */
+ b LSYM(divsi3_skip_div0_test)
+ RET
+# endif /* __OPTIMIZE_SIZE__ */
+
#elif defined(__ARM_ARCH_EXT_IDIV__)
ARM_FUNC_START aeabi_idivmod
cmp r1, #0
@@ -1240,7 +1431,7 @@ ARM_FUNC_START aeabi_idivmod
/* ------------------------------------------------------------------------ */
#ifdef L_modsi3
-#if defined(__ARM_ARCH_EXT_IDIV__)
+#if defined(__ARM_ARCH_EXT_IDIV__) && __ARM_ARCH_ISA_THUMB != 1
ARM_FUNC_START modsi3
@@ -1508,14 +1699,15 @@ LSYM(Lover12):
#endif /* __symbian__ */
-#if ((__ARM_ARCH__ > 5) && !defined(__ARM_ARCH_6M__)) \
- || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
- || defined(__ARM_ARCH_5TEJ__)
+#if (__ARM_ARCH_ISA_THUMB == 2 \
+ || (__ARM_ARCH_ISA_ARM \
+ && (__ARM_ARCH__ > 5 \
+ || (__ARM_ARCH__ == 5 && __ARM_ARCH_ISA_THUMB))))
#define HAVE_ARM_CLZ 1
#endif
#ifdef L_clzsi2
-#if defined(__ARM_ARCH_6M__)
+#ifdef NOT_ISA_TARGET_32BIT
FUNC_START clzsi2
mov r1, #28
mov r3, #1
@@ -1576,7 +1768,7 @@ ARM_FUNC_START clzsi2
#ifdef L_clzdi2
#if !defined(HAVE_ARM_CLZ)
-# if defined(__ARM_ARCH_6M__)
+# ifdef NOT_ISA_TARGET_32BIT
FUNC_START clzdi2
push {r4, lr}
# else
@@ -1601,7 +1793,7 @@ ARM_FUNC_START clzdi2
bl __clzsi2
# endif
2:
-# if defined(__ARM_ARCH_6M__)
+# ifdef NOT_ISA_TARGET_32BIT
pop {r4, pc}
# else
RETLDM r4
@@ -1623,7 +1815,7 @@ ARM_FUNC_START clzdi2
#endif /* L_clzdi2 */
#ifdef L_ctzsi2
-#if defined(__ARM_ARCH_6M__)
+#ifdef NOT_ISA_TARGET_32BIT
FUNC_START ctzsi2
neg r1, r0
and r0, r0, r1
@@ -1738,7 +1930,7 @@ ARM_FUNC_START ctzsi2
/* Don't bother with the old interworking routines for Thumb-2. */
/* ??? Maybe only omit these on "m" variants. */
-#if !defined(__thumb2__) && !defined(__ARM_ARCH_6M__)
+#if !defined(__thumb2__) && __ARM_ARCH_ISA_ARM
#if defined L_interwork_call_via_rX
@@ -1983,11 +2175,12 @@ LSYM(Lchange_\register):
.endm
#ifndef __symbian__
-#ifndef __ARM_ARCH_6M__
+/* The condition here must match the one in gcc/config/arm/elf.h. */
+#ifndef NOT_ISA_TARGET_32BIT
#include "ieee754-df.S"
#include "ieee754-sf.S"
#include "bpabi.S"
-#else /* __ARM_ARCH_6M__ */
+#else /* NOT_ISA_TARGET_32BIT */
#include "bpabi-v6m.S"
-#endif /* __ARM_ARCH_6M__ */
+#endif /* NOT_ISA_TARGET_32BIT */
#endif /* !__symbian__ */
diff --git a/libgcc/config/arm/libunwind.S b/libgcc/config/arm/libunwind.S
index a68b10ddce9..3d7e70181fa 100644
--- a/libgcc/config/arm/libunwind.S
+++ b/libgcc/config/arm/libunwind.S
@@ -58,7 +58,7 @@
#endif
#endif
-#ifdef __ARM_ARCH_6M__
+#if !__ARM_ARCH_ISA_ARM && __ARM_ARCH_ISA_THUMB == 1
/* r0 points to a 16-word block. Upload these values to the actual core
state. */
@@ -169,7 +169,7 @@ FUNC_START gnu_Unwind_Save_WMMXC
UNPREFIX \name
.endm
-#else /* !__ARM_ARCH_6M__ */
+#else /* __ARM_ARCH_ISA_ARM || __ARM_ARCH_ISA_THUMB != 1 */
/* r0 points to a 16-word block. Upload these values to the actual core
state. */
@@ -351,7 +351,7 @@ ARM_FUNC_START gnu_Unwind_Save_WMMXC
UNPREFIX \name
.endm
-#endif /* !__ARM_ARCH_6M__ */
+#endif /* __ARM_ARCH_ISA_ARM || __ARM_ARCH_ISA_THUMB != 1 */
UNWIND_WRAPPER _Unwind_RaiseException 1
UNWIND_WRAPPER _Unwind_Resume 1
diff --git a/libgcc/config/arm/t-arm b/libgcc/config/arm/t-arm
index 4e17e99b4a5..9e85ac06b14 100644
--- a/libgcc/config/arm/t-arm
+++ b/libgcc/config/arm/t-arm
@@ -1,3 +1,17 @@
LIB1ASMSRC = arm/lib1funcs.S
LIB1ASMFUNCS = _thumb1_case_sqi _thumb1_case_uqi _thumb1_case_shi \
_thumb1_case_uhi _thumb1_case_si
+
+HAVE_CMSE:=$(findstring __ARM_FEATURE_CMSE,$(shell $(gcc_compile_bare) -dM -E - </dev/null))
+ifneq ($(shell $(gcc_compile_bare) -E -mcmse - </dev/null 2>/dev/null),)
+CMSE_OPTS:=-mcmse
+endif
+
+ifdef HAVE_CMSE
+libgcc-objects += cmse.o cmse_nonsecure_call.o
+
+cmse.o: $(srcdir)/config/arm/cmse.c
+ $(gcc_compile) -c $(CMSE_OPTS) $<
+cmse_nonsecure_call.o: $(srcdir)/config/arm/cmse_nonsecure_call.S
+ $(gcc_compile) -c $<
+endif
diff --git a/libgcc/config/arm/t-softfp b/libgcc/config/arm/t-softfp
index 4ede438baf6..554ec9bc47b 100644
--- a/libgcc/config/arm/t-softfp
+++ b/libgcc/config/arm/t-softfp
@@ -1,2 +1,2 @@
-softfp_wrap_start := '\#ifdef __ARM_ARCH_6M__'
+softfp_wrap_start := '\#if !__ARM_ARCH_ISA_ARM && __ARM_ARCH_ISA_THUMB == 1'
softfp_wrap_end := '\#endif'