author    Michael Meissner <meissner@linux.ibm.com>  2018-08-30 18:31:50 +0000
committer Michael Meissner <meissner@linux.ibm.com>  2018-08-30 18:31:50 +0000
commit    d0ed5596dfd55dbaf41f506784fafcda1d7ad63b (patch)
tree      6fc657e62ff04d4cd7fce825a3b0644d0bffe981
parent    a3df48ce98f6f37f452eda0b793bec281937782b (diff)
parent    ec0e5c27c42959e428d618a1172e5b602584ff9b (diff)
Merge up to 263992 (ibm/addr2)
git-svn-id: https://gcc.gnu.org/svn/gcc/branches/ibm/addr2@263994 138bc75d-0d04-0410-961f-82ee72b054a4
-rw-r--r--  ChangeLog | 8
-rw-r--r--  MAINTAINERS | 1
-rw-r--r--  config/ChangeLog | 4
-rw-r--r--  contrib/ChangeLog | 8
-rw-r--r--  fixincludes/ChangeLog | 4
-rw-r--r--  gcc/ChangeLog | 910
-rw-r--r--  gcc/ChangeLog.meissner | 10
-rw-r--r--  gcc/DATESTAMP | 2
-rw-r--r--  gcc/REVISION | 2
-rw-r--r--  gcc/ada/ChangeLog | 14
-rw-r--r--  gcc/ada/exp_unst.ads | 2
-rw-r--r--  gcc/ada/gcc-interface/decl.c | 2
-rw-r--r--  gcc/ada/gcc-interface/gigi.h | 2
-rw-r--r--  gcc/ada/gcc-interface/utils.c | 2
-rw-r--r--  gcc/attribs.c | 2
-rw-r--r--  gcc/builtins.c | 69
-rw-r--r--  gcc/builtins.h | 1
-rw-r--r--  gcc/c-family/ChangeLog | 64
-rw-r--r--  gcc/c-family/ChangeLog.meissner | 4
-rw-r--r--  gcc/c-family/c-common.c | 51
-rw-r--r--  gcc/c-family/c-common.h | 2
-rw-r--r--  gcc/c-family/c-format.c | 2
-rw-r--r--  gcc/c-family/c-warn.c | 4
-rw-r--r--  gcc/c-family/c.opt | 4
-rw-r--r--  gcc/c-family/known-headers.cc | 2
-rw-r--r--  gcc/c/ChangeLog | 35
-rw-r--r--  gcc/c/c-decl.c | 27
-rw-r--r--  gcc/c/c-objc-common.c | 3
-rw-r--r--  gcc/c/c-parser.c | 6
-rw-r--r--  gcc/c/c-tree.h | 2
-rw-r--r--  gcc/c/c-typeck.c | 7
-rw-r--r--  gcc/c/gimple-parser.c | 11
-rw-r--r--  gcc/calls.c | 27
-rw-r--r--  gcc/cfg.c | 2
-rw-r--r--  gcc/cfg.h | 60
-rw-r--r--  gcc/cfganal.c | 152
-rw-r--r--  gcc/cfganal.h | 2
-rw-r--r--  gcc/cfgexpand.c | 6
-rw-r--r--  gcc/cfgloop.c | 9
-rw-r--r--  gcc/cgraph.c | 10
-rw-r--r--  gcc/cgraphclones.c | 5
-rw-r--r--  gcc/combine.c | 11
-rw-r--r--  gcc/config/aarch64/aarch64-simd.md | 26
-rw-r--r--  gcc/config/aarch64/aarch64-speculation.cc | 2
-rw-r--r--  gcc/config/aarch64/aarch64.c | 29
-rw-r--r--  gcc/config/aarch64/aarch64.md | 151
-rw-r--r--  gcc/config/aarch64/aarch64.opt | 12
-rw-r--r--  gcc/config/arm/arm-protos.h | 2
-rw-r--r--  gcc/config/arm/arm.c | 17
-rw-r--r--  gcc/config/arm/arm.md | 16
-rw-r--r--  gcc/config/darwin.c | 9
-rw-r--r--  gcc/config/i386/i386-modes.def | 3
-rw-r--r--  gcc/config/i386/i386.c | 265
-rw-r--r--  gcc/config/i386/i386.h | 14
-rw-r--r--  gcc/config/i386/i386.md | 20
-rw-r--r--  gcc/config/i386/predicates.md | 3
-rw-r--r--  gcc/config/mips/frame-header-opt.c | 2
-rw-r--r--  gcc/config/riscv/pic.md | 113
-rw-r--r--  gcc/config/riscv/riscv.c | 8
-rw-r--r--  gcc/config/riscv/riscv.md | 16
-rw-r--r--  gcc/config/rs6000/altivec.md | 4
-rw-r--r--  gcc/config/rs6000/rs6000-string.c | 396
-rw-r--r--  gcc/config/rs6000/rs6000.c | 4
-rw-r--r--  gcc/config/rs6000/rs6000.opt | 4
-rw-r--r--  gcc/config/rs6000/vsx.md | 18
-rw-r--r--  gcc/cp/ChangeLog | 118
-rw-r--r--  gcc/cp/call.c | 24
-rw-r--r--  gcc/cp/constexpr.c | 14
-rw-r--r--  gcc/cp/cp-gimplify.c | 12
-rw-r--r--  gcc/cp/cp-tree.h | 3
-rw-r--r--  gcc/cp/decl.c | 59
-rw-r--r--  gcc/cp/error.c | 2
-rw-r--r--  gcc/cp/name-lookup.c | 6
-rw-r--r--  gcc/cp/parser.c | 28
-rw-r--r--  gcc/cp/pt.c | 4
-rw-r--r--  gcc/cp/rtti.c | 2
-rw-r--r--  gcc/cp/semantics.c | 5
-rw-r--r--  gcc/cp/tree.c | 5
-rw-r--r--  gcc/cp/typeck.c | 51
-rw-r--r--  gcc/cp/typeck2.c | 45
-rw-r--r--  gcc/diagnostic-show-locus.c | 244
-rw-r--r--  gcc/doc/invoke.texi | 77
-rw-r--r--  gcc/dse.c | 3
-rw-r--r--  gcc/dumpfile.h | 2
-rw-r--r--  gcc/dwarf2out.c | 6
-rw-r--r--  gcc/emit-rtl.c | 7
-rw-r--r--  gcc/emit-rtl.h | 3
-rw-r--r--  gcc/explow.c | 3
-rw-r--r--  gcc/expmed.c | 12
-rw-r--r--  gcc/expr.c | 33
-rw-r--r--  gcc/expr.h | 2
-rw-r--r--  gcc/fold-const.c | 9
-rw-r--r--  gcc/fortran/ChangeLog | 74
-rw-r--r--  gcc/fortran/error.c | 2
-rw-r--r--  gcc/fortran/frontend-passes.c | 31
-rw-r--r--  gcc/fortran/gfortran.texi | 3
-rw-r--r--  gcc/fortran/match.c | 20
-rw-r--r--  gcc/fortran/resolve.c | 19
-rw-r--r--  gcc/fortran/trans-expr.c | 125
-rw-r--r--  gcc/fortran/trans-intrinsic.c | 26
-rw-r--r--  gcc/fortran/trans-stmt.c | 14
-rw-r--r--  gcc/function.c | 1
-rw-r--r--  gcc/gcc-rich-location.c | 2
-rw-r--r--  gcc/gcov.c | 15
-rw-r--r--  gcc/genmatch.c | 2
-rw-r--r--  gcc/genmodes.c | 15
-rw-r--r--  gcc/genpreds.c | 2
-rw-r--r--  gcc/gimple-fold.c | 3
-rw-r--r--  gcc/gimple-low.c | 2
-rw-r--r--  gcc/gimple-pretty-print.c | 5
-rw-r--r--  gcc/gimple-ssa-evrp-analyze.c | 7
-rw-r--r--  gcc/gimple-ssa-warn-alloca.c | 46
-rw-r--r--  gcc/gimple-ssa-warn-restrict.c | 2
-rw-r--r--  gcc/gimple.c | 7
-rw-r--r--  gcc/gimplify.c | 17
-rw-r--r--  gcc/go/ChangeLog | 8
-rw-r--r--  gcc/go/go-gcc.cc | 4
-rw-r--r--  gcc/go/gofrontend/MERGE | 2
-rw-r--r--  gcc/go/gofrontend/expressions.cc | 6
-rw-r--r--  gcc/go/gofrontend/types.cc | 8
-rw-r--r--  gcc/hsa-brig.c | 2
-rw-r--r--  gcc/hsa-dump.c | 2
-rw-r--r--  gcc/hsa-gen.c | 24
-rw-r--r--  gcc/hsa-regalloc.c | 4
-rw-r--r--  gcc/ipa-cp.c | 3
-rw-r--r--  gcc/ipa-fnsummary.c | 8
-rw-r--r--  gcc/ipa-param-manipulation.c | 2
-rw-r--r--  gcc/ipa-split.c | 5
-rw-r--r--  gcc/ipa-utils.h | 54
-rw-r--r--  gcc/ipa-visibility.c | 2
-rw-r--r--  gcc/jit/ChangeLog | 2
-rw-r--r--  gcc/lto-cgraph.c | 2
-rw-r--r--  gcc/lto-opts.c | 15
-rw-r--r--  gcc/lto-streamer-out.c | 11
-rw-r--r--  gcc/lto-wrapper.c | 83
-rw-r--r--  gcc/lto/ChangeLog | 47
-rw-r--r--  gcc/lto/lto-lang.c | 3
-rw-r--r--  gcc/lto/lto-symtab.c | 8
-rw-r--r--  gcc/lto/lto.c | 5
-rw-r--r--  gcc/machmode.h | 4
-rw-r--r--  gcc/match.pd | 17
-rw-r--r--  gcc/mode-classes.def | 1
-rw-r--r--  gcc/objc/ChangeLog | 2
-rw-r--r--  gcc/objcp/ChangeLog | 6
-rw-r--r--  gcc/omp-low.c | 7
-rw-r--r--  gcc/params.def | 18
-rw-r--r--  gcc/predict.c | 8
-rw-r--r--  gcc/pretty-print.c | 5
-rw-r--r--  gcc/pretty-print.h | 3
-rw-r--r--  gcc/print-rtl.c | 2
-rw-r--r--  gcc/print-tree.c | 6
-rw-r--r--  gcc/profile-count.c | 2
-rw-r--r--  gcc/sanopt.c | 6
-rw-r--r--  gcc/sreal.c | 59
-rw-r--r--  gcc/sreal.h | 61
-rw-r--r--  gcc/stmt.c | 4
-rw-r--r--  gcc/stor-layout.c | 20
-rw-r--r--  gcc/substring-locations.c | 2
-rw-r--r--  gcc/symtab.c | 2
-rw-r--r--  gcc/testsuite/ChangeLog | 332
-rw-r--r--  gcc/testsuite/ChangeLog.meissner | 4
-rw-r--r--  gcc/testsuite/g++.dg/Walloca1.C | 6
-rw-r--r--  gcc/testsuite/g++.dg/concepts/pr85265.C | 6
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/Wpessimizing-move5.C | 14
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/Wredundant-move1.C | 106
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/Wredundant-move2.C | 57
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/Wredundant-move3.C | 43
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/Wredundant-move4.C | 86
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/auto52.C | 6
-rw-r--r--  gcc/testsuite/g++.dg/cpp1y/auto-fn52.C | 4
-rw-r--r--  gcc/testsuite/g++.dg/cpp1y/auto-fn53.C | 4
-rw-r--r--  gcc/testsuite/g++.dg/cpp1y/auto-fn54.C | 3
-rw-r--r--  gcc/testsuite/g++.dg/cpp1z/decomp47.C | 32
-rw-r--r--  gcc/testsuite/g++.dg/diagnostic/missing-typename.C | 12
-rw-r--r--  gcc/testsuite/g++.dg/diagnostic/param-type-mismatch-2.C | 25
-rw-r--r--  gcc/testsuite/g++.dg/diagnostic/pr86993.C | 13
-rw-r--r--  gcc/testsuite/g++.dg/other/switch4.C | 6
-rw-r--r--  gcc/testsuite/g++.dg/pr85523.C | 3
-rw-r--r--  gcc/testsuite/g++.dg/torture/20180705-1.C | 30
-rw-r--r--  gcc/testsuite/g++.dg/torture/pr87124.C | 12
-rw-r--r--  gcc/testsuite/g++.dg/ubsan/vptr-13.C | 19
-rw-r--r--  gcc/testsuite/gcc.c-torture/compile/dse.c | 19
-rw-r--r--  gcc/testsuite/gcc.c-torture/compile/pr87110.c | 13
-rw-r--r--  gcc/testsuite/gcc.c-torture/execute/memchr-1.c | 153
-rw-r--r--  gcc/testsuite/gcc.c-torture/execute/pr86714.c | 26
-rw-r--r--  gcc/testsuite/gcc.c-torture/execute/pr87053.c | 17
-rw-r--r--  gcc/testsuite/gcc.c-torture/execute/widechar-3.c | 26
-rw-r--r--  gcc/testsuite/gcc.dg/Warray-bounds-35.c | 15
-rw-r--r--  gcc/testsuite/gcc.dg/asan/pr86962.c | 13
-rw-r--r--  gcc/testsuite/gcc.dg/empty.h | 0
-rw-r--r--  gcc/testsuite/gcc.dg/fixits-pr84852-1.c | 5
-rw-r--r--  gcc/testsuite/gcc.dg/fixits-pr84852-2.c | 5
-rw-r--r--  gcc/testsuite/gcc.dg/lvalue-5.c | 2
-rw-r--r--  gcc/testsuite/gcc.dg/missing-header-fixit-3.c | 8
-rw-r--r--  gcc/testsuite/gcc.dg/missing-header-fixit-4.c | 23
-rw-r--r--  gcc/testsuite/gcc.dg/plugin/diagnostic-test-show-locus-bw-line-numbers.c | 1
-rw-r--r--  gcc/testsuite/gcc.dg/plugin/diagnostic-test-show-locus-bw.c | 1
-rw-r--r--  gcc/testsuite/gcc.dg/plugin/diagnostic-test-show-locus-color.c | 1
-rw-r--r--  gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_show_locus.c | 37
-rw-r--r--  gcc/testsuite/gcc.dg/pr79342.c | 18
-rw-r--r--  gcc/testsuite/gcc.dg/pr83666.c | 2
-rw-r--r--  gcc/testsuite/gcc.dg/pr85195.c | 2
-rw-r--r--  gcc/testsuite/gcc.dg/pr85467.c | 2
-rw-r--r--  gcc/testsuite/gcc.dg/pr87009.c | 23
-rw-r--r--  gcc/testsuite/gcc.dg/pr87024.c | 14
-rw-r--r--  gcc/testsuite/gcc.dg/pr87092.c | 10
-rw-r--r--  gcc/testsuite/gcc.dg/pr87099.c | 21
-rw-r--r--  gcc/testsuite/gcc.dg/pr87112.c | 31
-rw-r--r--  gcc/testsuite/gcc.dg/pr87117-1.c | 21
-rw-r--r--  gcc/testsuite/gcc.dg/pr87117-2.c | 15
-rw-r--r--  gcc/testsuite/gcc.dg/strlenopt-57.c | 49
-rw-r--r--  gcc/testsuite/gcc.dg/strlenopt-58.c | 93
-rw-r--r--  gcc/testsuite/gcc.dg/torture/pr81790.c | 1
-rw-r--r--  gcc/testsuite/gcc.dg/torture/pr87132.c | 18
-rw-r--r--  gcc/testsuite/gcc.dg/torture/pr87147.c | 22
-rw-r--r--  gcc/testsuite/gcc.dg/tree-prof/val-prof-10.c | 31
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/pr87126.c | 25
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-14.c | 11
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-46.c | 2
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-67.c | 16
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/switch-2.c | 25
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/switch-3.c | 20
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/vrp105.c | 37
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/vrp92.c | 2
-rw-r--r--  gcc/testsuite/gcc.dg/vect/no-vfa-vect-depend-2.c | 5
-rw-r--r--  gcc/testsuite/gcc.dg/vect/no-vfa-vect-depend-3.c | 5
-rw-r--r--  gcc/testsuite/gcc.dg/vect/pr65947-13.c | 3
-rw-r--r--  gcc/testsuite/gcc.dg/vect/pr80631-2.c | 3
-rw-r--r--  gcc/testsuite/gcc.dg/vect/pr86927.c | 15
-rw-r--r--  gcc/testsuite/gcc.dg/vect/slp-23.c | 6
-rw-r--r--  gcc/testsuite/gcc.dg/vect/slp-37.c | 4
-rw-r--r--  gcc/testsuite/gcc.dg/vect/slp-perm-10.c | 4
-rw-r--r--  gcc/testsuite/gcc.dg/vect/slp-perm-9.c | 4
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/large_struct_copy_2.c | 26
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vect_su_add_sub.c | 26
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/strcmpopt_6.c (renamed from gcc/testsuite/gcc.dg/strcmpopt_6.c) | 0
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/bswap_1.c | 13
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/bswap_2.c | 13
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/bswap_3.c | 13
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/slp_perm_1.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/slp_perm_2.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/slp_perm_3.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/slp_perm_4.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/slp_perm_5.c | 32
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/slp_perm_6.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/slp_perm_7.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/i386/indirect-thunk-register-1.c | 1
-rw-r--r--  gcc/testsuite/gcc.target/i386/pr87065.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/i386/strcmpopt_6.c | 36
-rw-r--r--  gcc/testsuite/gfortran.dg/allocate_with_source_25.f90 | 71
-rw-r--r--  gcc/testsuite/gfortran.dg/associate_3.f03 | 2
-rw-r--r--  gcc/testsuite/gfortran.dg/associate_39.f90 | 19
-rw-r--r--  gcc/testsuite/gfortran.dg/class_result_7.f90 | 36
-rw-r--r--  gcc/testsuite/gfortran.dg/class_result_8.f90 | 41
-rw-r--r--  gcc/testsuite/gfortran.dg/class_result_9.f90 | 45
-rw-r--r--  gcc/testsuite/gfortran.dg/generic_35.f90 | 31
-rw-r--r--  gcc/testsuite/gfortran.dg/implied_do_io_6.f90 | 39
-rw-r--r--  gcc/testsuite/gfortran.dg/matmul_19.f90 | 25
-rw-r--r--  gcc/testsuite/gfortran.dg/pr87117.f90 | 14
-rw-r--r--  gcc/testsuite/gfortran.dg/reassoc_4.f | 2
-rw-r--r--  gcc/testsuite/gfortran.dg/submodule_32.f08 | 62
-rw-r--r--  gcc/testsuite/lib/target-supports.exp | 13
-rw-r--r--  gcc/trans-mem.c | 20
-rw-r--r--  gcc/tree-call-cdce.c | 2
-rw-r--r--  gcc/tree-cfg.c | 92
-rw-r--r--  gcc/tree-cfg.h | 7
-rw-r--r--  gcc/tree-cfgcleanup.c | 7
-rw-r--r--  gcc/tree-core.h | 5
-rw-r--r--  gcc/tree-diagnostic.c | 2
-rw-r--r--  gcc/tree-eh.c | 28
-rw-r--r--  gcc/tree-if-conv.c | 2
-rw-r--r--  gcc/tree-inline.c | 17
-rw-r--r--  gcc/tree-into-ssa.c | 22
-rw-r--r--  gcc/tree-pretty-print.c | 2
-rw-r--r--  gcc/tree-scalar-evolution.c | 4
-rw-r--r--  gcc/tree-sra.c | 3
-rw-r--r--  gcc/tree-ssa-alias.c | 36
-rw-r--r--  gcc/tree-ssa-ccp.c | 13
-rw-r--r--  gcc/tree-ssa-dce.c | 4
-rw-r--r--  gcc/tree-ssa-dom.c | 6
-rw-r--r--  gcc/tree-ssa-dse.c | 12
-rw-r--r--  gcc/tree-ssa-forwprop.c | 8
-rw-r--r--  gcc/tree-ssa-loop-im.c | 4
-rw-r--r--  gcc/tree-ssa-loop-ivcanon.c | 57
-rw-r--r--  gcc/tree-ssa-math-opts.c | 2
-rw-r--r--  gcc/tree-ssa-operands.c | 3
-rw-r--r--  gcc/tree-ssa-pre.c | 97
-rw-r--r--  gcc/tree-ssa-sccvn.c | 3687
-rw-r--r--  gcc/tree-ssa-sccvn.h | 100
-rw-r--r--  gcc/tree-ssa-strlen.c | 8
-rw-r--r--  gcc/tree-ssa-structalias.c | 4
-rw-r--r--  gcc/tree-ssa-ter.c | 2
-rw-r--r--  gcc/tree-ssa-threadedge.c | 4
-rw-r--r--  gcc/tree-ssa-uncprop.c | 2
-rw-r--r--  gcc/tree-ssa-uninit.c | 2
-rw-r--r--  gcc/tree-stdarg.c | 7
-rw-r--r--  gcc/tree-streamer-in.c | 2
-rw-r--r--  gcc/tree-streamer-out.c | 5
-rw-r--r--  gcc/tree-switch-conversion.c | 383
-rw-r--r--  gcc/tree-switch-conversion.h | 47
-rw-r--r--  gcc/tree-tailcall.c | 2
-rw-r--r--  gcc/tree-vect-data-refs.c | 2
-rw-r--r--  gcc/tree-vect-loop.c | 3
-rw-r--r--  gcc/tree-vect-slp.c | 150
-rw-r--r--  gcc/tree-vect-stmts.c | 28
-rw-r--r--  gcc/tree-vrp.c | 263
-rw-r--r--  gcc/tree-vrp.h | 3
-rw-r--r--  gcc/tree.c | 31
-rw-r--r--  gcc/tree.h | 61
-rw-r--r--  gcc/ubsan.c | 2
-rw-r--r--  gcc/varasm.c | 3
-rw-r--r--  gcc/vr-values.c | 15
-rw-r--r--  gcc/wide-int-range.cc | 70
-rw-r--r--  gcc/wide-int-range.h | 29
-rw-r--r--  include/ChangeLog | 12
-rw-r--r--  intl/ChangeLog | 4
-rw-r--r--  libada/ChangeLog | 6
-rw-r--r--  libatomic/ChangeLog | 2
-rw-r--r--  libbacktrace/ChangeLog | 10
-rw-r--r--  libcpp/ChangeLog | 38
-rw-r--r--  libcpp/include/line-map.h | 81
-rw-r--r--  libcpp/line-map.c | 22
-rw-r--r--  libdecnumber/ChangeLog | 14
-rw-r--r--  libffi/ChangeLog | 4
-rw-r--r--  libgcc/ChangeLog | 45
-rw-r--r--  libgcc/ChangeLog.meissner | 4
-rw-r--r--  libgcc/config/arm/lib1funcs.S | 44
-rw-r--r--  libgcc/config/arm/t-arm | 2
-rw-r--r--  libgfortran/ChangeLog | 42
-rw-r--r--  libgfortran/generated/matmul_c10.c | 45
-rw-r--r--  libgfortran/generated/matmul_c16.c | 45
-rw-r--r--  libgfortran/generated/matmul_c4.c | 45
-rw-r--r--  libgfortran/generated/matmul_c8.c | 45
-rw-r--r--  libgfortran/generated/matmul_i1.c | 45
-rw-r--r--  libgfortran/generated/matmul_i16.c | 45
-rw-r--r--  libgfortran/generated/matmul_i2.c | 45
-rw-r--r--  libgfortran/generated/matmul_i4.c | 45
-rw-r--r--  libgfortran/generated/matmul_i8.c | 45
-rw-r--r--  libgfortran/generated/matmul_r10.c | 45
-rw-r--r--  libgfortran/generated/matmul_r16.c | 45
-rw-r--r--  libgfortran/generated/matmul_r4.c | 45
-rw-r--r--  libgfortran/generated/matmul_r8.c | 45
-rw-r--r--  libgfortran/generated/matmulavx128_c10.c | 18
-rw-r--r--  libgfortran/generated/matmulavx128_c16.c | 18
-rw-r--r--  libgfortran/generated/matmulavx128_c4.c | 18
-rw-r--r--  libgfortran/generated/matmulavx128_c8.c | 18
-rw-r--r--  libgfortran/generated/matmulavx128_i1.c | 18
-rw-r--r--  libgfortran/generated/matmulavx128_i16.c | 18
-rw-r--r--  libgfortran/generated/matmulavx128_i2.c | 18
-rw-r--r--  libgfortran/generated/matmulavx128_i4.c | 18
-rw-r--r--  libgfortran/generated/matmulavx128_i8.c | 18
-rw-r--r--  libgfortran/generated/matmulavx128_r10.c | 18
-rw-r--r--  libgfortran/generated/matmulavx128_r16.c | 18
-rw-r--r--  libgfortran/generated/matmulavx128_r4.c | 18
-rw-r--r--  libgfortran/generated/matmulavx128_r8.c | 18
-rw-r--r--  libgfortran/io/async.h | 21
-rw-r--r--  libgfortran/m4/matmul_internal.m4 | 9
-rw-r--r--  libgo/go/reflect/type.go | 1
-rw-r--r--  libgo/go/runtime/cgo_gccgo.go | 10
-rw-r--r--  libgo/go/runtime/hashmap.go | 14
-rw-r--r--  libgo/go/runtime/lock_futex.go | 4
-rw-r--r--  libgo/go/runtime/lock_sema.go | 4
-rw-r--r--  libgo/go/runtime/malloc.go | 6
-rw-r--r--  libgo/go/runtime/norace_test.go | 4
-rw-r--r--  libgo/go/runtime/proc.go | 8
-rw-r--r--  libgo/go/runtime/stubs.go | 16
-rw-r--r--  libgo/go/runtime/type.go | 1
-rw-r--r--  libgo/runtime/proc.c | 18
-rw-r--r--  libgo/runtime/runtime.h | 10
-rw-r--r--  libgomp/ChangeLog | 8
-rw-r--r--  libiberty/ChangeLog | 15
-rw-r--r--  libiberty/pex-unix.c | 163
-rw-r--r--  libobjc/ChangeLog | 12
-rw-r--r--  libsanitizer/ChangeLog | 36
-rw-r--r--  libssp/ChangeLog | 2
-rw-r--r--  libstdc++-v3/ChangeLog | 331
-rw-r--r--  libstdc++-v3/config/abi/pre/gnu.ver | 5
-rw-r--r--  libstdc++-v3/configure.host | 17
-rw-r--r--  libstdc++-v3/include/bits/basic_string.h | 7
-rw-r--r--  libstdc++-v3/include/bits/deque.tcc | 10
-rw-r--r--  libstdc++-v3/include/bits/hashtable_policy.h | 7
-rw-r--r--  libstdc++-v3/include/bits/stl_deque.h | 31
-rw-r--r--  libstdc++-v3/include/bits/stl_vector.h | 34
-rw-r--r--  libstdc++-v3/include/bits/vector.tcc | 1
-rw-r--r--  libstdc++-v3/include/debug/deque | 8
-rw-r--r--  libstdc++-v3/include/debug/formatter.h | 22
-rw-r--r--  libstdc++-v3/include/debug/forward_list | 8
-rw-r--r--  libstdc++-v3/include/debug/functions.h | 109
-rw-r--r--  libstdc++-v3/include/debug/helper_functions.h | 86
-rw-r--r--  libstdc++-v3/include/debug/list | 8
-rw-r--r--  libstdc++-v3/include/debug/map | 8
-rw-r--r--  libstdc++-v3/include/debug/map.h | 3
-rw-r--r--  libstdc++-v3/include/debug/multimap.h | 3
-rw-r--r--  libstdc++-v3/include/debug/multiset.h | 3
-rw-r--r--  libstdc++-v3/include/debug/safe_iterator.h | 761
-rw-r--r--  libstdc++-v3/include/debug/safe_iterator.tcc | 134
-rw-r--r--  libstdc++-v3/include/debug/safe_local_iterator.h | 98
-rw-r--r--  libstdc++-v3/include/debug/safe_local_iterator.tcc | 43
-rw-r--r--  libstdc++-v3/include/debug/set | 6
-rw-r--r--  libstdc++-v3/include/debug/set.h | 3
-rw-r--r--  libstdc++-v3/include/debug/stl_iterator.h | 19
-rw-r--r--  libstdc++-v3/include/debug/string | 58
-rw-r--r--  libstdc++-v3/include/debug/unordered_map | 22
-rw-r--r--  libstdc++-v3/include/debug/unordered_set | 19
-rw-r--r--  libstdc++-v3/include/debug/vector | 15
-rw-r--r--  libstdc++-v3/include/experimental/regex | 3
-rw-r--r--  libstdc++-v3/include/experimental/string | 2
-rw-r--r--  libstdc++-v3/include/ext/pointer.h | 4
-rw-r--r--  libstdc++-v3/include/std/bit | 6
-rw-r--r--  libstdc++-v3/include/std/regex | 2
-rw-r--r--  libstdc++-v3/include/std/string | 2
-rw-r--r--  libstdc++-v3/libsupc++/new | 8
-rwxr-xr-x  libstdc++-v3/scripts/check_compile | 6
-rw-r--r--  libstdc++-v3/src/filesystem/std-path.cc | 19
-rw-r--r--  libstdc++-v3/testsuite/20_util/reference_wrapper/lwg2993.cc | 2
-rw-r--r--  libstdc++-v3/testsuite/21_strings/basic_string/types/pmr_typedefs.cc | 1
-rw-r--r--  libstdc++-v3/testsuite/22_locale/time_get/get_date/wchar_t/4.cc | 24
-rw-r--r--  libstdc++-v3/testsuite/23_containers/deque/capacity/max_size.cc | 146
-rw-r--r--  libstdc++-v3/testsuite/23_containers/deque/modifiers/assign/1.cc | 2
-rw-r--r--  libstdc++-v3/testsuite/23_containers/deque/types/pmr_typedefs_debug.cc | 25
-rw-r--r--  libstdc++-v3/testsuite/23_containers/forward_list/pmr_typedefs_debug.cc | 25
-rw-r--r--  libstdc++-v3/testsuite/23_containers/list/68222_neg.cc | 37
-rw-r--r--  libstdc++-v3/testsuite/23_containers/list/modifiers/assign/1.cc | 2
-rw-r--r--  libstdc++-v3/testsuite/23_containers/list/pmr_typedefs_debug.cc | 25
-rw-r--r--  libstdc++-v3/testsuite/23_containers/map/pmr_typedefs_debug.cc | 26
-rw-r--r--  libstdc++-v3/testsuite/23_containers/multimap/pmr_typedefs_debug.cc | 26
-rw-r--r--  libstdc++-v3/testsuite/23_containers/multiset/pmr_typedefs_debug.cc | 26
-rw-r--r--  libstdc++-v3/testsuite/23_containers/set/pmr_typedefs_debug.cc | 26
-rw-r--r--  libstdc++-v3/testsuite/23_containers/unordered_map/pmr_typedefs_debug.cc | 26
-rw-r--r--  libstdc++-v3/testsuite/23_containers/unordered_multimap/pmr_typedefs_debug.cc | 27
-rw-r--r--  libstdc++-v3/testsuite/23_containers/unordered_multiset/pmr_typedefs_debug.cc | 26
-rw-r--r--  libstdc++-v3/testsuite/23_containers/unordered_set/debug/debug_functions.cc | 26
-rw-r--r--  libstdc++-v3/testsuite/23_containers/unordered_set/pmr_typedefs_debug.cc | 26
-rw-r--r--  libstdc++-v3/testsuite/23_containers/vector/bool/modifiers/assign/1.cc | 2
-rw-r--r--  libstdc++-v3/testsuite/23_containers/vector/capacity/max_size.cc | 146
-rw-r--r--  libstdc++-v3/testsuite/23_containers/vector/cons/destructible_debug_neg.cc | 2
-rw-r--r--  libstdc++-v3/testsuite/23_containers/vector/debug/debug_functions.cc | 23
-rw-r--r--  libstdc++-v3/testsuite/23_containers/vector/modifiers/assign/1.cc | 2
-rw-r--r--  libstdc++-v3/testsuite/23_containers/vector/types/pmr_typedefs_debug.cc | 25
-rw-r--r--  libstdc++-v3/testsuite/25_algorithms/fill_n/2.cc | 2
-rw-r--r--  libstdc++-v3/testsuite/25_algorithms/partial_sort_copy/debug/irreflexive_neg.cc | 4
-rw-r--r--  libstdc++-v3/testsuite/26_numerics/bit/bit.pow.two/ceil2.cc | 8
-rw-r--r--  libstdc++-v3/testsuite/27_io/filesystem/path/generation/normal.cc | 63
-rw-r--r--  libstdc++-v3/testsuite/28_regex/match_results/pmr_typedefs.cc | 1
-rw-r--r--  libstdc++-v3/testsuite/experimental/polymorphic_allocator/pmr_typedefs_match.cc | 1
-rw-r--r--  libstdc++-v3/testsuite/experimental/polymorphic_allocator/pmr_typedefs_string.cc | 1
-rw-r--r--  libstdc++-v3/testsuite/ext/ext_pointer/1.cc | 14
-rw-r--r--  libstdc++-v3/testsuite/util/testsuite_allocator.h | 6
-rw-r--r--  libstdc++-v3/testsuite/util/testsuite_containers.h | 72
-rw-r--r--  libvtv/ChangeLog | 2
-rw-r--r--  lto-plugin/ChangeLog | 10
451 files changed, 12039 insertions, 4592 deletions
diff --git a/ChangeLog b/ChangeLog
index cbc5e2c966c..c16fa1d6c4a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+2018-08-29 Vlad Lazar <vlad.lazar@arm.com>
+
+ * MAINTAINERS (write after approval): Add myself.
+
2018-08-21 Richard Sandiford <richard.sandiford@arm.com>
* MAINTAINERS: Add self to global reviewers list.
@@ -9,7 +13,7 @@
2018-08-10 Martin Liska <mliska@suse.cz>
* MAINTAINERS: Revert change in previous commit and
- join lines.
+ join lines.
2018-08-10 Martin Liska <mliska@suse.cz>
@@ -51,7 +55,7 @@
2018-06-19 Bernhard M. Wiedemann <bwiedemann@suse.de>
- * libtool.m4: Sort output of 'find' to enable deterministic builds.
+ * libtool.m4: Sort output of 'find' to enable deterministic builds.
* ltmain.sh: Likewise.
2018-07-03 Segher Boessenkool <segher@kernel.crashing.org>
diff --git a/MAINTAINERS b/MAINTAINERS
index 6b7d105a23e..25059609c78 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -459,6 +459,7 @@ Terry Laurenzo <tlaurenzo@gmail.com>
Alan Lawrence <alan.lawrence@arm.com>
Jozef Lawrynowicz <jozef.l@mittosystems.com>
Georg-Johann Lay <avr@gjlay.de>
+Vlad Lazar <vlad.lazar@arm.com>
Marc Lehmann <pcg@goof.com>
James Lemke <jim@lemke.org>
Kriang Lerdsuwanakij <lerdsuwa@users.sourceforge.net>
diff --git a/config/ChangeLog b/config/ChangeLog
index 66bae696598..70b3f724026 100644
--- a/config/ChangeLog
+++ b/config/ChangeLog
@@ -14,7 +14,7 @@
2018-05-09 Joshua Watt <jpewhacker@gmail.com>
- * ax_pthread.m4: Add file.
+ * ax_pthread.m4: Add file.
2018-05-08 Richard Biener <rguenther@suse.de>
@@ -451,7 +451,7 @@
* config/mh-interix: Remove as unneeded.
* config/picflag.m4 (i[[34567]]86-*-interix3*):
- Change triplet to i[[34567]]86-*-interix[[3-9]]*.
+ Change triplet to i[[34567]]86-*-interix[[3-9]]*.
2012-01-04 Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
diff --git a/contrib/ChangeLog b/contrib/ChangeLog
index 97ed2002247..21139150c9f 100644
--- a/contrib/ChangeLog
+++ b/contrib/ChangeLog
@@ -73,7 +73,7 @@
* update-copyright.py: Skip pdt-5.f03 in gfortran.dg subdir.
2017-11-28 Julia Koval <julia.koval@intel.com>
- Sebastian Peryt <sebastian.peryt@intel.com>
+ Sebastian Peryt <sebastian.peryt@intel.com>
* contrib/gcc_update: Ditto.
@@ -183,8 +183,8 @@
2017-04-11 Damian Rouson <damian@sourceryinstitute.org>
- * download_prerequisites (md5_check): New function emulates Linux
- 'md5 --check' on macOS. Modified script for macOS compatibility.
+ * download_prerequisites (md5_check): New function emulates Linux
+ 'md5 --check' on macOS. Modified script for macOS compatibility.
2017-02-06 Palmer Dabbelt <palmer@dabbelt.com>
@@ -283,7 +283,7 @@
2016-09-20 Christophe Lyon <christophe.lyon@linaro.org>
* compare_tests: Take ERROR messages into account when
- comparing.
+ comparing.
2016-08-17 Martin Liska <mliska@suse.cz>
diff --git a/fixincludes/ChangeLog b/fixincludes/ChangeLog
index 8dade5c657d..03b459c3db8 100644
--- a/fixincludes/ChangeLog
+++ b/fixincludes/ChangeLog
@@ -1256,7 +1256,7 @@
* tests/base/math.h: Update.
2007-01-05 Bruce Korb <bkorb@gnu.org>,
- Daniel Franke <franke.daniel@gmail.com>
+ Daniel Franke <franke.daniel@gmail.com>
PR target/30008
* fixincl.tpl (List): separate file name patterns with a NUL byte instead
@@ -1472,7 +1472,7 @@
* fixincl.c: Don't include <sys/wait.h> if SEPARATE_FIX_PROC.
2005-05-19 Eric Botcazou <ebotcazou@libertysurf.fr>
- Joseph S. Myers <joseph@codesourcery.com>
+ Joseph S. Myers <joseph@codesourcery.com>
PR target/19933
PR target/21315
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 606fb5f6db5..5b49b2ae1c3 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,899 @@
+2018-08-30 Aaron Sawdey <acsawdey@linux.ibm.com>
+
+ * config/rs6000/altivec.md (altivec_eq<mode>): Remove star.
+ (altivec_vcmpequ<VI_char>_p): Remove star.
+ * config/rs6000/rs6000-string.c (do_load_for_compare): Support
+ vector load modes.
+ (expand_strncmp_vec_sequence): New function.
+ (emit_final_str_compare_vec): New function.
+ (expand_strn_compare): Add support for vector strncmp.
+ * config/rs6000/rs6000.opt (-mstring-compare-inline-limit): Change
+ length specification to bytes.
+ * config/rs6000/vsx.md (vsx_ld_elemrev_v16qi_internal): Remove star.
+ (vcmpnezb_p): New pattern.
+ * doc/invoke.texi (RS/6000 and PowerPC Options): Update documentation
+ for option -mstring-compare-inline-limit.
+
+2018-08-30 Thiago Macieira <thiago.macieira@intel.com>
+
+ * config/i386/i386.c (PTA_WESTMERE): Remove PTA_AES.
+ (PTA_SKYLAKE): Add PTA_AES.
+ (PTA_GOLDMONT): Likewise.
+
+2018-08-29 Jan Hubicka <jh@suse.cz>
+
+ PR lto/86517
+ * lto-opts.c (lto_write_options): Always stream PIC/PIE mode.
+ * lto-wrapper.c (merge_and_complain): Fix merging of PIC/PIE.
+
+2018-08-29 Jan Hubicka <jh@suse.cz>
+
+ * lto-streamer-out.c (DFS::DFS_write_tree_body): Do not follow
+ TYPE_STUB_DECL.
+ (hash_tree): Do not visit TYPE_STUB_DECL.
+ * tree-streamer-out.c (write_ts_type_common_tree_pointers): Do not
+ stream TYPE_STUB_DECL.
+ * tree-streamer-in.c (lto_input_ts_type_common_tree_pointers): Likewise.
+ * ipa-utils.h (type_with_linkage_p): Do not rely on TYPE_STUB_DECL
+ after free_lang_data.
+ (type_in_anonymous_namespace_p): Likewise.
+
+2018-08-29 Jan Hubicka <jh@suse.cz>
+
+ * sreal.h (SREAL_PART_BITS): Change to 31; remove seemingly unnecessary
+ comment that it has to be an even number.
+ (class sreal): Change m_sig type to int32_t.
+ * sreal.c (sreal::dump, sreal::to_int, operator+, operator-): Use
+ int64_t for temporary calculations.
+ (sreal_verify_basics): Drop one bit from minimum and maximum.
+
+2018-08-30 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/87147
+ * tree-ssa-sccvn.c (SSA_VISITED): New function.
+ (visit_phi): When the degenerate result is from the backedge and
+ we didn't visit its definition yet drop to VARYING.
+ (do_rpo_vn): Properly mark blocks with incoming backedges as executable.
+
+2018-08-29 Jan Hubicka <jh@suse.cz>
+
+ * lto-streamer-out.c (DFS::DFS_write_tree_body): Do not walk
+ DECL_VINDEX.
+ (hash_tree): Likewise.
+
+2018-08-29 Jan Hubicka <jh@suse.cz>
+
+ * tree.c (find_decls_types_r): Walk also TYPE_NEXT_PTR_TO
+ and TYPE_NEXT_REF_TO.
+
+2018-08-29 Jan Hubicka <jh@suse.cz>
+
+ * sreal.h (SREAL_PART_BITS): Change to 31; remove seemingly unnecessary
+ comment that it has to be an even number.
+ (class sreal): Change m_sig type to int32_t.
+ * sreal.c (sreal::dump, sreal::to_int, operator+, operator-): Use
+ int64_t for temporary calculations.
+ (sreal_verify_basics): Drop one bit from minimum and maximum.
+
+2018-08-30 Tamar Christina <tamar.christina@arm.com>
+
+ * config/aarch64/aarch64.c (aarch64_expand_movmem): Set TImode max.
+
+2018-08-30 Vlad Lazar <vlad.lazar@arm.com>
+
+ PR middle-end/86995
+ * expmed.c (canonicalize_comparison): Use wi::sub instead of wi::add
+ if to_add is negative.
+
+2018-08-29 Bernd Edlinger <bernd.edlinger@hotmail.de>
+
+ PR middle-end/87053
+ * builtins.c (c_strlen): Improve range checks.
+
+2018-08-29 Martin Sebor <msebor@redhat.com>
+ Jeff Law <law@redhat.com>
+
+ PR tree-optimization/86714
+ PR tree-optimization/86711
+ * builtins.c (c_strlen): Add arguments to call to string_constant.
+ * expr.c (string_constant): Add argument. Detect missing nul
+ terminator and outermost declaration it's missing in.
+ * expr.h (string_constant): Add argument.
+ * fold-const.c (read_from_constant_string): Add arguments to call to
+ string_constant.
+ (c_getstr): Likewise.
+ * tree-ssa-forwprop.c (simplify_builtin_call): Add arguments to calls
+ to string_constant.
+ * tree-ssa-strlen.c (get_stridx): Likewise.
+
+2018-08-29 Jan Hubicka <jh@suse.cz>
+
+ * tree-streamer-in.c (lto_input_ts_function_decl_tree_pointers):
+ Do not stream DECL_VINDEX.
+ * tree-streamer-out.c (write_ts_function_decl_tree_pointers): Likewise.
+ * tree.c (free_lang_data_in_decl): Clear DECL_VINDEX.
+ (decl_function_context): Use DECL_VIRTUAL_P rather than DECL_VINDEX.
+
+2018-08-29 Richard Biener <rguenther@suse.de>
+
+ * tree-ssa-sccvn.c (vuse_ssa_val): Return NULL for unvisited
+ virtual operands that are not default defs to honor region
+ boundaries.
+ (rpo_vn_valueize): Remove ineffective code here.
+
+2018-08-29 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/87132
+ * tree-ssa-alias.c (get_continuation_for_phi): Do not translate
+ when skipping defs reachable over backedges.
+
+2018-08-29 Richard Biener <rguenther@suse.de>
+
+ * tree-core.h: Document use of deprecated_flag in SSA_NAME.
+ * tree.h (SSA_NAME_POINTS_TO_READONLY_MEMORY): Define.
+ * tree-into-ssa.c (pass_build_ssa::execute): Initialize
+ function parameters SSA_NAME_POINTS_TO_READONLY_MEMORY from fnspec.
+ * tree-ssa-sccvn.c (const_parms, init_const_parms): Remove.
+ (vn_reference_lookup_3): Remove use of const_parms.
+ (free_rpo_vn): Do not free const_parms.
+ (do_rpo_vn): Do not call init_const_parms.
+ * tree-ssa-alias.c (refs_may_alias_p_1): Honor
+ SSA_NAME_POINTS_TO_READONLY_MEMORY.
+ (call_may_clobber_ref_p_1): Likewise.
+
+2018-08-29 Alexander Monakov <amonakov@ispras.ru>
+
+ PR other/86726
+ * doc/invoke.texi (Optimization Options): List -ftree-scev-cprop.
+ (-O): Ditto.
+ (-ftree-scev-cprop): Document.
+
+2018-08-29 Jan Hubicka <jh@suse.cz>
+
+ * sreal.h (normalize, normalize_up, normalize_down): Add new_sig/new_exp
+ parameters.
+ (sreal constructor): Update.
+ * sreal.c (sreal::operator+, sreal::operator-, sreal::operator*,
+ sreal::operator/): Update.
+
+2018-08-29 Martin Liska <mliska@suse.cz>
+
+ * tree-switch-conversion.c (switch_conversion::expand):
+ Strengthen assumption about gswitch statements.
+
+2018-08-29 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/87117
+ * tree-ssa-sccvn.c (eliminate_dom_walker::eliminate_stmt): Only
+ re-value-number released SSA VDEFs.
+
+2018-08-29 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/87126
+ * tree-ssa-sccvn.c (vn_reference_insert): Remove assert.
+
+2018-08-28 Jim Wilson <jimw@sifive.com>
+
+ * config/riscv/pic.md: Rewrite.
+ * config/riscv/riscv.c (riscv_address_insns): Return cost of 3 for
+ invalid address.
+ * config/riscv/riscv.md (ZERO_EXTEND_LOAD): Delete.
+ (SOFTF, default_load, softload, softstore): New.
+
+2018-08-28 Jeff Law <law@redhat.com>
+
+ * fold-const.c (fold_binary_loc): Remove recently added assert.
+
+2018-08-28 Joern Rennecke <joern.rennecke@riscy-ip.com>
+
+ * genpreds.c (write_predicate_subfunction): Also add ATTRIBUTE_UNUSED
+ to OP parameter of generated function.
+
+2018-08-28 MCC CS <deswurstes@users.noreply.github.com>
+
+ PR tree-optimization/87009
+ * match.pd: Add boolean optimizations.
+
+2018-08-28 Martin Sebor <msebor@redhat.com>
+
+ PR middle-end/86631
+ * calls.c (alloc_max_size): Treat HOST_WIDE_INT special.
+ * gimple-ssa-warn-alloca.c (adjusted_warn_limit): New function.
+ (pass_walloca::gate): Use it.
+ (alloca_call_type): Same.
+ (pass_walloca::execute): Same.
+ * stor-layout.c (layout_decl): Treat HOST_WIDE_INT special.
+
+2018-08-28 David Malcolm <dmalcolm@redhat.com>
+
+ * dumpfile.h (ATTRIBUTE_GCC_DUMP_PRINTF): Change version check on
+ GCC_VERSION for usage of "__gcc_dump_printf__" format from
+ >= 3005 to >= 9000.
+
+2018-08-28 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/87124
+ * tree-ssa-sccvn.c (vn_lookup_simplify_result): Guard against
+ constants before looking up avail.
+
+2018-08-28 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/87099
+ * calls.c (maybe_warn_nonstring_arg): Punt early if
+ warn_stringop_overflow is zero. Don't call get_range_strlen
+ on 3rd argument, keep iterating until lenrng[1] is INTEGER_CST.
+ Swap comparison operands to have constants on rhs. Only use
+ lenrng[1] if non-NULL and INTEGER_CST. Don't uselessly
+ increment lenrng[0].
+
+2018-08-28 Richard Sandiford <richard.sandiford@arm.com>
+
+ * tree-ssa-sccvn.c (fully_constant_vn_reference_p): Fix unguarded
+ use of tree_to_shwi. Remove duplicated test for the size being
+ a whole number of bytes.
+
+2018-08-28 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/87117
+ * tree-ssa-sccvn.c (eliminate_dom_walker::eliminate_cleanup):
+ Handle removed stmt without LHS (GIMPLE_NOP).
+
+2018-08-28 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/87117
+ * tree-ssa-sccvn.c (fully_constant_vn_reference_p): Exclude
+ void which is is_gimple_reg_type by checking for COMPLETE_TYPE_P.
+
+2018-08-28 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/87117
+ * tree-ssa-pre.c (compute_avail): Do not make expressions
+ with predicated values available.
+ (get_expr_value_id): Assert we do not run into predicated value
+ expressions.
+
+2018-08-28 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/87117
+ * tree-ssa-operands.c (add_stmt_operand): STRING_CST may
+ get virtual operands.
+ (get_expr_operands): Handle STRING_CST like other decls.
+
+2018-08-28 Martin Liska <mliska@suse.cz>
+
+ * tree.h: Update documentation of fndecl_built_in_p
+ functions.
+
+2018-08-27 Jeff Law <law@redhat.com>
+
+ PR tree-optimization/87110
+ * tree-ssa-dse.c (compute_trims): Handle non-constant
+ TYPE_SIZE_UNIT.
+
+2018-08-27 Martin Sebor <msebor@redhat.com>
+
+ PR tree-optimization/86914
+ * tree-ssa-strlen.c (maybe_set_strlen_range): Avoid MEM_REF.
+
+2018-08-27 Martin Sebor <msebor@redhat.com>
+
+ PR tree-optimization/87112
+ * builtins.c (expand_builtin_strnlen): Convert c_strlen result to
+ the type of the bound argument.
+
+2018-08-27 Jeff Law <law@redhat.com>
+
+ * tree-ssa-dse.c (compute_trims): Handle case where the reference's
+ type does not have a TYPE_SIZE_UNIT.
+
+2018-08-27 Steve Ellcey <sellcey@cavium.com>
+
+ * config/aarch64/aarch64-speculation.cc: Replace include of cfg.h
+ with include of backend.h.
+
+2018-08-27 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/86927
+ * tree-vect-loop.c (vect_create_epilog_for_reduction): Properly
+ use const cond reduction code.
+
+2018-08-27 Alexander Monakov <amonakov@ispras.ru>
+
+ PR tree-optimization/85758
+ * match.pd ((X & Y) ^ Y): Add :s qualifier to inner expression.
+
+2018-08-27 David Malcolm <dmalcolm@redhat.com>
+
+ PR c++/87091
+ * diagnostic-show-locus.c (class layout_range): Update for
+ conversion of show_caret_p to a tri-state.
+ (layout_range::layout_range): Likewise.
+ (make_range): Likewise.
+ (layout::maybe_add_location_range): Likewise.
+ (layout::should_print_annotation_line_p): Don't show annotation
+ lines for ranges that are SHOW_LINES_WITHOUT_RANGE.
+ (layout::get_state_at_point): Update for conversion of
+ show_caret_p to a tri-state. Bail out early for
+ SHOW_LINES_WITHOUT_RANGE, so that such ranges don't affect
+ underlining or source colorization.
+ (gcc_rich_location::add_location_if_nearby): Update for conversion
+ of show_caret_p to a tri-state.
+ (selftest::test_one_liner_multiple_carets_and_ranges): Likewise.
+ (selftest::test_one_liner_fixit_replace_equal_secondary_range):
+ Likewise.
+ (selftest::test_one_liner_labels): Likewise.
+ * gcc-rich-location.c (gcc_rich_location::add_expr): Update for
+ conversion of show_caret_p to a tri-state.
+ * pretty-print.c (text_info::set_location): Likewise.
+ * pretty-print.h (text_info::set_location): Likewise.
+ * substring-locations.c (format_warning_n_va): Likewise.
+ * tree-diagnostic.c (default_tree_printer): Likewise.
+ * tree-pretty-print.c (newline_and_indent): Likewise.
+
+2018-08-27 David Malcolm <dmalcolm@redhat.com>
+
+ PR c++/87091
+ * diagnostic-show-locus.c (get_line_span_for_fixit_hint): Show the
+ line above for line-insertion fix-it hints.
+ (selftest::test_fixit_insert_containing_newline): Update the
+ expected results, and add a test with line-numbering enabled.
+
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ PR sanitizer/86962
+ * sanopt.c (sanitize_rewrite_addressable_params): Ignore
+ params with DECL_HAS_VALUE_EXPR_P.
+
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ * config/i386/i386.c (ix86_expand_set_or_movmem): Dump
+ selected expansion strategy.
+
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ * builtins.h (is_builtin_fn): Remove and replace with fndecl_built_in_p.
+ * builtins.c (is_builtin_fn): Likewise.
+ * attribs.c (diag_attr_exclusions): Use new function
+ fndecl_built_in_p and remove check for FUNCTION_DECL if
+ possible.
+ (builtin_mathfn_code): Likewise.
+ (fold_builtin_expect): Likewise.
+ (fold_call_expr): Likewise.
+ (fold_builtin_call_array): Likewise.
+ (fold_call_stmt): Likewise.
+ (set_builtin_user_assembler_name): Likewise.
+ (is_simple_builtin): Likewise.
+ * calls.c (gimple_alloca_call_p): Likewise.
+ (maybe_warn_nonstring_arg): Likewise.
+ * cfgexpand.c (expand_call_stmt): Likewise.
+ * cgraph.c (cgraph_update_edges_for_call_stmt_node): Likewise.
+ (cgraph_edge::verify_corresponds_to_fndecl): Likewise.
+ (cgraph_node::verify_node): Likewise.
+ * cgraphclones.c (build_function_decl_skip_args): Likewise.
+ (cgraph_node::create_clone): Likewise.
+ * config/arm/arm.c (arm_insert_attributes): Likewise.
+ * config/i386/i386.c (ix86_gimple_fold_builtin): Likewise.
+ * dse.c (scan_insn): Likewise.
+ * expr.c (expand_expr_real_1): Likewise.
+ * fold-const.c (operand_equal_p): Likewise.
+ (fold_binary_loc): Likewise.
+ * gimple-fold.c (gimple_fold_stmt_to_constant_1): Likewise.
+ * gimple-low.c (lower_stmt): Likewise.
+ * gimple-pretty-print.c (dump_gimple_call): Likewise.
+ * gimple-ssa-warn-restrict.c (wrestrict_dom_walker::check_call): Likewise.
+ * gimple.c (gimple_build_call_from_tree): Likewise.
+ (gimple_call_builtin_p): Likewise.
+ (gimple_call_combined_fn): Likewise.
+ * gimplify.c (gimplify_call_expr): Likewise.
+ (gimple_boolify): Likewise.
+ (gimplify_modify_expr): Likewise.
+ (gimplify_addr_expr): Likewise.
+ * hsa-gen.c (gen_hsa_insns_for_call): Likewise.
+ * ipa-cp.c (determine_versionability): Likewise.
+ * ipa-fnsummary.c (compute_fn_summary): Likewise.
+ * ipa-param-manipulation.c (ipa_modify_formal_parameters): Likewise.
+ * ipa-split.c (visit_bb): Likewise.
+ (split_function): Likewise.
+ * ipa-visibility.c (cgraph_externally_visible_p): Likewise.
+ * lto-cgraph.c (input_node): Likewise.
+ * lto-streamer-out.c (write_symbol): Likewise.
+ * omp-low.c (setjmp_or_longjmp_p): Likewise.
+ (lower_omp_1): Likewise.
+ * predict.c (strip_predict_hints): Likewise.
+ * print-tree.c (print_node): Likewise.
+ * symtab.c (symtab_node::output_to_lto_symbol_table_p): Likewise.
+ * trans-mem.c (is_tm_irrevocable): Likewise.
+ (is_tm_load): Likewise.
+ (is_tm_simple_load): Likewise.
+ (is_tm_store): Likewise.
+ (is_tm_simple_store): Likewise.
+ (is_tm_abort): Likewise.
+ (tm_region_init_1): Likewise.
+ * tree-call-cdce.c (gen_shrink_wrap_conditions): Likewise.
+ * tree-cfg.c (verify_gimple_call): Likewise.
+ (move_stmt_r): Likewise.
+ (stmt_can_terminate_bb_p): Likewise.
+ * tree-eh.c (lower_eh_constructs_2): Likewise.
+ * tree-if-conv.c (if_convertible_stmt_p): Likewise.
+ * tree-inline.c (remap_gimple_stmt): Likewise.
+ (copy_bb): Likewise.
+ (estimate_num_insns): Likewise.
+ (fold_marked_statements): Likewise.
+ * tree-sra.c (scan_function): Likewise.
+ * tree-ssa-ccp.c (surely_varying_stmt_p): Likewise.
+ (optimize_stack_restore): Likewise.
+ (pass_fold_builtins::execute): Likewise.
+ * tree-ssa-dce.c (mark_stmt_if_obviously_necessary): Likewise.
+ (mark_all_reaching_defs_necessary_1): Likewise.
+ * tree-ssa-dom.c (dom_opt_dom_walker::optimize_stmt): Likewise.
+ * tree-ssa-forwprop.c (simplify_builtin_call): Likewise.
+ (pass_forwprop::execute): Likewise.
+ * tree-ssa-loop-im.c (stmt_cost): Likewise.
+ * tree-ssa-math-opts.c (pass_cse_reciprocals::execute): Likewise.
+ * tree-ssa-sccvn.c (fully_constant_vn_reference_p): Likewise.
+ * tree-ssa-strlen.c (get_string_length): Likewise.
+ * tree-ssa-structalias.c (handle_lhs_call): Likewise.
+ (find_func_aliases_for_call): Likewise.
+ * tree-ssa-ter.c (find_replaceable_in_bb): Likewise.
+ * tree-stdarg.c (optimize_va_list_gpr_fpr_size): Likewise.
+ * tree-tailcall.c (find_tail_calls): Likewise.
+ * tree.c (need_assembler_name_p): Likewise.
+ (free_lang_data_in_decl): Likewise.
+ (get_call_combined_fn): Likewise.
+ * ubsan.c (is_ubsan_builtin_p): Likewise.
+ * varasm.c (incorporeal_function_p): Likewise.
+ * tree.h (DECL_BUILT_IN): Remove and replace with
+ fndecl_built_in_p.
+ (DECL_BUILT_IN_P): Transform to fndecl_built_in_p.
+ (fndecl_built_in_p): New.
+
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ PR tree-optimization/86847
+ * tree-switch-conversion.c (switch_decision_tree::dump_case_nodes):
+ Dump also subtree probability.
+ (switch_decision_tree::do_jump_if_equal): New function.
+ (switch_decision_tree::emit_case_nodes): Handle special
+ situations in balanced tree that can be emitted much simpler.
+ Fix calculation of probabilities that happen in tree expansion.
+ * tree-switch-conversion.h (struct cluster): Add
+ is_single_value_p.
+ (struct simple_cluster): Likewise.
+ (struct case_tree_node): Add new function has_child.
+ (do_jump_if_equal): New.
+
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ * tree-switch-conversion.c (bit_test_cluster::find_bit_tests):
+ Add new argument to bit_test_cluster constructor.
+ (bit_test_cluster::emit): Set bits to the real number of values
+ handled by a test.
+ (bit_test_cluster::hoist_edge_and_branch_if_true): Add
+ probability argument.
+ * tree-switch-conversion.h (struct bit_test_cluster):
+ Add m_handles_entire_switch.
+
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ PR tree-optimization/86702
+ * tree-switch-conversion.c (jump_table_cluster::emit):
+ Make probabilities even for values in jump table
+ according to number of cases handled.
+ (switch_decision_tree::compute_cases_per_edge): Pass
+ argument to reset_out_edges_aux function.
+ (switch_decision_tree::analyze_switch_statement): Likewise.
+ * tree-switch-conversion.h (switch_decision_tree::reset_out_edges_aux):
+ Make it static.
+
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ * cfgexpand.c (expand_asm_stmt): Use label_to_block and pass
+ cfun argument explicitly.
+ * gimple-pretty-print.c (dump_gimple_switch): Likewise.
+ * hsa-gen.c (gen_hsa_insns_for_switch_stmt): Use new
+ function gimple_switch_default_bb.
+ (convert_switch_statements):
+ (expand_builtins):
+ * ipa-fnsummary.c (set_switch_stmt_execution_predicate):
+ * stmt.c (label_to_block_fn): Use label_to_block and pass
+ cfun argument explicitly and use gimple_switch_label_bb.
+ (expand_case): Likewise.
+ * tree-cfg.c (lower_phi_internal_fn): Use label_to_block and pass
+ cfun argument explicitly. Likewise.
+ (make_edges_bb): Likewise.
+ (make_cond_expr_edges): Likewise.
+ (get_cases_for_edge): Likewise.
+ (make_gimple_switch_edges): Likewise.
+ (label_to_block_fn): Likewise.
+ (label_to_block): Likewise.
+ (make_goto_expr_edges): Likewise.
+ (make_gimple_asm_edges): Likewise.
+ (main_block_label): Likewise.
+ (group_case_labels_stmt): Likewise.
+ (find_taken_edge_computed_goto): Likewise.
+ (find_taken_edge_switch_expr): Likewise.
+ (gimple_verify_flow_info): Likewise.
+ (gimple_redirect_edge_and_branch): Likewise.
+ (gimple_switch_label_bb): New function.
+ (gimple_switch_default_bb): Likewise.
+ (gimple_switch_edge): Likewise.
+ (gimple_switch_default_edge): Likewise.
+ * tree-cfg.h (label_to_block_fn): Remove and replace ...
+ (label_to_block): ... with this.
+ (gimple_switch_label_bb): New.
+ (gimple_switch_default_bb): Likewise.
+ (gimple_switch_edge): Likewise.
+ (gimple_switch_default_edge): Likewise.
+ * tree-cfgcleanup.c (convert_single_case_switch): Use
+ new gimple functions and pass new argument to label_to_block.
+ (cleanup_control_flow_bb):
+ * tree-eh.c (make_eh_dispatch_edges): Use label_to_block and pass
+ cfun argument explicitly.
+ (make_eh_edges): Likewise.
+ (redirect_eh_dispatch_edge): Likewise.
+ (lower_resx): Likewise.
+ (lower_eh_dispatch): Likewise.
+ (maybe_remove_unreachable_handlers): Likewise.
+ (unsplit_eh): Likewise.
+ (cleanup_empty_eh): Likewise.
+ (verify_eh_edges): Likewise.
+ (verify_eh_dispatch_edge): Likewise.
+ * tree-ssa-dom.c (record_edge_info): Likewise.
+ * tree-ssa-forwprop.c (simplify_gimple_switch_label_vec): Likewise.
+ * tree-ssa-threadedge.c (thread_around_empty_blocks): Likewise.
+ (thread_through_normal_block): Likewise.
+ * tree-ssa-uncprop.c (associate_equivalences_with_edges): Likewise.
+ * tree-ssa-uninit.c (convert_control_dep_chain_into_preds):
+ * tree-switch-conversion.c (switch_conversion::collect): Use new
+ gimple functions.
+ (switch_conversion::check_final_bb): Likewise.
+ (switch_conversion::gather_default_values): Pass new argument
+ to label_to_block.
+ (switch_conversion::build_constructors): Likewise.
+ (switch_decision_tree::compute_cases_per_edge): Use new
+ gimple_switch_edge function.
+ (switch_decision_tree::analyze_switch_statement): Pass new argument
+ to label_to_block.
+ (switch_decision_tree::try_switch_expansion): Use
+ gimple_switch_default_edge.
+ * tree-vrp.c (find_switch_asserts): Pass new argument
+ to label_to_block.
+ * vr-values.c (vr_values::vrp_visit_switch_stmt): Likewise.
+ (vr_values::simplify_switch_using_ranges): Likewise.
+
+2018-08-27 Richard Biener <rguenther@suse.de>
+
+ * cfganal.h (rev_post_order_and_mark_dfs_back_seme): Declare.
+ * cfganal.c (rev_post_order_and_mark_dfs_back_seme): New function.
+
+ * tree-ssa-sccvn.h (struct vn_pval): New structure.
+ (struct vn_nary_op_s): Add unwind_to member. Add
+ predicated_values flag and put result into a union together
+ with a linked list of vn_pval.
+ (struct vn_ssa_aux): Add name member to make maintaining
+ a map of SSA name to vn_ssa_aux possible. Remove no longer
+ needed info, dfsnum, low, visited, on_sccstack, use_processed
+ and range_info_anti_range_p members.
+ (run_scc_vn, vn_eliminate, free_scc_vn, vn_valueize): Remove.
+ (do_rpo_vn, run_rpo_vn, eliminate_with_rpo_vn, free_rpo_vn):
+ New functions.
+ (vn_valueize): New global.
+ (vn_context_bb): Likewise.
+ (VN_INFO_RANGE_INFO, VN_INFO_ANTI_RANGE_P, VN_INFO_RANGE_TYPE,
+ VN_INFO_PTR_INFO): Remove.
+ * tree-ssa-sccvn.c: ... (rewrite)
+ (pass_fre::execute): For -O2+ initialize loops and run
+ RPO VN in optimistic mode (iterating). For -O1 and -Og
+ run RPO VN in non-optimistic mode.
+ * params.def (PARAM_SCCVN_MAX_SCC_SIZE): Remove.
+ (PARAM_RPO_VN_MAX_LOOP_DEPTH): Add.
+ * doc/invoke.texi (sccvn-max-scc-size): Remove.
+ (rpo-vn-max-loop-depth): Document.
+ * tree-ssa-alias.c (walk_non_aliased_vuses): Stop walking
+ when valuezing the VUSE signals we walked out of the region.
+ * tree-ssa-pre.c (phi_translate_1): Ignore predicated values.
+ (phi_translate): Set VN context block to use for availability
+ lookup.
+ (compute_avail): Likewise.
+ (pre_valueize): New function.
+ (pass_pre::execute): Adjust to the RPO VN API.
+
+ * tree-ssa-loop-ivcanon.c: Include tree-ssa-sccvn.h.
+ (propagate_constants_for_unrolling): Remove.
+ (tree_unroll_loops_completely): Perform value-numbering
+ on the unrolled bodies loop parent.
+
+2018-08-27 Richard Biener <rguenther@suse.de>
+
+ * tree-ssa-pre.c (compute_antic): Re-use inverted postorder
+ for partial antic compute.
+
+2018-08-27 Jakub Jelinek <jakub@redhat.com>
+
+ PR rtl-optimization/87065
+ * combine.c (simplify_if_then_else): Formatting fix.
+ (if_then_else_cond): Guard MULT optimization with SCALAR_INT_MODE_P
+ check.
+ (known_cond): Don't return const_true_rtx for vector modes. Use
+ CONST0_RTX instead of const0_rtx. Formatting fixes.
+
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ PR gcov-profile/87069
+ * gcov.c (process_file): Record files already processed
+ and warn about a file being processed multiple times.
+
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ PR driver/83193
+ * config/aarch64/aarch64.c (aarch64_override_options_internal):
+ Set default values for x_aarch64_*_string strings.
+ * config/aarch64/aarch64.opt: Remove --{march,mcpu,mtune}==
+ prefix. For -mabi do not print '=ABI' in help and use
+ <option_value> format for -msve-vector-bits and -moverride
+ options.
+
+2018-08-26 Jeff Law <law@redhat.com>
+
+ * config/mips/frame-header-opt.c: Include "backend.h" rather than
+ "cfg.h"
+
+2018-08-26 Marek Polacek <polacek@redhat.com>
+
+ PR c++/87029, Implement -Wredundant-move.
+ * doc/invoke.texi: Document -Wredundant-move.
+
+2018-08-25 Martin Sebor <msebor@redhat.com>
+
+ PR tree-optimization/87059
+ * builtins.c (expand_builtin_strncmp): Convert MIN_EXPR operand
+ to the same type as the other.
+ * fold-const.c (fold_binary_loc): Assert expectation.
+
+2018-08-25 Iain Sandoe <iain@sandoe.co.uk>
+
+ * config/darwin.c (machopic_legitimize_pic_address): Clean up
+ extraneous parentheses, dead code section and formatting.
+
+2018-08-24 David Malcolm <dmalcolm@redhat.com>
+
+ PR c++/87091
+ * diagnostic-show-locus.c (layout::layout): Ensure the margin is
+ wide enough for jumps in the line-numbering to be visible.
+ (layout::print_gap_in_line_numbering): New member function.
+ (layout::calculate_line_spans): When using line numbering, merge
+ line spans that are only 1 line apart.
+ (diagnostic_show_locus): When printing line numbers, show gaps in
+ line numbering directly, rather than printing headers.
+ (selftest::test_diagnostic_show_locus_fixit_lines): Add test of
+ line-numbering with multiple line spans.
+ (selftest::test_fixit_insert_containing_newline_2): Add test of
+ line-numbering, in which the spans are close enough to be merged.
+
+2018-08-24 Aldy Hernandez <aldyh@redhat.com>
+
+ * gimple-ssa-evrp-analyze.c (set_ssa_range_info): Pass value_range
+ to range_includes_zero_p. Do not special case VR_ANTI_RANGE.
+ * tree-vrp.c (range_is_nonnull): Remove.
+ (range_includes_zero_p): Accept value_range instead of min/max.
+ (extract_range_from_binary_expr_1): Do not early bail on
+ POINTER_PLUS_EXPR.
+ Use range_includes_zero_p instead of range_is_nonnull.
+ (extract_range_from_unary_expr): Use range_includes_zero_p instead
+ of range_is_nonnull.
+ (vrp_meet_1): Pass value_range to range_includes_zero_p. Do not
+ special case VR_ANTI_RANGE.
+ (vrp_finalize): Same.
+ * tree-vrp.h (range_includes_zero_p): Pass value_range as argument
+ instead of min/max.
+ (range_is_nonnull): Remove.
+ * vr-values.c (vrp_stmt_computes_nonzero): Use
+ range_includes_zero_p instead of range_is_nonnull.
+ (extract_range_basic): Pass value_range to range_includes_zero_p
+ instead of range_is_nonnull.
+
+2018-08-24 Uros Bizjak <ubizjak@gmail.com>
+
+ * emit-rtl.c (init_emit_once): Do not emit MODE_POINTER_BOUNDS RTXes.
+ * emit-rtl.h (rtl_data): Remove return_bnd.
+ * explow.c (trunc_int_for_mode): Do not handle POINTER_BOUNDS_MODE_P.
+ * function.c (diddle_return_value): Do not handle crtl->return_bnd.
+ * genmodes.c (complete_mode): Do not handle MODE_POINTER_BOUNDS.
+ (POINTER_BOUNDS_MODE): Remove definition.
+ (make_pointer_bounds_mode): Remove.
+ (get_mode_class): Do not handle MODE_POINTER_BOUNDS.
+ * machmode.h (POINTER_BOUNDS_MODE_P): Remove definition.
+ (scalar_mode::includes_p): Do not handle MODE_POINTER_BOUNDS.
+ * mode-classes.def: Do not define MODE_POINTER_BOUNDS.
+ * stor-layout.c (int_mode_for_mode): Do not handle MODE_POINTER_BOUNDS.
+ * tree-core.h (enum tree_index): Remove TI_POINTER_BOUNDS_TYPE.
+ * varasm.c (output_constant_pool_2): Do not handle MODE_POINTER_BOUNDS.
+
+ * config/i386/i386-modes.def (BND32, BND64): Remove.
+ * config/i386/i386.c (dbx_register_map): Remove bound registers.
+ (dbx64_register_map): Ditto.
+ (svr4_dbx_register_map): Ditto.
+ (indirect_thunk_bnd_needed): Remove.
+ (indirect_thunks_bnd_used): Ditto.
+ (indirect_return_bnd_needed): Ditto.
+ (indirect_return_via_cx_bnd): Ditto.
+ (enum indirect_thunk_prefix): Remove indirect_thunk_prefix_bnd.
+ (indirect_thunk_name): Remove handling of indirect_thunk_prefix_bnd.
+ (output_indirect_thunk): Ditto. Remove need_prefix argument.
+ (output_indirect_thunk_function): Remove handling of
+ indirect_return_bnd_needed, indirect_return_via_cx_bnd,
+ indirect_thunk_bnd_needed and indirect_thunks_bnd_used variables.
+ (ix86_save_reg): Remove handling of crtl->return_bnd.
+ (ix86_legitimate_constant_p): Remove handling of POINTER_BOUNDS_MODE_P.
+ (ix86_print_operand_address_as): Remove handling of UNSPEC_BNDMK_ADDR
+ and UNSPEC_BNDLX_ADDR.
+ (ix86_output_indirect_branch_via_reg): Remove handling of
+ indirect_thunk_prefix_bnd.
+ (ix86_output_indirect_branch_via_push): Ditto.
+ (ix86_output_function_return): Ditto.
+ (ix86_output_indirect_function_return): Ditto.
+ (avoid_func_arg_motion): Do not handle UNSPEC_BNDSTX.
+ * config/i386/i386.h (FIXED_REGISTERS): Remove bound registers.
+ (CALL_USED_REGISTERS): Ditto.
+ (REG_ALLOC_ORDER): Update for removal of bound registers.
+ (HI_REGISTER_NAMES): Ditto.
+ * config/i386/i386.md (UNSPEC_BNDMK, UNSPEC_BNDMK_ADDR, UNSPEC_BNDSTX)
+ (UNSPEC_BNDLDX, UNSPEC_BNDLDX_ADDR, UNSPEC_BNDCL, UNSPEC_BNDCU)
+ (UNSPEC_BNDCN, UNSPEC_MPX_FENCE): Remove.
+ (BND0_REG, BND1_REG, BND2_REG, BND3_REG): Remove
+ (FIRST_PSEUDO_REG): Update.
+ (BND): Remove mode iterator.
+ * config/i386/predicates.md (bnd_mem_operator): Remove.
+
+2018-08-24 Richard Sandiford <richard.sandiford@arm.com>
+
+ * tree-vect-stmts.c (vectorizable_bswap): Handle variable-length
+ vectors.
+
+2018-08-24 Richard Sandiford <richard.sandiford@arm.com>
+
+ * tree-vect-slp.c (vect_transform_slp_perm_load): Separate out
+ the case in which the permute needs only a single element and
+ repeats for every vector of the result. Extend that case to
+ handle variable-length vectors.
+ * tree-vect-stmts.c (vectorizable_load): Update accordingly.
+
+2018-08-24 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR debug/79342
+ * dwarf2out.c (save_macinfo_strings): Call set_indirect_string
+ on DW_MACINFO_start_file for -gsplit-dwarf -g3.
+
+2018-08-24 Richard Biener <rguenther@suse.de>
+
+ * cfg.h (struct control_flow_graph): Add edge_flags_allocated and
+ bb_flags_allocated members.
+ (auto_flag): New RAII class for allocating flags.
+ (auto_edge_flag): New RAII class for allocating edge flags.
+ (auto_bb_flag): New RAII class for allocating bb flags.
+ * cfgloop.c (verify_loop_structure): Allocate temporary edge
+ flag dynamically.
+ * cfganal.c (dfs_enumerate_from): Remove use of visited sbitmap
+ in favor of temporarily allocated BB flag.
+ * hsa-brig.c: Re-order includes.
+ * hsa-dump.c: Likewise.
+ * hsa-regalloc.c: Likewise.
+ * print-rtl.c: Likewise.
+ * profile-count.c: Likewise.
+
+2018-08-24 Segher Boessenkool <segher@kernel.crashing.org>
+
+ PR target/86989
+ * config/rs6000/rs6000.c (toc_relative_expr_p): Check that the base is
+ the TOC register.
+
+2018-08-24 Aldy Hernandez <aldyh@redhat.com>
+
+ PR bootstrap/87073
+ * wide-int-range.cc (wide_int_range_div): Do not ignore result
+ from wide_int_range_multiplicative_op.
+
+2018-08-23 Prathamesh Kulkarni <prathamesh.kulkarni@linaro.org>
+
+ * tree-vect-data-refs.c (vect_grouped_store_supported): Fix typo
+ "permutaion".
+
+2018-08-23 Giuliano Belinassi <giuliano.belinassi@usp.br>
+
+ * genmatch.c (parser::parse_operation): Fix typo 'exapnded'
+ to 'expanded'.
+
+2018-08-23 Alexander Monakov <amonakov@ispras.ru>
+
+ * tree-scalar-evolution.c (final_value_replacement_loop): Dump
+ full GENERIC expression used for replacement.
+
+2018-08-23 Aldy Hernandez <aldyh@redhat.com>
+
+ * tree-vrp.c (abs_extent_range): Remove.
+ (extract_range_into_wide_ints): Pass wide ints by reference.
+ (extract_range_from_binary_expr_1): Rewrite the *DIV_EXPR code.
+ Pass wide ints by reference in all calls to
+ extract_range_into_wide_ints.
+ * wide-int-range.cc (wide_int_range_div): New.
+ * wide-int-range.h (wide_int_range_div): New.
+ (wide_int_range_includes_zero_p): New.
+ (wide_int_range_zero_p): New.
+
+2018-08-23 Matthew Malcomson <matthew.malcomson@arm.com>
+
+ * config/aarch64/aarch64.md (arches): New enum.
+ (arch): New enum attr.
+ (arch_enabled): New attr.
+ (enabled): Now uses arch_enabled only.
+ (simd, sve, fp16): Removed attribute.
+ (fp): Attr now defined in terms of 'arch'.
+ (*mov<mode>_aarch64, *movsi_aarch64, *movdi_aarch64, *movti_aarch64,
+ *movhf_aarch64, <optab><fcvt_target><GPF:mode>2,
+ <FCVT_F2FIXED:fcvt_fixed_insn><GPF:mode>3,
+ <FCVT_FIXED2F:fcvt_fixed_insn><GPI:mode>3): Merge 'fp' and 'simd'
+ attributes into 'arch'.
+ (*movsf_aarch64, *movdf_aarch64, *movtf_aarch64, *add<mode>3_aarch64,
+ subdi3, neg<mode>2, <optab><mode>3, one_cmpl<mode>2,
+ *<NLOGICAL:optab>_one_cmpl<mode>3, *xor_one_cmpl<mode>3,
+ *aarch64_ashl_sisd_or_int_<mode>3, *aarch64_lshr_sisd_or_int_<mode>3,
+ *aarch64_ashr_sisd_or_int_<mode>3, *aarch64_sisd_ushl): Convert use of
+ 'simd' attribute into 'arch'.
+ (load_pair_sw_<SX:mode><SX2:mode>, load_pair_dw_<DX:mode><DX2:mode>,
+ store_pair_sw_<SX:mode><SX2:mode>, store_pair_dw_<DX:mode><DX2:mode>):
+ Convert use of 'fp' attribute to 'arch'.
+ * config/aarch64/aarch64-simd.md (move_lo_quad_internal_<mode>,
+ move_lo_quad_internal_<mode>): (different modes) Merge 'fp' and 'simd'
+ into 'arch'.
+ (move_lo_quad_internal_be_<mode>, move_lo_quad_internal_be_<mode>):
+ (different modes) Merge 'fp' and 'simd' into 'arch'.
+ (*aarch64_combinez<mode>, *aarch64_combinez_be<mode>): Merge 'fp' and
+ 'simd' into 'arch'.
+
+2018-08-23 Segher Boessenkool <segher@kernel.crashing.org>
+
+ PR rtl-optimization/87026
+ * expmed.c (canonicalize_comparison): If we can no longer create
+ pseudoregisters, don't.
+
+2018-08-23 Richard Earnshaw <rearnsha@arm.com>
+
+ PR target/86951
+ * config/arm/arm-protos.h (arm_emit_speculation_barrier): New
+ prototype.
+ * config/arm/arm.c (speculation_barrier_libfunc): New static
+ variable.
+ (arm_init_libfuncs): Initialize it.
+ (arm_emit_speculation_barrier): New function.
+ * config/arm/arm.md (speculation_barrier): Call
+ arm_emit_speculation_barrier for architectures that do not have
+ DSB or ISB.
+ (speculation_barrier_insn): Only match on Armv7 or later.
+
+2018-08-23 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/87024
+ * tree-inline.c (copy_bb): Drop unused __builtin_va_arg_pack_len
+ calls.
+
+2018-08-23 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/aarch64.c (aarch64_evpc_sve_tbl): Fix handling
+ of single-vector TBLs.
+ (aarch64_vectorize_vec_perm_const): Set one_vector_p when only
+ one input is given.
+
+2018-08-23 Richard Sandiford <richard.sandiford@arm.com>
+
+ PR target/85910
+ * config/aarch64/aarch64.c (aarch64_expand_vec_perm_const_1): Fix
+ aarch64_evpc_tbl guard.
+
+2018-08-22 Bernd Edlinger <bernd.edlinger@hotmail.de>
+
+ * tree-ssa-dse.c (compute_trims): Avoid folding away undefined
+ behaviour.
+
2018-08-22 Martin Sebor <msebor@redhat.com>
PR middle-end/87052
@@ -409,23 +1305,23 @@
2018-08-17 Martin Liska <mliska@suse.cz>
* common.opt: Remove Warn, Init and Report for options with
- Ignore/Deprecated flag. Warning is done automatically for
- Deprecated flags.
+ Ignore/Deprecated flag. Warning is done automatically for
+ Deprecated flags.
* config/i386/i386.opt: Likewise.
* config/ia64/ia64.opt: Likewise.
* config/rs6000/rs6000.opt: Likewise.
* cppbuiltin.c (define_builtin_macros_for_compilation_flags):
- Remove usage of flag_check_pointer_bounds.
+ Remove usage of flag_check_pointer_bounds.
* lto-wrapper.c (merge_and_complain): Do not handle
- OPT_fcheck_pointer_bounds.
+ OPT_fcheck_pointer_bounds.
(append_compiler_options): Likewise.
* opt-functions.awk: Do not handle Deprecated.
* optc-gen.awk: Check that Var, Report and Init are not
- used for an option with Ignore/Deprecated flag.
+ used for an option with Ignore/Deprecated flag.
* opts-common.c (decode_cmdline_option): Do not report
- CL_ERR_DEPRECATED.
+ CL_ERR_DEPRECATED.
(read_cmdline_option): Report warning for OPT_SPECIAL_deprecated
- options.
+ options.
* opts.h (struct cl_option): Remove cl_deprecated flag.
(CL_ERR_DEPRECATED): Remove error enum value.
diff --git a/gcc/ChangeLog.meissner b/gcc/ChangeLog.meissner
index 2fea0792440..d3762339182 100644
--- a/gcc/ChangeLog.meissner
+++ b/gcc/ChangeLog.meissner
@@ -1,3 +1,13 @@
+2018-08-30 Michael Meissner <meissner@linux.ibm.com>
+
+ Merge up to 263992.
+ * REVISION: Update subversion id.
+
+2018-08-30 Michael Meissner <meissner@linux.ibm.com>
+
+ Merge up to 263992.
+ * REVISION: Update subversion id.
+
2018-08-28 Michael Meissner <meissner@linux.ibm.com>
* config/rs6000/rs6000-addr.c (class toc_refs): Dynamically
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 30c6226939a..135f6e52f2d 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20180822
+20180830
diff --git a/gcc/REVISION b/gcc/REVISION
index 5644700d00b..34cf98c7a22 100644
--- a/gcc/REVISION
+++ b/gcc/REVISION
@@ -1 +1 @@
-addr2 branch, based on subversion id 263784.
+addr2 branch, based on subversion id 263992.
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog
index 062270177fb..b61c605e996 100644
--- a/gcc/ada/ChangeLog
+++ b/gcc/ada/ChangeLog
@@ -1,3 +1,15 @@
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ * gcc-interface/decl.c (update_profile): Use new function
+ fndecl_built_in_p and remove check for FUNCTION_DECL if
+ possible.
+ * gcc-interface/gigi.h (call_is_atomic_load): Likewise.
+ * gcc-interface/utils.c (gnat_pushdecl): Likewise.
+
+2018-08-23 Giuliano Belinassi <giuliano.belinassi@usp.br>
+
+ * exp_unst.ads: Fix typo 'exapnded' to 'expanded'.
+
2018-08-21 Hristian Kirtchev <kirtchev@adacore.com>
* checks.adb, contracts.adb, exp_aggr.adb, exp_attr.adb,
@@ -284,7 +296,7 @@
* gcc-interface/Makefile.in (xoscons): Likewise.
2018-07-31 Alexandre Oliva <oliva@adacore.com>
- Olivier Hainque <hainque@adacore.com>
+ Olivier Hainque <hainque@adacore.com>
* gcc-interface/trans.c: Include debug.h.
(file_map): New static variable.
diff --git a/gcc/ada/exp_unst.ads b/gcc/ada/exp_unst.ads
index 3b67a0ddd35..a5cdf0690dc 100644
--- a/gcc/ada/exp_unst.ads
+++ b/gcc/ada/exp_unst.ads
@@ -477,7 +477,7 @@ package Exp_Unst is
-- subprograms exist. Similarly overloading would cause a naming issue.
-- In fact, the expanded code includes qualified names which eliminate this
- -- problem. We omitted the qualification from the exapnded examples above
+ -- problem. We omitted the qualification from the expanded examples above
-- for simplicity. But to see this in action, consider this example:
-- function Mnames return Boolean is
diff --git a/gcc/ada/gcc-interface/decl.c b/gcc/ada/gcc-interface/decl.c
index b1dc379c247..6f605bd64ec 100644
--- a/gcc/ada/gcc-interface/decl.c
+++ b/gcc/ada/gcc-interface/decl.c
@@ -5421,7 +5421,7 @@ update_profile (Entity_Id gnat_subprog)
if (DECL_P (gnu_type))
{
/* Builtins cannot have their address taken so we can reset them. */
- gcc_assert (DECL_BUILT_IN (gnu_type));
+ gcc_assert (fndecl_built_in_p (gnu_type));
save_gnu_tree (gnat_subprog, NULL_TREE, false);
save_gnu_tree (gnat_subprog, gnu_type, false);
return;
diff --git a/gcc/ada/gcc-interface/gigi.h b/gcc/ada/gcc-interface/gigi.h
index b890195cefc..eb64a8bbdbd 100644
--- a/gcc/ada/gcc-interface/gigi.h
+++ b/gcc/ada/gcc-interface/gigi.h
@@ -1081,7 +1081,7 @@ call_is_atomic_load (tree exp)
{
tree fndecl = get_callee_fndecl (exp);
- if (!(fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL))
+ if (!(fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)))
return false;
enum built_in_function code = DECL_FUNCTION_CODE (fndecl);
diff --git a/gcc/ada/gcc-interface/utils.c b/gcc/ada/gcc-interface/utils.c
index cc1fe770f2c..313d984b83d 100644
--- a/gcc/ada/gcc-interface/utils.c
+++ b/gcc/ada/gcc-interface/utils.c
@@ -773,7 +773,7 @@ gnat_pushdecl (tree decl, Node_Id gnat_node)
debugger at the proper time. */
if (DECL_EXTERNAL (decl)
&& TREE_CODE (decl) == FUNCTION_DECL
- && DECL_BUILT_IN (decl))
+ && fndecl_built_in_p (decl))
vec_safe_push (builtin_decls, decl);
else if (global_bindings_p ())
vec_safe_push (global_decls, decl);
diff --git a/gcc/attribs.c b/gcc/attribs.c
index 64700b6c8ce..8b721274d3b 100644
--- a/gcc/attribs.c
+++ b/gcc/attribs.c
@@ -432,7 +432,7 @@ diag_attr_exclusions (tree last_decl, tree node, tree attrname,
bool note = last_decl != NULL_TREE;
auto_diagnostic_group d;
if (TREE_CODE (node) == FUNCTION_DECL
- && DECL_BUILT_IN (node))
+ && fndecl_built_in_p (node))
note &= warning (OPT_Wattributes,
"ignoring attribute %qE in declaration of "
"a built-in function %qD because it conflicts "
diff --git a/gcc/builtins.c b/gcc/builtins.c
index b1a79f3f33f..58ea7475ef7 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -208,15 +208,6 @@ is_builtin_name (const char *name)
return false;
}
-
-/* Return true if DECL is a function symbol representing a built-in. */
-
-bool
-is_builtin_fn (tree decl)
-{
- return TREE_CODE (decl) == FUNCTION_DECL && DECL_BUILT_IN (decl);
-}
-
/* Return true if NODE should be considered for inline expansion regardless
of the optimization level. This means whenever a function is invoked with
its "internal" name, which normally contains the prefix "__builtin". */
@@ -576,7 +567,7 @@ string_length (const void *ptr, unsigned eltsize, unsigned maxelts)
tree
c_strlen (tree src, int only_value, unsigned eltsize)
{
- gcc_assert (eltsize == 1 || eltsize == 2 || eltsize == 4);
+ gcc_checking_assert (eltsize == 1 || eltsize == 2 || eltsize == 4);
STRIP_NOPS (src);
if (TREE_CODE (src) == COND_EXPR
&& (only_value || !TREE_SIDE_EFFECTS (TREE_OPERAND (src, 0))))
@@ -598,7 +589,7 @@ c_strlen (tree src, int only_value, unsigned eltsize)
/* Offset from the beginning of the string in bytes. */
tree byteoff;
tree memsize;
- src = string_constant (src, &byteoff, &memsize);
+ src = string_constant (src, &byteoff, &memsize, NULL);
if (src == 0)
return NULL_TREE;
@@ -665,10 +656,10 @@ c_strlen (tree src, int only_value, unsigned eltsize)
a null character if we can represent it as a single HOST_WIDE_INT. */
if (byteoff == 0)
eltoff = 0;
- else if (! tree_fits_shwi_p (byteoff))
+ else if (! tree_fits_uhwi_p (byteoff) || tree_to_uhwi (byteoff) % eltsize)
eltoff = -1;
else
- eltoff = tree_to_shwi (byteoff) / eltsize;
+ eltoff = tree_to_uhwi (byteoff) / eltsize;
/* If the offset is known to be out of bounds, warn, and call strlen at
runtime. */
@@ -700,6 +691,11 @@ c_strlen (tree src, int only_value, unsigned eltsize)
unsigned len = string_length (ptr + eltoff * eltsize, eltsize,
strelts - eltoff);
+ /* Don't know what to return if there was no zero termination.
+ Ideally this would turn into a gcc_checking_assert over time. */
+ if (len > maxelts - eltoff)
+ return NULL_TREE;
+
return ssize_int (len);
}
@@ -2979,6 +2975,10 @@ expand_builtin_strnlen (tree exp, rtx target, machine_mode target_mode)
tree func = get_callee_fndecl (exp);
tree len = c_strlen (src, 0);
+ /* FIXME: Change c_strlen() to return sizetype instead of ssizetype
+ so these conversions aren't necessary. */
+ if (len)
+ len = fold_convert_loc (loc, TREE_TYPE (bound), len);
if (TREE_CODE (bound) == INTEGER_CST)
{
@@ -2993,7 +2993,6 @@ expand_builtin_strnlen (tree exp, rtx target, machine_mode target_mode)
if (!len || TREE_CODE (len) != INTEGER_CST)
return NULL_RTX;
- len = fold_convert_loc (loc, size_type_node, len);
len = fold_build2_loc (loc, MIN_EXPR, size_type_node, len, bound);
return expand_expr (len, target, target_mode, EXPAND_NORMAL);
}
@@ -4759,7 +4758,10 @@ expand_builtin_strncmp (tree exp, ATTRIBUTE_UNUSED rtx target,
/* If we are not using the given length, we must incorporate it here.
The actual new length parameter will be MIN(len,arg3) in this case. */
if (len != len3)
- len = fold_build2_loc (loc, MIN_EXPR, TREE_TYPE (len), len, len3);
+ {
+ len = fold_convert_loc (loc, sizetype, len);
+ len = fold_build2_loc (loc, MIN_EXPR, TREE_TYPE (len), len, len3);
+ }
rtx arg1_rtx = get_memory_rtx (arg1, len);
rtx arg2_rtx = get_memory_rtx (arg2, len);
rtx arg3_rtx = expand_normal (len);
@@ -8151,11 +8153,8 @@ builtin_mathfn_code (const_tree t)
return END_BUILTINS;
fndecl = get_callee_fndecl (t);
- if (fndecl == NULL_TREE
- || TREE_CODE (fndecl) != FUNCTION_DECL
- || ! DECL_BUILT_IN (fndecl)
- || DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
- return END_BUILTINS;
+ if (fndecl == NULL_TREE || !fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
+ return END_BUILTINS;
parmlist = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
init_const_call_expr_arg_iterator (t, &iter);
@@ -8310,9 +8309,8 @@ fold_builtin_expect (location_t loc, tree arg0, tree arg1, tree arg2,
if (TREE_CODE (inner) == CALL_EXPR
&& (fndecl = get_callee_fndecl (inner))
- && (DECL_BUILT_IN_P (fndecl, BUILT_IN_NORMAL, BUILT_IN_EXPECT)
- || DECL_BUILT_IN_P (fndecl, BUILT_IN_NORMAL,
- BUILT_IN_EXPECT_WITH_PROBABILITY)))
+ && (fndecl_built_in_p (fndecl, BUILT_IN_EXPECT)
+ || fndecl_built_in_p (fndecl, BUILT_IN_EXPECT_WITH_PROBABILITY)))
return arg0;
inner = inner_arg0;
@@ -9625,9 +9623,7 @@ fold_call_expr (location_t loc, tree exp, bool ignore)
{
tree ret = NULL_TREE;
tree fndecl = get_callee_fndecl (exp);
- if (fndecl
- && TREE_CODE (fndecl) == FUNCTION_DECL
- && DECL_BUILT_IN (fndecl)
+ if (fndecl && fndecl_built_in_p (fndecl)
/* If CALL_EXPR_VA_ARG_PACK is set, the arguments aren't finalized
yet. Defer folding until we see all the arguments
(after inlining). */
@@ -9641,10 +9637,7 @@ fold_call_expr (location_t loc, tree exp, bool ignore)
if (nargs && TREE_CODE (CALL_EXPR_ARG (exp, nargs - 1)) == CALL_EXPR)
{
tree fndecl2 = get_callee_fndecl (CALL_EXPR_ARG (exp, nargs - 1));
- if (fndecl2
- && TREE_CODE (fndecl2) == FUNCTION_DECL
- && DECL_BUILT_IN_CLASS (fndecl2) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (fndecl2) == BUILT_IN_VA_ARG_PACK)
+ if (fndecl2 && fndecl_built_in_p (fndecl2, BUILT_IN_VA_ARG_PACK))
return NULL_TREE;
}
@@ -9680,17 +9673,14 @@ fold_builtin_call_array (location_t loc, tree,
tree fndecl = TREE_OPERAND (fn, 0);
if (TREE_CODE (fndecl) == FUNCTION_DECL
- && DECL_BUILT_IN (fndecl))
+ && fndecl_built_in_p (fndecl))
{
/* If last argument is __builtin_va_arg_pack (), arguments to this
function are not finalized yet. Defer folding until they are. */
if (n && TREE_CODE (argarray[n - 1]) == CALL_EXPR)
{
tree fndecl2 = get_callee_fndecl (argarray[n - 1]);
- if (fndecl2
- && TREE_CODE (fndecl2) == FUNCTION_DECL
- && DECL_BUILT_IN_CLASS (fndecl2) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (fndecl2) == BUILT_IN_VA_ARG_PACK)
+ if (fndecl2 && fndecl_built_in_p (fndecl2, BUILT_IN_VA_ARG_PACK))
return NULL_TREE;
}
if (avoid_folding_inline_builtin (fndecl))
@@ -10809,9 +10799,7 @@ fold_call_stmt (gcall *stmt, bool ignore)
tree ret = NULL_TREE;
tree fndecl = gimple_call_fndecl (stmt);
location_t loc = gimple_location (stmt);
- if (fndecl
- && TREE_CODE (fndecl) == FUNCTION_DECL
- && DECL_BUILT_IN (fndecl)
+ if (fndecl && fndecl_built_in_p (fndecl)
&& !gimple_call_va_arg_pack_p (stmt))
{
int nargs = gimple_call_num_args (stmt);
@@ -10858,8 +10846,7 @@ fold_call_stmt (gcall *stmt, bool ignore)
void
set_builtin_user_assembler_name (tree decl, const char *asmspec)
{
- gcc_assert (TREE_CODE (decl) == FUNCTION_DECL
- && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL
+ gcc_assert (fndecl_built_in_p (decl, BUILT_IN_NORMAL)
&& asmspec != 0);
tree builtin = builtin_decl_explicit (DECL_FUNCTION_CODE (decl));
@@ -10879,7 +10866,7 @@ set_builtin_user_assembler_name (tree decl, const char *asmspec)
bool
is_simple_builtin (tree decl)
{
- if (decl && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
+ if (decl && fndecl_built_in_p (decl, BUILT_IN_NORMAL))
switch (DECL_FUNCTION_CODE (decl))
{
/* Builtins that expand to constants. */
diff --git a/gcc/builtins.h b/gcc/builtins.h
index 805f1801604..c3d5ccbb6b6 100644
--- a/gcc/builtins.h
+++ b/gcc/builtins.h
@@ -49,7 +49,6 @@ extern struct target_builtins *this_target_builtins;
/* Non-zero if __builtin_constant_p should be folded right away. */
extern bool force_folding_builtin_constant_p;
-extern bool is_builtin_fn (tree);
extern bool called_as_built_in (tree);
extern bool get_object_alignment_1 (tree, unsigned int *,
unsigned HOST_WIDE_INT *);
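
For reference, the recurring mechanical change throughout this merge replaces the
open-coded DECL_BUILT_IN / DECL_BUILT_IN_CLASS / DECL_FUNCTION_CODE tests with the
fndecl_built_in_p overloads seen in the hunks above.  A minimal illustrative helper
(not part of the patch, and using only the overload forms that actually appear in
these hunks):

    /* Hypothetical helper: is CALL a call to the normal built-in memset?  */

    static bool
    call_is_builtin_memset_p (gcall *call)
    {
      tree fndecl = gimple_call_fndecl (call);
      /* Old idiom:  fndecl && TREE_CODE (fndecl) == FUNCTION_DECL
	   && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	   && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMSET.
	 New idiom: one predicate, optionally narrowed by class
	 (BUILT_IN_NORMAL) or by function code (BUILT_IN_MEMSET).  */
      return fndecl && fndecl_built_in_p (fndecl, BUILT_IN_MEMSET);
    }
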
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index 790df09eb59..5440b250469 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,37 @@
+2018-08-27 David Malcolm <dmalcolm@redhat.com>
+
+ PR 87091
+ * c-common.c (c_cpp_error): Update for conversion of show_caret_p
+ to a tri-state.
+ (maybe_suggest_missing_token_insertion): Likewise.
+ (maybe_add_include_fixit): Add param "override_location". If set,
+ and source-printing is enabled, then override the rich_location's
+ primary location with that of the insertion point for the fix-it
+ hint, marking it with SHOW_LINES_WITHOUT_RANGE.
+ * c-common.h (extern void maybe_add_include_fixit): Add bool
+ param.
+ * c-format.c (selftest::test_type_mismatch_range_labels): Update
+ for conversion of show_caret_p to a tri-state.
+ * c-warn.c (warn_for_restrict): Likewise.
+ * known-headers.cc
+ (suggest_missing_header::~suggest_missing_header): Update call to
+ maybe_add_include_fixit to suggest overriding the location, as it
+ is for a note.
+
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ * c-common.c (check_function_restrict): Use new function
+ fndecl_built_in_p and remove check for FUNCTION_DECL if
+ possible.
+ (check_builtin_function_arguments): Likewise.
+ (reject_gcc_builtin): Likewise.
+ * c-warn.c (sizeof_pointer_memaccess_warning): Likewise.
+
+2018-08-26 Marek Polacek <polacek@redhat.com>
+
+ PR c++/87029, Implement -Wredundant-move.
+ * c.opt (Wredundant-move): New option.
+
2018-08-21 Marek Polacek <polacek@redhat.com>
PR c++/86981, Implement -Wpessimizing-move.
@@ -51,8 +85,8 @@
2018-08-17 Martin Liska <mliska@suse.cz>
* c.opt: Remove Warn, Init and Report for options with
- Ignore/Deprecated flag. Warning is done automatically for
- Deprecated flags.
+ Ignore/Deprecated flag. Warning is done automatically for
+ Deprecated flags.
2018-08-16 David Malcolm <dmalcolm@redhat.com>
@@ -365,7 +399,7 @@
2018-05-11 Martin Liska <mliska@suse.cz>
- PR sanitizer/85556
+ PR sanitizer/85556
* c-attribs.c (handle_no_sanitize_attribute): Iterate all
TREE_LIST values.
@@ -604,11 +638,11 @@
2018-01-18 Boris Kolpackov <boris@codesynthesis.com>
- PR other/70268
- * c.opt (-fmacro-prefix-map): New option.
- * c-opts.c (c_common_handle_option): Handle it.
- * c-lex.c (init_c_lex): Set remap_filename cpp callback.
- * c-ppoutput.c (init_pp_output): Likewise.
+ PR other/70268
+ * c.opt (-fmacro-prefix-map): New option.
+ * c-opts.c (c_common_handle_option): Handle it.
+ * c-lex.c (init_c_lex): Set remap_filename cpp callback.
+ * c-ppoutput.c (init_pp_output): Likewise.
2018-01-17 David Malcolm <dmalcolm@redhat.com>
@@ -677,7 +711,7 @@
Update copyright years.
2017-12-22 Mike Stump <mikestump@comcast.net>
- Eric Botcazou <ebotcazou@adacore.com>
+ Eric Botcazou <ebotcazou@adacore.com>
* c-pragma.c (init_pragma): Register pragma GCC unroll.
* c-pragma.h (enum pragma_kind): Add PRAGMA_UNROLL.
@@ -799,7 +833,7 @@
* c-common.h (c_switch_covers_all_cases_p): Declare.
2017-11-28 Julia Koval <julia.koval@intel.com>
- Sebastian Peryt <sebastian.peryt@intel.com>
+ Sebastian Peryt <sebastian.peryt@intel.com>
* array-notation-common.c: Delete.
* c-cilkplus.c: Ditto.
@@ -1157,8 +1191,8 @@
* c-cppbuiltin.c (c_cpp_builtins): Use opt_scalar_float_mode.
2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
- Alan Hayward <alan.hayward@arm.com>
- David Sherwood <david.sherwood@arm.com>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
* c-common.c (c_common_fixed_point_type_for_size): Use new mode
iterators.
@@ -1620,7 +1654,7 @@
2017-05-01 Xi Ruoyao <ryxi@stu.xidian.edu.cn>
- PR c++/80038
+ PR c++/80038
* c-common.h (cilk_gimplify_call_params_in_spawned_fn): Remove
prototype.
(cilk_install_body_pedigree_operations): Likewise.
@@ -1944,8 +1978,8 @@
2016-12-08 Martin Sebor <msebor@redhat.com>
PR c/78165
- * c-pretty-print (pp_c_integer_constant): Avoid formatting type
- suffix.
+ * c-pretty-print (pp_c_integer_constant): Avoid formatting type
+ suffix.
2016-12-07 Martin Sebor <msebor@redhat.com>
diff --git a/gcc/c-family/ChangeLog.meissner b/gcc/c-family/ChangeLog.meissner
index 730efaa3610..c19f5752247 100644
--- a/gcc/c-family/ChangeLog.meissner
+++ b/gcc/c-family/ChangeLog.meissner
@@ -1,3 +1,7 @@
+2018-08-30 Michael Meissner <meissner@linux.ibm.com>
+
+ Merge up to 263992.
+
2018-08-22 Michael Meissner <meissner@linux.ibm.com>
Merge up to 263784.
diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c
index 95cff215d60..6a5d99171a0 100644
--- a/gcc/c-family/c-common.c
+++ b/gcc/c-family/c-common.c
@@ -5326,8 +5326,7 @@ check_function_restrict (const_tree fndecl, const_tree fntype,
{
/* Avoid diagnosing calls to built-ins with a zero size/bound
here. They are checked in more detail elsewhere. */
- if (DECL_BUILT_IN (fndecl)
- && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
+ if (fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
&& nargs == 3
&& TREE_CODE (argarray[2]) == INTEGER_CST
&& integer_zerop (argarray[2]))
@@ -5755,8 +5754,7 @@ bool
check_builtin_function_arguments (location_t loc, vec<location_t> arg_loc,
tree fndecl, int nargs, tree *args)
{
- if (!DECL_BUILT_IN (fndecl)
- || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
+ if (!fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
return true;
switch (DECL_FUNCTION_CODE (fndecl))
@@ -6133,7 +6131,7 @@ c_cpp_error (cpp_reader *pfile ATTRIBUTE_UNUSED, int level, int reason,
gcc_unreachable ();
}
if (done_lexing)
- richloc->set_range (0, input_location, true);
+ richloc->set_range (0, input_location, SHOW_RANGE_WITH_CARET);
diagnostic_set_info_translated (&diagnostic, msg, ap,
richloc, dlevel);
diagnostic_override_option_index (&diagnostic,
@@ -8011,7 +8009,7 @@ reject_gcc_builtin (const_tree expr, location_t loc /* = UNKNOWN_LOCATION */)
strlen, and for C++ operators new and delete.
The c_decl_implicit() test avoids false positives for implicitly
declared built-ins with library fallbacks (such as abs). */
- && DECL_BUILT_IN (expr)
+ && fndecl_built_in_p (expr)
&& DECL_IS_BUILTIN (expr)
&& !c_decl_implicit (expr)
&& !DECL_ASSEMBLER_NAME_SET_P (expr))
@@ -8338,8 +8336,8 @@ maybe_suggest_missing_token_insertion (rich_location *richloc,
location_t hint_loc = hint->get_start_loc ();
location_t old_loc = richloc->get_loc ();
- richloc->set_range (0, hint_loc, true);
- richloc->add_range (old_loc, false);
+ richloc->set_range (0, hint_loc, SHOW_RANGE_WITH_CARET);
+ richloc->add_range (old_loc);
}
}
@@ -8477,10 +8475,16 @@ static added_includes_t *added_includes;
location.
This function is idempotent: a header will be added at most once to
- any given file. */
+ any given file.
+
+ If OVERRIDE_LOCATION is true, then if a fix-it is added and will be
+ printed, then RICHLOC's primary location will be replaced by that of
+ the fix-it hint (for use by "inform" notes where the location of the
+ issue has already been reported). */
void
-maybe_add_include_fixit (rich_location *richloc, const char *header)
+maybe_add_include_fixit (rich_location *richloc, const char *header,
+ bool override_location)
{
location_t loc = richloc->get_loc ();
const char *file = LOCATION_FILE (loc);
@@ -8508,6 +8512,33 @@ maybe_add_include_fixit (rich_location *richloc, const char *header)
char *text = xasprintf ("#include %s\n", header);
richloc->add_fixit_insert_before (include_insert_loc, text);
free (text);
+
+ if (override_location && global_dc->show_caret)
+ {
+ /* Replace the primary location with that of the insertion point for the
+ fix-it hint.
+
+	 We use SHOW_LINES_WITHOUT_RANGE so that we don't meaninglessly print a
+ caret for the insertion point (or colorize it).
+
+ Hence we print e.g.:
+
+ ../x86_64-pc-linux-gnu/libstdc++-v3/include/vector:74:1: note: msg 2
+ 73 | # include <debug/vector>
+ +++ |+#include <vector>
+ 74 | #endif
+
+ rather than:
+
+ ../x86_64-pc-linux-gnu/libstdc++-v3/include/vector:74:1: note: msg 2
+ 73 | # include <debug/vector>
+ +++ |+#include <vector>
+ 74 | #endif
+ | ^
+
+ avoiding the caret on the first column of line 74. */
+ richloc->set_range (0, include_insert_loc, SHOW_LINES_WITHOUT_RANGE);
+ }
}
/* Attempt to convert a braced array initializer list CTOR for array
diff --git a/gcc/c-family/c-common.h b/gcc/c-family/c-common.h
index 9b05e605250..c5e2028cbaa 100644
--- a/gcc/c-family/c-common.h
+++ b/gcc/c-family/c-common.h
@@ -1327,7 +1327,7 @@ excess_precision_mode_join (enum flt_eval_method, enum flt_eval_method);
extern int c_flt_eval_method (bool ts18661_p);
extern void add_no_sanitize_value (tree node, unsigned int flags);
-extern void maybe_add_include_fixit (rich_location *, const char *);
+extern void maybe_add_include_fixit (rich_location *, const char *, bool);
extern void maybe_suggest_missing_token_insertion (rich_location *richloc,
enum cpp_ttype token_type,
location_t prev_token_loc);
diff --git a/gcc/c-family/c-format.c b/gcc/c-family/c-format.c
index 035878fd954..98c49cf5d18 100644
--- a/gcc/c-family/c-format.c
+++ b/gcc/c-family/c-format.c
@@ -4352,7 +4352,7 @@ test_type_mismatch_range_labels ()
range_label_for_type_mismatch param_label (integer_type_node,
char_type_node);
gcc_rich_location richloc (fmt, &fmt_label);
- richloc.add_range (param, false, &param_label);
+ richloc.add_range (param, SHOW_RANGE_WITHOUT_CARET, &param_label);
test_diagnostic_context dc;
diagnostic_show_locus (&dc, &richloc, DK_ERROR);
diff --git a/gcc/c-family/c-warn.c b/gcc/c-family/c-warn.c
index ca259aa7bd0..a1a7f935964 100644
--- a/gcc/c-family/c-warn.c
+++ b/gcc/c-family/c-warn.c
@@ -702,7 +702,7 @@ sizeof_pointer_memaccess_warning (location_t *sizeof_arg_loc, tree callee,
location_t loc;
if (TREE_CODE (callee) != FUNCTION_DECL
- || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
+ || !fndecl_built_in_p (callee, BUILT_IN_NORMAL)
|| vec_safe_length (params) <= 1)
return;
@@ -2429,7 +2429,7 @@ warn_for_restrict (unsigned param_pos, tree *argarray, unsigned nargs)
{
arg = argarray[pos - 1];
if (EXPR_HAS_LOCATION (arg))
- richloc.add_range (EXPR_LOCATION (arg), false);
+ richloc.add_range (EXPR_LOCATION (arg));
}
return warning_n (&richloc, OPT_Wrestrict, arg_positions.length (),
diff --git a/gcc/c-family/c.opt b/gcc/c-family/c.opt
index 76840dd77ad..31a2b972919 100644
--- a/gcc/c-family/c.opt
+++ b/gcc/c-family/c.opt
@@ -985,6 +985,10 @@ Wredundant-decls
C ObjC C++ ObjC++ Var(warn_redundant_decls) Warning
Warn about multiple declarations of the same object.
+Wredundant-move
+C++ ObjC++ Var(warn_redundant_move) Warning LangEnabledBy(C++ ObjC++,Wextra)
+Warn about redundant calls to std::move.
+
Wregister
C++ ObjC++ Var(warn_register) Warning
Warn about uses of register storage specifier.
diff --git a/gcc/c-family/known-headers.cc b/gcc/c-family/known-headers.cc
index 5524d216318..b0763cfe984 100644
--- a/gcc/c-family/known-headers.cc
+++ b/gcc/c-family/known-headers.cc
@@ -192,7 +192,7 @@ suggest_missing_header::~suggest_missing_header ()
return;
gcc_rich_location richloc (get_location ());
- maybe_add_include_fixit (&richloc, m_header_hint);
+ maybe_add_include_fixit (&richloc, m_header_hint, true);
inform (&richloc,
"%qs is defined in header %qs;"
" did you forget to %<#include %s%>?",
diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog
index e943f0aac4a..51b706c6231 100644
--- a/gcc/c/ChangeLog
+++ b/gcc/c/ChangeLog
@@ -1,3 +1,32 @@
+2018-08-30 Alexander Monakov <amonakov@ispras.ru>
+
+ * gimple-parser.c (c_parser_gimple_binary_expression): Accept infix
+ "__MULT_HIGHPART" for MULT_HIGHPART_EXPR.
+
+2018-08-27 David Malcolm <dmalcolm@redhat.com>
+
+ PR 87091
+ * c-decl.c (implicitly_declare): Update call to
+ maybe_add_include_fixit to suggest overriding the location, as it
+ is for a note.
+ * c-objc-common.c (c_tree_printer): Update for conversion of
+ show_caret_p to a tri-state.
+
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ * c-decl.c (locate_old_decl): Use new function
+ fndecl_built_in_p and remove check for FUNCTION_DECL if
+ possible.
+ (diagnose_mismatched_decls): Likewise.
+ (merge_decls): Likewise.
+ (warn_if_shadowing): Likewise.
+ (pushdecl): Likewise.
+ (implicitly_declare): Likewise.
+ * c-parser.c (c_parser_postfix_expression_after_primary): Likewise.
+ * c-tree.h (C_DECL_ISNT_PROTOTYPE): Likewise.
+ * c-typeck.c (build_function_call_vec): Likewise.
+ (convert_arguments): Likewise.
+
2018-08-20 David Malcolm <dmalcolm@redhat.com>
PR other/84889
@@ -281,7 +310,7 @@
DEBUG_BEGIN_STMTs.
2017-12-22 Mike Stump <mikestump@comcast.net>
- Eric Botcazou <ebotcazou@adacore.com>
+ Eric Botcazou <ebotcazou@adacore.com>
* c-parser.c (c_parser_while_statement): Add unroll parameter and
build ANNOTATE_EXPR if present. Add 3rd operand to ANNOTATE_EXPR.
@@ -398,7 +427,7 @@
c_switch_covers_all_cases_p returns true.
2017-11-28 Julia Koval <julia.koval@intel.com>
- Sebastian Peryt <sebastian.peryt@intel.com>
+ Sebastian Peryt <sebastian.peryt@intel.com>
* Make-lang.in (c/c-array-notation.o): Remove.
* c-array-notation.c: Delete.
@@ -1115,7 +1144,7 @@
2017-05-01 Xi Ruoyao <ryxi@stu.xidian.edu.cn>
- PR c++/80038
+ PR c++/80038
* c-gimplify.c (c_gimplify_expr): Remove calls to
cilk_gimplifY_call_params_in_spawned_fn.
diff --git a/gcc/c/c-decl.c b/gcc/c/c-decl.c
index 95249779e3c..feafc022768 100644
--- a/gcc/c/c-decl.c
+++ b/gcc/c/c-decl.c
@@ -1800,7 +1800,7 @@ validate_proto_after_old_defn (tree newdecl, tree newtype, tree oldtype)
static void
locate_old_decl (tree decl)
{
- if (TREE_CODE (decl) == FUNCTION_DECL && DECL_BUILT_IN (decl)
+ if (TREE_CODE (decl) == FUNCTION_DECL && fndecl_built_in_p (decl)
&& !C_DECL_DECLARED_BUILTIN (decl))
;
else if (DECL_INITIAL (decl))
@@ -1843,7 +1843,7 @@ diagnose_mismatched_decls (tree newdecl, tree olddecl,
if (TREE_CODE (olddecl) != TREE_CODE (newdecl))
{
if (!(TREE_CODE (olddecl) == FUNCTION_DECL
- && DECL_BUILT_IN (olddecl)
+ && fndecl_built_in_p (olddecl)
&& !C_DECL_DECLARED_BUILTIN (olddecl)))
{
auto_diagnostic_group d;
@@ -1877,7 +1877,7 @@ diagnose_mismatched_decls (tree newdecl, tree olddecl,
if (!comptypes (oldtype, newtype))
{
if (TREE_CODE (olddecl) == FUNCTION_DECL
- && DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl))
+ && fndecl_built_in_p (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl))
{
/* Accept harmless mismatch in function types.
This is for the ffs and fprintf builtins. */
@@ -2025,7 +2025,7 @@ diagnose_mismatched_decls (tree newdecl, tree olddecl,
define the built-in with an old-style definition (so we
can't validate the argument list) the built-in definition is
overridden, but optionally warn this was a bad choice of name. */
- if (DECL_BUILT_IN (olddecl)
+ if (fndecl_built_in_p (olddecl)
&& !C_DECL_DECLARED_BUILTIN (olddecl)
&& (!TREE_PUBLIC (newdecl)
|| (DECL_INITIAL (newdecl)
@@ -2297,8 +2297,8 @@ diagnose_mismatched_decls (tree newdecl, tree olddecl,
&& DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl))
/* Don't warn about redundant redeclarations of builtins. */
&& !(TREE_CODE (newdecl) == FUNCTION_DECL
- && !DECL_BUILT_IN (newdecl)
- && DECL_BUILT_IN (olddecl)
+ && !fndecl_built_in_p (newdecl)
+ && fndecl_built_in_p (olddecl)
&& !C_DECL_DECLARED_BUILTIN (olddecl))
/* Don't warn about an extern followed by a definition. */
&& !(DECL_EXTERNAL (olddecl) && !DECL_EXTERNAL (newdecl))
@@ -2576,7 +2576,7 @@ merge_decls (tree newdecl, tree olddecl, tree newtype, tree oldtype)
|| DECL_DISREGARD_INLINE_LIMITS (olddecl));
}
- if (DECL_BUILT_IN (olddecl))
+ if (fndecl_built_in_p (olddecl))
{
/* If redeclaring a builtin function, it stays built in.
But it gets tagged as having been declared. */
@@ -2840,7 +2840,7 @@ warn_if_shadowing (tree new_decl)
new_decl);
}
else if (TREE_CODE (old_decl) == FUNCTION_DECL
- && DECL_BUILT_IN (old_decl))
+ && fndecl_built_in_p (old_decl))
{
warning (OPT_Wshadow, "declaration of %q+D shadows "
"a built-in function", new_decl);
@@ -2953,7 +2953,7 @@ pushdecl (tree x)
thistype = TREE_TYPE (b_use->decl);
b_use->u.type = TREE_TYPE (b_use->decl);
if (TREE_CODE (b_use->decl) == FUNCTION_DECL
- && DECL_BUILT_IN (b_use->decl))
+ && fndecl_built_in_p (b_use->decl))
thistype
= build_type_attribute_variant (thistype,
TYPE_ATTRIBUTES
@@ -3057,7 +3057,8 @@ pushdecl (tree x)
else
thistype = type;
b->u.type = TREE_TYPE (b->decl);
- if (TREE_CODE (b->decl) == FUNCTION_DECL && DECL_BUILT_IN (b->decl))
+ if (TREE_CODE (b->decl) == FUNCTION_DECL
+ && fndecl_built_in_p (b->decl))
thistype
= build_type_attribute_variant (thistype,
TYPE_ATTRIBUTES (b->u.type));
@@ -3408,7 +3409,7 @@ implicitly_declare (location_t loc, tree functionid)
in the external scope because they're pushed before the file
scope gets created. Catch this here and rebind them into the
file scope. */
- if (!DECL_BUILT_IN (decl) && DECL_IS_BUILTIN (decl))
+ if (!fndecl_built_in_p (decl) && DECL_IS_BUILTIN (decl))
{
bind (functionid, decl, file_scope,
/*invisible=*/false, /*nested=*/true,
@@ -3429,7 +3430,7 @@ implicitly_declare (location_t loc, tree functionid)
implicit_decl_warning (loc, functionid, decl);
C_DECL_IMPLICIT (decl) = 1;
}
- if (DECL_BUILT_IN (decl))
+ if (fndecl_built_in_p (decl))
{
newtype = build_type_attribute_variant (newtype,
TYPE_ATTRIBUTES
@@ -3445,7 +3446,7 @@ implicitly_declare (location_t loc, tree functionid)
if (header != NULL && warned)
{
rich_location richloc (line_table, loc);
- maybe_add_include_fixit (&richloc, header);
+ maybe_add_include_fixit (&richloc, header, true);
inform (&richloc,
"include %qs or provide a declaration of %qD",
header, decl);
diff --git a/gcc/c/c-objc-common.c b/gcc/c/c-objc-common.c
index 238af199ab5..12e777a4845 100644
--- a/gcc/c/c-objc-common.c
+++ b/gcc/c/c-objc-common.c
@@ -161,7 +161,8 @@ c_tree_printer (pretty_printer *pp, text_info *text, const char *spec,
{
t = va_arg (*text->args_ptr, tree);
if (set_locus)
- text->set_location (0, DECL_SOURCE_LOCATION (t), true);
+ text->set_location (0, DECL_SOURCE_LOCATION (t),
+ SHOW_RANGE_WITH_CARET);
}
switch (*spec)
diff --git a/gcc/c/c-parser.c b/gcc/c/c-parser.c
index 0d5dbea8f67..28384dfe913 100644
--- a/gcc/c/c-parser.c
+++ b/gcc/c/c-parser.c
@@ -9175,8 +9175,7 @@ c_parser_postfix_expression_after_primary (c_parser *parser,
sizeof_arg,
sizeof_ptr_memacc_comptypes);
if (TREE_CODE (expr.value) == FUNCTION_DECL
- && DECL_BUILT_IN_CLASS (expr.value) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (expr.value) == BUILT_IN_MEMSET
+ && fndecl_built_in_p (expr.value, BUILT_IN_MEMSET)
&& vec_safe_length (exprlist) == 3)
{
tree arg0 = (*exprlist)[0];
@@ -9194,8 +9193,7 @@ c_parser_postfix_expression_after_primary (c_parser *parser,
expr.original_code = ERROR_MARK;
if (TREE_CODE (expr.value) == INTEGER_CST
&& TREE_CODE (orig_expr.value) == FUNCTION_DECL
- && DECL_BUILT_IN_CLASS (orig_expr.value) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (orig_expr.value) == BUILT_IN_CONSTANT_P)
+ && fndecl_built_in_p (orig_expr.value, BUILT_IN_CONSTANT_P))
expr.original_code = C_MAYBE_CONST_EXPR;
expr.original_type = NULL;
if (exprlist)
diff --git a/gcc/c/c-tree.h b/gcc/c/c-tree.h
index ae1a1e60d4b..017c01c592c 100644
--- a/gcc/c/c-tree.h
+++ b/gcc/c/c-tree.h
@@ -102,7 +102,7 @@ along with GCC; see the file COPYING3. If not see
#define C_DECL_ISNT_PROTOTYPE(EXP) \
(EXP == 0 \
|| (!prototype_p (TREE_TYPE (EXP)) \
- && !DECL_BUILT_IN (EXP)))
+ && !fndecl_built_in_p (EXP)))
/* For FUNCTION_TYPE, a hidden list of types of arguments. The same as
TYPE_ARG_TYPES for functions with prototypes, but created for functions
diff --git a/gcc/c/c-typeck.c b/gcc/c/c-typeck.c
index 54c7967a06b..5f8df12564d 100644
--- a/gcc/c/c-typeck.c
+++ b/gcc/c/c-typeck.c
@@ -3108,9 +3108,7 @@ build_function_call_vec (location_t loc, vec<location_t> arg_loc,
argarray = vec_safe_address (params);
/* Check that arguments to builtin functions match the expectations. */
- if (fundecl
- && DECL_BUILT_IN (fundecl)
- && DECL_BUILT_IN_CLASS (fundecl) == BUILT_IN_NORMAL
+ if (fundecl && fndecl_built_in_p (fundecl, BUILT_IN_NORMAL)
&& !check_builtin_function_arguments (loc, arg_loc, fundecl, nargs,
argarray))
return error_mark_node;
@@ -3233,8 +3231,7 @@ convert_arguments (location_t loc, vec<location_t> arg_loc, tree typelist,
precision should be removed (classification) or not
(comparison). */
if (type_generic
- && DECL_BUILT_IN (fundecl)
- && DECL_BUILT_IN_CLASS (fundecl) == BUILT_IN_NORMAL)
+ && fndecl_built_in_p (fundecl, BUILT_IN_NORMAL))
{
switch (DECL_FUNCTION_CODE (fundecl))
{
diff --git a/gcc/c/gimple-parser.c b/gcc/c/gimple-parser.c
index 1be5d14dc2d..ee2146f8a26 100644
--- a/gcc/c/gimple-parser.c
+++ b/gcc/c/gimple-parser.c
@@ -450,6 +450,7 @@ c_parser_gimple_statement (c_parser *parser, gimple_seq *seq)
gimple-binary-expression:
gimple-unary-expression * gimple-unary-expression
+ gimple-unary-expression __MULT_HIGHPART gimple-unary-expression
gimple-unary-expression / gimple-unary-expression
gimple-unary-expression % gimple-unary-expression
gimple-unary-expression + gimple-unary-expression
@@ -544,6 +545,16 @@ c_parser_gimple_binary_expression (c_parser *parser)
case CPP_OR_OR:
c_parser_error (parser, "%<||%> not valid in GIMPLE");
return ret;
+ case CPP_NAME:
+ {
+ tree id = c_parser_peek_token (parser)->value;
+ if (strcmp (IDENTIFIER_POINTER (id), "__MULT_HIGHPART") == 0)
+ {
+ code = MULT_HIGHPART_EXPR;
+ break;
+ }
+ }
+ /* Fallthru. */
default:
/* Not a binary expression. */
return lhs;
diff --git a/gcc/calls.c b/gcc/calls.c
index 0fb10b182b1..e9660b62424 100644
--- a/gcc/calls.c
+++ b/gcc/calls.c
@@ -715,7 +715,7 @@ gimple_alloca_call_p (const gimple *stmt)
return false;
fndecl = gimple_call_fndecl (stmt);
- if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
switch (DECL_FUNCTION_CODE (fndecl))
{
CASE_BUILT_IN_ALLOCA:
@@ -1222,8 +1222,11 @@ alloc_max_size (void)
if (alloc_object_size_limit)
return alloc_object_size_limit;
- alloc_object_size_limit
- = build_int_cst (size_type_node, warn_alloc_size_limit);
+ HOST_WIDE_INT limit = warn_alloc_size_limit;
+ if (limit == HOST_WIDE_INT_MAX)
+ limit = tree_to_shwi (TYPE_MAX_VALUE (ptrdiff_type_node));
+
+ alloc_object_size_limit = build_int_cst (size_type_node, limit);
return alloc_object_size_limit;
}
@@ -1542,10 +1545,10 @@ get_attr_nonstring_decl (tree expr, tree *ref)
void
maybe_warn_nonstring_arg (tree fndecl, tree exp)
{
- if (!fndecl || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
+ if (!fndecl || !fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
return;
- if (TREE_NO_WARNING (exp))
+ if (TREE_NO_WARNING (exp) || !warn_stringop_overflow)
return;
unsigned nargs = call_expr_nargs (exp);
@@ -1573,7 +1576,9 @@ maybe_warn_nonstring_arg (tree fndecl, tree exp)
the range of their known or possible lengths and use it
conservatively as the bound for the unbounded function,
and to adjust the range of the bound of the bounded ones. */
- for (unsigned argno = 0; argno < nargs && !*lenrng; argno ++)
+ for (unsigned argno = 0;
+ argno < MIN (nargs, 2)
+ && !(lenrng[1] && TREE_CODE (lenrng[1]) == INTEGER_CST); argno++)
{
tree arg = CALL_EXPR_ARG (exp, argno);
if (!get_attr_nonstring_decl (arg))
@@ -1585,12 +1590,12 @@ maybe_warn_nonstring_arg (tree fndecl, tree exp)
case BUILT_IN_STRNCAT:
case BUILT_IN_STPNCPY:
case BUILT_IN_STRNCPY:
- if (2 < nargs)
+ if (nargs > 2)
bound = CALL_EXPR_ARG (exp, 2);
break;
case BUILT_IN_STRNDUP:
- if (1 < nargs)
+ if (nargs > 1)
bound = CALL_EXPR_ARG (exp, 1);
break;
@@ -1600,7 +1605,7 @@ maybe_warn_nonstring_arg (tree fndecl, tree exp)
if (!get_attr_nonstring_decl (arg))
get_range_strlen (arg, lenrng);
- if (1 < nargs)
+ if (nargs > 1)
bound = CALL_EXPR_ARG (exp, 1);
break;
}
@@ -1640,11 +1645,9 @@ maybe_warn_nonstring_arg (tree fndecl, tree exp)
}
}
- if (*lenrng)
+ if (lenrng[1] && TREE_CODE (lenrng[1]) == INTEGER_CST)
{
/* Add one for the nul. */
- lenrng[0] = const_binop (PLUS_EXPR, TREE_TYPE (lenrng[0]),
- lenrng[0], size_one_node);
lenrng[1] = const_binop (PLUS_EXPR, TREE_TYPE (lenrng[1]),
lenrng[1], size_one_node);
diff --git a/gcc/cfg.c b/gcc/cfg.c
index 6d55516adad..7be89d40604 100644
--- a/gcc/cfg.c
+++ b/gcc/cfg.c
@@ -79,6 +79,8 @@ init_flow (struct function *the_fun)
= EXIT_BLOCK_PTR_FOR_FN (the_fun);
EXIT_BLOCK_PTR_FOR_FN (the_fun)->prev_bb
= ENTRY_BLOCK_PTR_FOR_FN (the_fun);
+ the_fun->cfg->edge_flags_allocated = EDGE_ALL_FLAGS;
+ the_fun->cfg->bb_flags_allocated = BB_ALL_FLAGS;
}
/* Helper function for remove_edge and clear_edges. Frees edge structure
diff --git a/gcc/cfg.h b/gcc/cfg.h
index 0953456782b..9fff135d11f 100644
--- a/gcc/cfg.h
+++ b/gcc/cfg.h
@@ -74,6 +74,10 @@ struct GTY(()) control_flow_graph {
/* Maximal count of BB in function. */
profile_count count_max;
+
+ /* Dynamically allocated edge/bb flags. */
+ int edge_flags_allocated;
+ int bb_flags_allocated;
};
@@ -121,4 +125,60 @@ extern basic_block get_bb_copy (basic_block);
void set_loop_copy (struct loop *, struct loop *);
struct loop *get_loop_copy (struct loop *);
+/* Generic RAII class to allocate a bit from storage of integer type T.
+   The allocated bit is accessible as a mask with the single bit set
+ via the conversion operator to T. */
+
+template <class T>
+class auto_flag
+{
+public:
+  /* Static assert that T is an integer type of at most HOST_WIDE_INT precision.  */
+ auto_flag (T *sptr)
+ {
+ m_sptr = sptr;
+ int free_bit = ffs_hwi (~*sptr);
+ /* If there are no unset bits... */
+ if (free_bit == 0)
+ gcc_unreachable ();
+ m_flag = HOST_WIDE_INT_1U << (free_bit - 1);
+ /* ...or if T is signed and thus the complement is sign-extended,
+	 check if we ran out of bits.  We could spare ourselves this check
+	 if we could use C++11 std::make_unsigned<T>::type to pass
+ ~*sptr to ffs_hwi. */
+ if (m_flag == 0)
+ gcc_unreachable ();
+ gcc_checking_assert ((*sptr & m_flag) == 0);
+ *sptr |= m_flag;
+ }
+ ~auto_flag ()
+ {
+ gcc_checking_assert ((*m_sptr & m_flag) == m_flag);
+ *m_sptr &= ~m_flag;
+ }
+ operator T () const { return m_flag; }
+private:
+ T *m_sptr;
+ T m_flag;
+};
+
+/* RAII class to allocate an edge flag for temporary use. You have
+ to clear the flag from all edges when you are finished using it. */
+
+class auto_edge_flag : public auto_flag<int>
+{
+public:
+ auto_edge_flag (function *fun)
+ : auto_flag (&fun->cfg->edge_flags_allocated) {}
+};
+
+/* RAII class to allocate a bb flag for temporary use. You have
+   to clear the flag from all basic blocks when you are finished using it.  */
+class auto_bb_flag : public auto_flag<int>
+{
+public:
+ auto_bb_flag (function *fun)
+ : auto_flag (&fun->cfg->bb_flags_allocated) {}
+};
+
#endif /* GCC_CFG_H */
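
A minimal usage sketch of the new RAII flag classes (hypothetical helper; the
real call sites are the verify_loop_structure and dfs_enumerate_from hunks
further down).  Note the destructor only returns the bit to the allocator; as
the comments above require, the user must first clear it from every block:

    static void
    auto_bb_flag_example (function *fn)
    {
      auto_bb_flag seen (fn);	/* Grab a free bit from bb_flags_allocated.  */

      basic_block bb;
      FOR_EACH_BB_FN (bb, fn)
	bb->flags |= seen;	/* Conversion operator yields the bit mask.  */

      /* ... use (bb->flags & seen) as a visited test ...  */

      FOR_EACH_BB_FN (bb, fn)
	bb->flags &= ~seen;	/* Must be cleared before SEEN is destroyed.  */
    }
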
diff --git a/gcc/cfganal.c b/gcc/cfganal.c
index a901b3f3f2c..3b80758e8f2 100644
--- a/gcc/cfganal.c
+++ b/gcc/cfganal.c
@@ -1057,8 +1057,121 @@ pre_and_rev_post_order_compute (int *pre_order, int *rev_post_order,
return pre_order_num;
}
+/* Unlike pre_and_rev_post_order_compute we fill rev_post_order backwards
+ so iterating in RPO order needs to start with rev_post_order[n - 1]
+ going to rev_post_order[0]. If FOR_ITERATION is true then try to
+ make CFG cycles fit into small contiguous regions of the RPO order.
+ When FOR_ITERATION is true this requires up-to-date loop structures. */
+
+int
+rev_post_order_and_mark_dfs_back_seme (struct function *fn, edge entry,
+ bitmap exit_bbs, bool for_iteration,
+ int *rev_post_order)
+{
+ int pre_order_num = 0;
+ int rev_post_order_num = 0;
+
+ /* Allocate stack for back-tracking up CFG. Worst case we need
+ O(n^2) edges but the following should suffice in practice without
+ a need to re-allocate. */
+ auto_vec<edge, 20> stack (2 * n_basic_blocks_for_fn (fn));
+
+ int *pre = XNEWVEC (int, 2 * last_basic_block_for_fn (fn));
+ int *post = pre + last_basic_block_for_fn (fn);
+
+ /* BB flag to track nodes that have been visited. */
+ auto_bb_flag visited (fn);
+ /* BB flag to track which nodes have post[] assigned to avoid
+ zeroing post. */
+ auto_bb_flag post_assigned (fn);
+
+ /* Push the first edge on to the stack. */
+ stack.quick_push (entry);
+
+ while (!stack.is_empty ())
+ {
+ basic_block src;
+ basic_block dest;
+
+ /* Look at the edge on the top of the stack. */
+ int idx = stack.length () - 1;
+ edge e = stack[idx];
+ src = e->src;
+ dest = e->dest;
+ e->flags &= ~EDGE_DFS_BACK;
+
+ /* Check if the edge destination has been visited yet. */
+ if (! bitmap_bit_p (exit_bbs, dest->index)
+ && ! (dest->flags & visited))
+ {
+ /* Mark that we have visited the destination. */
+ dest->flags |= visited;
+
+ pre[dest->index] = pre_order_num++;
+
+ if (EDGE_COUNT (dest->succs) > 0)
+ {
+ /* Since the DEST node has been visited for the first
+ time, check its successors. */
+ /* Push the edge vector in reverse to match previous behavior. */
+ stack.reserve (EDGE_COUNT (dest->succs));
+ for (int i = EDGE_COUNT (dest->succs) - 1; i >= 0; --i)
+ stack.quick_push (EDGE_SUCC (dest, i));
+ /* Generalize to handle more successors? */
+ if (for_iteration
+ && EDGE_COUNT (dest->succs) == 2)
+ {
+ edge &e1 = stack[stack.length () - 2];
+ if (loop_exit_edge_p (e1->src->loop_father, e1))
+ std::swap (e1, stack.last ());
+ }
+ }
+ else
+ {
+ /* There are no successors for the DEST node so assign
+ its reverse completion number. */
+ post[dest->index] = rev_post_order_num;
+ dest->flags |= post_assigned;
+ rev_post_order[rev_post_order_num] = dest->index;
+ rev_post_order_num++;
+ }
+ }
+ else
+ {
+ if (dest->flags & visited
+ && src != entry->src
+ && pre[src->index] >= pre[dest->index]
+ && !(dest->flags & post_assigned))
+ e->flags |= EDGE_DFS_BACK;
+
+ if (idx != 0 && stack[idx - 1]->src != src)
+ {
+ /* There are no more successors for the SRC node
+ so assign its reverse completion number. */
+ post[src->index] = rev_post_order_num;
+ src->flags |= post_assigned;
+ rev_post_order[rev_post_order_num] = src->index;
+ rev_post_order_num++;
+ }
+
+ stack.pop ();
+ }
+ }
+
+ XDELETEVEC (pre);
+
+ /* Clear the temporarily allocated flags. */
+ for (int i = 0; i < rev_post_order_num; ++i)
+ BASIC_BLOCK_FOR_FN (fn, rev_post_order[i])->flags
+ &= ~(post_assigned|visited);
+
+ return rev_post_order_num;
+}
+
+
+
/* Compute the depth first search order on the _reverse_ graph and
- store in the array DFS_ORDER, marking the nodes visited in VISITED.
+ store it in the array DFS_ORDER, marking the nodes visited in VISITED.
Returns the number of nodes visited.
The computation is split into three pieces:
@@ -1145,41 +1258,12 @@ dfs_enumerate_from (basic_block bb, int reverse,
{
basic_block *st, lbb;
int sp = 0, tv = 0;
- unsigned size;
-
- /* A bitmap to keep track of visited blocks. Allocating it each time
- this function is called is not possible, since dfs_enumerate_from
- is often used on small (almost) disjoint parts of cfg (bodies of
- loops), and allocating a large sbitmap would lead to quadratic
- behavior. */
- static sbitmap visited;
- static unsigned v_size;
-
-#define MARK_VISITED(BB) (bitmap_set_bit (visited, (BB)->index))
-#define UNMARK_VISITED(BB) (bitmap_clear_bit (visited, (BB)->index))
-#define VISITED_P(BB) (bitmap_bit_p (visited, (BB)->index))
-
- /* Resize the VISITED sbitmap if necessary. */
- size = last_basic_block_for_fn (cfun);
- if (size < 10)
- size = 10;
- if (!visited)
- {
-      visited = sbitmap_alloc (size);
-      bitmap_clear (visited);
-      v_size = size;
-    }
-  else if (v_size < size)
-    {
-      /* Ensure that we increase the size of the sbitmap exponentially. */
-      if (2 * v_size > size)
-	size = 2 * v_size;
-
-      visited = sbitmap_resize (visited, size, 0);
-      v_size = size;
-    }
+  auto_bb_flag visited (cfun);
+#define MARK_VISITED(BB) ((BB)->flags |= visited)
+#define UNMARK_VISITED(BB) ((BB)->flags &= ~visited)
+#define VISITED_P(BB) (((BB)->flags & visited) != 0)
st = XNEWVEC (basic_block, rslt_max);
rslt[tv++] = st[sp++] = bb;
diff --git a/gcc/cfganal.h b/gcc/cfganal.h
index 101124f1f8e..122c665f7f6 100644
--- a/gcc/cfganal.h
+++ b/gcc/cfganal.h
@@ -67,6 +67,8 @@ extern void inverted_post_order_compute (vec<int> *postorder, sbitmap *start_poi
extern int pre_and_rev_post_order_compute_fn (struct function *,
int *, int *, bool);
extern int pre_and_rev_post_order_compute (int *, int *, bool);
+extern int rev_post_order_and_mark_dfs_back_seme (struct function *, edge,
+ bitmap, bool, int *);
extern int dfs_enumerate_from (basic_block, int,
bool (*)(const_basic_block, const void *),
basic_block *, int, const void *);
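
A sketch of how the new cfganal.c entry point above might be driven
(hypothetical caller, not part of this patch; the array is filled backwards,
so the walk runs from index n - 1 down to 0):

    static void
    walk_in_rpo (function *fn)
    {
      int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (fn));
      auto_bitmap exit_bbs;
      bitmap_set_bit (exit_bbs, EXIT_BLOCK);

      int n = rev_post_order_and_mark_dfs_back_seme
	(fn, single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (fn)), exit_bbs,
	 /*for_iteration=*/true, rpo);

      for (int i = n - 1; i >= 0; --i)	/* Reverse iteration gives RPO.  */
	{
	  basic_block bb = BASIC_BLOCK_FOR_FN (fn, rpo[i]);
	  if (dump_file)
	    fprintf (dump_file, "visiting bb %d\n", bb->index);
	  /* EDGE_DFS_BACK is now up to date on BB's outgoing edges.  */
	}

      XDELETEVEC (rpo);
    }
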
diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c
index 3c5b30b79f8..2d3111da25d 100644
--- a/gcc/cfgexpand.c
+++ b/gcc/cfgexpand.c
@@ -2616,7 +2616,7 @@ expand_call_stmt (gcall *stmt)
exp = build_vl_exp (CALL_EXPR, gimple_call_num_args (stmt) + 3);
CALL_EXPR_FN (exp) = gimple_call_fn (stmt);
- builtin_p = decl && DECL_BUILT_IN (decl);
+ builtin_p = decl && fndecl_built_in_p (decl);
/* If this is not a builtin function, the function type through which the
call is made may be different from the type of the function. */
@@ -2655,7 +2655,7 @@ expand_call_stmt (gcall *stmt)
CALL_EXPR_MUST_TAIL_CALL (exp) = gimple_call_must_tail_p (stmt);
CALL_EXPR_RETURN_SLOT_OPT (exp) = gimple_call_return_slot_opt_p (stmt);
if (decl
- && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL
+ && fndecl_built_in_p (decl, BUILT_IN_NORMAL)
&& ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (decl)))
CALL_ALLOCA_FOR_VAR_P (exp) = gimple_call_alloca_for_var_p (stmt);
else
@@ -3239,7 +3239,7 @@ expand_asm_stmt (gasm *stmt)
may insert further instructions into the same basic block after
asm goto and if we don't do this, insertion of instructions on
the fallthru edge might misbehave. See PR58670. */
- if (fallthru_bb && label_to_block_fn (cfun, label) == fallthru_bb)
+ if (fallthru_bb && label_to_block (cfun, label) == fallthru_bb)
{
if (fallthru_label == NULL_RTX)
fallthru_label = gen_label_rtx ();
diff --git a/gcc/cfgloop.c b/gcc/cfgloop.c
index e27cd39259c..0917b716da7 100644
--- a/gcc/cfgloop.c
+++ b/gcc/cfgloop.c
@@ -1539,6 +1539,7 @@ verify_loop_structure (void)
/* Check irreducible loops. */
if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
{
+ auto_edge_flag saved_irr_mask (cfun);
/* Record old info. */
auto_sbitmap irreds (last_basic_block_for_fn (cfun));
FOR_EACH_BB_FN (bb, cfun)
@@ -1550,7 +1551,7 @@ verify_loop_structure (void)
bitmap_clear_bit (irreds, bb->index);
FOR_EACH_EDGE (e, ei, bb->succs)
if (e->flags & EDGE_IRREDUCIBLE_LOOP)
- e->flags |= EDGE_ALL_FLAGS + 1;
+ e->flags |= saved_irr_mask;
}
/* Recount it. */
@@ -1576,20 +1577,20 @@ verify_loop_structure (void)
FOR_EACH_EDGE (e, ei, bb->succs)
{
if ((e->flags & EDGE_IRREDUCIBLE_LOOP)
- && !(e->flags & (EDGE_ALL_FLAGS + 1)))
+ && !(e->flags & saved_irr_mask))
{
error ("edge from %d to %d should be marked irreducible",
e->src->index, e->dest->index);
err = 1;
}
else if (!(e->flags & EDGE_IRREDUCIBLE_LOOP)
- && (e->flags & (EDGE_ALL_FLAGS + 1)))
+ && (e->flags & saved_irr_mask))
{
error ("edge from %d to %d should not be marked irreducible",
e->src->index, e->dest->index);
err = 1;
}
- e->flags &= ~(EDGE_ALL_FLAGS + 1);
+ e->flags &= ~saved_irr_mask;
}
}
}
diff --git a/gcc/cgraph.c b/gcc/cgraph.c
index d19f1aacab8..148f29ea749 100644
--- a/gcc/cgraph.c
+++ b/gcc/cgraph.c
@@ -1559,8 +1559,7 @@ cgraph_update_edges_for_call_stmt_node (cgraph_node *node,
{
/* Keep calls marked as dead dead. */
if (new_stmt && is_gimple_call (new_stmt) && e->callee
- && DECL_BUILT_IN_CLASS (e->callee->decl) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (e->callee->decl) == BUILT_IN_UNREACHABLE)
+ && fndecl_built_in_p (e->callee->decl, BUILT_IN_UNREACHABLE))
{
node->get_edge (old_stmt)->set_call_stmt
(as_a <gcall *> (new_stmt));
@@ -3060,8 +3059,8 @@ cgraph_edge::verify_corresponds_to_fndecl (tree decl)
/* Optimizers can redirect unreachable calls or calls triggering undefined
behavior to builtin_unreachable. */
- if (DECL_BUILT_IN_CLASS (callee->decl) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (callee->decl) == BUILT_IN_UNREACHABLE)
+
+ if (fndecl_built_in_p (callee->decl, BUILT_IN_UNREACHABLE))
return false;
if (callee->former_clone_of != node->decl
@@ -3187,8 +3186,7 @@ cgraph_node::verify_node (void)
/* Optimized out calls are redirected to __builtin_unreachable. */
&& (e->count.nonzero_p ()
|| ! e->callee->decl
- || DECL_BUILT_IN_CLASS (e->callee->decl) != BUILT_IN_NORMAL
- || DECL_FUNCTION_CODE (e->callee->decl) != BUILT_IN_UNREACHABLE)
+ || !fndecl_built_in_p (e->callee->decl, BUILT_IN_UNREACHABLE))
&& count
== ENTRY_BLOCK_PTR_FOR_FN (DECL_STRUCT_FUNCTION (decl))->count
&& (!e->count.ipa_p ()
diff --git a/gcc/cgraphclones.c b/gcc/cgraphclones.c
index 6e84a31c1a5..0c0a94b04a3 100644
--- a/gcc/cgraphclones.c
+++ b/gcc/cgraphclones.c
@@ -222,7 +222,7 @@ build_function_decl_skip_args (tree orig_decl, bitmap args_to_skip,
DECL_VINDEX (new_decl) = NULL_TREE;
/* When signature changes, we need to clear builtin info. */
- if (DECL_BUILT_IN (new_decl)
+ if (fndecl_built_in_p (new_decl)
&& args_to_skip
&& !bitmap_empty_p (args_to_skip))
{
@@ -482,8 +482,7 @@ cgraph_node::create_clone (tree new_decl, profile_count prof_count,
version. The only exception is when the edge was proved to
be unreachable during the cloning procedure. */
if (!e->callee
- || DECL_BUILT_IN_CLASS (e->callee->decl) != BUILT_IN_NORMAL
- || DECL_FUNCTION_CODE (e->callee->decl) != BUILT_IN_UNREACHABLE)
+ || !fndecl_built_in_p (e->callee->decl, BUILT_IN_UNREACHABLE))
e->redirect_callee_duplicating_thunks (new_node);
}
new_node->expand_all_artificial_thunks ();
diff --git a/gcc/combine.c b/gcc/combine.c
index d322614ed57..a2649b6d5a1 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -6495,7 +6495,7 @@ simplify_if_then_else (rtx x)
pc_rtx, pc_rtx, 0, 0, 0);
if (reg_mentioned_p (from, false_rtx))
false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
- from, false_val),
+ from, false_val),
pc_rtx, pc_rtx, 0, 0, 0);
SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
@@ -9335,6 +9335,7 @@ if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
if (COMPARISON_P (cond0)
&& COMPARISON_P (cond1)
+ && SCALAR_INT_MODE_P (mode)
&& ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
&& rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
&& rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
@@ -9515,12 +9516,12 @@ known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
if (COMPARISON_P (x))
{
if (comparison_dominates_p (cond, code))
- return const_true_rtx;
+ return VECTOR_MODE_P (GET_MODE (x)) ? x : const_true_rtx;
code = reversed_comparison_code (x, NULL);
if (code != UNKNOWN
&& comparison_dominates_p (cond, code))
- return const0_rtx;
+ return CONST0_RTX (GET_MODE (x));
else
return x;
}
@@ -9563,7 +9564,7 @@ known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
/* We must simplify subreg here, before we lose track of the
original inner_mode. */
new_rtx = simplify_subreg (GET_MODE (x), r,
- inner_mode, SUBREG_BYTE (x));
+ inner_mode, SUBREG_BYTE (x));
if (new_rtx)
return new_rtx;
else
@@ -9588,7 +9589,7 @@ known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
/* We must simplify the zero_extend here, before we lose
track of the original inner_mode. */
new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
- r, inner_mode);
+ r, inner_mode);
if (new_rtx)
return new_rtx;
else
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 33fb9da1614..c4be3101fde 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1352,9 +1352,8 @@
fmov\\t%d0, %1
dup\\t%d0, %1"
[(set_attr "type" "neon_dup<q>,f_mcr,neon_dup<q>")
- (set_attr "simd" "yes,*,yes")
- (set_attr "fp" "*,yes,*")
- (set_attr "length" "4")]
+ (set_attr "length" "4")
+ (set_attr "arch" "simd,fp,simd")]
)
(define_insn "move_lo_quad_internal_<mode>"
@@ -1368,9 +1367,8 @@
fmov\\t%d0, %1
dup\\t%d0, %1"
[(set_attr "type" "neon_dup<q>,f_mcr,neon_dup<q>")
- (set_attr "simd" "yes,*,yes")
- (set_attr "fp" "*,yes,*")
- (set_attr "length" "4")]
+ (set_attr "length" "4")
+ (set_attr "arch" "simd,fp,simd")]
)
(define_insn "move_lo_quad_internal_be_<mode>"
@@ -1384,9 +1382,8 @@
fmov\\t%d0, %1
dup\\t%d0, %1"
[(set_attr "type" "neon_dup<q>,f_mcr,neon_dup<q>")
- (set_attr "simd" "yes,*,yes")
- (set_attr "fp" "*,yes,*")
- (set_attr "length" "4")]
+ (set_attr "length" "4")
+ (set_attr "arch" "simd,fp,simd")]
)
(define_insn "move_lo_quad_internal_be_<mode>"
@@ -1400,9 +1397,8 @@
fmov\\t%d0, %1
dup\\t%d0, %1"
[(set_attr "type" "neon_dup<q>,f_mcr,neon_dup<q>")
- (set_attr "simd" "yes,*,yes")
- (set_attr "fp" "*,yes,*")
- (set_attr "length" "4")]
+ (set_attr "length" "4")
+ (set_attr "arch" "simd,fp,simd")]
)
(define_expand "move_lo_quad_<mode>"
@@ -3114,8 +3110,7 @@
fmov\t%d0, %1
ldr\\t%d0, %1"
[(set_attr "type" "neon_move<q>, neon_from_gp, neon_load1_1reg")
- (set_attr "simd" "yes,*,yes")
- (set_attr "fp" "*,yes,*")]
+ (set_attr "arch" "simd,fp,simd")]
)
(define_insn "*aarch64_combinez_be<mode>"
@@ -3129,8 +3124,7 @@
fmov\t%d0, %1
ldr\\t%d0, %1"
[(set_attr "type" "neon_move<q>, neon_from_gp, neon_load1_1reg")
- (set_attr "simd" "yes,*,yes")
- (set_attr "fp" "*,yes,*")]
+ (set_attr "arch" "simd,fp,simd")]
)
(define_expand "aarch64_combine<mode>"
diff --git a/gcc/config/aarch64/aarch64-speculation.cc b/gcc/config/aarch64/aarch64-speculation.cc
index 2dd06ae24dd..3cd9ba09c11 100644
--- a/gcc/config/aarch64/aarch64-speculation.cc
+++ b/gcc/config/aarch64/aarch64-speculation.cc
@@ -25,7 +25,7 @@
#include "rtl.h"
#include "tree-pass.h"
#include "profile-count.h"
-#include "cfg.h"
+#include "backend.h"
#include "cfgbuild.h"
#include "print-rtl.h"
#include "cfgrtl.h"
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 1e8d8104c06..1de76e07547 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -10557,6 +10557,13 @@ aarch64_override_options_internal (struct gcc_options *opts)
&& opts->x_optimize >= aarch64_tune_params.prefetch->default_opt_level)
opts->x_flag_prefetch_loop_arrays = 1;
+ if (opts->x_aarch64_arch_string == NULL)
+ opts->x_aarch64_arch_string = selected_arch->name;
+ if (opts->x_aarch64_cpu_string == NULL)
+ opts->x_aarch64_cpu_string = selected_cpu->name;
+ if (opts->x_aarch64_tune_string == NULL)
+ opts->x_aarch64_tune_string = selected_tune->name;
+
aarch64_override_options_after_change_1 (opts);
}
@@ -15423,7 +15430,10 @@ aarch64_evpc_sve_tbl (struct expand_vec_perm_d *d)
machine_mode sel_mode = mode_for_int_vector (d->vmode).require ();
rtx sel = vec_perm_indices_to_rtx (sel_mode, d->perm);
- aarch64_expand_sve_vec_perm (d->target, d->op0, d->op1, sel);
+ if (d->one_vector_p)
+ emit_unspec2 (d->target, UNSPEC_TBL, d->op0, force_reg (sel_mode, sel));
+ else
+ aarch64_expand_sve_vec_perm (d->target, d->op0, d->op1, sel);
return true;
}
@@ -15461,7 +15471,7 @@ aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
return true;
if (d->vec_flags == VEC_SVE_DATA)
return aarch64_evpc_sve_tbl (d);
- else if (d->vec_flags == VEC_SVE_DATA)
+ else if (d->vec_flags == VEC_ADVSIMD)
return aarch64_evpc_tbl (d);
}
return false;
@@ -15476,7 +15486,8 @@ aarch64_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
struct expand_vec_perm_d d;
/* Check whether the mask can be applied to a single vector. */
- if (op0 && rtx_equal_p (op0, op1))
+ if (sel.ninputs () == 1
+ || (op0 && rtx_equal_p (op0, op1)))
d.one_vector_p = true;
else if (sel.all_from_input_p (0))
{
@@ -15927,13 +15938,17 @@ aarch64_expand_movmem (rtx *operands)
/* Convert n to bits to make the rest of the code simpler. */
n = n * BITS_PER_UNIT;
+ /* Maximum amount to copy in one go. The AArch64 back-end has integer modes
+ larger than TImode, but we should not use them for loads/stores here. */
+ const int copy_limit = GET_MODE_BITSIZE (TImode);
+
while (n > 0)
{
/* Find the largest mode in which to do the copy without over-reading
or writing. */
opt_scalar_int_mode mode_iter;
FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
- if (GET_MODE_BITSIZE (mode_iter.require ()) <= n)
+ if (GET_MODE_BITSIZE (mode_iter.require ()) <= MIN (n, copy_limit))
cur_mode = mode_iter.require ();
gcc_assert (cur_mode != BLKmode);
@@ -15947,10 +15962,10 @@ aarch64_expand_movmem (rtx *operands)
cheaper, i.e. fewer instructions to do so. For instance, doing a 15
byte copy it's more efficient to do two overlapping 8 byte copies than
8 + 6 + 1. */
- next_mode = smallest_mode_for_size (n, MODE_INT);
- int n_bits = GET_MODE_BITSIZE (next_mode).to_constant ();
- if (n > 0 && n_bits > n && n_bits <= 8 * BITS_PER_UNIT)
+ if (n > 0 && n <= 8 * BITS_PER_UNIT)
{
+ next_mode = smallest_mode_for_size (n, MODE_INT);
+ int n_bits = GET_MODE_BITSIZE (next_mode).to_constant ();
src = aarch64_move_pointer (src, (n - n_bits) / BITS_PER_UNIT);
dst = aarch64_move_pointer (dst, (n - n_bits) / BITS_PER_UNIT);
n = n_bits;
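
The movmem change above caps each individual copy at TImode (16 bytes) and finishes a short tail with one overlapping copy instead of a cascade of narrower ones. The standalone C program below is only an illustrative sketch of that chunk-selection logic, written against the byte sizes mentioned in the comments; it is not GCC code and uses no GCC APIs.

/* Sketch: which copy chunks an expander like the one above would emit.
   Build and run, e.g.:  cc -o chunks chunks.c && ./chunks 15  */
#include <stdio.h>
#include <stdlib.h>

int
main (int argc, char **argv)
{
  int n = argc > 1 ? atoi (argv[1]) : 15;   /* bytes left to copy */
  int offset = 0;
  const int copy_limit = 16;                /* TImode, in bytes */

  while (n > 0)
    {
      /* Largest power-of-two chunk no bigger than n or the TImode cap.  */
      int chunk = 1;
      while (chunk * 2 <= n && chunk * 2 <= copy_limit)
        chunk *= 2;

      printf ("copy %2d bytes at offset %d\n", chunk, offset);
      offset += chunk;
      n -= chunk;

      /* Small leftover: round it up to the next chunk size and shift the
         final copy back so it overlaps the previous one (a 15-byte copy
         becomes two overlapping 8-byte copies rather than 8 + 4 + 2 + 1).  */
      if (n > 0 && n <= 8)
        {
          int up = 1;
          while (up < n)
            up *= 2;
          offset -= up - n;
          n = up;
        }
    }
  return 0;
}
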
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 22d20eae5c5..955769a64d2 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -259,41 +259,51 @@
;; FP or SIMD registers then the pattern predicate should include TARGET_FLOAT
;; or TARGET_SIMD.
-;; Attribute that specifies whether or not the instruction touches fp
-;; registers. When this is set to yes for an alternative, that alternative
-;; will be disabled when !TARGET_FLOAT.
-(define_attr "fp" "no,yes" (const_string "no"))
+;; Attribute describing the architecture required to support the instruction (or
+;; alternative). This attribute is used to compute the "enabled" attribute; use
+;; the value "any" to enable an alternative in all cases.
-;; Attribute that specifies whether or not the instruction touches half
-;; precision fp registers. When this is set to yes for an alternative,
-;; that alternative will be disabled when !TARGET_FP_F16INST.
-(define_attr "fp16" "no,yes" (const_string "no"))
+(define_enum "arches" [ any fp simd sve fp16])
-;; Attribute that specifies whether or not the instruction touches simd
-;; registers. When this is set to yes for an alternative, that alternative
-;; will be disabled when !TARGET_SIMD.
-(define_attr "simd" "no,yes" (const_string "no"))
+(define_enum_attr "arch" "arches" (const_string "any"))
-;; Attribute that specifies whether or not the instruction uses SVE.
-;; When this is set to yes for an alternative, that alternative
-;; will be disabled when !TARGET_SVE.
-(define_attr "sve" "no,yes" (const_string "no"))
+;; [For compatibility with Arm in pipeline models]
+;; Attribute that specifies whether or not the instruction touches fp
+;; registers.
+;; Note that this attribute is not used anywhere in either the arm or aarch64
+;; backends except in the scheduling description for xgene1. In that
+;; scheduling description this attribute is used to subclass the load_4 and
+;; load_8 types.
+(define_attr "fp" "no,yes"
+ (if_then_else
+ (eq_attr "arch" "fp")
+ (const_string "yes")
+ (const_string "no")))
+
+(define_attr "arch_enabled" "no,yes"
+ (if_then_else
+ (ior
+ (eq_attr "arch" "any")
+
+ (and (eq_attr "arch" "fp")
+ (match_test "TARGET_FLOAT"))
+
+ (and (eq_attr "arch" "simd")
+ (match_test "TARGET_SIMD"))
+
+ (and (eq_attr "arch" "fp16")
+ (match_test "TARGET_FP_F16INST"))
+
+ (and (eq_attr "arch" "sve")
+ (match_test "TARGET_SVE")))
+ (const_string "yes")
+ (const_string "no")))
;; Attribute that controls whether an alternative is enabled or not.
;; Currently it is only used to disable alternatives which touch fp or simd
-;; registers when -mgeneral-regs-only is specified.
-(define_attr "enabled" "no,yes"
- (cond [(ior
- (and (eq_attr "fp" "yes")
- (eq (symbol_ref "TARGET_FLOAT") (const_int 0)))
- (and (eq_attr "simd" "yes")
- (eq (symbol_ref "TARGET_SIMD") (const_int 0)))
- (and (eq_attr "fp16" "yes")
- (eq (symbol_ref "TARGET_FP_F16INST") (const_int 0)))
- (and (eq_attr "sve" "yes")
- (eq (symbol_ref "TARGET_SVE") (const_int 0))))
- (const_string "no")
- ] (const_string "yes")))
+;; registers when -mgeneral-regs-only is specified or which require special
+;; architecture support.
+(define_attr "enabled" "no,yes" (attr "arch_enabled"))
;; Attribute that specifies whether we are dealing with a branch to a
;; label that is far away, i.e. further away than the maximum/minimum
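
The "arch" attribute above replaces the separate "fp", "fp16", "simd" and "sve" attributes, and "enabled" is now derived from it through "arch_enabled". As a rough C restatement of what the per-alternative check amounts to (hypothetical names and TARGET_* stand-ins, not the code genattrtab actually generates):

/* Sketch of the "arch" -> "enabled" computation for one insn's alternatives.  */
#include <stdbool.h>
#include <stdio.h>

enum arch_attr { ARCH_ANY, ARCH_FP, ARCH_SIMD, ARCH_SVE, ARCH_FP16 };

/* Stand-ins for the real TARGET_* macros of a particular configuration.  */
static bool target_float = true;
static bool target_simd = false;
static bool target_fp_f16inst = false;
static bool target_sve = false;

static bool
alternative_enabled_p (enum arch_attr arch)
{
  switch (arch)
    {
    case ARCH_ANY:  return true;
    case ARCH_FP:   return target_float;
    case ARCH_SIMD: return target_simd;
    case ARCH_FP16: return target_fp_f16inst;
    case ARCH_SVE:  return target_sve;
    }
  return false;
}

int
main (void)
{
  /* The three alternatives of, say, (set_attr "arch" "simd,fp,simd").  */
  enum arch_attr alts[] = { ARCH_SIMD, ARCH_FP, ARCH_SIMD };
  for (int i = 0; i < 3; i++)
    printf ("alternative %d enabled: %s\n", i,
            alternative_enabled_p (alts[i]) ? "yes" : "no");
  return 0;
}
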
@@ -1009,8 +1019,7 @@
;; The "mov_imm" type for CNT is just a placeholder.
[(set_attr "type" "mov_reg,mov_imm,neon_move,mov_imm,load_4,load_4,store_4,
store_4,neon_to_gp<q>,neon_from_gp<q>,neon_dup")
- (set_attr "simd" "*,*,yes,*,*,*,*,*,yes,yes,yes")
- (set_attr "sve" "*,*,*,yes,*,*,*,*,*,*,*")]
+ (set_attr "arch" "*,*,simd,sve,*,*,*,*,simd,simd,simd")]
)
(define_expand "mov<mode>"
@@ -1069,9 +1078,7 @@
;; The "mov_imm" type for CNT is just a placeholder.
[(set_attr "type" "mov_reg,mov_reg,mov_reg,mov_imm,mov_imm,mov_imm,load_4,
load_4,store_4,store_4,adr,adr,f_mcr,f_mrc,fmov,neon_move")
- (set_attr "fp" "*,*,*,*,*,*,*,yes,*,yes,*,*,yes,yes,yes,*")
- (set_attr "simd" "*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,yes")
- (set_attr "sve" "*,*,*,*,*,yes,*,*,*,*,*,*,*,*,*,*")]
+ (set_attr "arch" "*,*,*,*,*,sve,*,fp,*,fp,*,*,fp,fp,fp,simd")]
)
(define_insn_and_split "*movdi_aarch64"
@@ -1108,9 +1115,7 @@
[(set_attr "type" "mov_reg,mov_reg,mov_reg,mov_imm,mov_imm,mov_imm,mov_imm,
load_8,load_8,store_8,store_8,adr,adr,f_mcr,f_mrc,fmov,
neon_move")
- (set_attr "fp" "*,*,*,*,*,*,*,*,yes,*,yes,*,*,yes,yes,yes,*")
- (set_attr "simd" "*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,*,yes")
- (set_attr "sve" "*,*,*,*,*,*,yes,*,*,*,*,*,*,*,*,*,*")]
+ (set_attr "arch" "*,*,*,*,*,*,sve,*,fp,*,fp,*,*,fp,fp,fp,simd")]
)
(define_insn "insv_imm<mode>"
@@ -1163,8 +1168,7 @@
load_16,store_16,store_16,\
load_16,store_16")
(set_attr "length" "8,8,8,4,4,4,4,4,4")
- (set_attr "simd" "*,*,*,yes,*,*,*,*,*")
- (set_attr "fp" "*,*,*,*,*,*,*,yes,yes")]
+ (set_attr "arch" "*,*,*,simd,*,*,*,fp,fp")]
)
;; Split a TImode register-register or register-immediate move into
@@ -1218,8 +1222,7 @@
mov\\t%w0, %w1"
[(set_attr "type" "neon_move,f_mcr,neon_move,neon_to_gp, neon_move,fconsts, \
neon_move,f_loads,f_stores,load_4,store_4,mov_reg")
- (set_attr "simd" "yes,*,yes,yes,yes,*,yes,*,*,*,*,*")
- (set_attr "fp16" "*,yes,*,*,*,yes,*,*,*,*,*,*")]
+ (set_attr "arch" "simd,fp16,simd,simd,simd,fp16,simd,*,*,*,*,*")]
)
(define_insn "*movsf_aarch64"
@@ -1243,7 +1246,7 @@
[(set_attr "type" "neon_move,f_mcr,f_mrc,fmov,fconsts,neon_move,\
f_loads,f_stores,load_4,store_4,mov_reg,\
fconsts")
- (set_attr "simd" "yes,*,*,*,*,yes,*,*,*,*,*,*")]
+ (set_attr "arch" "simd,*,*,*,*,simd,*,*,*,*,*,*")]
)
(define_insn "*movdf_aarch64"
@@ -1267,7 +1270,7 @@
[(set_attr "type" "neon_move,f_mcr,f_mrc,fmov,fconstd,neon_move,\
f_loadd,f_stored,load_8,store_8,mov_reg,\
fconstd")
- (set_attr "simd" "yes,*,*,*,*,yes,*,*,*,*,*,*")]
+ (set_attr "arch" "simd,*,*,*,*,simd,*,*,*,*,*,*")]
)
(define_split
@@ -1312,7 +1315,7 @@
[(set_attr "type" "logic_reg,multiple,f_mcr,f_mrc,neon_move_q,f_mcr,\
f_loadd,f_stored,load_16,store_16,store_16")
(set_attr "length" "4,8,8,8,4,4,4,4,4,4,4")
- (set_attr "simd" "yes,*,*,*,yes,*,*,*,*,*,*")]
+ (set_attr "arch" "simd,*,*,*,simd,*,*,*,*,*,*")]
)
(define_split
@@ -1359,7 +1362,7 @@
ldp\\t%w0, %w2, %1
ldp\\t%s0, %s2, %1"
[(set_attr "type" "load_8,neon_load1_2reg")
- (set_attr "fp" "*,yes")]
+ (set_attr "arch" "*,fp")]
)
;; Storing different modes that can still be merged
@@ -1376,7 +1379,7 @@
ldp\\t%x0, %x2, %1
ldp\\t%d0, %d2, %1"
[(set_attr "type" "load_16,neon_load1_2reg")
- (set_attr "fp" "*,yes")]
+ (set_attr "arch" "*,fp")]
)
;; Operands 0 and 2 are tied together by the final condition; so we allow
@@ -1394,7 +1397,7 @@
stp\\t%w1, %w3, %0
stp\\t%s1, %s3, %0"
[(set_attr "type" "store_8,neon_store1_2reg")
- (set_attr "fp" "*,yes")]
+ (set_attr "arch" "*,fp")]
)
;; Storing different modes that can still be merged
@@ -1411,7 +1414,7 @@
stp\\t%x1, %x3, %0
stp\\t%d1, %d3, %0"
[(set_attr "type" "store_16,neon_store1_2reg")
- (set_attr "fp" "*,yes")]
+ (set_attr "arch" "*,fp")]
)
;; Load pair with post-index writeback. This is primarily used in function
@@ -1637,7 +1640,7 @@
* return aarch64_output_sve_addvl_addpl (operands[0], operands[1], operands[2]);"
;; The "alu_imm" type for ADDVL/ADDPL is just a placeholder.
[(set_attr "type" "alu_imm,alu_sreg,neon_add,alu_imm,multiple,alu_imm")
- (set_attr "simd" "*,*,yes,*,*,*")]
+ (set_attr "arch" "*,*,simd,*,*,*")]
)
;; zero_extend version of above
@@ -2640,7 +2643,7 @@
sub\\t%x0, %x1, %x2
sub\\t%d0, %d1, %d2"
[(set_attr "type" "alu_sreg, neon_sub")
- (set_attr "simd" "*,yes")]
+ (set_attr "arch" "*,simd")]
)
(define_expand "subv<mode>4"
@@ -3247,7 +3250,7 @@
neg\\t%<w>0, %<w>1
neg\\t%<rtn>0<vas>, %<rtn>1<vas>"
[(set_attr "type" "alu_sreg, neon_neg<q>")
- (set_attr "simd" "*,yes")]
+ (set_attr "arch" "*,simd")]
)
;; zero_extend version of above
@@ -4092,7 +4095,7 @@
<logical>\\t%<w>0, %<w>1, %2
<logical>\\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
[(set_attr "type" "logic_reg,logic_imm,neon_logic")
- (set_attr "simd" "*,*,yes")]
+ (set_attr "arch" "*,*,simd")]
)
;; zero_extend version of above
@@ -4226,7 +4229,7 @@
mvn\\t%<w>0, %<w>1
mvn\\t%0.8b, %1.8b"
[(set_attr "type" "logic_reg,neon_logic")
- (set_attr "simd" "*,yes")]
+ (set_attr "arch" "*,simd")]
)
(define_insn "*one_cmpl_<optab><mode>2"
@@ -4249,7 +4252,7 @@
<NLOGICAL:nlogical>\\t%<w>0, %<w>2, %<w>1
<NLOGICAL:nlogical>\\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>"
[(set_attr "type" "logic_reg,neon_logic")
- (set_attr "simd" "*,yes")]
+ (set_attr "arch" "*,simd")]
)
(define_insn "*<NLOGICAL:optab>_one_cmplsidi3_ze"
@@ -4289,7 +4292,7 @@
(set (match_dup 0) (not:GPI (match_dup 0)))]
""
[(set_attr "type" "logic_reg,multiple")
- (set_attr "simd" "*,yes")]
+ (set_attr "arch" "*,simd")]
)
(define_insn "*and_one_cmpl<mode>3_compare0"
@@ -4833,8 +4836,8 @@
lsl\t%<w>0, %<w>1, %<w>2
shl\t%<rtn>0<vas>, %<rtn>1<vas>, %2
ushl\t%<rtn>0<vas>, %<rtn>1<vas>, %<rtn>2<vas>"
- [(set_attr "simd" "no,no,yes,yes")
- (set_attr "type" "bfx,shift_reg,neon_shift_imm<q>, neon_shift_reg<q>")]
+ [(set_attr "type" "bfx,shift_reg,neon_shift_imm<q>, neon_shift_reg<q>")
+ (set_attr "arch" "*,*,simd,simd")]
)
;; Logical right shift using SISD or Integer instruction
@@ -4851,8 +4854,8 @@
ushr\t%<rtn>0<vas>, %<rtn>1<vas>, %2
#
#"
- [(set_attr "simd" "no,no,yes,yes,yes")
- (set_attr "type" "bfx,shift_reg,neon_shift_imm<q>,neon_shift_reg<q>,neon_shift_reg<q>")]
+ [(set_attr "type" "bfx,shift_reg,neon_shift_imm<q>,neon_shift_reg<q>,neon_shift_reg<q>")
+ (set_attr "arch" "*,*,simd,simd,simd")]
)
(define_split
@@ -4899,8 +4902,8 @@
sshr\t%<rtn>0<vas>, %<rtn>1<vas>, %2
#
#"
- [(set_attr "simd" "no,no,yes,yes,yes")
- (set_attr "type" "bfx,shift_reg,neon_shift_imm<q>,neon_shift_reg<q>,neon_shift_reg<q>")]
+ [(set_attr "type" "bfx,shift_reg,neon_shift_imm<q>,neon_shift_reg<q>,neon_shift_reg<q>")
+ (set_attr "arch" "*,*,simd,simd,simd")]
)
(define_split
@@ -4940,8 +4943,7 @@
UNSPEC_SISD_USHL))]
"TARGET_SIMD"
"ushl\t%d0, %d1, %d2"
- [(set_attr "simd" "yes")
- (set_attr "type" "neon_shift_reg")]
+ [(set_attr "type" "neon_shift_reg")]
)
(define_insn "*aarch64_ushl_2s"
@@ -4951,8 +4953,7 @@
UNSPEC_USHL_2S))]
"TARGET_SIMD"
"ushl\t%0.2s, %1.2s, %2.2s"
- [(set_attr "simd" "yes")
- (set_attr "type" "neon_shift_reg")]
+ [(set_attr "type" "neon_shift_reg")]
)
(define_insn "*aarch64_sisd_sshl"
@@ -4962,8 +4963,7 @@
UNSPEC_SISD_SSHL))]
"TARGET_SIMD"
"sshl\t%d0, %d1, %d2"
- [(set_attr "simd" "yes")
- (set_attr "type" "neon_shift_reg")]
+ [(set_attr "type" "neon_shift_reg")]
)
(define_insn "*aarch64_sshl_2s"
@@ -4973,8 +4973,7 @@
UNSPEC_SSHL_2S))]
"TARGET_SIMD"
"sshl\t%0.2s, %1.2s, %2.2s"
- [(set_attr "simd" "yes")
- (set_attr "type" "neon_shift_reg")]
+ [(set_attr "type" "neon_shift_reg")]
)
(define_insn "*aarch64_sisd_neg_qi"
@@ -4983,8 +4982,7 @@
UNSPEC_SISD_NEG))]
"TARGET_SIMD"
"neg\t%d0, %d1"
- [(set_attr "simd" "yes")
- (set_attr "type" "neon_neg")]
+ [(set_attr "type" "neon_neg")]
)
;; Rotate right
@@ -5620,9 +5618,8 @@
"@
<su_optab>cvtf\t%<GPF:s>0, %<s>1
<su_optab>cvtf\t%<GPF:s>0, %<w1>1"
- [(set_attr "simd" "yes,no")
- (set_attr "fp" "no,yes")
- (set_attr "type" "neon_int_to_fp_<Vetype>,f_cvti2f")]
+ [(set_attr "type" "neon_int_to_fp_<Vetype>,f_cvti2f")
+ (set_attr "arch" "simd,fp")]
)
(define_insn "<optab><fcvt_iesize><GPF:mode>2"
@@ -5707,8 +5704,7 @@
<FCVT_F2FIXED:fcvt_fixed_insn>\t%<GPF:w1>0, %<GPF:s>1, #%2
<FCVT_F2FIXED:fcvt_fixed_insn>\t%<GPF:s>0, %<GPF:s>1, #%2"
[(set_attr "type" "f_cvtf2i, neon_fp_to_int_<GPF:Vetype>")
- (set_attr "fp" "yes, *")
- (set_attr "simd" "*, yes")]
+ (set_attr "arch" "fp,simd")]
)
(define_insn "<FCVT_FIXED2F:fcvt_fixed_insn><GPI:mode>3"
@@ -5721,8 +5717,7 @@
<FCVT_FIXED2F:fcvt_fixed_insn>\t%<GPI:v>0, %<GPI:w>1, #%2
<FCVT_FIXED2F:fcvt_fixed_insn>\t%<GPI:v>0, %<GPI:v>1, #%2"
[(set_attr "type" "f_cvti2f, neon_int_to_fp_<GPI:Vetype>")
- (set_attr "fp" "yes, *")
- (set_attr "simd" "*, yes")]
+ (set_attr "arch" "fp,simd")]
)
(define_insn "<FCVT_F2FIXED:fcvt_fixed_insn>hf<mode>3"
diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt
index c8e82042224..b2e80cbf6f1 100644
--- a/gcc/config/aarch64/aarch64.opt
+++ b/gcc/config/aarch64/aarch64.opt
@@ -117,23 +117,23 @@ Enum(aarch64_tls_size) String(48) Value(48)
march=
Target RejectNegative ToLower Joined Var(aarch64_arch_string)
--march=ARCH Use features of architecture ARCH.
+Use features of architecture ARCH.
mcpu=
Target RejectNegative ToLower Joined Var(aarch64_cpu_string)
--mcpu=CPU Use features of and optimize for CPU.
+Use features of and optimize for CPU.
mtune=
Target RejectNegative ToLower Joined Var(aarch64_tune_string)
--mtune=CPU Optimize for CPU.
+Optimize for CPU.
mabi=
Target RejectNegative Joined Enum(aarch64_abi) Var(aarch64_abi) Init(AARCH64_ABI_DEFAULT)
--mabi=ABI Generate code that conforms to the specified ABI.
+Generate code that conforms to the specified ABI.
moverride=
Target RejectNegative ToLower Joined Var(aarch64_override_tune_string)
--moverride=STRING Power users only! Override CPU optimization parameters.
+-moverride=<string> Power users only! Override CPU optimization parameters.
Enum
Name(aarch64_abi) Type(int)
@@ -209,7 +209,7 @@ Enum(sve_vector_bits) String(2048) Value(SVE_2048)
msve-vector-bits=
Target RejectNegative Joined Enum(sve_vector_bits) Var(aarch64_sve_vector_bits) Init(SVE_SCALABLE)
--msve-vector-bits=N Set the number of bits in an SVE vector register to N.
+-msve-vector-bits=<number> Set the number of bits in an SVE vector register.
mverbose-cost-dump
Target Undocumented Var(flag_aarch64_verbose_cost)
diff --git a/gcc/config/arm/arm-protos.h b/gcc/config/arm/arm-protos.h
index 8537262ce64..0dfb3ac59a6 100644
--- a/gcc/config/arm/arm-protos.h
+++ b/gcc/config/arm/arm-protos.h
@@ -56,6 +56,8 @@ extern void arm_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update
extern rtx arm_simd_vect_par_cnst_half (machine_mode mode, bool high);
extern bool arm_simd_check_vect_par_cnst_half_p (rtx op, machine_mode mode,
bool high);
+extern void arm_emit_speculation_barrier_function (void);
+
#ifdef RTX_CODE
extern void arm_gen_unlikely_cbranch (enum rtx_code, machine_mode cc_mode,
rtx label_ref);
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 1d97db50a28..6332e68df05 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -2466,8 +2466,9 @@ arm_set_fixed_conv_libfunc (convert_optab optable, machine_mode to,
set_conv_libfunc (optable, to, from, buffer);
}
-/* Set up library functions unique to ARM. */
+static GTY(()) rtx speculation_barrier_libfunc;
+/* Set up library functions unique to ARM. */
static void
arm_init_libfuncs (void)
{
@@ -2753,6 +2754,8 @@ arm_init_libfuncs (void)
if (TARGET_AAPCS_BASED)
synchronize_libfunc = init_one_libfunc ("__sync_synchronize");
+
+ speculation_barrier_libfunc = init_one_libfunc ("__speculation_barrier");
}
/* On AAPCS systems, this is the "struct __va_list". */
@@ -30838,7 +30841,7 @@ arm_insert_attributes (tree fndecl, tree * attributes)
return;
if (TREE_CODE (fndecl) != FUNCTION_DECL || DECL_EXTERNAL(fndecl)
- || DECL_BUILT_IN (fndecl) || DECL_ARTIFICIAL (fndecl))
+ || fndecl_built_in_p (fndecl) || DECL_ARTIFICIAL (fndecl))
return;
/* Nested definitions must inherit mode. */
@@ -31528,6 +31531,16 @@ arm_constant_alignment (const_tree exp, HOST_WIDE_INT align)
return align;
}
+/* Emit a speculation barrier on target architectures that do not have
+ DSB/ISB directly. Such systems probably don't need a barrier
+ themselves, but if the code is ever run on a later architecture, it
+ might become a problem. */
+void
+arm_emit_speculation_barrier_function ()
+{
+ emit_library_call (speculation_barrier_libfunc, LCT_NORMAL, VOIDmode);
+}
+
#if CHECKING_P
namespace selftest {
diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index ca2a2f5469f..270b8e454b3 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -12016,10 +12016,16 @@
[(unspec_volatile [(const_int 0)] VUNSPEC_SPECULATION_BARRIER)]
"TARGET_EITHER"
"
- /* Don't emit anything for Thumb1 and suppress the warning from the
- generic expansion. */
- if (!TARGET_32BIT)
- DONE;
+ /* For thumb1 (except Armv8 derivatives), and for pre-Armv7 we don't
+ have a usable barrier (and probably don't need one in practice).
+ But to be safe if such code is run on later architectures, call a
+ helper function in libgcc that will do the right thing for the
+ active system. */
+ if (!(arm_arch7 || arm_arch8))
+ {
+ arm_emit_speculation_barrier_function ();
+ DONE;
+ }
"
)
@@ -12027,7 +12033,7 @@
;; tracking.
(define_insn "*speculation_barrier_insn"
[(unspec_volatile [(const_int 0)] VUNSPEC_SPECULATION_BARRIER)]
- "TARGET_32BIT"
+ "arm_arch7 || arm_arch8"
"isb\;dsb\\tsy"
[(set_attr "type" "block")
(set_attr "length" "8")]
diff --git a/gcc/config/darwin.c b/gcc/config/darwin.c
index 233076a3b77..aa2ef91c64a 100644
--- a/gcc/config/darwin.c
+++ b/gcc/config/darwin.c
@@ -824,8 +824,7 @@ machopic_legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
/* First handle a simple SYMBOL_REF or LABEL_REF */
if (GET_CODE (orig) == LABEL_REF
- || (GET_CODE (orig) == SYMBOL_REF
- ))
+ || GET_CODE (orig) == SYMBOL_REF)
{
/* addr(foo) = &func+(foo-func) */
orig = machopic_indirect_data_reference (orig, reg);
@@ -1024,10 +1023,6 @@ machopic_legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
return pic_ref;
}
}
-
- else if (GET_CODE (orig) == SYMBOL_REF)
- return orig;
-
else if (GET_CODE (orig) == PLUS
&& (GET_CODE (XEXP (orig, 0)) == MEM
|| GET_CODE (XEXP (orig, 0)) == SYMBOL_REF
@@ -1057,12 +1052,10 @@ machopic_legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
}
/* Likewise, should we set special REG_NOTEs here? */
}
-
else if (GET_CODE (orig) == CONST)
{
return machopic_legitimize_pic_address (XEXP (orig, 0), Pmode, reg);
}
-
else if (GET_CODE (orig) == MEM
&& GET_CODE (XEXP (orig, 0)) == SYMBOL_REF)
{
diff --git a/gcc/config/i386/i386-modes.def b/gcc/config/i386/i386-modes.def
index 08c79a5df4e..12c17ce7dfc 100644
--- a/gcc/config/i386/i386-modes.def
+++ b/gcc/config/i386/i386-modes.def
@@ -98,9 +98,6 @@ VECTOR_MODE (INT, QI, 14); /* V14QI */
VECTOR_MODE (INT, HI, 6); /* V6HI */
VECTOR_MODE (INT, SI, 64); /* V64SI */
-POINTER_BOUNDS_MODE (BND32, 8);
-POINTER_BOUNDS_MODE (BND64, 16);
-
INT_MODE (OI, 32);
INT_MODE (XI, 64);
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 03118102319..8672a666024 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -262,7 +262,7 @@ enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
EVEX_SSE_REGS, EVEX_SSE_REGS, EVEX_SSE_REGS, EVEX_SSE_REGS,
/* Mask registers. */
MASK_REGS, MASK_EVEX_REGS, MASK_EVEX_REGS, MASK_EVEX_REGS,
- MASK_EVEX_REGS, MASK_EVEX_REGS, MASK_EVEX_REGS, MASK_EVEX_REGS,
+ MASK_EVEX_REGS, MASK_EVEX_REGS, MASK_EVEX_REGS, MASK_EVEX_REGS
};
/* The "default" register map used in 32bit mode. */
@@ -278,8 +278,7 @@ int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
-1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
-1, -1, -1, -1, -1, -1, -1, -1, /* AVX-512 registers 16-23*/
-1, -1, -1, -1, -1, -1, -1, -1, /* AVX-512 registers 24-31*/
- 93, 94, 95, 96, 97, 98, 99, 100, /* Mask registers */
- 101, 102, 103, 104, /* bound registers */
+ 93, 94, 95, 96, 97, 98, 99, 100 /* Mask registers */
};
/* The "default" register map used in 64bit mode. */
@@ -295,8 +294,7 @@ int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
67, 68, 69, 70, 71, 72, 73, 74, /* AVX-512 registers 16-23 */
75, 76, 77, 78, 79, 80, 81, 82, /* AVX-512 registers 24-31 */
- 118, 119, 120, 121, 122, 123, 124, 125, /* Mask registers */
- 126, 127, 128, 129, /* bound registers */
+ 118, 119, 120, 121, 122, 123, 124, 125 /* Mask registers */
};
/* Define the register numbers to be used in Dwarf debugging information.
@@ -364,8 +362,7 @@ int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
-1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
-1, -1, -1, -1, -1, -1, -1, -1, /* AVX-512 registers 16-23*/
-1, -1, -1, -1, -1, -1, -1, -1, /* AVX-512 registers 24-31*/
- 93, 94, 95, 96, 97, 98, 99, 100, /* Mask registers */
- 101, 102, 103, 104, /* bound registers */
+ 93, 94, 95, 96, 97, 98, 99, 100 /* Mask registers */
};
/* Define parameter passing and return registers. */
@@ -3482,7 +3479,7 @@ ix86_option_override_internal (bool main_args_p,
| PTA_SSE3 | PTA_SSSE3 | PTA_CX16 | PTA_FXSR;
const wide_int_bitmask PTA_NEHALEM = PTA_CORE2 | PTA_SSE4_1 | PTA_SSE4_2
| PTA_POPCNT;
- const wide_int_bitmask PTA_WESTMERE = PTA_NEHALEM | PTA_AES | PTA_PCLMUL;
+ const wide_int_bitmask PTA_WESTMERE = PTA_NEHALEM | PTA_PCLMUL;
const wide_int_bitmask PTA_SANDYBRIDGE = PTA_WESTMERE | PTA_AVX | PTA_XSAVE
| PTA_XSAVEOPT;
const wide_int_bitmask PTA_IVYBRIDGE = PTA_SANDYBRIDGE | PTA_FSGSBASE
@@ -3491,7 +3488,7 @@ ix86_option_override_internal (bool main_args_p,
| PTA_BMI2 | PTA_LZCNT | PTA_FMA | PTA_MOVBE | PTA_HLE;
const wide_int_bitmask PTA_BROADWELL = PTA_HASWELL | PTA_ADX | PTA_PRFCHW
| PTA_RDSEED;
- const wide_int_bitmask PTA_SKYLAKE = PTA_BROADWELL | PTA_CLFLUSHOPT
+ const wide_int_bitmask PTA_SKYLAKE = PTA_BROADWELL | PTA_AES | PTA_CLFLUSHOPT
| PTA_XSAVEC | PTA_XSAVES | PTA_SGX;
const wide_int_bitmask PTA_SKYLAKE_AVX512 = PTA_SKYLAKE | PTA_AVX512F
| PTA_AVX512CD | PTA_AVX512VL | PTA_AVX512BW | PTA_AVX512DQ | PTA_PKU
@@ -3508,7 +3505,7 @@ ix86_option_override_internal (bool main_args_p,
| PTA_AVX512F | PTA_AVX512CD;
const wide_int_bitmask PTA_BONNELL = PTA_CORE2 | PTA_MOVBE;
const wide_int_bitmask PTA_SILVERMONT = PTA_WESTMERE | PTA_MOVBE | PTA_RDRND;
- const wide_int_bitmask PTA_GOLDMONT = PTA_SILVERMONT | PTA_SHA | PTA_XSAVE
+ const wide_int_bitmask PTA_GOLDMONT = PTA_SILVERMONT | PTA_AES | PTA_SHA | PTA_XSAVE
| PTA_RDSEED | PTA_XSAVEC | PTA_XSAVES | PTA_CLFLUSHOPT | PTA_XSAVEOPT
| PTA_FSGSBASE;
const wide_int_bitmask PTA_GOLDMONT_PLUS = PTA_GOLDMONT | PTA_RDPID
@@ -10646,26 +10643,16 @@ static int indirectlabelno;
/* True if call thunk function is needed. */
static bool indirect_thunk_needed = false;
-/* True if call thunk function with the BND prefix is needed. */
-static bool indirect_thunk_bnd_needed = false;
/* Bit masks of integer registers, which contain branch target, used
by call thunk functions. */
static int indirect_thunks_used;
-/* Bit masks of integer registers, which contain branch target, used
- by call thunk functions with the BND prefix. */
-static int indirect_thunks_bnd_used;
/* True if return thunk function is needed. */
static bool indirect_return_needed = false;
-/* True if return thunk function with the BND prefix is needed. */
-static bool indirect_return_bnd_needed = false;
/* True if return thunk function via CX is needed. */
static bool indirect_return_via_cx;
-/* True if return thunk function via CX with the BND prefix is
- needed. */
-static bool indirect_return_via_cx_bnd;
#ifndef INDIRECT_LABEL
# define INDIRECT_LABEL "LIND"
@@ -10675,7 +10662,6 @@ static bool indirect_return_via_cx_bnd;
enum indirect_thunk_prefix
{
indirect_thunk_prefix_none,
- indirect_thunk_prefix_bnd,
indirect_thunk_prefix_nt
};
@@ -10712,10 +10698,8 @@ indirect_thunk_name (char name[32], unsigned int regno,
{
const char *prefix;
- if (need_prefix == indirect_thunk_prefix_bnd)
- prefix = "_bnd";
- else if (need_prefix == indirect_thunk_prefix_nt
- && regno != INVALID_REGNUM)
+ if (need_prefix == indirect_thunk_prefix_nt
+ && regno != INVALID_REGNUM)
{
/* NOTRACK prefix is only used with external thunk via
register so that NOTRACK prefix can be added to indirect
@@ -10743,35 +10727,19 @@ indirect_thunk_name (char name[32], unsigned int regno,
else
{
if (regno != INVALID_REGNUM)
- {
- if (need_prefix == indirect_thunk_prefix_bnd)
- ASM_GENERATE_INTERNAL_LABEL (name, "LITBR", regno);
- else
- ASM_GENERATE_INTERNAL_LABEL (name, "LITR", regno);
- }
+ ASM_GENERATE_INTERNAL_LABEL (name, "LITR", regno);
else
{
if (ret_p)
- {
- if (need_prefix == indirect_thunk_prefix_bnd)
- ASM_GENERATE_INTERNAL_LABEL (name, "LRTB", 0);
- else
- ASM_GENERATE_INTERNAL_LABEL (name, "LRT", 0);
- }
+ ASM_GENERATE_INTERNAL_LABEL (name, "LRT", 0);
else
- {
- if (need_prefix == indirect_thunk_prefix_bnd)
- ASM_GENERATE_INTERNAL_LABEL (name, "LITB", 0);
- else
- ASM_GENERATE_INTERNAL_LABEL (name, "LIT", 0);
- }
+ ASM_GENERATE_INTERNAL_LABEL (name, "LIT", 0);
}
}
}
-/* Output a call and return thunk for indirect branch. If BND_P is
- true, the BND prefix is needed. If REGNO != -1, the function
- address is in REGNO and the call and return thunk looks like:
+/* Output a call and return thunk for indirect branch. If REGNO != -1,
+ the function address is in REGNO and the call and return thunk looks like:
call L2
L1:
@@ -10796,8 +10764,7 @@ indirect_thunk_name (char name[32], unsigned int regno,
*/
static void
-output_indirect_thunk (enum indirect_thunk_prefix need_prefix,
- unsigned int regno)
+output_indirect_thunk (unsigned int regno)
{
char indirectlabel1[32];
char indirectlabel2[32];
@@ -10808,10 +10775,7 @@ output_indirect_thunk (enum indirect_thunk_prefix need_prefix,
indirectlabelno++);
/* Call */
- if (need_prefix == indirect_thunk_prefix_bnd)
- fputs ("\tbnd call\t", asm_out_file);
- else
- fputs ("\tcall\t", asm_out_file);
+ fputs ("\tcall\t", asm_out_file);
assemble_name_raw (asm_out_file, indirectlabel2);
fputc ('\n', asm_out_file);
@@ -10845,17 +10809,13 @@ output_indirect_thunk (enum indirect_thunk_prefix need_prefix,
output_asm_insn ("lea\t{%E1, %0|%0, %E1}", xops);
}
- if (need_prefix == indirect_thunk_prefix_bnd)
- fputs ("\tbnd ret\n", asm_out_file);
- else
- fputs ("\tret\n", asm_out_file);
+ fputs ("\tret\n", asm_out_file);
}
/* Output a function with a call and return thunk for indirect branch.
- If BND_P is true, the BND prefix is needed. If REGNO != UNVALID_REGNUM,
- the function address is in REGNO. Otherwise, the function address is
- on the top of stack. Thunk is used for function return if RET_P is
- true. */
+ If REGNO != INVALID_REGNUM, the function address is in REGNO.
+ Otherwise, the function address is on the top of stack. Thunk is
+ used for function return if RET_P is true. */
static void
output_indirect_thunk_function (enum indirect_thunk_prefix need_prefix,
@@ -10864,7 +10824,7 @@ output_indirect_thunk_function (enum indirect_thunk_prefix need_prefix,
char name[32];
tree decl;
- /* Create __x86_indirect_thunk/__x86_indirect_thunk_bnd. */
+ /* Create __x86_indirect_thunk. */
indirect_thunk_name (name, regno, need_prefix, ret_p);
decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
get_identifier (name),
@@ -10919,7 +10879,7 @@ output_indirect_thunk_function (enum indirect_thunk_prefix need_prefix,
/* Make sure unwind info is emitted for the thunk if needed. */
final_start_function (emit_barrier (), asm_out_file, 1);
- output_indirect_thunk (need_prefix, regno);
+ output_indirect_thunk (regno);
final_end_function ();
init_insn_lengths ();
@@ -10957,23 +10917,12 @@ ix86_code_end (void)
if (indirect_return_needed)
output_indirect_thunk_function (indirect_thunk_prefix_none,
INVALID_REGNUM, true);
- if (indirect_return_bnd_needed)
- output_indirect_thunk_function (indirect_thunk_prefix_bnd,
- INVALID_REGNUM, true);
-
if (indirect_return_via_cx)
output_indirect_thunk_function (indirect_thunk_prefix_none,
CX_REG, true);
- if (indirect_return_via_cx_bnd)
- output_indirect_thunk_function (indirect_thunk_prefix_bnd,
- CX_REG, true);
-
if (indirect_thunk_needed)
output_indirect_thunk_function (indirect_thunk_prefix_none,
INVALID_REGNUM, false);
- if (indirect_thunk_bnd_needed)
- output_indirect_thunk_function (indirect_thunk_prefix_bnd,
- INVALID_REGNUM, false);
for (regno = FIRST_REX_INT_REG; regno <= LAST_REX_INT_REG; regno++)
{
@@ -10981,10 +10930,6 @@ ix86_code_end (void)
if ((indirect_thunks_used & (1 << i)))
output_indirect_thunk_function (indirect_thunk_prefix_none,
regno, false);
-
- if ((indirect_thunks_bnd_used & (1 << i)))
- output_indirect_thunk_function (indirect_thunk_prefix_bnd,
- regno, false);
}
for (regno = FIRST_INT_REG; regno <= LAST_INT_REG; regno++)
@@ -10996,10 +10941,6 @@ ix86_code_end (void)
output_indirect_thunk_function (indirect_thunk_prefix_none,
regno, false);
- if ((indirect_thunks_bnd_used & (1 << regno)))
- output_indirect_thunk_function (indirect_thunk_prefix_bnd,
- regno, false);
-
if (!(pic_labels_used & (1 << regno)))
continue;
@@ -11274,16 +11215,6 @@ ix86_save_reg (unsigned int regno, bool maybe_eh_return, bool ignore_outlined)
while (nregs-- > 0)
if ((i + nregs) == regno)
return false;
-
- reg = crtl->return_bnd;
- if (reg)
- {
- i = REGNO (reg);
- nregs = REG_NREGS (reg);
- while (nregs-- > 0)
- if ((i + nregs) == regno)
- return false;
- }
}
return (df_regs_ever_live_p (regno)
@@ -15494,10 +15425,6 @@ ix86_force_load_from_GOT_p (rtx x)
static bool
ix86_legitimate_constant_p (machine_mode mode, rtx x)
{
- /* Pointer bounds constants are not valid. */
- if (POINTER_BOUNDS_MODE_P (GET_MODE (x)))
- return false;
-
switch (GET_CODE (x))
{
case CONST:
@@ -18636,25 +18563,6 @@ ix86_print_operand_address_as (FILE *file, rtx addr,
ok = ix86_decompose_address (XVECEXP (addr, 0, 0), &parts);
code = 'q';
}
- else if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_BNDMK_ADDR)
- {
- ok = ix86_decompose_address (XVECEXP (addr, 0, 1), &parts);
- gcc_assert (parts.base == NULL_RTX || parts.index == NULL_RTX);
- if (parts.base != NULL_RTX)
- {
- parts.index = parts.base;
- parts.scale = 1;
- }
- parts.base = XVECEXP (addr, 0, 0);
- addr = XVECEXP (addr, 0, 0);
- }
- else if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_BNDLDX_ADDR)
- {
- ok = ix86_decompose_address (XVECEXP (addr, 0, 0), &parts);
- gcc_assert (parts.index == NULL_RTX);
- parts.index = XVECEXP (addr, 0, 1);
- addr = XVECEXP (addr, 0, 0);
- }
else
ok = ix86_decompose_address (addr, &parts);
@@ -27622,6 +27530,11 @@ ix86_expand_set_or_movmem (rtx dst, rtx src, rtx count_exp, rtx val_exp,
issetmem,
issetmem && val_exp == const0_rtx, have_as,
&dynamic_check, &noalign, false);
+
+ if (dump_file)
+ fprintf (dump_file, "Selected stringop expansion strategy: %s\n",
+ stringop_alg_names[alg]);
+
if (alg == libcall)
return false;
gcc_assert (alg != no_stringop);
@@ -28540,10 +28453,7 @@ ix86_output_indirect_branch_via_reg (rtx call_op, bool sibcall_p)
int i = regno;
if (i >= FIRST_REX_INT_REG)
i -= (FIRST_REX_INT_REG - LAST_INT_REG - 1);
- if (need_prefix == indirect_thunk_prefix_bnd)
- indirect_thunks_bnd_used |= 1 << i;
- else
- indirect_thunks_used |= 1 << i;
+ indirect_thunks_used |= 1 << i;
}
indirect_thunk_name (thunk_name_buf, regno, need_prefix, false);
thunk_name = thunk_name_buf;
@@ -28554,23 +28464,15 @@ ix86_output_indirect_branch_via_reg (rtx call_op, bool sibcall_p)
if (sibcall_p)
{
if (thunk_name != NULL)
- {
- if (need_prefix == indirect_thunk_prefix_bnd)
- fprintf (asm_out_file, "\tbnd jmp\t%s\n", thunk_name);
- else
- fprintf (asm_out_file, "\tjmp\t%s\n", thunk_name);
- }
+ fprintf (asm_out_file, "\tjmp\t%s\n", thunk_name);
else
- output_indirect_thunk (need_prefix, regno);
+ output_indirect_thunk (regno);
}
else
{
if (thunk_name != NULL)
{
- if (need_prefix == indirect_thunk_prefix_bnd)
- fprintf (asm_out_file, "\tbnd call\t%s\n", thunk_name);
- else
- fprintf (asm_out_file, "\tcall\t%s\n", thunk_name);
+ fprintf (asm_out_file, "\tcall\t%s\n", thunk_name);
return;
}
@@ -28585,32 +28487,21 @@ ix86_output_indirect_branch_via_reg (rtx call_op, bool sibcall_p)
indirectlabelno++);
/* Jump. */
- if (need_prefix == indirect_thunk_prefix_bnd)
- fputs ("\tbnd jmp\t", asm_out_file);
- else
- fputs ("\tjmp\t", asm_out_file);
+ fputs ("\tjmp\t", asm_out_file);
assemble_name_raw (asm_out_file, indirectlabel2);
fputc ('\n', asm_out_file);
ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, indirectlabel1);
if (thunk_name != NULL)
- {
- if (need_prefix == indirect_thunk_prefix_bnd)
- fprintf (asm_out_file, "\tbnd jmp\t%s\n", thunk_name);
- else
- fprintf (asm_out_file, "\tjmp\t%s\n", thunk_name);
- }
+ fprintf (asm_out_file, "\tjmp\t%s\n", thunk_name);
else
- output_indirect_thunk (need_prefix, regno);
+ output_indirect_thunk (regno);
ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, indirectlabel2);
/* Call. */
- if (need_prefix == indirect_thunk_prefix_bnd)
- fputs ("\tbnd call\t", asm_out_file);
- else
- fputs ("\tcall\t", asm_out_file);
+ fputs ("\tcall\t", asm_out_file);
assemble_name_raw (asm_out_file, indirectlabel1);
fputc ('\n', asm_out_file);
}
@@ -28649,12 +28540,7 @@ ix86_output_indirect_branch_via_push (rtx call_op, const char *xasm,
!= indirect_branch_thunk_inline)
{
if (cfun->machine->indirect_branch_type == indirect_branch_thunk)
- {
- if (need_prefix == indirect_thunk_prefix_bnd)
- indirect_thunk_bnd_needed = true;
- else
- indirect_thunk_needed = true;
- }
+ indirect_thunk_needed = true;
indirect_thunk_name (thunk_name_buf, regno, need_prefix, false);
thunk_name = thunk_name_buf;
}
@@ -28668,14 +28554,9 @@ ix86_output_indirect_branch_via_push (rtx call_op, const char *xasm,
{
output_asm_insn (push_buf, &call_op);
if (thunk_name != NULL)
- {
- if (need_prefix == indirect_thunk_prefix_bnd)
- fprintf (asm_out_file, "\tbnd jmp\t%s\n", thunk_name);
- else
- fprintf (asm_out_file, "\tjmp\t%s\n", thunk_name);
- }
+ fprintf (asm_out_file, "\tjmp\t%s\n", thunk_name);
else
- output_indirect_thunk (need_prefix, regno);
+ output_indirect_thunk (regno);
}
else
{
@@ -28690,10 +28571,7 @@ ix86_output_indirect_branch_via_push (rtx call_op, const char *xasm,
indirectlabelno++);
/* Jump. */
- if (need_prefix == indirect_thunk_prefix_bnd)
- fputs ("\tbnd jmp\t", asm_out_file);
- else
- fputs ("\tjmp\t", asm_out_file);
+ fputs ("\tjmp\t", asm_out_file);
assemble_name_raw (asm_out_file, indirectlabel2);
fputc ('\n', asm_out_file);
@@ -28735,22 +28613,14 @@ ix86_output_indirect_branch_via_push (rtx call_op, const char *xasm,
output_asm_insn (push_buf, &call_op);
if (thunk_name != NULL)
- {
- if (need_prefix == indirect_thunk_prefix_bnd)
- fprintf (asm_out_file, "\tbnd jmp\t%s\n", thunk_name);
- else
- fprintf (asm_out_file, "\tjmp\t%s\n", thunk_name);
- }
+ fprintf (asm_out_file, "\tjmp\t%s\n", thunk_name);
else
- output_indirect_thunk (need_prefix, regno);
+ output_indirect_thunk (regno);
ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, indirectlabel2);
/* Call. */
- if (need_prefix == indirect_thunk_prefix_bnd)
- fputs ("\tbnd call\t", asm_out_file);
- else
- fputs ("\tcall\t", asm_out_file);
+ fputs ("\tcall\t", asm_out_file);
assemble_name_raw (asm_out_file, indirectlabel1);
fputc ('\n', asm_out_file);
}
@@ -28808,19 +28678,11 @@ ix86_output_function_return (bool long_p)
== indirect_branch_thunk);
indirect_thunk_name (thunk_name, INVALID_REGNUM, need_prefix,
true);
- if (need_prefix == indirect_thunk_prefix_bnd)
- {
- indirect_return_bnd_needed |= need_thunk;
- fprintf (asm_out_file, "\tbnd jmp\t%s\n", thunk_name);
- }
- else
- {
- indirect_return_needed |= need_thunk;
- fprintf (asm_out_file, "\tjmp\t%s\n", thunk_name);
- }
+ indirect_return_needed |= need_thunk;
+ fprintf (asm_out_file, "\tjmp\t%s\n", thunk_name);
}
else
- output_indirect_thunk (need_prefix, INVALID_REGNUM);
+ output_indirect_thunk (INVALID_REGNUM);
return "";
}
@@ -28851,27 +28713,16 @@ ix86_output_indirect_function_return (rtx ret_op)
bool need_thunk = (cfun->machine->function_return_type
== indirect_branch_thunk);
indirect_thunk_name (thunk_name, regno, need_prefix, true);
- if (need_prefix == indirect_thunk_prefix_bnd)
- {
- if (need_thunk)
- {
- indirect_return_via_cx_bnd = true;
- indirect_thunks_bnd_used |= 1 << CX_REG;
- }
- fprintf (asm_out_file, "\tbnd jmp\t%s\n", thunk_name);
- }
- else
+
+ if (need_thunk)
{
- if (need_thunk)
- {
- indirect_return_via_cx = true;
- indirect_thunks_used |= 1 << CX_REG;
- }
- fprintf (asm_out_file, "\tjmp\t%s\n", thunk_name);
+ indirect_return_via_cx = true;
+ indirect_thunks_used |= 1 << CX_REG;
}
+ fprintf (asm_out_file, "\tjmp\t%s\n", thunk_name);
}
else
- output_indirect_thunk (need_prefix, regno);
+ output_indirect_thunk (regno);
return "";
}
@@ -29486,16 +29337,6 @@ avoid_func_arg_motion (rtx_insn *first_arg, rtx_insn *insn)
rtx set;
rtx tmp;
- /* Add anti dependencies for bounds stores. */
- if (INSN_P (insn)
- && GET_CODE (PATTERN (insn)) == PARALLEL
- && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
- && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_BNDSTX)
- {
- add_dependence (first_arg, insn, REG_DEP_ANTI);
- return;
- }
-
set = single_set (insn);
if (!set)
return;
@@ -33493,7 +33334,7 @@ ix86_gimple_fold_builtin (gimple_stmt_iterator *gsi)
{
gimple *stmt = gsi_stmt (*gsi);
tree fndecl = gimple_call_fndecl (stmt);
- gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
+ gcc_checking_assert (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_MD));
int n_args = gimple_call_num_args (stmt);
enum ix86_builtins fn_code = (enum ix86_builtins) DECL_FUNCTION_CODE (fndecl);
tree decl = NULL_TREE;
@@ -38083,7 +37924,7 @@ rdseed_step:
{
tree fndecl = gimple_call_fndecl (def_stmt);
if (fndecl
- && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
+ && fndecl_built_in_p (fndecl, BUILT_IN_MD))
switch ((unsigned int) DECL_FUNCTION_CODE (fndecl))
{
case IX86_BUILTIN_CMPPD:
diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
index fbba598ffd5..2a46fccdec1 100644
--- a/gcc/config/i386/i386.h
+++ b/gcc/config/i386/i386.h
@@ -1033,9 +1033,7 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
/*xmm24,xmm25,xmm26,xmm27,xmm28,xmm29,xmm30,xmm31*/ \
0, 0, 0, 0, 0, 0, 0, 0, \
/* k0, k1, k2, k3, k4, k5, k6, k7*/ \
- 0, 0, 0, 0, 0, 0, 0, 0, \
-/* b0, b1, b2, b3*/ \
- 0, 0, 0, 0 }
+ 0, 0, 0, 0, 0, 0, 0, 0 }
/* 1 for registers not available across function calls.
These must include the FIXED_REGISTERS and also any
@@ -1072,9 +1070,7 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
/*xmm24,xmm25,xmm26,xmm27,xmm28,xmm29,xmm30,xmm31*/ \
6, 6, 6, 6, 6, 6, 6, 6, \
/* k0, k1, k2, k3, k4, k5, k6, k7*/ \
- 1, 1, 1, 1, 1, 1, 1, 1, \
-/* b0, b1, b2, b3*/ \
- 1, 1, 1, 1 }
+ 1, 1, 1, 1, 1, 1, 1, 1 }
/* Order in which to allocate registers. Each register must be
listed once, even those in FIXED_REGISTERS. List frame pointer
@@ -1090,8 +1086,7 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, \
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, \
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, \
- 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, \
- 78, 79, 80 }
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76 }
/* ADJUST_REG_ALLOC_ORDER is a macro which permits reg_alloc_order
to be rearranged based on a particular function. When using sse math,
@@ -2043,8 +2038,7 @@ do { \
"xmm20", "xmm21", "xmm22", "xmm23", \
"xmm24", "xmm25", "xmm26", "xmm27", \
"xmm28", "xmm29", "xmm30", "xmm31", \
- "k0", "k1", "k2", "k3", "k4", "k5", "k6", "k7", \
- "bnd0", "bnd1", "bnd2", "bnd3" }
+ "k0", "k1", "k2", "k3", "k4", "k5", "k6", "k7" }
#define REGISTER_NAMES HI_REGISTER_NAMES
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index 71faa218ffa..62dab1662a3 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -184,16 +184,6 @@
UNSPEC_PDEP
UNSPEC_PEXT
- UNSPEC_BNDMK
- UNSPEC_BNDMK_ADDR
- UNSPEC_BNDSTX
- UNSPEC_BNDLDX
- UNSPEC_BNDLDX_ADDR
- UNSPEC_BNDCL
- UNSPEC_BNDCU
- UNSPEC_BNDCN
- UNSPEC_MPX_FENCE
-
;; IRET support
UNSPEC_INTERRUPT_RETURN
])
@@ -428,11 +418,7 @@
(MASK5_REG 74)
(MASK6_REG 75)
(MASK7_REG 76)
- (BND0_REG 77)
- (BND1_REG 78)
- (BND2_REG 79)
- (BND3_REG 80)
- (FIRST_PSEUDO_REG 81)
+ (FIRST_PSEUDO_REG 77)
])
;; Insns whose names begin with "x86_" are emitted by gen_FOO calls
@@ -1054,10 +1040,6 @@
(define_mode_iterator DWIH [(SI "!TARGET_64BIT")
(DI "TARGET_64BIT")])
-;; Bound modes.
-(define_mode_iterator BND [(BND32 "!TARGET_LP64")
- (BND64 "TARGET_LP64")])
-
;; Instruction suffix for integer modes.
(define_mode_attr imodesuffix [(QI "b") (HI "w") (SI "l") (DI "q")])
diff --git a/gcc/config/i386/predicates.md b/gcc/config/i386/predicates.md
index babaf1d9433..ed3bc7c6619 100644
--- a/gcc/config/i386/predicates.md
+++ b/gcc/config/i386/predicates.md
@@ -1135,9 +1135,6 @@
(define_predicate "vsib_mem_operator"
(match_code "mem"))
-(define_predicate "bnd_mem_operator"
- (match_code "mem"))
-
;; Return true if the rtx is known to be at least 32 bits aligned.
(define_predicate "aligned_operand"
(match_operand 0 "general_operand")
diff --git a/gcc/config/mips/frame-header-opt.c b/gcc/config/mips/frame-header-opt.c
index 77298dce69e..ed51947bb6c 100644
--- a/gcc/config/mips/frame-header-opt.c
+++ b/gcc/config/mips/frame-header-opt.c
@@ -29,13 +29,13 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "context.h"
#include "coretypes.h"
+#include "backend.h"
#include "tree.h"
#include "tree-core.h"
#include "tree-pass.h"
#include "target.h"
#include "target-globals.h"
#include "profile-count.h"
-#include "cfg.h"
#include "cgraph.h"
#include "function.h"
#include "basic-block.h"
diff --git a/gcc/config/riscv/pic.md b/gcc/config/riscv/pic.md
index a4a9732656c..942502058e0 100644
--- a/gcc/config/riscv/pic.md
+++ b/gcc/config/riscv/pic.md
@@ -22,71 +22,100 @@
;; Simplify PIC loads to static variables.
;; These should go away once we figure out how to emit auipc discretely.
-(define_insn "*local_pic_load_s<mode>"
+(define_insn "*local_pic_load<mode>"
[(set (match_operand:ANYI 0 "register_operand" "=r")
- (sign_extend:ANYI (mem:ANYI (match_operand 1 "absolute_symbolic_operand" ""))))]
+ (mem:ANYI (match_operand 1 "absolute_symbolic_operand" "")))]
+ "USE_LOAD_ADDRESS_MACRO (operands[1])"
+ "<default_load>\t%0,%1"
+ [(set (attr "length") (const_int 8))])
+
+(define_insn "*local_pic_load_s<mode>"
+ [(set (match_operand:SUPERQI 0 "register_operand" "=r")
+ (sign_extend:SUPERQI (mem:SUBX (match_operand 1 "absolute_symbolic_operand" ""))))]
"USE_LOAD_ADDRESS_MACRO (operands[1])"
- "<load>\t%0,%1"
+ "<SUBX:load>\t%0,%1"
[(set (attr "length") (const_int 8))])
(define_insn "*local_pic_load_u<mode>"
- [(set (match_operand:ZERO_EXTEND_LOAD 0 "register_operand" "=r")
- (zero_extend:ZERO_EXTEND_LOAD (mem:ZERO_EXTEND_LOAD (match_operand 1 "absolute_symbolic_operand" ""))))]
+ [(set (match_operand:SUPERQI 0 "register_operand" "=r")
+ (zero_extend:SUPERQI (mem:SUBX (match_operand 1 "absolute_symbolic_operand" ""))))]
"USE_LOAD_ADDRESS_MACRO (operands[1])"
- "<load>u\t%0,%1"
+ "<SUBX:load>u\t%0,%1"
[(set (attr "length") (const_int 8))])
-(define_insn "*local_pic_load<mode>"
- [(set (match_operand:ANYF 0 "register_operand" "=f")
+;; We can support ANYF loads into an X register if there is no double support
+;; or if the target is 64-bit.
+
+(define_insn "*local_pic_load<ANYF:mode>"
+ [(set (match_operand:ANYF 0 "register_operand" "=f,*r")
(mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
- (clobber (match_scratch:DI 2 "=r"))]
- "TARGET_HARD_FLOAT && TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[1])"
- "<load>\t%0,%1,%2"
+ (clobber (match_scratch:P 2 "=r,X"))]
+ "TARGET_HARD_FLOAT && USE_LOAD_ADDRESS_MACRO (operands[1])
+ && (!TARGET_DOUBLE_FLOAT || TARGET_64BIT)"
+ "@
+ <ANYF:load>\t%0,%1,%2
+ <softload>\t%0,%1"
[(set (attr "length") (const_int 8))])
-(define_insn "*local_pic_load<mode>"
+;; ??? For a 32-bit target with double float, a DF load into an X reg isn't
+;; supported. ld is not valid in that case. Punt for now. Maybe add a split
+;; for this later.
+
+(define_insn "*local_pic_load_32d<ANYF:mode>"
[(set (match_operand:ANYF 0 "register_operand" "=f")
(mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
- (clobber (match_scratch:SI 2 "=r"))]
- "TARGET_HARD_FLOAT && !TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[1])"
- "<load>\t%0,%1,%2"
+ (clobber (match_scratch:P 2 "=r"))]
+ "TARGET_HARD_FLOAT && USE_LOAD_ADDRESS_MACRO (operands[1])
+ && (TARGET_DOUBLE_FLOAT && !TARGET_64BIT)"
+ "<ANYF:load>\t%0,%1,%2"
[(set (attr "length") (const_int 8))])
-(define_insn "*local_pic_loadu<mode>"
- [(set (match_operand:SUPERQI 0 "register_operand" "=r")
- (zero_extend:SUPERQI (mem:SUBX (match_operand 1 "absolute_symbolic_operand" ""))))]
- "USE_LOAD_ADDRESS_MACRO (operands[1])"
- "<load>u\t%0,%1"
+(define_insn "*local_pic_load_sf<mode>"
+ [(set (match_operand:SOFTF 0 "register_operand" "=r")
+ (mem:SOFTF (match_operand 1 "absolute_symbolic_operand" "")))]
+ "!TARGET_HARD_FLOAT && USE_LOAD_ADDRESS_MACRO (operands[1])"
+ "<softload>\t%0,%1"
[(set (attr "length") (const_int 8))])
-(define_insn "*local_pic_storedi<mode>"
- [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
- (match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
- (clobber (match_scratch:DI 2 "=&r"))]
- "TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
- "<store>\t%z1,%0,%2"
- [(set (attr "length") (const_int 8))])
+;; Simplify PIC stores to static variables.
+;; These should go away once we figure out how to emit auipc discretely.
-(define_insn "*local_pic_storesi<mode>"
+(define_insn "*local_pic_store<ANYI:mode>"
[(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
(match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
- (clobber (match_scratch:SI 2 "=&r"))]
- "!TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
- "<store>\t%z1,%0,%2"
+ (clobber (match_scratch:P 2 "=&r"))]
+ "USE_LOAD_ADDRESS_MACRO (operands[0])"
+ "<ANYI:store>\t%z1,%0,%2"
[(set (attr "length") (const_int 8))])
-(define_insn "*local_pic_storedi<mode>"
+(define_insn "*local_pic_store<ANYF:mode>"
[(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
- (match_operand:ANYF 1 "register_operand" "f"))
- (clobber (match_scratch:DI 2 "=r"))]
- "TARGET_HARD_FLOAT && TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
- "<store>\t%1,%0,%2"
+ (match_operand:ANYF 1 "register_operand" "f,*r"))
+ (clobber (match_scratch:P 2 "=r,&r"))]
+ "TARGET_HARD_FLOAT && USE_LOAD_ADDRESS_MACRO (operands[0])
+ && (!TARGET_DOUBLE_FLOAT || TARGET_64BIT)"
+ "@
+ <ANYF:store>\t%1,%0,%2
+ <softstore>\t%1,%0,%2"
[(set (attr "length") (const_int 8))])
-(define_insn "*local_pic_storesi<mode>"
- [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
- (match_operand:ANYF 1 "register_operand" "f"))
- (clobber (match_scratch:SI 2 "=r"))]
- "TARGET_HARD_FLOAT && !TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
- "<store>\t%1,%0,%2"
+;; ??? For a 32-bit target with double float, a DF store from an X reg isn't
+;; supported. sd is not valid in that case. Punt for now. Maybe add a split
+;; for this later.
+
+(define_insn "*local_pic_store_32d<ANYF:mode>"
+ [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
+ (match_operand:ANYF 1 "register_operand" "f"))
+ (clobber (match_scratch:P 2 "=r"))]
+ "TARGET_HARD_FLOAT && USE_LOAD_ADDRESS_MACRO (operands[0])
+ && (TARGET_DOUBLE_FLOAT && !TARGET_64BIT)"
+ "<ANYF:store>\t%1,%0,%2"
+ [(set (attr "length") (const_int 8))])
+
+(define_insn "*local_pic_store_sf<SOFTF:mode>"
+ [(set (mem:SOFTF (match_operand 0 "absolute_symbolic_operand" ""))
+ (match_operand:SOFTF 1 "register_operand" "r"))
+ (clobber (match_scratch:P 2 "=&r"))]
+ "!TARGET_HARD_FLOAT && USE_LOAD_ADDRESS_MACRO (operands[0])"
+ "<softstore>\t%1,%0,%2"
[(set (attr "length") (const_int 8))])
diff --git a/gcc/config/riscv/riscv.c b/gcc/config/riscv/riscv.c
index 69e70feaf33..9d6d981a42a 100644
--- a/gcc/config/riscv/riscv.c
+++ b/gcc/config/riscv/riscv.c
@@ -802,7 +802,13 @@ riscv_address_insns (rtx x, machine_mode mode, bool might_split_p)
int n = 1;
if (!riscv_classify_address (&addr, x, mode, false))
- return 0;
+ {
+ /* This could be a pattern from the pic.md file, in which case we want
+ this address to always have a cost of 3 to make it as expensive as the
+ most expensive symbol. This prevents constant propagation from
+ preferring symbols over register plus offset. */
+ return 3;
+ }
/* BLKmode is used for single unaligned loads and stores and should
not count as a multiword mode. */
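
The riscv_address_insns change makes an address that riscv_classify_address rejects (such as the bare symbols kept live by the pic.md patterns) cost 3 instructions instead of 0, so it can never look cheaper than a register-plus-offset address. A toy comparison under that simplified cost model, for illustration only:

#include <stdio.h>

int
main (void)
{
  int cost_reg_plus_offset = 1; /* classified address: one access insn */
  int cost_pic_symbol_old = 0;  /* old: unclassified address returned 0 */
  int cost_pic_symbol_new = 3;  /* new: as costly as the most expensive
                                   symbolic address */

  printf ("old model prefers the symbol: %s\n",
          cost_pic_symbol_old <= cost_reg_plus_offset ? "yes" : "no");
  printf ("new model prefers the symbol: %s\n",
          cost_pic_symbol_new <= cost_reg_plus_offset ? "yes" : "no");
  return 0;
}
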
diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md
index 613af9d79e4..95fbb282c7c 100644
--- a/gcc/config/riscv/riscv.md
+++ b/gcc/config/riscv/riscv.md
@@ -269,9 +269,6 @@
;; Iterator for QImode extension patterns.
(define_mode_iterator SUPERQI [HI SI (DI "TARGET_64BIT")])
-;; Iterator for extending loads.
-(define_mode_iterator ZERO_EXTEND_LOAD [QI HI (SI "TARGET_64BIT")])
-
;; Iterator for hardware integer modes narrower than XLEN.
(define_mode_iterator SUBX [QI HI (SI "TARGET_64BIT")])
@@ -282,6 +279,9 @@
(define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT")
(DF "TARGET_DOUBLE_FLOAT")])
+;; Iterator for floating-point modes that can be loaded into X registers.
+(define_mode_iterator SOFTF [SF (DF "TARGET_64BIT")])
+
;; This attribute gives the length suffix for a sign- or zero-extension
;; instruction.
(define_mode_attr size [(QI "b") (HI "h")])
@@ -289,9 +289,19 @@
;; Mode attributes for loads.
(define_mode_attr load [(QI "lb") (HI "lh") (SI "lw") (DI "ld") (SF "flw") (DF "fld")])
+;; Instruction names for integer loads that aren't explicitly sign or zero
+;; extended. See riscv_output_move and LOAD_EXTEND_OP.
+(define_mode_attr default_load [(QI "lbu") (HI "lhu") (SI "lw") (DI "ld")])
+
+;; Mode attribute for FP loads into integer registers.
+(define_mode_attr softload [(SF "lw") (DF "ld")])
+
;; Instruction names for stores.
(define_mode_attr store [(QI "sb") (HI "sh") (SI "sw") (DI "sd") (SF "fsw") (DF "fsd")])
+;; Instruction names for FP stores from integer registers.
+(define_mode_attr softstore [(SF "sw") (DF "sd")])
+
;; This attribute gives the best constraint to use for registers of
;; a given mode.
(define_mode_attr reg [(SI "d") (DI "d") (CC "d")])
diff --git a/gcc/config/rs6000/altivec.md b/gcc/config/rs6000/altivec.md
index 3419e3a7a1c..1ddf5802607 100644
--- a/gcc/config/rs6000/altivec.md
+++ b/gcc/config/rs6000/altivec.md
@@ -603,7 +603,7 @@
"vcmpbfp %0,%1,%2"
[(set_attr "type" "veccmp")])
-(define_insn "*altivec_eq<mode>"
+(define_insn "altivec_eq<mode>"
[(set (match_operand:VI2 0 "altivec_register_operand" "=v")
(eq:VI2 (match_operand:VI2 1 "altivec_register_operand" "v")
(match_operand:VI2 2 "altivec_register_operand" "v")))]
@@ -2304,7 +2304,7 @@
;; Compare vectors producing a vector result and a predicate, setting CR6 to
;; indicate a combined status
-(define_insn "*altivec_vcmpequ<VI_char>_p"
+(define_insn "altivec_vcmpequ<VI_char>_p"
[(set (reg:CC CR6_REGNO)
(unspec:CC [(eq:CC (match_operand:VI2 1 "register_operand" "v")
(match_operand:VI2 2 "register_operand" "v"))]
diff --git a/gcc/config/rs6000/rs6000-string.c b/gcc/config/rs6000/rs6000-string.c
index 451e9ed33da..ff0414586d0 100644
--- a/gcc/config/rs6000/rs6000-string.c
+++ b/gcc/config/rs6000/rs6000-string.c
@@ -157,6 +157,33 @@ do_load_for_compare (rtx reg, rtx mem, machine_mode mode)
{
switch (GET_MODE (reg))
{
+ case E_V16QImode:
+ switch (mode)
+ {
+ case E_V16QImode:
+ if (!BYTES_BIG_ENDIAN)
+ {
+ if (TARGET_P9_VECTOR)
+ emit_insn (gen_vsx_ld_elemrev_v16qi_internal (reg, mem));
+ else
+ {
+ rtx reg_v2di = simplify_gen_subreg (V2DImode, reg,
+ V16QImode, 0);
+ gcc_assert (MEM_P (mem));
+ rtx addr = XEXP (mem, 0);
+ rtx mem_v2di = gen_rtx_MEM (V2DImode, addr);
+ MEM_COPY_ATTRIBUTES (mem_v2di, mem);
+ set_mem_size (mem, GET_MODE_SIZE (V2DImode));
+ emit_insn (gen_vsx_ld_elemrev_v2di (reg_v2di, mem_v2di));
+ }
+ }
+ else
+ emit_insn (gen_vsx_movv2di_64bit (reg, mem));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
case E_DImode:
switch (mode)
{
@@ -227,6 +254,12 @@ do_load_for_compare (rtx reg, rtx mem, machine_mode mode)
gcc_unreachable ();
}
break;
+
+ case E_QImode:
+ gcc_assert (mode == E_QImode);
+ emit_move_insn (reg, mem);
+ break;
+
default:
gcc_unreachable ();
break;
@@ -1705,17 +1738,17 @@ expand_strncmp_align_check (rtx strncmp_label, rtx src_addr, HOST_WIDE_INT bytes
RESULT_REG is the rtx for the result register.
EQUALITY_COMPARE_REST is a flag to indicate we need to make a cleanup call
to strcmp/strncmp if we have equality at the end of the inline comparison.
- CLEANUP_LABEL is rtx for a label we generate if we need code to clean up
- and generate the final comparison result.
+ P_CLEANUP_LABEL is a pointer to rtx for a label we generate if we need code
+ to clean up and generate the final comparison result.
FINAL_MOVE_LABEL is rtx for a label we can branch to when we can just
set the final result. */
static void
-expand_strncmp_gpr_sequence(unsigned HOST_WIDE_INT bytes_to_compare,
- unsigned int base_align,
- rtx orig_src1, rtx orig_src2,
- rtx tmp_reg_src1, rtx tmp_reg_src2, rtx result_reg,
- bool equality_compare_rest, rtx &cleanup_label,
- rtx final_move_label)
+expand_strncmp_gpr_sequence (unsigned HOST_WIDE_INT bytes_to_compare,
+ unsigned int base_align,
+ rtx orig_src1, rtx orig_src2,
+ rtx tmp_reg_src1, rtx tmp_reg_src2, rtx result_reg,
+ bool equality_compare_rest, rtx *p_cleanup_label,
+ rtx final_move_label)
{
unsigned int word_mode_size = GET_MODE_SIZE (word_mode);
machine_mode load_mode;
@@ -1724,6 +1757,8 @@ expand_strncmp_gpr_sequence(unsigned HOST_WIDE_INT bytes_to_compare,
unsigned HOST_WIDE_INT offset = 0;
rtx src1_addr = force_reg (Pmode, XEXP (orig_src1, 0));
rtx src2_addr = force_reg (Pmode, XEXP (orig_src2, 0));
+ gcc_assert (p_cleanup_label != NULL);
+ rtx cleanup_label = *p_cleanup_label;
while (bytes_to_compare > 0)
{
@@ -1876,6 +1911,178 @@ expand_strncmp_gpr_sequence(unsigned HOST_WIDE_INT bytes_to_compare,
bytes_to_compare -= cmp_bytes;
}
+ *p_cleanup_label = cleanup_label;
+ return;
+}
+
+/* Generate the sequence of compares for strcmp/strncmp using vec/vsx
+ instructions.
+
+ BYTES_TO_COMPARE is the number of bytes to be compared.
+ ORIG_SRC1 is the unmodified rtx for the first string.
+ ORIG_SRC2 is the unmodified rtx for the second string.
+ S1ADDR is the register to use for the base address of the first string.
+ S2ADDR is the register to use for the base address of the second string.
+ OFF_REG is the register to use for the string offset for loads.
+ S1DATA is the register for loading the first string.
+ S2DATA is the register for loading the second string.
+ VEC_RESULT is the rtx for the vector result indicating the byte difference.
+ EQUALITY_COMPARE_REST is a flag to indicate we need to make a cleanup call
+ to strcmp/strncmp if we have equality at the end of the inline comparison.
+ P_CLEANUP_LABEL is a pointer to rtx for a label we generate if we need code
+ to clean up and generate the final comparison result.
+ FINAL_MOVE_LABEL is rtx for a label we can branch to when we can just
+ set the final result. */
+static void
+expand_strncmp_vec_sequence (unsigned HOST_WIDE_INT bytes_to_compare,
+ rtx orig_src1, rtx orig_src2,
+ rtx s1addr, rtx s2addr, rtx off_reg,
+ rtx s1data, rtx s2data,
+ rtx vec_result, bool equality_compare_rest,
+ rtx *p_cleanup_label, rtx final_move_label)
+{
+ machine_mode load_mode;
+ unsigned int load_mode_size;
+ unsigned HOST_WIDE_INT cmp_bytes = 0;
+ unsigned HOST_WIDE_INT offset = 0;
+
+ gcc_assert (p_cleanup_label != NULL);
+ rtx cleanup_label = *p_cleanup_label;
+
+ emit_move_insn (s1addr, force_reg (Pmode, XEXP (orig_src1, 0)));
+ emit_move_insn (s2addr, force_reg (Pmode, XEXP (orig_src2, 0)));
+
+ unsigned int i;
+ rtx zr[16];
+ for (i = 0; i < 16; i++)
+ zr[i] = GEN_INT (0);
+ rtvec zv = gen_rtvec_v (16, zr);
+ rtx zero_reg = gen_reg_rtx (V16QImode);
+ rs6000_expand_vector_init (zero_reg, gen_rtx_PARALLEL (V16QImode, zv));
+
+ while (bytes_to_compare > 0)
+ {
+ /* VEC/VSX compare sequence for P8:
+ check each 16B with:
+ lxvd2x 32,28,8
+ lxvd2x 33,29,8
+ vcmpequb 2,0,1 # compare strings
+ vcmpequb 4,0,3 # compare w/ 0
+ xxlorc 37,36,34 # first FF byte is either mismatch or end of string
+ vcmpequb. 7,5,3 # reg 7 contains 0
+ bnl 6,.Lmismatch
+
+ For the P8 LE case, we use lxvd2x and compare full 16 bytes
+	     but then use vgbbd and a shift to get two bytes with the
+ information we need in the correct order.
+
+ VEC/VSX compare sequence if TARGET_P9_VECTOR:
+ lxvb16x/lxvb16x # load 16B of each string
+ vcmpnezb. # produces difference location or zero byte location
+ bne 6,.Lmismatch
+
+ Use the overlapping compare trick for the last block if it is
+ less than 16 bytes.
+ */
+
+ load_mode = V16QImode;
+ load_mode_size = GET_MODE_SIZE (load_mode);
+
+ if (bytes_to_compare >= load_mode_size)
+ cmp_bytes = load_mode_size;
+ else
+ {
+ /* Move this load back so it doesn't go past the end. P8/P9
+ can do this efficiently. This is never called with less
+ than 16 bytes so we should always be able to do this. */
+ unsigned int extra_bytes = load_mode_size - bytes_to_compare;
+ cmp_bytes = bytes_to_compare;
+ gcc_assert (offset > extra_bytes);
+ offset -= extra_bytes;
+ cmp_bytes = load_mode_size;
+ bytes_to_compare = cmp_bytes;
+ }
+
+ /* The offset currently used is always kept in off_reg so that the
+ cleanup code on P8 can use it to extract the differing byte. */
+ emit_move_insn (off_reg, GEN_INT (offset));
+
+ rtx addr1 = gen_rtx_PLUS (Pmode, s1addr, off_reg);
+ do_load_for_compare_from_addr (load_mode, s1data, addr1, orig_src1);
+ rtx addr2 = gen_rtx_PLUS (Pmode, s2addr, off_reg);
+ do_load_for_compare_from_addr (load_mode, s2data, addr2, orig_src2);
+
+ /* Cases to handle. A and B are chunks of the two strings.
+ 1: Not end of comparison:
+ A != B: branch to cleanup code to compute result.
+ A == B: next block
+ 2: End of the inline comparison:
+ A != B: branch to cleanup code to compute result.
+ A == B: call strcmp/strncmp
+ 3: compared requested N bytes:
+ A == B: branch to result 0.
+ A != B: cleanup code to compute result. */
+
+ unsigned HOST_WIDE_INT remain = bytes_to_compare - cmp_bytes;
+
+ if (TARGET_P9_VECTOR)
+ emit_insn (gen_vcmpnezb_p (vec_result, s1data, s2data));
+ else
+ {
+ /* Emit instructions to do comparison and zero check. */
+ rtx cmp_res = gen_reg_rtx (load_mode);
+ rtx cmp_zero = gen_reg_rtx (load_mode);
+ rtx cmp_combined = gen_reg_rtx (load_mode);
+ emit_insn (gen_altivec_eqv16qi (cmp_res, s1data, s2data));
+ emit_insn (gen_altivec_eqv16qi (cmp_zero, s1data, zero_reg));
+ emit_insn (gen_orcv16qi3 (vec_result, cmp_zero, cmp_res));
+ emit_insn (gen_altivec_vcmpequb_p (cmp_combined, vec_result, zero_reg));
+ }
+
+ bool branch_to_cleanup = (remain > 0 || equality_compare_rest);
+ rtx cr6 = gen_rtx_REG (CCmode, CR6_REGNO);
+ rtx dst_label;
+ rtx cmp_rtx;
+ if (branch_to_cleanup)
+ {
+ /* Branch to cleanup code, otherwise fall through to do more
+ compares. P8 and P9 use different CR bits because on P8
+	     we are looking at the result of a comparison vs a
+ register of zeroes so the all-true condition means no
+ difference or zero was found. On P9, vcmpnezb sets a byte
+ to 0xff if there is a mismatch or zero, so the all-false
+ condition indicates we found no difference or zero. */
+ if (!cleanup_label)
+ cleanup_label = gen_label_rtx ();
+ dst_label = cleanup_label;
+ if (TARGET_P9_VECTOR)
+ cmp_rtx = gen_rtx_NE (VOIDmode, cr6, const0_rtx);
+ else
+ cmp_rtx = gen_rtx_GE (VOIDmode, cr6, const0_rtx);
+ }
+ else
+ {
+ /* Branch to final return or fall through to cleanup,
+ result is already set to 0. */
+ dst_label = final_move_label;
+ if (TARGET_P9_VECTOR)
+ cmp_rtx = gen_rtx_EQ (VOIDmode, cr6, const0_rtx);
+ else
+ cmp_rtx = gen_rtx_LT (VOIDmode, cr6, const0_rtx);
+ }
+
+ rtx lab_ref = gen_rtx_LABEL_REF (VOIDmode, dst_label);
+ rtx ifelse = gen_rtx_IF_THEN_ELSE (VOIDmode, cmp_rtx,
+ lab_ref, pc_rtx);
+ rtx j2 = emit_jump_insn (gen_rtx_SET (pc_rtx, ifelse));
+ JUMP_LABEL (j2) = dst_label;
+ LABEL_NUSES (dst_label) += 1;
+
+ offset += cmp_bytes;
+ bytes_to_compare -= cmp_bytes;
+ }
+ *p_cleanup_label = cleanup_label;
+ return;
}
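
As a reading aid for the loop above, the following scalar sketch models what the P8 vcmpequb/xxlorc triple computes for one 16-byte block, plus the overlapping-load adjustment used for a short final block.  It is only an illustration; block_mask and adjust_tail_offset are hypothetical names, not functions in rs6000-string.c.

#include <cstddef>
#include <cstdint>

/* Per-byte mask for one 16-byte block: 0xff marks a mismatch between the
   strings or a NUL byte in string 1.  */
static void
block_mask (const uint8_t s1[16], const uint8_t s2[16], uint8_t mask[16])
{
  for (int i = 0; i < 16; i++)
    {
      uint8_t eq = (s1[i] == s2[i]) ? 0xff : 0x00;	/* vcmpequb s1,s2 */
      uint8_t zero = (s1[i] == 0) ? 0xff : 0x00;	/* vcmpequb s1,0  */
      mask[i] = (uint8_t) (zero | (uint8_t) ~eq);	/* xxlorc (a | ~b) */
    }
}

/* Overlapping-load trick: for a final block of N < 16 bytes, back the
   offset up by (16 - N) so the last 16-byte load ends exactly at the end
   of the region; the bytes re-read at the front of that load were already
   compared, which is harmless.  */
static size_t
adjust_tail_offset (size_t offset, size_t n)
{
  return offset - (16 - n);
}
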
/* Generate the final sequence that identifies the differing
@@ -1948,6 +2155,96 @@ emit_final_str_compare_gpr (rtx str1, rtx str2, rtx result)
return;
}
+/* Generate the final sequence that identifies the differing
+ byte and generates the final result, taking into account
+ zero bytes:
+
+ P8:
+ vgbbd 0,0
+ vsldoi 0,0,0,9
+ mfvsrd 9,32
+ addi 10,9,-1 # count trailing zero bits
+ andc 9,10,9
+ popcntd 9,9
+ lbzx 10,28,9 # use that offset to load differing byte
+ lbzx 3,29,9
+ subf 3,3,10 # subtract for final result
+
+ P9:
+ vclzlsbb # counts trailing bytes with lsb=0
+ vextublx # extract differing byte
+
+ STR1 is the reg rtx for data from string 1.
+ STR2 is the reg rtx for data from string 2.
+ RESULT is the reg rtx for the comparison result.
+ S1ADDR is the register to use for the base address of the first string.
+ S2ADDR is the register to use for the base address of the second string.
+ ORIG_SRC1 is the unmodified rtx for the first string.
+ ORIG_SRC2 is the unmodified rtx for the second string.
+ OFF_REG is the register to use for the string offset for loads.
+ VEC_RESULT is the rtx for the vector result indicating the byte difference.
+ */
+
+static void
+emit_final_str_compare_vec (rtx str1, rtx str2, rtx result,
+ rtx s1addr, rtx s2addr,
+ rtx orig_src1, rtx orig_src2,
+ rtx off_reg, rtx vec_result)
+{
+ if (TARGET_P9_VECTOR)
+ {
+ rtx diffix = gen_reg_rtx (SImode);
+ rtx chr1 = gen_reg_rtx (SImode);
+ rtx chr2 = gen_reg_rtx (SImode);
+ rtx chr1_di = simplify_gen_subreg (DImode, chr1, SImode, 0);
+ rtx chr2_di = simplify_gen_subreg (DImode, chr2, SImode, 0);
+ emit_insn (gen_vclzlsbb_v16qi (diffix, vec_result));
+ emit_insn (gen_vextublx (chr1, diffix, str1));
+ emit_insn (gen_vextublx (chr2, diffix, str2));
+ do_sub3 (result, chr1_di, chr2_di);
+ }
+ else
+ {
+ rtx diffix = gen_reg_rtx (DImode);
+ rtx result_gbbd = gen_reg_rtx (V16QImode);
+ /* Since each byte of the input is either 00 or FF, the bytes in
+ dw0 and dw1 after vgbbd are all identical to each other. */
+ emit_insn (gen_p8v_vgbbd (result_gbbd, vec_result));
+ /* For LE, we shift by 9 and get BA in the low two bytes then CTZ.
+ For BE, we shift by 7 and get AB in the high two bytes then CLZ. */
+ rtx result_shifted = gen_reg_rtx (V16QImode);
+ int shift_amt = (BYTES_BIG_ENDIAN) ? 7 : 9;
+      emit_insn (gen_altivec_vsldoi_v16qi (result_shifted, result_gbbd,
+					   result_gbbd, GEN_INT (shift_amt)));
+
+ rtx diffix_df = simplify_gen_subreg (DFmode, diffix, DImode, 0);
+ emit_insn (gen_p8_mfvsrd_3_v16qi (diffix_df, result_shifted));
+ rtx count = gen_reg_rtx (DImode);
+
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_clzdi2 (count, diffix));
+ else
+ emit_insn (gen_ctzdi2 (count, diffix));
+
+ /* P8 doesn't have a good solution for extracting one byte from
+ a vsx reg like vextublx on P9 so we just compute the offset
+ of the differing byte and load it from each string. */
+ do_add3 (off_reg, off_reg, count);
+
+ rtx chr1 = gen_reg_rtx (QImode);
+ rtx chr2 = gen_reg_rtx (QImode);
+ rtx addr1 = gen_rtx_PLUS (Pmode, s1addr, off_reg);
+ do_load_for_compare_from_addr (QImode, chr1, addr1, orig_src1);
+ rtx addr2 = gen_rtx_PLUS (Pmode, s2addr, off_reg);
+ do_load_for_compare_from_addr (QImode, chr2, addr2, orig_src2);
+ machine_mode rmode = GET_MODE (result);
+ rtx chr1_rm = simplify_gen_subreg (rmode, chr1, QImode, 0);
+ rtx chr2_rm = simplify_gen_subreg (rmode, chr2, QImode, 0);
+ do_sub3 (result, chr1_rm, chr2_rm);
+ }
+
+ return;
+}
+
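
The P8 path documented in the comment above leans on a standard bit trick; a hedged scalar sketch follows.  ctz64 and final_result are hypothetical illustration-only helpers, and the bit-per-byte layout is the little-endian one set up by the vgbbd/vsldoi steps.

#include <cstdint>

/* The addi/andc/popcntd triple in the comment above is the classic
   count-trailing-zeros identity: for x != 0,
   ctz (x) == popcount (~x & (x - 1)).  */
static int
ctz64 (uint64_t x)
{
  return __builtin_popcountll (~x & (x - 1));
}

/* Once the 00/FF byte mask has been gathered into an integer (one bit per
   byte), the trailing-zero count is the index of the first differing byte,
   and the strcmp-style result is the difference of the two bytes there.  */
static int
final_result (uint64_t gathered_mask, const unsigned char *s1,
	      const unsigned char *s2, unsigned long base_off)
{
  unsigned long idx = base_off + ctz64 (gathered_mask);
  return (int) s1[idx] - (int) s2[idx];
}
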
/* Expand a string compare operation with length, and return
true if successful. Return false if we should let the
compiler generate normal code, probably a strncmp call.
@@ -2002,21 +2299,43 @@ expand_strn_compare (rtx operands[], int no_length)
gcc_assert (GET_MODE (target) == SImode);
- unsigned int word_mode_size = GET_MODE_SIZE (word_mode);
+ unsigned int required_align = 8;
unsigned HOST_WIDE_INT offset = 0;
unsigned HOST_WIDE_INT bytes; /* N from the strncmp args if available. */
unsigned HOST_WIDE_INT compare_length; /* How much to compare inline. */
+
if (no_length)
- /* Use this as a standin to determine the mode to use. */
- bytes = rs6000_string_compare_inline_limit * word_mode_size;
+ bytes = rs6000_string_compare_inline_limit;
else
bytes = UINTVAL (bytes_rtx);
- machine_mode load_mode =
- select_block_compare_mode (0, bytes, base_align);
- unsigned int load_mode_size = GET_MODE_SIZE (load_mode);
- compare_length = rs6000_string_compare_inline_limit * load_mode_size;
+  /* Is it OK to use vec/vsx for this?  TARGET_VSX means we have at
+ least POWER7 but we use TARGET_EFFICIENT_UNALIGNED_VSX which is
+ at least POWER8. That way we can rely on overlapping compares to
+ do the final comparison of less than 16 bytes. Also I do not want
+ to deal with making this work for 32 bits. */
+  int use_vec = (bytes >= 16 && !TARGET_32BIT
+		 && TARGET_EFFICIENT_UNALIGNED_VSX);
+
+ if (use_vec)
+ required_align = 16;
+
+ machine_mode load_mode;
+ rtx tmp_reg_src1, tmp_reg_src2;
+ if (use_vec)
+ {
+ load_mode = V16QImode;
+ tmp_reg_src1 = gen_reg_rtx (V16QImode);
+ tmp_reg_src2 = gen_reg_rtx (V16QImode);
+ }
+ else
+ {
+ load_mode = select_block_compare_mode (0, bytes, base_align);
+ tmp_reg_src1 = gen_reg_rtx (word_mode);
+ tmp_reg_src2 = gen_reg_rtx (word_mode);
+ }
+
+ compare_length = rs6000_string_compare_inline_limit;
/* If we have equality at the end of the last compare and we have not
found the end of the string, we need to call strcmp/strncmp to
@@ -2040,10 +2359,7 @@ expand_strn_compare (rtx operands[], int no_length)
rtx final_move_label = gen_label_rtx ();
rtx final_label = gen_label_rtx ();
rtx begin_compare_label = NULL;
- unsigned int required_align = 8;
-
- required_align = 8;
-
+
if (base_align < required_align)
{
/* Generate code that checks distance to 4k boundary for this case. */
@@ -2060,7 +2376,7 @@ expand_strn_compare (rtx operands[], int no_length)
the subsequent code generation are in agreement so we do not
go past the length we tested for a 4k boundary crossing. */
unsigned HOST_WIDE_INT align_test = compare_length;
- if (align_test < 8)
+ if (align_test < required_align)
{
align_test = HOST_WIDE_INT_1U << ceil_log2 (align_test);
base_align = align_test;
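
For concreteness, a small stand-in for the rounding above; my_ceil_log2 approximates GCC's ceil_log2, and the point is only that e.g. a 10-byte compare length yields a 16-byte alignment test, so the 4k-boundary check covers every byte the generated loads may touch.

#include <cstdint>

/* Stand-in for GCC's ceil_log2: smallest E such that (1 << E) >= X, X > 0.  */
static unsigned int
my_ceil_log2 (uint64_t x)
{
  unsigned int e = 0;
  while ((uint64_t) 1 << e < x)
    e++;
  return e;
}

/* E.g. align_test_for (10) == 16.  */
static uint64_t
align_test_for (uint64_t compare_length)
{
  return (uint64_t) 1 << my_ceil_log2 (compare_length);
}
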
@@ -2102,7 +2418,7 @@ expand_strn_compare (rtx operands[], int no_length)
else
{
/* -m32 -mpowerpc64 results in word_mode being DImode even
- though otherwise it is 32-bit. The length arg to strncmp
+ though otherwise it is 32-bit. The length arg to strncmp
is a size_t which will be the same size as pointers. */
rtx len_rtx = gen_reg_rtx (Pmode);
emit_move_insn (len_rtx, gen_int_mode (bytes, Pmode));
@@ -2124,17 +2440,32 @@ expand_strn_compare (rtx operands[], int no_length)
}
rtx cleanup_label = NULL;
- rtx tmp_reg_src1 = gen_reg_rtx (word_mode);
- rtx tmp_reg_src2 = gen_reg_rtx (word_mode);
+ rtx s1addr = NULL, s2addr = NULL, off_reg = NULL, vec_result = NULL;
/* Generate a sequence of GPR or VEC/VSX instructions to compare out
to the length specified. */
- expand_strncmp_gpr_sequence(compare_length, base_align,
- orig_src1, orig_src2,
- tmp_reg_src1, tmp_reg_src2,
- result_reg,
- equality_compare_rest,
- cleanup_label, final_move_label);
+ if (use_vec)
+ {
+ s1addr = gen_reg_rtx (Pmode);
+ s2addr = gen_reg_rtx (Pmode);
+ off_reg = gen_reg_rtx (Pmode);
+ vec_result = gen_reg_rtx (load_mode);
+ emit_move_insn (result_reg, GEN_INT (0));
+ expand_strncmp_vec_sequence (compare_length,
+ orig_src1, orig_src2,
+ s1addr, s2addr, off_reg,
+ tmp_reg_src1, tmp_reg_src2,
+ vec_result,
+ equality_compare_rest,
+ &cleanup_label, final_move_label);
+ }
+ else
+ expand_strncmp_gpr_sequence (compare_length, base_align,
+ orig_src1, orig_src2,
+ tmp_reg_src1, tmp_reg_src2,
+ result_reg,
+ equality_compare_rest,
+ &cleanup_label, final_move_label);
offset = compare_length;
@@ -2174,7 +2505,12 @@ expand_strn_compare (rtx operands[], int no_length)
if (cleanup_label)
emit_label (cleanup_label);
- emit_final_str_compare_gpr (tmp_reg_src1, tmp_reg_src2, result_reg);
+ if (use_vec)
+ emit_final_str_compare_vec (tmp_reg_src1, tmp_reg_src2, result_reg,
+ s1addr, s2addr, orig_src1, orig_src2,
+ off_reg, vec_result);
+ else
+ emit_final_str_compare_gpr (tmp_reg_src1, tmp_reg_src2, result_reg);
emit_label (final_move_label);
emit_insn (gen_movsi (target,
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index 78d87db6b5a..921f2eab7b0 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -7957,7 +7957,9 @@ toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
*tocrel_offset_ret = tocrel_offset;
return (GET_CODE (tocrel_base) == UNSPEC
- && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
+ && XINT (tocrel_base, 1) == UNSPEC_TOCREL
+ && REG_P (XVECEXP (tocrel_base, 0, 1))
+ && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
}
/* Return true if X is a constant pool address, and also for cmodel=medium
diff --git a/gcc/config/rs6000/rs6000.opt b/gcc/config/rs6000/rs6000.opt
index c18d7a2bb78..edb28699fbc 100644
--- a/gcc/config/rs6000/rs6000.opt
+++ b/gcc/config/rs6000/rs6000.opt
@@ -334,8 +334,8 @@ Target Report Var(rs6000_block_compare_inline_loop_limit) Init(-1) RejectNegativ
Max number of bytes to compare with loops.
mstring-compare-inline-limit=
-Target Report Var(rs6000_string_compare_inline_limit) Init(8) RejectNegative Joined UInteger Save
-Max number of pairs of load insns for compare.
+Target Report Var(rs6000_string_compare_inline_limit) Init(64) RejectNegative Joined UInteger Save
+Max number of bytes to compare.
misel
Target Report Mask(ISEL) Var(rs6000_isa_flags)
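
The new help text reflects that the option now counts bytes rather than pairs of load instructions.  A hypothetical user-level example of code that becomes a candidate for the inline expansion under the new 64-byte default:

#include <cstring>

/* With -O2 on a 64-bit POWER target, a bounded comparison whose length does
   not exceed -mstring-compare-inline-limit (now 64 by default) may be
   expanded inline by the vec/vsx or GPR sequences above instead of calling
   strncmp.  */
bool
same_prefix (const char *a, const char *b)
{
  return std::strncmp (a, b, 64) == 0;
}
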
diff --git a/gcc/config/rs6000/vsx.md b/gcc/config/rs6000/vsx.md
index c3c099f40e9..89e193a2822 100644
--- a/gcc/config/rs6000/vsx.md
+++ b/gcc/config/rs6000/vsx.md
@@ -1412,7 +1412,7 @@
}
})
-(define_insn "*vsx_ld_elemrev_v16qi_internal"
+(define_insn "vsx_ld_elemrev_v16qi_internal"
[(set (match_operand:V16QI 0 "vsx_register_operand" "=wa")
(vec_select:V16QI
(match_operand:V16QI 1 "memory_operand" "Z")
@@ -5051,6 +5051,22 @@
"vcmpnezb %0,%1,%2"
[(set_attr "type" "vecsimple")])
+;; Vector Compare Not Equal or Zero Byte predicate or record-form
+(define_insn "vcmpnezb_p"
+ [(set (reg:CC CR6_REGNO)
+ (unspec:CC
+ [(match_operand:V16QI 1 "altivec_register_operand" "v")
+ (match_operand:V16QI 2 "altivec_register_operand" "v")]
+ UNSPEC_VCMPNEZB))
+ (set (match_operand:V16QI 0 "altivec_register_operand" "=v")
+ (unspec:V16QI
+ [(match_dup 1)
+ (match_dup 2)]
+ UNSPEC_VCMPNEZB))]
+ "TARGET_P9_VECTOR"
+ "vcmpnezb. %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
;; Vector Compare Not Equal Half Word (specified/not+eq:)
(define_insn "vcmpneh"
[(set (match_operand:V8HI 0 "altivec_register_operand" "=v")
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index 0fbd8167a04..6ecd48dfac8 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,115 @@
+2018-08-29 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/87095
+ * decl.c (begin_destructor_body): If current_class_type has
+	virtual bases and the primary base is a nearly empty virtual base,
+ voidify clearing of vptr and make it conditional on in-charge
+ argument.
+
+2018-08-29 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/85265
+ * parser.c (cp_parser_introduction_list): If cp_parser_identifier
+	returns error_mark_node, exit the loop early.
+ (cp_parser_template_introduction): Improve error-recovery, remove
+ error call about empty introduction-list.
+
+2018-08-29 David Malcolm <dmalcolm@redhat.com>
+
+ PR c++/85110
+ * call.c (print_conversion_rejection): Add "fn" param and use it
+ for "no known conversion" messages to underline the pertinent
+ param.
+ (print_z_candidate): Supply "fn" to the new param above.
+
+2018-08-29 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/87122
+ * pt.c (tsubst_expr) <case RANGE_FOR_STMT>: If
+ processing_template_decl and decl is structured binding decl, call
+ cp_finish_decomp.
+
+2018-08-28 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/86546
+	* decl.c (finish_case_label): If the type is erroneous, return
+	error_mark_node early.
+
+2018-08-27 David Malcolm <dmalcolm@redhat.com>
+
+ PR c++/63392
+ * parser.c (cp_parser_diagnose_invalid_type_name): Add fix-it
+ hint.
+
+2018-08-27 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/86993
+ * cp-tree.h (cxx_readonly_error): Add location_t argument.
+ * typeck2.c (cxx_readonly_error): Add LOC argument, pass it to
+ ERROR_FOR_ASSIGNMENT macro and readonly_error. Add LOC argument
+ to ERROR_FOR_ASSIGNMENT macro, use error_at instead of error and
+ pass LOC to it. Formatting fixes.
+ * typeck.c (cp_build_unary_op): Pass location to cxx_readonly_error.
+ (cp_build_modify_expr): Pass loc to cxx_readonly_error.
+ * semantics.c (finish_asm_stmt): Pass input_location to
+ cxx_readonly_error.
+
+2018-08-27 David Malcolm <dmalcolm@redhat.com>
+
+ PR c++/87091
+ * decl.c (grokdeclarator): Update for conversion of show_caret_p
+ to a tri-state.
+ * error.c (cp_printer): Likewise.
+ * name-lookup.c (maybe_suggest_missing_std_header): Update call to
+ maybe_add_include_fixit to suggest overriding the location, as it
+ is for a note.
+ * parser.c (cp_parser_string_literal): Update for conversion of
+ show_caret_p to a tri-state.
+ (cp_parser_elaborated_type_specifier): Likewise.
+ (set_and_check_decl_spec_loc): Likewise.
+ * pt.c (listify): Update call to maybe_add_include_fixit to not
+ override the location, as it is for an error.
+ * rtti.c (typeid_ok_p): Likewise.
+
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ * call.c (build_call_a): Use new function
+ fndecl_built_in_p and remove check for FUNCTION_DECL if
+ possible.
+ (build_cxx_call): Likewise.
+ * constexpr.c (constexpr_fn_retval): Likewise.
+ (cxx_eval_builtin_function_call): Likewise.
+ (cxx_eval_call_expression): Likewise.
+ (potential_constant_expression_1): Likewise.
+ * cp-gimplify.c (cp_gimplify_expr): Likewise.
+ (cp_fold): Likewise.
+ * decl.c (decls_match): Likewise.
+ (validate_constexpr_redeclaration): Likewise.
+ (duplicate_decls): Likewise.
+ (make_rtl_for_nonlocal_decl): Likewise.
+ * name-lookup.c (consider_binding_level): Likewise.
+ (cp_emit_debug_info_for_using): Likewise.
+ * semantics.c (finish_call_expr): Likewise.
+ * tree.c (builtin_valid_in_constant_expr_p): Likewise.
+
+2018-08-26 Marek Polacek <polacek@redhat.com>
+
+ PR c++/87080
+ * typeck.c (maybe_warn_pessimizing_move): Do nothing in a template.
+
+ PR c++/87029, Implement -Wredundant-move.
+ * typeck.c (treat_lvalue_as_rvalue_p): New function.
+ (maybe_warn_pessimizing_move): Call convert_from_reference.
+ Warn about redundant moves.
+
+2018-08-24 Marek Polacek <polacek@redhat.com>
+
+ PR c++/67012
+ PR c++/86942
+ * decl.c (grokdeclarator): Disallow functions with trailing return
+ type with decltype(auto) as its type. Also check the function if
+	its inner declarator doesn't exist.
+
2018-08-21 Marek Polacek <polacek@redhat.com>
PR c++/86499
@@ -278,9 +390,9 @@
2018-07-31 Martin Liska <mliska@suse.cz>
- PR c++/86653
+ PR c++/86653
* parser.c (cp_parser_condition): Initialize non_constant_p
- to false.
+ to false.
2018-07-28 David Malcolm <dmalcolm@redhat.com>
@@ -3527,7 +3639,7 @@
2018-01-10 Paolo Carlini <paolo.carlini@oracle.com>
* parser.c (cp_parser_std_attribute_spec): When
- token_pair::require_open / require_close return false simply
+ token_pair::require_open / require_close return false simply
return error_mark_node, avoid duplicate cp_parser_error about
expected '(' / ')', respectively.
diff --git a/gcc/cp/call.c b/gcc/cp/call.c
index 626830c0d9a..a1567026975 100644
--- a/gcc/cp/call.c
+++ b/gcc/cp/call.c
@@ -389,7 +389,7 @@ build_call_a (tree function, int n, tree *argarray)
/* Don't pass empty class objects by value. This is useful
for tags in STL, which are used to control overload resolution.
We don't need to handle other cases of copying empty classes. */
- if (! decl || ! DECL_BUILT_IN (decl))
+ if (!decl || !fndecl_built_in_p (decl))
for (i = 0; i < n; i++)
{
tree arg = CALL_EXPR_ARG (function, i);
@@ -3432,10 +3432,11 @@ equal_functions (tree fn1, tree fn2)
return fn1 == fn2;
}
-/* Print information about a candidate being rejected due to INFO. */
+/* Print information about a candidate FN being rejected due to INFO. */
static void
-print_conversion_rejection (location_t loc, struct conversion_info *info)
+print_conversion_rejection (location_t loc, struct conversion_info *info,
+ tree fn)
{
tree from = info->from;
if (!TYPE_P (from))
@@ -3466,8 +3467,12 @@ print_conversion_rejection (location_t loc, struct conversion_info *info)
inform (loc, " no known conversion from %qH to %qI",
from, info->to_type);
else
- inform (loc, " no known conversion for argument %d from %qH to %qI",
- info->n_arg + 1, from, info->to_type);
+ {
+ if (TREE_CODE (fn) == FUNCTION_DECL)
+ loc = get_fndecl_argument_location (fn, info->n_arg);
+ inform (loc, " no known conversion for argument %d from %qH to %qI",
+ info->n_arg + 1, from, info->to_type);
+ }
}
/* Print information about a candidate with WANT parameters and we found
@@ -3542,10 +3547,10 @@ print_z_candidate (location_t loc, const char *msgstr,
r->u.arity.expected);
break;
case rr_arg_conversion:
- print_conversion_rejection (cloc, &r->u.conversion);
+ print_conversion_rejection (cloc, &r->u.conversion, fn);
break;
case rr_bad_arg_conversion:
- print_conversion_rejection (cloc, &r->u.bad_conversion);
+ print_conversion_rejection (cloc, &r->u.bad_conversion, fn);
break;
case rr_explicit_conversion:
inform (cloc, " return type %qT of explicit conversion function "
@@ -7922,7 +7927,7 @@ build_over_call (struct z_candidate *cand, int flags, tsubst_flags_t complain)
{
/* The implicit move specified in 15.8.3/3 fails "...if the type of
the first parameter of the selected constructor is not an rvalue
- reference to the object’s type (possibly cv-qualified)...." */
+ reference to the object's type (possibly cv-qualified)...." */
gcc_assert (!(complain & tf_error));
tree ptype = convs[0]->type;
if (!TYPE_REF_P (ptype)
@@ -8869,8 +8874,7 @@ build_cxx_call (tree fn, int nargs, tree *argarray,
/* Check that arguments to builtin functions match the expectations. */
if (fndecl
&& !processing_template_decl
- && DECL_BUILT_IN (fndecl)
- && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
{
int i;
diff --git a/gcc/cp/constexpr.c b/gcc/cp/constexpr.c
index 54c8b5edf8d..f646519135f 100644
--- a/gcc/cp/constexpr.c
+++ b/gcc/cp/constexpr.c
@@ -721,8 +721,7 @@ constexpr_fn_retval (tree body)
{
tree fun = get_function_named_in_call (body);
if (fun != NULL_TREE
- && DECL_BUILT_IN_CLASS (fun) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (fun) == BUILT_IN_UNREACHABLE)
+ && fndecl_built_in_p (fun, BUILT_IN_UNREACHABLE))
return NULL_TREE;
}
/* Fallthru. */
@@ -1198,8 +1197,8 @@ cxx_eval_builtin_function_call (const constexpr_ctx *ctx, tree t, tree fun,
/* For __builtin_is_constant_evaluated, defer it if not
ctx->pretend_const_required, otherwise fold it to true. */
- if (DECL_BUILT_IN_CLASS (fun) == BUILT_IN_FRONTEND
- && (int) DECL_FUNCTION_CODE (fun) == CP_BUILT_IN_IS_CONSTANT_EVALUATED)
+ if (fndecl_built_in_p (fun, CP_BUILT_IN_IS_CONSTANT_EVALUATED,
+ BUILT_IN_FRONTEND))
{
if (!ctx->pretend_const_required)
{
@@ -1242,8 +1241,7 @@ cxx_eval_builtin_function_call (const constexpr_ctx *ctx, tree t, tree fun,
/* Do not allow__builtin_unreachable in constexpr function.
The __builtin_unreachable call with BUILTINS_LOCATION
comes from cp_maybe_instrument_return. */
- if (DECL_BUILT_IN_CLASS (fun) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (fun) == BUILT_IN_UNREACHABLE
+ if (fndecl_built_in_p (fun, BUILT_IN_UNREACHABLE)
&& EXPR_LOCATION (t) == BUILTINS_LOCATION)
error ("%<constexpr%> call flows off the end of the function");
else
@@ -1528,7 +1526,7 @@ cxx_eval_call_expression (const constexpr_ctx *ctx, tree t,
if (is_ubsan_builtin_p (fun))
return void_node;
- if (is_builtin_fn (fun))
+ if (fndecl_built_in_p (fun))
return cxx_eval_builtin_function_call (ctx, t, fun,
lval, non_constant_p, overflow_p);
if (!DECL_DECLARED_CONSTEXPR_P (fun))
@@ -5522,7 +5520,7 @@ potential_constant_expression_1 (tree t, bool want_rval, bool strict, bool now,
if (!DECL_DECLARED_CONSTEXPR_P (fun)
/* Allow any built-in function; if the expansion
isn't constant, we'll deal with that then. */
- && !is_builtin_fn (fun))
+ && !fndecl_built_in_p (fun))
{
if (flags & tf_error)
{
diff --git a/gcc/cp/cp-gimplify.c b/gcc/cp/cp-gimplify.c
index f6109914102..90a8f9fef8f 100644
--- a/gcc/cp/cp-gimplify.c
+++ b/gcc/cp/cp-gimplify.c
@@ -797,9 +797,8 @@ cp_gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
tree decl = cp_get_callee_fndecl_nofold (*expr_p);
if (decl
- && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_FRONTEND
- && ((int) DECL_FUNCTION_CODE (decl)
- == CP_BUILT_IN_IS_CONSTANT_EVALUATED))
+ && fndecl_built_in_p (decl, CP_BUILT_IN_IS_CONSTANT_EVALUATED,
+ BUILT_IN_FRONTEND))
*expr_p = boolean_false_node;
}
break;
@@ -2489,7 +2488,7 @@ cp_fold (tree x)
/* Some built-in function calls will be evaluated at compile-time in
fold (). Set optimize to 1 when folding __builtin_constant_p inside
a constexpr function so that fold_builtin_1 doesn't fold it to 0. */
- if (callee && DECL_BUILT_IN (callee) && !optimize
+ if (callee && fndecl_built_in_p (callee) && !optimize
&& DECL_IS_BUILTIN_CONSTANT_P (callee)
&& current_function_decl
&& DECL_DECLARED_CONSTEXPR_P (current_function_decl))
@@ -2497,9 +2496,8 @@ cp_fold (tree x)
/* Defer folding __builtin_is_constant_evaluated. */
if (callee
- && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_FRONTEND
- && ((int) DECL_FUNCTION_CODE (callee)
- == CP_BUILT_IN_IS_CONSTANT_EVALUATED))
+ && fndecl_built_in_p (callee, CP_BUILT_IN_IS_CONSTANT_EVALUATED,
+ BUILT_IN_FRONTEND))
break;
x = copy_node (x);
diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h
index 055f2bc5f2b..43e452cc1a3 100644
--- a/gcc/cp/cp-tree.h
+++ b/gcc/cp/cp-tree.h
@@ -7388,7 +7388,8 @@ cxx_incomplete_type_error (const_tree value, const_tree type)
extern void cxx_incomplete_type_inform (const_tree);
extern tree error_not_base_type (tree, tree);
extern tree binfo_or_else (tree, tree);
-extern void cxx_readonly_error (tree, enum lvalue_use);
+extern void cxx_readonly_error (location_t, tree,
+ enum lvalue_use);
extern void complete_type_check_abstract (tree);
extern int abstract_virtuals_error (tree, tree);
extern int abstract_virtuals_error (abstract_class_use, tree);
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index 82ec4af87be..c6711f74177 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -968,7 +968,7 @@ decls_match (tree newdecl, tree olddecl, bool record_versions /* = true */)
if (same_type_p (TREE_TYPE (f1), r2))
{
if (!prototype_p (f2) && DECL_EXTERN_C_P (olddecl)
- && (DECL_BUILT_IN (olddecl)
+ && (fndecl_built_in_p (olddecl)
#ifdef SYSTEM_IMPLICIT_EXTERN_C
|| (DECL_IN_SYSTEM_HEADER (newdecl) && !DECL_CLASS_SCOPE_P (newdecl))
|| (DECL_IN_SYSTEM_HEADER (olddecl) && !DECL_CLASS_SCOPE_P (olddecl))
@@ -1208,7 +1208,7 @@ validate_constexpr_redeclaration (tree old_decl, tree new_decl)
return true;
if (TREE_CODE (old_decl) == FUNCTION_DECL)
{
- if (DECL_BUILT_IN (old_decl))
+ if (fndecl_built_in_p (old_decl))
{
/* Hide a built-in declaration. */
DECL_DECLARED_CONSTEXPR_P (old_decl)
@@ -1442,7 +1442,7 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
{
warning_at (newdecl_loc,
OPT_Wshadow,
- DECL_BUILT_IN (olddecl)
+ fndecl_built_in_p (olddecl)
? G_("shadowing built-in function %q#D")
: G_("shadowing library function %q#D"), olddecl);
/* Discard the old built-in function. */
@@ -1450,7 +1450,7 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
}
/* If the built-in is not ansi, then programs can override
it even globally without an error. */
- else if (! DECL_BUILT_IN (olddecl))
+ else if (! fndecl_built_in_p (olddecl))
warning_at (newdecl_loc, 0,
"library function %q#D redeclared as non-function %q#D",
olddecl, newdecl);
@@ -1537,7 +1537,7 @@ next_arg:;
/* Don't really override olddecl for __* prefixed builtins
except for __[^b]*_chk, the compiler might be using those
explicitly. */
- if (DECL_BUILT_IN (olddecl))
+ if (fndecl_built_in_p (olddecl))
{
tree id = DECL_NAME (olddecl);
const char *name = IDENTIFIER_POINTER (id);
@@ -1578,9 +1578,9 @@ next_arg:;
"declaration %q#D", newdecl, olddecl);
else
warning (OPT_Wshadow,
- DECL_BUILT_IN (olddecl)
- ? G_("shadowing built-in function %q#D")
- : G_("shadowing library function %q#D"), olddecl);
+ fndecl_built_in_p (olddecl)
+ ? G_("shadowing built-in function %q#D")
+ : G_("shadowing library function %q#D"), olddecl);
}
else
/* Discard the old built-in function. */
@@ -2522,7 +2522,7 @@ next_arg:;
/* If redeclaring a builtin function, it stays built in
if newdecl is a gnu_inline definition, or if newdecl is just
a declaration. */
- if (DECL_BUILT_IN (olddecl)
+ if (fndecl_built_in_p (olddecl)
&& (new_defines_function ? GNU_INLINE_P (newdecl) : types_match))
{
DECL_BUILT_IN_CLASS (newdecl) = DECL_BUILT_IN_CLASS (olddecl);
@@ -3662,6 +3662,8 @@ finish_case_label (location_t loc, tree low_value, tree high_value)
return error_mark_node;
type = SWITCH_STMT_TYPE (switch_stack->switch_stmt);
+ if (type == error_mark_node)
+ return error_mark_node;
low_value = case_conversion (type, low_value);
high_value = case_conversion (type, high_value);
@@ -6611,7 +6613,7 @@ make_rtl_for_nonlocal_decl (tree decl, tree init, const char* asmspec)
else
{
if (TREE_CODE (decl) == FUNCTION_DECL
- && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
+ && fndecl_built_in_p (decl, BUILT_IN_NORMAL))
set_builtin_user_assembler_name (decl, asmspec);
set_user_assembler_name (decl, asmspec);
}
@@ -10737,14 +10739,14 @@ grokdeclarator (const cp_declarator *declarator,
if (signed_p && unsigned_p)
{
gcc_rich_location richloc (declspecs->locations[ds_signed]);
- richloc.add_range (declspecs->locations[ds_unsigned], false);
+ richloc.add_range (declspecs->locations[ds_unsigned]);
error_at (&richloc,
"%<signed%> and %<unsigned%> specified together");
}
else if (long_p && short_p)
{
gcc_rich_location richloc (declspecs->locations[ds_long]);
- richloc.add_range (declspecs->locations[ds_short], false);
+ richloc.add_range (declspecs->locations[ds_short]);
error_at (&richloc, "%<long%> and %<short%> specified together");
}
else if (TREE_CODE (type) != INTEGER_TYPE
@@ -10888,7 +10890,7 @@ grokdeclarator (const cp_declarator *declarator,
if (staticp == 2)
{
gcc_rich_location richloc (declspecs->locations[ds_virtual]);
- richloc.add_range (declspecs->locations[ds_storage_class], false);
+ richloc.add_range (declspecs->locations[ds_storage_class]);
error_at (&richloc, "member %qD cannot be declared both %<virtual%> "
"and %<static%>", dname);
storage_class = sc_none;
@@ -10897,7 +10899,7 @@ grokdeclarator (const cp_declarator *declarator,
if (constexpr_p)
{
gcc_rich_location richloc (declspecs->locations[ds_virtual]);
- richloc.add_range (declspecs->locations[ds_constexpr], false);
+ richloc.add_range (declspecs->locations[ds_constexpr]);
error_at (&richloc, "member %qD cannot be declared both %<virtual%> "
"and %<constexpr%>", dname);
}
@@ -11246,7 +11248,10 @@ grokdeclarator (const cp_declarator *declarator,
/* Handle a late-specified return type. */
tree late_return_type = declarator->u.function.late_return_type;
- if (funcdecl_p)
+ if (funcdecl_p
+ /* This is the case e.g. for
+ using T = auto () -> int. */
+ || inner_declarator == NULL)
{
if (tree auto_node = type_uses_auto (type))
{
@@ -11278,6 +11283,16 @@ grokdeclarator (const cp_declarator *declarator,
name, type);
return error_mark_node;
}
+ else if (is_auto (type) && AUTO_IS_DECLTYPE (type))
+ {
+ if (funcdecl_p)
+ error ("%qs function with trailing return type has "
+ "%<decltype(auto)%> as its type rather than "
+ "plain %<auto%>", name);
+ else
+ error ("invalid use of %<decltype(auto)%>");
+ return error_mark_node;
+ }
tree tmpl = CLASS_PLACEHOLDER_TEMPLATE (auto_node);
if (!tmpl)
if (tree late_auto = type_uses_auto (late_return_type))
@@ -11435,7 +11450,7 @@ grokdeclarator (const cp_declarator *declarator,
{
/* Cannot be both friend and virtual. */
gcc_rich_location richloc (declspecs->locations[ds_virtual]);
- richloc.add_range (declspecs->locations[ds_friend], false);
+ richloc.add_range (declspecs->locations[ds_friend]);
error_at (&richloc, "virtual functions cannot be friends");
friendp = 0;
}
@@ -15683,6 +15698,18 @@ begin_destructor_body (void)
tree stmt = cp_build_modify_expr (input_location, vtbl_ptr,
NOP_EXPR, vtbl,
tf_warning_or_error);
+ /* If the vptr is shared with some virtual nearly empty base,
+ don't clear it if not in charge, the dtor of the virtual
+ nearly empty base will do that later. */
+ if (CLASSTYPE_VBASECLASSES (current_class_type)
+ && CLASSTYPE_PRIMARY_BINFO (current_class_type)
+ && BINFO_VIRTUAL_P
+ (CLASSTYPE_PRIMARY_BINFO (current_class_type)))
+ {
+ stmt = convert_to_void (stmt, ICV_STATEMENT,
+ tf_warning_or_error);
+ stmt = build_if_in_charge (stmt);
+ }
finish_decl_cleanup (NULL_TREE, stmt);
}
else
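
A sketch of the kind of hierarchy the begin_destructor_body change above targets (PR c++/87095); this is an illustrative shape, not the PR's testcase.

/* A "nearly empty" class: no non-static data members, only a vptr.  */
struct A
{
  virtual ~A ();
};

/* A is a virtual base of B, and because it is nearly empty it can also be
   chosen as B's primary base, so B shares its vptr with the A subobject.
   With the change above, ~B only clears that vptr when it runs as the
   in-charge (complete-object) destructor; otherwise the virtual base's own
   destructor takes care of it later.  */
struct B : virtual A
{
  ~B ();
};
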
diff --git a/gcc/cp/error.c b/gcc/cp/error.c
index 452ecb95467..5bab3f345ed 100644
--- a/gcc/cp/error.c
+++ b/gcc/cp/error.c
@@ -4119,7 +4119,7 @@ cp_printer (pretty_printer *pp, text_info *text, const char *spec,
pp_string (pp, result);
if (set_locus && t != NULL)
- text->set_location (0, location_of (t), true);
+ text->set_location (0, location_of (t), SHOW_RANGE_WITH_CARET);
return true;
#undef next_tree
#undef next_tcode
diff --git a/gcc/cp/name-lookup.c b/gcc/cp/name-lookup.c
index 8c7f68522da..c0a12d74634 100644
--- a/gcc/cp/name-lookup.c
+++ b/gcc/cp/name-lookup.c
@@ -5630,7 +5630,7 @@ maybe_suggest_missing_std_header (location_t location, tree name)
if (cxx_dialect >= header_hint->min_dialect)
{
const char *header = header_hint->header;
- maybe_add_include_fixit (&richloc, header);
+ maybe_add_include_fixit (&richloc, header, true);
inform (&richloc,
"%<std::%s%> is defined in header %qs;"
" did you forget to %<#include %s%>?",
@@ -5791,7 +5791,7 @@ consider_binding_level (tree name, best_match <tree, const char *> &bm,
/* Skip anticipated decls of builtin functions. */
if (TREE_CODE (d) == FUNCTION_DECL
- && DECL_BUILT_IN (d)
+ && fndecl_built_in_p (d)
&& DECL_ANTICIPATED (d))
continue;
@@ -7274,7 +7274,7 @@ cp_emit_debug_info_for_using (tree t, tree context)
of a builtin function. */
if (TREE_CODE (t) == FUNCTION_DECL
&& DECL_EXTERNAL (t)
- && DECL_BUILT_IN (t))
+ && fndecl_built_in_p (t))
return;
/* Do not supply context to imported_module_or_decl, if
diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c
index 49d476b383f..92e6b40efb4 100644
--- a/gcc/cp/parser.c
+++ b/gcc/cp/parser.c
@@ -3405,8 +3405,10 @@ cp_parser_diagnose_invalid_type_name (cp_parser *parser, tree id,
else if (TYPE_P (parser->scope)
&& dependent_scope_p (parser->scope))
{
+ gcc_rich_location richloc (location);
+ richloc.add_fixit_insert_before ("typename ");
if (TREE_CODE (parser->scope) == TYPENAME_TYPE)
- error_at (location,
+ error_at (&richloc,
"need %<typename%> before %<%T::%D::%E%> because "
"%<%T::%D%> is a dependent scope",
TYPE_CONTEXT (parser->scope),
@@ -3415,7 +3417,7 @@ cp_parser_diagnose_invalid_type_name (cp_parser *parser, tree id,
TYPE_CONTEXT (parser->scope),
TYPENAME_TYPE_FULLNAME (parser->scope));
else
- error_at (location, "need %<typename%> before %<%T::%E%> because "
+ error_at (&richloc, "need %<typename%> before %<%T::%E%> because "
"%qT is a dependent scope",
parser->scope, id, parser->scope);
}
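
An illustrative example of the construct behind the new fix-it hint (not the PR's testcase); the quoted diagnostic text is approximate.

/* With 'typename' present the code below compiles; removing it yields
   roughly:
     error: need 'typename' before 'T::type' because 'T' is a dependent scope
   and the new fix-it hint proposes inserting "typename " at that spot.  */
template <typename T>
void
use_nested_type ()
{
  typename T::type value = typename T::type ();
  (void) value;
}
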
@@ -4133,7 +4135,7 @@ cp_parser_string_literal (cp_parser *parser, bool translate, bool wide_ok,
else if (curr_type != CPP_STRING)
{
rich_location rich_loc (line_table, tok->location);
- rich_loc.add_range (last_tok_loc, false);
+ rich_loc.add_range (last_tok_loc);
error_at (&rich_loc,
"unsupported non-standard concatenation "
"of string literals");
@@ -15240,11 +15242,15 @@ cp_parser_introduction_list (cp_parser *parser)
if (is_pack)
cp_lexer_consume_token (parser->lexer);
+ tree identifier = cp_parser_identifier (parser);
+ if (identifier == error_mark_node)
+ break;
+
/* Build placeholder. */
tree parm = build_nt (WILDCARD_DECL);
DECL_SOURCE_LOCATION (parm)
= cp_lexer_peek_token (parser->lexer)->location;
- DECL_NAME (parm) = cp_parser_identifier (parser);
+ DECL_NAME (parm) = identifier;
WILDCARD_PACK_P (parm) = is_pack;
vec_safe_push (introduction_vec, parm);
@@ -17755,7 +17761,7 @@ cp_parser_elaborated_type_specifier (cp_parser* parser,
|| cp_parser_is_keyword (token, RID_STRUCT))
{
gcc_rich_location richloc (token->location);
- richloc.add_range (input_location, false);
+ richloc.add_range (input_location);
richloc.add_fixit_remove ();
pedwarn (&richloc, 0, "elaborated-type-specifier for "
"a scoped enum must not use the %qD keyword",
@@ -27176,18 +27182,18 @@ cp_parser_template_introduction (cp_parser* parser, bool member_p)
matching identifiers. */
tree introduction_list = cp_parser_introduction_list (parser);
+ /* Look for closing brace for introduction. */
+ if (!braces.require_close (parser))
+ return true;
+
/* The introduction-list shall not be empty. */
int nargs = TREE_VEC_LENGTH (introduction_list);
if (nargs == 0)
{
- error ("empty introduction-list");
+ /* In cp_parser_introduction_list we have already issued an error. */
return true;
}
- /* Look for closing brace for introduction. */
- if (!braces.require_close (parser))
- return true;
-
if (tmpl_decl == error_mark_node)
{
cp_parser_name_lookup_error (parser, concept_name, tmpl_decl, NLE_NULL,
@@ -28390,7 +28396,7 @@ set_and_check_decl_spec_loc (cp_decl_specifier_seq *decl_specs,
gcc_rich_location richloc (location);
if (gnu != decl_specs->gnu_thread_keyword_p)
{
- richloc.add_range (decl_specs->locations[ds_thread], false);
+ richloc.add_range (decl_specs->locations[ds_thread]);
error_at (&richloc,
"both %<__thread%> and %<thread_local%> specified");
}
diff --git a/gcc/cp/pt.c b/gcc/cp/pt.c
index efed9a1bf60..0a618a5447d 100644
--- a/gcc/cp/pt.c
+++ b/gcc/cp/pt.c
@@ -16832,6 +16832,8 @@ tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl,
RANGE_FOR_IVDEP (stmt) = RANGE_FOR_IVDEP (t);
RANGE_FOR_UNROLL (stmt) = RANGE_FOR_UNROLL (t);
finish_range_for_decl (stmt, decl, expr);
+ if (decomp_first && decl != error_mark_node)
+ cp_finish_decomp (decl, decomp_first, decomp_cnt);
}
else
{
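
An illustrative instance of the construct handled by the tsubst_expr change above (not the PR's testcase):

#include <map>

/* The range-for declaration below is a structured binding inside a
   template; with the change above, cp_finish_decomp is now called for it
   when the RANGE_FOR_STMT is substituted at instantiation time.  */
template <typename M>
int
sum_keys (const M &m)
{
  int total = 0;
  for (const auto &[key, value] : m)
    {
      (void) value;
      total += key;
    }
  return total;
}

template int sum_keys (const std::map<int, int> &);
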
@@ -26077,7 +26079,7 @@ listify (tree arg)
if (!std_init_list || !DECL_CLASS_TEMPLATE_P (std_init_list))
{
gcc_rich_location richloc (input_location);
- maybe_add_include_fixit (&richloc, "<initializer_list>");
+ maybe_add_include_fixit (&richloc, "<initializer_list>", false);
error_at (&richloc,
"deducing from brace-enclosed initializer list"
" requires %<#include <initializer_list>%>");
diff --git a/gcc/cp/rtti.c b/gcc/cp/rtti.c
index 6692fb7ff86..94a92198781 100644
--- a/gcc/cp/rtti.c
+++ b/gcc/cp/rtti.c
@@ -317,7 +317,7 @@ typeid_ok_p (void)
if (!COMPLETE_TYPE_P (const_type_info_type_node))
{
gcc_rich_location richloc (input_location);
- maybe_add_include_fixit (&richloc, "<typeinfo>");
+ maybe_add_include_fixit (&richloc, "<typeinfo>", false);
error_at (&richloc,
"must %<#include <typeinfo>%> before using"
" %<typeid%>");
diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c
index bfdca5024d3..676de011868 100644
--- a/gcc/cp/semantics.c
+++ b/gcc/cp/semantics.c
@@ -1532,7 +1532,7 @@ finish_asm_stmt (int volatile_p, tree string, tree output_operands,
effectively const. */
|| (CLASS_TYPE_P (TREE_TYPE (operand))
&& C_TYPE_FIELDS_READONLY (TREE_TYPE (operand)))))
- cxx_readonly_error (operand, lv_asm);
+ cxx_readonly_error (input_location, operand, lv_asm);
tree *op = &operand;
while (TREE_CODE (*op) == COMPOUND_EXPR)
@@ -2546,8 +2546,7 @@ finish_call_expr (tree fn, vec<tree, va_gc> **args, bool disallow_virtual,
if ((complain & tf_warning)
&& TREE_CODE (fn) == FUNCTION_DECL
- && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (fn) == BUILT_IN_MEMSET
+ && fndecl_built_in_p (fn, BUILT_IN_MEMSET)
&& vec_safe_length (*args) == 3
&& !any_type_dependent_arguments_p (*args))
{
diff --git a/gcc/cp/tree.c b/gcc/cp/tree.c
index 8a1d2993f94..c6f216dab4b 100644
--- a/gcc/cp/tree.c
+++ b/gcc/cp/tree.c
@@ -420,9 +420,8 @@ builtin_valid_in_constant_expr_p (const_tree decl)
return false;
if (DECL_BUILT_IN_CLASS (decl) != BUILT_IN_NORMAL)
{
- if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_FRONTEND
- && ((int) DECL_FUNCTION_CODE (decl)
- == CP_BUILT_IN_IS_CONSTANT_EVALUATED))
+ if (fndecl_built_in_p (decl, CP_BUILT_IN_IS_CONSTANT_EVALUATED,
+ BUILT_IN_FRONTEND))
return true;
/* Not a built-in. */
return false;
diff --git a/gcc/cp/typeck.c b/gcc/cp/typeck.c
index 122d9dcd4b3..ab088a946b3 100644
--- a/gcc/cp/typeck.c
+++ b/gcc/cp/typeck.c
@@ -6228,9 +6228,10 @@ cp_build_unary_op (enum tree_code code, tree xarg, bool noconvert,
|| TREE_READONLY (arg))
{
if (complain & tf_error)
- cxx_readonly_error (arg, ((code == PREINCREMENT_EXPR
- || code == POSTINCREMENT_EXPR)
- ? lv_increment : lv_decrement));
+ cxx_readonly_error (location, arg,
+ ((code == PREINCREMENT_EXPR
+ || code == POSTINCREMENT_EXPR)
+ ? lv_increment : lv_decrement));
else
return error_mark_node;
}
@@ -8159,7 +8160,7 @@ cp_build_modify_expr (location_t loc, tree lhs, enum tree_code modifycode,
&& C_TYPE_FIELDS_READONLY (lhstype))))
{
if (complain & tf_error)
- cxx_readonly_error (lhs, lv_assign);
+ cxx_readonly_error (loc, lhs, lv_assign);
return error_mark_node;
}
@@ -9178,6 +9179,19 @@ can_do_nrvo_p (tree retval, tree functype)
&& !TYPE_VOLATILE (TREE_TYPE (retval)));
}
+/* Returns true if we should treat RETVAL, an expression being returned,
+ as if it were designated by an rvalue. See [class.copy.elision]. */
+
+static bool
+treat_lvalue_as_rvalue_p (tree retval)
+{
+ return ((cxx_dialect != cxx98)
+ && ((VAR_P (retval) && !DECL_HAS_VALUE_EXPR_P (retval))
+ || TREE_CODE (retval) == PARM_DECL)
+ && DECL_CONTEXT (retval) == current_function_decl
+ && !TREE_STATIC (retval));
+}
+
/* Warn about wrong usage of std::move in a return statement. RETVAL
is the expression we are returning; FUNCTYPE is the type the function
is declared to return. */
@@ -9185,13 +9199,20 @@ can_do_nrvo_p (tree retval, tree functype)
static void
maybe_warn_pessimizing_move (tree retval, tree functype)
{
- if (!warn_pessimizing_move)
+ if (!(warn_pessimizing_move || warn_redundant_move))
return;
+ location_t loc = cp_expr_loc_or_loc (retval, input_location);
+
/* C++98 doesn't know move. */
if (cxx_dialect < cxx11)
return;
+ /* Wait until instantiation time, since we can't gauge if we should do
+ the NRVO until then. */
+ if (processing_template_decl)
+ return;
+
/* This is only interesting for class types. */
if (!CLASS_TYPE_P (functype))
return;
@@ -9207,14 +9228,24 @@ maybe_warn_pessimizing_move (tree retval, tree functype)
STRIP_NOPS (arg);
if (TREE_CODE (arg) == ADDR_EXPR)
arg = TREE_OPERAND (arg, 0);
+ arg = convert_from_reference (arg);
/* Warn if we could do copy elision were it not for the move. */
if (can_do_nrvo_p (arg, functype))
{
auto_diagnostic_group d;
- if (warning_at (location_of (retval), OPT_Wpessimizing_move,
+ if (warning_at (loc, OPT_Wpessimizing_move,
"moving a local object in a return statement "
"prevents copy elision"))
- inform (location_of (retval), "remove %<std::move%> call");
+ inform (loc, "remove %<std::move%> call");
+ }
+ /* Warn if the move is redundant. It is redundant when we would
+ do maybe-rvalue overload resolution even without std::move. */
+ else if (treat_lvalue_as_rvalue_p (arg))
+ {
+ auto_diagnostic_group d;
+ if (warning_at (loc, OPT_Wredundant_move,
+ "redundant move in return statement"))
+ inform (loc, "remove %<std::move%> call");
}
}
}
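
A minimal illustration of the two warnings involved above; the type and function names are generic, not taken from the PR testcases.

#include <utility>

struct S
{
  S ();
  S (const S &);
  S (S &&);
};

S
pessimizing ()
{
  S s;
  return std::move (s);   /* -Wpessimizing-move: defeats copy elision.  */
}

S
redundant (S s)
{
  return std::move (s);   /* -Wredundant-move: s is already treated as an
			     rvalue in this return statement.  */
}
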
@@ -9494,11 +9525,7 @@ check_return_expr (tree retval, bool *no_warning)
Note that these conditions are similar to, but not as strict as,
the conditions for the named return value optimization. */
bool converted = false;
- if ((cxx_dialect != cxx98)
- && ((VAR_P (retval) && !DECL_HAS_VALUE_EXPR_P (retval))
- || TREE_CODE (retval) == PARM_DECL)
- && DECL_CONTEXT (retval) == current_function_decl
- && !TREE_STATIC (retval)
+ if (treat_lvalue_as_rvalue_p (retval)
/* This is only interesting for class type. */
&& CLASS_TYPE_P (functype))
{
diff --git a/gcc/cp/typeck2.c b/gcc/cp/typeck2.c
index 1e899ab17a1..71fbff167a5 100644
--- a/gcc/cp/typeck2.c
+++ b/gcc/cp/typeck2.c
@@ -67,28 +67,28 @@ binfo_or_else (tree base, tree type)
value may not be changed thereafter. */
void
-cxx_readonly_error (tree arg, enum lvalue_use errstring)
+cxx_readonly_error (location_t loc, tree arg, enum lvalue_use errstring)
{
/* This macro is used to emit diagnostics to ensure that all format
strings are complete sentences, visible to gettext and checked at
compile time. */
-#define ERROR_FOR_ASSIGNMENT(AS, ASM, IN, DE, ARG) \
+#define ERROR_FOR_ASSIGNMENT(LOC, AS, ASM, IN, DE, ARG) \
do { \
switch (errstring) \
{ \
case lv_assign: \
- error(AS, ARG); \
+ error_at (LOC, AS, ARG); \
break; \
case lv_asm: \
- error(ASM, ARG); \
+ error_at (LOC, ASM, ARG); \
break; \
case lv_increment: \
- error (IN, ARG); \
+ error_at (LOC, IN, ARG); \
break; \
- case lv_decrement: \
- error (DE, ARG); \
+ case lv_decrement: \
+ error_at (LOC, DE, ARG); \
break; \
default: \
gcc_unreachable (); \
@@ -101,32 +101,25 @@ cxx_readonly_error (tree arg, enum lvalue_use errstring)
&& DECL_LANG_SPECIFIC (arg)
&& DECL_IN_AGGR_P (arg)
&& !TREE_STATIC (arg))
- ERROR_FOR_ASSIGNMENT (G_("assignment of "
- "constant field %qD"),
- G_("constant field %qD "
- "used as %<asm%> output"),
- G_("increment of "
- "constant field %qD"),
- G_("decrement of "
- "constant field %qD"),
+ ERROR_FOR_ASSIGNMENT (loc,
+ G_("assignment of constant field %qD"),
+ G_("constant field %qD used as %<asm%> output"),
+ G_("increment of constant field %qD"),
+ G_("decrement of constant field %qD"),
arg);
else if (INDIRECT_REF_P (arg)
&& TYPE_REF_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
&& (VAR_P (TREE_OPERAND (arg, 0))
|| TREE_CODE (TREE_OPERAND (arg, 0)) == PARM_DECL))
- ERROR_FOR_ASSIGNMENT (G_("assignment of "
- "read-only reference %qD"),
- G_("read-only reference %qD "
- "used as %<asm%> output"),
- G_("increment of "
- "read-only reference %qD"),
- G_("decrement of "
- "read-only reference %qD"),
- TREE_OPERAND (arg, 0));
+ ERROR_FOR_ASSIGNMENT (loc,
+ G_("assignment of read-only reference %qD"),
+ G_("read-only reference %qD used as %<asm%> output"),
+ G_("increment of read-only reference %qD"),
+ G_("decrement of read-only reference %qD"),
+ TREE_OPERAND (arg, 0));
else
- readonly_error (input_location, arg, errstring);
+ readonly_error (loc, arg, errstring);
}
-
/* Structure that holds information about declarations whose type was
incomplete and we could not check whether it was abstract or not. */
diff --git a/gcc/diagnostic-show-locus.c b/gcc/diagnostic-show-locus.c
index a7598269588..6ce8a0f4a9b 100644
--- a/gcc/diagnostic-show-locus.c
+++ b/gcc/diagnostic-show-locus.c
@@ -126,7 +126,7 @@ class layout_range
public:
layout_range (const expanded_location *start_exploc,
const expanded_location *finish_exploc,
- bool show_caret_p,
+ enum range_display_kind range_display_kind,
const expanded_location *caret_exploc,
const range_label *label);
@@ -135,7 +135,7 @@ class layout_range
layout_point m_start;
layout_point m_finish;
- bool m_show_caret_p;
+ enum range_display_kind m_range_display_kind;
layout_point m_caret;
const range_label *m_label;
};
@@ -241,6 +241,7 @@ class layout
int get_num_line_spans () const { return m_line_spans.length (); }
const line_span *get_line_span (int idx) const { return &m_line_spans[idx]; }
+ void print_gap_in_line_numbering ();
bool print_heading_for_line_span_index_p (int line_span_idx) const;
expanded_location get_expanded_location (const line_span *) const;
@@ -411,12 +412,12 @@ colorizer::get_color_by_name (const char *name)
layout_range::layout_range (const expanded_location *start_exploc,
const expanded_location *finish_exploc,
- bool show_caret_p,
+ enum range_display_kind range_display_kind,
const expanded_location *caret_exploc,
const range_label *label)
: m_start (*start_exploc),
m_finish (*finish_exploc),
- m_show_caret_p (show_caret_p),
+ m_range_display_kind (range_display_kind),
m_caret (*caret_exploc),
m_label (label)
{
@@ -544,7 +545,7 @@ make_range (int start_line, int start_col, int end_line, int end_col)
= {"test.c", start_line, start_col, NULL, false};
const expanded_location finish_exploc
= {"test.c", end_line, end_col, NULL, false};
- return layout_range (&start_exploc, &finish_exploc, false,
+ return layout_range (&start_exploc, &finish_exploc, SHOW_RANGE_WITHOUT_CARET,
&start_exploc, NULL);
}
@@ -923,6 +924,9 @@ layout::layout (diagnostic_context * context,
if (highest_line < 0)
highest_line = 0;
m_linenum_width = num_digits (highest_line);
+ /* If we're showing jumps in the line-numbering, allow at least 3 chars. */
+ if (m_line_spans.length () > 1)
+ m_linenum_width = MAX (m_linenum_width, 3);
/* Adjust m_x_offset.
Center the primary caret to fit in max_width; all columns
@@ -982,13 +986,13 @@ layout::maybe_add_location_range (const location_range *loc_range,
return false;
if (finish.file != m_exploc.file)
return false;
- if (loc_range->m_show_caret_p)
+ if (loc_range->m_range_display_kind == SHOW_RANGE_WITH_CARET)
if (caret.file != m_exploc.file)
return false;
/* Sanitize the caret location for non-primary ranges. */
if (m_layout_ranges.length () > 0)
- if (loc_range->m_show_caret_p)
+ if (loc_range->m_range_display_kind == SHOW_RANGE_WITH_CARET)
if (!compatible_locations_p (loc_range->m_loc, m_primary_loc))
/* Discard any non-primary ranges that can't be printed
sanely relative to the primary location. */
@@ -996,7 +1000,7 @@ layout::maybe_add_location_range (const location_range *loc_range,
/* Everything is now known to be in the correct source file,
but it may require further sanitization. */
- layout_range ri (&start, &finish, loc_range->m_show_caret_p, &caret,
+ layout_range ri (&start, &finish, loc_range->m_range_display_kind, &caret,
loc_range->m_label);
/* If we have a range that finishes before it starts (perhaps
@@ -1033,7 +1037,7 @@ layout::maybe_add_location_range (const location_range *loc_range,
return false;
if (!will_show_line_p (finish.line))
return false;
- if (loc_range->m_show_caret_p)
+ if (loc_range->m_range_display_kind == SHOW_RANGE_WITH_CARET)
if (!will_show_line_p (caret.line))
return false;
}
@@ -1059,6 +1063,20 @@ layout::will_show_line_p (linenum_type row) const
return false;
}
+/* Print a line showing a gap in the line numbers, for showing the boundary
+ between two line spans. */
+
+void
+layout::print_gap_in_line_numbering ()
+{
+ gcc_assert (m_show_line_numbers_p);
+
+ for (int i = 0; i < m_linenum_width + 1; i++)
+ pp_character (m_pp, '.');
+
+ pp_newline (m_pp);
+}
+
/* Return true iff we should print a heading when starting the
line span with the given index. */
@@ -1141,7 +1159,16 @@ static line_span
get_line_span_for_fixit_hint (const fixit_hint *hint)
{
gcc_assert (hint);
- return line_span (LOCATION_LINE (hint->get_start_loc ()),
+
+ int start_line = LOCATION_LINE (hint->get_start_loc ());
+
+ /* For line-insertion fix-it hints, add the previous line to the
+ span, to give the user more context on the proposed change. */
+ if (hint->ends_with_newline_p ())
+ if (start_line > 1)
+ start_line--;
+
+ return line_span (start_line,
LOCATION_LINE (hint->get_next_loc ()));
}
@@ -1156,21 +1183,34 @@ get_line_span_for_fixit_hint (const fixit_hint *hint)
This function populates m_line_spans with an ordered, disjoint list of
the line spans of interest.
- For example, if the primary caret location is on line 7, with ranges
- covering lines 5-6 and lines 9-12:
+ Printing a gap between line spans takes one line, so, when printing
+ line numbers, we allow a gap of up to one line between spans when
+ merging, since it makes more sense to print the source line rather than a
+ "gap-in-line-numbering" line. When not printing line numbers, it's
+ better to be more explicit about what's going on, so keeping them as
+ separate spans is preferred.
+
+ For example, if the primary range is on lines 8-10, with secondary ranges
+ covering lines 5-6 and lines 13-15:
004
- 005 |RANGE 0
- 006 |RANGE 0
- 007 |PRIMARY CARET
- 008
- 009 |RANGE 1
- 010 |RANGE 1
- 011 |RANGE 1
- 012 |RANGE 1
- 013
-
- then we want two spans: lines 5-7 and lines 9-12. */
+ 005 |RANGE 1
+ 006 |RANGE 1
+ 007
+ 008 |PRIMARY RANGE
+ 009 |PRIMARY CARET
+ 010 |PRIMARY RANGE
+ 011
+ 012
+ 013 |RANGE 2
+ 014 |RANGE 2
+ 015 |RANGE 2
+ 016
+
+ With line numbering on, we want two spans: lines 5-10 and lines 13-15.
+
+ With line numbering off (with span headers), we want three spans: lines 5-6,
+ lines 8-10, and lines 13-15. */
void
layout::calculate_line_spans ()
@@ -1210,7 +1250,8 @@ layout::calculate_line_spans ()
line_span *current = &m_line_spans[m_line_spans.length () - 1];
const line_span *next = &tmp_spans[i];
gcc_assert (next->m_first_line >= current->m_first_line);
- if (next->m_first_line <= current->m_last_line + 1)
+ const int merger_distance = m_show_line_numbers_p ? 1 : 0;
+ if (next->m_first_line <= current->m_last_line + 1 + merger_distance)
{
/* We can merge them. */
if (next->m_last_line > current->m_last_line)
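
Separated from the diagnostics classes, the merging rule changed above can be sketched as follows; span and merge_spans are hypothetical stand-ins for line_span and layout::calculate_line_spans.

#include <algorithm>
#include <vector>

struct span { int first_line, last_line; };

/* Merge sorted spans, absorbing gaps of up to MERGER_DISTANCE lines
   (1 when line numbers are printed, 0 otherwise).  */
static std::vector<span>
merge_spans (std::vector<span> spans, int merger_distance)
{
  std::sort (spans.begin (), spans.end (),
	     [] (const span &a, const span &b)
	     { return a.first_line < b.first_line; });
  std::vector<span> merged;
  for (const span &next : spans)
    {
      if (!merged.empty ()
	  && next.first_line <= merged.back ().last_line + 1 + merger_distance)
	merged.back ().last_line = std::max (merged.back ().last_line,
					     next.last_line);
      else
	merged.push_back (next);
    }
  return merged;
}
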
@@ -1321,8 +1362,12 @@ layout::should_print_annotation_line_p (linenum_type row) const
layout_range *range;
int i;
FOR_EACH_VEC_ELT (m_layout_ranges, i, range)
- if (range->intersects_line_p (row))
- return true;
+ {
+ if (range->m_range_display_kind == SHOW_LINES_WITHOUT_RANGE)
+ return false;
+ if (range->intersects_line_p (row))
+ return true;
+ }
return false;
}
@@ -2061,13 +2106,18 @@ layout::get_state_at_point (/* Inputs. */
int i;
FOR_EACH_VEC_ELT (m_layout_ranges, i, range)
{
+ if (range->m_range_display_kind == SHOW_LINES_WITHOUT_RANGE)
+ /* Bail out early, so that such ranges don't affect underlining or
+ source colorization. */
+ continue;
+
if (range->contains_point (row, column))
{
out_state->range_idx = i;
/* Are we at the range's caret? is it visible? */
out_state->draw_caret_p = false;
- if (range->m_show_caret_p
+ if (range->m_range_display_kind == SHOW_RANGE_WITH_CARET
&& row == range->m_caret.m_line
&& column == range->m_caret.m_column)
out_state->draw_caret_p = true;
@@ -2226,11 +2276,11 @@ gcc_rich_location::add_location_if_nearby (location_t loc)
layout layout (global_dc, this, DK_ERROR);
location_range loc_range;
loc_range.m_loc = loc;
- loc_range.m_show_caret_p = false;
+ loc_range.m_range_display_kind = SHOW_RANGE_WITHOUT_CARET;
if (!layout.maybe_add_location_range (&loc_range, true))
return false;
- add_range (loc, false);
+ add_range (loc);
return true;
}
@@ -2269,10 +2319,22 @@ diagnostic_show_locus (diagnostic_context * context,
line_span_idx++)
{
const line_span *line_span = layout.get_line_span (line_span_idx);
- if (layout.print_heading_for_line_span_index_p (line_span_idx))
+ if (context->show_line_numbers_p)
{
- expanded_location exploc = layout.get_expanded_location (line_span);
- context->start_span (context, exploc);
+ /* With line numbers, we should show whenever the line-numbering
+ "jumps". */
+ if (line_span_idx > 0)
+ layout.print_gap_in_line_numbering ();
+ }
+ else
+ {
+ /* Without line numbers, we print headings for some line spans. */
+ if (layout.print_heading_for_line_span_index_p (line_span_idx))
+ {
+ expanded_location exploc
+ = layout.get_expanded_location (line_span);
+ context->start_span (context, exploc);
+ }
}
linenum_type last_line = line_span->get_last_line ();
for (linenum_type row = line_span->get_first_line ();
@@ -2368,8 +2430,8 @@ test_one_liner_multiple_carets_and_ranges ()
dc.caret_chars[2] = 'C';
rich_location richloc (line_table, foo);
- richloc.add_range (bar, true);
- richloc.add_range (field, true);
+ richloc.add_range (bar, SHOW_RANGE_WITH_CARET);
+ richloc.add_range (field, SHOW_RANGE_WITH_CARET);
diagnostic_show_locus (&dc, &richloc, DK_ERROR);
ASSERT_STREQ ("\n"
" foo = bar.field;\n"
@@ -2490,7 +2552,7 @@ test_one_liner_fixit_replace_equal_secondary_range ()
location_t finish = linemap_position_for_column (line_table, 15);
rich_location richloc (line_table, equals);
location_t field = make_location (start, start, finish);
- richloc.add_range (field, false);
+ richloc.add_range (field);
richloc.add_fixit_replace (field, "m_field");
diagnostic_show_locus (&dc, &richloc, DK_ERROR);
/* The replacement range is indicated in the annotation line,
@@ -2637,8 +2699,8 @@ test_one_liner_labels ()
text_range_label label1 ("1");
text_range_label label2 ("2");
gcc_rich_location richloc (foo, &label0);
- richloc.add_range (bar, false, &label1);
- richloc.add_range (field, false, &label2);
+ richloc.add_range (bar, SHOW_RANGE_WITHOUT_CARET, &label1);
+ richloc.add_range (field, SHOW_RANGE_WITHOUT_CARET, &label2);
{
test_diagnostic_context dc;
@@ -2669,8 +2731,8 @@ test_one_liner_labels ()
text_range_label label1 ("label 1");
text_range_label label2 ("label 2");
gcc_rich_location richloc (foo, &label0);
- richloc.add_range (bar, false, &label1);
- richloc.add_range (field, false, &label2);
+ richloc.add_range (bar, SHOW_RANGE_WITHOUT_CARET, &label1);
+ richloc.add_range (field, SHOW_RANGE_WITHOUT_CARET, &label2);
test_diagnostic_context dc;
diagnostic_show_locus (&dc, &richloc, DK_ERROR);
@@ -2691,8 +2753,8 @@ test_one_liner_labels ()
text_range_label label1 ("bbbb");
text_range_label label2 ("c");
gcc_rich_location richloc (foo, &label0);
- richloc.add_range (bar, false, &label1);
- richloc.add_range (field, false, &label2);
+ richloc.add_range (bar, SHOW_RANGE_WITHOUT_CARET, &label1);
+ richloc.add_range (field, SHOW_RANGE_WITHOUT_CARET, &label2);
test_diagnostic_context dc;
diagnostic_show_locus (&dc, &richloc, DK_ERROR);
@@ -2711,8 +2773,8 @@ test_one_liner_labels ()
text_range_label label1 ("1");
text_range_label label2 ("2");
gcc_rich_location richloc (field, &label0);
- richloc.add_range (bar, false, &label1);
- richloc.add_range (foo, false, &label2);
+ richloc.add_range (bar, SHOW_RANGE_WITHOUT_CARET, &label1);
+ richloc.add_range (foo, SHOW_RANGE_WITHOUT_CARET, &label2);
test_diagnostic_context dc;
diagnostic_show_locus (&dc, &richloc, DK_ERROR);
@@ -2731,8 +2793,8 @@ test_one_liner_labels ()
text_range_label label1 ("label 1");
text_range_label label2 ("label 2");
gcc_rich_location richloc (bar, &label0);
- richloc.add_range (bar, false, &label1);
- richloc.add_range (bar, false, &label2);
+ richloc.add_range (bar, SHOW_RANGE_WITHOUT_CARET, &label1);
+ richloc.add_range (bar, SHOW_RANGE_WITHOUT_CARET, &label2);
test_diagnostic_context dc;
diagnostic_show_locus (&dc, &richloc, DK_ERROR);
@@ -2943,6 +3005,29 @@ test_diagnostic_show_locus_fixit_lines (const line_table_case &case_)
" =\n",
pp_formatted_text (dc.printer));
}
+
+ /* As above, but verify the behavior of multiple line spans
+ with line-numbering enabled. */
+ {
+ const location_t y
+ = linemap_position_for_line_and_column (line_table, ord_map, 3, 24);
+ const location_t colon
+ = linemap_position_for_line_and_column (line_table, ord_map, 6, 25);
+ rich_location richloc (line_table, colon);
+ richloc.add_fixit_insert_before (y, ".");
+ richloc.add_fixit_replace (colon, "=");
+ test_diagnostic_context dc;
+ dc.show_line_numbers_p = true;
+ diagnostic_show_locus (&dc, &richloc, DK_ERROR);
+ ASSERT_STREQ ("\n"
+ " 3 | y\n"
+ " | .\n"
+ "....\n"
+ " 6 | : 0.0};\n"
+ " | ^\n"
+ " | =\n",
+ pp_formatted_text (dc.printer));
+ }
}
@@ -3412,13 +3497,31 @@ test_fixit_insert_containing_newline (const line_table_case &case_)
{
rich_location richloc (line_table, case_loc);
richloc.add_fixit_insert_before (line_start, " break;\n");
- test_diagnostic_context dc;
- diagnostic_show_locus (&dc, &richloc, DK_ERROR);
- ASSERT_STREQ ("\n"
- "+ break;\n"
- " case 'b':\n"
- " ^~~~~~~~~\n",
- pp_formatted_text (dc.printer));
+
+ /* Without line numbers. */
+ {
+ test_diagnostic_context dc;
+ diagnostic_show_locus (&dc, &richloc, DK_ERROR);
+ ASSERT_STREQ ("\n"
+ " x = a;\n"
+ "+ break;\n"
+ " case 'b':\n"
+ " ^~~~~~~~~\n",
+ pp_formatted_text (dc.printer));
+ }
+
+ /* With line numbers. */
+ {
+ test_diagnostic_context dc;
+ dc.show_line_numbers_p = true;
+ diagnostic_show_locus (&dc, &richloc, DK_ERROR);
+ ASSERT_STREQ ("\n"
+ "2 | x = a;\n"
+ "+ |+ break;\n"
+ "3 | case 'b':\n"
+ " | ^~~~~~~~~\n",
+ pp_formatted_text (dc.printer));
+ }
}
/* Verify that attempts to add text with a newline fail when the
@@ -3475,16 +3578,33 @@ test_fixit_insert_containing_newline_2 (const line_table_case &case_)
if (putchar_finish > LINE_MAP_MAX_LOCATION_WITH_COLS)
return;
- test_diagnostic_context dc;
- diagnostic_show_locus (&dc, &richloc, DK_ERROR);
- ASSERT_STREQ ("\n"
- "FILENAME:1:1:\n"
- "+#include <stdio.h>\n"
- " test (int ch)\n"
- "FILENAME:3:2:\n"
- " putchar (ch);\n"
- " ^~~~~~~\n",
- pp_formatted_text (dc.printer));
+ {
+ test_diagnostic_context dc;
+ diagnostic_show_locus (&dc, &richloc, DK_ERROR);
+ ASSERT_STREQ ("\n"
+ "FILENAME:1:1:\n"
+ "+#include <stdio.h>\n"
+ " test (int ch)\n"
+ "FILENAME:3:2:\n"
+ " putchar (ch);\n"
+ " ^~~~~~~\n",
+ pp_formatted_text (dc.printer));
+ }
+
+ /* With line-numbering, the line spans are close enough to be
+ consolidated, since it makes little sense to skip line 2. */
+ {
+ test_diagnostic_context dc;
+ dc.show_line_numbers_p = true;
+ diagnostic_show_locus (&dc, &richloc, DK_ERROR);
+ ASSERT_STREQ ("\n"
+ "+ |+#include <stdio.h>\n"
+ "1 | test (int ch)\n"
+ "2 | {\n"
+ "3 | putchar (ch);\n"
+ " | ^~~~~~~\n",
+ pp_formatted_text (dc.printer));
+ }
}
/* Replacement fix-it hint containing a newline.
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index e4148297a87..637f5ada8f6 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -231,7 +231,7 @@ in the following sections.
-Wdelete-non-virtual-dtor -Wdeprecated-copy -Wliteral-suffix @gol
-Wmultiple-inheritance @gol
-Wnamespaces -Wnarrowing @gol
--Wpessimizing-move @gol
+-Wpessimizing-move -Wredundant-move @gol
-Wnoexcept -Wnoexcept-type -Wclass-memaccess @gol
-Wnon-virtual-dtor -Wreorder -Wregister @gol
-Weffc++ -Wstrict-null-sentinel -Wtemplates @gol
@@ -464,7 +464,7 @@ Objective-C and Objective-C++ Dialects}.
-ftree-loop-ivcanon -ftree-loop-linear -ftree-loop-optimize @gol
-ftree-loop-vectorize @gol
-ftree-parallelize-loops=@var{n} -ftree-pre -ftree-partial-pre -ftree-pta @gol
--ftree-reassoc -ftree-sink -ftree-slsr -ftree-sra @gol
+-ftree-reassoc -ftree-scev-cprop -ftree-sink -ftree-slsr -ftree-sra @gol
-ftree-switch-conversion -ftree-tail-merge @gol
-ftree-ter -ftree-vectorize -ftree-vrp -funconstrained-commons @gol
-funit-at-a-time -funroll-all-loops -funroll-loops @gol
@@ -3158,6 +3158,49 @@ But in this example, the @code{std::move} call prevents copy elision.
This warning is enabled by @option{-Wall}.
+@item -Wno-redundant-move @r{(C++ and Objective-C++ only)}
+@opindex Wredundant-move
+@opindex Wno-redundant-move
+Warn about redundant calls to @code{std::move}; that is, when
+a move operation would have been performed even without the @code{std::move}
+call. This happens because the compiler is forced to treat the object as if
+it were an rvalue in certain situations such as returning a local variable,
+where copy elision isn't applicable. Consider:
+
+@smallexample
+struct T @{
+@dots{}
+@};
+T fn(T t)
+@{
+ @dots{}
+ return std::move (t);
+@}
+@end smallexample
+
+Here, the @code{std::move} call is redundant. Because G++ implements Core
+Issue 1579, another example is:
+
+@smallexample
+struct T @{ // convertible to U
+@dots{}
+@};
+struct U @{
+@dots{}
+@};
+U fn()
+@{
+ T t;
+ @dots{}
+ return std::move (t);
+@}
+@end smallexample
+In this example, copy elision isn't applicable because the type of the
+expression being returned and the function return type differ, yet G++
+treats the return value as if it were designated by an rvalue.
+
+This warning is enabled by @option{-Wextra}.
+
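(Editorial note, not part of the patch: the usual fix for this warning is simply to drop the call, since the returned object is already treated as an rvalue in these positions.)
@smallexample
T fn(T t)
@{
  @dots{}
  return t;   // no std::move needed; t is implicitly treated as an rvalue here
@}
@end smallexample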
@item -fext-numeric-literals @r{(C++ and Objective-C++ only)}
@opindex fext-numeric-literals
@opindex fno-ext-numeric-literals
@@ -4112,6 +4155,7 @@ name is still supported, but the newer name is more descriptive.)
-Wold-style-declaration @r{(C only)} @gol
-Woverride-init @gol
-Wsign-compare @r{(C only)} @gol
+-Wredundant-move @r{(only for C++)} @gol
-Wtype-limits @gol
-Wuninitialized @gol
-Wshift-negative-value @r{(in C++03 and in C99 and newer)} @gol
@@ -7769,6 +7813,7 @@ compilation time.
-ftree-forwprop @gol
-ftree-fre @gol
-ftree-phiprop @gol
+-ftree-scev-cprop @gol
-ftree-sink @gol
-ftree-slsr @gol
-ftree-sra @gol
@@ -9076,6 +9121,15 @@ determining number of iterations requires complicated analysis. Later
optimizations then may determine the number easily. Useful especially
in connection with unrolling.
+@item -ftree-scev-cprop
+@opindex ftree-scev-cprop
+Perform final value replacement. If a variable is modified in a loop
+in such a way that its value when exiting the loop can be determined using
+only its initial value and the number of loop iterations, replace uses of
+the final value by such a computation, provided it is sufficiently cheap.
+This reduces data dependencies and may allow further simplifications.
+Enabled by default at @option{-O} and higher.
+
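(Editorial sketch of what final value replacement does; the function below is invented for illustration.)
@smallexample
unsigned f (unsigned n)
@{
  unsigned i = 0;
  for (unsigned k = 0; k < n; k++)
    i += 2;
  return i;   /* -ftree-scev-cprop may rewrite this as: return 2 * n; */
@}
@end smallexample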
@item -fivopts
@opindex fivopts
Perform induction variable optimizations (strength reduction, induction
@@ -11032,11 +11086,12 @@ parameter sets a limit on the length of the sets that are computed,
which prevents the runaway behavior. Setting a value of 0 for
this parameter allows an unlimited set length.
-@item sccvn-max-scc-size
-Maximum size of a strongly connected component (SCC) during SCCVN
-processing. If this limit is hit, SCCVN processing for the whole
-function is not done and optimizations depending on it are
-disabled. The default maximum SCC size is 10000.
+@item rpo-vn-max-loop-depth
+Maximum loop depth that is value-numbered optimistically.  When the limit
+hits, the innermost @var{rpo-vn-max-loop-depth} loops and the outermost
+loop in the loop nest are value-numbered optimistically and the remaining
+ones are not.  The default maximum loop depth is seven.
@item sccvn-max-alias-queries-per-access
Maximum number of alias-oracle queries we perform when looking for
@@ -24501,12 +24556,10 @@ target-specific.
@item -mstring-compare-inline-limit=@var{num}
@opindex mstring-compare-inline-limit
-Generate at most @var{num} pairs of load instructions to compare the
-string inline. If the difference or end of string is not found at the
+Compare at most @var{num} string bytes with inline code.
+If the difference or end of string is not found at the
end of the inline compare a call to @code{strcmp} or @code{strncmp} will
-take care of the rest of the comparison. The default is 8 pairs of
-loads, which will compare 64 bytes on a 64-bit target and 32 bytes on a
-32-bit target.
+take care of the rest of the comparison. The default is 64 bytes.
@item -G @var{num}
@opindex G
diff --git a/gcc/dse.c b/gcc/dse.c
index 26c6007b9ed..cfebfa0e110 100644
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -2419,8 +2419,7 @@ scan_insn (bb_info_t bb_info, rtx_insn *insn)
&& GET_CODE (sym) == SYMBOL_REF
&& SYMBOL_REF_DECL (sym)
&& TREE_CODE (SYMBOL_REF_DECL (sym)) == FUNCTION_DECL
- && DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (sym)) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (sym)) == BUILT_IN_MEMSET)
+ && fndecl_built_in_p (SYMBOL_REF_DECL (sym), BUILT_IN_MEMSET))
memset_call = SYMBOL_REF_DECL (sym);
if (const_call || memset_call)
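(Editorial note: this and many later hunks replace open-coded DECL_BUILT_IN_CLASS / DECL_FUNCTION_CODE checks with fndecl_built_in_p.  Judging from the conversions alone, the two-argument overload behaves roughly like the sketch below; this is an assumption, not the actual tree.h definition.  The one-argument form used elsewhere merely checks that the decl is some built-in.)
/* Assumed semantics of the new helper (sketch, not quoted from tree.h).  */
static inline bool
fndecl_built_in_p_sketch (const_tree node, built_in_function name)
{
  return (DECL_BUILT_IN_CLASS (node) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (node) == name);
}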
diff --git a/gcc/dumpfile.h b/gcc/dumpfile.h
index 0305d36fa78..671b7b9c8a6 100644
--- a/gcc/dumpfile.h
+++ b/gcc/dumpfile.h
@@ -28,7 +28,7 @@ along with GCC; see the file COPYING3. If not see
format codes (see pretty-print.c), with additional codes for middle-end
specific entities (see dumpfile.c). */
-#if GCC_VERSION >= 3005
+#if GCC_VERSION >= 9000
#define ATTRIBUTE_GCC_DUMP_PRINTF(m, n) \
__attribute__ ((__format__ (__gcc_dump_printf__, m ,n))) \
ATTRIBUTE_NONNULL(m)
diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c
index 91af4e8fc4e..77317ed2575 100644
--- a/gcc/dwarf2out.c
+++ b/gcc/dwarf2out.c
@@ -28287,6 +28287,12 @@ save_macinfo_strings (void)
&& (debug_str_section->common.flags & SECTION_MERGE) != 0)
set_indirect_string (find_AT_string (ref->info));
break;
+ case DW_MACINFO_start_file:
+ /* -gsplit-dwarf -g3 will also output filename as indirect
+ string. */
+ if (!dwarf_split_debug_info)
+ break;
+ /* Fall through. */
case DW_MACRO_define_strp:
case DW_MACRO_undef_strp:
set_indirect_string (find_AT_string (ref->info));
diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c
index dacf27e087c..9a735fab5bf 100644
--- a/gcc/emit-rtl.c
+++ b/gcc/emit-rtl.c
@@ -6410,13 +6410,6 @@ init_emit_once (void)
if (GET_MODE_CLASS ((machine_mode) i) == MODE_CC)
const_tiny_rtx[0][i] = const0_rtx;
- FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_POINTER_BOUNDS)
- {
- scalar_mode smode = smode_iter.require ();
- wide_int wi_zero = wi::zero (GET_MODE_PRECISION (smode));
- const_tiny_rtx[0][smode] = immed_wide_int_const (wi_zero, smode);
- }
-
pc_rtx = gen_rtx_fmt_ (PC, VOIDmode);
ret_rtx = gen_rtx_fmt_ (RETURN, VOIDmode);
simple_return_rtx = gen_rtx_fmt_ (SIMPLE_RETURN, VOIDmode);
diff --git a/gcc/emit-rtl.h b/gcc/emit-rtl.h
index 4e7bd1ec26d..f089355aef7 100644
--- a/gcc/emit-rtl.h
+++ b/gcc/emit-rtl.h
@@ -75,9 +75,6 @@ struct GTY(()) rtl_data {
result in a register, current_function_return_rtx will always be
the hard register containing the result. */
rtx return_rtx;
- /* If nonxero, an RTL expression for the lcoation at which the current
- function returns bounds for its result. */
- rtx return_bnd;
/* Vector of initial-value pairs. Each pair consists of a pseudo
register of approprite mode that stores the initial value a hard
diff --git a/gcc/explow.c b/gcc/explow.c
index 9a6182ac5c5..7d83eb16b6d 100644
--- a/gcc/explow.c
+++ b/gcc/explow.c
@@ -56,8 +56,7 @@ trunc_int_for_mode (HOST_WIDE_INT c, machine_mode mode)
int width = GET_MODE_PRECISION (smode);
/* You want to truncate to a _what_? */
- gcc_assert (SCALAR_INT_MODE_P (mode)
- || POINTER_BOUNDS_MODE_P (mode));
+ gcc_assert (SCALAR_INT_MODE_P (mode));
/* Canonicalize BImode to 0 and STORE_FLAG_VALUE. */
if (smode == BImode)
diff --git a/gcc/expmed.c b/gcc/expmed.c
index e2819309e4b..caf29e88924 100644
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -6239,10 +6239,20 @@ canonicalize_comparison (machine_mode mode, enum rtx_code *code, rtx *imm)
wrapping around in the case of unsigned values. If any occur
cancel the optimization. */
wi::overflow_type overflow = wi::OVF_NONE;
- wide_int imm_modif = wi::add (imm_val, to_add, sgn, &overflow);
+ wide_int imm_modif;
+
+ if (to_add == 1)
+ imm_modif = wi::add (imm_val, 1, sgn, &overflow);
+ else
+ imm_modif = wi::sub (imm_val, 1, sgn, &overflow);
+
if (overflow)
return;
+ /* The following creates a pseudo; if we cannot do that, bail out. */
+ if (!can_create_pseudo_p ())
+ return;
+
rtx reg = gen_rtx_REG (mode, LAST_VIRTUAL_REGISTER + 1);
rtx new_imm = immed_wide_int_const (imm_modif, mode);
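(Editorial worked example for the adjustment above, with invented values: rewriting between the LE and LT forms of a comparison changes the immediate by one, and the overflow check rejects the unsigned corner case.)
/* x <= 4  and  x < 5  test the same condition: imm_modif = 4 + 1 = 5.  */
bool le_form (unsigned x) { return x <= 4U; }
bool lt_form (unsigned x) { return x < 5U; }
/* For x <= UINT_MAX the rewrite would need UINT_MAX + 1, which wraps;
   wi::add reports the overflow above and the transformation is abandoned.  */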
diff --git a/gcc/expr.c b/gcc/expr.c
index 58574bafbf3..cd5cf12fca6 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -10937,7 +10937,7 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode,
}
/* Check for a built-in function. */
- if (fndecl && DECL_BUILT_IN (fndecl))
+ if (fndecl && fndecl_built_in_p (fndecl))
{
gcc_assert (DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_FRONTEND);
return expand_builtin (exp, target, subtarget, tmode, ignore);
@@ -11303,12 +11303,15 @@ is_aligning_offset (const_tree offset, const_tree exp)
/* Return the tree node if an ARG corresponds to a string constant or zero
if it doesn't. If we return nonzero, set *PTR_OFFSET to the (possibly
non-constant) offset in bytes within the string that ARG is accessing.
+ If NONSTR is non-null, consider even sequences of characters that aren't
+ nul-terminated strings to be valid. In that case, if ARG refers to such
+ a sequence, set *NONSTR to its declaration and clear it otherwise.
The type of the offset is sizetype. If MEM_SIZE is non-zero the storage
size of the memory is returned. If MEM_SIZE is zero, the string is
only returned when it is properly zero terminated. */
tree
-string_constant (tree arg, tree *ptr_offset, tree *mem_size)
+string_constant (tree arg, tree *ptr_offset, tree *mem_size, tree *nonstr)
{
tree array;
STRIP_NOPS (arg);
@@ -11362,7 +11365,7 @@ string_constant (tree arg, tree *ptr_offset, tree *mem_size)
return NULL_TREE;
tree offset;
- if (tree str = string_constant (arg0, &offset, mem_size))
+ if (tree str = string_constant (arg0, &offset, mem_size, nonstr))
{
/* Avoid pointers to arrays (see bug 86622). */
if (POINTER_TYPE_P (TREE_TYPE (arg))
@@ -11404,6 +11407,9 @@ string_constant (tree arg, tree *ptr_offset, tree *mem_size)
*ptr_offset = fold_convert (sizetype, offset);
if (mem_size)
*mem_size = TYPE_SIZE_UNIT (TREE_TYPE (array));
+ /* This is not strictly correct. FIXME in follow-up patch. */
+ if (nonstr)
+ *nonstr = NULL_TREE;
return array;
}
@@ -11450,22 +11456,35 @@ string_constant (tree arg, tree *ptr_offset, tree *mem_size)
if (!array_size || TREE_CODE (array_size) != INTEGER_CST)
return NULL_TREE;
- /* Avoid returning a string that doesn't fit in the array
- it is stored in, like
+ /* Avoid returning an array that is unterminated because it lacks
+ a terminating nul, like
const char a[4] = "abcde";
- but do handle those that fit even if they have excess
+ but do handle those that are strings even if they have excess
initializers, such as in
const char a[4] = "abc\000\000";
The excess elements contribute to TREE_STRING_LENGTH()
but not to strlen(). */
unsigned HOST_WIDE_INT charsize
= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (init))));
+ /* Compute the lower bound number of elements (not bytes) in the array
+ that the string is used to initialize. The actual size of the array
+ may be greater if the string is shorter, but the important
+ point is whether the literal, including the terminating nul,
+ fits the array. */
+ unsigned HOST_WIDE_INT array_elts
+ = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (init))) / charsize;
+
+ /* Compute the string length in (wide) characters. */
unsigned HOST_WIDE_INT length = TREE_STRING_LENGTH (init);
length = string_length (TREE_STRING_POINTER (init), charsize,
length / charsize);
if (mem_size)
*mem_size = TYPE_SIZE_UNIT (TREE_TYPE (init));
- else if (compare_tree_int (array_size, length + 1) < 0)
+ if (nonstr)
+ *nonstr = array_elts > length ? NULL_TREE : array;
+
+ if ((!mem_size && !nonstr)
+ && array_elts <= length)
return NULL_TREE;
*ptr_offset = offset;
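(Editorial sketch of how a caller might use the new NONSTR out-parameter; the names arg and warn_about_unterminated_array are invented for illustration, only the string_constant signature comes from this patch.)
/* Hypothetical caller (sketch only).  */
tree offset, mem_size, nonstr;
if (tree str = string_constant (arg, &offset, &mem_size, &nonstr))
  {
    if (nonstr)
      /* ARG points into a char array lacking a terminating nul;
	 a caller could warn here instead of folding.  */
      warn_about_unterminated_array (nonstr);   /* invented helper */
  }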
diff --git a/gcc/expr.h b/gcc/expr.h
index d4d25645f61..4177de8060b 100644
--- a/gcc/expr.h
+++ b/gcc/expr.h
@@ -288,7 +288,7 @@ expand_normal (tree exp)
/* Return the tree node and offset if a given argument corresponds to
a string constant. */
-extern tree string_constant (tree, tree *, tree * = NULL);
+extern tree string_constant (tree, tree *, tree *, tree *);
/* Two different ways of generating switch statements. */
extern int try_casesi (tree, tree, tree, tree, rtx, rtx, rtx, profile_probability);
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index b318fc7705f..bdd24c5969b 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -3451,7 +3451,7 @@ operand_equal_p (const_tree arg0, const_tree arg1, unsigned int flags)
case tcc_declaration:
/* Consider __builtin_sqrt equal to sqrt. */
return (TREE_CODE (arg0) == FUNCTION_DECL
- && DECL_BUILT_IN (arg0) && DECL_BUILT_IN (arg1)
+ && fndecl_built_in_p (arg0) && fndecl_built_in_p (arg1)
&& DECL_BUILT_IN_CLASS (arg0) == DECL_BUILT_IN_CLASS (arg1)
&& DECL_FUNCTION_CODE (arg0) == DECL_FUNCTION_CODE (arg1));
@@ -10753,8 +10753,7 @@ fold_binary_loc (location_t loc, enum tree_code code, tree type,
tree fndecl = get_callee_fndecl (arg0);
if (fndecl
- && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_STRLEN
+ && fndecl_built_in_p (fndecl, BUILT_IN_STRLEN)
&& call_expr_nargs (arg0) == 1
&& TREE_CODE (TREE_TYPE (CALL_EXPR_ARG (arg0, 0))) == POINTER_TYPE)
{
@@ -13780,7 +13779,7 @@ fold_read_from_constant_string (tree exp)
location_t loc = EXPR_LOCATION (exp);
if (TREE_CODE (exp) == INDIRECT_REF)
- string = string_constant (exp1, &index);
+ string = string_constant (exp1, &index, NULL, NULL);
else
{
tree low_bound = array_ref_low_bound (exp);
@@ -14593,7 +14592,7 @@ c_getstr (tree src, unsigned HOST_WIDE_INT *strlen /* = NULL */,
if (strlen)
*strlen = 0;
- src = string_constant (src, &offset_node);
+ src = string_constant (src, &offset_node, NULL, NULL);
if (src == 0)
return NULL;
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index 88edff3eed6..04598438aae 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,71 @@
+2018-08-28 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/80477
+ * trans-expr.c (gfc_conv_procedure_call): Allocatable class
+ scalar results being passed to a derived type formal argument
+ are finalized if possible. Otherwise, rely on existing code for
+ deallocation. Make the deallocation of allocatable result
+ components conditional on finalization not taking place. Make
+ the freeing of data components after finalization conditional
+ on the data being NULL.
+ (gfc_trans_arrayfunc_assign): Change the gcc_assert to a
+ condition to return NULL_TREE.
+ (gfc_trans_assignment_1): If the assignment is class to class
+ and the rhs expression must be finalized but the assignment
+ is not marked as a polymorphic assignment, use the vptr copy
+ function instead of gfc_trans_scalar_assign.
+
+ PR fortran/86481
+ * trans-expr.c (gfc_conv_expr_reference): Do not add the post
+ block to the pre block if the expression is to be finalized.
+ * trans-stmt.c (gfc_trans_allocate): If the expr3 must be
+ finalized, load the post block into a finalization block and
+ add it right at the end of the allocation block.
+
+2018-08-27 David Malcolm <dmalcolm@redhat.com>
+
+ PR 87091
+ * error.c (gfc_format_decoder): Update for conversion of
+ show_caret_p to a tri-state.
+
+2018-08-25 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/86545
+ * resolve.c (resolve_transfer): Correctly determine typespec for
+ generic function calls, in order to throw a proper error.
+
+2018-08-24 Thomas Koenig <tkoenig@gcc.gnu.org>
+
+ PR fortran/86837
+ * frontend-passes.c (var_in_expr_callback): New function.
+ (var_in_expr): New function.
+ (traverse_io_block): Use var_in_expr instead of
+ gfc_check_dependency for checking if the variable depends on the
+ previous iterators.
+
+2018-08-23 Janne Blomqvist <blomqvist.janne@gmail.com>
+
+ * trans-intrinsic.c (gfc_conv_intrinsic_minmaxval): Delete
+ HONOR_SIGNED_ZEROS checks.
+
+2018-08-23 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/86863
+ * resolve.c (resolve_typebound_call): If the TBP is not marked
+ as a subroutine, check the specific symbol.
+
+2018-08-22 Thomas Koenig <tkoenig@gcc.gnu.org>
+
+ * gfortran.texi: Mention that asynchronous I/O does
+ not work on systems which lack condition variables, such
+ as AIX.
+
+2018-08-22 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/86935
+ * match.c (gfc_match_associate): Improve diagnostics for the ASSOCIATE
+ statement.
+
2018-08-22 Andrew Benson <abensonca@gmail.com>
* module.c (load_generic_interfaces): Move call to find_symbol()
@@ -903,7 +971,7 @@
* trans-intrinsic.c (conv_intrinsic_kill, conv_intrinsic_kill_sub): new
functions.
(gfc_conv_intrinsic_function): Use conv_intrinsic_kill.
- (gfc_conv_intrinsic_subroutine): Use conv_intrinsic_kill_sub.
+ (gfc_conv_intrinsic_subroutine): Use conv_intrinsic_kill_sub.
* trans.h: Declare gfor_fndecl_kill and gfor_fndecl_kill_sub.
2018-03-11 Paul Thomas <pault@gcc.gnu.org>
@@ -1138,7 +1206,7 @@
* trans-stmt.c (gfc_trans_lock_unlock): Likewise.
(gfc_trans_event_post_wait): Likewise.
(gfc_trans_sync): Likewise.
- (gfc_trans_stop): Use size_t for character lengths, int for exit
+ (gfc_trans_stop): Use size_t for character lengths, int for exit
codes.
2018-02-20 Thomas Koenig <tkoenig@gcc.gnu.org>
@@ -1759,7 +1827,7 @@
(gfc_interpret_character): Use gfc_charlen_t.
* target-memory.h (gfc_encode_character): Modify prototype.
* trans-array.c (gfc_trans_array_ctor_element): Use existing type.
- (get_array_ctor_var_strlen): Use gfc_conv_mpz_to_tree_type.
+ (get_array_ctor_var_strlen): Use gfc_conv_mpz_to_tree_type.
(trans_array_constructor): Use existing type.
(get_array_charlen): Likewise.
* trans-const.c (gfc_conv_mpz_to_tree_type): New function.
diff --git a/gcc/fortran/error.c b/gcc/fortran/error.c
index 7e882ba76bf..b3b0138b0c3 100644
--- a/gcc/fortran/error.c
+++ b/gcc/fortran/error.c
@@ -953,7 +953,7 @@ gfc_format_decoder (pretty_printer *pp, text_info *text, const char *spec,
= linemap_position_for_loc_and_offset (line_table,
loc->lb->location,
offset);
- text->set_location (loc_num, src_loc, true);
+ text->set_location (loc_num, src_loc, SHOW_RANGE_WITH_CARET);
pp_string (pp, result[loc_num]);
return true;
}
diff --git a/gcc/fortran/frontend-passes.c b/gcc/fortran/frontend-passes.c
index f9dcddcb156..0a5e8937015 100644
--- a/gcc/fortran/frontend-passes.c
+++ b/gcc/fortran/frontend-passes.c
@@ -1104,6 +1104,31 @@ convert_elseif (gfc_code **c, int *walk_subtrees ATTRIBUTE_UNUSED,
return 0;
}
+/* Callback function for var_in_expr - return true if expr1 and
+ expr2 are identical variables. */
+static int
+var_in_expr_callback (gfc_expr **e, int *walk_subtrees ATTRIBUTE_UNUSED,
+ void *data)
+{
+ gfc_expr *expr1 = (gfc_expr *) data;
+ gfc_expr *expr2 = *e;
+
+ if (expr2->expr_type != EXPR_VARIABLE)
+ return 0;
+
+ return expr1->symtree->n.sym == expr2->symtree->n.sym;
+}
+
+/* Return true if expr1 is found in expr2. */
+
+static bool
+var_in_expr (gfc_expr *expr1, gfc_expr *expr2)
+{
+ gcc_assert (expr1->expr_type == EXPR_VARIABLE);
+
+ return gfc_expr_walker (&expr2, var_in_expr_callback, (void *) expr1);
+}
+
struct do_stack
{
struct do_stack *prev;
@@ -1256,9 +1281,9 @@ traverse_io_block (gfc_code *code, bool *has_reached, gfc_code *prev)
for (int j = i - 1; j < i; j++)
{
if (iters[j]
- && (gfc_check_dependency (var, iters[j]->start, true)
- || gfc_check_dependency (var, iters[j]->end, true)
- || gfc_check_dependency (var, iters[j]->step, true)))
+ && (var_in_expr (var, iters[j]->start)
+ || var_in_expr (var, iters[j]->end)
+ || var_in_expr (var, iters[j]->step)))
return false;
}
}
diff --git a/gcc/fortran/gfortran.texi b/gcc/fortran/gfortran.texi
index 0f3f454ff83..30934046a49 100644
--- a/gcc/fortran/gfortran.texi
+++ b/gcc/fortran/gfortran.texi
@@ -1509,7 +1509,8 @@ end program main
Asynchronous I/O is supported if the program is linked against the
POSIX thread library. If that is not the case, all I/O is performed
-as synchronous.
+as synchronous. On systems which do not support pthread condition
+variables, such as AIX, I/O is also performed as synchronous.
On some systems, such as Darwin or Solaris, the POSIX thread library
is always linked in, so asynchronous I/O is always performed. On other
diff --git a/gcc/fortran/match.c b/gcc/fortran/match.c
index 1ab0e0fad9a..85247dd8334 100644
--- a/gcc/fortran/match.c
+++ b/gcc/fortran/match.c
@@ -1889,17 +1889,21 @@ gfc_match_associate (void)
gfc_association_list* a;
/* Match the next association. */
- if (gfc_match (" %n => %e", newAssoc->name, &newAssoc->target)
- != MATCH_YES)
+ if (gfc_match (" %n =>", newAssoc->name) != MATCH_YES)
+ {
+ gfc_error ("Expected association at %C");
+ goto assocListError;
+ }
+
+ if (gfc_match (" %e", &newAssoc->target) != MATCH_YES)
{
/* Have another go, allowing for procedure pointer selectors. */
gfc_matching_procptr_assignment = 1;
- if (gfc_match (" %n => %e", newAssoc->name, &newAssoc->target)
- != MATCH_YES)
- {
- gfc_error ("Expected association at %C");
- goto assocListError;
- }
+ if (gfc_match (" %e", &newAssoc->target) != MATCH_YES)
+ {
+ gfc_error ("Invalid association target at %C");
+ goto assocListError;
+ }
gfc_matching_procptr_assignment = 0;
}
newAssoc->where = gfc_current_locus;
diff --git a/gcc/fortran/resolve.c b/gcc/fortran/resolve.c
index 4ad4dcf780d..ded27624283 100644
--- a/gcc/fortran/resolve.c
+++ b/gcc/fortran/resolve.c
@@ -6266,9 +6266,17 @@ resolve_typebound_call (gfc_code* c, const char **name, bool *overridable)
/* Check that's really a SUBROUTINE. */
if (!c->expr1->value.compcall.tbp->subroutine)
{
- gfc_error ("%qs at %L should be a SUBROUTINE",
- c->expr1->value.compcall.name, &c->loc);
- return false;
+ if (!c->expr1->value.compcall.tbp->is_generic
+ && c->expr1->value.compcall.tbp->u.specific
+ && c->expr1->value.compcall.tbp->u.specific->n.sym
+ && c->expr1->value.compcall.tbp->u.specific->n.sym->attr.subroutine)
+ c->expr1->value.compcall.tbp->subroutine = 1;
+ else
+ {
+ gfc_error ("%qs at %L should be a SUBROUTINE",
+ c->expr1->value.compcall.name, &c->loc);
+ return false;
+ }
}
if (!check_typebound_baseobject (c->expr1))
@@ -9272,7 +9280,6 @@ resolve_select_type (gfc_code *code, gfc_namespace *old_ns)
static void
resolve_transfer (gfc_code *code)
{
- gfc_typespec *ts;
gfc_symbol *sym, *derived;
gfc_ref *ref;
gfc_expr *exp;
@@ -9308,7 +9315,9 @@ resolve_transfer (gfc_code *code)
_("item in READ")))
return;
- ts = exp->expr_type == EXPR_STRUCTURE ? &exp->ts : &exp->symtree->n.sym->ts;
+ const gfc_typespec *ts = exp->expr_type == EXPR_STRUCTURE
+ || exp->expr_type == EXPR_FUNCTION
+ ? &exp->ts : &exp->symtree->n.sym->ts;
/* Go to actual component transferred. */
for (ref = exp->ref; ref; ref = ref->next)
diff --git a/gcc/fortran/trans-expr.c b/gcc/fortran/trans-expr.c
index 54e318e21f7..56ce98c78c6 100644
--- a/gcc/fortran/trans-expr.c
+++ b/gcc/fortran/trans-expr.c
@@ -4886,6 +4886,8 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
for (arg = args, argc = 0; arg != NULL;
arg = arg->next, formal = formal ? formal->next : NULL, ++argc)
{
+ bool finalized = false;
+
e = arg->expr;
fsym = formal ? formal->sym : NULL;
parm_kind = MISSING;
@@ -5360,7 +5362,42 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
&& e->ts.type == BT_CLASS
&& !CLASS_DATA (e)->attr.dimension
&& !CLASS_DATA (e)->attr.codimension)
- parmse.expr = gfc_class_data_get (parmse.expr);
+ {
+ parmse.expr = gfc_class_data_get (parmse.expr);
+ /* The result is a class temporary, whose _data component
+ must be freed to avoid a memory leak. */
+ if (e->expr_type == EXPR_FUNCTION
+ && CLASS_DATA (e)->attr.allocatable)
+ {
+ tree zero;
+
+ gfc_expr *var;
+
+ /* Borrow the function symbol to make a call to
+ gfc_add_finalizer_call and then restore it. */
+ tmp = e->symtree->n.sym->backend_decl;
+ e->symtree->n.sym->backend_decl
+ = TREE_OPERAND (parmse.expr, 0);
+ e->symtree->n.sym->attr.flavor = FL_VARIABLE;
+ var = gfc_lval_expr_from_sym (e->symtree->n.sym);
+ finalized = gfc_add_finalizer_call (&parmse.post,
+ var);
+ gfc_free_expr (var);
+ e->symtree->n.sym->backend_decl = tmp;
+ e->symtree->n.sym->attr.flavor = FL_PROCEDURE;
+
+ /* Then free the class _data. */
+ zero = build_int_cst (TREE_TYPE (parmse.expr), 0);
+ tmp = fold_build2_loc (input_location, NE_EXPR,
+ logical_type_node,
+ parmse.expr, zero);
+ tmp = build3_v (COND_EXPR, tmp,
+ gfc_call_free (parmse.expr),
+ build_empty_stmt (input_location));
+ gfc_add_expr_to_block (&parmse.post, tmp);
+ gfc_add_modify (&parmse.post, parmse.expr, zero);
+ }
+ }
/* Wrap scalar variable in a descriptor. We need to convert
the address of a pointer back to the pointer itself before,
@@ -5687,9 +5724,18 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
tmp = build_fold_indirect_ref_loc (input_location, tmp);
}
- tmp = gfc_deallocate_alloc_comp (e->ts.u.derived, tmp, parm_rank);
-
- gfc_prepend_expr_to_block (&post, tmp);
+ if (!finalized && !e->must_finalize)
+ {
+ if ((e->ts.type == BT_CLASS
+ && GFC_CLASS_TYPE_P (TREE_TYPE (tmp)))
+ || e->ts.type == BT_DERIVED)
+ tmp = gfc_deallocate_alloc_comp (e->ts.u.derived, tmp,
+ parm_rank);
+ else if (e->ts.type == BT_CLASS)
+ tmp = gfc_deallocate_alloc_comp (CLASS_DATA (e)->ts.u.derived,
+ tmp, parm_rank);
+ gfc_prepend_expr_to_block (&post, tmp);
+ }
}
/* Add argument checking of passing an unallocated/NULL actual to
@@ -6410,7 +6456,7 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
final_fndecl = gfc_class_vtab_final_get (se->expr);
is_final = fold_build2_loc (input_location, NE_EXPR,
logical_type_node,
- final_fndecl,
+ final_fndecl,
fold_convert (TREE_TYPE (final_fndecl),
null_pointer_node));
final_fndecl = build_fold_indirect_ref_loc (input_location,
@@ -6420,28 +6466,43 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
gfc_build_addr_expr (NULL, tmp),
gfc_class_vtab_size_get (se->expr),
boolean_false_node);
- tmp = fold_build3_loc (input_location, COND_EXPR,
+ tmp = fold_build3_loc (input_location, COND_EXPR,
void_type_node, is_final, tmp,
build_empty_stmt (input_location));
if (se->ss && se->ss->loop)
{
- gfc_add_expr_to_block (&se->ss->loop->post, tmp);
- tmp = gfc_call_free (info->data);
+ gfc_prepend_expr_to_block (&se->ss->loop->post, tmp);
+ tmp = fold_build2_loc (input_location, NE_EXPR,
+ logical_type_node,
+ info->data,
+ fold_convert (TREE_TYPE (info->data),
+ null_pointer_node));
+ tmp = fold_build3_loc (input_location, COND_EXPR,
+ void_type_node, tmp,
+ gfc_call_free (info->data),
+ build_empty_stmt (input_location));
gfc_add_expr_to_block (&se->ss->loop->post, tmp);
}
else
{
- gfc_add_expr_to_block (&se->post, tmp);
- tmp = gfc_class_data_get (se->expr);
- tmp = gfc_call_free (tmp);
+ tree classdata;
+ gfc_prepend_expr_to_block (&se->post, tmp);
+ classdata = gfc_class_data_get (se->expr);
+ tmp = fold_build2_loc (input_location, NE_EXPR,
+ logical_type_node,
+ classdata,
+ fold_convert (TREE_TYPE (classdata),
+ null_pointer_node));
+ tmp = fold_build3_loc (input_location, COND_EXPR,
+ void_type_node, tmp,
+ gfc_call_free (classdata),
+ build_empty_stmt (input_location));
gfc_add_expr_to_block (&se->post, tmp);
}
-
-no_finalization:
- expr->must_finalize = 0;
}
+no_finalization:
gfc_add_block_to_block (&se->post, &post);
}
@@ -8072,7 +8133,9 @@ gfc_conv_expr_reference (gfc_se * se, gfc_expr * expr)
var = gfc_create_var (TREE_TYPE (se->expr), NULL);
gfc_add_modify (&se->pre, var, se->expr);
}
- gfc_add_block_to_block (&se->pre, &se->post);
+
+ if (!expr->must_finalize)
+ gfc_add_block_to_block (&se->pre, &se->post);
/* Take the address of that value. */
se->expr = gfc_build_addr_expr (NULL_TREE, var);
@@ -9262,10 +9325,12 @@ gfc_trans_arrayfunc_assign (gfc_expr * expr1, gfc_expr * expr2)
/* The frontend doesn't seem to bother filling in expr->symtree for intrinsic
functions. */
comp = gfc_get_proc_ptr_comp (expr2);
- gcc_assert (expr2->value.function.isym
+
+ if (!(expr2->value.function.isym
|| (comp && comp->attr.dimension)
|| (!comp && gfc_return_by_reference (expr2->value.function.esym)
- && expr2->value.function.esym->result->attr.dimension));
+ && expr2->value.function.esym->result->attr.dimension)))
+ return NULL;
gfc_init_se (&se, NULL);
gfc_start_block (&se.pre);
@@ -10238,6 +10303,8 @@ gfc_trans_assignment_1 (gfc_expr * expr1, gfc_expr * expr2, bool init_flag,
gfc_add_block_to_block (&loop.post, &rse.post);
}
+ tmp = NULL_TREE;
+
if (is_poly_assign)
tmp = trans_class_assignment (&body, expr1, expr2, &lse, &rse,
use_vptr_copy || (lhs_attr.allocatable
@@ -10266,13 +10333,35 @@ gfc_trans_assignment_1 (gfc_expr * expr1, gfc_expr * expr2, bool init_flag,
code.resolved_isym = gfc_intrinsic_subroutine_by_id (GFC_ISYM_CAF_SEND);
tmp = gfc_conv_intrinsic_subroutine (&code);
}
- else
+ else if (!is_poly_assign && expr2->must_finalize
+ && expr1->ts.type == BT_CLASS
+ && expr2->ts.type == BT_CLASS)
+ {
+ /* This case comes about when the scalarizer provides array element
+ references. Use the vptr copy function, since this does a deep
+ copy of allocatable components; without it, the finalizer call on the
+ rhs temporary would deallocate components that the lhs still shares. */
+ tmp = gfc_get_vptr_from_expr (rse.expr);
+ if (tmp != NULL_TREE)
+ {
+ tree fcn = gfc_vptr_copy_get (tmp);
+ if (POINTER_TYPE_P (TREE_TYPE (fcn)))
+ fcn = build_fold_indirect_ref_loc (input_location, fcn);
+ tmp = build_call_expr_loc (input_location,
+ fcn, 2,
+ gfc_build_addr_expr (NULL, rse.expr),
+ gfc_build_addr_expr (NULL, lse.expr));
+ }
+ }
+
+ /* If nothing else works, do it the old-fashioned way! */
+ if (tmp == NULL_TREE)
tmp = gfc_trans_scalar_assign (&lse, &rse, expr1->ts,
gfc_expr_is_variable (expr2)
|| scalar_to_array
|| expr2->expr_type == EXPR_ARRAY,
!(l_is_temp || init_flag) && dealloc,
expr1->symtree->n.sym->attr.codimension);
+
/* Add the pre blocks to the body. */
gfc_add_block_to_block (&body, &rse.pre);
gfc_add_block_to_block (&body, &lse.pre);
diff --git a/gcc/fortran/trans-intrinsic.c b/gcc/fortran/trans-intrinsic.c
index 387cf80b921..b2cea93742a 100644
--- a/gcc/fortran/trans-intrinsic.c
+++ b/gcc/fortran/trans-intrinsic.c
@@ -5511,22 +5511,10 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op)
{
/* MIN_EXPR/MAX_EXPR has unspecified behavior with NaNs or
signed zeros. */
- if (HONOR_SIGNED_ZEROS (DECL_MODE (limit)))
- {
- tmp = fold_build2_loc (input_location, op, logical_type_node,
- arrayse.expr, limit);
- ifbody = build2_v (MODIFY_EXPR, limit, arrayse.expr);
- tmp = build3_v (COND_EXPR, tmp, ifbody,
- build_empty_stmt (input_location));
- gfc_add_expr_to_block (&block2, tmp);
- }
- else
- {
- tmp = fold_build2_loc (input_location,
- op == GT_EXPR ? MAX_EXPR : MIN_EXPR,
- type, arrayse.expr, limit);
- gfc_add_modify (&block2, limit, tmp);
- }
+ tmp = fold_build2_loc (input_location,
+ op == GT_EXPR ? MAX_EXPR : MIN_EXPR,
+ type, arrayse.expr, limit);
+ gfc_add_modify (&block2, limit, tmp);
}
if (fast)
@@ -5535,8 +5523,7 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op)
/* MIN_EXPR/MAX_EXPR has unspecified behavior with NaNs or
signed zeros. */
- if (HONOR_NANS (DECL_MODE (limit))
- || HONOR_SIGNED_ZEROS (DECL_MODE (limit)))
+ if (HONOR_NANS (DECL_MODE (limit)))
{
tmp = fold_build2_loc (input_location, op, logical_type_node,
arrayse.expr, limit);
@@ -5598,8 +5585,7 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op)
/* MIN_EXPR/MAX_EXPR has unspecified behavior with NaNs or
signed zeros. */
- if (HONOR_NANS (DECL_MODE (limit))
- || HONOR_SIGNED_ZEROS (DECL_MODE (limit)))
+ if (HONOR_NANS (DECL_MODE (limit)))
{
tmp = fold_build2_loc (input_location, op, logical_type_node,
arrayse.expr, limit);
diff --git a/gcc/fortran/trans-stmt.c b/gcc/fortran/trans-stmt.c
index cc1a4294327..795d3cc0a13 100644
--- a/gcc/fortran/trans-stmt.c
+++ b/gcc/fortran/trans-stmt.c
@@ -5783,6 +5783,7 @@ gfc_trans_allocate (gfc_code * code)
enum { E3_UNSET = 0, E3_SOURCE, E3_MOLD, E3_DESC } e3_is;
stmtblock_t block;
stmtblock_t post;
+ stmtblock_t final_block;
tree nelems;
bool upoly_expr, tmp_expr3_len_flag = false, al_len_needs_set, is_coarray;
bool needs_caf_sync, caf_refs_comp;
@@ -5801,6 +5802,7 @@ gfc_trans_allocate (gfc_code * code)
gfc_init_block (&block);
gfc_init_block (&post);
+ gfc_init_block (&final_block);
/* STAT= (and maybe ERRMSG=) is present. */
if (code->expr1)
@@ -5842,6 +5844,11 @@ gfc_trans_allocate (gfc_code * code)
is_coarray = gfc_is_coarray (code->expr3);
+ if (code->expr3->expr_type == EXPR_FUNCTION && !code->expr3->mold
+ && (gfc_is_class_array_function (code->expr3)
+ || gfc_is_alloc_class_scalar_function (code->expr3)))
+ code->expr3->must_finalize = 1;
+
/* Figure whether we need the vtab from expr3. */
for (al = code->ext.alloc.list; !vtab_needed && al != NULL;
al = al->next)
@@ -5914,7 +5921,10 @@ gfc_trans_allocate (gfc_code * code)
temp_obj_created = temp_var_needed = !VAR_P (se.expr);
}
gfc_add_block_to_block (&block, &se.pre);
- gfc_add_block_to_block (&post, &se.post);
+ if (code->expr3->must_finalize)
+ gfc_add_block_to_block (&final_block, &se.post);
+ else
+ gfc_add_block_to_block (&post, &se.post);
/* Special case when string in expr3 is zero. */
if (code->expr3->ts.type == BT_CHARACTER
@@ -6743,6 +6753,8 @@ gfc_trans_allocate (gfc_code * code)
gfc_add_block_to_block (&block, &se.post);
gfc_add_block_to_block (&block, &post);
+ if (code->expr3 && code->expr3->must_finalize)
+ gfc_add_block_to_block (&block, &final_block);
return gfc_finish_block (&block);
}
diff --git a/gcc/function.c b/gcc/function.c
index dee303cdbdd..302438323c8 100644
--- a/gcc/function.c
+++ b/gcc/function.c
@@ -5180,7 +5180,6 @@ diddle_return_value_1 (void (*doit) (rtx, void *), void *arg, rtx outgoing)
void
diddle_return_value (void (*doit) (rtx, void *), void *arg)
{
- diddle_return_value_1 (doit, arg, crtl->return_bnd);
diddle_return_value_1 (doit, arg, crtl->return_rtx);
}
diff --git a/gcc/gcc-rich-location.c b/gcc/gcc-rich-location.c
index 2576c7387ee..81beb61661c 100644
--- a/gcc/gcc-rich-location.c
+++ b/gcc/gcc-rich-location.c
@@ -47,7 +47,7 @@ gcc_rich_location::add_expr (tree expr, range_label *label)
gcc_assert (expr);
if (CAN_HAVE_RANGE_P (expr))
- add_range (EXPR_LOCATION (expr), false, label);
+ add_range (EXPR_LOCATION (expr), SHOW_RANGE_WITHOUT_CARET, label);
}
/* If T is an expression, add a range for it to the rich_location,
diff --git a/gcc/gcov.c b/gcc/gcov.c
index 43dfc9a4b2c..ff4020c713e 100644
--- a/gcc/gcov.c
+++ b/gcc/gcov.c
@@ -408,6 +408,10 @@ static vector<source_info> sources;
/* Mapping of file names to sources */
static vector<name_map> names;
+/* Record all processed files in order to warn about
+ a file being read multiple times. */
+static vector<char *> processed_files;
+
/* This holds data summary information. */
static unsigned object_runs;
@@ -1146,6 +1150,17 @@ static void
process_file (const char *file_name)
{
create_file_names (file_name);
+
+ for (unsigned i = 0; i < processed_files.size (); i++)
+ if (strcmp (da_file_name, processed_files[i]) == 0)
+ {
+ fnotice (stderr, "'%s' file is already processed\n",
+ file_name);
+ return;
+ }
+
+ processed_files.push_back (xstrdup (da_file_name));
+
read_graph_file ();
read_count_file ();
}
diff --git a/gcc/genmatch.c b/gcc/genmatch.c
index 5848722684b..50d72f8f1e7 100644
--- a/gcc/genmatch.c
+++ b/gcc/genmatch.c
@@ -4150,7 +4150,7 @@ parser::parse_operation ()
if (active_fors.length() == 0)
record_operlist (id_tok->src_loc, p);
else
- fatal_at (id_tok, "operator-list %s cannot be exapnded inside 'for'", id);
+ fatal_at (id_tok, "operator-list %s cannot be expanded inside 'for'", id);
}
return op;
}
diff --git a/gcc/genmodes.c b/gcc/genmodes.c
index af0d280e208..6db9ed475f4 100644
--- a/gcc/genmodes.c
+++ b/gcc/genmodes.c
@@ -340,7 +340,6 @@ complete_mode (struct mode_data *m)
break;
case MODE_INT:
- case MODE_POINTER_BOUNDS:
case MODE_FLOAT:
case MODE_DECIMAL_FLOAT:
case MODE_FRACT:
@@ -572,19 +571,6 @@ make_special_mode (enum mode_class cl, const char *name,
new_mode (cl, name, file, line);
}
-#define POINTER_BOUNDS_MODE(N, Y) \
- make_pointer_bounds_mode (#N, Y, __FILE__, __LINE__)
-
-static void ATTRIBUTE_UNUSED
-make_pointer_bounds_mode (const char *name,
- unsigned int bytesize,
- const char *file, unsigned int line)
-{
- struct mode_data *m = new_mode (MODE_POINTER_BOUNDS, name, file, line);
- m->bytesize = bytesize;
-}
-
-
#define INT_MODE(N, Y) FRACTIONAL_INT_MODE (N, -1U, Y)
#define FRACTIONAL_INT_MODE(N, B, Y) \
make_int_mode (#N, B, Y, __FILE__, __LINE__)
@@ -1213,7 +1199,6 @@ get_mode_class (struct mode_data *mode)
case MODE_UFRACT:
case MODE_ACCUM:
case MODE_UACCUM:
- case MODE_POINTER_BOUNDS:
return "scalar_mode";
case MODE_FLOAT:
diff --git a/gcc/genpreds.c b/gcc/genpreds.c
index e0892ffa572..bb294849504 100644
--- a/gcc/genpreds.c
+++ b/gcc/genpreds.c
@@ -152,7 +152,7 @@ write_predicate_subfunction (struct pred_data *p)
p->exp = and_exp;
printf ("static inline int\n"
- "%s_1 (rtx op, machine_mode mode ATTRIBUTE_UNUSED)\n",
+ "%s_1 (rtx op ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED)\n",
p->name);
rtx_reader_ptr->print_md_ptr_loc (p->c_block);
if (p->c_block[0] == '{')
diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c
index 07341ebe66f..cbca6a95b76 100644
--- a/gcc/gimple-fold.c
+++ b/gcc/gimple-fold.c
@@ -6347,8 +6347,7 @@ gimple_fold_stmt_to_constant_1 (gimple *stmt, tree (*valueize) (tree),
fn = (*valueize) (gimple_call_fn (stmt));
if (TREE_CODE (fn) == ADDR_EXPR
- && TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL
- && DECL_BUILT_IN (TREE_OPERAND (fn, 0))
+ && fndecl_built_in_p (TREE_OPERAND (fn, 0))
&& gimple_builtin_call_types_compatible_p (stmt,
TREE_OPERAND (fn, 0)))
{
diff --git a/gcc/gimple-low.c b/gcc/gimple-low.c
index 9623fb86a04..c3777a1e761 100644
--- a/gcc/gimple-low.c
+++ b/gcc/gimple-low.c
@@ -355,7 +355,7 @@ lower_stmt (gimple_stmt_iterator *gsi, struct lower_data *data)
}
if (decl
- && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
+ && fndecl_built_in_p (decl, BUILT_IN_NORMAL))
{
if (DECL_FUNCTION_CODE (decl) == BUILT_IN_SETJMP)
{
diff --git a/gcc/gimple-pretty-print.c b/gcc/gimple-pretty-print.c
index d3c5ec6f79b..83e22735571 100644
--- a/gcc/gimple-pretty-print.c
+++ b/gcc/gimple-pretty-print.c
@@ -911,8 +911,7 @@ dump_gimple_call (pretty_printer *buffer, gcall *gs, int spc,
if (TREE_CODE (fn) == FUNCTION_DECL && decl_is_tm_clone (fn))
pp_string (buffer, " [tm-clone]");
if (TREE_CODE (fn) == FUNCTION_DECL
- && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_START
+ && fndecl_built_in_p (fn, BUILT_IN_TM_START)
&& gimple_call_num_args (gs) > 0)
{
tree t = gimple_call_arg (gs, 0);
@@ -992,7 +991,7 @@ dump_gimple_switch (pretty_printer *buffer, gswitch *gs, int spc,
if (cfun && cfun->cfg)
{
- basic_block dest = label_to_block (label);
+ basic_block dest = label_to_block (cfun, label);
if (dest)
{
edge label_edge = find_edge (gimple_bb (gs), dest);
diff --git a/gcc/gimple-ssa-evrp-analyze.c b/gcc/gimple-ssa-evrp-analyze.c
index b9dcf906ff7..e9afa80e191 100644
--- a/gcc/gimple-ssa-evrp-analyze.c
+++ b/gcc/gimple-ssa-evrp-analyze.c
@@ -119,12 +119,7 @@ evrp_range_analyzer::set_ssa_range_info (tree lhs, value_range *vr)
wi::to_wide (vr->max));
}
else if (POINTER_TYPE_P (TREE_TYPE (lhs))
- && ((vr->type == VR_RANGE
- && range_includes_zero_p (vr->min,
- vr->max) == 0)
- || (vr->type == VR_ANTI_RANGE
- && range_includes_zero_p (vr->min,
- vr->max) == 1)))
+ && range_includes_zero_p (vr) == 0)
set_ptr_nonnull (lhs);
}
diff --git a/gcc/gimple-ssa-warn-alloca.c b/gcc/gimple-ssa-warn-alloca.c
index 434770772ae..4d5aed866e1 100644
--- a/gcc/gimple-ssa-warn-alloca.c
+++ b/gcc/gimple-ssa-warn-alloca.c
@@ -38,6 +38,8 @@ along with GCC; see the file COPYING3. If not see
#include "cfgloop.h"
#include "intl.h"
+static unsigned HOST_WIDE_INT adjusted_warn_limit (bool);
+
const pass_data pass_data_walloca = {
GIMPLE_PASS,
"walloca",
@@ -82,7 +84,9 @@ pass_walloca::gate (function *fun ATTRIBUTE_UNUSED)
// Warning is disabled when its size limit is greater than PTRDIFF_MAX
// for the target maximum, which makes the limit negative since when
// represented in signed HOST_WIDE_INT.
- return warn_alloca_limit >= 0 || warn_vla_limit >= 0;
+ unsigned HOST_WIDE_INT max = tree_to_uhwi (TYPE_MAX_VALUE (ptrdiff_type_node));
+ return (adjusted_warn_limit (false) <= max
+ || adjusted_warn_limit (true) <= max);
}
// Possible problematic uses of alloca.
@@ -127,6 +131,30 @@ struct alloca_type_and_limit {
alloca_type_and_limit (enum alloca_type type) : type(type) { }
};
+/* Return the value of the argument N to -Walloca-larger-than= or
+ -Wvla-larger-than= adjusted for the target data model so that
+ when N == HOST_WIDE_INT_MAX, the adjusted value is set to
+ PTRDIFF_MAX on the target. This is done to prevent warnings
+ for unknown/unbounded allocations in the "permissive mode"
+ while still diagnosing excessive and necessarily invalid
+ allocations. */
+
+static unsigned HOST_WIDE_INT
+adjusted_warn_limit (bool idx)
+{
+ static HOST_WIDE_INT limits[2];
+ if (limits[idx])
+ return limits[idx];
+
+ limits[idx] = idx ? warn_vla_limit : warn_alloca_limit;
+ if (limits[idx] != HOST_WIDE_INT_MAX)
+ return limits[idx];
+
+ limits[idx] = tree_to_shwi (TYPE_MAX_VALUE (ptrdiff_type_node));
+ return limits[idx];
+}
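(Editorial worked example, assuming a 32-bit target where PTRDIFF_MAX is 0x7fffffff while HOST_WIDE_INT has 64 bits.)
/* -Walloca-larger-than=4096          -> adjusted_warn_limit (false) == 4096
   limit given as HOST_WIDE_INT_MAX   -> adjusted_warn_limit (false) == 0x7fffffff
   so the "permissive" setting still diagnoses allocations provably larger
   than any object the target can represent.  */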
+
+
// NOTE: When we get better range info, this entire function becomes
// irrelevant, as it should be possible to get range info for an SSA
// name at any point in the program.
@@ -309,11 +337,7 @@ alloca_call_type (gimple *stmt, bool is_vla, tree *invalid_casted_type)
// Adjust warn_alloca_max_size for VLAs, by taking the underlying
// type into account.
- unsigned HOST_WIDE_INT max_size;
- if (is_vla)
- max_size = warn_vla_limit;
- else
- max_size = warn_alloca_limit;
+ unsigned HOST_WIDE_INT max_size = adjusted_warn_limit (is_vla);
// Check for the obviously bounded case.
if (TREE_CODE (len) == INTEGER_CST)
@@ -510,6 +534,8 @@ pass_walloca::execute (function *fun)
struct alloca_type_and_limit t
= alloca_call_type (stmt, is_vla, &invalid_casted_type);
+ unsigned HOST_WIDE_INT adjusted_alloca_limit
+ = adjusted_warn_limit (false);
// Even if we think the alloca call is OK, make sure it's not in a
// loop, except for a VLA, since VLAs are guaranteed to be cleaned
// up when they go out of scope, including in a loop.
@@ -519,8 +545,7 @@ pass_walloca::execute (function *fun)
is less than the maximum valid object size. */
const offset_int maxobjsize
= wi::to_offset (max_object_size ());
- if ((unsigned HOST_WIDE_INT) warn_alloca_limit
- < maxobjsize.to_uhwi ())
+ if (adjusted_alloca_limit < maxobjsize.to_uhwi ())
t = alloca_type_and_limit (ALLOCA_IN_LOOP);
}
@@ -544,7 +569,8 @@ pass_walloca::execute (function *fun)
print_decu (t.limit, buff);
inform (loc, G_("limit is %wu bytes, but argument "
"may be as large as %s"),
- is_vla ? warn_vla_limit : warn_alloca_limit, buff);
+ is_vla ? warn_vla_limit : adjusted_alloca_limit,
+ buff);
}
}
break;
@@ -559,7 +585,7 @@ pass_walloca::execute (function *fun)
{
print_decu (t.limit, buff);
inform (loc, G_("limit is %wu bytes, but argument is %s"),
- is_vla ? warn_vla_limit : warn_alloca_limit,
+ is_vla ? warn_vla_limit : adjusted_alloca_limit,
buff);
}
}
diff --git a/gcc/gimple-ssa-warn-restrict.c b/gcc/gimple-ssa-warn-restrict.c
index 977dd860ef7..ea30b7108f8 100644
--- a/gcc/gimple-ssa-warn-restrict.c
+++ b/gcc/gimple-ssa-warn-restrict.c
@@ -1731,7 +1731,7 @@ wrestrict_dom_walker::check_call (gimple *call)
return;
tree func = gimple_call_fndecl (call);
- if (!func || DECL_BUILT_IN_CLASS (func) != BUILT_IN_NORMAL)
+ if (!func || !fndecl_built_in_p (func, BUILT_IN_NORMAL))
return;
/* Argument number to extract from the call (depends on the built-in
diff --git a/gcc/gimple.c b/gcc/gimple.c
index 8d56a966cc1..e3e651b1e61 100644
--- a/gcc/gimple.c
+++ b/gcc/gimple.c
@@ -376,7 +376,7 @@ gimple_build_call_from_tree (tree t, tree fnptrtype)
gimple_call_set_must_tail (call, CALL_EXPR_MUST_TAIL_CALL (t));
gimple_call_set_return_slot_opt (call, CALL_EXPR_RETURN_SLOT_OPT (t));
if (fndecl
- && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
+ && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
&& ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (fndecl)))
gimple_call_set_alloca_for_var (call, CALL_ALLOCA_FOR_VAR_P (t));
else
@@ -2681,8 +2681,7 @@ gimple_call_builtin_p (const gimple *stmt, enum built_in_function code)
tree fndecl;
if (is_gimple_call (stmt)
&& (fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
- && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (fndecl) == code)
+ && fndecl_built_in_p (fndecl, code))
return gimple_builtin_call_types_compatible_p (stmt, fndecl);
return false;
}
@@ -2701,7 +2700,7 @@ gimple_call_combined_fn (const gimple *stmt)
tree fndecl = gimple_call_fndecl (stmt);
if (fndecl
- && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
+ && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
&& gimple_builtin_call_types_compatible_p (stmt, fndecl))
return as_combined_fn (DECL_FUNCTION_CODE (fndecl));
}
diff --git a/gcc/gimplify.c b/gcc/gimplify.c
index e35137aec2c..dbd0f0ebd0c 100644
--- a/gcc/gimplify.c
+++ b/gcc/gimplify.c
@@ -3209,8 +3209,7 @@ gimplify_call_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
transform all calls in the same manner as the expanders do, but
we do transform most of them. */
fndecl = get_callee_fndecl (*expr_p);
- if (fndecl
- && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
switch (DECL_FUNCTION_CODE (fndecl))
{
CASE_BUILT_IN_ALLOCA:
@@ -3245,7 +3244,7 @@ gimplify_call_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
default:
;
}
- if (fndecl && DECL_BUILT_IN (fndecl))
+ if (fndecl && fndecl_built_in_p (fndecl))
{
tree new_tree = fold_call_expr (input_location, *expr_p, !want_value);
if (new_tree && new_tree != *expr_p)
@@ -3297,9 +3296,7 @@ gimplify_call_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
tree last_arg_fndecl = get_callee_fndecl (last_arg);
if (last_arg_fndecl
- && TREE_CODE (last_arg_fndecl) == FUNCTION_DECL
- && DECL_BUILT_IN_CLASS (last_arg_fndecl) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (last_arg_fndecl) == BUILT_IN_VA_ARG_PACK)
+ && fndecl_built_in_p (last_arg_fndecl, BUILT_IN_VA_ARG_PACK))
{
tree call = *expr_p;
@@ -3773,8 +3770,7 @@ gimple_boolify (tree expr)
/* For __builtin_expect ((long) (x), y) recurse into x as well
if x is truth_value_p. */
if (fn
- && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (fn) == BUILT_IN_EXPECT
+ && fndecl_built_in_p (fn, BUILT_IN_EXPECT)
&& call_expr_nargs (call) == 2)
{
tree arg = CALL_EXPR_ARG (call, 0);
@@ -5719,8 +5715,7 @@ gimplify_modify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
STRIP_USELESS_TYPE_CONVERSION (CALL_EXPR_FN (*from_p));
tree fndecl = get_callee_fndecl (*from_p);
if (fndecl
- && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_EXPECT
+ && fndecl_built_in_p (fndecl, BUILT_IN_EXPECT)
&& call_expr_nargs (*from_p) == 3)
call_stmt = gimple_build_call_internal (IFN_BUILTIN_EXPECT, 3,
CALL_EXPR_ARG (*from_p, 0),
@@ -5978,7 +5973,7 @@ gimplify_addr_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
being taken (we can unify those cases here) then we can mark
the builtin for implicit generation by GCC. */
if (TREE_CODE (op0) == FUNCTION_DECL
- && DECL_BUILT_IN_CLASS (op0) == BUILT_IN_NORMAL
+ && fndecl_built_in_p (op0, BUILT_IN_NORMAL)
&& builtin_decl_declared_p (DECL_FUNCTION_CODE (op0)))
set_builtin_decl_implicit_p (DECL_FUNCTION_CODE (op0), true);
diff --git a/gcc/go/ChangeLog b/gcc/go/ChangeLog
index a392ed89bad..b032a27f6f8 100644
--- a/gcc/go/ChangeLog
+++ b/gcc/go/ChangeLog
@@ -1,3 +1,9 @@
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ * go-gcc.cc (Gcc_backend::call_expression): Use new function
+ fndecl_built_in_p and remove check for FUNCTION_DECL if
+ possible.
+
2018-07-20 Martin Sebor <msebor@redhat.com>
PR middle-end/82063
@@ -468,7 +474,7 @@
2015-07-12 Aldy Hernandez <aldyh@redhat.com>
* gofrontend/backend.h: Fix double word typos.
- * gofrontend/expressions.cc: Same.
+ * gofrontend/expressions.cc: Same.
* gospec.c: Same.
2015-07-07 Andrew MacLeod <amacleod@redhat.com>
diff --git a/gcc/go/go-gcc.cc b/gcc/go/go-gcc.cc
index 76a2026f1e0..1a449b7cc47 100644
--- a/gcc/go/go-gcc.cc
+++ b/gcc/go/go-gcc.cc
@@ -1948,8 +1948,8 @@ Gcc_backend::call_expression(Bfunction*, // containing fcn for call
tree excess_type = NULL_TREE;
if (optimize
&& TREE_CODE(fndecl) == FUNCTION_DECL
- && DECL_IS_BUILTIN(fndecl)
- && DECL_BUILT_IN_CLASS(fndecl) == BUILT_IN_NORMAL
+ && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
+ && DECL_IS_BUILTIN (fndecl)
&& nargs > 0
&& ((SCALAR_FLOAT_TYPE_P(rettype)
&& SCALAR_FLOAT_TYPE_P(TREE_TYPE(args[0])))
diff --git a/gcc/go/gofrontend/MERGE b/gcc/go/gofrontend/MERGE
index 51098e77954..9495882b5da 100644
--- a/gcc/go/gofrontend/MERGE
+++ b/gcc/go/gofrontend/MERGE
@@ -1,4 +1,4 @@
-274c88df4d6f9360dcd657b6e069a3b5a1d37a90
+da249ffd264154cc992e76ff03f91f700d3bf53e
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
diff --git a/gcc/go/gofrontend/expressions.cc b/gcc/go/gofrontend/expressions.cc
index 2c2ae1cf1c6..143f0a73df6 100644
--- a/gcc/go/gofrontend/expressions.cc
+++ b/gcc/go/gofrontend/expressions.cc
@@ -9635,13 +9635,9 @@ Call_expression::do_lower(Gogo* gogo, Named_object* function,
"__builtin_return_address",
0);
}
- else if (this->args_ != NULL
- && this->args_->size() == 1
+ else if ((this->args_ == NULL || this->args_->size() == 0)
&& n == "getcallersp")
{
- // The actual argument to getcallersp is always the
- // address of a parameter; we don't need that for the
- // GCC builtin function, so we just ignore it.
static Named_object* builtin_frame_address;
return this->lower_to_builtin(&builtin_frame_address,
"__builtin_frame_address",
diff --git a/gcc/go/gofrontend/types.cc b/gcc/go/gofrontend/types.cc
index 2fb67064471..00098dabef8 100644
--- a/gcc/go/gofrontend/types.cc
+++ b/gcc/go/gofrontend/types.cc
@@ -7975,12 +7975,11 @@ Map_type::make_map_type_descriptor_type()
Type* bool_type = Type::lookup_bool_type();
Struct_type* sf =
- Type::make_builtin_struct_type(12,
+ Type::make_builtin_struct_type(11,
"", tdt,
"key", ptdt,
"elem", ptdt,
"bucket", ptdt,
- "hmap", ptdt,
"keysize", uint8_type,
"indirectkey", bool_type,
"valuesize", uint8_type,
@@ -8065,11 +8064,6 @@ Map_type::do_type_descriptor(Gogo* gogo, Named_type* name)
vals->push_back(Expression::make_type_descriptor(bucket_type, bloc));
++p;
- go_assert(p->is_field_name("hmap"));
- Type* hmap_type = this->hmap_type(bucket_type);
- vals->push_back(Expression::make_type_descriptor(hmap_type, bloc));
-
- ++p;
go_assert(p->is_field_name("keysize"));
if (keysize > Map_type::max_key_size)
vals->push_back(Expression::make_integer_int64(ptrsize, uint8_type, bloc));
diff --git a/gcc/hsa-brig.c b/gcc/hsa-brig.c
index d3efff40453..ca066118ebd 100644
--- a/gcc/hsa-brig.c
+++ b/gcc/hsa-brig.c
@@ -35,8 +35,8 @@ along with GCC; see the file COPYING3. If not see
#include "stor-layout.h"
#include "output.h"
#include "basic-block.h"
-#include "cfg.h"
#include "function.h"
+#include "cfg.h"
#include "fold-const.h"
#include "stringpool.h"
#include "gimple-pretty-print.h"
diff --git a/gcc/hsa-dump.c b/gcc/hsa-dump.c
index 1407475b0c8..96f82647558 100644
--- a/gcc/hsa-dump.c
+++ b/gcc/hsa-dump.c
@@ -27,8 +27,8 @@ along with GCC; see the file COPYING3. If not see
#include "vec.h"
#include "tree.h"
#include "basic-block.h"
-#include "cfg.h"
#include "function.h"
+#include "cfg.h"
#include "dumpfile.h"
#include "gimple-pretty-print.h"
#include "cgraph.h"
diff --git a/gcc/hsa-gen.c b/gcc/hsa-gen.c
index 6595bedac82..69e092ec4fa 100644
--- a/gcc/hsa-gen.c
+++ b/gcc/hsa-gen.c
@@ -3475,7 +3475,6 @@ gen_hsa_insns_for_switch_stmt (gswitch *s, hsa_bb *hbb)
e->flags &= ~EDGE_FALLTHRU;
e->flags |= EDGE_TRUE_VALUE;
- function *func = DECL_STRUCT_FUNCTION (current_function_decl);
tree index_tree = gimple_switch_index (s);
tree lowest = get_switch_low (s);
tree highest = get_switch_high (s);
@@ -3499,9 +3498,7 @@ gen_hsa_insns_for_switch_stmt (gswitch *s, hsa_bb *hbb)
hbb->append_insn (new hsa_insn_cbr (cmp_reg));
- tree default_label = gimple_switch_default_label (s);
- basic_block default_label_bb = label_to_block_fn (func,
- CASE_LABEL (default_label));
+ basic_block default_label_bb = gimple_switch_default_bb (cfun, s);
if (!gimple_seq_empty_p (phi_nodes (default_label_bb)))
{
@@ -3536,7 +3533,7 @@ gen_hsa_insns_for_switch_stmt (gswitch *s, hsa_bb *hbb)
for (unsigned i = 1; i < labels; i++)
{
tree label = gimple_switch_label (s, i);
- basic_block bb = label_to_block_fn (func, CASE_LABEL (label));
+ basic_block bb = label_to_block (cfun, CASE_LABEL (label));
unsigned HOST_WIDE_INT sub_low
= tree_to_uhwi (int_const_binop (MINUS_EXPR, CASE_LOW (label), lowest));
@@ -5303,8 +5300,7 @@ gen_hsa_insns_for_call (gimple *stmt, hsa_bb *hbb)
tree function_decl = gimple_call_fndecl (stmt);
/* Prefetch pass can create type-mismatching prefetch builtin calls which
fail the gimple_call_builtin_p test above. Handle them here. */
- if (DECL_BUILT_IN_CLASS (function_decl)
- && DECL_FUNCTION_CODE (function_decl) == BUILT_IN_PREFETCH)
+ if (fndecl_built_in_p (function_decl, BUILT_IN_PREFETCH))
return;
if (function_decl == NULL_TREE)
@@ -6290,12 +6286,11 @@ LD: hard_work_3 ();
static bool
convert_switch_statements (void)
{
- function *func = DECL_STRUCT_FUNCTION (current_function_decl);
basic_block bb;
bool modified_cfg = false;
- FOR_EACH_BB_FN (bb, func)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi = gsi_last_bb (bb);
if (gsi_end_p (gsi))
@@ -6318,7 +6313,7 @@ convert_switch_statements (void)
tree index_type = TREE_TYPE (index);
tree default_label = gimple_switch_default_label (s);
basic_block default_label_bb
- = label_to_block_fn (func, CASE_LABEL (default_label));
+ = label_to_block (cfun, CASE_LABEL (default_label));
basic_block cur_bb = bb;
auto_vec <edge> new_edges;
@@ -6330,8 +6325,7 @@ convert_switch_statements (void)
should be fixed after we add new collection of edges. */
for (unsigned i = 0; i < labels; i++)
{
- tree label = gimple_switch_label (s, i);
- basic_block label_bb = label_to_block_fn (func, CASE_LABEL (label));
+ basic_block label_bb = gimple_switch_label_bb (cfun, s, i);
edge e = find_edge (bb, label_bb);
edge_counts.safe_push (e->count ());
edge_probabilities.safe_push (e->probability);
@@ -6413,8 +6407,7 @@ convert_switch_statements (void)
gsi_insert_before (&cond_gsi, c, GSI_SAME_STMT);
- basic_block label_bb
- = label_to_block_fn (func, CASE_LABEL (label));
+ basic_block label_bb = label_to_block (cfun, CASE_LABEL (label));
edge new_edge = make_edge (cur_bb, label_bb, EDGE_TRUE_VALUE);
profile_probability prob_sum = sum_slice <profile_probability>
(edge_probabilities, i, labels, profile_probability::never ())
@@ -6481,10 +6474,9 @@ convert_switch_statements (void)
static void
expand_builtins ()
{
- function *func = DECL_STRUCT_FUNCTION (current_function_decl);
basic_block bb;
- FOR_EACH_BB_FN (bb, func)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
gsi_next (&gsi))
diff --git a/gcc/hsa-regalloc.c b/gcc/hsa-regalloc.c
index f402587408d..819f680d1bc 100644
--- a/gcc/hsa-regalloc.c
+++ b/gcc/hsa-regalloc.c
@@ -27,9 +27,9 @@ along with GCC; see the file COPYING3. If not see
#include "tree.h"
#include "dominance.h"
#include "basic-block.h"
-#include "cfg.h"
-#include "cfganal.h"
#include "function.h"
+#include "cfganal.h"
+#include "cfg.h"
#include "bitmap.h"
#include "dumpfile.h"
#include "cgraph.h"
diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
index 42dd4cc2904..4838194cd73 100644
--- a/gcc/ipa-cp.c
+++ b/gcc/ipa-cp.c
@@ -638,8 +638,7 @@ determine_versionability (struct cgraph_node *node,
if (DECL_EXTERNAL (node->decl))
for (cgraph_edge *edge = node->callees; !reason && edge;
edge = edge->next_callee)
- if (DECL_BUILT_IN (edge->callee->decl)
- && DECL_BUILT_IN_CLASS (edge->callee->decl) == BUILT_IN_NORMAL)
+ if (fndecl_built_in_p (edge->callee->decl, BUILT_IN_NORMAL))
{
if (DECL_FUNCTION_CODE (edge->callee->decl) == BUILT_IN_VA_ARG_PACK)
reason = "external function which calls va_arg_pack";
diff --git a/gcc/ipa-fnsummary.c b/gcc/ipa-fnsummary.c
index a8fc2c2df9a..62095c6cf6f 100644
--- a/gcc/ipa-fnsummary.c
+++ b/gcc/ipa-fnsummary.c
@@ -1291,7 +1291,7 @@ set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
tree min, max;
predicate p;
- e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
+ e = gimple_switch_edge (cfun, last, case_idx);
min = CASE_LOW (cl);
max = CASE_HIGH (cl);
@@ -2455,10 +2455,8 @@ compute_fn_summary (struct cgraph_node *node, bool early)
for (e = node->callees; e; e = e->next_callee)
{
tree cdecl = e->callee->decl;
- if (DECL_BUILT_IN (cdecl)
- && DECL_BUILT_IN_CLASS (cdecl) == BUILT_IN_NORMAL
- && (DECL_FUNCTION_CODE (cdecl) == BUILT_IN_APPLY_ARGS
- || DECL_FUNCTION_CODE (cdecl) == BUILT_IN_VA_START))
+ if (fndecl_built_in_p (cdecl, BUILT_IN_APPLY_ARGS)
+ || fndecl_built_in_p (cdecl, BUILT_IN_VA_START))
break;
}
node->local.can_change_signature = !e;
diff --git a/gcc/ipa-param-manipulation.c b/gcc/ipa-param-manipulation.c
index 1ab1fcccdae..1e3a92a125f 100644
--- a/gcc/ipa-param-manipulation.c
+++ b/gcc/ipa-param-manipulation.c
@@ -218,7 +218,7 @@ ipa_modify_formal_parameters (tree fndecl, ipa_parm_adjustment_vec adjustments)
}
/* When signature changes, we need to clear builtin info. */
- if (DECL_BUILT_IN (fndecl))
+ if (fndecl_built_in_p (fndecl))
{
DECL_BUILT_IN_CLASS (fndecl) = NOT_BUILT_IN;
DECL_FUNCTION_CODE (fndecl) = (enum built_in_function) 0;
diff --git a/gcc/ipa-split.c b/gcc/ipa-split.c
index 0e6440f8997..38f5bcf00a6 100644
--- a/gcc/ipa-split.c
+++ b/gcc/ipa-split.c
@@ -899,8 +899,7 @@ visit_bb (basic_block bb, basic_block return_bb,
/* Check builtins that prevent splitting. */
if (gimple_code (stmt) == GIMPLE_CALL
&& (decl = gimple_call_fndecl (stmt)) != NULL_TREE
- && DECL_BUILT_IN (decl)
- && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
+ && fndecl_built_in_p (decl, BUILT_IN_NORMAL))
switch (DECL_FUNCTION_CODE (decl))
{
/* FIXME: once we will allow passing non-parm values to split part,
@@ -1347,7 +1346,7 @@ split_function (basic_block return_bb, struct split_point *split_point,
/* For usual cloning it is enough to clear builtin only when signature
changes. For partial inlining we however can not expect the part
of builtin implementation to have same semantic as the whole. */
- if (DECL_BUILT_IN (node->decl))
+ if (fndecl_built_in_p (node->decl))
{
DECL_BUILT_IN_CLASS (node->decl) = NOT_BUILT_IN;
DECL_FUNCTION_CODE (node->decl) = (enum built_in_function) 0;
diff --git a/gcc/ipa-utils.h b/gcc/ipa-utils.h
index 1609ac14d7f..98f2a75cd81 100644
--- a/gcc/ipa-utils.h
+++ b/gcc/ipa-utils.h
@@ -179,22 +179,24 @@ polymorphic_type_binfo_p (const_tree binfo)
inline bool
type_with_linkage_p (const_tree t)
{
- if (!TYPE_NAME (t) || TREE_CODE (TYPE_NAME (t)) != TYPE_DECL
- || !TYPE_STUB_DECL (t))
+ if (!TYPE_NAME (t) || TREE_CODE (TYPE_NAME (t)) != TYPE_DECL)
+ return false;
+
+ /* To support -fno-lto-odr-type-merging, recognize types with vtables
+ to have linkage. */
+ if (RECORD_OR_UNION_TYPE_P (t)
+ && TYPE_BINFO (t) && BINFO_VTABLE (TYPE_BINFO (t)))
+ return true;
+
+ /* After free_lang_data has run, and with -flto-odr-type-merging, we can
+ recognize types with linkage by the presence of a mangled name. */
+ if (DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t)))
+ return true;
+
+ /* If free_lang_data has not run, check whether the type indeed looks
+ like a C++ type with linkage. */
+ if (in_lto_p || !TYPE_STUB_DECL (t))
return false;
- /* In LTO do not get confused by non-C++ produced types or types built
- with -fno-lto-odr-type-merigng. */
- if (in_lto_p)
- {
- /* To support -fno-lto-odr-type-merigng recognize types with vtables
- to have linkage. */
- if (RECORD_OR_UNION_TYPE_P (t)
- && TYPE_BINFO (t) && BINFO_VTABLE (TYPE_BINFO (t)))
- return true;
- /* With -flto-odr-type-merging C++ FE specify mangled names
- for all types with the linkage. */
- return DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t));
- }
if (!RECORD_OR_UNION_TYPE_P (t) && TREE_CODE (t) != ENUMERAL_TYPE)
return false;
@@ -214,18 +216,16 @@ type_in_anonymous_namespace_p (const_tree t)
{
gcc_checking_assert (type_with_linkage_p (t));
- if (!TREE_PUBLIC (TYPE_STUB_DECL (t)))
- {
- /* C++ FE uses magic <anon> as assembler names of anonymous types.
- verify that this match with type_in_anonymous_namespace_p. */
- gcc_checking_assert (!in_lto_p
- || !DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t))
- || !strcmp ("<anon>",
- IDENTIFIER_POINTER
- (DECL_ASSEMBLER_NAME (TYPE_NAME (t)))));
- return true;
- }
- return false;
+ /* free_lang_data clears TYPE_STUB_DECL but sets the assembler name to
+ "<anon>". */
+ if (DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t)))
+ return !strcmp ("<anon>",
+ IDENTIFIER_POINTER
+ (DECL_ASSEMBLER_NAME (TYPE_NAME (t))));
+ else if (!TYPE_STUB_DECL (t))
+ return false;
+ else
+ return !TREE_PUBLIC (TYPE_STUB_DECL (t));
}
/* Return true of T is type with One Definition Rule info attached.
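
The restructured predicates above are meant to give the same answer before
and after free_lang_data: a mangled assembler name, when present, decides
the question, with "<anon>" marking anonymous-namespace types.  A toy
sketch of that decision order, using a hypothetical struct in place of
GCC's trees.

#include <cassert>
#include <cstring>

/* Hypothetical stand-in for the pieces of a type the predicate reads.  */
struct toy_type
{
  const char *assembler_name;	/* mangled name, or NULL if unset */
  bool has_stub_decl;		/* TYPE_STUB_DECL still present?  */
  bool stub_decl_public;	/* TREE_PUBLIC of the stub decl */
};

/* Mirror of the rewritten decision order: the mangled name wins when set,
   with "<anon>" marking anonymity even after free_lang_data has dropped
   the stub decl; otherwise fall back to the stub decl's publicness.  */
static bool
in_anonymous_namespace_p (const toy_type &t)
{
  if (t.assembler_name)
    return std::strcmp (t.assembler_name, "<anon>") == 0;
  else if (!t.has_stub_decl)
    return false;
  else
    return !t.stub_decl_public;
}

int
main ()
{
  toy_type anon_after_fld = { "<anon>", false, false };
  toy_type mangled = { "_ZTS3Foo", true, true };
  toy_type anon_before_fld = { 0, true, false };
  toy_type public_before_fld = { 0, true, true };
  assert (in_anonymous_namespace_p (anon_after_fld));
  assert (!in_anonymous_namespace_p (mangled));
  assert (in_anonymous_namespace_p (anon_before_fld));
  assert (!in_anonymous_namespace_p (public_before_fld));
  return 0;
}
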
diff --git a/gcc/ipa-visibility.c b/gcc/ipa-visibility.c
index 907dc9d0e2b..000207fa31b 100644
--- a/gcc/ipa-visibility.c
+++ b/gcc/ipa-visibility.c
@@ -203,7 +203,7 @@ cgraph_externally_visible_p (struct cgraph_node *node,
using the implicit built-in declarations anymore. Similarly this enables
us to remove them as unreachable before actual calls may appear during
expansion or folding. */
- if (DECL_BUILT_IN (node->decl))
+ if (fndecl_built_in_p (node->decl))
return true;
/* If linker counts on us, we must preserve the function. */
diff --git a/gcc/jit/ChangeLog b/gcc/jit/ChangeLog
index 22679671a01..40dbde606d1 100644
--- a/gcc/jit/ChangeLog
+++ b/gcc/jit/ChangeLog
@@ -2158,7 +2158,7 @@
2014-12-09 David Malcolm <dmalcolm@redhat.com>
- PR jit/64206
+ PR jit/64206
* Make-lang.in (jit_OBJS): Add jit/jit-tempdir.o.
* jit-common.h (gcc::jit::tempdir): New forward decl.
* jit-playback.c: Include jit-tempdir.h.
diff --git a/gcc/lto-cgraph.c b/gcc/lto-cgraph.c
index d5e390cb5f4..1e6a7adeaa2 100644
--- a/gcc/lto-cgraph.c
+++ b/gcc/lto-cgraph.c
@@ -1266,7 +1266,7 @@ input_node (struct lto_file_decl_data *file_data,
have already been read will have their tag stored in the 'aux'
field. Since built-in functions can be referenced in multiple
functions, they are expected to be read more than once. */
- if (node->aux && !DECL_BUILT_IN (node->decl))
+ if (node->aux && !fndecl_built_in_p (node->decl))
internal_error ("bytecode stream: found multiple instances of cgraph "
"node with uid %d", node->get_uid ());
diff --git a/gcc/lto-opts.c b/gcc/lto-opts.c
index 09ec7c057e6..dbb41f6c8b3 100644
--- a/gcc/lto-opts.c
+++ b/gcc/lto-opts.c
@@ -78,6 +78,21 @@ lto_write_options (void)
&& !global_options.x_flag_openacc)
append_to_collect_gcc_options (&temporary_obstack, &first_p,
"-fno-openacc");
+ /* Append the PIC/PIE mode because its default depends on the target and
+ it is subject to merging in lto-wrapper. */
+ if (!global_options_set.x_flag_pic && !global_options_set.x_flag_pie)
+ {
+ append_to_collect_gcc_options (&temporary_obstack, &first_p,
+ global_options.x_flag_pic == 2
+ ? "-fPIC"
+ : global_options.x_flag_pic == 1
+ ? "-fpic"
+ : global_options.x_flag_pie == 2
+ ? "-fPIE"
+ : global_options.x_flag_pie == 1
+ ? "-fpie"
+ : "-fno-pie");
+ }
/* Append options from target hook and store them to offload_lto section. */
if (lto_stream_offload_p)
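
The chained conditional added above records the effective PIC/PIE level as
a single option string in the LTO options section.  A self-contained sketch
of that mapping, assuming the usual 0/1/2 flag levels; this is not GCC's
option machinery.

#include <cassert>
#include <cstring>

/* Prefer the PIC level, then the PIE level, falling back to -fno-pie,
   mirroring the priority order of the new code in lto_write_options.  */
static const char *
pic_pie_option (int flag_pic, int flag_pie)
{
  return flag_pic == 2 ? "-fPIC"
	 : flag_pic == 1 ? "-fpic"
	 : flag_pie == 2 ? "-fPIE"
	 : flag_pie == 1 ? "-fpie"
	 : "-fno-pie";
}

int
main ()
{
  assert (!std::strcmp (pic_pie_option (2, 0), "-fPIC"));
  assert (!std::strcmp (pic_pie_option (0, 1), "-fpie"));
  assert (!std::strcmp (pic_pie_option (0, 0), "-fno-pie"));
  return 0;
}
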
diff --git a/gcc/lto-streamer-out.c b/gcc/lto-streamer-out.c
index 9e28d678342..21ac1a46659 100644
--- a/gcc/lto-streamer-out.c
+++ b/gcc/lto-streamer-out.c
@@ -837,7 +837,7 @@ DFS::DFS_write_tree_body (struct output_block *ob,
if (CODE_CONTAINS_STRUCT (code, TS_FUNCTION_DECL))
{
- DFS_follow_tree_edge (DECL_VINDEX (expr));
+ gcc_checking_assert (DECL_VINDEX (expr) == NULL);
DFS_follow_tree_edge (DECL_FUNCTION_PERSONALITY (expr));
DFS_follow_tree_edge (DECL_FUNCTION_SPECIFIC_TARGET (expr));
DFS_follow_tree_edge (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (expr));
@@ -857,7 +857,9 @@ DFS::DFS_write_tree_body (struct output_block *ob,
DFS_follow_tree_edge (TYPE_CONTEXT (expr));
/* TYPE_CANONICAL is re-computed during type merging, so no need
to follow it here. */
- DFS_follow_tree_edge (TYPE_STUB_DECL (expr));
+ /* Do not stream TYPE_STUB_DECL; it is not needed by LTO but currently
+ it cannot be freed by free_lang_data without triggering ICEs in
+ langhooks. */
}
if (CODE_CONTAINS_STRUCT (code, TS_TYPE_NON_COMMON))
@@ -1253,7 +1255,6 @@ hash_tree (struct streamer_tree_cache_d *cache, hash_map<tree, hashval_t> *map,
if (CODE_CONTAINS_STRUCT (code, TS_FUNCTION_DECL))
{
- visit (DECL_VINDEX (t));
visit (DECL_FUNCTION_PERSONALITY (t));
visit (DECL_FUNCTION_SPECIFIC_TARGET (t));
visit (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (t));
@@ -1270,7 +1271,6 @@ hash_tree (struct streamer_tree_cache_d *cache, hash_map<tree, hashval_t> *map,
;
else
visit (TYPE_CONTEXT (t));
- visit (TYPE_STUB_DECL (t));
}
if (CODE_CONTAINS_STRUCT (code, TS_TYPE_NON_COMMON))
@@ -2618,7 +2618,8 @@ write_symbol (struct streamer_tree_cache_d *cache,
unsigned char c;
gcc_checking_assert (TREE_PUBLIC (t)
- && !is_builtin_fn (t)
+ && (TREE_CODE (t) != FUNCTION_DECL
+ || !fndecl_built_in_p (t))
&& !DECL_ABSTRACT_P (t)
&& (!VAR_P (t) || !DECL_HARD_REGISTER (t)));
diff --git a/gcc/lto-wrapper.c b/gcc/lto-wrapper.c
index 9cfdfae24e1..2b9d47e5143 100644
--- a/gcc/lto-wrapper.c
+++ b/gcc/lto-wrapper.c
@@ -409,6 +409,11 @@ merge_and_complain (struct cl_decoded_option **decoded_options,
It is a common mistake to mix few -fPIC compiled objects into otherwise
non-PIC code. We do not want to build everything with PIC then.
+ Similarly we merge PIE options, however in addition we keep
+ -fPIC + -fPIE = -fPIE
+ -fpic + -fPIE = -fpie
+ -fPIC/-fpic + -fpie = -fpie
+
It would be good to warn on mismatches, but it is bit hard to do as
we do not know what nothing translates to. */
@@ -416,11 +421,38 @@ merge_and_complain (struct cl_decoded_option **decoded_options,
if ((*decoded_options)[j].opt_index == OPT_fPIC
|| (*decoded_options)[j].opt_index == OPT_fpic)
{
- if (!pic_option
- || (pic_option->value > 0) != ((*decoded_options)[j].value > 0))
- remove_option (decoded_options, j, decoded_options_count);
- else if (pic_option->opt_index == OPT_fPIC
- && (*decoded_options)[j].opt_index == OPT_fpic)
+ /* -fno-pic in one unit implies -fno-pic everywhere. */
+ if ((*decoded_options)[j].value == 0)
+ j++;
+ /* If we have no pic option or merge in -fno-pic, we still may turn
+ existing pic/PIC mode into pie/PIE if -fpie/-fPIE is present. */
+ else if ((pic_option && pic_option->value == 0)
+ || !pic_option)
+ {
+ if (pie_option)
+ {
+ bool big = (*decoded_options)[j].opt_index == OPT_fPIC
+ && pie_option->opt_index == OPT_fPIE;
+ (*decoded_options)[j].opt_index = big ? OPT_fPIE : OPT_fpie;
+ if (pie_option->value)
+ (*decoded_options)[j].canonical_option[0] = big ? "-fPIE" : "-fpie";
+ else
+ (*decoded_options)[j].canonical_option[0] = big ? "-fno-pie" : "-fno-pie";
+ (*decoded_options)[j].value = pie_option->value;
+ j++;
+ }
+ else if (pic_option)
+ {
+ (*decoded_options)[j] = *pic_option;
+ j++;
+ }
+ /* We do not know whether the target defaults to pic or not, so just
+ remove the option if it is missing in one unit but enabled in another. */
+ else
+ remove_option (decoded_options, j, decoded_options_count);
+ }
+ else if (pic_option->opt_index == OPT_fpic
+ && (*decoded_options)[j].opt_index == OPT_fPIC)
{
(*decoded_options)[j] = *pic_option;
j++;
@@ -431,11 +463,42 @@ merge_and_complain (struct cl_decoded_option **decoded_options,
else if ((*decoded_options)[j].opt_index == OPT_fPIE
|| (*decoded_options)[j].opt_index == OPT_fpie)
{
- if (!pie_option
- || pie_option->value != (*decoded_options)[j].value)
- remove_option (decoded_options, j, decoded_options_count);
- else if (pie_option->opt_index == OPT_fPIE
- && (*decoded_options)[j].opt_index == OPT_fpie)
+ /* -fno-pie in one unit implies -fno-pie everywhere. */
+ if ((*decoded_options)[j].value == 0)
+ j++;
+ /* If we have no pie option or merge in -fno-pie, we still preserve
+ PIE/pie if pic/PIC is present. */
+ else if ((pie_option && pie_option->value == 0)
+ || !pie_option)
+ {
+ /* If -fPIC/-fpic is given, merge it with -fPIE/-fpie. */
+ if (pic_option)
+ {
+ if (pic_option->opt_index == OPT_fpic
+ && (*decoded_options)[j].opt_index == OPT_fPIE)
+ {
+ (*decoded_options)[j].opt_index = OPT_fpie;
+ (*decoded_options)[j].canonical_option[0]
+ = pic_option->value ? "-fpie" : "-fno-pie";
+ }
+ else if (!pic_option->value)
+ (*decoded_options)[j].canonical_option[0] = "-fno-pie";
+ (*decoded_options)[j].value = pic_option->value;
+ j++;
+ }
+ else if (pie_option)
+ {
+ (*decoded_options)[j] = *pie_option;
+ j++;
+ }
+ /* Because we always append pic/PIE options this code path should
+ not happen unless the LTO object was built by old lto1 which
+ did not contain that logic yet. */
+ else
+ remove_option (decoded_options, j, decoded_options_count);
+ }
+ else if (pie_option->opt_index == OPT_fpie
+ && (*decoded_options)[j].opt_index == OPT_fPIE)
{
(*decoded_options)[j] = *pie_option;
j++;
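
The comment in this hunk spells out the merge rules: -fno-pic or -fno-pie
in any unit wins, and mixing PIC with PIE keeps PIE at the smaller of the
two levels (-fPIC + -fPIE = -fPIE, -fpic + -fPIE = -fpie, -fPIC/-fpic +
-fpie = -fpie).  A rough standalone model of those rules; an illustration
only, not lto-wrapper's cl_decoded_option handling.

#include <cassert>
#include <string>

/* One unit's position-independence request: level 0 = off, 1 = small
   model (-fpic/-fpie), 2 = large model (-fPIC/-fPIE); PIE records whether
   the unit asked for PIE rather than plain PIC.  */
struct pi_mode
{
  int level;
  bool pie;
};

/* Merge two units: an explicit "off" anywhere turns the result off;
   otherwise keep the smaller level, and any PIE request makes the result
   PIE.  */
static pi_mode
merge_pi_modes (pi_mode a, pi_mode b)
{
  pi_mode r;
  r.pie = a.pie || b.pie;
  if (a.level == 0 || b.level == 0)
    r.level = 0;
  else
    r.level = a.level < b.level ? a.level : b.level;
  return r;
}

static std::string
spell (pi_mode m)
{
  if (m.level == 0)
    return m.pie ? "-fno-pie" : "-fno-pic";
  if (m.pie)
    return m.level == 2 ? "-fPIE" : "-fpie";
  return m.level == 2 ? "-fPIC" : "-fpic";
}

int
main ()
{
  assert (spell (merge_pi_modes ({ 2, false }, { 2, true })) == "-fPIE");
  assert (spell (merge_pi_modes ({ 1, false }, { 2, true })) == "-fpie");
  assert (spell (merge_pi_modes ({ 2, false }, { 1, true })) == "-fpie");
  assert (spell (merge_pi_modes ({ 0, false }, { 2, false })) == "-fno-pic");
  return 0;
}
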
diff --git a/gcc/lto/ChangeLog b/gcc/lto/ChangeLog
index a92317cb774..f997f03ed88 100644
--- a/gcc/lto/ChangeLog
+++ b/gcc/lto/ChangeLog
@@ -1,3 +1,20 @@
+2018-08-29 Martin Liska <mliska@suse.cz>
+
+ PR bootstrap/87130
+ * lto.c (read_cgraph_and_symbols): Fix thinko, revert
+ to behavior before r263887.
+
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ * lto-lang.c (handle_const_attribute): Use new function
+ fndecl_built_in_p and remove check for FUNCTION_DECL if
+ possible.
+ * lto-symtab.c (lto_symtab_merge_p): Likewise.
+ (lto_symtab_merge_decls_1): Likewise.
+ (lto_symtab_merge_symbols): Likewise.
+ * lto.c (lto_maybe_register_decl): Likewise.
+ (read_cgraph_and_symbols): Likewise.
+
2018-08-21 Tom de Vries <tdevries@suse.de>
* lto.c (lto_main): Call debuginfo_early_start and
@@ -32,12 +49,12 @@
2018-06-20 Martin Liska <mliska@suse.cz>
* lto-symtab.c (lto_symtab_merge_p): Remove not valid
- FIXME comment.
+ FIXME comment.
2018-06-19 Martin Liska <mliska@suse.cz>
* lto-partition.c (add_symbol_to_partition_1): Use symbol_summary::get instead
- of get_create.
+ of get_create.
(undo_partition): Likewise.
(lto_balanced_map): Likewise.
@@ -277,11 +294,11 @@
2017-08-21 Richard Biener <rguenther@suse.de>
- * lto.c (unify_scc): Truncate DIE reference queue for dropped SCCs.
- (lto_read_decls): Process TRANSLATION_UNIT_DECLs. Remove
- TYPE_DECL debug processing, register DIE references from
- prevailing SCCs with the debug machinery.
- (lto_section_with_id): Handle LTO debug sections.
+ * lto.c (unify_scc): Truncate DIE reference queue for dropped SCCs.
+ (lto_read_decls): Process TRANSLATION_UNIT_DECLs. Remove
+ TYPE_DECL debug processing, register DIE references from
+ prevailing SCCs with the debug machinery.
+ (lto_section_with_id): Handle LTO debug sections.
2017-08-16 Nathan Sidwell <nathan@acm.org>
@@ -354,7 +371,7 @@
2017-05-01 Xi Ruoyao <ryxi@stu.xidian.edu.cn>
- PR c++/80038
+ PR c++/80038
* lto-lang.c (lto_init): Set in_lto_p earlier.
2017-04-12 Richard Biener <rguenther@suse.de>
@@ -513,7 +530,7 @@
decls have no assemblernames.
2016-01-19 Martin Liska <mliska@suse.cz>
- Martin Jambor <mjambor@suse.cz>
+ Martin Jambor <mjambor@suse.cz>
* lto-partition.c: Include "hsa.h"
(add_symbol_to_partition_1): Put hsa implementations into the
@@ -2142,7 +2159,7 @@
2012-06-18 Lawrence Crowl <crowl@google.com>
- * lto.c (do_whole_program_analysis): Rename use of TV_PHASE_CGRAPH to
+ * lto.c (do_whole_program_analysis): Rename use of TV_PHASE_CGRAPH to
TV_PHASE_OPT_GEN. Use new timevar TV_PHASE_STREAM_OUT around the call
to lto_wpa_write_files.
(lto_main): Rename use of TV_PHASE_CGRAPH to TV_PHASE_OPT_GEN. Move
@@ -2198,7 +2215,7 @@
2012-04-16 Jan Hubicka <jh@suse.cz>
* lto.c (read_cgraph_and_symbols): Use FOR_EACH
- walkers to walk cgraph and varpool.
+ walkers to walk cgraph and varpool.
(materialize_cgraph): Likewise.
* lto-partition.c (lto_1_to_1_map): Likewise.
(lto_balanced_map): Likewise.
@@ -2758,7 +2775,7 @@
2010-10-03 Andi Kleen <ak@linux.intel.com>
* lto.c (lto_file_finalize): Replace gcc_assert for missing section
- with fatal_error.
+ with fatal_error.
2010-09-28 Jan Hubicka <jh@suse.cz>
@@ -2861,7 +2878,7 @@
2010-07-10 Andi Kleen <ak@linux.intel.com>
PR lto/44992
- * lto.c: Include splay-tree.h
+ * lto.c: Include splay-tree.h
(lto_resolution_read): Change to walk file_ids tree and parse
extra file_id in resolution file.
(lto_section_with_id): Add.
@@ -2914,7 +2931,7 @@
2010-07-04 Jan Hubicka <jh@suse.cz>
- * lto.c (read_cgraph_and_symbols): Dump cgraph before merging.
+ * lto.c (read_cgraph_and_symbols): Dump cgraph before merging.
2010-06-13 Richard Guenther <rguenther@suse.de>
@@ -3333,7 +3350,7 @@
* lto.c (lto_resolution_read): Add more checks. Discard rest of line.
2009-11-04 Richard Guenther <rguenther@suse.de>
- Rafael Avila de Espindola <espindola@google.com>
+ Rafael Avila de Espindola <espindola@google.com>
* lto-elf.c (lto_elf_build_section_table): Add the base offset.
(lto_elf_file_open): Handle offsets in arguments name@offest.
diff --git a/gcc/lto/lto-lang.c b/gcc/lto/lto-lang.c
index de6ec1c077a..8eb4a25a634 100644
--- a/gcc/lto/lto-lang.c
+++ b/gcc/lto/lto-lang.c
@@ -303,8 +303,7 @@ handle_const_attribute (tree *node, tree ARG_UNUSED (name),
tree ARG_UNUSED (args), int ARG_UNUSED (flags),
bool * ARG_UNUSED (no_add_attrs))
{
- if (TREE_CODE (*node) != FUNCTION_DECL
- || !DECL_BUILT_IN (*node))
+ if (!fndecl_built_in_p (*node))
inform (UNKNOWN_LOCATION, "%s:%s: %E: %E", __FILE__, __func__, *node, name);
tree type = TREE_TYPE (*node);
diff --git a/gcc/lto/lto-symtab.c b/gcc/lto/lto-symtab.c
index 0d603c0281f..cf08d455d87 100644
--- a/gcc/lto/lto-symtab.c
+++ b/gcc/lto/lto-symtab.c
@@ -546,14 +546,14 @@ lto_symtab_merge_p (tree prevailing, tree decl)
if (TREE_CODE (prevailing) == FUNCTION_DECL)
{
- if (DECL_BUILT_IN (prevailing) != DECL_BUILT_IN (decl))
+ if (fndecl_built_in_p (prevailing) != fndecl_built_in_p (decl))
{
if (dump_file)
fprintf (dump_file, "Not merging decls; "
"DECL_BUILT_IN mismatch\n");
return false;
}
- if (DECL_BUILT_IN (prevailing)
+ if (fndecl_built_in_p (prevailing)
&& (DECL_BUILT_IN_CLASS (prevailing) != DECL_BUILT_IN_CLASS (decl)
|| DECL_FUNCTION_CODE (prevailing) != DECL_FUNCTION_CODE (decl)))
{
@@ -797,7 +797,7 @@ lto_symtab_merge_decls_1 (symtab_node *first)
{
for (e = first; e; e = e->next_sharing_asm_name)
if (TREE_CODE (e->decl) == FUNCTION_DECL
- && !DECL_BUILT_IN (e->decl)
+ && !fndecl_built_in_p (e->decl)
&& lto_symtab_symbol_p (e))
{
prevailing = e;
@@ -1030,7 +1030,7 @@ lto_symtab_merge_symbols (void)
/* Builtins are not merged via decl merging. It is however
possible that tree merging unified the declaration. We
do not want duplicate entries in symbol table. */
- if (cnode && DECL_BUILT_IN (node->decl)
+ if (cnode && fndecl_built_in_p (node->decl)
&& (cnode2 = cgraph_node::get (node->decl))
&& cnode2 != cnode)
lto_cgraph_replace_node (cnode2, cnode);
diff --git a/gcc/lto/lto.c b/gcc/lto/lto.c
index 10618896022..598492df527 100644
--- a/gcc/lto/lto.c
+++ b/gcc/lto/lto.c
@@ -894,7 +894,7 @@ lto_maybe_register_decl (struct data_in *data_in, tree t, unsigned ix)
if (TREE_CODE (t) == VAR_DECL)
lto_register_var_decl_in_symtab (data_in, t, ix);
else if (TREE_CODE (t) == FUNCTION_DECL
- && !DECL_BUILT_IN (t))
+ && !fndecl_built_in_p (t))
lto_register_function_decl_in_symtab (data_in, t, ix);
}
@@ -2923,7 +2923,8 @@ read_cgraph_and_symbols (unsigned nfiles, const char **fnames)
FOR_EACH_SYMBOL (snode)
if (snode->externally_visible && snode->real_symbol_p ()
&& snode->lto_file_data && snode->lto_file_data->resolution_map
- && !is_builtin_fn (snode->decl)
+ && !(TREE_CODE (snode->decl) == FUNCTION_DECL
+ && fndecl_built_in_p (snode->decl))
&& !(VAR_P (snode->decl) && DECL_HARD_REGISTER (snode->decl)))
{
ld_plugin_symbol_resolution_t *res;
diff --git a/gcc/machmode.h b/gcc/machmode.h
index b938eeaa26b..239a9098f53 100644
--- a/gcc/machmode.h
+++ b/gcc/machmode.h
@@ -237,9 +237,6 @@ extern const unsigned char mode_class[NUM_MACHINE_MODES];
|| CLASS == MODE_ACCUM \
|| CLASS == MODE_UACCUM)
-#define POINTER_BOUNDS_MODE_P(MODE) \
- (GET_MODE_CLASS (MODE) == MODE_POINTER_BOUNDS)
-
/* An optional T (i.e. a T or nothing), where T is some form of mode class. */
template<typename T>
class opt_mode
@@ -482,7 +479,6 @@ scalar_mode::includes_p (machine_mode m)
case MODE_UACCUM:
case MODE_FLOAT:
case MODE_DECIMAL_FLOAT:
- case MODE_POINTER_BOUNDS:
return true;
default:
return false;
diff --git a/gcc/match.pd b/gcc/match.pd
index cb3c93e3e16..be669caf844 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -776,6 +776,11 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(bit_not (bit_and:cs (bit_not @0) @1))
(bit_ior @0 (bit_not @1)))
+/* ~(~a | b) --> a & ~b */
+(simplify
+ (bit_not (bit_ior:cs (bit_not @0) @1))
+ (bit_and @0 (bit_not @1)))
+
/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0. */
#if GIMPLE
(simplify
@@ -981,6 +986,16 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
(bit_and @0 @1))
+/* (~x | y) & (x | ~y) -> ~(x ^ y) */
+(simplify
+ (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1)))
+ (bit_not (bit_xor @0 @1)))
+
+/* (~x | y) ^ (x | ~y) -> x ^ y */
+(simplify
+ (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1)))
+ (bit_xor @0 @1))
+
/* ~x & ~y -> ~(x | y)
~x | ~y -> ~(x & y) */
(for op (bit_and bit_ior)
@@ -1027,7 +1042,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(for opo (bit_and bit_xor)
opi (bit_xor bit_and)
(simplify
- (opo:c (opi:c @0 @1) @1)
+ (opo:c (opi:cs @0 @1) @1)
(bit_and (bit_not @0) @1)))
/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
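
The three new match.pd patterns encode plain Boolean identities; a
brute-force check over all 8-bit operand pairs confirms them (ordinary
C++, independent of GCC).

#include <cassert>

/* Exhaustively verify the identities behind the new simplifications:
     ~(~a | b)           == a & ~b
     (~x | y) & (x | ~y) == ~(x ^ y)
     (~x | y) ^ (x | ~y) == x ^ y
   for all 8-bit operand pairs.  */
int
main ()
{
  for (unsigned a = 0; a < 256; ++a)
    for (unsigned b = 0; b < 256; ++b)
      {
	unsigned x = a, y = b, m = 0xff;
	assert ((~(~x | y) & m) == ((x & ~y) & m));
	assert ((((~x | y) & (x | ~y)) & m) == (~(x ^ y) & m));
	assert ((((~x | y) ^ (x | ~y)) & m) == ((x ^ y) & m));
      }
  return 0;
}
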
diff --git a/gcc/mode-classes.def b/gcc/mode-classes.def
index 522e446c7ba..62ec13815f8 100644
--- a/gcc/mode-classes.def
+++ b/gcc/mode-classes.def
@@ -22,7 +22,6 @@ along with GCC; see the file COPYING3. If not see
DEF_MODE_CLASS (MODE_CC), /* condition code in a register */ \
DEF_MODE_CLASS (MODE_INT), /* integer */ \
DEF_MODE_CLASS (MODE_PARTIAL_INT), /* integer with padding bits */ \
- DEF_MODE_CLASS (MODE_POINTER_BOUNDS), /* bounds */ \
DEF_MODE_CLASS (MODE_FRACT), /* signed fractional number */ \
DEF_MODE_CLASS (MODE_UFRACT), /* unsigned fractional number */ \
DEF_MODE_CLASS (MODE_ACCUM), /* signed accumulator */ \
diff --git a/gcc/objc/ChangeLog b/gcc/objc/ChangeLog
index 63693714d04..5eeec721351 100644
--- a/gcc/objc/ChangeLog
+++ b/gcc/objc/ChangeLog
@@ -2049,7 +2049,7 @@
Merge from 'apple/trunk' branch on FSF servers.
2005-08-23 Stuart Hastings <stuart@apple.com>
- Ziemowit Laski <zlaski@apple.com>
+ Ziemowit Laski <zlaski@apple.com>
Radar 4209854
* objc-act.c (objc_decay_parm_type): New function.
diff --git a/gcc/objcp/ChangeLog b/gcc/objcp/ChangeLog
index ec829b92270..b8a97939f48 100644
--- a/gcc/objcp/ChangeLog
+++ b/gcc/objcp/ChangeLog
@@ -241,7 +241,7 @@
* Make-lang.in (cc1objplus-dummy): Remove.
(cc1objplus-checksum): Change to run checksum over object files
- and options only.
+ and options only.
2010-10-04 Andi Kleen <ak@linux.intel.com>
@@ -388,13 +388,13 @@
Revert:
2008-02-07 Andreas Tobler <andreast-list@fgznet.ch>
- Douglas Gregor <doug.gregor@gmail.com>
+ Douglas Gregor <doug.gregor@gmail.com>
PR bootstrap/35115
* objcp-decl.c (objcp_comptypes): Call cp_comptypes, not comptypes.
2008-02-07 Andreas Tobler <andreast-list@fgznet.ch>
- Douglas Gregor <doug.gregor@gmail.com>
+ Douglas Gregor <doug.gregor@gmail.com>
PR bootstrap/35115
* objcp-decl.c (objcp_comptypes): Call cp_comptypes, not comptypes.
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index 843c66fd221..fdabf67249b 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -2975,9 +2975,8 @@ scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
static bool
setjmp_or_longjmp_p (const_tree fndecl)
{
- if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
- && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
- || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
+ if (fndecl_built_in_p (fndecl, BUILT_IN_SETJMP)
+ || fndecl_built_in_p (fndecl, BUILT_IN_LONGJMP))
return true;
tree declname = DECL_NAME (fndecl);
@@ -8832,7 +8831,7 @@ lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
call_stmt = as_a <gcall *> (stmt);
fndecl = gimple_call_fndecl (call_stmt);
if (fndecl
- && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
switch (DECL_FUNCTION_CODE (fndecl))
{
case BUILT_IN_GOMP_BARRIER:
diff --git a/gcc/params.def b/gcc/params.def
index a09785d4e07..a0ad3ecdad6 100644
--- a/gcc/params.def
+++ b/gcc/params.def
@@ -847,15 +847,15 @@ DEFPARAM (PARAM_MAX_PARTIAL_ANTIC_LENGTH,
"Maximum length of partial antic set when performing tree pre optimization.",
100, 0, 0)
-/* The following is used as a stop-gap limit for cases where really huge
- SCCs blow up memory and compile-time use too much. If we hit this limit,
- SCCVN and such FRE and PRE will be not done at all for the current
- function. */
-
-DEFPARAM (PARAM_SCCVN_MAX_SCC_SIZE,
- "sccvn-max-scc-size",
- "Maximum size of a SCC before SCCVN stops processing a function.",
- 10000, 10, 0)
+/* The following is used as a stop-gap limit for cases where really deep
+ loop nests cause compile-time to blow up. If we hit this limit,
+ FRE and PRE will value-number outer loops (but the outermost) in a
+ loop nest non-optimistically. */
+
+DEFPARAM (PARAM_RPO_VN_MAX_LOOP_DEPTH,
+ "rpo-vn-max-loop-depth",
+ "Maximum depth of a loop nest to fully value-number optimistically.",
+ 7, 2, 0)
/* The following is used as a stop-gap limit for cases where really huge
functions blow up compile-time use too much. It limits the number of
diff --git a/gcc/predict.c b/gcc/predict.c
index 8c8e79153fc..ca6a901cd56 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -3996,10 +3996,12 @@ strip_predict_hints (function *fun, bool early)
tree fndecl = gimple_call_fndecl (stmt);
if (!early
- && ((DECL_BUILT_IN_P (fndecl, BUILT_IN_NORMAL, BUILT_IN_EXPECT)
+ && ((fndecl != NULL_TREE
+ && fndecl_built_in_p (fndecl, BUILT_IN_EXPECT)
&& gimple_call_num_args (stmt) == 2)
- || (DECL_BUILT_IN_P (fndecl, BUILT_IN_NORMAL,
- BUILT_IN_EXPECT_WITH_PROBABILITY)
+ || (fndecl != NULL_TREE
+ && fndecl_built_in_p (fndecl,
+ BUILT_IN_EXPECT_WITH_PROBABILITY)
&& gimple_call_num_args (stmt) == 3)
|| (gimple_call_internal_p (stmt)
&& gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT)))
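
strip_predict_hints now recognizes the three-argument
__builtin_expect_with_probability next to the two-argument
__builtin_expect.  A small usage sketch; both are GCC builtins, but whether
a given compiler accepts the _with_probability form depends on its version,
so treat that part as an assumption.

#include <cstdio>

/* Branch hints consumed (and later stripped) by the predictors: the
   classic two-argument form and the three-argument form that carries an
   explicit probability.  */
static int
classify (int v)
{
  if (__builtin_expect (v == 0, 0))
    return -1;		/* expected to be rare */
  if (__builtin_expect_with_probability (v > 100, 1, 0.9))
    return 1;		/* expected true about 90% of the time */
  return 0;
}

int
main ()
{
  std::printf ("%d %d %d\n", classify (0), classify (5), classify (500));
  return 0;
}
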
diff --git a/gcc/pretty-print.c b/gcc/pretty-print.c
index 02967d05f75..7dd900b3bbf 100644
--- a/gcc/pretty-print.c
+++ b/gcc/pretty-print.c
@@ -705,10 +705,11 @@ static void pp_quoted_string (pretty_printer *, const char *, size_t = -1);
For use e.g. when implementing "+" in client format decoders. */
void
-text_info::set_location (unsigned int idx, location_t loc, bool show_caret_p)
+text_info::set_location (unsigned int idx, location_t loc,
+ enum range_display_kind range_display_kind)
{
gcc_checking_assert (m_richloc);
- m_richloc->set_range (idx, loc, show_caret_p);
+ m_richloc->set_range (idx, loc, range_display_kind);
}
location_t
diff --git a/gcc/pretty-print.h b/gcc/pretty-print.h
index 0d67e308050..2decc516b1f 100644
--- a/gcc/pretty-print.h
+++ b/gcc/pretty-print.h
@@ -36,7 +36,8 @@ struct text_info
void **x_data;
rich_location *m_richloc;
- void set_location (unsigned int idx, location_t loc, bool caret_p);
+ void set_location (unsigned int idx, location_t loc,
+ enum range_display_kind range_display_kind);
location_t get_location (unsigned int index_of_location) const;
};
diff --git a/gcc/print-rtl.c b/gcc/print-rtl.c
index ba9ac02fce7..5dd2e31340a 100644
--- a/gcc/print-rtl.c
+++ b/gcc/print-rtl.c
@@ -36,11 +36,11 @@ along with GCC; see the file COPYING3. If not see
#include "alias.h"
#include "tree.h"
#include "basic-block.h"
-#include "cfg.h"
#include "print-tree.h"
#include "flags.h"
#include "predict.h"
#include "function.h"
+#include "cfg.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-pretty-print.h"
diff --git a/gcc/print-tree.c b/gcc/print-tree.c
index 5347e064704..8caf9dd0dd0 100644
--- a/gcc/print-tree.c
+++ b/gcc/print-tree.c
@@ -427,7 +427,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent,
fputs (" autoinline", file);
if (code == FUNCTION_DECL && DECL_UNINLINABLE (node))
fputs (" uninlinable", file);
- if (code == FUNCTION_DECL && DECL_BUILT_IN (node))
+ if (code == FUNCTION_DECL && fndecl_built_in_p (node))
fputs (" built-in", file);
if (code == FUNCTION_DECL && DECL_STATIC_CHAIN (node))
fputs (" static-chain", file);
@@ -502,7 +502,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent,
print_node (file, "size", DECL_SIZE (node), indent + 4);
print_node (file, "unit-size", DECL_SIZE_UNIT (node), indent + 4);
- if (code != FUNCTION_DECL || DECL_BUILT_IN (node))
+ if (code != FUNCTION_DECL || fndecl_built_in_p (node))
indent_to (file, indent + 3);
if (DECL_USER_ALIGN (node))
@@ -514,7 +514,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent,
fprintf (file, " offset_align " HOST_WIDE_INT_PRINT_UNSIGNED,
DECL_OFFSET_ALIGN (node));
- if (code == FUNCTION_DECL && DECL_BUILT_IN (node))
+ if (code == FUNCTION_DECL && fndecl_built_in_p (node))
{
if (DECL_BUILT_IN_CLASS (node) == BUILT_IN_MD)
fprintf (file, " built-in: BUILT_IN_MD:%d", DECL_FUNCTION_CODE (node));
diff --git a/gcc/profile-count.c b/gcc/profile-count.c
index 716ffcc8eb0..f4ab244e3a3 100644
--- a/gcc/profile-count.c
+++ b/gcc/profile-count.c
@@ -25,8 +25,8 @@ along with GCC; see the file COPYING3. If not see
#include "options.h"
#include "tree.h"
#include "basic-block.h"
-#include "cfg.h"
#include "function.h"
+#include "cfg.h"
#include "gimple.h"
#include "data-streamer.h"
#include "cgraph.h"
diff --git a/gcc/sanopt.c b/gcc/sanopt.c
index 223c06a8355..082f936adb5 100644
--- a/gcc/sanopt.c
+++ b/gcc/sanopt.c
@@ -1165,13 +1165,15 @@ sanitize_rewrite_addressable_params (function *fun)
gimple_add_tmp_var (var);
+ /* We skip parameters that have a DECL_VALUE_EXPR. */
+ if (DECL_HAS_VALUE_EXPR_P (arg))
+ continue;
+
if (dump_file)
fprintf (dump_file,
"Rewriting parameter whose address is taken: %s\n",
IDENTIFIER_POINTER (DECL_NAME (arg)));
- gcc_assert (!DECL_HAS_VALUE_EXPR_P (arg));
-
SET_DECL_PT_UID (var, DECL_PT_UID (arg));
/* Assign value of parameter to newly created variable. */
diff --git a/gcc/sreal.c b/gcc/sreal.c
index 950937a7cc9..e17d93afb6a 100644
--- a/gcc/sreal.c
+++ b/gcc/sreal.c
@@ -64,7 +64,7 @@ along with GCC; see the file COPYING3. If not see
void
sreal::dump (FILE *file) const
{
- fprintf (file, "(%" PRIi64 " * 2^%d)", m_sig, m_exp);
+ fprintf (file, "(%" PRIi64 " * 2^%d)", (int64_t)m_sig, m_exp);
}
DEBUG_FUNCTION void
@@ -114,7 +114,7 @@ sreal::to_int () const
if (m_exp >= SREAL_PART_BITS)
return sign * INTTYPE_MAXIMUM (int64_t);
if (m_exp > 0)
- return sign * (SREAL_ABS (m_sig) << m_exp);
+ return sign * (SREAL_ABS ((int64_t)m_sig) << m_exp);
if (m_exp < 0)
return m_sig >> -m_exp;
return m_sig;
@@ -138,7 +138,8 @@ sreal
sreal::operator+ (const sreal &other) const
{
int dexp;
- sreal tmp, r;
+ sreal tmp;
+ int64_t r_sig, r_exp;
const sreal *a_p = this, *b_p = &other, *bb;
@@ -146,10 +147,14 @@ sreal::operator+ (const sreal &other) const
std::swap (a_p, b_p);
dexp = a_p->m_exp - b_p->m_exp;
- r.m_exp = a_p->m_exp;
+ r_exp = a_p->m_exp;
if (dexp > SREAL_BITS)
{
- r.m_sig = a_p->m_sig;
+ r_sig = a_p->m_sig;
+
+ sreal r;
+ r.m_sig = r_sig;
+ r.m_exp = r_exp;
return r;
}
@@ -162,8 +167,8 @@ sreal::operator+ (const sreal &other) const
bb = &tmp;
}
- r.m_sig = a_p->m_sig + bb->m_sig;
- r.normalize ();
+ r_sig = a_p->m_sig + (int64_t)bb->m_sig;
+ sreal r (r_sig, r_exp);
return r;
}
@@ -174,7 +179,8 @@ sreal
sreal::operator- (const sreal &other) const
{
int dexp;
- sreal tmp, r;
+ sreal tmp;
+ int64_t r_sig, r_exp;
const sreal *bb;
const sreal *a_p = this, *b_p = &other;
@@ -186,10 +192,14 @@ sreal::operator- (const sreal &other) const
}
dexp = a_p->m_exp - b_p->m_exp;
- r.m_exp = a_p->m_exp;
+ r_exp = a_p->m_exp;
if (dexp > SREAL_BITS)
{
- r.m_sig = sign * a_p->m_sig;
+ r_sig = sign * a_p->m_sig;
+
+ sreal r;
+ r.m_sig = r_sig;
+ r.m_exp = r_exp;
return r;
}
if (dexp == 0)
@@ -201,8 +211,8 @@ sreal::operator- (const sreal &other) const
bb = &tmp;
}
- r.m_sig = sign * (a_p->m_sig - bb->m_sig);
- r.normalize ();
+ r_sig = sign * ((int64_t) a_p->m_sig - (int64_t)bb->m_sig);
+ sreal r (r_sig, r_exp);
return r;
}
@@ -212,17 +222,14 @@ sreal
sreal::operator* (const sreal &other) const
{
sreal r;
- if (absu_hwi (m_sig) < SREAL_MIN_SIG || absu_hwi (other.m_sig) < SREAL_MIN_SIG)
+ if (absu_hwi (m_sig) < SREAL_MIN_SIG
+ || absu_hwi (other.m_sig) < SREAL_MIN_SIG)
{
r.m_sig = 0;
r.m_exp = -SREAL_MAX_EXP;
}
else
- {
- r.m_sig = m_sig * other.m_sig;
- r.m_exp = m_exp + other.m_exp;
- r.normalize ();
- }
+ r.normalize (m_sig * (int64_t) other.m_sig, m_exp + other.m_exp);
return r;
}
@@ -233,11 +240,9 @@ sreal
sreal::operator/ (const sreal &other) const
{
gcc_checking_assert (other.m_sig != 0);
- sreal r;
- r.m_sig
- = SREAL_SIGN (m_sig) * (SREAL_ABS (m_sig) << SREAL_PART_BITS) / other.m_sig;
- r.m_exp = m_exp - other.m_exp - SREAL_PART_BITS;
- r.normalize ();
+ sreal r (SREAL_SIGN (m_sig)
+ * ((int64_t)SREAL_ABS (m_sig) << SREAL_PART_BITS) / other.m_sig,
+ m_exp - other.m_exp - SREAL_PART_BITS);
return r;
}
@@ -272,15 +277,15 @@ namespace selftest {
static void
sreal_verify_basics (void)
{
- sreal minimum = INT_MIN;
- sreal maximum = INT_MAX;
+ sreal minimum = INT_MIN/2;
+ sreal maximum = INT_MAX/2;
sreal seven = 7;
sreal minus_two = -2;
sreal minus_nine = -9;
- ASSERT_EQ (INT_MIN, minimum.to_int ());
- ASSERT_EQ (INT_MAX, maximum.to_int ());
+ ASSERT_EQ (INT_MIN/2, minimum.to_int ());
+ ASSERT_EQ (INT_MAX/2, maximum.to_int ());
ASSERT_FALSE (minus_two < minus_two);
ASSERT_FALSE (seven < seven);
diff --git a/gcc/sreal.h b/gcc/sreal.h
index 91ae526d38f..e2ad1a38e3e 100644
--- a/gcc/sreal.h
+++ b/gcc/sreal.h
@@ -20,8 +20,7 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_SREAL_H
#define GCC_SREAL_H
-/* SREAL_PART_BITS has to be an even number. */
-#define SREAL_PART_BITS 32
+#define SREAL_PART_BITS 31
#define UINT64_BITS 64
@@ -45,9 +44,9 @@ public:
sreal () : m_sig (-1), m_exp (-1) {}
/* Construct a sreal. */
- sreal (int64_t sig, int exp = 0) : m_sig (sig), m_exp (exp)
+ sreal (int64_t sig, int exp = 0)
{
- normalize ();
+ normalize (sig, exp);
}
void dump (FILE *) const;
@@ -130,14 +129,14 @@ public:
}
private:
- inline void normalize ();
- inline void normalize_up ();
- inline void normalize_down ();
+ inline void normalize (int64_t new_sig, signed int new_exp);
+ inline void normalize_up (int64_t new_sig, signed int new_exp);
+ inline void normalize_down (int64_t new_sig, signed int new_exp);
void shift_right (int amount);
static sreal signedless_plus (const sreal &a, const sreal &b, bool negative);
static sreal signedless_minus (const sreal &a, const sreal &b, bool negative);
- int64_t m_sig; /* Significant. */
+ int32_t m_sig; /* Significand. */
signed int m_exp; /* Exponent. */
};
@@ -199,23 +198,24 @@ inline sreal operator>> (const sreal &a, int exp)
Make this separate method so inliner can handle hot path better. */
inline void
-sreal::normalize_up ()
+sreal::normalize_up (int64_t new_sig, signed int new_exp)
{
- unsigned HOST_WIDE_INT sig = absu_hwi (m_sig);
+ unsigned HOST_WIDE_INT sig = absu_hwi (new_sig);
int shift = SREAL_PART_BITS - 2 - floor_log2 (sig);
gcc_checking_assert (shift > 0);
sig <<= shift;
- m_exp -= shift;
+ new_exp -= shift;
gcc_checking_assert (sig <= SREAL_MAX_SIG && sig >= SREAL_MIN_SIG);
/* Check underflow. */
- if (m_exp < -SREAL_MAX_EXP)
+ if (new_exp < -SREAL_MAX_EXP)
{
- m_exp = -SREAL_MAX_EXP;
+ new_exp = -SREAL_MAX_EXP;
sig = 0;
}
- if (SREAL_SIGN (m_sig) == -1)
+ m_exp = new_exp;
+ if (SREAL_SIGN (new_sig) == -1)
m_sig = -sig;
else
m_sig = sig;
@@ -226,16 +226,16 @@ sreal::normalize_up ()
Make this separate method so inliner can handle hot path better. */
inline void
-sreal::normalize_down ()
+sreal::normalize_down (int64_t new_sig, signed int new_exp)
{
int last_bit;
- unsigned HOST_WIDE_INT sig = absu_hwi (m_sig);
+ unsigned HOST_WIDE_INT sig = absu_hwi (new_sig);
int shift = floor_log2 (sig) - SREAL_PART_BITS + 2;
gcc_checking_assert (shift > 0);
last_bit = (sig >> (shift-1)) & 1;
sig >>= shift;
- m_exp += shift;
+ new_exp += shift;
gcc_checking_assert (sig <= SREAL_MAX_SIG && sig >= SREAL_MIN_SIG);
/* Round the number. */
@@ -243,16 +243,17 @@ sreal::normalize_down ()
if (sig > SREAL_MAX_SIG)
{
sig >>= 1;
- m_exp++;
+ new_exp++;
}
/* Check overflow. */
- if (m_exp > SREAL_MAX_EXP)
+ if (new_exp > SREAL_MAX_EXP)
{
- m_exp = SREAL_MAX_EXP;
+ new_exp = SREAL_MAX_EXP;
sig = SREAL_MAX_SIG;
}
- if (SREAL_SIGN (m_sig) == -1)
+ m_exp = new_exp;
+ if (SREAL_SIGN (new_sig) == -1)
m_sig = -sig;
else
m_sig = sig;
@@ -261,16 +262,24 @@ sreal::normalize_down ()
/* Normalize *this; the hot path. */
inline void
-sreal::normalize ()
+sreal::normalize (int64_t new_sig, signed int new_exp)
{
- unsigned HOST_WIDE_INT sig = absu_hwi (m_sig);
+ unsigned HOST_WIDE_INT sig = absu_hwi (new_sig);
if (sig == 0)
- m_exp = -SREAL_MAX_EXP;
+ {
+ m_sig = 0;
+ m_exp = -SREAL_MAX_EXP;
+ }
else if (sig > SREAL_MAX_SIG)
- normalize_down ();
+ normalize_down (new_sig, new_exp);
else if (sig < SREAL_MIN_SIG)
- normalize_up ();
+ normalize_up (new_sig, new_exp);
+ else
+ {
+ m_sig = new_sig;
+ m_exp = new_exp;
+ }
}
#endif
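
With SREAL_PART_BITS reduced to 31 and the significand narrowed to int32_t,
normalize now takes the wide intermediate significand and the exponent as
arguments and shifts them into range before storing.  A much-simplified
standalone sketch of that shift-into-range idea, with its own constants and
without the rounding and exponent clamping of the real code.

#include <cassert>
#include <cmath>
#include <cstdint>

/* Toy soft-float: a signed 31-bit significand normalized into
   [2^29, 2^30) plus a free binary exponent.  Only the "pass the wide
   intermediate significand to normalize" shape of the patch is shown.  */
struct toy_real
{
  static const int part_bits = 31;
  int32_t sig;
  int exp;

  toy_real (int64_t s, int e) { normalize (s, e); }

  void normalize (int64_t s, int e)
  {
    if (s == 0)
      {
	sig = 0;
	exp = 0;
	return;
      }
    int64_t a = s < 0 ? -s : s;
    const int64_t max_sig = (int64_t) 1 << (part_bits - 1);
    const int64_t min_sig = max_sig >> 1;
    while (a >= max_sig)	/* too big: shift down, bump exponent */
      {
	a >>= 1;
	e++;
      }
    while (a < min_sig)		/* too small: shift up, drop exponent */
      {
	a <<= 1;
	e--;
      }
    sig = s < 0 ? -(int32_t) a : (int32_t) a;
    exp = e;
  }

  double to_double () const { return std::ldexp ((double) sig, exp); }
};

int
main ()
{
  toy_real a (6, 0), b (3, 1);	/* both represent the value 6 */
  assert (a.to_double () == 6.0 && b.to_double () == 6.0);
  assert (a.sig == b.sig && a.exp == b.exp);
  return 0;
}
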
diff --git a/gcc/stmt.c b/gcc/stmt.c
index b8df1818137..07355984de1 100644
--- a/gcc/stmt.c
+++ b/gcc/stmt.c
@@ -81,8 +81,6 @@ struct simple_case_node
/* Label to jump to when node matches. */
tree m_code_label;
};
-
-extern basic_block label_to_block_fn (struct function *, tree);
static bool check_unique_operand_names (tree, tree, tree);
static char *resolve_operand_name_1 (char *, tree, tree, tree);
@@ -907,7 +905,7 @@ expand_case (gswitch *stmt)
/* Find the default case target label. */
tree default_lab = CASE_LABEL (gimple_switch_default_label (stmt));
default_label = jump_target_rtx (default_lab);
- basic_block default_bb = label_to_block_fn (cfun, default_lab);
+ basic_block default_bb = label_to_block (cfun, default_lab);
edge default_edge = find_edge (bb, default_bb);
/* Get upper and lower bounds of case values. */
diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c
index cb377ca5b82..088f3606a0d 100644
--- a/gcc/stor-layout.c
+++ b/gcc/stor-layout.c
@@ -390,7 +390,6 @@ int_mode_for_mode (machine_mode mode)
case MODE_VECTOR_ACCUM:
case MODE_VECTOR_UFRACT:
case MODE_VECTOR_UACCUM:
- case MODE_POINTER_BOUNDS:
return int_mode_for_size (GET_MODE_BITSIZE (mode), 0);
case MODE_RANDOM:
@@ -761,14 +760,19 @@ layout_decl (tree decl, unsigned int known_align)
{
tree size = DECL_SIZE_UNIT (decl);
- if (size != 0 && TREE_CODE (size) == INTEGER_CST
- && compare_tree_int (size, warn_larger_than_size) > 0)
+ if (size != 0 && TREE_CODE (size) == INTEGER_CST)
{
- unsigned HOST_WIDE_INT uhwisize = tree_to_uhwi (size);
-
- warning (OPT_Wlarger_than_, "size of %q+D %wu bytes exceeds "
- "maximum object size %wu",
- decl, uhwisize, warn_larger_than_size);
+ /* -Wlarger-than= argument of HOST_WIDE_INT_MAX is treated
+ as if PTRDIFF_MAX had been specified, with the value
+ being that on the target rather than the host. */
+ unsigned HOST_WIDE_INT max_size = warn_larger_than_size;
+ if (max_size == HOST_WIDE_INT_MAX)
+ max_size = tree_to_shwi (TYPE_MAX_VALUE (ptrdiff_type_node));
+
+ if (compare_tree_int (size, max_size) > 0)
+ warning (OPT_Wlarger_than_, "size of %q+D %E bytes exceeds "
+ "maximum object size %wu",
+ decl, size, max_size);
}
}
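
The stor-layout.c hunk treats a -Wlarger-than= limit of HOST_WIDE_INT_MAX
as the target's PTRDIFF_MAX before comparing object sizes.  A tiny sketch
of that clamp-then-compare step, with made-up stand-ins for the host and
target constants.

#include <cassert>
#include <cstdint>

/* Clamp the user-supplied limit the way layout_decl now does, then report
   whether the object size exceeds it.  The constants in main are
   placeholders, not values GCC would compute for a real target.  */
static bool
exceeds_larger_than_limit (uint64_t decl_size, uint64_t limit,
			   uint64_t host_wide_int_max,
			   uint64_t target_ptrdiff_max)
{
  if (limit == host_wide_int_max)
    limit = target_ptrdiff_max;
  return decl_size > limit;
}

int
main ()
{
  const uint64_t host_max = UINT64_MAX / 2;	/* pretend HOST_WIDE_INT_MAX */
  const uint64_t pd_max = (1ull << 31) - 1;	/* pretend 32-bit target */
  assert (exceeds_larger_than_limit (1ull << 32, host_max, host_max, pd_max));
  assert (!exceeds_larger_than_limit (4096, host_max, host_max, pd_max));
  assert (exceeds_larger_than_limit (8192, 4096, host_max, pd_max));
  return 0;
}
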
diff --git a/gcc/substring-locations.c b/gcc/substring-locations.c
index 1981394a3f0..faf78845840 100644
--- a/gcc/substring-locations.c
+++ b/gcc/substring-locations.c
@@ -171,7 +171,7 @@ format_warning_n_va (const substring_loc &fmt_loc,
gcc_rich_location richloc (primary_loc, primary_label);
if (param_loc != UNKNOWN_LOCATION)
- richloc.add_range (param_loc, false, param_label);
+ richloc.add_range (param_loc, SHOW_RANGE_WITHOUT_CARET, param_label);
if (!err && corrected_substring && substring_within_range)
richloc.add_fixit_replace (fmt_substring_range, corrected_substring);
diff --git a/gcc/symtab.c b/gcc/symtab.c
index c5464cbe6d7..3cf1f629413 100644
--- a/gcc/symtab.c
+++ b/gcc/symtab.c
@@ -2323,7 +2323,7 @@ symtab_node::output_to_lto_symbol_table_p (void)
return false;
/* FIXME: Builtins corresponding to real functions probably should have
symbol table entries. */
- if (is_builtin_fn (decl))
+ if (TREE_CODE (decl) == FUNCTION_DECL && fndecl_built_in_p (decl))
return false;
/* We have real symbol that should be in symbol table. However try to trim
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 59a9038dd04..e9e4b5c91b5 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,335 @@
+2018-08-30 Qing Zhao <qing.zhao@oracle.com>
+
+ PR 86519
+ * gcc.dg/strcmpopt_6.c: Remove.
+ * gcc.target/aarch64/strcmpopt_6.c: New testcase.
+ * gcc.target/i386/strcmpopt_6.c: Likewise.
+
+2018-08-30 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/87147
+ * gcc.dg/torture/pr87147.c: New testcase.
+
+2018-08-30 Tamar Christina <tamar.christina@arm.com>
+
+ * gcc.target/aarch64/large_struct_copy_2.c: New.
+
+2018-08-29 Bernd Edlinger <bernd.edlinger@hotmail.de>
+
+ PR middle-end/87053
+ * gcc.c-torture/execute/pr87053.c: New test.
+
+2018-08-29 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/87095
+ * g++.dg/ubsan/vptr-13.C: New test.
+
+2018-08-29 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/85265
+ * g++.dg/concepts/pr85265.C: New.
+
+2018-08-29 Martin Sebor <msebor@redhat.com>
+ Bernd Edlinger <bernd.edlinger@hotmail.de>
+
+ PR tree-optimization/86714
+ PR tree-optimization/86711
+ * gcc.c-torture/execute/memchr-1.c: New test.
+ * gcc.c-torture/execute/pr86714.c: New test.
+ * gcc.c-torture/execute/widechar-3.c: New test.
+ * gcc.dg/strlenopt-58.c: New test.
+
+2018-08-29 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/87132
+ * gcc.dg/torture/pr87132.c: New testcase.
+
+2018-08-29 David Malcolm <dmalcolm@redhat.com>
+
+ PR c++/85110
+ * g++.dg/diagnostic/param-type-mismatch-2.C: Update expected
+ output to reflect underlining of pertinent parameter in decl
+ for "no known conversion" messages.
+
+2018-08-29 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/87122
+ * g++.dg/cpp1z/decomp47.C: New test.
+
+2018-08-29 Matthew Malcomson <matthew.malcomson@arm.com>
+
+ * gcc.target/aarch64/simd/vect_su_add_sub.c: Use 32 and 64-bit types
+ where appropriate.
+
+2018-08-29 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/87117
+ * gfortran.dg/pr87117.f90: New testcase.
+
+2018-08-29 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/87126
+ * gcc.dg/tree-ssa/pr87126.c: New testcase.
+
+2018-08-28 MCC CS <deswurstes@users.noreply.github.com>
+
+ PR tree-optimization/87009
+ * gcc.dg/pr87009.c: New test.
+
+2018-08-28 Martin Sebor <msebor@redhat.com>
+
+ PR middle-end/86631
+ * g++.dg/Walloca1.C: Adjust.
+
+2018-08-28 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/86546
+ * g++.dg/other/switch4.C: New.
+
+2018-08-28 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/87124
+ * g++.dg/torture/pr87124.C: New testcase.
+
+2018-08-28 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/80477
+ * gfortran.dg/class_result_7.f90: New test.
+ * gfortran.dg/class_result_8.f90: New test.
+ * gfortran.dg/class_result_9.f90: New test.
+
+ PR fortran/86481
+ * gfortran.dg/allocate_with_source_25.f90: New test.
+
+2018-08-28 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/87099
+ * gcc.dg/pr87099.c: New test.
+
+2018-08-28 Richard Sandiford <richard.sandiford@arm.com>
+
+ PR testsuite/87078
+ * gcc.dg/vect/slp-37.c: Restrict scan tests to vect_hw_misalign.
+
+2018-08-28 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/87117
+ * gcc.dg/pr87117-1.c: New testcase.
+
+2018-08-28 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/87117
+ * gcc.dg/pr87117-2.c: New testcase.
+
+2018-08-28 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/87117
+ * gcc.dg/lvalue-5.c: New testcase.
+
+2018-08-27 Jeff Law <law@redhat.com>
+
+ PR tree-optimization/87110
+ * gcc.c-torture/compile/pr87110.c: New test.
+
+2018-08-27 Martin Sebor <msebor@redhat.com>
+
+ PR tree-optimization/86914
+ * gcc.dg/strlenopt-57.c: New test.
+
+2018-08-27 Martin Sebor <msebor@redhat.com>
+
+ PR tree-optimization/87112
+ * gcc.dg/pr87112.c: New test.
+
+2018-08-27 David Malcolm <dmalcolm@redhat.com>
+
+ PR c++/63392
+ * g++.dg/diagnostic/missing-typename.C: New test.
+
+2018-08-27 Jeff Law <law@redhat.com>
+
+ * gcc.c-torture/compile/dse.c: New test.
+
+2018-08-27 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/86993
+ * g++.dg/diagnostic/pr86993.C: New test.
+
+2018-08-27 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/86927
+ * gcc.dg/vect/pr86927.c: New testcase.
+
+2018-08-27 David Malcolm <dmalcolm@redhat.com>
+
+ PR c++/87091
+ * gcc.dg/empty.h: New file.
+ * gcc.dg/fixits-pr84852-1.c: Update for move of fix-it hint to
+ top of file and removal of redundant second printing of warning
+ location.
+ * gcc.dg/fixits-pr84852-2.c: Likewise.
+ * gcc.dg/missing-header-fixit-3.c: Likewise.
+ * gcc.dg/missing-header-fixit-4.c: New test.
+ * gcc.dg/plugin/diagnostic_plugin_test_show_locus.c: Update for
+ conversion of show_caret_p to a tri-state.
+
+2018-08-27 David Malcolm <dmalcolm@redhat.com>
+
+ PR c++/87091
+ * g++.dg/pr85523.C: Extend expected output to show line
+ before line-insertion fix-it hint.
+ * gcc.dg/plugin/diagnostic-test-show-locus-bw-line-numbers.c
+ (test_fixit_insert_newline): Add previous line to expected output.
+ * gcc.dg/plugin/diagnostic-test-show-locus-bw.c: Likewise.
+ * gcc.dg/plugin/diagnostic-test-show-locus-color.c: Likewise.
+
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ PR sanitizer/86962
+ * gcc.dg/asan/pr86962.c: New test.
+
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ * gcc.dg/tree-prof/val-prof-10.c: New test.
+
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ PR tree-optimization/86847
+ * gcc.dg/tree-ssa/switch-3.c: New test.
+ * gcc.dg/tree-ssa/vrp105.c: Remove.
+
+2018-08-27 Martin Liska <mliska@suse.cz>
+
+ * gcc.dg/tree-ssa/switch-2.c: New test.
+
+2018-08-27 Richard Biener <rguenther@suse.de>
+
+ * g++.dg/torture/20180705-1.C: New testcase.
+ * gcc.dg/tree-ssa/ssa-fre-67.c: Likewise.
+ * gcc.dg/tree-ssa/ssa-ccp-14.c: Scan FRE dump.
+ * gcc.dg/tree-ssa/ssa-fre-46.c: Use -O2.
+ * gcc.dg/tree-ssa/vrp92.c: Disable FRE.
+ * gcc.dg/pr83666.c: Drop --param=sccvn-max-scc-size option.
+ * gcc.dg/pr85195.c: Likewise.
+ * gcc.dg/pr85467.c: Likewise.
+ * gcc.dg/torture/pr81790.c: Likewise.
+
+ * gfortran.dg/reassoc_4.f: Change max-completely-peeled-insns
+ param to current default.
+
+2018-08-27 Jakub Jelinek <jakub@redhat.com>
+
+ PR rtl-optimization/87065
+ * gcc.target/i386/pr87065.c: New test.
+
+2018-08-26 Marek Polacek <polacek@redhat.com>
+
+ PR c++/87080
+ * g++.dg/cpp0x/Wpessimizing-move5.C: New test.
+
+ PR c++/87029, Implement -Wredundant-move.
+ * g++.dg/cpp0x/Wredundant-move1.C: New test.
+ * g++.dg/cpp0x/Wredundant-move2.C: New test.
+ * g++.dg/cpp0x/Wredundant-move3.C: New test.
+ * g++.dg/cpp0x/Wredundant-move4.C: New test.
+
+2018-08-25 Thomas Koenig <tkoenig@gcc.gnu.org>
+
+ PR libfortran/86704
+ * gfortran.dg/matmul_19.f90: New test.
+
+2018-08-25 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/86545
+ * gfortran.dg/generic_35.f90: New test case.
+
+2018-08-24 David Malcolm <dmalcolm@redhat.com>
+
+ PR c++/87091
+ * gcc.dg/missing-header-fixit-3.c: Update for changes to how
+ line spans are printed with -fdiagnostics-show-line-numbers.
+
+2018-08-24 Thomas Koenig <tkoenig@gcc.gnu.org>
+
+ PR fortran/86837
+ * gfortran.dg/implied_do_io_6.f90: New test.
+
+2018-08-24 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR middle-end/87092
+ * gcc.dg/pr87092.c: New test.
+
+2018-08-24 Marek Polacek <polacek@redhat.com>
+
+ PR c++/67012
+ PR c++/86942
+ * g++.dg/cpp0x/auto52.C: New test.
+ * g++.dg/cpp1y/auto-fn52.C: New test.
+ * g++.dg/cpp1y/auto-fn53.C: New test.
+ * g++.dg/cpp1y/auto-fn54.C: New test.
+
+2018-08-24 Richard Sandiford <richard.sandiford@arm.com>
+
+ * lib/target-supports.exp (vect_perm_supported): Only return
+ false for variable-length vectors if the permute size is not
+ a power of 2.
+ (check_effective_target_vect_perm)
+ (check_effective_target_vect_perm_byte)
+ (check_effective_target_vect_perm_short): Remove check for
+ variable-length vectors.
+ * gcc.dg/vect/slp-23.c: Add an XFAIL for variable-length SVE.
+ * gcc.dg/vect/slp-perm-10.c: Likewise.
+ * gcc.dg/vect/slp-perm-9.c: Add an XFAIL for variable-length vectors.
+
+2018-08-24 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sve/bswap_1.c: New test.
+ * gcc.target/aarch64/sve/bswap_2.c: Likewise.
+ * gcc.target/aarch64/sve/bswap_3.c: Likewise.
+
+2018-08-24 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sve/slp_perm_1.c: New test.
+ * gcc.target/aarch64/sve/slp_perm_2.c: Likewise.
+ * gcc.target/aarch64/sve/slp_perm_3.c: Likewise.
+ * gcc.target/aarch64/sve/slp_perm_4.c: Likewise.
+ * gcc.target/aarch64/sve/slp_perm_5.c: Likewise.
+ * gcc.target/aarch64/sve/slp_perm_6.c: Likewise.
+ * gcc.target/aarch64/sve/slp_perm_7.c: Likewise.
+
+2018-08-24 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR debug/79342
+ * gcc.dg/pr79342.c: New test.
+
+2018-08-23 Martin Sebor <msebor@redhat.com>
+
+ PR tree-optimization/87072
+ * gcc.dg/Warray-bounds-35.c: New test.
+
+2018-08-23 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/87024
+ * gcc.dg/pr87024.c: New testcase.
+
+2018-08-23 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.dg/vect/no-vfa-vect-depend-2.c: Remove XFAIL.
+ * gcc.dg/vect/no-vfa-vect-depend-3.c: Likewise.
+ * gcc.dg/vect/pr65947-13.c: Update for vect_fold_extract_last.
+ * gcc.dg/vect/pr80631-2.c: Likewise.
+
+2018-08-23 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/86863
+ * gfortran.dg/submodule_32.f08: New test.
+
+2018-08-22 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/86935
+ * gfortran.dg/associate_3.f90: Update error message.
+ * gfortran.dg/associate_39.f90: New test case.
+
2018-08-22 Janus Weil <janus@gcc.gnu.org>
PR fortran/86888
diff --git a/gcc/testsuite/ChangeLog.meissner b/gcc/testsuite/ChangeLog.meissner
index 730efaa3610..c19f5752247 100644
--- a/gcc/testsuite/ChangeLog.meissner
+++ b/gcc/testsuite/ChangeLog.meissner
@@ -1,3 +1,7 @@
+2018-08-30 Michael Meissner <meissner@linux.ibm.com>
+
+ Merge up to 263992.
+
2018-08-22 Michael Meissner <meissner@linux.ibm.com>
Merge up to 263784.
diff --git a/gcc/testsuite/g++.dg/Walloca1.C b/gcc/testsuite/g++.dg/Walloca1.C
index b860a4286c2..2985ac91c62 100644
--- a/gcc/testsuite/g++.dg/Walloca1.C
+++ b/gcc/testsuite/g++.dg/Walloca1.C
@@ -1,7 +1,9 @@
-/* PR middle-end/79809 */
+/* PR middle-end/79809 - ICE in alloca_call_type, at gimple-ssa-warn-alloca.c */
/* { dg-do compile } */
/* { dg-options "-Walloca-larger-than=4207115063 -Wvla-larger-than=1233877270 -O2" } */
/* { dg-require-effective-target alloca } */
int a;
-char *b = static_cast<char *>(__builtin_alloca (a)); // { dg-warning "argument to .alloca. may be too large|unbounded use of" }
+char *b = static_cast<char *>(__builtin_alloca (a));
+
+// { dg-prune-output "argument to .alloca." }
diff --git a/gcc/testsuite/g++.dg/concepts/pr85265.C b/gcc/testsuite/g++.dg/concepts/pr85265.C
new file mode 100644
index 00000000000..86124ceb712
--- /dev/null
+++ b/gcc/testsuite/g++.dg/concepts/pr85265.C
@@ -0,0 +1,6 @@
+// { dg-do compile { target c++14 } }
+// { dg-additional-options "-fconcepts" }
+
+template<typename> concept bool C = true;
+
+C{} void foo(); // { dg-error "expected identifier" }
diff --git a/gcc/testsuite/g++.dg/cpp0x/Wpessimizing-move5.C b/gcc/testsuite/g++.dg/cpp0x/Wpessimizing-move5.C
new file mode 100644
index 00000000000..02ad2113505
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/Wpessimizing-move5.C
@@ -0,0 +1,14 @@
+// PR c++/87080
+// { dg-do compile { target c++11 } }
+// { dg-options "-Wpessimizing-move" }
+
+struct a {
+ template<typename b> a &operator<<(b);
+};
+a c();
+template<typename>
+a fn2()
+{
+ int d = 42;
+ return c() << d;
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/Wredundant-move1.C b/gcc/testsuite/g++.dg/cpp0x/Wredundant-move1.C
new file mode 100644
index 00000000000..5d4a25dbc3b
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/Wredundant-move1.C
@@ -0,0 +1,106 @@
+// PR c++/87029
+// { dg-do compile { target c++11 } }
+// { dg-options "-Wredundant-move" }
+
+// Define std::move.
+namespace std {
+ template<typename _Tp>
+ struct remove_reference
+ { typedef _Tp type; };
+
+ template<typename _Tp>
+ struct remove_reference<_Tp&>
+ { typedef _Tp type; };
+
+ template<typename _Tp>
+ struct remove_reference<_Tp&&>
+ { typedef _Tp type; };
+
+ template<typename _Tp>
+ constexpr typename std::remove_reference<_Tp>::type&&
+ move(_Tp&& __t) noexcept
+ { return static_cast<typename std::remove_reference<_Tp>::type&&>(__t); }
+}
+
+struct T {
+ T() { }
+ T(const T&) { }
+ T(T&&) { }
+};
+
+struct U {
+ U() { }
+ U(const U&) { }
+ U(U&&) { }
+ U(T) { }
+};
+
+T
+fn1 (T t)
+{
+ return t;
+}
+
+T
+fn2 (T t)
+{
+ // Will use move even without std::move.
+ return std::move (t); // { dg-warning "redundant move in return statement" }
+}
+
+T
+fn3 (const T t)
+{
+ // t is const: will decay into copy.
+ return t;
+}
+
+T
+fn4 (const T t)
+{
+ // t is const: will decay into copy despite std::move, so it's redundant.
+ return std::move (t); // { dg-warning "redundant move in return statement" }
+}
+
+int
+fn5 (int i)
+{
+ // Not a class type.
+ return std::move (i);
+}
+
+T
+fn6 (T t, bool b)
+{
+ if (b)
+ throw std::move (t);
+ return std::move (t); // { dg-warning "redundant move in return statement" }
+}
+
+U
+fn7 (T t)
+{
+ // Core 1579 means we'll get a move here.
+ return t;
+}
+
+U
+fn8 (T t)
+{
+ // Core 1579 means we'll get a move here. Even without std::move.
+ return std::move (t); // { dg-warning "redundant move in return statement" }
+}
+
+T
+fn9 (T& t)
+{
+ // t is a reference and the move isn't redundant.
+ return std::move (t);
+}
+
+T
+fn10 (T&& t)
+{
+ // t is a reference and the move isn't redundant.
+ return std::move (t);
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/Wredundant-move2.C b/gcc/testsuite/g++.dg/cpp0x/Wredundant-move2.C
new file mode 100644
index 00000000000..f181afeeb84
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/Wredundant-move2.C
@@ -0,0 +1,57 @@
+// PR c++/87029
+// { dg-do compile { target c++11 } }
+// { dg-options "-Wredundant-move" }
+
+// Define std::move.
+namespace std {
+ template<typename _Tp>
+ struct remove_reference
+ { typedef _Tp type; };
+
+ template<typename _Tp>
+ struct remove_reference<_Tp&>
+ { typedef _Tp type; };
+
+ template<typename _Tp>
+ struct remove_reference<_Tp&&>
+ { typedef _Tp type; };
+
+ template<typename _Tp>
+ constexpr typename std::remove_reference<_Tp>::type&&
+ move(_Tp&& __t) noexcept
+ { return static_cast<typename std::remove_reference<_Tp>::type&&>(__t); }
+}
+
+struct T { };
+struct U { U(T); };
+
+template<typename Tp>
+T
+fn1 (T t)
+{
+ // Non-dependent type.
+ return std::move (t); // { dg-warning "redundant move in return statement" }
+}
+
+template<typename Tp1, typename Tp2>
+Tp1
+fn2 (Tp2 t)
+{
+ return std::move (t); // { dg-warning "redundant move in return statement" }
+}
+
+template<typename Tp1, typename Tp2>
+Tp1
+fn3 (Tp2 t)
+{
+ return std::move (t); // { dg-warning "redundant move in return statement" }
+}
+
+int
+main ()
+{
+ T t;
+ fn1<T>(t);
+ fn2<T, T>(t);
+ fn3<U, T>(t);
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/Wredundant-move3.C b/gcc/testsuite/g++.dg/cpp0x/Wredundant-move3.C
new file mode 100644
index 00000000000..7084134e370
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/Wredundant-move3.C
@@ -0,0 +1,43 @@
+// PR c++/87029
+// { dg-do compile { target c++11 } }
+// { dg-options "-Wredundant-move" }
+
+// Define std::move.
+namespace std {
+ template<typename _Tp>
+ struct remove_reference
+ { typedef _Tp type; };
+
+ template<typename _Tp>
+ struct remove_reference<_Tp&>
+ { typedef _Tp type; };
+
+ template<typename _Tp>
+ struct remove_reference<_Tp&&>
+ { typedef _Tp type; };
+
+ template<typename _Tp>
+ constexpr typename std::remove_reference<_Tp>::type&&
+ move(_Tp&& __t) noexcept
+ { return static_cast<typename std::remove_reference<_Tp>::type&&>(__t); }
+}
+
+struct T { };
+
+T
+fn1 (T t)
+{
+ return (1, std::move (t));
+}
+
+T
+fn2 (T t)
+{
+ return [&](){ return std::move (t); }();
+}
+
+T
+fn3 (T t)
+{
+ return [=](){ return std::move (t); }();
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/Wredundant-move4.C b/gcc/testsuite/g++.dg/cpp0x/Wredundant-move4.C
new file mode 100644
index 00000000000..aa89e46de99
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/Wredundant-move4.C
@@ -0,0 +1,86 @@
+// PR c++/87029
+// { dg-do compile { target c++11 } }
+// { dg-options "-Wredundant-move" }
+
+// Define std::move.
+namespace std {
+ template<typename _Tp>
+ struct remove_reference
+ { typedef _Tp type; };
+
+ template<typename _Tp>
+ struct remove_reference<_Tp&>
+ { typedef _Tp type; };
+
+ template<typename _Tp>
+ struct remove_reference<_Tp&&>
+ { typedef _Tp type; };
+
+ template<typename _Tp>
+ constexpr typename std::remove_reference<_Tp>::type&&
+ move(_Tp&& __t) noexcept
+ { return static_cast<typename std::remove_reference<_Tp>::type&&>(__t); }
+}
+
+struct T {
+ T() { }
+ T(const T&) { }
+ T(T&&) { }
+};
+
+struct U {
+ U() { }
+ U(const U&) { }
+ U(U&&) { }
+ U(T) { }
+};
+
+U
+fn1 (T t, bool b)
+{
+ if (b)
+ return t;
+ else
+ return std::move (t); // { dg-warning "redundant move in return statement" }
+}
+
+U
+fn2 (bool b)
+{
+ T t;
+ if (b)
+ return t;
+ else
+ return std::move (t); // { dg-warning "redundant move in return statement" }
+}
+
+U
+fn3 (bool b)
+{
+ static T t;
+ if (b)
+ return t;
+ else
+ return std::move (t);
+}
+
+T g;
+
+U
+fn4 (bool b)
+{
+ if (b)
+ return g;
+ else
+ return std::move (g);
+}
+
+long int
+fn5 (bool b)
+{
+ int i = 42;
+ if (b)
+ return i;
+ else
+ return std::move (i);
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/auto52.C b/gcc/testsuite/g++.dg/cpp0x/auto52.C
new file mode 100644
index 00000000000..9bfe7c754b5
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/auto52.C
@@ -0,0 +1,6 @@
+// PR c++/86942
+// { dg-do compile { target c++11 } }
+
+using T = auto() -> int;
+using U = void() -> int; // { dg-error "function with trailing return type not declared with .auto." }
+using W = auto(); // { dg-error "invalid use of .auto." }
diff --git a/gcc/testsuite/g++.dg/cpp1y/auto-fn52.C b/gcc/testsuite/g++.dg/cpp1y/auto-fn52.C
new file mode 100644
index 00000000000..e239bc27dc2
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1y/auto-fn52.C
@@ -0,0 +1,4 @@
+// PR c++/67012
+// { dg-do compile { target c++14 } }
+
+decltype(auto) f() -> int; // { dg-error "function with trailing return type has" }
diff --git a/gcc/testsuite/g++.dg/cpp1y/auto-fn53.C b/gcc/testsuite/g++.dg/cpp1y/auto-fn53.C
new file mode 100644
index 00000000000..720aeeb215d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1y/auto-fn53.C
@@ -0,0 +1,4 @@
+// PR c++/86942
+// { dg-do compile { target c++14 } }
+
+using T = decltype(auto) () -> int; // { dg-error "invalid use of" }
diff --git a/gcc/testsuite/g++.dg/cpp1y/auto-fn54.C b/gcc/testsuite/g++.dg/cpp1y/auto-fn54.C
new file mode 100644
index 00000000000..f3391ddfd75
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1y/auto-fn54.C
@@ -0,0 +1,3 @@
+// { dg-do compile { target c++14 } }
+
+using T = int () -> decltype(auto); // { dg-error "function with trailing return type not declared with .auto." }
diff --git a/gcc/testsuite/g++.dg/cpp1z/decomp47.C b/gcc/testsuite/g++.dg/cpp1z/decomp47.C
new file mode 100644
index 00000000000..f0d202d696a
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1z/decomp47.C
@@ -0,0 +1,32 @@
+// PR c++/87122
+// { dg-do run { target c++14 } }
+// { dg-options "" }
+
+extern "C" void abort ();
+struct S { int a, b; };
+int c;
+
+template <int N>
+void
+foo ()
+{
+ S x[4] = { { N, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 } };
+ auto f = [](auto & y) {
+ for (auto & [ u, v ] : y) // { dg-warning "structured bindings only available with" "" { target c++14_down } }
+ {
+ if ((u & 1) != 1 || v != u + 1 || u < N || u > 7 || (c & (1 << u))
+ || &u != &y[v / 2 - 1].a || &v != &y[v / 2 - 1].b)
+ abort ();
+ c |= 1 << u;
+ }
+ };
+ f (x);
+}
+
+int
+main ()
+{
+ foo<1> ();
+ if (c != 0xaa)
+ abort ();
+}
diff --git a/gcc/testsuite/g++.dg/diagnostic/missing-typename.C b/gcc/testsuite/g++.dg/diagnostic/missing-typename.C
new file mode 100644
index 00000000000..21d1ed18a60
--- /dev/null
+++ b/gcc/testsuite/g++.dg/diagnostic/missing-typename.C
@@ -0,0 +1,12 @@
+// fix-it hint for missing "typename" (PR c++/63392)
+// { dg-options "-fdiagnostics-show-caret" }
+
+template<typename T>
+class test_1 {
+ T::type x; // { dg-error "need 'typename' before 'T::type' because 'T' is a dependent scope" }
+ /* { dg-begin-multiline-output "" }
+ T::type x;
+ ^
+ typename
+ { dg-end-multiline-output "" } */
+};
diff --git a/gcc/testsuite/g++.dg/diagnostic/param-type-mismatch-2.C b/gcc/testsuite/g++.dg/diagnostic/param-type-mismatch-2.C
index 8cf2dabca64..4957f61878e 100644
--- a/gcc/testsuite/g++.dg/diagnostic/param-type-mismatch-2.C
+++ b/gcc/testsuite/g++.dg/diagnostic/param-type-mismatch-2.C
@@ -82,7 +82,10 @@ int test_4 (int first, const char *second, float third)
^~~~~~~~
{ dg-end-multiline-output "" } */
// { dg-message "no known conversion for argument 2 from 'const char\\*' to 'const char\\*\\*'" "" { target *-*-* } s4_member_1 }
- // TODO: underline the pertinent param
+ /* { dg-begin-multiline-output "" }
+ struct s4 { static int member_1 (int one, const char **two, float three); };
+ ~~~~~~~~~~~~~^~~
+ { dg-end-multiline-output "" } */
}
/* non-static member, with argname. */
@@ -103,7 +106,10 @@ int test_5 (int first, const char *second, float third)
^~~~~~~~
{ dg-end-multiline-output "" } */
// { dg-message "no known conversion for argument 2 from 'const char\\*' to 'const char\\*\\*'" "" { target *-*-* } s5_member_1 }
- // TODO: underline the pertinent param
+ /* { dg-begin-multiline-output "" }
+ struct s5 { int member_1 (int one, const char **two, float three); };
+ ~~~~~~~~~~~~~^~~
+ { dg-end-multiline-output "" } */
}
/* non-static member, with argname, via a ptr. */
@@ -123,7 +129,10 @@ int test_6 (int first, const char *second, float third, s6 *ptr)
^~~~~~~~
{ dg-end-multiline-output "" } */
// { dg-message "no known conversion for argument 2 from 'const char\\*' to 'const char\\*\\*'" "" { target *-*-* } s6_member_1 }
- // TODO: underline the pertinent param
+ /* { dg-begin-multiline-output "" }
+ struct s6 { int member_1 (int one, const char **two, float three); };
+ ~~~~~~~~~~~~~^~~
+ { dg-end-multiline-output "" } */
}
/* Template function. */
@@ -170,7 +179,10 @@ int test_8 (int first, const char *second, float third)
^~~~~~~~
{ dg-end-multiline-output "" } */
// { dg-message "no known conversion for argument 2 from 'const char\\*' to 'const char\\*\\*'" "" { target *-*-* } s8_member_1 }
- // TODO: underline the pertinent param
+ /* { dg-begin-multiline-output "" }
+ struct s8 { static int member_1 (int one, T two, float three); };
+ ~~^~~
+ { dg-end-multiline-output "" } */
}
/* Template class, non-static function. */
@@ -192,5 +204,8 @@ int test_9 (int first, const char *second, float third)
^~~~~~~~
{ dg-end-multiline-output "" } */
// { dg-message "no known conversion for argument 2 from 'const char\\*' to 'const char\\*\\*'" "" { target *-*-* } s9_member_1 }
- // TODO: underline the pertinent param
+ /* { dg-begin-multiline-output "" }
+ struct s9 { int member_1 (int one, T two, float three); };
+ ~~^~~
+ { dg-end-multiline-output "" } */
}
diff --git a/gcc/testsuite/g++.dg/diagnostic/pr86993.C b/gcc/testsuite/g++.dg/diagnostic/pr86993.C
new file mode 100644
index 00000000000..313a489ae86
--- /dev/null
+++ b/gcc/testsuite/g++.dg/diagnostic/pr86993.C
@@ -0,0 +1,13 @@
+// PR c++/86993
+// { dg-options "-fdiagnostics-show-caret" }
+
+int
+main ()
+{
+ const int i = 5; // { dg-error "assignment of read-only variable 'i'" "" { target *-*-* } .+1 }
+ i = 5 + 6;
+/* { dg-begin-multiline-output "" }
+ i = 5 + 6;
+ ~~^~~~~~~
+ { dg-end-multiline-output "" } */
+}
diff --git a/gcc/testsuite/g++.dg/other/switch4.C b/gcc/testsuite/g++.dg/other/switch4.C
new file mode 100644
index 00000000000..42acb1b0d95
--- /dev/null
+++ b/gcc/testsuite/g++.dg/other/switch4.C
@@ -0,0 +1,6 @@
+// PR c++/86546
+
+class a b; // { dg-error "aggregate" }
+void c() {
+ switch () // { dg-error "expected" }
+ case b // { dg-error "expected" }
diff --git a/gcc/testsuite/g++.dg/pr85523.C b/gcc/testsuite/g++.dg/pr85523.C
index 9cd939be54d..0ed16ff18cd 100644
--- a/gcc/testsuite/g++.dg/pr85523.C
+++ b/gcc/testsuite/g++.dg/pr85523.C
@@ -47,6 +47,7 @@ struct s5 {
i = z.i;
} // { dg-warning "no return statement in function returning non-void" }
/* { dg-begin-multiline-output "" }
+ i = z.i;
+ return *this;
}
^
@@ -63,6 +64,7 @@ struct s6 {
i = z.i;
} // { dg-warning "no return statement in function returning non-void" }
/* { dg-begin-multiline-output "" }
+ i = z.i;
+ return *this;
}
^
@@ -81,6 +83,7 @@ struct s7 {
i = z.i;
} // { dg-warning "no return statement in function returning non-void" }
/* { dg-begin-multiline-output "" }
+ i = z.i;
+ return *this;
}
^
diff --git a/gcc/testsuite/g++.dg/torture/20180705-1.C b/gcc/testsuite/g++.dg/torture/20180705-1.C
new file mode 100644
index 00000000000..8460473bee8
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/20180705-1.C
@@ -0,0 +1,30 @@
+// { dg-do compile }
+
+typedef long unsigned int size_t;
+extern void fancy_abort () __attribute__ ((__noreturn__));
+class cpp_string_location_reader { };
+class cpp_substring_ranges {
+public:
+ void add_range ();
+};
+typedef unsigned char uchar;
+void
+cpp_interpret_string_1 (size_t count, cpp_string_location_reader *loc_readers, cpp_substring_ranges *ranges, uchar c, const uchar *p)
+{
+ size_t i;
+ ((void)(!((loc_readers != __null ) == (ranges != __null )) ? fancy_abort (), 0 : 0));
+ cpp_string_location_reader *loc_reader = __null;
+ for (i = 0; i < count; i++)
+ {
+ if (loc_readers) loc_reader = &loc_readers[i];
+ if (*p == 'R') continue;
+ for (;;)
+ {
+ switch (c) {
+ case 'x': if (ranges) ranges->add_range (); break;
+ case '7': ((void)(!((loc_reader != __null ) == (ranges != __null )) ? fancy_abort (), 0 : 0)); break;
+ }
+ p = 0;
+ }
+ }
+}
diff --git a/gcc/testsuite/g++.dg/torture/pr87124.C b/gcc/testsuite/g++.dg/torture/pr87124.C
new file mode 100644
index 00000000000..3e7d480f19d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/pr87124.C
@@ -0,0 +1,12 @@
+// { dg-do compile }
+
+class A {
+ void m_fn1();
+};
+
+void A::m_fn1()
+{
+ A *a = this;
+ for (int i; i && a;)
+ a = 0;
+}
diff --git a/gcc/testsuite/g++.dg/ubsan/vptr-13.C b/gcc/testsuite/g++.dg/ubsan/vptr-13.C
new file mode 100644
index 00000000000..345581fd9d0
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/vptr-13.C
@@ -0,0 +1,19 @@
+// PR c++/87095
+// { dg-do run }
+// { dg-options "-fsanitize=vptr -fno-sanitize-recover=vptr" }
+
+struct A
+{
+ virtual ~A () {}
+};
+
+struct B : virtual A {};
+
+struct C : B {};
+
+int
+main ()
+{
+ C c;
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.c-torture/compile/dse.c b/gcc/testsuite/gcc.c-torture/compile/dse.c
new file mode 100644
index 00000000000..908e6503eb4
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/dse.c
@@ -0,0 +1,19 @@
+typedef unsigned long microblaze_reg_t;
+struct pt_regs
+{
+ microblaze_reg_t msr;
+ int pt_mode;
+};
+struct task_struct
+{
+ void *stack;
+};
+int
+copy_thread (struct task_struct *p)
+{
+ struct pt_regs *childregs =
+ (((struct pt_regs *) ((1 << 13) + ((void *) (p)->stack))) - 1);
+ memset (childregs, 0, sizeof (struct pt_regs));
+ childregs->pt_mode = 1;
+}
+
diff --git a/gcc/testsuite/gcc.c-torture/compile/pr87110.c b/gcc/testsuite/gcc.c-torture/compile/pr87110.c
new file mode 100644
index 00000000000..8428d3d120a
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/pr87110.c
@@ -0,0 +1,13 @@
+enum a { b, c };
+struct d {
+ _Bool e;
+ enum a f
+};
+g, h;
+i() {
+ struct d j[h];
+ j[0] = (struct d){.f = c};
+ for (; g;)
+ (struct d){};
+}
+
diff --git a/gcc/testsuite/gcc.c-torture/execute/memchr-1.c b/gcc/testsuite/gcc.c-torture/execute/memchr-1.c
new file mode 100644
index 00000000000..ec376322992
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/execute/memchr-1.c
@@ -0,0 +1,153 @@
+/* PR tree-optimization/86711 - wrong folding of memchr
+
+ Verify that memchr() of arrays initialized with string literals
+ where the nul doesn't fit in the array doesn't find the nul. */
+typedef __SIZE_TYPE__ size_t;
+typedef __WCHAR_TYPE__ wchar_t;
+
+extern void* memchr (const void*, int, size_t);
+
+#define A(expr) \
+ ((expr) \
+ ? (void)0 \
+ : (__builtin_printf ("assertion failed on line %i: %s\n", \
+ __LINE__, #expr), \
+ __builtin_abort ()))
+
+static const char c = '1';
+static const char s1[1] = "1";
+static const char s4[4] = "1234";
+
+static const char s4_2[2][4] = { "1234", "5678" };
+static const char s5_3[3][5] = { "12345", "6789", "01234" };
+
+volatile int v0 = 0;
+volatile int v1 = 1;
+volatile int v2 = 2;
+volatile int v3 = 3;
+volatile int v4 = 3;
+
+void test_narrow (void)
+{
+ int i0 = 0;
+ int i1 = i0 + 1;
+ int i2 = i1 + 1;
+ int i3 = i2 + 1;
+ int i4 = i3 + 1;
+
+ A (memchr ("" + 1, 0, 0) == 0);
+
+ A (memchr (&c, 0, sizeof c) == 0);
+ A (memchr (&c + 1, 0, sizeof c - 1) == 0);
+ A (memchr (&c + i1, 0, sizeof c - i1) == 0);
+ A (memchr (&c + v1, 0, sizeof c - v1) == 0);
+
+ A (memchr (s1, 0, sizeof s1) == 0);
+ A (memchr (s1 + 1, 0, sizeof s1 - 1) == 0);
+ A (memchr (s1 + i1, 0, sizeof s1 - i1) == 0);
+ A (memchr (s1 + v1, 0, sizeof s1 - v1) == 0);
+
+ A (memchr (&s1, 0, sizeof s1) == 0);
+ A (memchr (&s1 + 1, 0, sizeof s1 - 1) == 0);
+ A (memchr (&s1 + i1, 0, sizeof s1 - i1) == 0);
+ A (memchr (&s1 + v1, 0, sizeof s1 - v1) == 0);
+
+ A (memchr (&s1[0], 0, sizeof s1) == 0);
+ A (memchr (&s1[0] + 1, 0, sizeof s1 - 1) == 0);
+ A (memchr (&s1[0] + i1, 0, sizeof s1 - i1) == 0);
+ A (memchr (&s1[0] + v1, 0, sizeof s1 - v1) == 0);
+
+ A (memchr (&s1[i0], 0, sizeof s1) == 0);
+ A (memchr (&s1[i0] + 1, 0, sizeof s1 - 1) == 0);
+ A (memchr (&s1[i0] + i1, 0, sizeof s1 - i1) == 0);
+ A (memchr (&s1[i0] + v1, 0, sizeof s1 - v1) == 0);
+
+ A (memchr (&s1[v0], 0, sizeof s1) == 0);
+ A (memchr (&s1[v0] + 1, 0, sizeof s1 - 1) == 0);
+ A (memchr (&s1[v0] + i1, 0, sizeof s1 - i1) == 0);
+ A (memchr (&s1[v0] + v1, 0, sizeof s1 - v1) == 0);
+
+
+ A (memchr (s4 + i0, 0, sizeof s4 - i0) == 0);
+ A (memchr (s4 + i1, 0, sizeof s4 - i1) == 0);
+ A (memchr (s4 + i2, 0, sizeof s4 - i2) == 0);
+ A (memchr (s4 + i3, 0, sizeof s4 - i3) == 0);
+ A (memchr (s4 + i4, 0, sizeof s4 - i4) == 0);
+
+ A (memchr (s4 + v0, 0, sizeof s4 - v0) == 0);
+ A (memchr (s4 + v1, 0, sizeof s4 - v1) == 0);
+ A (memchr (s4 + v2, 0, sizeof s4 - v2) == 0);
+ A (memchr (s4 + v3, 0, sizeof s4 - v3) == 0);
+ A (memchr (s4 + v4, 0, sizeof s4 - v4) == 0);
+
+
+ A (memchr (s4_2, 0, sizeof s4_2) == 0);
+
+ A (memchr (s4_2[0], 0, sizeof s4_2[0]) == 0);
+ A (memchr (s4_2[1], 0, sizeof s4_2[1]) == 0);
+
+ A (memchr (s4_2[0] + 1, 0, sizeof s4_2[0] - 1) == 0);
+ A (memchr (s4_2[1] + 2, 0, sizeof s4_2[1] - 2) == 0);
+ A (memchr (s4_2[1] + 3, 0, sizeof s4_2[1] - 3) == 0);
+
+ A (memchr (s4_2[v0], 0, sizeof s4_2[v0]) == 0);
+ A (memchr (s4_2[v0] + 1, 0, sizeof s4_2[v0] - 1) == 0);
+
+
+ /* The following calls must find the nul. */
+ A (memchr ("", 0, 1) != 0);
+ A (memchr (s5_3, 0, sizeof s5_3) == &s5_3[1][4]);
+
+ A (memchr (&s5_3[0][0] + i0, 0, sizeof s5_3 - i0) == &s5_3[1][4]);
+ A (memchr (&s5_3[0][0] + i1, 0, sizeof s5_3 - i1) == &s5_3[1][4]);
+ A (memchr (&s5_3[0][0] + i2, 0, sizeof s5_3 - i2) == &s5_3[1][4]);
+ A (memchr (&s5_3[0][0] + i4, 0, sizeof s5_3 - i4) == &s5_3[1][4]);
+
+ A (memchr (&s5_3[1][i0], 0, sizeof s5_3[1] - i0) == &s5_3[1][4]);
+}
+
+static const wchar_t wc = L'1';
+static const wchar_t ws1[] = L"1";
+static const wchar_t ws4[] = L"\x00123456\x12005678\x12340078\x12345600";
+
+void test_wide (void)
+{
+ int i0 = 0;
+ int i1 = i0 + 1;
+ int i2 = i1 + 1;
+ int i3 = i2 + 1;
+ int i4 = i3 + 1;
+
+ A (memchr (L"" + 1, 0, 0) == 0);
+ A (memchr (&wc + 1, 0, 0) == 0);
+ A (memchr (L"\x12345678", 0, sizeof (wchar_t)) == 0);
+
+ const size_t nb = sizeof ws4;
+ const size_t nwb = sizeof (wchar_t);
+
+ const char *pws1 = (const char*)ws1;
+ const char *pws4 = (const char*)ws4;
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ A (memchr (ws1, 0, sizeof ws1) == pws1 + 1);
+
+ A (memchr (&ws4[0], 0, nb) == pws4 + 3);
+ A (memchr (&ws4[1], 0, nb - 1 * nwb) == pws4 + 1 * nwb + 2);
+ A (memchr (&ws4[2], 0, nb - 2 * nwb) == pws4 + 2 * nwb + 1);
+ A (memchr (&ws4[3], 0, nb - 3 * nwb) == pws4 + 3 * nwb + 0);
+#else
+ A (memchr (ws1, 0, sizeof ws1) == pws1 + 0);
+
+ A (memchr (&ws4[0], 0, nb) == pws4 + 0);
+ A (memchr (&ws4[1], 0, nb - 1 * nwb) == pws4 + 1 * nwb + 0);
+ A (memchr (&ws4[2], 0, nb - 2 * nwb) == pws4 + 2 * nwb + 1);
+ A (memchr (&ws4[3], 0, nb - 3 * nwb) == pws4 + 3 * nwb + 2);
+#endif
+}
+
+
+int main ()
+{
+ test_narrow ();
+ test_wide ();
+}
diff --git a/gcc/testsuite/gcc.c-torture/execute/pr86714.c b/gcc/testsuite/gcc.c-torture/execute/pr86714.c
new file mode 100644
index 00000000000..3ad68522e71
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/execute/pr86714.c
@@ -0,0 +1,26 @@
+/* PR tree-optimization/86714 - tree-ssa-forwprop.c confused by too
+ long initializer
+
+ The excessively long initializer for a[0] is undefined but this
+ test verifies that the excess elements are not considered a part
+ of the value of the array as a matter of QoI. */
+
+const char a[2][3] = { "1234", "xyz" };
+char b[6];
+
+void *pb = b;
+
+int main ()
+{
+ __builtin_memcpy (b, a, 4);
+ __builtin_memset (b + 4, 'a', 2);
+
+ if (b[0] != '1' || b[1] != '2' || b[2] != '3'
+ || b[3] != 'x' || b[4] != 'a' || b[5] != 'a')
+ __builtin_abort ();
+
+ if (__builtin_memcmp (pb, "123xaa", 6))
+ __builtin_abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.c-torture/execute/pr87053.c b/gcc/testsuite/gcc.c-torture/execute/pr87053.c
new file mode 100644
index 00000000000..0170731860b
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/execute/pr87053.c
@@ -0,0 +1,17 @@
+/* PR middle-end/87053 */
+
+const union
+{ struct {
+ char x[4];
+ char y[4];
+ };
+ struct {
+ char z[8];
+ };
+} u = {{"1234", "567"}};
+
+int main ()
+{
+ if (__builtin_strlen (u.z) != 7)
+ __builtin_abort ();
+}
diff --git a/gcc/testsuite/gcc.c-torture/execute/widechar-3.c b/gcc/testsuite/gcc.c-torture/execute/widechar-3.c
new file mode 100644
index 00000000000..0810c7dd5f4
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/execute/widechar-3.c
@@ -0,0 +1,26 @@
+extern void abort (void);
+extern void exit (int);
+
+static int f(char *x)
+{
+ return __builtin_strlen(x);
+}
+
+int foo ()
+{
+ return f((char*)&L"abcdef"[0]);
+}
+
+
+int
+main()
+{
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ if (foo () != 0)
+ abort ();
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ if (foo () != 1)
+ abort ();
+#endif
+ exit (0);
+}
diff --git a/gcc/testsuite/gcc.dg/Warray-bounds-35.c b/gcc/testsuite/gcc.dg/Warray-bounds-35.c
new file mode 100644
index 00000000000..b3ad3625206
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/Warray-bounds-35.c
@@ -0,0 +1,15 @@
+/* PR tree-optimization/87072 - g++6.2.0 false warning: array subscript
+ is above array bounds, with misleading line number
+ { dg-do compile }
+ { dg-options "-O3 -Wall" } */
+
+int a[10];
+
+void f (unsigned n)
+{
+ for (unsigned j = 0; j < n; j++) {
+ for (unsigned k = 0; k < j; k++)
+ a[j] += k; /* { dg-bogus "\\\[-Warray-bounds]" } */
+ a[j] += j;
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/asan/pr86962.c b/gcc/testsuite/gcc.dg/asan/pr86962.c
new file mode 100644
index 00000000000..7a8cfa98f0e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/asan/pr86962.c
@@ -0,0 +1,13 @@
+/* PR sanitizer/86962 */
+/* { dg-do compile } */
+
+extern int dummy (int *);
+
+void foo(int i)
+{
+ int j=i;
+
+ void bar() { int x=j, y=i; }
+
+ dummy(&i);
+}
diff --git a/gcc/testsuite/gcc.dg/empty.h b/gcc/testsuite/gcc.dg/empty.h
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/empty.h
diff --git a/gcc/testsuite/gcc.dg/fixits-pr84852-1.c b/gcc/testsuite/gcc.dg/fixits-pr84852-1.c
index 98087abd929..346626b4eb9 100644
--- a/gcc/testsuite/gcc.dg/fixits-pr84852-1.c
+++ b/gcc/testsuite/gcc.dg/fixits-pr84852-1.c
@@ -13,13 +13,10 @@
int foo (void) { return strlen(""); }
/* { dg-warning "incompatible implicit declaration of built-in function 'strlen'" "" { target *-*-* } -812156810 } */
-/* { dg-message "include '<string.h>' or provide a declaration of 'strlen'" "" { target *-*-* } -812156810 } */
+/* { dg-message "include '<string.h>' or provide a declaration of 'strlen'" "" { target *-*-* } 1 } */
#if 0
{ dg-begin-multiline-output "" }
+#include <string.h>
/* This is padding (to avoid the output containing DejaGnu directives). */
{ dg-end-multiline-output "" }
#endif
-
-/* We need this, to consume a stray line marker for the bogus line. */
-/* { dg-regexp ".*fixits-pr84852-1.c:-812156810:25:" } */
diff --git a/gcc/testsuite/gcc.dg/fixits-pr84852-2.c b/gcc/testsuite/gcc.dg/fixits-pr84852-2.c
index 0674ef54689..9bc70f59b59 100644
--- a/gcc/testsuite/gcc.dg/fixits-pr84852-2.c
+++ b/gcc/testsuite/gcc.dg/fixits-pr84852-2.c
@@ -13,13 +13,10 @@
int foo (void) { return strlen(""); }
/* { dg-warning "incompatible implicit declaration of built-in function 'strlen'" "" { target *-*-* } -812156810 } */
-/* { dg-message "include '<string.h>' or provide a declaration of 'strlen'" "" { target *-*-* } -812156810 } */
+/* { dg-message "include '<string.h>' or provide a declaration of 'strlen'" "" { target *-*-* } 1 } */
#if 0
{ dg-begin-multiline-output "" }
+#include <string.h>
/* This is padding (to avoid the output containing DejaGnu directives). */
{ dg-end-multiline-output "" }
#endif
-
-/* We need this, to consume a stray line marker for the bogus line. */
-/* { dg-regexp ".*fixits-pr84852-2.c:-812156810:25:" } */
diff --git a/gcc/testsuite/gcc.dg/lvalue-5.c b/gcc/testsuite/gcc.dg/lvalue-5.c
index 514f35ed802..ff3598ae8a7 100644
--- a/gcc/testsuite/gcc.dg/lvalue-5.c
+++ b/gcc/testsuite/gcc.dg/lvalue-5.c
@@ -1,7 +1,7 @@
/* Test assignment to elements of a string literal is a warning, not
an error. PR 27676. */
/* { dg-do compile } */
-/* { dg-options "-pedantic-errors" } */
+/* { dg-options "-O -pedantic-errors" } */
void
f (void)
diff --git a/gcc/testsuite/gcc.dg/missing-header-fixit-3.c b/gcc/testsuite/gcc.dg/missing-header-fixit-3.c
index 8f2fb5b044a..a692b4d21b3 100644
--- a/gcc/testsuite/gcc.dg/missing-header-fixit-3.c
+++ b/gcc/testsuite/gcc.dg/missing-header-fixit-3.c
@@ -7,21 +7,15 @@
void test (int i, int j)
{
printf ("%i of %i\n", i, j); /* { dg-warning "implicit declaration" } */
- /* { dg-message "include '<stdio.h>' or provide a declaration of 'printf'" "" { target *-*-* } .-1 } */
+ /* { dg-message "include '<stdio.h>' or provide a declaration of 'printf'" "" { target *-*-* } 1 } */
#if 0
/* { dg-begin-multiline-output "" }
9 | printf ("%i of %i\n", i, j);
| ^~~~~~
{ dg-end-multiline-output "" } */
-/* { dg-regexp ".*missing-header-fixit-3.c:1:1:" } */
/* { dg-begin-multiline-output "" }
+ |+#include <stdio.h>
1 | /* Example of a fix-it hint that adds a #include directive,
{ dg-end-multiline-output "" } */
-/* { dg-regexp ".*missing-header-fixit-3.c:9:3:" } */
-/* { dg-begin-multiline-output "" }
-9 | printf ("%i of %i\n", i, j);
- | ^~~~~~
- { dg-end-multiline-output "" } */
#endif
}
diff --git a/gcc/testsuite/gcc.dg/missing-header-fixit-4.c b/gcc/testsuite/gcc.dg/missing-header-fixit-4.c
new file mode 100644
index 00000000000..0ed3e2c2922
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/missing-header-fixit-4.c
@@ -0,0 +1,23 @@
+/* Example of a fix-it hint that adds a #include directive,
+ adding them after a pre-existing #include directive. */
+#include "empty.h"
+int the_next_line;
+
+/* { dg-options "-fdiagnostics-show-caret -fdiagnostics-show-line-numbers" } */
+
+void test (int i, int j)
+{
+ printf ("%i of %i\n", i, j); /* { dg-line printf } */
+ /* { dg-warning "implicit declaration of function" "" { target *-*-* } printf } */
+ /* { dg-warning "incompatible implicit declaration" "" { target *-*-* } printf } */
+ /* { dg-begin-multiline-output "" }
+10 | printf ("%i of %i\n", i, j);
+ | ^~~~~~
+ { dg-end-multiline-output "" } */
+ /* { dg-message "include '<stdio.h>' or provide a declaration of 'printf'" "" { target *-*-* } 4 } */
+ /* { dg-begin-multiline-output "" }
+3 | #include "empty.h"
++ |+#include <stdio.h>
+4 | int the_next_line;
+ { dg-end-multiline-output "" } */
+}
diff --git a/gcc/testsuite/gcc.dg/plugin/diagnostic-test-show-locus-bw-line-numbers.c b/gcc/testsuite/gcc.dg/plugin/diagnostic-test-show-locus-bw-line-numbers.c
index f2bbc5854dc..63e585528fa 100644
--- a/gcc/testsuite/gcc.dg/plugin/diagnostic-test-show-locus-bw-line-numbers.c
+++ b/gcc/testsuite/gcc.dg/plugin/diagnostic-test-show-locus-bw-line-numbers.c
@@ -111,6 +111,7 @@ void test_fixit_insert_newline (void)
x = b;
}
/* { dg-begin-multiline-output "" }
+109 | x = a;
+++ |+ break;
110 | case 'b':
| ^~~~~~~~
diff --git a/gcc/testsuite/gcc.dg/plugin/diagnostic-test-show-locus-bw.c b/gcc/testsuite/gcc.dg/plugin/diagnostic-test-show-locus-bw.c
index bdfa420d849..be6f103ba6a 100644
--- a/gcc/testsuite/gcc.dg/plugin/diagnostic-test-show-locus-bw.c
+++ b/gcc/testsuite/gcc.dg/plugin/diagnostic-test-show-locus-bw.c
@@ -332,6 +332,7 @@ void test_fixit_insert_newline (void)
x = b;
}
/* { dg-begin-multiline-output "" }
+ x = a;
+ break;
case 'b':
^~~~~~~~
diff --git a/gcc/testsuite/gcc.dg/plugin/diagnostic-test-show-locus-color.c b/gcc/testsuite/gcc.dg/plugin/diagnostic-test-show-locus-color.c
index 094bc6535d5..7ae38019ffd 100644
--- a/gcc/testsuite/gcc.dg/plugin/diagnostic-test-show-locus-color.c
+++ b/gcc/testsuite/gcc.dg/plugin/diagnostic-test-show-locus-color.c
@@ -217,6 +217,7 @@ void test_fixit_insert_newline (void)
x = b;
}
/* { dg-begin-multiline-output "" }
+ x = a;
+ break;
case 'b':
^~~~~~~~
diff --git a/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_show_locus.c b/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_show_locus.c
index 3d7853813ae..a55efafddff 100644
--- a/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_show_locus.c
+++ b/gcc/testsuite/gcc.dg/plugin/diagnostic_plugin_test_show_locus.c
@@ -145,9 +145,11 @@ custom_diagnostic_finalizer (diagnostic_context *context,
static void
add_range (rich_location *richloc, location_t start, location_t finish,
- bool show_caret_p, const range_label *label = NULL)
+ enum range_display_kind range_display_kind
+ = SHOW_RANGE_WITHOUT_CARET,
+ const range_label *label = NULL)
{
- richloc->add_range (make_location (start, start, finish), show_caret_p,
+ richloc->add_range (make_location (start, start, finish), range_display_kind,
label);
}
@@ -176,8 +178,8 @@ test_show_locus (function *fun)
{
const int line = fnstart_line + 2;
rich_location richloc (line_table, get_loc (line, 15));
- add_range (&richloc, get_loc (line, 10), get_loc (line, 14), false);
- add_range (&richloc, get_loc (line, 16), get_loc (line, 16), false);
+ add_range (&richloc, get_loc (line, 10), get_loc (line, 14));
+ add_range (&richloc, get_loc (line, 16), get_loc (line, 16));
warning_at (&richloc, 0, "test");
}
@@ -185,8 +187,8 @@ test_show_locus (function *fun)
{
const int line = fnstart_line + 2;
rich_location richloc (line_table, get_loc (line, 24));
- add_range (&richloc, get_loc (line, 6), get_loc (line, 22), false);
- add_range (&richloc, get_loc (line, 26), get_loc (line, 43), false);
+ add_range (&richloc, get_loc (line, 6), get_loc (line, 22));
+ add_range (&richloc, get_loc (line, 26), get_loc (line, 43));
warning_at (&richloc, 0, "test");
}
@@ -195,9 +197,8 @@ test_show_locus (function *fun)
const int line = fnstart_line + 2;
text_range_label label ("label");
rich_location richloc (line_table, get_loc (line + 1, 7), &label);
- add_range (&richloc, get_loc (line, 7), get_loc (line, 23), false);
- add_range (&richloc, get_loc (line + 1, 9), get_loc (line + 1, 26),
- false);
+ add_range (&richloc, get_loc (line, 7), get_loc (line, 23));
+ add_range (&richloc, get_loc (line + 1, 9), get_loc (line + 1, 26));
warning_at (&richloc, 0, "test");
}
@@ -208,10 +209,10 @@ test_show_locus (function *fun)
text_range_label label1 ("label 1");
text_range_label label2 ("label 2");
rich_location richloc (line_table, get_loc (line + 5, 7), &label0);
- add_range (&richloc, get_loc (line, 7), get_loc (line + 4, 65), false,
- &label1);
+ add_range (&richloc, get_loc (line, 7), get_loc (line + 4, 65),
+ SHOW_RANGE_WITHOUT_CARET, &label1);
add_range (&richloc, get_loc (line + 5, 9), get_loc (line + 10, 61),
- false, &label2);
+ SHOW_RANGE_WITHOUT_CARET, &label2);
warning_at (&richloc, 0, "test");
}
@@ -250,7 +251,8 @@ test_show_locus (function *fun)
get_loc (line, 90),
get_loc (line, 98)),
&label0);
- richloc.add_range (get_loc (line, 35), false, &label1);
+ richloc.add_range (get_loc (line, 35), SHOW_RANGE_WITHOUT_CARET,
+ &label1);
richloc.add_fixit_replace ("bar * foo");
warning_at (&richloc, 0, "test");
global_dc->show_ruler_p = false;
@@ -270,7 +272,8 @@ test_show_locus (function *fun)
get_loc (line, 98)),
&label0);
richloc.add_fixit_replace ("bar * foo");
- richloc.add_range (get_loc (line, 34), false, &label1);
+ richloc.add_range (get_loc (line, 34), SHOW_RANGE_WITHOUT_CARET,
+ &label1);
warning_at (&richloc, 0, "test");
global_dc->show_ruler_p = false;
}
@@ -282,7 +285,7 @@ test_show_locus (function *fun)
location_t caret_a = get_loc (line, 7);
location_t caret_b = get_loc (line, 11);
rich_location richloc (line_table, caret_a);
- add_range (&richloc, caret_b, caret_b, true);
+ add_range (&richloc, caret_b, caret_b, SHOW_RANGE_WITH_CARET);
global_dc->caret_chars[0] = 'A';
global_dc->caret_chars[1] = 'B';
warning_at (&richloc, 0, "test");
@@ -400,7 +403,7 @@ test_show_locus (function *fun)
location_t caret_a = get_loc (line, 5);
location_t caret_b = get_loc (line - 1, 19);
rich_location richloc (line_table, caret_a);
- richloc.add_range (caret_b, true);
+ richloc.add_range (caret_b, SHOW_RANGE_WITH_CARET);
global_dc->caret_chars[0] = '1';
global_dc->caret_chars[1] = '2';
warning_at (&richloc, 0, "test");
@@ -449,7 +452,7 @@ test_show_locus (function *fun)
location_t word
= make_location (start_of_word, start_of_word,
end_of_word);
- richloc.add_range (word, true, &label);
+ richloc.add_range (word, SHOW_RANGE_WITH_CARET, &label);
/* Add a fixit, converting to upper case. */
char_span word_span = content.subspan (start_idx, idx - start_idx);
diff --git a/gcc/testsuite/gcc.dg/pr79342.c b/gcc/testsuite/gcc.dg/pr79342.c
new file mode 100644
index 00000000000..958de55d09b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr79342.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -gsplit-dwarf -g3" } */
+/* { dg-additional-options "-march=skylake -mrtm -mabm" { target x86_64-*-* i?86-*-* } } */
+
+int a;
+void b(void);
+
+void e(int *);
+int f(void);
+
+void
+c(void)
+{
+ int d;
+ e(&d);
+ if (f())
+ b();
+}
diff --git a/gcc/testsuite/gcc.dg/pr83666.c b/gcc/testsuite/gcc.dg/pr83666.c
index 7bd1ed3ceb2..1c43c438544 100644
--- a/gcc/testsuite/gcc.dg/pr83666.c
+++ b/gcc/testsuite/gcc.dg/pr83666.c
@@ -1,6 +1,6 @@
/* PR debug/83666 */
/* { dg-do compile } */
-/* { dg-options "-O2 -g --param=sccvn-max-scc-size=10 -Wno-psabi" } */
+/* { dg-options "-O2 -g -Wno-psabi" } */
/* { dg-additional-options "-fno-common" { target hppa*-*-hpux* } } */
typedef int __attribute__ ((vector_size (64))) V;
diff --git a/gcc/testsuite/gcc.dg/pr85195.c b/gcc/testsuite/gcc.dg/pr85195.c
index 0cc696ba6ee..fb21ee97c5c 100644
--- a/gcc/testsuite/gcc.dg/pr85195.c
+++ b/gcc/testsuite/gcc.dg/pr85195.c
@@ -1,6 +1,6 @@
/* PR middle-end/85195 */
/* { dg-do compile { target int128 } } */
-/* { dg-options "-Wno-psabi -O -fno-tree-ccp --param=sccvn-max-scc-size=10" } */
+/* { dg-options "-Wno-psabi -O -fno-tree-ccp" } */
typedef __int128 V __attribute__ ((vector_size (16)));
diff --git a/gcc/testsuite/gcc.dg/pr85467.c b/gcc/testsuite/gcc.dg/pr85467.c
index 4895e37d0f9..fad94fcd83e 100644
--- a/gcc/testsuite/gcc.dg/pr85467.c
+++ b/gcc/testsuite/gcc.dg/pr85467.c
@@ -1,6 +1,6 @@
/* PR tree-optimization/85467 */
/* { dg-do compile } */
-/* { dg-options "-O2 -fno-tree-ccp --param=sccvn-max-scc-size=10" } */
+/* { dg-options "-O2 -fno-tree-ccp" } */
#define TEST(N, T) \
typedef T V##N __attribute__ ((__vector_size__ (sizeof (T)))); \
diff --git a/gcc/testsuite/gcc.dg/pr87009.c b/gcc/testsuite/gcc.dg/pr87009.c
new file mode 100644
index 00000000000..eb8a4ecd920
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr87009.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-options "-O -fdump-tree-original" } */
+/* { dg-final { scan-tree-dump-times "return s \\^ x;" 4 "original" } } */
+
+int f1 (int x, int s)
+{
+ return ~(~(x|s)|x)|~(~(x|s)|s);
+}
+
+int f2 (int x, int s)
+{
+ return ~(~(~x&s)&~(x&~s));
+}
+
+int f3 (int x, int s)
+{
+ return ~((x|~s)&(~x|s));
+}
+
+int f4 (int x, int s)
+{
+ return (x|~s)^(~x|s);
+}
diff --git a/gcc/testsuite/gcc.dg/pr87024.c b/gcc/testsuite/gcc.dg/pr87024.c
new file mode 100644
index 00000000000..a8a58aafc26
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr87024.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-O -fno-tree-dce" } */
+
+static inline void __attribute__((always_inline))
+mp ()
+{
+ (void) __builtin_va_arg_pack_len ();
+}
+
+void
+ui (void)
+{
+ mp ();
+}
diff --git a/gcc/testsuite/gcc.dg/pr87092.c b/gcc/testsuite/gcc.dg/pr87092.c
new file mode 100644
index 00000000000..4a6faebbd93
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr87092.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fwrapv" } */
+
+int a, b;
+
+void
+c(void) {
+ if (b)
+ b = a / b;
+}
diff --git a/gcc/testsuite/gcc.dg/pr87099.c b/gcc/testsuite/gcc.dg/pr87099.c
new file mode 100644
index 00000000000..599d721da31
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr87099.c
@@ -0,0 +1,21 @@
+/* PR middle-end/87099 */
+/* { dg-do compile } */
+/* { dg-options "-Wstringop-overflow" } */
+
+void bar (char *);
+
+int
+foo (int n)
+{
+ char v[n];
+ bar (v);
+ return __builtin_strncmp (&v[1], "aaa", 3);
+}
+
+int
+baz (int n, char *s)
+{
+ char v[n];
+ bar (v);
+ return __builtin_strncmp (&v[1], s, 3);
+}
diff --git a/gcc/testsuite/gcc.dg/pr87112.c b/gcc/testsuite/gcc.dg/pr87112.c
new file mode 100644
index 00000000000..7510956be4e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr87112.c
@@ -0,0 +1,31 @@
+/* PR tree-optimization/87112 - ICE due to strnlen mixing integer types
+ { dg-do compile }
+ { dg-options "-Os -Wall" } */
+
+typedef __SIZE_TYPE__ size_t;
+
+extern size_t strnlen (const char*, size_t);
+
+size_t fi (int i)
+{
+ int n = i & 3;
+ return strnlen ("int", n);
+}
+
+size_t fui (unsigned i)
+{
+ unsigned n = i & 3;
+ return strnlen ("unsigned", n);
+}
+
+size_t fl (long i)
+{
+ long n = i & 3;
+ return strnlen ("long", n);
+}
+
+size_t fsz (size_t i)
+{
+ size_t n = i & 3;
+ return strnlen ("size_t", n);
+}
diff --git a/gcc/testsuite/gcc.dg/pr87117-1.c b/gcc/testsuite/gcc.dg/pr87117-1.c
new file mode 100644
index 00000000000..06d700871be
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr87117-1.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fno-inline -fno-tree-dce" } */
+
+int a, b, c;
+long *d;
+void fn1()
+{
+ for (; 0 < a;)
+ a++;
+}
+void fn3()
+{
+ for (; c; c++)
+ d[c] = 0;
+}
+void fn2()
+{
+ if (b)
+ fn3();
+ fn1();
+}
diff --git a/gcc/testsuite/gcc.dg/pr87117-2.c b/gcc/testsuite/gcc.dg/pr87117-2.c
new file mode 100644
index 00000000000..aefa813918b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr87117-2.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-O -fcode-hoisting" } */
+
+void e();
+
+void a(int c, char **d)
+{
+ char b;
+ if (1 < c)
+ b = (char)(__INTPTR_TYPE__)d[0];
+ if (1 < c && b)
+ e();
+ while (1 < c)
+ ;
+}
diff --git a/gcc/testsuite/gcc.dg/strlenopt-57.c b/gcc/testsuite/gcc.dg/strlenopt-57.c
new file mode 100644
index 00000000000..49dc8cd6fbb
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/strlenopt-57.c
@@ -0,0 +1,49 @@
+/* PR tree-optimization/86914 - wrong code with strlen() of poor-man's
+ flexible array member plus offset
+ { dg-do compile }
+ { dg-options "-O2 -Wall -fdump-tree-optimized" } */
+
+#include "strlenopt.h"
+
+struct A0 { char i, a[0]; };
+struct A1 { char i, a[1]; };
+struct A9 { char i, a[9]; };
+struct Ax { char i, a[]; };
+
+extern int a[];
+
+extern struct A0 a0;
+extern struct A1 a1;
+extern struct A9 a9;
+extern struct Ax ax;
+
+void test_var_flexarray_cst_off (void)
+{
+ /* Use arbitrary constants greater than 16 in case GCC ever starts
+ unrolling strlen() calls with small array arguments. */
+ a[0] = 17 < strlen (a0.a + 1);
+ a[1] = 19 < strlen (a1.a + 1);
+ a[2] = 23 < strlen (a9.a + 9);
+ a[3] = 29 < strlen (ax.a + 3);
+}
+
+void test_ptr_flexarray_cst_off (struct A0 *p0, struct A1 *p1,
+ struct A9 *p9, struct Ax *px)
+{
+ a[0] = 17 < strlen (p0->a + 1);
+ a[1] = 19 < strlen (p1->a + 1);
+ a[2] = 23 < strlen (p9->a + 9);
+ a[3] = 29 < strlen (px->a + 3);
+}
+
+void test_ptr_flexarray_var_off (struct A0 *p0, struct A1 *p1,
+ struct A9 *p9, struct Ax *px,
+ int i)
+{
+ a[0] = 17 < strlen (p0->a + i);
+ a[1] = 19 < strlen (p1->a + i);
+ a[2] = 23 < strlen (p9->a + i);
+ a[3] = 29 < strlen (px->a + i);
+}
+
+/* { dg-final { scan-tree-dump-times "strlen" 12 "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/strlenopt-58.c b/gcc/testsuite/gcc.dg/strlenopt-58.c
new file mode 100644
index 00000000000..e0e80680936
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/strlenopt-58.c
@@ -0,0 +1,93 @@
+/* PR tree-optimization/86711 - wrong folding of memchr
+
+ Verify that calls to memchr() with constant arrays initialized
+ with wide string literals are folded.
+
+ { dg-do compile }
+ { dg-options "-O1 -Wall -fdump-tree-optimized" } */
+
+#include "strlenopt.h"
+
+typedef __WCHAR_TYPE__ wchar_t;
+
+extern void* memchr (const void*, int, size_t);
+
+#define CONCAT(x, y) x ## y
+#define CAT(x, y) CONCAT (x, y)
+#define FAILNAME(name) CAT (call_ ## name ##_on_line_, __LINE__)
+
+#define FAIL(name) do { \
+ extern void FAILNAME (name) (void); \
+ FAILNAME (name)(); \
+ } while (0)
+
+/* Macro to emit a call to a function named
+ call_in_true_branch_not_eliminated_on_line_NNN()
+ for each call that's expected to be eliminated. The dg-final
+ scan-tree-dump-times directive at the bottom of the test verifies
+ that no such call appears in output. */
+#define ELIM(expr) \
+ if (!(expr)) FAIL (in_true_branch_not_eliminated); else (void)0
+
+#define T(s, n) ELIM (strlen (s) == n)
+
+
+static const wchar_t wc = L'1';
+static const wchar_t ws1[] = L"1";
+static const wchar_t wsx[] = L"\x12345678";
+static const wchar_t ws4[] = L"\x00123456\x12005678\x12340078\x12345600";
+
+void test_wide (void)
+{
+ int i0 = 0;
+ int i1 = i0 + 1;
+ int i2 = i1 + 1;
+ int i3 = i2 + 1;
+ int i4 = i3 + 1;
+
+ ELIM (memchr (L"" + 1, 0, 0) == 0);
+ ELIM (memchr (&wc + 1, 0, 0) == 0);
+ ELIM (memchr (L"\x12345678", 0, sizeof (wchar_t)) == 0);
+
+ const size_t nb = sizeof ws4;
+ const size_t nwb = sizeof (wchar_t);
+
+ const char *pws1 = (const char*)ws1;
+ const char *pws4 = (const char*)ws4;
+ const char *pwsx = (const char*)wsx;
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ ELIM (memchr (ws1, 0, sizeof ws1) == pws1 + 1);
+ ELIM (memchr (wsx, 0, sizeof wsx) == pwsx + sizeof *wsx);
+
+ ELIM (memchr (&ws4[0], 0, nb) == pws4 + 3);
+ ELIM (memchr (&ws4[1], 0, nb - 1 * nwb) == pws4 + 1 * nwb + 2);
+ ELIM (memchr (&ws4[2], 0, nb - 2 * nwb) == pws4 + 2 * nwb + 1);
+ ELIM (memchr (&ws4[3], 0, nb - 3 * nwb) == pws4 + 3 * nwb + 0);
+ ELIM (memchr (&ws4[4], 0, nb - 4 * nwb) == pws4 + 4 * nwb + 0);
+
+ ELIM (memchr (&ws4[i0], 0, nb) == pws4 + 3);
+ ELIM (memchr (&ws4[i1], 0, nb - 1 * nwb) == pws4 + 1 * nwb + 2);
+ ELIM (memchr (&ws4[i2], 0, nb - 2 * nwb) == pws4 + 2 * nwb + 1);
+ ELIM (memchr (&ws4[i3], 0, nb - 3 * nwb) == pws4 + 3 * nwb + 0);
+ ELIM (memchr (&ws4[i4], 0, nb - 4 * nwb) == pws4 + 4 * nwb + 0);
+#else
+ ELIM (memchr (ws1, 0, sizeof ws1) == pws1 + 0);
+ ELIM (memchr (wsx, 0, sizeof wsx) == pwsx + sizeof *wsx);
+
+ ELIM (memchr (&ws4[0], 0, nb) == pws4 + 0);
+ ELIM (memchr (&ws4[1], 0, nb - 1 * nwb) == pws4 + 1 * nwb + 1);
+ ELIM (memchr (&ws4[2], 0, nb - 2 * nwb) == pws4 + 2 * nwb + 2);
+ ELIM (memchr (&ws4[3], 0, nb - 3 * nwb) == pws4 + 3 * nwb + 3);
+ ELIM (memchr (&ws4[4], 0, nb - 4 * nwb) == pws4 + 4 * nwb + 0);
+
+ ELIM (memchr (&ws4[i0], 0, nb) == pws4 + 0);
+ ELIM (memchr (&ws4[i1], 0, nb - 1 * nwb) == pws4 + 1 * nwb + 1);
+ ELIM (memchr (&ws4[i2], 0, nb - 2 * nwb) == pws4 + 2 * nwb + 2);
+ ELIM (memchr (&ws4[i3], 0, nb - 3 * nwb) == pws4 + 3 * nwb + 3);
+ ELIM (memchr (&ws4[i4], 0, nb - 4 * nwb) == pws4 + 4 * nwb + 0);
+#endif
+}
+
+/* { dg-final { scan-tree-dump-times "memchr" 0 "optimized" } }
+ { dg-final { scan-tree-dump-times "call_in_true_branch_not_eliminated" 0 "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/torture/pr81790.c b/gcc/testsuite/gcc.dg/torture/pr81790.c
index b5e02f6c1dd..b69946f9d10 100644
--- a/gcc/testsuite/gcc.dg/torture/pr81790.c
+++ b/gcc/testsuite/gcc.dg/torture/pr81790.c
@@ -1,5 +1,4 @@
/* { dg-do compile } */
-/* { dg-additional-options "--param sccvn-max-scc-size=10" } */
typedef int a __attribute__ ((__vector_size__ (16)));
typedef struct
diff --git a/gcc/testsuite/gcc.dg/torture/pr87132.c b/gcc/testsuite/gcc.dg/torture/pr87132.c
new file mode 100644
index 00000000000..48b8673bba3
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr87132.c
@@ -0,0 +1,18 @@
+/* { dg-do run } */
+
+extern void abort (void);
+int c, d;
+int main()
+{
+ int e[] = {4, 4, 4, 4, 4, 4, 4, 4, 4};
+ d = 8;
+ for (; d; d--)
+ for (int a = 0; a <= 8; a++)
+ {
+ c = e[1];
+ e[d] = 0;
+ }
+ if (c != 0)
+ abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/torture/pr87147.c b/gcc/testsuite/gcc.dg/torture/pr87147.c
new file mode 100644
index 00000000000..385cfce7201
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr87147.c
@@ -0,0 +1,22 @@
+/* { dg-do run } */
+
+short a;
+long b;
+int c, d, g;
+char e, h;
+long f[] = {0};
+int main()
+{
+ int i = 1;
+ for (; a <= 3; a++) {
+ c = 0;
+ for (; c <= 2; c++) {
+ b = 0;
+ for (; b <= 3; b++) {
+ h = i && f[d];
+ e = g && i;
+ i = 0;
+ }
+ }
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tree-prof/val-prof-10.c b/gcc/testsuite/gcc.dg/tree-prof/val-prof-10.c
new file mode 100644
index 00000000000..57854b5911b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-prof/val-prof-10.c
@@ -0,0 +1,31 @@
+/* { dg-options "-O2 -fdump-rtl-expand -mtune=core2" } */
+/* { dg-skip-if "" { ! { i?86-*-* x86_64-*-* } } } */
+
+long buffer1[128], buffer2[128];
+char *x;
+
+void foo(long *r)
+{
+ x = (char *)r;
+ asm volatile("" ::: "memory");
+}
+
+void
+__attribute__((noinline))
+compute()
+{
+ volatile int n = 24;
+ __builtin_memcpy (buffer1, buffer2, n);
+ foo (&buffer1[0]);
+}
+
+int
+main()
+{
+ for (unsigned i = 0; i < 10000; i++)
+ compute ();
+
+ return 0;
+}
+
+/* { dg-final-use-not-autofdo { scan-rtl-dump "Selected stringop expansion strategy: rep_byte" "expand" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr87126.c b/gcc/testsuite/gcc.dg/tree-ssa/pr87126.c
new file mode 100644
index 00000000000..37232ba3a0a
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr87126.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-fre1" } */
+
+int a, *b;
+
+void f ()
+{
+ int d = 0, e = d;
+ while (a++)
+ ;
+ if (e)
+ goto L2;
+L1:
+ d = e;
+ b = &d;
+L2:
+ if (d)
+ goto L1;
+}
+
+/* The load of d could be eliminated if we'd value-number the
+ irreducible region in RPO of the reducible result. Likewise
+ a redundant store could be removed. */
+/* { dg-final { scan-tree-dump-times "d = 0;" 1 "fre1" { xfail *-*-* } } } */
+/* { dg-final { scan-tree-dump-not " = d;" "fre1" { xfail *-*-* } } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-14.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-14.c
index 3955bddcf59..65e2d5a664b 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-14.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-14.c
@@ -1,8 +1,5 @@
-/* PR tree-optimization/29738. We used not to realize that "i" can never
- become nonzero. */
-
/* { dg-do compile } */
-/* { dg-options "-O2 -fdump-tree-optimized" } */
+/* { dg-options "-O2 -fdump-tree-fre1 -fdump-tree-optimized" } */
int i;
@@ -16,5 +13,7 @@ void bar (void)
foo ();
}
-/* Everything except for the "i = 0" assignment should get removed. */
-/* { dg-final { scan-tree-dump-times "if" 0 "optimized" { xfail *-*-* } } } */
+/* Everything except for the "i = 0" assignment should get removed. Value
+ numbering already figures out the if in the loop is never true. */
+/* { dg-final { scan-tree-dump-times "foo" 0 "fre1" } } */
+/* { dg-final { scan-tree-dump-times "if" 0 "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-46.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-46.c
index d6e63518e9f..8d6eaa3ada0 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-46.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-46.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O -fdump-tree-fre1-details" } */
+/* { dg-options "-O2 -fdump-tree-fre1-details" } */
int x[1024];
int foo (int a, int s, unsigned int k)
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-67.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-67.c
new file mode 100644
index 00000000000..fab1e599b81
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-67.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fno-tree-ccp -fdump-tree-fre1-stats" } */
+
+int foo()
+{
+ int i = 0;
+ do
+ {
+ i++;
+ }
+ while (i != 1);
+ return i;
+}
+
+/* { dg-final { scan-tree-dump "RPO iteration over 3 blocks visited 3 blocks" "fre1" } } */
+/* { dg-final { scan-tree-dump "return 1;" "fre1" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/switch-2.c b/gcc/testsuite/gcc.dg/tree-ssa/switch-2.c
new file mode 100644
index 00000000000..710825dc257
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/switch-2.c
@@ -0,0 +1,25 @@
+/* { dg-do compile { target { { x86_64-*-* aarch64-*-* ia64-*-* powerpc64-*-* } && lp64 } } } */
+/* { dg-options "-O2 -fdump-tree-switchlower1" } */
+
+int global;
+
+int foo (int x)
+{
+ switch (x) {
+ case 0:
+ case 10:
+ return 1;
+ case 20:
+ case 30:
+ case 62:
+ return 2;
+ case 1000:
+ case 1010:
+ case 1025 ... 1030:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* { dg-final { scan-tree-dump ";; GIMPLE switch case clusters: BT:0-62 BT:1000-1030" "switchlower1" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/switch-3.c b/gcc/testsuite/gcc.dg/tree-ssa/switch-3.c
new file mode 100644
index 00000000000..44981e1d186
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/switch-3.c
@@ -0,0 +1,20 @@
+/* { dg-options "-O2 -fdump-tree-switchlower1" } */
+
+int cipher_to_alg(int cipher)
+{
+ switch (cipher)
+ {
+ case 8: return 2;
+ case 16: return 3;
+ case 32: return 4;
+ case 64: return 6;
+ case 256: return 9;
+ case 512: return 10;
+ case 2048: return 11;
+ case 4096: return 12;
+ case 8192: return 13;
+ }
+ return 0;
+}
+
+/* { dg-final { scan-tree-dump-times "if \\(cipher\[^\n ]*" 12 "switchlower1" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp105.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp105.c
deleted file mode 100644
index 7cdd4dd8f3a..00000000000
--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp105.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/* PR tree-optimization/18046 */
-/* { dg-options "-O2 -fdump-tree-vrp2-details" } */
-/* { dg-final { scan-tree-dump-times "Threaded jump" 1 "vrp2" } } */
-/* In the 2nd VRP pass (after PRE) we expect to thread the default label of the
- 1st switch straight to that of the 2nd switch. */
-
-extern void foo (void);
-extern void bar (void);
-
-extern int i;
-void
-test (void)
-{
- switch (i)
- {
- case 0:
- foo ();
- break;
- case 1:
- bar ();
- break;
- default:
- break;
- }
-
- switch (i)
- {
- case 0:
- foo ();
- break;
- case 1:
- bar ();
- break;
- default:
- break;
- }
-}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp92.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp92.c
index 213aa47609a..5a2dbf0108a 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp92.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp92.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -fdisable-tree-evrp -fdump-tree-vrp1-details -fdisable-tree-ethread" } */
+/* { dg-options "-O2 -fdisable-tree-evrp -fno-tree-fre -fdump-tree-vrp1-details -fdisable-tree-ethread" } */
void bar (void);
int foo (int i, int j)
diff --git a/gcc/testsuite/gcc.dg/vect/no-vfa-vect-depend-2.c b/gcc/testsuite/gcc.dg/vect/no-vfa-vect-depend-2.c
index acad8fc0332..1880d1edb32 100644
--- a/gcc/testsuite/gcc.dg/vect/no-vfa-vect-depend-2.c
+++ b/gcc/testsuite/gcc.dg/vect/no-vfa-vect-depend-2.c
@@ -51,7 +51,4 @@ int main (void)
}
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" {xfail { vect_no_align && { ! vect_hw_misalign } } } } } */
-/* Requires reverse for variable-length SVE, which is implemented for
- by a later patch. Until then we report it twice, once for SVE and
- once for 128-bit Advanced SIMD. */
-/* { dg-final { scan-tree-dump-times "dependence distance negative" 1 "vect" { xfail { aarch64_sve && vect_variable_length } } } } */
+/* { dg-final { scan-tree-dump-times "dependence distance negative" 1 "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/no-vfa-vect-depend-3.c b/gcc/testsuite/gcc.dg/vect/no-vfa-vect-depend-3.c
index 1ccfc1edacc..e5914d970e3 100644
--- a/gcc/testsuite/gcc.dg/vect/no-vfa-vect-depend-3.c
+++ b/gcc/testsuite/gcc.dg/vect/no-vfa-vect-depend-3.c
@@ -183,7 +183,4 @@ int main ()
}
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 4 "vect" {xfail { vect_no_align && { ! vect_hw_misalign } } } } } */
-/* f4 requires reverse for SVE, which is implemented by a later patch.
- Until then we report it twice, once for SVE and once for 128-bit
- Advanced SIMD. */
-/* { dg-final { scan-tree-dump-times "dependence distance negative" 4 "vect" { xfail { aarch64_sve && vect_variable_length } } } } */
+/* { dg-final { scan-tree-dump-times "dependence distance negative" 4 "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr65947-13.c b/gcc/testsuite/gcc.dg/vect/pr65947-13.c
index ce290459c50..e1d3ff52f5c 100644
--- a/gcc/testsuite/gcc.dg/vect/pr65947-13.c
+++ b/gcc/testsuite/gcc.dg/vect/pr65947-13.c
@@ -41,4 +41,5 @@ main (void)
}
/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 2 "vect" } } */
-/* { dg-final { scan-tree-dump-times "condition expression based on integer induction." 4 "vect" } } */
+/* { dg-final { scan-tree-dump-times "condition expression based on integer induction." 4 "vect" { xfail vect_fold_extract_last } } } */
+/* { dg-final { scan-tree-dump-times "optimizing condition reduction with FOLD_EXTRACT_LAST" 4 "vect" { target vect_fold_extract_last } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr80631-2.c b/gcc/testsuite/gcc.dg/vect/pr80631-2.c
index 6bf239adac5..b334ca2345b 100644
--- a/gcc/testsuite/gcc.dg/vect/pr80631-2.c
+++ b/gcc/testsuite/gcc.dg/vect/pr80631-2.c
@@ -72,4 +72,5 @@ main ()
}
/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 5 "vect" { target vect_condition } } } */
-/* { dg-final { scan-tree-dump-times "condition expression based on integer induction." 10 "vect" { target vect_condition } } } */
+/* { dg-final { scan-tree-dump-times "condition expression based on integer induction." 10 "vect" { target vect_condition xfail vect_fold_extract_last } } } */
+/* { dg-final { scan-tree-dump-times "optimizing condition reduction with FOLD_EXTRACT_LAST" 10 "vect" { target vect_fold_extract_last } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr86927.c b/gcc/testsuite/gcc.dg/vect/pr86927.c
new file mode 100644
index 00000000000..794092bc4e4
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr86927.c
@@ -0,0 +1,15 @@
+#include "tree-vect.h"
+
+int a[28];
+int main()
+{
+ check_vect ();
+ a[4] = 1;
+ int c = 1;
+ for (int b = 0; b < 8; b++)
+ if (a[b])
+ c = 0;
+ if (c)
+ abort();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vect/slp-23.c b/gcc/testsuite/gcc.dg/vect/slp-23.c
index 3cda497db0c..7d330c787d1 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-23.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-23.c
@@ -107,8 +107,8 @@ int main (void)
/* { dg-final { scan-tree-dump-times "vectorized 2 loops" 1 "vect" { target { vect_strided8 && { ! { vect_no_align} } } } } } */
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target { ! { vect_strided8 || vect_no_align } } } } } */
-/* We fail to vectorize the second loop with variable-length SVE but
- fall back to 128-bit vectors, which does use SLP. */
/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target { ! vect_perm } } } } */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { target vect_perm } } } */
+/* SLP fails for the second loop with variable-length SVE because
+ the load size is greater than the minimum vector size. */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { target vect_perm xfail { aarch64_sve && vect_variable_length } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-37.c b/gcc/testsuite/gcc.dg/vect/slp-37.c
index 54a5e18c51f..a765cd70a09 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-37.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-37.c
@@ -58,5 +58,5 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" } } */
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target vect_hw_misalign } } } */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target vect_hw_misalign } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-perm-10.c b/gcc/testsuite/gcc.dg/vect/slp-perm-10.c
index 1c2a8580d3c..678152ba416 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-perm-10.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-perm-10.c
@@ -50,4 +50,6 @@ int main ()
}
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target vect_perm } } } */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target vect_perm } } } */
+/* SLP fails for variable-length SVE because the load size is greater
+ than the minimum vector size. */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target vect_perm xfail { aarch64_sve && vect_variable_length } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/slp-perm-9.c b/gcc/testsuite/gcc.dg/vect/slp-perm-9.c
index b01d493b6e7..c54420abd9d 100644
--- a/gcc/testsuite/gcc.dg/vect/slp-perm-9.c
+++ b/gcc/testsuite/gcc.dg/vect/slp-perm-9.c
@@ -59,7 +59,9 @@ int main (int argc, const char* argv[])
/* { dg-final { scan-tree-dump-times "vectorized 0 loops" 2 "vect" { target { ! { vect_perm_short || vect_load_lanes } } } } } */
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target { vect_perm_short || vect_load_lanes } } } } */
-/* { dg-final { scan-tree-dump-times "permutation requires at least three vectors" 1 "vect" { target { vect_perm_short && { ! vect_perm3_short } } } } } */
+/* We don't try permutes with a group size of 3 for variable-length
+ vectors. */
+/* { dg-final { scan-tree-dump-times "permutation requires at least three vectors" 1 "vect" { target { vect_perm_short && { ! vect_perm3_short } } xfail vect_variable_length } } } */
/* { dg-final { scan-tree-dump-not "permutation requires at least three vectors" "vect" { target vect_perm3_short } } } */
/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 0 "vect" { target { { ! vect_perm3_short } || vect_load_lanes } } } } */
/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target { vect_perm3_short && { ! vect_load_lanes } } } } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/large_struct_copy_2.c b/gcc/testsuite/gcc.target/aarch64/large_struct_copy_2.c
new file mode 100644
index 00000000000..565434244e8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/large_struct_copy_2.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+typedef unsigned __attribute__((mode(DI))) uint64_t;
+
+struct S0 {
+ uint64_t f1;
+ uint64_t f2;
+ uint64_t f3;
+ uint64_t f4;
+ uint64_t f5;
+} a;
+struct S2 {
+ uint64_t f0;
+ uint64_t f2;
+ struct S0 f3;
+};
+
+void fn1 () {
+ struct S2 b = {0, 1, 7, 4073709551611, 4, 8, 7};
+ a = b.f3;
+}
+
+/* { dg-final { scan-assembler-times {ldp\s+x[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {stp\s+x[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-not {ld[1-3]} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vect_su_add_sub.c b/gcc/testsuite/gcc.target/aarch64/simd/vect_su_add_sub.c
index 338da54f628..921c5f15c74 100644
--- a/gcc/testsuite/gcc.target/aarch64/simd/vect_su_add_sub.c
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vect_su_add_sub.c
@@ -1,21 +1,27 @@
/* { dg-do compile } */
/* { dg-options "-O3" } */
+typedef int __attribute__ ((mode (SI))) int32_t;
+typedef int __attribute__ ((mode (DI))) int64_t;
+typedef unsigned __attribute__ ((mode (SI))) size_t;
+typedef unsigned __attribute__ ((mode (SI))) uint32_t;
+typedef unsigned __attribute__ ((mode (DI))) uint64_t;
+
/* Ensure we use the signed/unsigned extend vectorized add and sub
instructions. */
#define N 1024
-int a[N];
-long c[N];
-long d[N];
-unsigned int ua[N];
-unsigned long uc[N];
-unsigned long ud[N];
+int32_t a[N];
+int64_t c[N];
+int64_t d[N];
+uint32_t ua[N];
+uint64_t uc[N];
+uint64_t ud[N];
void
add ()
{
- for (int i = 0; i < N; i++)
+ for (size_t i = 0; i < N; i++)
d[i] = a[i] + c[i];
}
/* { dg-final { scan-assembler-times "\[ \t\]saddw2\[ \t\]+" 1 } } */
@@ -24,7 +30,7 @@ add ()
void
subtract ()
{
- for (int i = 0; i < N; i++)
+ for (size_t i = 0; i < N; i++)
d[i] = c[i] - a[i];
}
/* { dg-final { scan-assembler-times "\[ \t\]ssubw2\[ \t\]+" 1 } } */
@@ -33,7 +39,7 @@ subtract ()
void
uadd ()
{
- for (int i = 0; i < N; i++)
+ for (size_t i = 0; i < N; i++)
ud[i] = ua[i] + uc[i];
}
/* { dg-final { scan-assembler-times "\[ \t\]uaddw2\[ \t\]+" 1 } } */
@@ -42,7 +48,7 @@ uadd ()
void
usubtract ()
{
- for (int i = 0; i < N; i++)
+ for (size_t i = 0; i < N; i++)
ud[i] = uc[i] - ua[i];
}
/* { dg-final { scan-assembler-times "\[ \t\]usubw2\[ \t\]+" 1 } } */
diff --git a/gcc/testsuite/gcc.dg/strcmpopt_6.c b/gcc/testsuite/gcc.target/aarch64/strcmpopt_6.c
index 4c6de02824f..4c6de02824f 100644
--- a/gcc/testsuite/gcc.dg/strcmpopt_6.c
+++ b/gcc/testsuite/gcc.target/aarch64/strcmpopt_6.c
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/bswap_1.c b/gcc/testsuite/gcc.target/aarch64/sve/bswap_1.c
new file mode 100644
index 00000000000..aa4c033a9e0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/bswap_1.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+#include <stdint.h>
+
+void
+f (uint16_t *a, uint16_t *b)
+{
+ for (int i = 0; i < 100; ++i)
+ a[i] = __builtin_bswap16 (b[i]);
+}
+
+/* { dg-final { scan-assembler-times {\trevb\tz[0-9]+\.h, p[0-7]/m, z[0-9]+\.h\n} 1 { xfail aarch64_big_endian } } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/bswap_2.c b/gcc/testsuite/gcc.target/aarch64/sve/bswap_2.c
new file mode 100644
index 00000000000..442c65e8983
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/bswap_2.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+#include <stdint.h>
+
+void
+f (uint32_t *a, uint32_t *b)
+{
+ for (int i = 0; i < 100; ++i)
+ a[i] = __builtin_bswap32 (b[i]);
+}
+
+/* { dg-final { scan-assembler-times {\trevb\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.s\n} 1 { xfail aarch64_big_endian } } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/bswap_3.c b/gcc/testsuite/gcc.target/aarch64/sve/bswap_3.c
new file mode 100644
index 00000000000..75acc64e09d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/bswap_3.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+#include <stdint.h>
+
+void
+f (uint64_t *a, uint64_t *b)
+{
+ for (int i = 0; i < 100; ++i)
+ a[i] = __builtin_bswap64 (b[i]);
+}
+
+/* { dg-final { scan-assembler-times {\trevb\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.d\n} 1 { xfail aarch64_big_endian } } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_1.c b/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_1.c
new file mode 100644
index 00000000000..0d4892eab81
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_1.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+#include <stdint.h>
+
+void
+f (uint8_t *restrict a, uint8_t *restrict b)
+{
+ for (int i = 0; i < 100; ++i)
+ {
+ a[i * 8] = b[i * 8 + 7] + 1;
+ a[i * 8 + 1] = b[i * 8 + 6] + 2;
+ a[i * 8 + 2] = b[i * 8 + 5] + 3;
+ a[i * 8 + 3] = b[i * 8 + 4] + 4;
+ a[i * 8 + 4] = b[i * 8 + 3] + 5;
+ a[i * 8 + 5] = b[i * 8 + 2] + 6;
+ a[i * 8 + 6] = b[i * 8 + 1] + 7;
+ a[i * 8 + 7] = b[i * 8 + 0] + 8;
+ }
+}
+
+/* { dg-final { scan-assembler-times {\trevb\tz[0-9]+\.d, p[0-7]/m, z[0-9]+\.d\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_2.c b/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_2.c
new file mode 100644
index 00000000000..86ace58498f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_2.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+#include <stdint.h>
+
+void
+f (uint8_t *restrict a, uint8_t *restrict b)
+{
+ for (int i = 0; i < 100; ++i)
+ {
+ a[i * 8] = b[i * 8 + 3] + 1;
+ a[i * 8 + 1] = b[i * 8 + 2] + 2;
+ a[i * 8 + 2] = b[i * 8 + 1] + 3;
+ a[i * 8 + 3] = b[i * 8 + 0] + 4;
+ a[i * 8 + 4] = b[i * 8 + 7] + 5;
+ a[i * 8 + 5] = b[i * 8 + 6] + 6;
+ a[i * 8 + 6] = b[i * 8 + 5] + 7;
+ a[i * 8 + 7] = b[i * 8 + 4] + 8;
+ }
+}
+
+/* { dg-final { scan-assembler-times {\trevb\tz[0-9]+\.s, p[0-7]/m, z[0-9]+\.s\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_3.c b/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_3.c
new file mode 100644
index 00000000000..d15215ff94f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_3.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+#include <stdint.h>
+
+void
+f (uint8_t *restrict a, uint8_t *restrict b)
+{
+ for (int i = 0; i < 100; ++i)
+ {
+ a[i * 8] = b[i * 8 + 1] + 1;
+ a[i * 8 + 1] = b[i * 8 + 0] + 2;
+ a[i * 8 + 2] = b[i * 8 + 3] + 3;
+ a[i * 8 + 3] = b[i * 8 + 2] + 4;
+ a[i * 8 + 4] = b[i * 8 + 5] + 5;
+ a[i * 8 + 5] = b[i * 8 + 4] + 6;
+ a[i * 8 + 6] = b[i * 8 + 7] + 7;
+ a[i * 8 + 7] = b[i * 8 + 6] + 8;
+ }
+}
+
+/* { dg-final { scan-assembler-times {\trevb\tz[0-9]+\.h, p[0-7]/m, z[0-9]+\.h\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_4.c b/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_4.c
new file mode 100644
index 00000000000..dc5262a0074
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_4.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+#include <stdint.h>
+
+void
+f (uint8_t *restrict a, uint8_t *restrict b, uint8_t *restrict c)
+{
+ for (int i = 0; i < 100; ++i)
+ {
+ a[i * 8] = b[i * 8] + c[i * 8];
+ a[i * 8 + 1] = b[i * 8] + c[i * 8 + 1];
+ a[i * 8 + 2] = b[i * 8 + 2] + c[i * 8 + 2];
+ a[i * 8 + 3] = b[i * 8 + 2] + c[i * 8 + 3];
+ a[i * 8 + 4] = b[i * 8 + 4] + c[i * 8 + 4];
+ a[i * 8 + 5] = b[i * 8 + 4] + c[i * 8 + 5];
+ a[i * 8 + 6] = b[i * 8 + 6] + c[i * 8 + 6];
+ a[i * 8 + 7] = b[i * 8 + 6] + c[i * 8 + 7];
+ }
+}
+
+/* { dg-final { scan-assembler {\ttrn1\tz[0-9]+\.b, z[0-9]+\.b, z[0-9]+\.b\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_5.c b/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_5.c
new file mode 100644
index 00000000000..d5a48af6d9e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_5.c
@@ -0,0 +1,32 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+#include <stdint.h>
+
+void
+f (uint8_t *restrict a, uint8_t *restrict b,
+ uint8_t *restrict c, uint8_t *restrict d)
+{
+ for (int i = 0; i < 100; ++i)
+ {
+ a[i * 8] = c[i * 8] + d[i * 8];
+ a[i * 8 + 1] = c[i * 8] + d[i * 8 + 1];
+ a[i * 8 + 2] = c[i * 8 + 2] + d[i * 8 + 2];
+ a[i * 8 + 3] = c[i * 8 + 2] + d[i * 8 + 3];
+ a[i * 8 + 4] = c[i * 8 + 4] + d[i * 8 + 4];
+ a[i * 8 + 5] = c[i * 8 + 4] + d[i * 8 + 5];
+ a[i * 8 + 6] = c[i * 8 + 6] + d[i * 8 + 6];
+ a[i * 8 + 7] = c[i * 8 + 6] + d[i * 8 + 7];
+ b[i * 8] = c[i * 8 + 1] + d[i * 8];
+ b[i * 8 + 1] = c[i * 8 + 1] + d[i * 8 + 1];
+ b[i * 8 + 2] = c[i * 8 + 3] + d[i * 8 + 2];
+ b[i * 8 + 3] = c[i * 8 + 3] + d[i * 8 + 3];
+ b[i * 8 + 4] = c[i * 8 + 5] + d[i * 8 + 4];
+ b[i * 8 + 5] = c[i * 8 + 5] + d[i * 8 + 5];
+ b[i * 8 + 6] = c[i * 8 + 7] + d[i * 8 + 6];
+ b[i * 8 + 7] = c[i * 8 + 7] + d[i * 8 + 7];
+ }
+}
+
+/* { dg-final { scan-assembler {\ttrn1\tz[0-9]+\.b, z[0-9]+\.b, z[0-9]+\.b\n} } } */
+/* { dg-final { scan-assembler {\ttrn2\tz[0-9]+\.b, z[0-9]+\.b, z[0-9]+\.b\n} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_6.c b/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_6.c
new file mode 100644
index 00000000000..28824611be8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_6.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+#include <stdint.h>
+
+void
+f (uint8_t *restrict a, uint8_t *restrict b)
+{
+ for (int i = 0; i < 100; ++i)
+ {
+ a[i * 8] = b[i * 8 + 3] + 1;
+ a[i * 8 + 1] = b[i * 8 + 6] + 1;
+ a[i * 8 + 2] = b[i * 8 + 0] + 1;
+ a[i * 8 + 3] = b[i * 8 + 2] + 1;
+ a[i * 8 + 4] = b[i * 8 + 1] + 1;
+ a[i * 8 + 5] = b[i * 8 + 7] + 1;
+ a[i * 8 + 6] = b[i * 8 + 5] + 1;
+ a[i * 8 + 7] = b[i * 8 + 4] + 1;
+ }
+}
+
+/* { dg-final { scan-assembler-times {\ttbl\tz[0-9]+\.b, z[0-9]+\.b, z[0-9]+\.b\n} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_7.c b/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_7.c
new file mode 100644
index 00000000000..da9e0a271a0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/slp_perm_7.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize" } */
+
+#include <stdint.h>
+
+void
+f (uint8_t *restrict a, uint8_t *restrict b)
+{
+ for (int i = 0; i < 100; ++i)
+ {
+ a[i * 8] = b[i * 8 + 1] + 1;
+ a[i * 8 + 1] = b[i * 8 + 7] + 2;
+ a[i * 8 + 2] = b[i * 8 + 1] + 3;
+ a[i * 8 + 3] = b[i * 8 + 7] + 4;
+ a[i * 8 + 4] = b[i * 8 + 1] + 5;
+ a[i * 8 + 5] = b[i * 8 + 7] + 6;
+ a[i * 8 + 6] = b[i * 8 + 1] + 7;
+ a[i * 8 + 7] = b[i * 8 + 7] + 8;
+ }
+}
+
+/* { dg-final { scan-assembler {\ttbl\tz[0-9]+\.b, z[0-9]+\.b, z[0-9]+\.b\n} } } */
diff --git a/gcc/testsuite/gcc.target/i386/indirect-thunk-register-1.c b/gcc/testsuite/gcc.target/i386/indirect-thunk-register-1.c
index 7d396a31953..0cf8daeb5b1 100644
--- a/gcc/testsuite/gcc.target/i386/indirect-thunk-register-1.c
+++ b/gcc/testsuite/gcc.target/i386/indirect-thunk-register-1.c
@@ -19,4 +19,3 @@ male_indirect_jump (long offset)
/* { dg-final { scan-assembler-not "push(?:l|q)\[ \t\]*_?dispatch" } } */
/* { dg-final { scan-assembler-not "pushq\[ \t\]%rax" } } */
/* { dg-final { scan-assembler-not "__x86_indirect_thunk\n" } } */
-/* { dg-final { scan-assembler-not "__x86_indirect_thunk_bnd\n" } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr87065.c b/gcc/testsuite/gcc.target/i386/pr87065.c
new file mode 100644
index 00000000000..7ee0f0f7b46
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr87065.c
@@ -0,0 +1,22 @@
+/* PR rtl-optimization/87065 */
+/* { dg-do compile } */
+/* { dg-options "-O3 -mxop -mprefer-vector-width=128" } */
+
+int a, c, d, e;
+short *b;
+
+void
+foo (void)
+{
+ short *g = b;
+ int h = 1;
+ unsigned i;
+ for (; h <= 1; h++)
+ g = (short *) &c;
+ for (; c; c++)
+ {
+ for (; i <= 1; i++)
+ ;
+ a ^= (a > 0 <= i) + ((e += d) == 0 ?: (*g = 8));
+ }
+}
diff --git a/gcc/testsuite/gcc.target/i386/strcmpopt_6.c b/gcc/testsuite/gcc.target/i386/strcmpopt_6.c
new file mode 100644
index 00000000000..4c6de02824f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/strcmpopt_6.c
@@ -0,0 +1,36 @@
+/* When the specified length exceeds one of the arguments of the call to memcmp,
+ the call to memcmp should NOT be inlined. */
+/* { dg-do compile } */
+/* { dg-options "-O2 -Wno-stringop-overflow" } */
+
+typedef struct { char s[8]; int x; } S;
+
+__attribute__ ((noinline)) int
+f1 (S * s)
+{
+ int result = 0;
+ result += __builtin_memcmp (s->s, "a", 3);
+ return result;
+}
+
+__attribute__ ((noinline)) int
+f2 (char *p)
+{
+ int result = 0;
+ result += __builtin_memcmp (p, "a", 3);
+ return result;
+}
+
+int main (void)
+{
+ S ss = {{'a','b','c'}, 2};
+ char *s = "abcd";
+
+ if (f1 (&ss) < 0 || f2 (s) < 0)
+ __builtin_abort ();
+
+ return 0;
+
+}
+
+/* { dg-final { scan-assembler-times "memcmp" 2 } } */
diff --git a/gcc/testsuite/gfortran.dg/allocate_with_source_25.f90 b/gcc/testsuite/gfortran.dg/allocate_with_source_25.f90
new file mode 100644
index 00000000000..92dc50756d4
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/allocate_with_source_25.f90
@@ -0,0 +1,71 @@
+! { dg-do compile }
+! { dg-options "-fdump-tree-original" }
+!
+! Test the fix for PR86481
+!
+! Contributed by Rich Townsend <townsend@astro.wisc.edu>
+!
+program simple_leak
+
+ implicit none
+
+ type, abstract :: foo_t
+ end type foo_t
+
+ type, extends(foo_t) :: foo_a_t
+ real(8), allocatable :: a(:)
+ end type foo_a_t
+
+ type, extends(foo_t) :: bar_t
+ class(foo_t), allocatable :: f
+ end type bar_t
+
+ integer, parameter :: N = 2
+ integer, parameter :: D = 3
+
+ type(bar_t) :: b(N)
+ integer :: i
+
+ do i = 1, N
+ b(i) = func_bar(D)
+ end do
+
+ do i = 1, N
+ deallocate (b(i)%f)
+ end do
+
+contains
+
+ function func_bar (D) result (b)
+
+ integer, intent(in) :: D
+ type(bar_t) :: b
+
+ allocate(b%f, SOURCE=func_foo(D))
+
+ end function func_bar
+
+ !****
+
+ function func_foo (D) result (f)
+
+ integer, intent(in) :: D
+ class(foo_t), allocatable :: f
+
+ allocate(f, SOURCE=func_foo_a(D)) ! Lose one of these for each allocation
+
+ end function func_foo
+
+ !****
+
+ function func_foo_a (D) result (f)
+
+ integer, intent(in) :: D
+ type(foo_a_t) :: f
+
+ allocate(f%a(D)) ! Lose one of these for each allocation => N*D*elem_size(f%a)
+
+ end function func_foo_a
+
+end program simple_leak
+! { dg-final { scan-tree-dump-times "\>_final" 6 "original" } }
diff --git a/gcc/testsuite/gfortran.dg/associate_3.f03 b/gcc/testsuite/gfortran.dg/associate_3.f03
index 20a375dcfd1..da7bec951d1 100644
--- a/gcc/testsuite/gfortran.dg/associate_3.f03
+++ b/gcc/testsuite/gfortran.dg/associate_3.f03
@@ -13,7 +13,7 @@ PROGRAM main
ASSOCIATE (a => 1) 5 ! { dg-error "Junk after ASSOCIATE" }
- ASSOCIATE (x =>) ! { dg-error "Expected association" }
+ ASSOCIATE (x =>) ! { dg-error "Invalid association target" }
ASSOCIATE (=> 5) ! { dg-error "Expected association" }
diff --git a/gcc/testsuite/gfortran.dg/associate_39.f90 b/gcc/testsuite/gfortran.dg/associate_39.f90
new file mode 100644
index 00000000000..16357c32777
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/associate_39.f90
@@ -0,0 +1,19 @@
+! { dg-do compile }
+!
+! PR 86935: Bad locus in ASSOCIATE statement
+!
+! Contributed by Janus Weil <janus@gcc.gnu.org>
+
+implicit none
+
+type :: t
+ real :: r = 0.5
+ integer :: i = 3
+end type
+
+type(t) :: x
+
+associate (r => x%r, &
+ i => x%ii) ! { dg-error "Invalid association target" }
+
+end
diff --git a/gcc/testsuite/gfortran.dg/class_result_7.f90 b/gcc/testsuite/gfortran.dg/class_result_7.f90
new file mode 100644
index 00000000000..066da549d6d
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/class_result_7.f90
@@ -0,0 +1,36 @@
+! { dg-do compile }
+! { dg-options "-fdump-tree-original" }
+!
+! Test the fix for PR80477
+!
+! Contributed by Stefano Zaghi <stefano.zaghi@cnr.it>
+!
+module a_type_m
+ implicit none
+ type :: a_type_t
+ real :: x
+ endtype
+contains
+ subroutine assign_a_type(lhs, rhs)
+ type(a_type_t), intent(inout) :: lhs
+ type(a_type_t), intent(in) :: rhs
+ lhs%x = rhs%x
+ end subroutine
+
+ function add_a_type(lhs, rhs) result( res )
+ type(a_type_t), intent(in) :: lhs
+ type(a_type_t), intent(in) :: rhs
+ class(a_type_t), allocatable :: res
+ allocate (a_type_t :: res)
+ res%x = lhs%x + rhs%x
+ end function
+end module
+
+program polymorphic_operators_memory_leaks
+ use a_type_m
+ implicit none
+ type(a_type_t) :: a = a_type_t(1) , b = a_type_t(2)
+ call assign_a_type (a, add_a_type(a,b)) ! generated a memory leak
+end
+! { dg-final { scan-tree-dump-times "builtin_free" 1 "original" } }
+! { dg-final { scan-tree-dump-times "builtin_malloc" 1 "original" } }
diff --git a/gcc/testsuite/gfortran.dg/class_result_8.f90 b/gcc/testsuite/gfortran.dg/class_result_8.f90
new file mode 100644
index 00000000000..573dd44daad
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/class_result_8.f90
@@ -0,0 +1,41 @@
+! { dg-do compile }
+! { dg-options "-fdump-tree-original" }
+!
+! Test the fix for the array version of PR80477
+!
+! Contributed by Stefano Zaghi <stefano.zaghi@cnr.it>
+!
+module a_type_m
+ implicit none
+ type :: a_type_t
+ real :: x
+ real, allocatable :: y(:)
+ endtype
+contains
+ subroutine assign_a_type(lhs, rhs)
+ type(a_type_t), intent(inout) :: lhs
+ type(a_type_t), intent(in) :: rhs(:)
+ lhs%x = rhs(1)%x + rhs(2)%x
+ end subroutine
+
+ function add_a_type(lhs, rhs) result( res )
+ type(a_type_t), intent(in) :: lhs
+ type(a_type_t), intent(in) :: rhs
+ class(a_type_t), allocatable :: res(:)
+ allocate (a_type_t :: res(2))
+ allocate (res(1)%y(1))
+ allocate (res(2)%y(1))
+ res(1)%x = lhs%x
+ res(2)%x = rhs%x
+ end function
+end module
+
+program polymorphic_operators_memory_leaks
+ use a_type_m
+ implicit none
+ type(a_type_t) :: a = a_type_t(1) , b = a_type_t(2)
+ call assign_a_type (a, add_a_type(a,b))
+ print *, a%x
+end
+! { dg-final { scan-tree-dump-times "builtin_free" 6 "original" } }
+! { dg-final { scan-tree-dump-times "builtin_malloc" 7 "original" } }
diff --git a/gcc/testsuite/gfortran.dg/class_result_9.f90 b/gcc/testsuite/gfortran.dg/class_result_9.f90
new file mode 100644
index 00000000000..10bc139aabf
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/class_result_9.f90
@@ -0,0 +1,45 @@
+! { dg-do run }
+!
+! Test the fix for an additional bug found while fixing PR80477
+!
+! Contributed by Paul Thomas <pault@gcc.gnu.org>
+!
+module a_type_m
+ implicit none
+ type :: a_type_t
+ real :: x
+ real, allocatable :: y(:)
+ endtype
+contains
+ subroutine assign_a_type(lhs, rhs)
+ type(a_type_t), intent(inout) :: lhs
+ type(a_type_t), intent(in) :: rhs(:)
+ lhs%x = rhs(1)%x + rhs(2)%x
+ lhs%y = rhs(1)%y + rhs(2)%y
+ end subroutine
+
+ function add_a_type(lhs, rhs) result( res )
+ type(a_type_t), intent(in) :: lhs
+ type(a_type_t), intent(in) :: rhs
+ class(a_type_t), allocatable :: res(:)
+ allocate (a_type_t :: res(2))
+ allocate (res(1)%y(1), source = [10.0])
+ allocate (res(2)%y(1), source = [20.0])
+ res(1)%x = lhs%x + rhs%x
+ res(2)%x = rhs%x + rhs%x
+ end function
+end module
+
+program polymorphic_operators_memory_leaks
+ use a_type_m
+ implicit none
+ type(a_type_t) :: a = a_type_t(1) , b = a_type_t(2)
+ class(a_type_t), allocatable :: res(:)
+
+ res = add_a_type(a,b) ! Remarkably, this ICEd - found while debugging the PR.
+ call assign_a_type (a, res)
+ if (int (res(1)%x + res(2)%x) .ne. int (a%x)) stop 1
+ if (int (sum (res(1)%y + res(2)%y)) .ne. int (sum (a%y))) stop 1
+ deallocate (a%y)
+ deallocate (res)
+end
diff --git a/gcc/testsuite/gfortran.dg/generic_35.f90 b/gcc/testsuite/gfortran.dg/generic_35.f90
new file mode 100644
index 00000000000..24ac270319f
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/generic_35.f90
@@ -0,0 +1,31 @@
+! { dg-do compile }
+!
+! PR 86545: ICE in transfer_expr on invalid WRITE statement
+!
+! Contributed by Janus Weil <janus@gcc.gnu.org>
+
+module m
+
+ type tString
+ character(len=:), allocatable :: cs
+ end type
+
+ interface my_trim
+ module procedure trim_string
+ end interface
+
+contains
+
+ elemental function trim_string(self) result(str)
+ type(tString) :: str
+ class(tString), intent(in) :: self
+ end function
+
+end module
+
+
+program p
+ use m
+ type(tString) :: s
+ write(*,*) my_trim(s) ! { dg-error "cannot have ALLOCATABLE components" }
+end
diff --git a/gcc/testsuite/gfortran.dg/implied_do_io_6.f90 b/gcc/testsuite/gfortran.dg/implied_do_io_6.f90
new file mode 100644
index 00000000000..ebc99b234d1
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/implied_do_io_6.f90
@@ -0,0 +1,39 @@
+! { dg-do run }
+! { dg-options "-ffrontend-optimize" }
+! PR 86837 - this was mis-optimized by trying to turn this into an
+! array I/O statement.
+! Original test case by "Pascal".
+
+Program read_loop
+
+ implicit none
+
+ integer :: i, j
+
+ ! number of values per column
+ integer, dimension(3) :: nvalues
+ data nvalues / 1, 2, 4 /
+
+ ! values in a 1D array
+ real, dimension(7) :: one_d
+ data one_d / 1, 11, 12, 21, 22, 23, 24 /
+
+ ! where to store the data back
+ real, dimension(4, 3) :: two_d
+
+ ! 1 - write our 7 values in one block
+ open(unit=10, file="loop.dta", form="unformatted")
+ write(10) one_d
+ close(unit=10)
+
+ ! 2 - read them back in chosen cells of a 2D array
+ two_d = -9
+ open(unit=10, file="loop.dta", form="unformatted", status='old')
+ read(10) ((two_d(i,j), i=1,nvalues(j)), j=1,3)
+ close(unit=10, status='delete')
+
+ ! 4 - print the whole array, just in case
+
+ if (any(reshape(two_d,[12]) /= [1.,-9.,-9.,-9.,11.,12.,-9.,-9.,21.,22.,23.,24.])) call abort
+
+end Program read_loop
diff --git a/gcc/testsuite/gfortran.dg/matmul_19.f90 b/gcc/testsuite/gfortran.dg/matmul_19.f90
new file mode 100644
index 00000000000..c4549240c1f
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/matmul_19.f90
@@ -0,0 +1,25 @@
+! { dg-do run }
+! { dg-options "-finline-matmul-limit=0" }
+! PR 86704 - this used to segfault.
+
+program testmaticovenasobeni
+implicit none
+
+ character(len=10) :: line
+ write (unit=line,fmt=*) testmatmul(120,1,3)
+
+ contains
+
+ function testmatmul(m,n,o)
+ integer, intent(in) :: m,n,o
+ real :: A(n,m),B(n,o),C(m,o)
+ logical :: testmatmul
+
+ call random_number(A)
+ call random_number(B)
+
+ C=matmul(transpose(A),B)
+ testmatmul=.true.
+ end function
+
+end program testmaticovenasobeni
diff --git a/gcc/testsuite/gfortran.dg/pr87117.f90 b/gcc/testsuite/gfortran.dg/pr87117.f90
new file mode 100644
index 00000000000..afca653d08d
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr87117.f90
@@ -0,0 +1,14 @@
+! { dg-do compile }
+! { dg-options "-O" }
+program p
+ real(4) :: a, b
+ integer(4) :: n, m
+ equivalence (a, n)
+ a = 1024.0
+ m = 8
+ a = 1024.0
+ b = set_exponent(a, m)
+ n = 8
+ a = f(a, n)
+ b = set_exponent(a, m)
+end
diff --git a/gcc/testsuite/gfortran.dg/reassoc_4.f b/gcc/testsuite/gfortran.dg/reassoc_4.f
index b155cba768c..07b4affb2a4 100644
--- a/gcc/testsuite/gfortran.dg/reassoc_4.f
+++ b/gcc/testsuite/gfortran.dg/reassoc_4.f
@@ -1,5 +1,5 @@
! { dg-do compile }
-! { dg-options "-O3 -ffast-math -fdump-tree-reassoc1 --param max-completely-peeled-insns=400" }
+! { dg-options "-O3 -ffast-math -fdump-tree-reassoc1 --param max-completely-peeled-insns=200" }
! { dg-additional-options "--param max-completely-peel-times=16" { target spu-*-* } }
subroutine anisonl(w,vo,anisox,s,ii1,jj1,weight)
integer ii1,jj1,i1,iii1,j1,jjj1,k1,l1,m1,n1
diff --git a/gcc/testsuite/gfortran.dg/submodule_32.f08 b/gcc/testsuite/gfortran.dg/submodule_32.f08
new file mode 100644
index 00000000000..529015b86ec
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/submodule_32.f08
@@ -0,0 +1,62 @@
+! { dg-do run }
+!
+! Test the fix for PR86863, where the Type Bound Procedures were
+! not flagged as subroutines thereby causing an error at the call
+! statements.
+!
+! Contributed by Damian Rouson <damian@sourceryinstitute.org>
+!
+module foo
+ implicit none
+ integer :: flag = 0
+ type bar
+ contains
+ procedure, nopass :: foobar
+ procedure, nopass :: barfoo
+ end type
+contains
+ subroutine foobar
+ flag = 1
+ end subroutine
+ subroutine barfoo
+ flag = 0
+ end subroutine
+end module
+
+module foobartoo
+ implicit none
+ interface
+ module subroutine set(object)
+ use foo
+ implicit none
+ type(bar) object
+ end subroutine
+ module subroutine unset(object)
+ use foo
+ implicit none
+ type(bar) object
+ end subroutine
+ end interface
+contains
+ module procedure unset
+ use foo, only : bar
+ call object%barfoo
+ end procedure
+end module
+
+submodule(foobartoo) subfoobar
+contains
+ module procedure set
+ use foo, only : bar
+ call object%foobar
+ end procedure
+end submodule
+
+ use foo
+ use foobartoo
+ type(bar) :: obj
+ call set(obj)
+ if (flag .ne. 1) stop 1
+ call unset(obj)
+ if (flag .ne. 0) stop 2
+end
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index b04ceb6508e..b51e8f0a5e9 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -5758,8 +5758,7 @@ proc check_effective_target_vect_perm { } {
} else {
set et_vect_perm_saved($et_index) 0
if { [is-effective-target arm_neon]
- || ([istarget aarch64*-*-*]
- && ![check_effective_target_vect_variable_length])
+ || [istarget aarch64*-*-*]
|| [istarget powerpc*-*-*]
|| [istarget spu-*-*]
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
@@ -5824,7 +5823,9 @@ proc check_effective_target_vect_perm { } {
proc vect_perm_supported { count element_bits } {
set vector_bits [lindex [available_vector_sizes] 0]
- if { $vector_bits <= 0 } {
+ # The number of vectors has to be a power of 2 when permuting
+ # variable-length vectors.
+ if { $vector_bits <= 0 && ($count & -$count) != $count } {
return 0
}
set vf [expr { $vector_bits / $element_bits }]
@@ -5864,8 +5865,7 @@ proc check_effective_target_vect_perm_byte { } {
if { ([is-effective-target arm_neon]
&& [is-effective-target arm_little_endian])
|| ([istarget aarch64*-*-*]
- && [is-effective-target aarch64_little_endian]
- && ![check_effective_target_vect_variable_length])
+ && [is-effective-target aarch64_little_endian])
|| [istarget powerpc*-*-*]
|| [istarget spu-*-*]
|| ([istarget mips-*.*]
@@ -5904,8 +5904,7 @@ proc check_effective_target_vect_perm_short { } {
if { ([is-effective-target arm_neon]
&& [is-effective-target arm_little_endian])
|| ([istarget aarch64*-*-*]
- && [is-effective-target aarch64_little_endian]
- && ![check_effective_target_vect_variable_length])
+ && [is-effective-target aarch64_little_endian])
|| [istarget powerpc*-*-*]
|| [istarget spu-*-*]
|| (([istarget i?86-*-*] || [istarget x86_64-*-*])
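The new vect_perm_supported check rejects variable-length vectors unless the
number of vectors is a power of two, using the classic bit trick: for a
non-zero unsigned n, n & -n isolates the lowest set bit, so it equals n
exactly when n has a single bit set.  A standalone C sketch of the same test
(illustrative only, not part of the patch):

#include <stdio.h>

/* Return 1 if COUNT is a power of two.  COUNT & -COUNT isolates the
   lowest set bit, which equals COUNT only when a single bit is set.  */
static int
is_power_of_two (unsigned int count)
{
  return count != 0 && (count & -count) == count;
}

int
main (void)
{
  for (unsigned int count = 1; count <= 8; count++)
    printf ("%u -> %d\n", count, is_power_of_two (count));
  return 0;
}

The Tcl expression in the patch, ($count & -$count) != $count, is the
negation of the same test.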
diff --git a/gcc/trans-mem.c b/gcc/trans-mem.c
index ca14915ef0d..1d4eb806202 100644
--- a/gcc/trans-mem.c
+++ b/gcc/trans-mem.c
@@ -235,8 +235,7 @@ is_tm_irrevocable (tree x)
if (TREE_CODE (x) == ADDR_EXPR)
x = TREE_OPERAND (x, 0);
if (TREE_CODE (x) == FUNCTION_DECL
- && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
+ && fndecl_built_in_p (x, BUILT_IN_TM_IRREVOCABLE))
return true;
return false;
@@ -358,7 +357,8 @@ is_tm_load (gimple *stmt)
return false;
fndecl = gimple_call_fndecl (stmt);
- return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
+ return (fndecl
+ && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
&& BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}
@@ -374,7 +374,7 @@ is_tm_simple_load (gimple *stmt)
return false;
fndecl = gimple_call_fndecl (stmt);
- if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
{
enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
return (fcode == BUILT_IN_TM_LOAD_1
@@ -402,7 +402,8 @@ is_tm_store (gimple *stmt)
return false;
fndecl = gimple_call_fndecl (stmt);
- return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
+ return (fndecl
+ && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
&& BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}
@@ -418,7 +419,8 @@ is_tm_simple_store (gimple *stmt)
return false;
fndecl = gimple_call_fndecl (stmt);
- if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ if (fndecl
+ && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
{
enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
return (fcode == BUILT_IN_TM_STORE_1
@@ -440,9 +442,7 @@ is_tm_simple_store (gimple *stmt)
static bool
is_tm_abort (tree fndecl)
{
- return (fndecl
- && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
+ return (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_TM_ABORT));
}
/* Build a GENERIC tree for a user abort. This is called by front ends
@@ -2007,7 +2007,7 @@ tm_region_init_1 (struct tm_region *region, basic_block bb)
if (gimple_code (g) == GIMPLE_CALL)
{
tree fn = gimple_call_fndecl (g);
- if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
+ if (fn && fndecl_built_in_p (fn, BUILT_IN_NORMAL))
{
if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
|| DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
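Most of the mechanical changes in this file (and in the tree-* files below)
collapse the two-step test DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
&& DECL_FUNCTION_CODE (fn) == BUILT_IN_FOO into a single fndecl_built_in_p
call.  A self-contained sketch of the shape of such a predicate, using
stand-in types rather than GCC's real tree accessors:

#include <stdbool.h>

/* Stand-ins for GCC's built-in classification; not the real definitions.  */
enum built_in_class { NOT_BUILT_IN, BUILT_IN_NORMAL };
enum built_in_function { BUILT_IN_NONE, BUILT_IN_TM_IRREVOCABLE, BUILT_IN_FORK };

struct fndecl
{
  enum built_in_class built_in_class;
  enum built_in_function function_code;
};

/* Sketch of the most common form used in this patch: is FN a "normal"
   built-in with the given function code?  */
static bool
fndecl_built_in_p (const struct fndecl *fn, enum built_in_function name)
{
  return fn->built_in_class == BUILT_IN_NORMAL && fn->function_code == name;
}

/* Example: fndecl_built_in_p (fn, BUILT_IN_TM_IRREVOCABLE) replaces the
   old two-line class-and-code check.  */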
diff --git a/gcc/tree-call-cdce.c b/gcc/tree-call-cdce.c
index a47be1d0362..01401cad6e6 100644
--- a/gcc/tree-call-cdce.c
+++ b/gcc/tree-call-cdce.c
@@ -737,7 +737,7 @@ gen_shrink_wrap_conditions (gcall *bi_call, vec<gimple *> conds,
call = bi_call;
fn = gimple_call_fndecl (call);
- gcc_assert (fn && DECL_BUILT_IN (fn));
+ gcc_assert (fn && fndecl_built_in_p (fn));
fnc = DECL_FUNCTION_CODE (fn);
*nconds = 0;
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index 463dd8a3bf9..cf12cb1f391 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -389,7 +389,7 @@ lower_phi_internal_fn ()
{
tree arg = gimple_call_arg (stmt, i);
if (TREE_CODE (arg) == LABEL_DECL)
- pred = label_to_block (arg);
+ pred = label_to_block (cfun, arg);
else
{
edge e = find_edge (pred, bb);
@@ -972,15 +972,15 @@ make_edges_bb (basic_block bb, struct omp_region **pcur_region, int *pomp_index)
tree label2 = gimple_transaction_label_uninst (txn);
if (label1)
- make_edge (bb, label_to_block (label1), EDGE_FALLTHRU);
+ make_edge (bb, label_to_block (cfun, label1), EDGE_FALLTHRU);
if (label2)
- make_edge (bb, label_to_block (label2),
+ make_edge (bb, label_to_block (cfun, label2),
EDGE_TM_UNINSTRUMENTED | (label1 ? 0 : EDGE_FALLTHRU));
tree label3 = gimple_transaction_label_over (txn);
if (gimple_transaction_subcode (txn)
& (GTMA_HAVE_ABORT | GTMA_IS_OUTER))
- make_edge (bb, label_to_block (label3), EDGE_TM_ABORT);
+ make_edge (bb, label_to_block (cfun, label3), EDGE_TM_ABORT);
fallthru = false;
}
@@ -1265,8 +1265,8 @@ make_cond_expr_edges (basic_block bb)
/* Entry basic blocks for each component. */
then_label = gimple_cond_true_label (entry);
else_label = gimple_cond_false_label (entry);
- then_bb = label_to_block (then_label);
- else_bb = label_to_block (else_label);
+ then_bb = label_to_block (cfun, then_label);
+ else_bb = label_to_block (cfun, else_label);
then_stmt = first_stmt (then_bb);
else_stmt = first_stmt (else_bb);
@@ -1373,7 +1373,7 @@ get_cases_for_edge (edge e, gswitch *t)
{
tree elt = gimple_switch_label (t, i);
tree lab = CASE_LABEL (elt);
- basic_block label_bb = label_to_block (lab);
+ basic_block label_bb = label_to_block (cfun, lab);
edge this_edge = find_edge (e->src, label_bb);
/* Add it to the chain of CASE_LABEL_EXPRs referencing E, or create
@@ -1397,8 +1397,7 @@ make_gimple_switch_edges (gswitch *entry, basic_block bb)
for (i = 0; i < n; ++i)
{
- tree lab = CASE_LABEL (gimple_switch_label (entry, i));
- basic_block label_bb = label_to_block (lab);
+ basic_block label_bb = gimple_switch_label_bb (cfun, entry, i);
make_edge (bb, label_bb, 0);
}
}
@@ -1407,7 +1406,7 @@ make_gimple_switch_edges (gswitch *entry, basic_block bb)
/* Return the basic block holding label DEST. */
basic_block
-label_to_block_fn (struct function *ifun, tree dest)
+label_to_block (struct function *ifun, tree dest)
{
int uid = LABEL_DECL_UID (dest);
@@ -1442,7 +1441,7 @@ make_goto_expr_edges (basic_block bb)
if (simple_goto_p (goto_t))
{
tree dest = gimple_goto_dest (goto_t);
- basic_block label_bb = label_to_block (dest);
+ basic_block label_bb = label_to_block (cfun, dest);
edge e = make_edge (bb, label_bb, EDGE_FALLTHRU);
e->goto_locus = gimple_location (goto_t);
gsi_remove (&last, true);
@@ -1464,7 +1463,7 @@ make_gimple_asm_edges (basic_block bb)
for (i = 0; i < n; ++i)
{
tree label = TREE_VALUE (gimple_asm_label_op (stmt, i));
- basic_block label_bb = label_to_block (label);
+ basic_block label_bb = label_to_block (cfun, label);
make_edge (bb, label_bb, 0);
}
}
@@ -1496,7 +1495,7 @@ static struct label_record
static tree
main_block_label (tree label)
{
- basic_block bb = label_to_block (label);
+ basic_block bb = label_to_block (cfun, label);
tree main_label = label_for_bb[bb->index].label;
/* label_to_block possibly inserted undefined label into the chain. */
@@ -1773,7 +1772,7 @@ group_case_labels_stmt (gswitch *stmt)
int i, next_index, new_size;
basic_block default_bb = NULL;
- default_bb = label_to_block (CASE_LABEL (gimple_switch_default_label (stmt)));
+ default_bb = gimple_switch_default_bb (cfun, stmt);
/* Look for possible opportunities to merge cases. */
new_size = i = 1;
@@ -1785,7 +1784,7 @@ group_case_labels_stmt (gswitch *stmt)
base_case = gimple_switch_label (stmt, i);
gcc_assert (base_case);
- base_bb = label_to_block (CASE_LABEL (base_case));
+ base_bb = label_to_block (cfun, CASE_LABEL (base_case));
/* Discard cases that have the same destination as the default case or
     whose destination blocks have already been removed as unreachable. */
@@ -1806,7 +1805,7 @@ group_case_labels_stmt (gswitch *stmt)
while (next_index < old_size)
{
tree merge_case = gimple_switch_label (stmt, next_index);
- basic_block merge_bb = label_to_block (CASE_LABEL (merge_case));
+ basic_block merge_bb = label_to_block (cfun, CASE_LABEL (merge_case));
wide_int bhp1 = wi::to_wide (base_high) + 1;
/* Merge the cases if they jump to the same place,
@@ -2387,7 +2386,7 @@ find_taken_edge_computed_goto (basic_block bb, tree val)
basic_block dest;
edge e = NULL;
- dest = label_to_block (val);
+ dest = label_to_block (cfun, val);
if (dest)
e = find_edge (bb, dest);
@@ -2455,7 +2454,7 @@ find_taken_edge_switch_expr (const gswitch *switch_stmt, tree val)
else
taken_case = find_case_label_for_value (switch_stmt, val);
}
- dest_bb = label_to_block (CASE_LABEL (taken_case));
+ dest_bb = label_to_block (cfun, CASE_LABEL (taken_case));
e = find_edge (gimple_bb (switch_stmt), dest_bb);
gcc_assert (e);
@@ -3427,7 +3426,7 @@ verify_gimple_call (gcall *stmt)
return true;
}
- if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
{
switch (DECL_FUNCTION_CODE (fndecl))
{
@@ -5498,7 +5497,7 @@ gimple_verify_flow_info (void)
err = 1;
}
- if (label_to_block (label) != bb)
+ if (label_to_block (cfun, label) != bb)
{
error ("label ");
print_generic_expr (stderr, label);
@@ -5655,8 +5654,7 @@ gimple_verify_flow_info (void)
/* Mark all the destination basic blocks. */
for (i = 0; i < n; ++i)
{
- tree lab = CASE_LABEL (gimple_switch_label (switch_stmt, i));
- basic_block label_bb = label_to_block (lab);
+ basic_block label_bb = gimple_switch_label_bb (cfun, switch_stmt, i);
gcc_assert (!label_bb->aux || label_bb->aux == (void *)1);
label_bb->aux = (void *)1;
}
@@ -5711,8 +5709,8 @@ gimple_verify_flow_info (void)
/* Check that we have all of them. */
for (i = 0; i < n; ++i)
{
- tree lab = CASE_LABEL (gimple_switch_label (switch_stmt, i));
- basic_block label_bb = label_to_block (lab);
+ basic_block label_bb = gimple_switch_label_bb (cfun,
+ switch_stmt, i);
if (label_bb->aux != (void *)2)
{
@@ -5936,7 +5934,7 @@ gimple_redirect_edge_and_branch (edge e, basic_block dest)
for (i = 0; i < n; i++)
{
tree elt = gimple_switch_label (switch_stmt, i);
- if (label_to_block (CASE_LABEL (elt)) == e->dest)
+ if (label_to_block (cfun, CASE_LABEL (elt)) == e->dest)
CASE_LABEL (elt) = label;
}
}
@@ -5952,7 +5950,7 @@ gimple_redirect_edge_and_branch (edge e, basic_block dest)
for (i = 0; i < n; ++i)
{
tree cons = gimple_asm_label_op (asm_stmt, i);
- if (label_to_block (TREE_VALUE (cons)) == e->dest)
+ if (label_to_block (cfun, TREE_VALUE (cons)) == e->dest)
{
if (!label)
label = gimple_block_label (dest);
@@ -6884,7 +6882,7 @@ move_stmt_r (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
/* Remap the region numbers for __builtin_eh_{pointer,filter}. */
{
tree r, fndecl = gimple_call_fndecl (stmt);
- if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
switch (DECL_FUNCTION_CODE (fndecl))
{
case BUILT_IN_EH_COPY_VALUES:
@@ -8305,15 +8303,14 @@ stmt_can_terminate_bb_p (gimple *t)
if (is_gimple_call (t)
&& fndecl
- && DECL_BUILT_IN (fndecl)
+ && fndecl_built_in_p (fndecl)
&& (call_flags & ECF_NOTHROW)
&& !(call_flags & ECF_RETURNS_TWICE)
/* fork() doesn't really return twice, but the effect of
wrapping it in __gcov_fork() which calls __gcov_flush()
and clears the counters before forking has the same
effect as returning twice. Force a fake edge. */
- && !(DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_FORK))
+ && !fndecl_built_in_p (fndecl, BUILT_IN_FORK))
return false;
if (is_gimple_call (t))
@@ -9143,6 +9140,41 @@ generate_range_test (basic_block bb, tree index, tree low, tree high,
gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
}
+/* Return the basic block that belongs to label numbered INDEX
+ of a switch statement. */
+
+basic_block
+gimple_switch_label_bb (function *ifun, gswitch *gs, unsigned index)
+{
+ return label_to_block (ifun, CASE_LABEL (gimple_switch_label (gs, index)));
+}
+
+/* Return the default basic block of a switch statement. */
+
+basic_block
+gimple_switch_default_bb (function *ifun, gswitch *gs)
+{
+ return gimple_switch_label_bb (ifun, gs, 0);
+}
+
+/* Return the edge that belongs to label numbered INDEX
+ of a switch statement. */
+
+edge
+gimple_switch_edge (function *ifun, gswitch *gs, unsigned index)
+{
+ return find_edge (gimple_bb (gs), gimple_switch_label_bb (ifun, gs, index));
+}
+
+/* Return the default edge of a switch statement. */
+
+edge
+gimple_switch_default_edge (function *ifun, gswitch *gs)
+{
+ return gimple_switch_edge (ifun, gs, 0);
+}
+
+
/* Emit return warnings. */
namespace {
diff --git a/gcc/tree-cfg.h b/gcc/tree-cfg.h
index 9491bb45feb..349a9543168 100644
--- a/gcc/tree-cfg.h
+++ b/gcc/tree-cfg.h
@@ -33,8 +33,7 @@ extern void init_empty_tree_cfg_for_function (struct function *);
extern void init_empty_tree_cfg (void);
extern void start_recording_case_labels (void);
extern void end_recording_case_labels (void);
-extern basic_block label_to_block_fn (struct function *, tree);
-#define label_to_block(t) (label_to_block_fn (cfun, t))
+extern basic_block label_to_block (struct function *, tree);
extern void cleanup_dead_labels (void);
extern bool group_case_labels_stmt (gswitch *);
extern bool group_case_labels (void);
@@ -112,6 +111,10 @@ extern bool extract_true_false_controlled_edges (basic_block, basic_block,
edge *, edge *);
extern void generate_range_test (basic_block bb, tree index, tree low,
tree high, tree *lhs, tree *rhs);
+extern basic_block gimple_switch_label_bb (function *, gswitch *, unsigned);
+extern basic_block gimple_switch_default_bb (function *, gswitch *);
+extern edge gimple_switch_edge (function *, gswitch *, unsigned);
+extern edge gimple_switch_default_edge (function *, gswitch *);
/* Return true if the LHS of a call should be removed. */
diff --git a/gcc/tree-cfgcleanup.c b/gcc/tree-cfgcleanup.c
index b27ba8a7333..7fd0430d6cf 100644
--- a/gcc/tree-cfgcleanup.c
+++ b/gcc/tree-cfgcleanup.c
@@ -84,13 +84,12 @@ convert_single_case_switch (gswitch *swtch, gimple_stmt_iterator &gsi)
return false;
tree index = gimple_switch_index (swtch);
- tree default_label = CASE_LABEL (gimple_switch_default_label (swtch));
tree label = gimple_switch_label (swtch, 1);
tree low = CASE_LOW (label);
tree high = CASE_HIGH (label);
- basic_block default_bb = label_to_block_fn (cfun, default_label);
- basic_block case_bb = label_to_block_fn (cfun, CASE_LABEL (label));
+ basic_block default_bb = gimple_switch_default_bb (cfun, swtch);
+ basic_block case_bb = label_to_block (cfun, CASE_LABEL (label));
basic_block bb = gimple_bb (swtch);
gcond *cond;
@@ -266,7 +265,7 @@ cleanup_control_flow_bb (basic_block bb)
label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
if (DECL_CONTEXT (label) != cfun->decl)
return retval;
- target_block = label_to_block (label);
+ target_block = label_to_block (cfun, label);
for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
{
if (e->dest != target_block)
diff --git a/gcc/tree-core.h b/gcc/tree-core.h
index 4a04e9e8b26..dee27f89dec 100644
--- a/gcc/tree-core.h
+++ b/gcc/tree-core.h
@@ -620,8 +620,6 @@ enum tree_index {
TI_CONST_FEXCEPT_T_PTR_TYPE,
TI_POINTER_SIZED_TYPE,
- TI_POINTER_BOUNDS_TYPE,
-
TI_DFLOAT32_TYPE,
TI_DFLOAT64_TYPE,
TI_DFLOAT128_TYPE,
@@ -1240,6 +1238,9 @@ struct GTY(()) tree_base {
IDENTIFIER_TRANSPARENT_ALIAS in
IDENTIFIER_NODE
+ SSA_NAME_POINTS_TO_READONLY_MEMORY in
+ SSA_NAME
+
visited:
TREE_VISITED in
diff --git a/gcc/tree-diagnostic.c b/gcc/tree-diagnostic.c
index 6b03b31c229..c4a200f7fbc 100644
--- a/gcc/tree-diagnostic.c
+++ b/gcc/tree-diagnostic.c
@@ -290,7 +290,7 @@ default_tree_printer (pretty_printer *pp, text_info *text, const char *spec,
}
if (set_locus)
- text->set_location (0, DECL_SOURCE_LOCATION (t), true);
+ text->set_location (0, DECL_SOURCE_LOCATION (t), SHOW_RANGE_WITH_CARET);
if (DECL_P (t))
{
diff --git a/gcc/tree-eh.c b/gcc/tree-eh.c
index f367040af45..fb931aa4389 100644
--- a/gcc/tree-eh.c
+++ b/gcc/tree-eh.c
@@ -1984,7 +1984,7 @@ lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
tree fndecl = gimple_call_fndecl (stmt);
tree rhs, lhs;
- if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
switch (DECL_FUNCTION_CODE (fndecl))
{
case BUILT_IN_EH_POINTER:
@@ -2231,7 +2231,7 @@ make_eh_dispatch_edges (geh_dispatch *stmt)
case ERT_TRY:
for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
{
- dst = label_to_block (c->label);
+ dst = label_to_block (cfun, c->label);
make_edge (src, dst, 0);
/* A catch-all handler doesn't have a fallthru. */
@@ -2241,7 +2241,7 @@ make_eh_dispatch_edges (geh_dispatch *stmt)
break;
case ERT_ALLOWED_EXCEPTIONS:
- dst = label_to_block (r->u.allowed.label);
+ dst = label_to_block (cfun, r->u.allowed.label);
make_edge (src, dst, 0);
break;
@@ -2270,7 +2270,7 @@ make_eh_edges (gimple *stmt)
gcc_assert (lp != NULL);
src = gimple_bb (stmt);
- dst = label_to_block (lp->post_landing_pad);
+ dst = label_to_block (cfun, lp->post_landing_pad);
make_edge (src, dst, EDGE_EH);
}
@@ -2389,7 +2389,7 @@ redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
case ERT_TRY:
for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
{
- old_bb = label_to_block (c->label);
+ old_bb = label_to_block (cfun, c->label);
if (old_bb == e->dest)
{
c->label = new_lab;
@@ -2399,7 +2399,7 @@ redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
break;
case ERT_ALLOWED_EXCEPTIONS:
- old_bb = label_to_block (r->u.allowed.label);
+ old_bb = label_to_block (cfun, r->u.allowed.label);
gcc_assert (old_bb == e->dest);
r->u.allowed.label = new_lab;
any_changed = true;
@@ -3329,7 +3329,7 @@ lower_resx (basic_block bb, gresx *stmt,
else
{
lab = *slot;
- new_bb = label_to_block (lab);
+ new_bb = label_to_block (cfun, lab);
}
gcc_assert (EDGE_COUNT (bb->succs) == 0);
@@ -3733,7 +3733,7 @@ lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
while (tp_node);
if (! have_label)
{
- remove_edge (find_edge (src, label_to_block (lab)));
+ remove_edge (find_edge (src, label_to_block (cfun, lab)));
redirected = true;
}
}
@@ -4046,7 +4046,7 @@ maybe_remove_unreachable_handlers (void)
FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
if (lp && lp->post_landing_pad)
{
- if (label_to_block (lp->post_landing_pad) == NULL)
+ if (label_to_block (cfun, lp->post_landing_pad) == NULL)
{
remove_unreachable_handlers ();
return;
@@ -4110,7 +4110,7 @@ remove_unreachable_handlers_no_lp (void)
static bool
unsplit_eh (eh_landing_pad lp)
{
- basic_block bb = label_to_block (lp->post_landing_pad);
+ basic_block bb = label_to_block (cfun, lp->post_landing_pad);
gimple_stmt_iterator gsi;
edge e_in, e_out;
@@ -4475,7 +4475,7 @@ infinite_empty_loop_p (edge e_first)
static bool
cleanup_empty_eh (eh_landing_pad lp)
{
- basic_block bb = label_to_block (lp->post_landing_pad);
+ basic_block bb = label_to_block (cfun, lp->post_landing_pad);
gimple_stmt_iterator gsi;
gimple *resx;
eh_region new_region;
@@ -4795,7 +4795,7 @@ verify_eh_edges (gimple *stmt)
return true;
}
- if (eh_edge->dest != label_to_block (lp->post_landing_pad))
+ if (eh_edge->dest != label_to_block (cfun, lp->post_landing_pad))
{
error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
return true;
@@ -4827,7 +4827,7 @@ verify_eh_dispatch_edge (geh_dispatch *stmt)
case ERT_TRY:
for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
{
- dst = label_to_block (c->label);
+ dst = label_to_block (cfun, c->label);
e = find_edge (src, dst);
if (e == NULL)
{
@@ -4846,7 +4846,7 @@ verify_eh_dispatch_edge (geh_dispatch *stmt)
break;
case ERT_ALLOWED_EXCEPTIONS:
- dst = label_to_block (r->u.allowed.label);
+ dst = label_to_block (cfun, r->u.allowed.label);
e = find_edge (src, dst);
if (e == NULL)
{
diff --git a/gcc/tree-if-conv.c b/gcc/tree-if-conv.c
index e181468fba9..77eefac4e34 100644
--- a/gcc/tree-if-conv.c
+++ b/gcc/tree-if-conv.c
@@ -1079,7 +1079,7 @@ if_convertible_stmt_p (gimple *stmt, vec<data_reference_p> refs)
&& !(flags & ECF_LOOPING_CONST_OR_PURE)
/* We can only vectorize some builtins at the moment,
so restrict if-conversion to those. */
- && DECL_BUILT_IN (fndecl))
+ && fndecl_built_in_p (fndecl))
return true;
}
return false;
diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c
index 2b6bb5c0e31..9352acc8af6 100644
--- a/gcc/tree-inline.c
+++ b/gcc/tree-inline.c
@@ -1702,7 +1702,7 @@ remap_gimple_stmt (gimple *stmt, copy_body_data *id)
case GIMPLE_CALL:
{
tree r, fndecl = gimple_call_fndecl (copy);
- if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
switch (DECL_FUNCTION_CODE (fndecl))
{
case BUILT_IN_EH_COPY_VALUES:
@@ -1939,8 +1939,7 @@ copy_bb (copy_body_data *id, basic_block bb,
else if (call_stmt
&& id->call_stmt
&& (decl = gimple_call_fndecl (stmt))
- && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (decl) == BUILT_IN_VA_ARG_PACK_LEN)
+ && fndecl_built_in_p (decl, BUILT_IN_VA_ARG_PACK_LEN))
{
/* __builtin_va_arg_pack_len () should be replaced by
the number of anonymous arguments. */
@@ -1951,7 +1950,13 @@ copy_bb (copy_body_data *id, basic_block bb,
for (p = DECL_ARGUMENTS (id->src_fn); p; p = DECL_CHAIN (p))
nargs--;
- if (!gimple_call_va_arg_pack_p (id->call_stmt))
+ if (!gimple_call_lhs (stmt))
+ {
+ /* Drop unused calls. */
+ gsi_remove (&copy_gsi, false);
+ continue;
+ }
+ else if (!gimple_call_va_arg_pack_p (id->call_stmt))
{
count = build_int_cst (integer_type_node, nargs);
new_stmt = gimple_build_assign (gimple_call_lhs (stmt), count);
@@ -4033,7 +4038,7 @@ estimate_num_insns (gimple *stmt, eni_weights *weights)
if (gimple_call_internal_p (stmt))
return 0;
else if ((decl = gimple_call_fndecl (stmt))
- && DECL_BUILT_IN (decl))
+ && fndecl_built_in_p (decl))
{
/* Do not special case builtins where we see the body.
This just confuse inliner. */
@@ -4897,7 +4902,7 @@ fold_marked_statements (int first, hash_set<gimple *> *statements)
gimple *old_stmt = gsi_stmt (gsi);
tree old_decl = is_gimple_call (old_stmt) ? gimple_call_fndecl (old_stmt) : 0;
- if (old_decl && DECL_BUILT_IN (old_decl))
+ if (old_decl && fndecl_built_in_p (old_decl))
{
/* Folding builtins can create multiple instructions,
we need to look at all of them. */
diff --git a/gcc/tree-into-ssa.c b/gcc/tree-into-ssa.c
index f4af33c1303..cdae75d1aae 100644
--- a/gcc/tree-into-ssa.c
+++ b/gcc/tree-into-ssa.c
@@ -2490,6 +2490,28 @@ pass_build_ssa::execute (function *fun)
SET_SSA_NAME_VAR_OR_IDENTIFIER (name, DECL_NAME (decl));
}
+ /* Initialize SSA_NAME_POINTS_TO_READONLY_MEMORY. */
+ tree fnspec = lookup_attribute ("fn spec",
+ TYPE_ATTRIBUTES (TREE_TYPE (fun->decl)));
+ if (fnspec)
+ {
+ fnspec = TREE_VALUE (TREE_VALUE (fnspec));
+ unsigned i = 1;
+ for (tree arg = DECL_ARGUMENTS (cfun->decl);
+ arg; arg = DECL_CHAIN (arg), ++i)
+ {
+ if (i >= (unsigned) TREE_STRING_LENGTH (fnspec))
+ break;
+ if (TREE_STRING_POINTER (fnspec)[i] == 'R'
+ || TREE_STRING_POINTER (fnspec)[i] == 'r')
+ {
+ tree name = ssa_default_def (fun, arg);
+ if (name)
+ SSA_NAME_POINTS_TO_READONLY_MEMORY (name) = 1;
+ }
+ }
+ }
+
return 0;
}
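The new pass_build_ssa code above walks the "fn spec" attribute string
starting at index 1 because, in this convention, the leading character
describes the return value and each following character describes the
corresponding argument; arguments marked 'R' or 'r' are treated as pointing
to read-only memory.  A minimal sketch of the same scan in plain C
(hypothetical helper, not part of GCC):

#include <stdbool.h>
#include <string.h>

/* Sketch: record which of NARGS arguments an "fn spec" string marks as
   read-only ('R' or 'r').  FNSPEC[0] describes the return value, so
   argument I is described by FNSPEC[I], matching the loop above that
   starts its index at 1.  */
static void
note_readonly_args (const char *fnspec, unsigned nargs, bool *readonly)
{
  size_t len = strlen (fnspec);
  for (unsigned i = 1; i <= nargs; i++)
    readonly[i - 1] = (i < len
		       && (fnspec[i] == 'R' || fnspec[i] == 'r'));
}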
diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c
index 622142719ee..990cc2167c7 100644
--- a/gcc/tree-pretty-print.c
+++ b/gcc/tree-pretty-print.c
@@ -3962,7 +3962,7 @@ newline_and_indent (pretty_printer *pp, int spc)
void
percent_K_format (text_info *text, location_t loc, tree block)
{
- text->set_location (0, loc, true);
+ text->set_location (0, loc, SHOW_RANGE_WITH_CARET);
gcc_assert (pp_ti_abstract_origin (text) != NULL);
*pp_ti_abstract_origin (text) = NULL;
diff --git a/gcc/tree-scalar-evolution.c b/gcc/tree-scalar-evolution.c
index 69122f2652f..6475743a26a 100644
--- a/gcc/tree-scalar-evolution.c
+++ b/gcc/tree-scalar-evolution.c
@@ -3617,7 +3617,8 @@ final_value_replacement_loop (struct loop *loop)
{
fprintf (dump_file, "\nfinal value replacement:\n ");
print_gimple_stmt (dump_file, phi, 0);
- fprintf (dump_file, " with\n ");
+ fprintf (dump_file, " with expr: ");
+ print_generic_expr (dump_file, def);
}
def = unshare_expr (def);
remove_phi_node (&psi, false);
@@ -3656,6 +3657,7 @@ final_value_replacement_loop (struct loop *loop)
gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
if (dump_file)
{
+ fprintf (dump_file, "\n final stmt:\n ");
print_gimple_stmt (dump_file, ass, 0);
fprintf (dump_file, "\n");
}
diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c
index 3e30f6bc3d4..a9681ece0ae 100644
--- a/gcc/tree-sra.c
+++ b/gcc/tree-sra.c
@@ -1498,8 +1498,7 @@ scan_function (void)
if (dest)
{
- if (DECL_BUILT_IN_CLASS (dest) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (dest) == BUILT_IN_APPLY_ARGS)
+ if (fndecl_built_in_p (dest, BUILT_IN_APPLY_ARGS))
encountered_apply_args = true;
if (recursive_call_p (current_function_decl, dest))
{
diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c
index 7b25778307f..032e79b8ba0 100644
--- a/gcc/tree-ssa-alias.c
+++ b/gcc/tree-ssa-alias.c
@@ -1483,6 +1483,16 @@ refs_may_alias_p_1 (ao_ref *ref1, ao_ref *ref2, bool tbaa_p)
ao_ref_alias_set (ref2)))
return false;
+ /* If the reference is based on a pointer that points to memory
+ that may not be written to then the other reference cannot possibly
+ clobber it. */
+ if ((TREE_CODE (TREE_OPERAND (base2, 0)) == SSA_NAME
+ && SSA_NAME_POINTS_TO_READONLY_MEMORY (TREE_OPERAND (base2, 0)))
+ || (ind1_p
+ && TREE_CODE (TREE_OPERAND (base1, 0)) == SSA_NAME
+ && SSA_NAME_POINTS_TO_READONLY_MEMORY (TREE_OPERAND (base1, 0))))
+ return false;
+
/* Dispatch to the pointer-vs-decl or pointer-vs-pointer disambiguators. */
if (var1_p && ind2_p)
return indirect_ref_may_alias_decl_p (ref2->ref, base2,
@@ -1991,6 +2001,14 @@ call_may_clobber_ref_p_1 (gcall *call, ao_ref *ref)
|| !is_global_var (base)))
return false;
+ /* If the reference is based on a pointer that points to memory
+ that may not be written to then the call cannot possibly clobber it. */
+ if ((TREE_CODE (base) == MEM_REF
+ || TREE_CODE (base) == TARGET_MEM_REF)
+ && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
+ && SSA_NAME_POINTS_TO_READONLY_MEMORY (TREE_OPERAND (base, 0)))
+ return false;
+
callee = gimple_call_fndecl (call);
/* Handle those builtin functions explicitly that do not act as
@@ -2722,7 +2740,14 @@ next:;
if (arg1 == arg0)
;
else if (! maybe_skip_until (phi, arg0, ref, arg1, cnt, visited,
- abort_on_visited, translate, data))
+ abort_on_visited,
+ /* Do not translate when walking over
+ backedges. */
+ dominated_by_p
+ (CDI_DOMINATORS,
+ gimple_bb (SSA_NAME_DEF_STMT (arg1)),
+ phi_bb)
+ ? NULL : translate, data))
return NULL_TREE;
}
@@ -2783,7 +2808,14 @@ walk_non_aliased_vuses (ao_ref *ref, tree vuse,
break;
if (valueize)
- vuse = valueize (vuse);
+ {
+ vuse = valueize (vuse);
+ if (!vuse)
+ {
+ res = NULL;
+ break;
+ }
+ }
def_stmt = SSA_NAME_DEF_STMT (vuse);
if (gimple_nop_p (def_stmt))
break;
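The two SSA_NAME_POINTS_TO_READONLY_MEMORY checks added above let the alias
oracle answer "no clobber" without inspecting the other reference at all.
In source terms the situation looks roughly like the fragment below; this is
illustrative C only, since in this patch the flag is seeded from the internal
"fn spec" attribute in tree-into-ssa.c rather than from const qualifiers:

/* If P is known to point to memory that is never written to (the property
   the new flag asserts), neither the store through Q nor the call to g()
   can clobber *P, so the second load of *P can reuse the first.  */
extern void g (void);

int
f (const int *p, int *q)
{
  int x = *p;     /* first load */
  *q = 5;         /* cannot clobber *p when the flag is set */
  g ();           /* the call cannot clobber *p either */
  return x + *p;  /* candidate for reusing the first load */
}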
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index d0f799eb39d..95368a5c79d 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -807,7 +807,7 @@ surely_varying_stmt_p (gimple *stmt)
tree fndecl, fntype = gimple_call_fntype (stmt);
if (!gimple_call_lhs (stmt)
|| ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
- && !DECL_BUILT_IN (fndecl)
+ && !fndecl_built_in_p (fndecl)
&& !lookup_attribute ("assume_aligned",
TYPE_ATTRIBUTES (fntype))
&& !lookup_attribute ("alloc_align",
@@ -2560,7 +2560,7 @@ optimize_stack_restore (gimple_stmt_iterator i)
callee = gimple_call_fndecl (stmt);
if (!callee
- || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
+ || !fndecl_built_in_p (callee, BUILT_IN_NORMAL)
/* All regular builtins are ok, just obviously not alloca. */
|| ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (callee)))
return NULL_TREE;
@@ -2596,9 +2596,7 @@ optimize_stack_restore (gimple_stmt_iterator i)
if (is_gimple_call (stack_save))
{
callee = gimple_call_fndecl (stack_save);
- if (callee
- && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
+ if (callee && fndecl_built_in_p (callee, BUILT_IN_STACK_SAVE))
{
gimple_stmt_iterator stack_save_gsi;
tree rhs;
@@ -3195,7 +3193,7 @@ pass_fold_builtins::execute (function *fun)
}
callee = gimple_call_fndecl (stmt);
- if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
+ if (!callee || !fndecl_built_in_p (callee, BUILT_IN_NORMAL))
{
gsi_next (&i);
continue;
@@ -3370,8 +3368,7 @@ pass_fold_builtins::execute (function *fun)
}
callee = gimple_call_fndecl (stmt);
if (!callee
- || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
- || DECL_FUNCTION_CODE (callee) == fcode)
+ || !fndecl_built_in_p (callee, fcode))
gsi_next (&i);
}
}
diff --git a/gcc/tree-ssa-dce.c b/gcc/tree-ssa-dce.c
index d23148675c9..91ce2aa4fc5 100644
--- a/gcc/tree-ssa-dce.c
+++ b/gcc/tree-ssa-dce.c
@@ -224,7 +224,7 @@ mark_stmt_if_obviously_necessary (gimple *stmt, bool aggressive)
{
tree callee = gimple_call_fndecl (stmt);
if (callee != NULL_TREE
- && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL)
+ && fndecl_built_in_p (callee, BUILT_IN_NORMAL))
switch (DECL_FUNCTION_CODE (callee))
{
case BUILT_IN_MALLOC:
@@ -565,7 +565,7 @@ mark_all_reaching_defs_necessary_1 (ao_ref *ref ATTRIBUTE_UNUSED,
{
tree callee = gimple_call_fndecl (def_stmt);
if (callee != NULL_TREE
- && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL)
+ && fndecl_built_in_p (callee, BUILT_IN_NORMAL))
switch (DECL_FUNCTION_CODE (callee))
{
case BUILT_IN_MALLOC:
diff --git a/gcc/tree-ssa-dom.c b/gcc/tree-ssa-dom.c
index 267880f3b5c..f7cc034b26e 100644
--- a/gcc/tree-ssa-dom.c
+++ b/gcc/tree-ssa-dom.c
@@ -436,7 +436,8 @@ record_edge_info (basic_block bb)
for (i = 0; i < n_labels; i++)
{
tree label = gimple_switch_label (switch_stmt, i);
- basic_block target_bb = label_to_block (CASE_LABEL (label));
+ basic_block target_bb
+ = label_to_block (cfun, CASE_LABEL (label));
if (CASE_HIGH (label)
|| !CASE_LOW (label)
|| info[target_bb->index])
@@ -1986,8 +1987,7 @@ dom_opt_dom_walker::optimize_stmt (basic_block bb, gimple_stmt_iterator si)
certain that the value simply isn't constant. */
tree callee = gimple_call_fndecl (stmt);
if (callee
- && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
+ && fndecl_built_in_p (callee, BUILT_IN_CONSTANT_P))
{
propagate_tree_value_into_stmt (&si, integer_zero_node);
stmt = gsi_stmt (si);
diff --git a/gcc/tree-ssa-dse.c b/gcc/tree-ssa-dse.c
index 4cb8c0f83ac..8b7aea0e54e 100644
--- a/gcc/tree-ssa-dse.c
+++ b/gcc/tree-ssa-dse.c
@@ -248,6 +248,18 @@ compute_trims (ao_ref *ref, sbitmap live, int *trim_head, int *trim_tail,
residual handling in mem* and str* functions is usually
reasonably efficient. */
*trim_tail = last_orig - last_live;
+
+ /* But don't trim away out of bounds accesses, as this defeats
+ proper warnings.
+
+ We could have a type with no TYPE_SIZE_UNIT or we could have a VLA
+ where TYPE_SIZE_UNIT is not a constant. */
+ if (*trim_tail
+ && TYPE_SIZE_UNIT (TREE_TYPE (ref->base))
+ && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (ref->base))) == INTEGER_CST
+ && compare_tree_int (TYPE_SIZE_UNIT (TREE_TYPE (ref->base)),
+ last_orig) <= 0)
+ *trim_tail = 0;
}
else
*trim_tail = 0;
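
The compute_trims hunk above refuses to shrink the tail of a partially dead store when the original store already reaches or passes the end of the underlying object, so that the out-of-bounds bytes remain visible to the warning machinery. Below is a minimal stand-alone model of that guard, with the tree-based size test reduced to plain integers purely for illustration.

#include <stdio.h>

/* OBJECT_SIZE < 0 means the size is unknown or not constant (no
   TYPE_SIZE_UNIT, or a VLA); in that case the computed trim is kept.  */
static long
adjust_trim_tail (long trim_tail, long object_size, long last_orig)
{
  if (trim_tail != 0
      && object_size >= 0
      && object_size <= last_orig)
    return 0;        /* Store ends at or past the object: keep it whole.  */
  return trim_tail;
}

int
main (void)
{
  /* A 16-byte store into an 8-byte object with 4 dead trailing bytes:
     the trim is dropped so the overflow stays diagnosable.  */
  printf ("%ld\n", adjust_trim_tail (4, 8, 16));
  /* An in-bounds store: the trim is kept.  */
  printf ("%ld\n", adjust_trim_tail (4, 32, 16));
  return 0;
}
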
diff --git a/gcc/tree-ssa-forwprop.c b/gcc/tree-ssa-forwprop.c
index 56078110b39..67133983cc3 100644
--- a/gcc/tree-ssa-forwprop.c
+++ b/gcc/tree-ssa-forwprop.c
@@ -1071,7 +1071,7 @@ simplify_gimple_switch_label_vec (gswitch *stmt, tree index_type)
for (i = 0; i < gimple_switch_num_labels (stmt); i++)
{
tree elt = gimple_switch_label (stmt, i);
- basic_block target = label_to_block (CASE_LABEL (elt));
+ basic_block target = label_to_block (cfun, CASE_LABEL (elt));
bitmap_set_bit (target_blocks, target->index);
}
for (ei = ei_start (gimple_bb (stmt)->succs); (e = ei_safe_edge (ei)); )
@@ -1278,7 +1278,7 @@ simplify_builtin_call (gimple_stmt_iterator *gsi_p, tree callee2)
constant length. */
callee1 = gimple_call_fndecl (stmt1);
if (callee1 == NULL_TREE
- || DECL_BUILT_IN_CLASS (callee1) != BUILT_IN_NORMAL
+ || !fndecl_built_in_p (callee1, BUILT_IN_NORMAL)
|| gimple_call_num_args (stmt1) != 3)
break;
if (DECL_FUNCTION_CODE (callee1) != BUILT_IN_MEMCPY
@@ -1290,7 +1290,7 @@ simplify_builtin_call (gimple_stmt_iterator *gsi_p, tree callee2)
lhs1 = gimple_call_lhs (stmt1);
if (!tree_fits_uhwi_p (len1))
break;
- str1 = string_constant (src1, &off1);
+ str1 = string_constant (src1, &off1, NULL, NULL);
if (str1 == NULL_TREE)
break;
if (!tree_fits_uhwi_p (off1)
@@ -2538,7 +2538,7 @@ pass_forwprop::execute (function *fun)
{
tree callee = gimple_call_fndecl (stmt);
if (callee != NULL_TREE
- && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL)
+ && fndecl_built_in_p (callee, BUILT_IN_NORMAL))
changed = simplify_builtin_call (&gsi, callee);
break;
}
diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c
index 01a954eeb1e..9c62f20866c 100644
--- a/gcc/tree-ssa-loop-im.c
+++ b/gcc/tree-ssa-loop-im.c
@@ -471,9 +471,7 @@ stmt_cost (gimple *stmt)
/* Unless the call is a builtin_constant_p; this always folds to a
constant, so moving it is useless. */
fndecl = gimple_call_fndecl (stmt);
- if (fndecl
- && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P)
+ if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_CONSTANT_P))
return 0;
return LIM_EXPENSIVE;
diff --git a/gcc/tree-ssa-loop-ivcanon.c b/gcc/tree-ssa-loop-ivcanon.c
index 326589f63c3..97c2ad94985 100644
--- a/gcc/tree-ssa-loop-ivcanon.c
+++ b/gcc/tree-ssa-loop-ivcanon.c
@@ -63,6 +63,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-inline.h"
#include "tree-cfgcleanup.h"
#include "builtins.h"
+#include "tree-ssa-sccvn.h"
/* Specifies types of loops that may be unrolled. */
@@ -1318,50 +1319,6 @@ canonicalize_induction_variables (void)
return 0;
}
-/* Propagate constant SSA_NAMEs defined in basic block BB. */
-
-static void
-propagate_constants_for_unrolling (basic_block bb)
-{
- /* Look for degenerate PHI nodes with constant argument. */
- for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
- {
- gphi *phi = gsi.phi ();
- tree result = gimple_phi_result (phi);
- tree arg = gimple_phi_arg_def (phi, 0);
-
- if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (result)
- && gimple_phi_num_args (phi) == 1
- && CONSTANT_CLASS_P (arg))
- {
- replace_uses_by (result, arg);
- gsi_remove (&gsi, true);
- release_ssa_name (result);
- }
- else
- gsi_next (&gsi);
- }
-
- /* Look for assignments to SSA names with constant RHS. */
- for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
- {
- gimple *stmt = gsi_stmt (gsi);
- tree lhs;
-
- if (is_gimple_assign (stmt)
- && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_constant
- && (lhs = gimple_assign_lhs (stmt), TREE_CODE (lhs) == SSA_NAME)
- && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
- {
- replace_uses_by (lhs, gimple_assign_rhs1 (stmt));
- gsi_remove (&gsi, true);
- release_ssa_name (lhs);
- }
- else
- gsi_next (&gsi);
- }
-}
-
/* Process loops from innermost to outer, stopping at the innermost
loop we unrolled. */
@@ -1512,10 +1469,14 @@ tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
EXECUTE_IF_SET_IN_BITMAP (fathers, 0, i, bi)
{
loop_p father = get_loop (cfun, i);
- basic_block *body = get_loop_body_in_dom_order (father);
- for (unsigned j = 0; j < father->num_nodes; j++)
- propagate_constants_for_unrolling (body[j]);
- free (body);
+ bitmap exit_bbs = BITMAP_ALLOC (NULL);
+ loop_exit *exit = father->exits->next;
+ while (exit->e)
+ {
+ bitmap_set_bit (exit_bbs, exit->e->dest->index);
+ exit = exit->next;
+ }
+ do_rpo_vn (cfun, loop_preheader_edge (father), exit_bbs);
}
BITMAP_FREE (fathers);
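
In tree-ssa-loop-ivcanon.c the per-block constant-propagation helper is gone; after complete unrolling the enclosing loop is instead cleaned up by running do_rpo_vn from its preheader edge up to a bitmap of exit destination blocks, collected by the while (exit->e) walk above. The small model below mirrors just that collection step, with the exit list reduced to a plain singly linked list and the bitmap to a bool array (both simplifications for illustration).

#include <stdbool.h>
#include <stdio.h>

struct exit_rec            /* models loop_exit; DEST_INDEX is the exit edge's dest */
{
  int dest_index;
  struct exit_rec *next;
};

/* Collect the destination blocks of all recorded exits.  */
static void
collect_exit_bbs (const struct exit_rec *exits, bool *exit_bbs, int n_blocks)
{
  for (const struct exit_rec *e = exits; e; e = e->next)
    if (e->dest_index >= 0 && e->dest_index < n_blocks)
      exit_bbs[e->dest_index] = true;
}

int
main (void)
{
  struct exit_rec e2 = { 7, NULL };
  struct exit_rec e1 = { 3, &e2 };
  bool exit_bbs[16] = { false };
  collect_exit_bbs (&e1, exit_bbs, 16);
  for (int i = 0; i < 16; i++)
    if (exit_bbs[i])
      printf ("exit bb %d\n", i);
  return 0;
}
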
diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c
index a90d9d28e4e..25378da6f4a 100644
--- a/gcc/tree-ssa-math-opts.c
+++ b/gcc/tree-ssa-math-opts.c
@@ -793,7 +793,7 @@ pass_cse_reciprocals::execute (function *fun)
{
fndecl = gimple_call_fndecl (call);
if (!fndecl
- || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD)
+ || !fndecl_built_in_p (fndecl, BUILT_IN_MD))
continue;
fndecl = targetm.builtin_reciprocal (fndecl);
if (!fndecl)
diff --git a/gcc/tree-ssa-operands.c b/gcc/tree-ssa-operands.c
index eefe270c32d..7bff676cf52 100644
--- a/gcc/tree-ssa-operands.c
+++ b/gcc/tree-ssa-operands.c
@@ -515,7 +515,7 @@ add_stmt_operand (struct function *fn, tree *var_p, gimple *stmt, int flags)
{
tree var = *var_p;
- gcc_assert (SSA_VAR_P (*var_p));
+ gcc_assert (SSA_VAR_P (*var_p) || TREE_CODE (*var_p) == STRING_CST);
if (is_gimple_reg (var))
{
@@ -740,6 +740,7 @@ get_expr_operands (struct function *fn, gimple *stmt, tree *expr_p, int flags)
case VAR_DECL:
case PARM_DECL:
case RESULT_DECL:
+ case STRING_CST:
if (!(flags & opf_address_taken))
add_stmt_operand (fn, expr_p, stmt, flags);
return;
diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index 144902d15dc..267086cdf48 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -663,6 +663,7 @@ get_expr_value_id (pre_expr expr)
id = VN_INFO (PRE_EXPR_NAME (expr))->value_id;
break;
case NARY:
+ gcc_assert (!PRE_EXPR_NARY (expr)->predicated_values);
id = PRE_EXPR_NARY (expr)->value_id;
break;
case REFERENCE:
@@ -677,10 +678,10 @@ get_expr_value_id (pre_expr expr)
return id;
}
-/* Return a SCCVN valnum (SSA name or constant) for the PRE value-id VAL. */
+/* Return a VN valnum (SSA name or constant) for the PRE value-id VAL. */
static tree
-sccvn_valnum_from_value_id (unsigned int val)
+vn_valnum_from_value_id (unsigned int val)
{
bitmap_iterator bi;
unsigned int i;
@@ -1308,9 +1309,9 @@ get_representative_for (const pre_expr e, basic_block b = NULL)
??? We should be able to re-use this when we insert the statement
to compute it. */
name = make_temp_ssa_name (get_expr_type (e), gimple_build_nop (), "pretmp");
- VN_INFO_GET (name)->value_id = value_id;
+ VN_INFO (name)->value_id = value_id;
VN_INFO (name)->valnum = valnum ? valnum : name;
- /* ??? For now mark this SSA name for release by SCCVN. */
+ /* ??? For now mark this SSA name for release by VN. */
VN_INFO (name)->needs_insertion = true;
add_to_value (value_id, get_or_alloc_expr_for_name (name));
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -1404,7 +1405,22 @@ phi_translate_1 (bitmap_set_t dest,
constant = find_leader_in_sets (value_id, dest,
AVAIL_OUT (pred));
if (constant)
- return constant;
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "simplifying ");
+ print_pre_expr (dump_file, expr);
+ fprintf (dump_file, " translated %d -> %d to ",
+ phiblock->index, pred->index);
+ PRE_EXPR_NARY (expr) = newnary;
+ print_pre_expr (dump_file, expr);
+ PRE_EXPR_NARY (expr) = nary;
+ fprintf (dump_file, " to ");
+ print_pre_expr (dump_file, constant);
+ fprintf (dump_file, "\n");
+ }
+ return constant;
+ }
}
}
else
@@ -1426,7 +1442,7 @@ phi_translate_1 (bitmap_set_t dest,
expr = pre_expr_pool.allocate ();
expr->kind = NARY;
expr->id = 0;
- if (nary)
+ if (nary && !nary->predicated_values)
{
PRE_EXPR_NARY (expr) = nary;
new_val_id = nary->value_id;
@@ -1664,7 +1680,10 @@ phi_translate (bitmap_set_t dest, pre_expr expr,
}
/* Translate. */
+ basic_block saved_valueize_bb = vn_context_bb;
+ vn_context_bb = e->src;
phitrans = phi_translate_1 (dest, expr, set1, set2, e);
+ vn_context_bb = saved_valueize_bb;
if (slot)
{
@@ -2414,9 +2433,7 @@ compute_antic (void)
{
/* For partial antic we ignore backedges and thus we do not need
to perform any iteration when we process blocks in postorder. */
- int postorder_num
- = pre_and_rev_post_order_compute (NULL, postorder.address (), false);
- for (i = postorder_num - 1 ; i >= 0; i--)
+ for (i = postorder.length () - 1; i >= 0; i--)
{
basic_block block = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
compute_partial_antic_aux (block,
@@ -2925,7 +2942,7 @@ create_expression_by_pieces (basic_block block, pre_expr expr,
if (forcedname != folded)
{
- VN_INFO_GET (forcedname)->valnum = forcedname;
+ VN_INFO (forcedname)->valnum = forcedname;
VN_INFO (forcedname)->value_id = get_next_value_id ();
nameexpr = get_or_alloc_expr_for_name (forcedname);
add_to_value (VN_INFO (forcedname)->value_id, nameexpr);
@@ -2951,8 +2968,8 @@ create_expression_by_pieces (basic_block block, pre_expr expr,
the expression may have been represented. There is no harm in replacing
here. */
value_id = get_expr_value_id (expr);
- VN_INFO_GET (name)->value_id = value_id;
- VN_INFO (name)->valnum = sccvn_valnum_from_value_id (value_id);
+ VN_INFO (name)->value_id = value_id;
+ VN_INFO (name)->valnum = vn_valnum_from_value_id (value_id);
if (VN_INFO (name)->valnum == NULL_TREE)
VN_INFO (name)->valnum = name;
gcc_assert (VN_INFO (name)->valnum != NULL_TREE);
@@ -3057,8 +3074,8 @@ insert_into_preds_of_block (basic_block block, unsigned int exprnum,
temp = make_temp_ssa_name (type, NULL, "prephitmp");
phi = create_phi_node (temp, block);
- VN_INFO_GET (temp)->value_id = val;
- VN_INFO (temp)->valnum = sccvn_valnum_from_value_id (val);
+ VN_INFO (temp)->value_id = val;
+ VN_INFO (temp)->valnum = vn_valnum_from_value_id (val);
if (VN_INFO (temp)->valnum == NULL_TREE)
VN_INFO (temp)->valnum = temp;
bitmap_set_bit (inserted_exprs, SSA_NAME_VERSION (temp));
@@ -3302,8 +3319,8 @@ do_pre_regular_insertion (basic_block block, basic_block dom)
gimple_stmt_iterator gsi = gsi_after_labels (block);
gsi_insert_before (&gsi, assign, GSI_NEW_STMT);
- VN_INFO_GET (temp)->value_id = val;
- VN_INFO (temp)->valnum = sccvn_valnum_from_value_id (val);
+ VN_INFO (temp)->value_id = val;
+ VN_INFO (temp)->valnum = vn_valnum_from_value_id (val);
if (VN_INFO (temp)->valnum == NULL_TREE)
VN_INFO (temp)->valnum = temp;
bitmap_set_bit (inserted_exprs, SSA_NAME_VERSION (temp));
@@ -3744,6 +3761,7 @@ compute_avail (void)
/* Pick a block from the worklist. */
block = worklist[--sp];
+ vn_context_bb = block;
/* Initially, the set of available values in BLOCK is that of
its immediate dominator. */
@@ -3885,7 +3903,7 @@ compute_avail (void)
continue;
vn_nary_op_lookup_stmt (stmt, &nary);
- if (!nary)
+ if (!nary || nary->predicated_values)
continue;
/* If the NARY traps and there was a preceding
@@ -4045,6 +4063,7 @@ compute_avail (void)
son = next_dom_son (CDI_DOMINATORS, son))
worklist[sp++] = son;
}
+ vn_context_bb = NULL;
free (worklist);
}
@@ -4135,6 +4154,34 @@ public:
}; // class pass_pre
+/* Valueization hook for RPO VN when we are calling back to it
+ at ANTIC compute time. */
+
+static tree
+pre_valueize (tree name)
+{
+ if (TREE_CODE (name) == SSA_NAME)
+ {
+ tree tem = VN_INFO (name)->valnum;
+ if (tem != VN_TOP && tem != name)
+ {
+ if (TREE_CODE (tem) != SSA_NAME
+ || SSA_NAME_IS_DEFAULT_DEF (tem))
+ return tem;
+ /* We create temporary SSA names for representatives that
+ do not have a definition (yet) but are not default defs either;
+ assume they are fine to use. */
+ basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (tem));
+ if (! def_bb
+ || dominated_by_p (CDI_DOMINATORS, vn_context_bb, def_bb))
+ return tem;
+ /* ??? Now we could look for a leader. Ideally we'd somehow
+ expose RPO VN leaders and get rid of AVAIL_OUT as well... */
+ }
+ }
+ return name;
+}
+
unsigned int
pass_pre::execute (function *fun)
{
@@ -4143,16 +4190,18 @@ pass_pre::execute (function *fun)
do_partial_partial =
flag_tree_partial_pre && optimize_function_for_speed_p (fun);
- /* This has to happen before SCCVN runs because
+ /* This has to happen before VN runs because
loop_optimizer_init may create new phis, etc. */
loop_optimizer_init (LOOPS_NORMAL);
split_critical_edges ();
scev_initialize ();
- run_scc_vn (VN_WALK);
+ run_rpo_vn (VN_WALK);
init_pre ();
+ vn_valueize = pre_valueize;
+
/* Insert can get quite slow on an incredibly large number of basic
blocks due to some quadratic behavior. Until this behavior is
fixed, don't run it when we have an incredibly large number of
@@ -4181,8 +4230,9 @@ pass_pre::execute (function *fun)
statistics_counter_event (fun, "HOIST inserted", pre_stats.hoist_insert);
statistics_counter_event (fun, "New PHIs", pre_stats.phis);
- /* Remove all the redundant expressions. */
- todo |= vn_eliminate (inserted_exprs);
+ todo |= eliminate_with_rpo_vn (inserted_exprs);
+
+ vn_valueize = NULL;
/* Because we don't follow exactly the standard PRE algorithm, and decide not
to insert PHI nodes sometimes, and because value numbering of casts isn't
@@ -4195,9 +4245,6 @@ pass_pre::execute (function *fun)
scev_finalize ();
loop_optimizer_finalize ();
- /* Restore SSA info before tail-merging as that resets it as well. */
- scc_vn_restore_ssa_info ();
-
/* TODO: tail_merge_optimize may merge all predecessors of a block, in which
case we can merge the block with the remaining predecessor of the block.
It should either:
@@ -4207,7 +4254,7 @@ pass_pre::execute (function *fun)
- share the cfg cleanup with fini_pre. */
todo |= tail_merge_optimize (todo);
- free_scc_vn ();
+ free_rpo_vn ();
/* Tail merging invalidates the virtual SSA web, together with
cfg-cleanup opportunities exposed by PRE this will wreck the
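
tree-ssa-pre.c now installs pre_valueize as the vn_valueize hook for the duration of the pass and switches from the SCC-based entry points to run_rpo_vn / eliminate_with_rpo_vn / free_rpo_vn. The sketch below models only the hook pattern itself: a global function pointer swapped in around a phase and cleared afterwards. All names in it are stand-ins, not the GCC interfaces.

#include <stdio.h>

typedef int value_t;

/* Global valueization hook, NULL when no client overrides it.  */
static value_t (*valueize_hook) (value_t);

static value_t
valueize (value_t v)
{
  return valueize_hook ? valueize_hook (v) : v;
}

/* A client-specific hook, analogous in spirit to pre_valueize.  */
static value_t
client_valueize (value_t v)
{
  return v & ~1;   /* toy canonicalization */
}

static void
run_phase (void)
{
  printf ("valueize (5) = %d\n", valueize (5));
}

int
main (void)
{
  valueize_hook = client_valueize;   /* install around the phase ...  */
  run_phase ();
  valueize_hook = NULL;              /* ... and restore afterwards.  */
  run_phase ();
  return 0;
}
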
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index 43f3313911f..2bf71e5b15b 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -127,11 +127,12 @@ along with GCC; see the file COPYING3. If not see
structure copies.
*/
+/* There's no BB_EXECUTABLE but we can use BB_VISITED. */
+#define BB_EXECUTABLE BB_VISITED
static tree *last_vuse_ptr;
static vn_lookup_kind vn_walk_kind;
static vn_lookup_kind default_vn_walk_kind;
-bitmap const_parms;
/* vn_nary_op hashtable helpers. */
@@ -304,34 +305,10 @@ static vn_nary_op_t last_inserted_nary;
static vn_tables_t valid_info;
-/* Reverse post order index for each basic block. */
-static int *rpo_numbers;
+/* Valueization hook. Valueize NAME if it is an SSA name, otherwise
+ just return it. */
+tree (*vn_valueize) (tree);
-#define SSA_VAL(x) (VN_INFO ((x))->valnum)
-
-/* Return the SSA value of the VUSE x, supporting released VDEFs
- during elimination which will value-number the VDEF to the
- associated VUSE (but not substitute in the whole lattice). */
-
-static inline tree
-vuse_ssa_val (tree x)
-{
- if (!x)
- return NULL_TREE;
-
- do
- {
- tree tem = SSA_VAL (x);
- /* stmt walking can walk over a backedge and reach code we didn't
- value-number yet. */
- if (tem == VN_TOP)
- return x;
- x = tem;
- }
- while (SSA_NAME_IN_FREE_LIST (x));
-
- return x;
-}
/* This represents the top of the VN lattice, which is the universal
value. */
@@ -342,66 +319,184 @@ tree VN_TOP;
static unsigned int next_value_id;
-/* Next DFS number and the stack for strongly connected component
- detection. */
-
-static unsigned int next_dfs_num;
-static vec<tree> sccstack;
-
-
/* Table of vn_ssa_aux_t's, one per ssa_name. The vn_ssa_aux_t objects
are allocated on an obstack for locality reasons, and to free them
without looping over the vec. */
-static vec<vn_ssa_aux_t> vn_ssa_aux_table;
+struct vn_ssa_aux_hasher : typed_noop_remove <vn_ssa_aux_t>
+{
+ typedef vn_ssa_aux_t value_type;
+ typedef tree compare_type;
+ static inline hashval_t hash (const value_type &);
+ static inline bool equal (const value_type &, const compare_type &);
+ static inline void mark_deleted (value_type &) {}
+ static inline void mark_empty (value_type &e) { e = NULL; }
+ static inline bool is_deleted (value_type &) { return false; }
+ static inline bool is_empty (value_type &e) { return e == NULL; }
+};
+
+hashval_t
+vn_ssa_aux_hasher::hash (const value_type &entry)
+{
+ return SSA_NAME_VERSION (entry->name);
+}
+
+bool
+vn_ssa_aux_hasher::equal (const value_type &entry, const compare_type &name)
+{
+ return name == entry->name;
+}
+
+static hash_table<vn_ssa_aux_hasher> *vn_ssa_aux_hash;
+typedef hash_table<vn_ssa_aux_hasher>::iterator vn_ssa_aux_iterator_type;
static struct obstack vn_ssa_aux_obstack;
+static vn_nary_op_t vn_nary_op_insert_stmt (gimple *, tree);
+static unsigned int vn_nary_length_from_stmt (gimple *);
+static vn_nary_op_t alloc_vn_nary_op_noinit (unsigned int, obstack *);
+static vn_nary_op_t vn_nary_op_insert_into (vn_nary_op_t,
+ vn_nary_op_table_type *, bool);
+static void init_vn_nary_op_from_stmt (vn_nary_op_t, gimple *);
+static void init_vn_nary_op_from_pieces (vn_nary_op_t, unsigned int,
+ enum tree_code, tree, tree *);
+static tree vn_lookup_simplify_result (gimple_match_op *);
+
/* Return whether there is value numbering information for a given SSA name. */
bool
has_VN_INFO (tree name)
{
- if (SSA_NAME_VERSION (name) < vn_ssa_aux_table.length ())
- return vn_ssa_aux_table[SSA_NAME_VERSION (name)] != NULL;
- return false;
+ return vn_ssa_aux_hash->find_with_hash (name, SSA_NAME_VERSION (name));
}
-/* Return the value numbering information for a given SSA name. */
-
vn_ssa_aux_t
VN_INFO (tree name)
{
- vn_ssa_aux_t res = vn_ssa_aux_table[SSA_NAME_VERSION (name)];
- gcc_checking_assert (res);
- return res;
+ vn_ssa_aux_t *res
+ = vn_ssa_aux_hash->find_slot_with_hash (name, SSA_NAME_VERSION (name),
+ INSERT);
+ if (*res != NULL)
+ return *res;
+
+ vn_ssa_aux_t newinfo = *res = XOBNEW (&vn_ssa_aux_obstack, struct vn_ssa_aux);
+ memset (newinfo, 0, sizeof (struct vn_ssa_aux));
+ newinfo->name = name;
+ newinfo->valnum = VN_TOP;
+ /* We are using the visited flag to handle uses with defs not within the
+ region being value-numbered. */
+ newinfo->visited = false;
+
+ /* Given that we now create the VN_INFOs on demand, we have to do
+ initialization different from VN_TOP here. */
+ if (SSA_NAME_IS_DEFAULT_DEF (name))
+ switch (TREE_CODE (SSA_NAME_VAR (name)))
+ {
+ case VAR_DECL:
+ /* All undefined vars are VARYING. */
+ newinfo->valnum = name;
+ newinfo->visited = true;
+ break;
+
+ case PARM_DECL:
+ /* Parameters are VARYING but we can record a condition
+ if we know it is a non-NULL pointer. */
+ newinfo->visited = true;
+ newinfo->valnum = name;
+ if (POINTER_TYPE_P (TREE_TYPE (name))
+ && nonnull_arg_p (SSA_NAME_VAR (name)))
+ {
+ tree ops[2];
+ ops[0] = name;
+ ops[1] = build_int_cst (TREE_TYPE (name), 0);
+ vn_nary_op_t nary;
+ /* Allocate from non-unwinding stack. */
+ nary = alloc_vn_nary_op_noinit (2, &vn_tables_insert_obstack);
+ init_vn_nary_op_from_pieces (nary, 2, NE_EXPR,
+ boolean_type_node, ops);
+ nary->predicated_values = 0;
+ nary->u.result = boolean_true_node;
+ vn_nary_op_insert_into (nary, valid_info->nary, true);
+ gcc_assert (nary->unwind_to == NULL);
+ /* Also do not link it into the undo chain. */
+ last_inserted_nary = nary->next;
+ nary->next = (vn_nary_op_t)(void *)-1;
+ nary = alloc_vn_nary_op_noinit (2, &vn_tables_insert_obstack);
+ init_vn_nary_op_from_pieces (nary, 2, EQ_EXPR,
+ boolean_type_node, ops);
+ nary->predicated_values = 0;
+ nary->u.result = boolean_false_node;
+ vn_nary_op_insert_into (nary, valid_info->nary, true);
+ gcc_assert (nary->unwind_to == NULL);
+ last_inserted_nary = nary->next;
+ nary->next = (vn_nary_op_t)(void *)-1;
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Recording ");
+ print_generic_expr (dump_file, name, TDF_SLIM);
+ fprintf (dump_file, " != 0\n");
+ }
+ }
+ break;
+
+ case RESULT_DECL:
+ /* If the result is passed by invisible reference the default
+ def is initialized, otherwise it's uninitialized. Still,
+ undefined is varying. */
+ newinfo->visited = true;
+ newinfo->valnum = name;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ return newinfo;
}
-/* Set the value numbering info for a given SSA name to a given
- value. */
+/* Return the SSA value of X. */
-static inline void
-VN_INFO_SET (tree name, vn_ssa_aux_t value)
+inline tree
+SSA_VAL (tree x)
{
- vn_ssa_aux_table[SSA_NAME_VERSION (name)] = value;
+ vn_ssa_aux_t tem = vn_ssa_aux_hash->find_with_hash (x, SSA_NAME_VERSION (x));
+ return tem && tem->visited ? tem->valnum : x;
}
-/* Initialize the value numbering info for a given SSA name.
- This should be called just once for every SSA name. */
+/* Return whether X was visited. */
-vn_ssa_aux_t
-VN_INFO_GET (tree name)
+inline bool
+SSA_VISITED (tree x)
{
- vn_ssa_aux_t newinfo;
+ vn_ssa_aux_t tem = vn_ssa_aux_hash->find_with_hash (x, SSA_NAME_VERSION (x));
+ return tem && tem->visited;
+}
- gcc_assert (SSA_NAME_VERSION (name) >= vn_ssa_aux_table.length ()
- || vn_ssa_aux_table[SSA_NAME_VERSION (name)] == NULL);
- newinfo = XOBNEW (&vn_ssa_aux_obstack, struct vn_ssa_aux);
- memset (newinfo, 0, sizeof (struct vn_ssa_aux));
- if (SSA_NAME_VERSION (name) >= vn_ssa_aux_table.length ())
- vn_ssa_aux_table.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
- vn_ssa_aux_table[SSA_NAME_VERSION (name)] = newinfo;
- return newinfo;
+/* Return the SSA value of the VUSE x, supporting released VDEFs
+ during elimination which will value-number the VDEF to the
+ associated VUSE (but not substitute in the whole lattice). */
+
+static inline tree
+vuse_ssa_val (tree x)
+{
+ if (!x)
+ return NULL_TREE;
+
+ do
+ {
+ if (SSA_NAME_IS_DEFAULT_DEF (x))
+ return x;
+ vn_ssa_aux_t tem
+ = vn_ssa_aux_hash->find_with_hash (x, SSA_NAME_VERSION (x));
+ /* For region-based VN this makes walk_non_aliased_vuses stop walking
+ when we are about to look at a def outside of the region. */
+ if (!tem || !tem->visited)
+ return NULL_TREE;
+ gcc_assert (tem->valnum != VN_TOP);
+ x = tem->valnum;
+ }
+ while (SSA_NAME_IN_FREE_LIST (x));
+
+ return x;
}
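
The hunk above replaces the SSA-version-indexed vn_ssa_aux_table vector with a hash table keyed by the SSA name and turns VN_INFO into a lookup-or-create operation that initializes default definitions on the spot. The stand-alone sketch below models only the on-demand lookup-or-create pattern with a toy chained hash table keyed by an integer version; the types, sizes and initial values are simplifications, not GCC's.

#include <stdio.h>
#include <stdlib.h>

struct aux                    /* models vn_ssa_aux */
{
  unsigned version;
  int valnum;                 /* stand-in for the valnum tree */
  int visited;
  struct aux *next;           /* chaining for the toy hash table */
};

#define NBUCKETS 64
static struct aux *buckets[NBUCKETS];

/* Lookup-or-create, mirroring the find_slot_with_hash (..., INSERT) shape
   of the new VN_INFO.  */
static struct aux *
vn_info (unsigned version)
{
  struct aux **slot = &buckets[version % NBUCKETS];
  for (struct aux *a = *slot; a; a = a->next)
    if (a->version == version)
      return a;
  struct aux *a = calloc (1, sizeof *a);
  if (!a)
    abort ();
  a->version = version;
  a->valnum = -1;             /* stand-in for VN_TOP */
  a->next = *slot;
  *slot = a;
  return a;
}

int
main (void)
{
  vn_info (5)->valnum = 42;
  printf ("%d %d\n", vn_info (5)->valnum, vn_info (7)->valnum);
  return 0;
}
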
@@ -490,6 +585,11 @@ get_or_alloc_constant_value_id (tree constant)
struct vn_constant_s vc;
vn_constant_t vcp;
+ /* If the hashtable isn't initialized we're not running from PRE and thus
+ do not need value-ids. */
+ if (!constant_to_value_id)
+ return 0;
+
vc.hashcode = vn_hash_constant_with_type (constant);
vc.constant = constant;
slot = constant_to_value_id->find_slot (&vc, INSERT);
@@ -1239,6 +1339,10 @@ vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops,
ptroff = gimple_assign_rhs2 (def_stmt);
if (TREE_CODE (ptr) != SSA_NAME
|| SSA_NAME_OCCURS_IN_ABNORMAL_PHI (ptr)
+ /* Make sure to not endlessly recurse.
+ See gcc.dg/tree-ssa/20040408-1.c for an example. Can easily
+ happen when we value-number a PHI to its backedge value. */
+ || SSA_VAL (ptr) == op->op0
|| !poly_int_tree_p (ptroff))
return false;
@@ -1251,6 +1355,8 @@ vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops,
mem_op->off = tree_to_shwi (mem_op->op0);
else
mem_op->off = -1;
+ /* ??? Can end up with endless recursion here!?
+ gcc.c-torture/execute/strcmp-1.c */
if (TREE_CODE (op->op0) == SSA_NAME)
op->op0 = SSA_VAL (op->op0);
if (TREE_CODE (op->op0) != SSA_NAME)
@@ -1279,7 +1385,7 @@ fully_constant_vn_reference_p (vn_reference_t ref)
if (op->opcode == CALL_EXPR
&& TREE_CODE (op->op0) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (op->op0, 0)) == FUNCTION_DECL
- && DECL_BUILT_IN (TREE_OPERAND (op->op0, 0))
+ && fndecl_built_in_p (TREE_OPERAND (op->op0, 0))
&& operands.length () >= 2
&& operands.length () <= 3)
{
@@ -1314,16 +1420,17 @@ fully_constant_vn_reference_p (vn_reference_t ref)
/* Simplify reads from constants or constant initializers. */
else if (BITS_PER_UNIT == 8
- && is_gimple_reg_type (ref->type)
- && (!INTEGRAL_TYPE_P (ref->type)
- || TYPE_PRECISION (ref->type) % BITS_PER_UNIT == 0))
+ && COMPLETE_TYPE_P (ref->type)
+ && is_gimple_reg_type (ref->type))
{
poly_int64 off = 0;
HOST_WIDE_INT size;
if (INTEGRAL_TYPE_P (ref->type))
size = TYPE_PRECISION (ref->type);
- else
+ else if (tree_fits_shwi_p (TYPE_SIZE (ref->type)))
size = tree_to_shwi (TYPE_SIZE (ref->type));
+ else
+ return NULL_TREE;
if (size % BITS_PER_UNIT != 0
|| size > MAX_BITSIZE_MODE_ANY_MODE)
return NULL_TREE;
@@ -1413,7 +1520,8 @@ contains_storage_order_barrier_p (vec<vn_reference_op_s> ops)
whether any operands were valueized. */
static vec<vn_reference_op_s>
-valueize_refs_1 (vec<vn_reference_op_s> orig, bool *valueized_anything)
+valueize_refs_1 (vec<vn_reference_op_s> orig, bool *valueized_anything,
+ bool with_avail = false)
{
vn_reference_op_t vro;
unsigned int i;
@@ -1425,7 +1533,7 @@ valueize_refs_1 (vec<vn_reference_op_s> orig, bool *valueized_anything)
if (vro->opcode == SSA_NAME
|| (vro->op0 && TREE_CODE (vro->op0) == SSA_NAME))
{
- tree tem = SSA_VAL (vro->op0);
+ tree tem = with_avail ? vn_valueize (vro->op0) : SSA_VAL (vro->op0);
if (tem != vro->op0)
{
*valueized_anything = true;
@@ -1438,7 +1546,7 @@ valueize_refs_1 (vec<vn_reference_op_s> orig, bool *valueized_anything)
}
if (vro->op1 && TREE_CODE (vro->op1) == SSA_NAME)
{
- tree tem = SSA_VAL (vro->op1);
+ tree tem = with_avail ? vn_valueize (vro->op1) : SSA_VAL (vro->op1);
if (tem != vro->op1)
{
*valueized_anything = true;
@@ -1447,7 +1555,7 @@ valueize_refs_1 (vec<vn_reference_op_s> orig, bool *valueized_anything)
}
if (vro->op2 && TREE_CODE (vro->op2) == SSA_NAME)
{
- tree tem = SSA_VAL (vro->op2);
+ tree tem = with_avail ? vn_valueize (vro->op2) : SSA_VAL (vro->op2);
if (tem != vro->op2)
{
*valueized_anything = true;
@@ -1619,37 +1727,6 @@ vn_reference_lookup_or_insert_for_pieces (tree vuse,
operands.copy (), value, value_id);
}
-static vn_nary_op_t vn_nary_op_insert_stmt (gimple *, tree);
-static unsigned int vn_nary_length_from_stmt (gimple *);
-static vn_nary_op_t alloc_vn_nary_op_noinit (unsigned int, obstack *);
-static vn_nary_op_t vn_nary_op_insert_into (vn_nary_op_t,
- vn_nary_op_table_type *, bool);
-static void init_vn_nary_op_from_stmt (vn_nary_op_t, gimple *);
-
-/* Hook for maybe_push_res_to_seq, lookup the expression in the VN tables. */
-
-static tree
-vn_lookup_simplify_result (gimple_match_op *res_op)
-{
- if (!res_op->code.is_tree_code ())
- return NULL_TREE;
- tree *ops = res_op->ops;
- unsigned int length = res_op->num_ops;
- if (res_op->code == CONSTRUCTOR
- /* ??? We're arriving here with SCCVNs view, decomposed CONSTRUCTOR
- and GIMPLEs / match-and-simplifies, CONSTRUCTOR as GENERIC tree. */
- && TREE_CODE (res_op->ops[0]) == CONSTRUCTOR)
- {
- length = CONSTRUCTOR_NELTS (res_op->ops[0]);
- ops = XALLOCAVEC (tree, length);
- for (unsigned i = 0; i < length; ++i)
- ops[i] = CONSTRUCTOR_ELT (res_op->ops[0], i)->value;
- }
- vn_nary_op_t vnresult = NULL;
- return vn_nary_op_lookup_pieces (length, (tree_code) res_op->code,
- res_op->type, ops, &vnresult);
-}
-
/* Return a value-number for RCODE OPS... either by looking up an existing
value-number for the simplified result or by inserting the operation if
INSERT is true. */
@@ -1704,7 +1781,7 @@ vn_nary_build_or_lookup_1 (gimple_match_op *res_op, bool insert)
/* The expression is not yet available, value-number lhs to
the new SSA_NAME we created. */
/* Initialize value-number information properly. */
- VN_INFO_GET (result)->valnum = result;
+ VN_INFO (result)->valnum = result;
VN_INFO (result)->value_id = get_next_value_id ();
gimple_seq_add_stmt_without_update (&VN_INFO (result)->expr,
new_stmt);
@@ -1716,8 +1793,8 @@ vn_nary_build_or_lookup_1 (gimple_match_op *res_op, bool insert)
vn_nary_op_lookup_stmt (new_stmt, &nary);
if (nary)
{
- gcc_assert (nary->result == NULL_TREE);
- nary->result = gimple_assign_lhs (new_stmt);
+ gcc_assert (! nary->predicated_values && nary->u.result == NULL_TREE);
+ nary->u.result = gimple_assign_lhs (new_stmt);
}
/* As all "inserted" statements are singleton SCCs, insert
to the valid table. This is strictly needed to
@@ -1733,7 +1810,8 @@ vn_nary_build_or_lookup_1 (gimple_match_op *res_op, bool insert)
= alloc_vn_nary_op_noinit (length, &vn_tables_insert_obstack);
vno1->value_id = VN_INFO (result)->value_id;
vno1->length = length;
- vno1->result = result;
+ vno1->predicated_values = 0;
+ vno1->u.result = result;
init_vn_nary_op_from_stmt (vno1, new_stmt);
vn_nary_op_insert_into (vno1, valid_info->nary, true);
/* Also do not link it into the undo chain. */
@@ -1775,6 +1853,7 @@ vn_nary_simplify (vn_nary_op_t nary)
return vn_nary_build_or_lookup_1 (&op, false);
}
+basic_block vn_context_bb;
/* Callback for walk_non_aliased_vuses. Tries to perform a lookup
from the statement defining VUSE and if not successful tries to
@@ -1796,18 +1875,6 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_,
bool lhs_ref_ok = false;
poly_int64 copy_size;
- /* If the reference is based on a parameter that was determined as
- pointing to readonly memory it doesn't change. */
- if (TREE_CODE (base) == MEM_REF
- && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
- && SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (base, 0))
- && bitmap_bit_p (const_parms,
- SSA_NAME_VERSION (TREE_OPERAND (base, 0))))
- {
- *disambiguate_only = true;
- return NULL;
- }
-
/* First try to disambiguate after value-replacing in the definitions LHS. */
if (is_gimple_assign (def_stmt))
{
@@ -1815,8 +1882,11 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_,
bool valueized_anything = false;
/* Avoid re-allocation overhead. */
lhs_ops.truncate (0);
+ basic_block saved_rpo_bb = vn_context_bb;
+ vn_context_bb = gimple_bb (def_stmt);
copy_reference_ops_from_ref (lhs, &lhs_ops);
- lhs_ops = valueize_refs_1 (lhs_ops, &valueized_anything);
+ lhs_ops = valueize_refs_1 (lhs_ops, &valueized_anything, true);
+ vn_context_bb = saved_rpo_bb;
if (valueized_anything)
{
lhs_ref_ok = ao_ref_init_from_vn_reference (&lhs_ref,
@@ -2141,7 +2211,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_,
{
gimple_match_op op (gimple_match_cond::UNCOND,
BIT_FIELD_REF, vr->type,
- SSA_VAL (gimple_assign_rhs1 (def_stmt)),
+ vn_valueize (gimple_assign_rhs1 (def_stmt)),
bitsize_int (ref->size),
bitsize_int (offset - offset2));
tree val = vn_nary_build_or_lookup (&op);
@@ -2316,7 +2386,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_,
lhs_offset = 0;
if (TREE_CODE (lhs) == SSA_NAME)
{
- lhs = SSA_VAL (lhs);
+ lhs = vn_valueize (lhs);
if (TREE_CODE (lhs) == SSA_NAME)
{
gimple *def_stmt = SSA_NAME_DEF_STMT (lhs);
@@ -2336,7 +2406,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_,
{
lhs = TREE_OPERAND (tem, 0);
if (TREE_CODE (lhs) == SSA_NAME)
- lhs = SSA_VAL (lhs);
+ lhs = vn_valueize (lhs);
lhs_offset += mem_offset;
}
else if (DECL_P (tem))
@@ -2352,7 +2422,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_,
rhs = gimple_call_arg (def_stmt, 1);
rhs_offset = 0;
if (TREE_CODE (rhs) == SSA_NAME)
- rhs = SSA_VAL (rhs);
+ rhs = vn_valueize (rhs);
if (TREE_CODE (rhs) == ADDR_EXPR)
{
tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs, 0),
@@ -2593,10 +2663,9 @@ vn_reference_lookup_call (gcall *call, vn_reference_t *vnresult,
vn_reference_lookup_1 (vr, vnresult);
}
-/* Insert OP into the current hash table with a value number of
- RESULT, and return the resulting reference structure we created. */
+/* Insert OP into the current hash table with a value number of RESULT. */
-static vn_reference_t
+static void
vn_reference_insert (tree op, tree result, tree vuse, tree vdef)
{
vn_reference_s **slot;
@@ -2608,7 +2677,7 @@ vn_reference_insert (tree op, tree result, tree vuse, tree vdef)
vr1->value_id = VN_INFO (result)->value_id;
else
vr1->value_id = get_or_alloc_constant_value_id (result);
- vr1->vuse = vuse ? SSA_VAL (vuse) : NULL_TREE;
+ vr1->vuse = vuse_ssa_val (vuse);
vr1->operands = valueize_shared_reference_ops_from_ref (op, &tem).copy ();
vr1->type = TREE_TYPE (op);
vr1->set = get_alias_set (op);
@@ -2617,24 +2686,35 @@ vn_reference_insert (tree op, tree result, tree vuse, tree vdef)
vr1->result_vdef = vdef;
slot = valid_info->references->find_slot_with_hash (vr1, vr1->hashcode,
- INSERT);
-
- /* Because we lookup stores using vuses, and value number failures
- using the vdefs (see visit_reference_op_store for how and why),
- it's possible that on failure we may try to insert an already
- inserted store. This is not wrong, there is no ssa name for a
- store that we could use as a differentiator anyway. Thus, unlike
- the other lookup functions, you cannot gcc_assert (!*slot)
- here. */
-
- /* But free the old slot in case of a collision. */
+ INSERT);
+
+ /* Because IL walking during reference lookup can end up visiting
+ a def that is only to be visited later in iteration order
+ when we are about to make an irreducible region reducible,
+ the def can effectively have been processed and its ref already
+ inserted by vn_reference_lookup_3. So we cannot assert (!*slot),
+ but handling already inserted refs here saves a lookup. */
if (*slot)
- free_reference (*slot);
+ {
+ /* We cannot assert that we have the same value either, because
+ when disentangling an irreducible region we may end up visiting
+ a use before the corresponding def. That's only a missed
+ optimization though. See gcc.dg/tree-ssa/pr87126.c for an example. */
+ if (dump_file && (dump_flags & TDF_DETAILS)
+ && !operand_equal_p ((*slot)->result, vr1->result, 0))
+ {
+ fprintf (dump_file, "Keeping old value ");
+ print_generic_expr (dump_file, (*slot)->result);
+ fprintf (dump_file, " because of collision\n");
+ }
+ free_reference (vr1);
+ obstack_free (&vn_tables_obstack, vr1);
+ return;
+ }
*slot = vr1;
vr1->next = last_inserted_ref;
last_inserted_ref = vr1;
- return vr1;
}
/* Insert a reference by its pieces into the current hash table with
@@ -2652,7 +2732,7 @@ vn_reference_insert_pieces (tree vuse, alias_set_type set, tree type,
vr1 = XOBNEW (&vn_tables_obstack, vn_reference_s);
vr1->value_id = value_id;
- vr1->vuse = vuse ? SSA_VAL (vuse) : NULL_TREE;
+ vr1->vuse = vuse_ssa_val (vuse);
vr1->operands = valueize_refs (operands);
vr1->type = type;
vr1->set = set;
@@ -2662,14 +2742,12 @@ vn_reference_insert_pieces (tree vuse, alias_set_type set, tree type,
vr1->result = result;
slot = valid_info->references->find_slot_with_hash (vr1, vr1->hashcode,
- INSERT);
+ INSERT);
/* At this point we should have all the things inserted that we have
seen before, and we should never try inserting something that
already exists. */
gcc_assert (!*slot);
- if (*slot)
- free_reference (*slot);
*slot = vr1;
vr1->next = last_inserted_ref;
@@ -2845,13 +2923,12 @@ vn_nary_op_lookup_1 (vn_nary_op_t vno, vn_nary_op_t *vnresult)
*vnresult = NULL;
vno->hashcode = vn_nary_op_compute_hash (vno);
- slot = valid_info->nary->find_slot_with_hash (vno, vno->hashcode,
- NO_INSERT);
+ slot = valid_info->nary->find_slot_with_hash (vno, vno->hashcode, NO_INSERT);
if (!slot)
return NULL_TREE;
if (vnresult)
*vnresult = *slot;
- return (*slot)->result;
+ return (*slot)->predicated_values ? NULL_TREE : (*slot)->u.result;
}
/* Lookup a n-ary operation by its pieces and return the resulting value
@@ -2919,7 +2996,8 @@ alloc_vn_nary_op (unsigned int length, tree result, unsigned int value_id)
vno1->value_id = value_id;
vno1->length = length;
- vno1->result = result;
+ vno1->predicated_values = 0;
+ vno1->u.result = result;
return vno1;
}
@@ -2934,18 +3012,125 @@ vn_nary_op_insert_into (vn_nary_op_t vno, vn_nary_op_table_type *table,
vn_nary_op_s **slot;
if (compute_hash)
- vno->hashcode = vn_nary_op_compute_hash (vno);
+ {
+ vno->hashcode = vn_nary_op_compute_hash (vno);
+ gcc_assert (! vno->predicated_values
+ || (! vno->u.values->next
+ && vno->u.values->valid_dominated_by_p[0] != EXIT_BLOCK
+ && vno->u.values->valid_dominated_by_p[1] == EXIT_BLOCK));
+ }
slot = table->find_slot_with_hash (vno, vno->hashcode, INSERT);
- /* While we do not want to insert things twice it's awkward to
- avoid it in the case where visit_nary_op pattern-matches stuff
- and ends up simplifying the replacement to itself. We then
- get two inserts, one from visit_nary_op and one from
- vn_nary_build_or_lookup.
- So allow inserts with the same value number. */
- if (*slot && (*slot)->result == vno->result)
- return *slot;
+ vno->unwind_to = *slot;
+ if (*slot)
+ {
+ /* Prefer non-predicated values.
+ ??? Only if those are constant, otherwise, with constant predicated
+ value, turn them into predicated values with entry-block validity
+ (??? but we always find the first valid result currently). */
+ if ((*slot)->predicated_values
+ && ! vno->predicated_values)
+ {
+ /* ??? We cannot remove *slot from the unwind stack list.
+ For the moment we deal with this by skipping not-found
+ entries, but this isn't ideal ... */
+ *slot = vno;
+ /* ??? Maintain a stack of states we can unwind in
+ vn_nary_op_s? But how far do we unwind? In reality
+ we need to push change records somewhere... Or not
+ unwind vn_nary_op_s and linking them but instead
+ unwind the results "list", linking that, which also
+ doesn't move on hashtable resize. */
+ /* We can also have a ->unwind_to recording *slot there.
+ That way we can make u.values a fixed size array with
+ recording the number of entries but of course we then
+ have always N copies for each unwind_to-state. Or we
+ make sure to only ever append and each unwinding will
+ pop off one entry (but how to deal with predicated
+ replaced with non-predicated here?) */
+ vno->next = last_inserted_nary;
+ last_inserted_nary = vno;
+ return vno;
+ }
+ else if (vno->predicated_values
+ && ! (*slot)->predicated_values)
+ return *slot;
+ else if (vno->predicated_values
+ && (*slot)->predicated_values)
+ {
+ /* ??? Factor this all into a insert_single_predicated_value
+ routine. */
+ gcc_assert (!vno->u.values->next && vno->u.values->n == 1);
+ basic_block vno_bb
+ = BASIC_BLOCK_FOR_FN (cfun, vno->u.values->valid_dominated_by_p[0]);
+ vn_pval *nval = vno->u.values;
+ vn_pval **next = &vno->u.values;
+ bool found = false;
+ for (vn_pval *val = (*slot)->u.values; val; val = val->next)
+ {
+ if (expressions_equal_p (val->result, vno->u.values->result))
+ {
+ found = true;
+ for (unsigned i = 0; i < val->n; ++i)
+ {
+ basic_block val_bb
+ = BASIC_BLOCK_FOR_FN (cfun,
+ val->valid_dominated_by_p[i]);
+ if (dominated_by_p (CDI_DOMINATORS, vno_bb, val_bb))
+ /* Value registered with more generic predicate. */
+ return *slot;
+ else if (dominated_by_p (CDI_DOMINATORS, val_bb, vno_bb))
+ /* Shouldn't happen, we insert in RPO order. */
+ gcc_unreachable ();
+ }
+ /* Append value. */
+ *next = (vn_pval *) obstack_alloc (&vn_tables_obstack,
+ sizeof (vn_pval)
+ + val->n * sizeof (int));
+ (*next)->next = NULL;
+ (*next)->result = val->result;
+ (*next)->n = val->n + 1;
+ memcpy ((*next)->valid_dominated_by_p,
+ val->valid_dominated_by_p,
+ val->n * sizeof (int));
+ (*next)->valid_dominated_by_p[val->n] = vno_bb->index;
+ next = &(*next)->next;
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Appending predicate to value.\n");
+ continue;
+ }
+ /* Copy other predicated values. */
+ *next = (vn_pval *) obstack_alloc (&vn_tables_obstack,
+ sizeof (vn_pval)
+ + (val->n-1) * sizeof (int));
+ memcpy (*next, val, sizeof (vn_pval) + (val->n-1) * sizeof (int));
+ (*next)->next = NULL;
+ next = &(*next)->next;
+ }
+ if (!found)
+ *next = nval;
+ *slot = vno;
+ vno->next = last_inserted_nary;
+ last_inserted_nary = vno;
+ return vno;
+ }
+
+ /* While we do not want to insert things twice it's awkward to
+ avoid it in the case where visit_nary_op pattern-matches stuff
+ and ends up simplifying the replacement to itself. We then
+ get two inserts, one from visit_nary_op and one from
+ vn_nary_build_or_lookup.
+ So allow inserts with the same value number. */
+ if ((*slot)->u.result == vno->u.result)
+ return *slot;
+ }
+
+ /* ??? There's also optimistic vs. previously committed state merging
+ that is problematic for the case of unwinding. */
+
+ /* ??? We should return NULL if we do not use 'vno' and have the
+ caller release it. */
gcc_assert (!*slot);
*slot = vno;
@@ -2968,6 +3153,70 @@ vn_nary_op_insert_pieces (unsigned int length, enum tree_code code,
return vn_nary_op_insert_into (vno1, valid_info->nary, true);
}
+static vn_nary_op_t
+vn_nary_op_insert_pieces_predicated (unsigned int length, enum tree_code code,
+ tree type, tree *ops,
+ tree result, unsigned int value_id,
+ edge pred_e)
+{
+ /* ??? Currently tracking BBs. */
+ if (! single_pred_p (pred_e->dest))
+ {
+ /* Never record for backedges. */
+ if (pred_e->flags & EDGE_DFS_BACK)
+ return NULL;
+ edge_iterator ei;
+ edge e;
+ int cnt = 0;
+ /* Ignore backedges. */
+ FOR_EACH_EDGE (e, ei, pred_e->dest->preds)
+ if (! dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
+ cnt++;
+ if (cnt != 1)
+ return NULL;
+ }
+ if (dump_file && (dump_flags & TDF_DETAILS)
+ /* ??? Fix dumping, but currently we only get comparisons. */
+ && TREE_CODE_CLASS (code) == tcc_comparison)
+ {
+ fprintf (dump_file, "Recording on edge %d->%d ", pred_e->src->index,
+ pred_e->dest->index);
+ print_generic_expr (dump_file, ops[0], TDF_SLIM);
+ fprintf (dump_file, " %s ", get_tree_code_name (code));
+ print_generic_expr (dump_file, ops[1], TDF_SLIM);
+ fprintf (dump_file, " == %s\n",
+ integer_zerop (result) ? "false" : "true");
+ }
+ vn_nary_op_t vno1 = alloc_vn_nary_op (length, NULL_TREE, value_id);
+ init_vn_nary_op_from_pieces (vno1, length, code, type, ops);
+ vno1->predicated_values = 1;
+ vno1->u.values = (vn_pval *) obstack_alloc (&vn_tables_obstack,
+ sizeof (vn_pval));
+ vno1->u.values->next = NULL;
+ vno1->u.values->result = result;
+ vno1->u.values->n = 1;
+ vno1->u.values->valid_dominated_by_p[0] = pred_e->dest->index;
+ vno1->u.values->valid_dominated_by_p[1] = EXIT_BLOCK;
+ return vn_nary_op_insert_into (vno1, valid_info->nary, true);
+}
+
+static bool
+dominated_by_p_w_unex (basic_block bb1, basic_block bb2);
+
+static tree
+vn_nary_op_get_predicated_value (vn_nary_op_t vno, basic_block bb)
+{
+ if (! vno->predicated_values)
+ return vno->u.result;
+ for (vn_pval *val = vno->u.values; val; val = val->next)
+ for (unsigned i = 0; i < val->n; ++i)
+ if (dominated_by_p_w_unex (bb,
+ BASIC_BLOCK_FOR_FN
+ (cfun, val->valid_dominated_by_p[i])))
+ return val->result;
+ return NULL_TREE;
+}
+
/* Insert OP into the current hash table with a value number of
RESULT. Return the vn_nary_op_t structure we created and put in
the hashtable. */
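
The predicated-values machinery added above stores, per n-ary operation, a list of (result, blocks-where-valid) records and answers a lookup by checking whether the querying block is dominated by one of the recorded blocks (vn_nary_op_get_predicated_value). The toy model below captures that data shape; dominance is reduced to a walk over an explicit immediate-dominator array, which is an assumption made only for illustration.

#include <stdio.h>

#define MAX_BB 16

/* dom[b] is the immediate dominator of block b; block 0 is the entry and
   its own dominator.  A toy stand-in for CDI_DOMINATORS queries.  */
static int dom[MAX_BB];

static int
dominated_by (int bb, int maybe_dom)
{
  for (;;)
    {
      if (bb == maybe_dom)
        return 1;
      if (bb == 0)
        return 0;
      bb = dom[bb];
    }
}

struct pval                   /* models vn_pval */
{
  int result;
  int n;
  int valid_dominated_by_p[4];
  struct pval *next;
};

/* Return the recorded result valid at BB, or -1, mirroring the shape of
   vn_nary_op_get_predicated_value.  */
static int
predicated_value_at (const struct pval *vals, int bb)
{
  for (const struct pval *v = vals; v; v = v->next)
    for (int i = 0; i < v->n; i++)
      if (dominated_by (bb, v->valid_dominated_by_p[i]))
        return v->result;
  return -1;
}

int
main (void)
{
  /* entry 0 -> 1 -> {2, 3}; record "true" as valid in block 2 only.  */
  dom[1] = 0; dom[2] = 1; dom[3] = 1;
  struct pval p = { 1, 1, { 2 }, NULL };
  printf ("%d %d\n", predicated_value_at (&p, 2), predicated_value_at (&p, 3));
  return 0;
}
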
@@ -3170,7 +3419,7 @@ vn_phi_eq (const_vn_phi_t const vp1, const_vn_phi_t const vp2)
it does not exist in the hash table. */
static tree
-vn_phi_lookup (gimple *phi)
+vn_phi_lookup (gimple *phi, bool backedges_varying_p)
{
vn_phi_s **slot;
struct vn_phi_s *vp1;
@@ -3185,7 +3434,9 @@ vn_phi_lookup (gimple *phi)
FOR_EACH_EDGE (e, ei, gimple_bb (phi)->preds)
{
tree def = PHI_ARG_DEF_FROM_EDGE (phi, e);
- def = TREE_CODE (def) == SSA_NAME ? SSA_VAL (def) : def;
+ if (TREE_CODE (def) == SSA_NAME
+ && (!backedges_varying_p || !(e->flags & EDGE_DFS_BACK)))
+ def = SSA_VAL (def);
vp1->phiargs[e->dest_idx] = def;
}
vp1->type = TREE_TYPE (gimple_phi_result (phi));
@@ -3197,12 +3448,13 @@ vn_phi_lookup (gimple *phi)
if (EDGE_COUNT (idom1->succs) == 2)
if (gcond *last1 = safe_dyn_cast <gcond *> (last_stmt (idom1)))
{
+ /* ??? We want to use SSA_VAL here. But possibly not
+ allow VN_TOP. */
vp1->cclhs = vn_valueize (gimple_cond_lhs (last1));
vp1->ccrhs = vn_valueize (gimple_cond_rhs (last1));
}
vp1->hashcode = vn_phi_compute_hash (vp1);
- slot = valid_info->phis->find_slot_with_hash (vp1, vp1->hashcode,
- NO_INSERT);
+ slot = valid_info->phis->find_slot_with_hash (vp1, vp1->hashcode, NO_INSERT);
if (!slot)
return NULL_TREE;
return (*slot)->result;
@@ -3212,7 +3464,7 @@ vn_phi_lookup (gimple *phi)
RESULT. */
static vn_phi_t
-vn_phi_insert (gimple *phi, tree result)
+vn_phi_insert (gimple *phi, tree result, bool backedges_varying_p)
{
vn_phi_s **slot;
vn_phi_t vp1 = (vn_phi_t) obstack_alloc (&vn_tables_obstack,
@@ -3226,7 +3478,9 @@ vn_phi_insert (gimple *phi, tree result)
FOR_EACH_EDGE (e, ei, gimple_bb (phi)->preds)
{
tree def = PHI_ARG_DEF_FROM_EDGE (phi, e);
- def = TREE_CODE (def) == SSA_NAME ? SSA_VAL (def) : def;
+ if (TREE_CODE (def) == SSA_NAME
+ && (!backedges_varying_p || !(e->flags & EDGE_DFS_BACK)))
+ def = SSA_VAL (def);
vp1->phiargs[e->dest_idx] = def;
}
vp1->value_id = VN_INFO (result)->value_id;
@@ -3239,6 +3493,8 @@ vn_phi_insert (gimple *phi, tree result)
if (EDGE_COUNT (idom1->succs) == 2)
if (gcond *last1 = safe_dyn_cast <gcond *> (last_stmt (idom1)))
{
+ /* ??? We want to use SSA_VAL here. But possibly not
+ allow VN_TOP. */
vp1->cclhs = vn_valueize (gimple_cond_lhs (last1));
vp1->ccrhs = vn_valueize (gimple_cond_rhs (last1));
}
@@ -3246,9 +3502,8 @@ vn_phi_insert (gimple *phi, tree result)
vp1->hashcode = vn_phi_compute_hash (vp1);
slot = valid_info->phis->find_slot_with_hash (vp1, vp1->hashcode, INSERT);
+ gcc_assert (!*slot);
- /* Because we iterate over phi operations more than once, it's
- possible the slot might already exist here, hence no assert.*/
*slot = vp1;
vp1->next = last_inserted_phi;
last_inserted_phi = vp1;
@@ -3256,23 +3511,6 @@ vn_phi_insert (gimple *phi, tree result)
}
-/* Print set of components in strongly connected component SCC to OUT. */
-
-static void
-print_scc (FILE *out, vec<tree> scc)
-{
- tree var;
- unsigned int i;
-
- fprintf (out, "SCC consists of %u:", scc.length ());
- FOR_EACH_VEC_ELT (scc, i, var)
- {
- fprintf (out, " ");
- print_generic_expr (out, var);
- }
- fprintf (out, "\n");
-}
-
/* Return true if BB1 is dominated by BB2 taking into account edges
that are not executable. */
@@ -3360,7 +3598,8 @@ dominated_by_p_w_unex (basic_block bb1, basic_block bb2)
static inline bool
set_ssa_val_to (tree from, tree to)
{
- tree currval = SSA_VAL (from);
+ vn_ssa_aux_t from_info = VN_INFO (from);
+ tree currval = from_info->valnum; // SSA_VAL (from)
poly_int64 toff, coff;
/* The only thing we allow as value numbers are ssa_names
@@ -3372,16 +3611,23 @@ set_ssa_val_to (tree from, tree to)
get VN_TOP on valueization. */
if (to == VN_TOP)
{
+ /* ??? When iterating and visiting PHI <undef, backedge-value>
+ for the first time we rightfully get VN_TOP and we need to
+ preserve that to optimize for example gcc.dg/tree-ssa/ssa-sccvn-2.c.
+ With SCCVN we were simply lucky we iterated the other PHI
+ cycles first and thus visited the backedge-value DEF. */
+ if (currval == VN_TOP)
+ goto set_and_exit;
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Forcing value number to varying on "
"receiving VN_TOP\n");
to = from;
}
- gcc_assert (to != NULL_TREE
- && ((TREE_CODE (to) == SSA_NAME
- && (to == from || SSA_VAL (to) == to))
- || is_gimple_min_invariant (to)));
+ gcc_checking_assert (to != NULL_TREE
+ && ((TREE_CODE (to) == SSA_NAME
+ && (to == from || SSA_VAL (to) == to))
+ || is_gimple_min_invariant (to)));
if (from != to)
{
@@ -3399,6 +3645,7 @@ set_ssa_val_to (tree from, tree to)
}
else if (currval != VN_TOP
&& ! is_gimple_min_invariant (currval)
+ && ! ssa_undefined_value_p (currval, false)
&& is_gimple_min_invariant (to))
{
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -3419,6 +3666,7 @@ set_ssa_val_to (tree from, tree to)
to = from;
}
+set_and_exit:
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Setting value number of ");
@@ -3447,73 +3695,7 @@ set_ssa_val_to (tree from, tree to)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " (changed)\n");
-
- /* If we equate two SSA names we have to make the side-band info
- of the leader conservative (and remember whatever original value
- was present). */
- if (TREE_CODE (to) == SSA_NAME)
- {
- if (INTEGRAL_TYPE_P (TREE_TYPE (to))
- && SSA_NAME_RANGE_INFO (to))
- {
- if (SSA_NAME_IS_DEFAULT_DEF (to)
- || dominated_by_p_w_unex
- (gimple_bb (SSA_NAME_DEF_STMT (from)),
- gimple_bb (SSA_NAME_DEF_STMT (to))))
- /* Keep the info from the dominator. */
- ;
- else
- {
- /* Save old info. */
- if (! VN_INFO (to)->info.range_info)
- {
- VN_INFO (to)->info.range_info = SSA_NAME_RANGE_INFO (to);
- VN_INFO (to)->range_info_anti_range_p
- = SSA_NAME_ANTI_RANGE_P (to);
- }
- /* Rather than allocating memory and unioning the info
- just clear it. */
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "clearing range info of ");
- print_generic_expr (dump_file, to);
- fprintf (dump_file, "\n");
- }
- SSA_NAME_RANGE_INFO (to) = NULL;
- }
- }
- else if (POINTER_TYPE_P (TREE_TYPE (to))
- && SSA_NAME_PTR_INFO (to))
- {
- if (SSA_NAME_IS_DEFAULT_DEF (to)
- || dominated_by_p_w_unex
- (gimple_bb (SSA_NAME_DEF_STMT (from)),
- gimple_bb (SSA_NAME_DEF_STMT (to))))
- /* Keep the info from the dominator. */
- ;
- else if (! SSA_NAME_PTR_INFO (from)
- /* Handle the case of trivially equivalent info. */
- || memcmp (SSA_NAME_PTR_INFO (to),
- SSA_NAME_PTR_INFO (from),
- sizeof (ptr_info_def)) != 0)
- {
- /* Save old info. */
- if (! VN_INFO (to)->info.ptr_info)
- VN_INFO (to)->info.ptr_info = SSA_NAME_PTR_INFO (to);
- /* Rather than allocating memory and unioning the info
- just clear it. */
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "clearing points-to info of ");
- print_generic_expr (dump_file, to);
- fprintf (dump_file, "\n");
- }
- SSA_NAME_PTR_INFO (to) = NULL;
- }
- }
- }
-
- VN_INFO (from)->valnum = to;
+ from_info->valnum = to;
return true;
}
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -3521,30 +3703,6 @@ set_ssa_val_to (tree from, tree to)
return false;
}
-/* Mark as processed all the definitions in the defining stmt of USE, or
- the USE itself. */
-
-static void
-mark_use_processed (tree use)
-{
- ssa_op_iter iter;
- def_operand_p defp;
- gimple *stmt = SSA_NAME_DEF_STMT (use);
-
- if (SSA_NAME_IS_DEFAULT_DEF (use) || gimple_code (stmt) == GIMPLE_PHI)
- {
- VN_INFO (use)->use_processed = true;
- return;
- }
-
- FOR_EACH_SSA_DEF_OPERAND (defp, stmt, iter, SSA_OP_ALL_DEFS)
- {
- tree def = DEF_FROM_PTR (defp);
-
- VN_INFO (def)->use_processed = true;
- }
-}
-
/* Set all definitions in STMT to value number to themselves.
Return true if a value number changed. */
@@ -3582,7 +3740,7 @@ static tree
valueized_wider_op (tree wide_type, tree op)
{
if (TREE_CODE (op) == SSA_NAME)
- op = SSA_VAL (op);
+ op = vn_valueize (op);
/* Either we have the op widened available. */
tree ops[3] = {};
@@ -3603,7 +3761,7 @@ valueized_wider_op (tree wide_type, tree op)
if (useless_type_conversion_p (wide_type, TREE_TYPE (tem)))
{
if (TREE_CODE (tem) == SSA_NAME)
- tem = SSA_VAL (tem);
+ tem = vn_valueize (tem);
return tem;
}
}
@@ -3622,7 +3780,10 @@ valueized_wider_op (tree wide_type, tree op)
static bool
visit_nary_op (tree lhs, gassign *stmt)
{
- tree result = vn_nary_op_lookup_stmt (stmt, NULL);
+ vn_nary_op_t vnresult;
+ tree result = vn_nary_op_lookup_stmt (stmt, &vnresult);
+ if (! result && vnresult)
+ result = vn_nary_op_get_predicated_value (vnresult, gimple_bb (stmt));
if (result)
return set_ssa_val_to (lhs, result);
@@ -3773,7 +3934,7 @@ visit_reference_op_call (tree lhs, gcall *stmt)
vr2->result = lhs;
vr2->result_vdef = vdef_val;
slot = valid_info->references->find_slot_with_hash (vr2, vr2->hashcode,
- INSERT);
+ INSERT);
gcc_assert (!*slot);
*slot = vr2;
vr2->next = last_inserted_ref;
@@ -3812,6 +3973,10 @@ visit_reference_op_load (tree lhs, tree op, gimple *stmt)
gimple_match_op res_op (gimple_match_cond::UNCOND,
VIEW_CONVERT_EXPR, TREE_TYPE (op), result);
result = vn_nary_build_or_lookup (&res_op);
+ /* When building the conversion fails, avoid inserting the reference
+ again. */
+ if (!result)
+ return set_ssa_val_to (lhs, lhs);
}
if (result)
@@ -3863,8 +4028,8 @@ visit_reference_op_store (tree lhs, tree op, gimple *stmt)
&& vnresult->result)
{
tree result = vnresult->result;
- if (TREE_CODE (result) == SSA_NAME)
- result = SSA_VAL (result);
+ gcc_checking_assert (TREE_CODE (result) != SSA_NAME
+ || result == SSA_VAL (result));
resultsame = expressions_equal_p (result, op);
if (resultsame)
{
@@ -3887,7 +4052,7 @@ visit_reference_op_store (tree lhs, tree op, gimple *stmt)
vn_reference_lookup (assign, vuse, VN_NOWALK, &vnresult, false);
if (vnresult)
{
- VN_INFO (vdef)->use_processed = true;
+ VN_INFO (vdef)->visited = true;
return set_ssa_val_to (vdef, vnresult->result_vdef);
}
}
@@ -3935,12 +4100,16 @@ visit_reference_op_store (tree lhs, tree op, gimple *stmt)
}
/* Visit and value number PHI, return true if the value number
- changed. */
+ changed. When BACKEDGES_VARYING_P is true then assume all
+ backedge values are varying. When INSERTED is not NULL then
+ this is just a query ahead of a possible iteration; set INSERTED
+ to true if we'd insert into the hashtable. */
static bool
-visit_phi (gimple *phi)
+visit_phi (gimple *phi, bool *inserted, bool backedges_varying_p)
{
tree result, sameval = VN_TOP, seen_undef = NULL_TREE;
+ tree backedge_name = NULL_TREE;
tree sameval_base = NULL_TREE;
poly_int64 soff, doff;
unsigned n_executable = 0;
@@ -3948,11 +4117,17 @@ visit_phi (gimple *phi)
edge_iterator ei;
edge e;
- /* TODO: We could check for this in init_sccvn, and replace this
+ /* TODO: We could check for this in initialization, and replace this
with a gcc_assert. */
if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (PHI_RESULT (phi)))
return set_ssa_val_to (PHI_RESULT (phi), PHI_RESULT (phi));
+ /* We track whether a PHI was CSEd to, so as to avoid excessive
+ iterations that would be necessary only because the PHI changed
+ arguments but not its value. */
+ if (!inserted)
+ gimple_set_plf (phi, GF_PLF_1, false);
+
/* See if all non-TOP arguments have the same value. TOP is
equivalent to everything, so we can ignore it. */
FOR_EACH_EDGE (e, ei, gimple_bb (phi)->preds)
@@ -3962,11 +4137,17 @@ visit_phi (gimple *phi)
++n_executable;
if (TREE_CODE (def) == SSA_NAME)
- def = SSA_VAL (def);
+ {
+ if (e->flags & EDGE_DFS_BACK)
+ backedge_name = def;
+ if (!backedges_varying_p || !(e->flags & EDGE_DFS_BACK))
+ def = SSA_VAL (def);
+ }
if (def == VN_TOP)
;
/* Ignore undefined defs for sameval but record one. */
else if (TREE_CODE (def) == SSA_NAME
+ && ! virtual_operand_p (def)
&& ssa_undefined_value_p (def, false))
seen_undef = def;
else if (sameval == VN_TOP)
@@ -3995,10 +4176,15 @@ visit_phi (gimple *phi)
}
}
-
+ /* If the value we want to use is the backedge value and that wasn't
+ visited yet, drop to VARYING. */
+ if (backedge_name
+ && sameval == backedge_name
+ && !SSA_VISITED (backedge_name))
+ result = PHI_RESULT (phi);
/* If none of the edges was executable keep the value-number at VN_TOP,
if only a single edge is executable use its value. */
- if (n_executable <= 1)
+ else if (n_executable <= 1)
result = seen_undef ? seen_undef : sameval;
/* If we saw only undefined values and VN_TOP use one of the
undefined values. */
@@ -4006,8 +4192,22 @@ visit_phi (gimple *phi)
result = seen_undef ? seen_undef : sameval;
/* First see if it is equivalent to a phi node in this block. We prefer
this as it allows IV elimination - see PRs 66502 and 67167. */
- else if ((result = vn_phi_lookup (phi)))
- ;
+ else if ((result = vn_phi_lookup (phi, backedges_varying_p)))
+ {
+ if (!inserted
+ && TREE_CODE (result) == SSA_NAME
+ && gimple_code (SSA_NAME_DEF_STMT (result)) == GIMPLE_PHI)
+ {
+ gimple_set_plf (SSA_NAME_DEF_STMT (result), GF_PLF_1, true);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Marking CSEd to PHI node ");
+ print_gimple_expr (dump_file, SSA_NAME_DEF_STMT (result),
+ 0, TDF_SLIM);
+ fprintf (dump_file, "\n");
+ }
+ }
+ }
/* If all values are the same use that, unless we've seen undefined
values as well and the value isn't constant.
CCP/copyprop have the same restriction to not remove uninit warnings. */
@@ -4020,7 +4220,9 @@ visit_phi (gimple *phi)
/* Only insert PHIs that are varying, for constant value numbers
we mess up equivalences otherwise as we are only comparing
the immediate controlling predicates. */
- vn_phi_insert (phi, result);
+ vn_phi_insert (phi, result, backedges_varying_p);
+ if (inserted)
+ *inserted = true;
}
return set_ssa_val_to (PHI_RESULT (phi), result);
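
The heart of the sameval computation above is a lattice meet over the executable incoming arguments. A minimal stand-alone sketch of just that meet, assuming integer value numbers with sentinels standing in for VN_TOP and varying, and leaving out the undef, backedge and phi-CSE special cases the real code handles:

#include <cstdio>
#include <vector>

static const int TOP = -1;      /* optimistic "unknown", compatible with all */
static const int VARYING = -2;  /* pessimistic bottom */

/* Meet the value numbers of the executable PHI arguments.  */
static int
phi_meet (const std::vector<int> &args)
{
  int sameval = TOP;
  for (int v : args)
    {
      if (v == TOP)
        continue;        /* TOP is equivalent to everything, ignore it */
      if (sameval == TOP)
        sameval = v;     /* first real value seen */
      else if (sameval != v)
        return VARYING;  /* two different values -> the PHI is varying */
    }
  return sameval;
}

int
main ()
{
  std::printf ("%d\n", phi_meet ({ TOP, 7, 7 }));  /* 7 */
  std::printf ("%d\n", phi_meet ({ 7, 8 }));       /* -2 (VARYING) */
}
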
@@ -4051,30 +4253,22 @@ try_to_simplify (gassign *stmt)
return NULL_TREE;
}
-/* Visit and value number USE, return true if the value number
- changed. */
+/* Visit and value number STMT, return true if the value number
+ changed. */
static bool
-visit_use (tree use)
+visit_stmt (gimple *stmt, bool backedges_varying_p = false)
{
bool changed = false;
- gimple *stmt = SSA_NAME_DEF_STMT (use);
-
- mark_use_processed (use);
-
- gcc_assert (!SSA_NAME_IN_FREE_LIST (use)
- && !SSA_NAME_IS_DEFAULT_DEF (use));
if (dump_file && (dump_flags & TDF_DETAILS))
{
- fprintf (dump_file, "Value numbering ");
- print_generic_expr (dump_file, use);
- fprintf (dump_file, " stmt = ");
+ fprintf (dump_file, "Value numbering stmt = ");
print_gimple_stmt (dump_file, stmt, 0);
}
if (gimple_code (stmt) == GIMPLE_PHI)
- changed = visit_phi (stmt);
+ changed = visit_phi (stmt, NULL, backedges_varying_p);
else if (gimple_has_volatile_ops (stmt))
changed = defs_to_varying (stmt);
else if (gassign *ass = dyn_cast <gassign *> (stmt))
@@ -4261,313 +4455,15 @@ visit_use (tree use)
return changed;
}
-/* Compare two operands by reverse postorder index */
-
-static int
-compare_ops (const void *pa, const void *pb)
-{
- const tree opa = *((const tree *)pa);
- const tree opb = *((const tree *)pb);
- gimple *opstmta = SSA_NAME_DEF_STMT (opa);
- gimple *opstmtb = SSA_NAME_DEF_STMT (opb);
- basic_block bba;
- basic_block bbb;
-
- if (gimple_nop_p (opstmta) && gimple_nop_p (opstmtb))
- return SSA_NAME_VERSION (opa) - SSA_NAME_VERSION (opb);
- else if (gimple_nop_p (opstmta))
- return -1;
- else if (gimple_nop_p (opstmtb))
- return 1;
-
- bba = gimple_bb (opstmta);
- bbb = gimple_bb (opstmtb);
-
- if (!bba && !bbb)
- return SSA_NAME_VERSION (opa) - SSA_NAME_VERSION (opb);
- else if (!bba)
- return -1;
- else if (!bbb)
- return 1;
-
- if (bba == bbb)
- {
- if (gimple_code (opstmta) == GIMPLE_PHI
- && gimple_code (opstmtb) == GIMPLE_PHI)
- return SSA_NAME_VERSION (opa) - SSA_NAME_VERSION (opb);
- else if (gimple_code (opstmta) == GIMPLE_PHI)
- return -1;
- else if (gimple_code (opstmtb) == GIMPLE_PHI)
- return 1;
- else if (gimple_uid (opstmta) != gimple_uid (opstmtb))
- return gimple_uid (opstmta) - gimple_uid (opstmtb);
- else
- return SSA_NAME_VERSION (opa) - SSA_NAME_VERSION (opb);
- }
- return rpo_numbers[bba->index] - rpo_numbers[bbb->index];
-}
-
-/* Sort an array containing members of a strongly connected component
- SCC so that the members are ordered by RPO number.
- This means that when the sort is complete, iterating through the
- array will give you the members in RPO order. */
-
-static void
-sort_scc (vec<tree> scc)
-{
- scc.qsort (compare_ops);
-}
-
-/* Process a strongly connected component in the SSA graph. */
-
-static void
-process_scc (vec<tree> scc)
-{
- tree var;
- unsigned int i;
- unsigned int iterations = 0;
- bool changed = true;
-
- /* If the SCC has a single member, just visit it. */
- if (scc.length () == 1)
- {
- tree use = scc[0];
- if (VN_INFO (use)->use_processed)
- return;
- /* We need to make sure it doesn't form a cycle itself, which can
- happen for self-referential PHI nodes. In that case we would
- end up inserting an expression with VN_TOP operands into the
- valid table which makes us derive bogus equivalences later.
- The cheapest way to check this is to assume it for all PHI nodes. */
- if (gimple_code (SSA_NAME_DEF_STMT (use)) == GIMPLE_PHI)
- /* Fallthru to iteration. */ ;
- else
- {
- visit_use (use);
- return;
- }
- }
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- print_scc (dump_file, scc);
-
- /* Iterate over the SCC with the optimistic table until it stops
- changing. */
- while (changed)
- {
- changed = false;
- iterations++;
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "Starting iteration %d\n", iterations);
- /* As we are value-numbering optimistically we have to
- clear the expression tables and the simplified expressions
- in each iteration until we converge. */
- void *ob_top = obstack_alloc (&vn_tables_obstack, 0);
- vn_reference_t ref_top = last_inserted_ref;
- vn_phi_t phi_top = last_inserted_phi;
- vn_nary_op_t nary_top = last_inserted_nary;
- FOR_EACH_VEC_ELT (scc, i, var)
- gcc_assert (!VN_INFO (var)->needs_insertion
- && VN_INFO (var)->expr == NULL);
- FOR_EACH_VEC_ELT (scc, i, var)
- changed |= visit_use (var);
- if (changed)
- {
- for (; last_inserted_nary != nary_top;
- last_inserted_nary = last_inserted_nary->next)
- {
- vn_nary_op_t *slot;
- slot = valid_info->nary->find_slot_with_hash (last_inserted_nary,
- last_inserted_nary->hashcode,
- NO_INSERT);
- gcc_assert (slot);
- valid_info->nary->clear_slot (slot);
- }
- for (; last_inserted_phi != phi_top;
- last_inserted_phi = last_inserted_phi->next)
- {
- vn_phi_t *slot;
- slot = valid_info->phis->find_slot_with_hash (last_inserted_phi,
- last_inserted_phi->hashcode,
- NO_INSERT);
- gcc_assert (slot);
- valid_info->phis->clear_slot (slot);
- }
- for (; last_inserted_ref != ref_top;
- last_inserted_ref = last_inserted_ref->next)
- {
- vn_reference_t *slot;
- slot = valid_info->references->find_slot_with_hash (last_inserted_ref,
- last_inserted_ref->hashcode,
- NO_INSERT);
- gcc_assert (slot);
- (*slot)->operands.release ();
- valid_info->references->clear_slot (slot);
- }
- obstack_free (&vn_tables_obstack, ob_top);
- }
- }
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "Processing SCC needed %d iterations\n", iterations);
- statistics_histogram_event (cfun, "SCC iterations", iterations);
-}
-
-
-/* Pop the components of the found SCC for NAME off the SCC stack
- and process them. Returns true if all went well, false if
- we run into resource limits. */
-
-static void
-extract_and_process_scc_for_name (tree name)
-{
- auto_vec<tree> scc;
- tree x;
-
- /* Found an SCC, pop the components off the SCC stack and
- process them. */
- do
- {
- x = sccstack.pop ();
-
- VN_INFO (x)->on_sccstack = false;
- scc.safe_push (x);
- } while (x != name);
-
- /* Drop all defs in the SCC to varying in case a SCC turns out to be
- incredibly large.
- ??? Just switch to a non-optimistic mode that avoids any iteration. */
- if (scc.length () > (unsigned)PARAM_VALUE (PARAM_SCCVN_MAX_SCC_SIZE))
- {
- if (dump_file)
- {
- print_scc (dump_file, scc);
- fprintf (dump_file, "WARNING: Giving up value-numbering SCC due to "
- "size %u exceeding %u\n", scc.length (),
- (unsigned)PARAM_VALUE (PARAM_SCCVN_MAX_SCC_SIZE));
- }
- tree var;
- unsigned i;
- FOR_EACH_VEC_ELT (scc, i, var)
- {
- gimple *def = SSA_NAME_DEF_STMT (var);
- mark_use_processed (var);
- if (SSA_NAME_IS_DEFAULT_DEF (var)
- || gimple_code (def) == GIMPLE_PHI)
- set_ssa_val_to (var, var);
- else
- defs_to_varying (def);
- }
- return;
- }
-
- if (scc.length () > 1)
- sort_scc (scc);
-
- process_scc (scc);
-}
-
-/* Depth first search on NAME to discover and process SCC's in the SSA
- graph.
- Execution of this algorithm relies on the fact that the SCC's are
- popped off the stack in topological order.
- Returns true if successful, false if we stopped processing SCC's due
- to resource constraints. */
-
-static void
-DFS (tree name)
-{
- auto_vec<ssa_op_iter> itervec;
- auto_vec<tree> namevec;
- use_operand_p usep = NULL;
- gimple *defstmt;
- tree use;
- ssa_op_iter iter;
-
-start_over:
- /* SCC info */
- VN_INFO (name)->dfsnum = next_dfs_num++;
- VN_INFO (name)->visited = true;
- VN_INFO (name)->low = VN_INFO (name)->dfsnum;
-
- sccstack.safe_push (name);
- VN_INFO (name)->on_sccstack = true;
- defstmt = SSA_NAME_DEF_STMT (name);
-
- /* Recursively DFS on our operands, looking for SCC's. */
- if (!gimple_nop_p (defstmt))
- {
- /* Push a new iterator. */
- if (gphi *phi = dyn_cast <gphi *> (defstmt))
- usep = op_iter_init_phiuse (&iter, phi, SSA_OP_ALL_USES);
- else
- usep = op_iter_init_use (&iter, defstmt, SSA_OP_ALL_USES);
- }
- else
- clear_and_done_ssa_iter (&iter);
-
- while (1)
- {
- /* If we are done processing uses of a name, go up the stack
- of iterators and process SCCs as we found them. */
- if (op_iter_done (&iter))
- {
- /* See if we found an SCC. */
- if (VN_INFO (name)->low == VN_INFO (name)->dfsnum)
- extract_and_process_scc_for_name (name);
-
- /* Check if we are done. */
- if (namevec.is_empty ())
- return;
-
- /* Restore the last use walker and continue walking there. */
- use = name;
- name = namevec.pop ();
- memcpy (&iter, &itervec.last (),
- sizeof (ssa_op_iter));
- itervec.pop ();
- goto continue_walking;
- }
-
- use = USE_FROM_PTR (usep);
-
- /* Since we handle phi nodes, we will sometimes get
- invariants in the use expression. */
- if (TREE_CODE (use) == SSA_NAME)
- {
- if (! (VN_INFO (use)->visited))
- {
- /* Recurse by pushing the current use walking state on
- the stack and starting over. */
- itervec.safe_push (iter);
- namevec.safe_push (name);
- name = use;
- goto start_over;
-
-continue_walking:
- VN_INFO (name)->low = MIN (VN_INFO (name)->low,
- VN_INFO (use)->low);
- }
- if (VN_INFO (use)->dfsnum < VN_INFO (name)->dfsnum
- && VN_INFO (use)->on_sccstack)
- {
- VN_INFO (name)->low = MIN (VN_INFO (use)->dfsnum,
- VN_INFO (name)->low);
- }
- }
-
- usep = op_iter_next_use (&iter);
- }
-}
/* Allocate a value number table. */
static void
-allocate_vn_table (vn_tables_t table)
+allocate_vn_table (vn_tables_t table, unsigned size)
{
- table->phis = new vn_phi_table_type (23);
- table->nary = new vn_nary_op_table_type (23);
- table->references = new vn_reference_table_type (23);
+ table->phis = new vn_phi_table_type (size);
+ table->nary = new vn_nary_op_table_type (size);
+ table->references = new vn_reference_table_type (size);
}
/* Free a value number table. */
@@ -4588,174 +4484,6 @@ free_vn_table (vn_tables_t table)
table->references = NULL;
}
-static void
-init_scc_vn (void)
-{
- int j;
- int *rpo_numbers_temp;
-
- calculate_dominance_info (CDI_DOMINATORS);
- mark_dfs_back_edges ();
-
- sccstack.create (0);
- constant_to_value_id = new hash_table<vn_constant_hasher> (23);
-
- constant_value_ids = BITMAP_ALLOC (NULL);
-
- next_dfs_num = 1;
- next_value_id = 1;
-
- vn_ssa_aux_table.create (num_ssa_names + 1);
- /* VEC_alloc doesn't actually grow it to the right size, it just
- preallocates the space to do so. */
- vn_ssa_aux_table.safe_grow_cleared (num_ssa_names + 1);
- gcc_obstack_init (&vn_ssa_aux_obstack);
-
- shared_lookup_references.create (0);
- rpo_numbers = XNEWVEC (int, last_basic_block_for_fn (cfun));
- rpo_numbers_temp =
- XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
- pre_and_rev_post_order_compute (NULL, rpo_numbers_temp, false);
-
- /* RPO numbers is an array of rpo ordering, rpo[i] = bb means that
- the i'th block in RPO order is bb. We want to map bb's to RPO
- numbers, so we need to rearrange this array. */
- for (j = 0; j < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; j++)
- rpo_numbers[rpo_numbers_temp[j]] = j;
-
- XDELETE (rpo_numbers_temp);
-
- VN_TOP = build_decl (UNKNOWN_LOCATION, VAR_DECL,
- get_identifier ("VN_TOP"), error_mark_node);
-
- renumber_gimple_stmt_uids ();
-
- /* Create the valid and optimistic value numbering tables. */
- gcc_obstack_init (&vn_tables_obstack);
- gcc_obstack_init (&vn_tables_insert_obstack);
- valid_info = XCNEW (struct vn_tables_s);
- allocate_vn_table (valid_info);
- last_inserted_ref = NULL;
- last_inserted_phi = NULL;
- last_inserted_nary = NULL;
-
- /* Create the VN_INFO structures, and initialize value numbers to
- TOP or VARYING for parameters. */
- size_t i;
- tree name;
-
- FOR_EACH_SSA_NAME (i, name, cfun)
- {
- VN_INFO_GET (name)->valnum = VN_TOP;
- VN_INFO (name)->needs_insertion = false;
- VN_INFO (name)->expr = NULL;
- VN_INFO (name)->value_id = 0;
-
- if (!SSA_NAME_IS_DEFAULT_DEF (name))
- continue;
-
- switch (TREE_CODE (SSA_NAME_VAR (name)))
- {
- case VAR_DECL:
- /* All undefined vars are VARYING. */
- VN_INFO (name)->valnum = name;
- VN_INFO (name)->visited = true;
- break;
-
- case PARM_DECL:
- /* Parameters are VARYING but we can record a condition
- if we know it is a non-NULL pointer. */
- VN_INFO (name)->visited = true;
- VN_INFO (name)->valnum = name;
- if (POINTER_TYPE_P (TREE_TYPE (name))
- && nonnull_arg_p (SSA_NAME_VAR (name)))
- {
- tree ops[2];
- ops[0] = name;
- ops[1] = build_int_cst (TREE_TYPE (name), 0);
- vn_nary_op_insert_pieces (2, NE_EXPR, boolean_type_node, ops,
- boolean_true_node, 0);
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Recording ");
- print_generic_expr (dump_file, name, TDF_SLIM);
- fprintf (dump_file, " != 0\n");
- }
- }
- break;
-
- case RESULT_DECL:
- /* If the result is passed by invisible reference the default
- def is initialized, otherwise it's uninitialized. Still
- undefined is varying. */
- VN_INFO (name)->visited = true;
- VN_INFO (name)->valnum = name;
- break;
-
- default:
- gcc_unreachable ();
- }
- }
-}
-
-/* Restore SSA info that has been reset on value leaders. */
-
-void
-scc_vn_restore_ssa_info (void)
-{
- unsigned i;
- tree name;
-
- FOR_EACH_SSA_NAME (i, name, cfun)
- {
- if (has_VN_INFO (name))
- {
- if (VN_INFO (name)->needs_insertion)
- ;
- else if (POINTER_TYPE_P (TREE_TYPE (name))
- && VN_INFO (name)->info.ptr_info)
- SSA_NAME_PTR_INFO (name) = VN_INFO (name)->info.ptr_info;
- else if (INTEGRAL_TYPE_P (TREE_TYPE (name))
- && VN_INFO (name)->info.range_info)
- {
- SSA_NAME_RANGE_INFO (name) = VN_INFO (name)->info.range_info;
- SSA_NAME_ANTI_RANGE_P (name)
- = VN_INFO (name)->range_info_anti_range_p;
- }
- }
- }
-}
-
-void
-free_scc_vn (void)
-{
- size_t i;
- tree name;
-
- delete constant_to_value_id;
- constant_to_value_id = NULL;
- BITMAP_FREE (constant_value_ids);
- shared_lookup_references.release ();
- XDELETEVEC (rpo_numbers);
-
- FOR_EACH_SSA_NAME (i, name, cfun)
- {
- if (has_VN_INFO (name)
- && VN_INFO (name)->needs_insertion)
- release_ssa_name (name);
- }
- obstack_free (&vn_ssa_aux_obstack, NULL);
- vn_ssa_aux_table.release ();
-
- sccstack.release ();
- free_vn_table (valid_info);
- XDELETE (valid_info);
- obstack_free (&vn_tables_obstack, NULL);
- obstack_free (&vn_tables_insert_obstack, NULL);
-
- BITMAP_FREE (const_parms);
-}
-
/* Set *ID according to RESULT. */
static void
@@ -4785,7 +4513,8 @@ set_hashtable_value_ids (void)
table. */
FOR_EACH_HASH_TABLE_ELEMENT (*valid_info->nary, vno, vn_nary_op_t, hin)
- set_value_id_for_result (vno->result, &vno->value_id);
+ if (! vno->predicated_values)
+ set_value_id_for_result (vno->u.result, &vno->value_id);
FOR_EACH_HASH_TABLE_ELEMENT (*valid_info->phis, vp, vn_phi_t, hip)
set_value_id_for_result (vp->result, &vp->value_id);
@@ -4795,317 +4524,6 @@ set_hashtable_value_ids (void)
set_value_id_for_result (vr->result, &vr->value_id);
}
-class sccvn_dom_walker : public dom_walker
-{
-public:
- sccvn_dom_walker ()
- : dom_walker (CDI_DOMINATORS, REACHABLE_BLOCKS), cond_stack (0) {}
-
- virtual edge before_dom_children (basic_block);
- virtual void after_dom_children (basic_block);
-
- void record_cond (basic_block,
- enum tree_code code, tree lhs, tree rhs, bool value);
- void record_conds (basic_block,
- enum tree_code code, tree lhs, tree rhs, bool value);
-
- auto_vec<std::pair <basic_block, std::pair <vn_nary_op_t, vn_nary_op_t> > >
- cond_stack;
-};
-
-/* Record a temporary condition for the BB and its dominated blocks. */
-
-void
-sccvn_dom_walker::record_cond (basic_block bb,
- enum tree_code code, tree lhs, tree rhs,
- bool value)
-{
- tree ops[2] = { lhs, rhs };
- vn_nary_op_t old = NULL;
- if (vn_nary_op_lookup_pieces (2, code, boolean_type_node, ops, &old))
- valid_info->nary->remove_elt_with_hash (old, old->hashcode);
- vn_nary_op_t cond
- = vn_nary_op_insert_pieces (2, code, boolean_type_node, ops,
- value
- ? boolean_true_node
- : boolean_false_node, 0);
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Recording temporarily ");
- print_generic_expr (dump_file, ops[0], TDF_SLIM);
- fprintf (dump_file, " %s ", get_tree_code_name (code));
- print_generic_expr (dump_file, ops[1], TDF_SLIM);
- fprintf (dump_file, " == %s%s\n",
- value ? "true" : "false",
- old ? " (old entry saved)" : "");
- }
- cond_stack.safe_push (std::make_pair (bb, std::make_pair (cond, old)));
-}
-
-/* Record temporary conditions for the BB and its dominated blocks
- according to LHS CODE RHS == VALUE and its dominated conditions. */
-
-void
-sccvn_dom_walker::record_conds (basic_block bb,
- enum tree_code code, tree lhs, tree rhs,
- bool value)
-{
- /* Record the original condition. */
- record_cond (bb, code, lhs, rhs, value);
-
- if (!value)
- return;
-
- /* Record dominated conditions if the condition is true. Note that
- the inversion is already recorded. */
- switch (code)
- {
- case LT_EXPR:
- case GT_EXPR:
- record_cond (bb, code == LT_EXPR ? LE_EXPR : GE_EXPR, lhs, rhs, true);
- record_cond (bb, NE_EXPR, lhs, rhs, true);
- record_cond (bb, EQ_EXPR, lhs, rhs, false);
- break;
-
- case EQ_EXPR:
- record_cond (bb, LE_EXPR, lhs, rhs, true);
- record_cond (bb, GE_EXPR, lhs, rhs, true);
- record_cond (bb, LT_EXPR, lhs, rhs, false);
- record_cond (bb, GT_EXPR, lhs, rhs, false);
- break;
-
- default:
- break;
- }
-}
-
-/* Restore expressions and values derived from conditionals. */
-
-void
-sccvn_dom_walker::after_dom_children (basic_block bb)
-{
- while (!cond_stack.is_empty ()
- && cond_stack.last ().first == bb)
- {
- vn_nary_op_t cond = cond_stack.last ().second.first;
- vn_nary_op_t old = cond_stack.last ().second.second;
- valid_info->nary->remove_elt_with_hash (cond, cond->hashcode);
- if (old)
- vn_nary_op_insert_into (old, valid_info->nary, false);
- cond_stack.pop ();
- }
-}
-
-/* Value number all statements in BB. */
-
-edge
-sccvn_dom_walker::before_dom_children (basic_block bb)
-{
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "Visiting BB %d\n", bb->index);
-
- /* If we have a single predecessor record the equivalence from a
- possible condition on the predecessor edge. */
- edge pred_e = single_pred_edge_ignoring_loop_edges (bb, false);
- if (pred_e)
- {
- /* Check if there are multiple executable successor edges in
- the source block. Otherwise there is no additional info
- to be recorded. */
- edge_iterator ei;
- edge e2;
- FOR_EACH_EDGE (e2, ei, pred_e->src->succs)
- if (e2 != pred_e
- && e2->flags & EDGE_EXECUTABLE)
- break;
- if (e2 && (e2->flags & EDGE_EXECUTABLE))
- {
- gimple *stmt = last_stmt (pred_e->src);
- if (stmt
- && gimple_code (stmt) == GIMPLE_COND)
- {
- enum tree_code code = gimple_cond_code (stmt);
- tree lhs = gimple_cond_lhs (stmt);
- tree rhs = gimple_cond_rhs (stmt);
- record_conds (bb, code, lhs, rhs,
- (pred_e->flags & EDGE_TRUE_VALUE) != 0);
- code = invert_tree_comparison (code, HONOR_NANS (lhs));
- if (code != ERROR_MARK)
- record_conds (bb, code, lhs, rhs,
- (pred_e->flags & EDGE_TRUE_VALUE) == 0);
- }
- }
- }
-
- /* Value-number all defs in the basic-block. */
- for (gphi_iterator gsi = gsi_start_phis (bb);
- !gsi_end_p (gsi); gsi_next (&gsi))
- {
- gphi *phi = gsi.phi ();
- tree res = PHI_RESULT (phi);
- if (!VN_INFO (res)->visited)
- DFS (res);
- }
- for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
- !gsi_end_p (gsi); gsi_next (&gsi))
- {
- ssa_op_iter i;
- tree op;
- FOR_EACH_SSA_TREE_OPERAND (op, gsi_stmt (gsi), i, SSA_OP_ALL_DEFS)
- if (!VN_INFO (op)->visited)
- DFS (op);
- }
-
- /* Finally look at the last stmt. */
- gimple *stmt = last_stmt (bb);
- if (!stmt)
- return NULL;
-
- enum gimple_code code = gimple_code (stmt);
- if (code != GIMPLE_COND
- && code != GIMPLE_SWITCH
- && code != GIMPLE_GOTO)
- return NULL;
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Visiting control stmt ending BB %d: ", bb->index);
- print_gimple_stmt (dump_file, stmt, 0);
- }
-
- /* ??? We can even handle stmts with outgoing EH or ABNORMAL edges
- if value-numbering can prove they are not reachable. Handling
- computed gotos is also possible. */
- tree val;
- switch (code)
- {
- case GIMPLE_COND:
- {
- tree lhs = vn_valueize (gimple_cond_lhs (stmt));
- tree rhs = vn_valueize (gimple_cond_rhs (stmt));
- val = gimple_simplify (gimple_cond_code (stmt),
- boolean_type_node, lhs, rhs,
- NULL, vn_valueize);
- /* If that didn't simplify to a constant see if we have recorded
- temporary expressions from taken edges. */
- if (!val || TREE_CODE (val) != INTEGER_CST)
- {
- tree ops[2];
- ops[0] = lhs;
- ops[1] = rhs;
- val = vn_nary_op_lookup_pieces (2, gimple_cond_code (stmt),
- boolean_type_node, ops, NULL);
- }
- break;
- }
- case GIMPLE_SWITCH:
- val = gimple_switch_index (as_a <gswitch *> (stmt));
- break;
- case GIMPLE_GOTO:
- val = gimple_goto_dest (stmt);
- break;
- default:
- gcc_unreachable ();
- }
- if (!val)
- return NULL;
-
- edge taken = find_taken_edge (bb, vn_valueize (val));
- if (!taken)
- return NULL;
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "Marking all edges out of BB %d but (%d -> %d) as "
- "not executable\n", bb->index, bb->index, taken->dest->index);
-
- return taken;
-}
-
-/* Do SCCVN. Returns true if it finished, false if we bailed out
- due to resource constraints. DEFAULT_VN_WALK_KIND_ specifies
- how we use the alias oracle walking during the VN process. */
-
-void
-run_scc_vn (vn_lookup_kind default_vn_walk_kind_)
-{
- size_t i;
-
- default_vn_walk_kind = default_vn_walk_kind_;
-
- init_scc_vn ();
-
- /* Collect pointers we know point to readonly memory. */
- const_parms = BITMAP_ALLOC (NULL);
- tree fnspec = lookup_attribute ("fn spec",
- TYPE_ATTRIBUTES (TREE_TYPE (cfun->decl)));
- if (fnspec)
- {
- fnspec = TREE_VALUE (TREE_VALUE (fnspec));
- i = 1;
- for (tree arg = DECL_ARGUMENTS (cfun->decl);
- arg; arg = DECL_CHAIN (arg), ++i)
- {
- if (i >= (unsigned) TREE_STRING_LENGTH (fnspec))
- break;
- if (TREE_STRING_POINTER (fnspec)[i] == 'R'
- || TREE_STRING_POINTER (fnspec)[i] == 'r')
- {
- tree name = ssa_default_def (cfun, arg);
- if (name)
- bitmap_set_bit (const_parms, SSA_NAME_VERSION (name));
- }
- }
- }
-
- /* Walk all blocks in dominator order, value-numbering stmts
- SSA defs and decide whether outgoing edges are not executable. */
- sccvn_dom_walker walker;
- walker.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
-
- /* Initialize the value ids and prune out remaining VN_TOPs
- from dead code. */
- tree name;
- FOR_EACH_SSA_NAME (i, name, cfun)
- {
- vn_ssa_aux_t info = VN_INFO (name);
- if (!info->visited
- || info->valnum == VN_TOP)
- info->valnum = name;
- if (info->valnum == name)
- info->value_id = get_next_value_id ();
- else if (is_gimple_min_invariant (info->valnum))
- info->value_id = get_or_alloc_constant_value_id (info->valnum);
- }
-
- /* Propagate. */
- FOR_EACH_SSA_NAME (i, name, cfun)
- {
- vn_ssa_aux_t info = VN_INFO (name);
- if (TREE_CODE (info->valnum) == SSA_NAME
- && info->valnum != name
- && info->value_id != VN_INFO (info->valnum)->value_id)
- info->value_id = VN_INFO (info->valnum)->value_id;
- }
-
- set_hashtable_value_ids ();
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Value numbers:\n");
- FOR_EACH_SSA_NAME (i, name, cfun)
- {
- if (VN_INFO (name)->visited
- && SSA_VAL (name) != name)
- {
- print_generic_expr (dump_file, name);
- fprintf (dump_file, " = ");
- print_generic_expr (dump_file, SSA_VAL (name));
- fprintf (dump_file, "\n");
- }
- }
- }
-}
-
/* Return the maximum value id we have ever seen. */
unsigned int
@@ -5206,9 +4624,13 @@ public:
virtual edge before_dom_children (basic_block);
virtual void after_dom_children (basic_block);
- tree eliminate_avail (tree op);
- void eliminate_push_avail (tree op);
- tree eliminate_insert (gimple_stmt_iterator *gsi, tree val);
+ virtual tree eliminate_avail (basic_block, tree op);
+ virtual void eliminate_push_avail (basic_block, tree op);
+ tree eliminate_insert (basic_block, gimple_stmt_iterator *gsi, tree val);
+
+ void eliminate_stmt (basic_block, gimple_stmt_iterator *);
+
+ unsigned eliminate_cleanup (bool region_p = false);
bool do_pre;
unsigned int el_todo;
@@ -5224,6 +4646,7 @@ public:
/* Blocks with statements that have had their AB properties changed. */
bitmap need_ab_cleanup;
+ /* Local state for the eliminate domwalk. */
auto_vec<gimple *> to_remove;
auto_vec<gimple *> to_fixup;
auto_vec<tree> avail;
@@ -5250,7 +4673,7 @@ eliminate_dom_walker::~eliminate_dom_walker ()
eliminate domwalk. */
tree
-eliminate_dom_walker::eliminate_avail (tree op)
+eliminate_dom_walker::eliminate_avail (basic_block, tree op)
{
tree valnum = VN_INFO (op)->valnum;
if (TREE_CODE (valnum) == SSA_NAME)
@@ -5268,7 +4691,7 @@ eliminate_dom_walker::eliminate_avail (tree op)
/* At the current point of the eliminate domwalk make OP available. */
void
-eliminate_dom_walker::eliminate_push_avail (tree op)
+eliminate_dom_walker::eliminate_push_avail (basic_block, tree op)
{
tree valnum = VN_INFO (op)->valnum;
if (TREE_CODE (valnum) == SSA_NAME)
@@ -5287,7 +4710,8 @@ eliminate_dom_walker::eliminate_push_avail (tree op)
the leader for the expression if insertion was successful. */
tree
-eliminate_dom_walker::eliminate_insert (gimple_stmt_iterator *gsi, tree val)
+eliminate_dom_walker::eliminate_insert (basic_block bb,
+ gimple_stmt_iterator *gsi, tree val)
{
/* We can insert a sequence with a single assignment only. */
gimple_seq stmts = VN_INFO (val)->expr;
@@ -5306,7 +4730,7 @@ eliminate_dom_walker::eliminate_insert (gimple_stmt_iterator *gsi, tree val)
if (gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
|| gimple_assign_rhs_code (stmt) == BIT_FIELD_REF)
op = TREE_OPERAND (op, 0);
- tree leader = TREE_CODE (op) == SSA_NAME ? eliminate_avail (op) : op;
+ tree leader = TREE_CODE (op) == SSA_NAME ? eliminate_avail (bb, op) : op;
if (!leader)
return NULL_TREE;
@@ -5338,7 +4762,7 @@ eliminate_dom_walker::eliminate_insert (gimple_stmt_iterator *gsi, tree val)
if (dump_file && (dump_flags & TDF_DETAILS))
{
if (TREE_CODE (res) == SSA_NAME)
- res = eliminate_avail (res);
+ res = eliminate_avail (bb, res);
if (res)
{
fprintf (dump_file, "Failed to insert expression for value ");
@@ -5354,7 +4778,8 @@ eliminate_dom_walker::eliminate_insert (gimple_stmt_iterator *gsi, tree val)
else
{
gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
- VN_INFO_GET (res)->valnum = val;
+ VN_INFO (res)->valnum = val;
+ VN_INFO (res)->visited = true;
}
insertions++;
@@ -5367,7 +4792,480 @@ eliminate_dom_walker::eliminate_insert (gimple_stmt_iterator *gsi, tree val)
return res;
}
+void
+eliminate_dom_walker::eliminate_stmt (basic_block b, gimple_stmt_iterator *gsi)
+{
+ tree sprime = NULL_TREE;
+ gimple *stmt = gsi_stmt (*gsi);
+ tree lhs = gimple_get_lhs (stmt);
+ if (lhs && TREE_CODE (lhs) == SSA_NAME
+ && !gimple_has_volatile_ops (stmt)
+ /* See PR43491. Do not replace a global register variable when
+ it is the RHS of an assignment. Do replace local register
+ variables since gcc does not guarantee a local variable will
+ be allocated in a register.
+ ??? The fix isn't effective here. This should instead
+ be ensured by not value-numbering them the same but treating
+ them like volatiles? */
+ && !(gimple_assign_single_p (stmt)
+ && (TREE_CODE (gimple_assign_rhs1 (stmt)) == VAR_DECL
+ && DECL_HARD_REGISTER (gimple_assign_rhs1 (stmt))
+ && is_global_var (gimple_assign_rhs1 (stmt)))))
+ {
+ sprime = eliminate_avail (b, lhs);
+ if (!sprime)
+ {
+ /* If there is no existing usable leader but SCCVN thinks
+ it has an expression it wants to use as replacement,
+ insert that. */
+ tree val = VN_INFO (lhs)->valnum;
+ if (val != VN_TOP
+ && TREE_CODE (val) == SSA_NAME
+ && VN_INFO (val)->needs_insertion
+ && VN_INFO (val)->expr != NULL
+ && (sprime = eliminate_insert (b, gsi, val)) != NULL_TREE)
+ eliminate_push_avail (b, sprime);
+ }
+
+ /* If this now constitutes a copy duplicate points-to
+ and range info appropriately. This is especially
+ important for inserted code. See tree-ssa-copy.c
+ for similar code. */
+ if (sprime
+ && TREE_CODE (sprime) == SSA_NAME)
+ {
+ basic_block sprime_b = gimple_bb (SSA_NAME_DEF_STMT (sprime));
+ if (POINTER_TYPE_P (TREE_TYPE (lhs))
+ && SSA_NAME_PTR_INFO (lhs)
+ && ! SSA_NAME_PTR_INFO (sprime))
+ {
+ duplicate_ssa_name_ptr_info (sprime,
+ SSA_NAME_PTR_INFO (lhs));
+ if (b != sprime_b)
+ mark_ptr_info_alignment_unknown
+ (SSA_NAME_PTR_INFO (sprime));
+ }
+ else if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
+ && SSA_NAME_RANGE_INFO (lhs)
+ && ! SSA_NAME_RANGE_INFO (sprime)
+ && b == sprime_b)
+ duplicate_ssa_name_range_info (sprime,
+ SSA_NAME_RANGE_TYPE (lhs),
+ SSA_NAME_RANGE_INFO (lhs));
+ }
+
+ /* Inhibit the use of an inserted PHI on a loop header when
+ the address of the memory reference is a simple induction
+ variable. In other cases the vectorizer won't do anything
+ anyway (either it's loop invariant or a complicated
+ expression). */
+ if (sprime
+ && TREE_CODE (sprime) == SSA_NAME
+ && do_pre
+ && (flag_tree_loop_vectorize || flag_tree_parallelize_loops > 1)
+ && loop_outer (b->loop_father)
+ && has_zero_uses (sprime)
+ && bitmap_bit_p (inserted_exprs, SSA_NAME_VERSION (sprime))
+ && gimple_assign_load_p (stmt))
+ {
+ gimple *def_stmt = SSA_NAME_DEF_STMT (sprime);
+ basic_block def_bb = gimple_bb (def_stmt);
+ if (gimple_code (def_stmt) == GIMPLE_PHI
+ && def_bb->loop_father->header == def_bb)
+ {
+ loop_p loop = def_bb->loop_father;
+ ssa_op_iter iter;
+ tree op;
+ bool found = false;
+ FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
+ {
+ affine_iv iv;
+ def_bb = gimple_bb (SSA_NAME_DEF_STMT (op));
+ if (def_bb
+ && flow_bb_inside_loop_p (loop, def_bb)
+ && simple_iv (loop, loop, op, &iv, true))
+ {
+ found = true;
+ break;
+ }
+ }
+ if (found)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Not replacing ");
+ print_gimple_expr (dump_file, stmt, 0);
+ fprintf (dump_file, " with ");
+ print_generic_expr (dump_file, sprime);
+ fprintf (dump_file, " which would add a loop"
+ " carried dependence to loop %d\n",
+ loop->num);
+ }
+ /* Don't keep sprime available. */
+ sprime = NULL_TREE;
+ }
+ }
+ }
+
+ if (sprime)
+ {
+ /* If we can propagate the value computed for LHS into
+ all uses don't bother doing anything with this stmt. */
+ if (may_propagate_copy (lhs, sprime))
+ {
+ /* Mark it for removal. */
+ to_remove.safe_push (stmt);
+
+ /* ??? Don't count copy/constant propagations. */
+ if (gimple_assign_single_p (stmt)
+ && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
+ || gimple_assign_rhs1 (stmt) == sprime))
+ return;
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Replaced ");
+ print_gimple_expr (dump_file, stmt, 0);
+ fprintf (dump_file, " with ");
+ print_generic_expr (dump_file, sprime);
+ fprintf (dump_file, " in all uses of ");
+ print_gimple_stmt (dump_file, stmt, 0);
+ }
+
+ eliminations++;
+ return;
+ }
+
+ /* If this is an assignment from our leader (which
+ happens in the case the value-number is a constant)
+ then there is nothing to do. */
+ if (gimple_assign_single_p (stmt)
+ && sprime == gimple_assign_rhs1 (stmt))
+ return;
+
+ /* Else replace its RHS. */
+ bool can_make_abnormal_goto
+ = is_gimple_call (stmt)
+ && stmt_can_make_abnormal_goto (stmt);
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Replaced ");
+ print_gimple_expr (dump_file, stmt, 0);
+ fprintf (dump_file, " with ");
+ print_generic_expr (dump_file, sprime);
+ fprintf (dump_file, " in ");
+ print_gimple_stmt (dump_file, stmt, 0);
+ }
+
+ eliminations++;
+ gimple *orig_stmt = stmt;
+ if (!useless_type_conversion_p (TREE_TYPE (lhs),
+ TREE_TYPE (sprime)))
+ sprime = fold_convert (TREE_TYPE (lhs), sprime);
+ tree vdef = gimple_vdef (stmt);
+ tree vuse = gimple_vuse (stmt);
+ propagate_tree_value_into_stmt (gsi, sprime);
+ stmt = gsi_stmt (*gsi);
+ update_stmt (stmt);
+ /* In case the VDEF on the original stmt was released, value-number
+ it to the VUSE. This is to make vuse_ssa_val able to skip
+ released virtual operands. */
+ if (vdef != gimple_vdef (stmt))
+ {
+ gcc_assert (SSA_NAME_IN_FREE_LIST (vdef));
+ VN_INFO (vdef)->valnum = vuse;
+ }
+
+ /* If we removed EH side-effects from the statement, clean
+ its EH information. */
+ if (maybe_clean_or_replace_eh_stmt (orig_stmt, stmt))
+ {
+ bitmap_set_bit (need_eh_cleanup,
+ gimple_bb (stmt)->index);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " Removed EH side-effects.\n");
+ }
+
+ /* Likewise for AB side-effects. */
+ if (can_make_abnormal_goto
+ && !stmt_can_make_abnormal_goto (stmt))
+ {
+ bitmap_set_bit (need_ab_cleanup,
+ gimple_bb (stmt)->index);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " Removed AB side-effects.\n");
+ }
+
+ return;
+ }
+ }
+
+ /* If the statement is a scalar store, see if the expression
+ has the same value number as its rhs. If so, the store is
+ dead. */
+ if (gimple_assign_single_p (stmt)
+ && !gimple_has_volatile_ops (stmt)
+ && !is_gimple_reg (gimple_assign_lhs (stmt))
+ && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
+ || is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
+ {
+ tree val;
+ tree rhs = gimple_assign_rhs1 (stmt);
+ vn_reference_t vnresult;
+ val = vn_reference_lookup (lhs, gimple_vuse (stmt), VN_WALKREWRITE,
+ &vnresult, false);
+ if (TREE_CODE (rhs) == SSA_NAME)
+ rhs = VN_INFO (rhs)->valnum;
+ if (val
+ && operand_equal_p (val, rhs, 0))
+ {
+ /* We can only remove the later store if the former aliases
+ at least all accesses the later one does or if the store
+ was to readonly memory storing the same value. */
+ alias_set_type set = get_alias_set (lhs);
+ if (! vnresult
+ || vnresult->set == set
+ || alias_set_subset_of (set, vnresult->set))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Deleted redundant store ");
+ print_gimple_stmt (dump_file, stmt, 0);
+ }
+
+ /* Queue stmt for removal. */
+ to_remove.safe_push (stmt);
+ return;
+ }
+ }
+ }
+
+ /* If this is a control statement for which value numbering left one of
+ the outgoing edges not executable, force the condition in a way
+ consistent with that. */
+ if (gcond *cond = dyn_cast <gcond *> (stmt))
+ {
+ if ((EDGE_SUCC (b, 0)->flags & EDGE_EXECUTABLE)
+ ^ (EDGE_SUCC (b, 1)->flags & EDGE_EXECUTABLE))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Removing unexecutable edge from ");
+ print_gimple_stmt (dump_file, stmt, 0);
+ }
+ if (((EDGE_SUCC (b, 0)->flags & EDGE_TRUE_VALUE) != 0)
+ == ((EDGE_SUCC (b, 0)->flags & EDGE_EXECUTABLE) != 0))
+ gimple_cond_make_true (cond);
+ else
+ gimple_cond_make_false (cond);
+ update_stmt (cond);
+ el_todo |= TODO_cleanup_cfg;
+ return;
+ }
+ }
+
+ bool can_make_abnormal_goto = stmt_can_make_abnormal_goto (stmt);
+ bool was_noreturn = (is_gimple_call (stmt)
+ && gimple_call_noreturn_p (stmt));
+ tree vdef = gimple_vdef (stmt);
+ tree vuse = gimple_vuse (stmt);
+
+ /* If we didn't replace the whole stmt (or propagate the result
+ into all uses), replace all uses on this stmt with their
+ leaders. */
+ bool modified = false;
+ use_operand_p use_p;
+ ssa_op_iter iter;
+ FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
+ {
+ tree use = USE_FROM_PTR (use_p);
+ /* ??? The call code above leaves stmt operands un-updated. */
+ if (TREE_CODE (use) != SSA_NAME)
+ continue;
+ tree sprime;
+ if (SSA_NAME_IS_DEFAULT_DEF (use))
+ /* ??? For default defs BB shouldn't matter, but we have to
+ solve the inconsistency between rpo eliminate and
+ dom eliminate avail valueization first. */
+ sprime = eliminate_avail (b, use);
+ else
+ /* Look for sth available at the definition block of the argument.
+ This avoids inconsistencies between availability there which
+ decides if the stmt can be removed and availability at the
+ use site. The SSA property ensures that things available
+ at the definition are also available at uses. */
+ sprime = eliminate_avail (gimple_bb (SSA_NAME_DEF_STMT (use)), use);
+ if (sprime && sprime != use
+ && may_propagate_copy (use, sprime)
+ /* We substitute into debug stmts to avoid excessive
+ debug temporaries created by removed stmts, but we need
+ to avoid doing so for inserted sprimes as we never want
+ to create debug temporaries for them. */
+ && (!inserted_exprs
+ || TREE_CODE (sprime) != SSA_NAME
+ || !is_gimple_debug (stmt)
+ || !bitmap_bit_p (inserted_exprs, SSA_NAME_VERSION (sprime))))
+ {
+ propagate_value (use_p, sprime);
+ modified = true;
+ }
+ }
+
+ /* Fold the stmt if modified, this canonicalizes MEM_REFs we propagated
+ into which is a requirement for the IPA devirt machinery. */
+ gimple *old_stmt = stmt;
+ if (modified)
+ {
+ /* If a formerly non-invariant ADDR_EXPR is turned into an
+ invariant one it was on a separate stmt. */
+ if (gimple_assign_single_p (stmt)
+ && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR)
+ recompute_tree_invariant_for_addr_expr (gimple_assign_rhs1 (stmt));
+ gimple_stmt_iterator prev = *gsi;
+ gsi_prev (&prev);
+ if (fold_stmt (gsi))
+ {
+ /* fold_stmt may have created new stmts in between
+ the previous stmt and the folded stmt. Mark
+ all defs created there as varying to not confuse
+ the SCCVN machinery as we're using that even during
+ elimination. */
+ if (gsi_end_p (prev))
+ prev = gsi_start_bb (b);
+ else
+ gsi_next (&prev);
+ if (gsi_stmt (prev) != gsi_stmt (*gsi))
+ do
+ {
+ tree def;
+ ssa_op_iter dit;
+ FOR_EACH_SSA_TREE_OPERAND (def, gsi_stmt (prev),
+ dit, SSA_OP_ALL_DEFS)
+ /* As existing DEFs may move between stmts
+ only process new ones. */
+ if (! has_VN_INFO (def))
+ {
+ VN_INFO (def)->valnum = def;
+ VN_INFO (def)->visited = true;
+ }
+ if (gsi_stmt (prev) == gsi_stmt (*gsi))
+ break;
+ gsi_next (&prev);
+ }
+ while (1);
+ }
+ stmt = gsi_stmt (*gsi);
+ /* In case we folded the stmt away schedule the NOP for removal. */
+ if (gimple_nop_p (stmt))
+ to_remove.safe_push (stmt);
+ }
+
+ /* Visit indirect calls and turn them into direct calls if
+ possible using the devirtualization machinery. Do this before
+ checking for required EH/abnormal/noreturn cleanup as devirt
+ may expose more of those. */
+ if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
+ {
+ tree fn = gimple_call_fn (call_stmt);
+ if (fn
+ && flag_devirtualize
+ && virtual_method_call_p (fn))
+ {
+ tree otr_type = obj_type_ref_class (fn);
+ unsigned HOST_WIDE_INT otr_tok
+ = tree_to_uhwi (OBJ_TYPE_REF_TOKEN (fn));
+ tree instance;
+ ipa_polymorphic_call_context context (current_function_decl,
+ fn, stmt, &instance);
+ context.get_dynamic_type (instance, OBJ_TYPE_REF_OBJECT (fn),
+ otr_type, stmt);
+ bool final;
+ vec <cgraph_node *> targets
+ = possible_polymorphic_call_targets (obj_type_ref_class (fn),
+ otr_tok, context, &final);
+ if (dump_file)
+ dump_possible_polymorphic_call_targets (dump_file,
+ obj_type_ref_class (fn),
+ otr_tok, context);
+ if (final && targets.length () <= 1 && dbg_cnt (devirt))
+ {
+ tree fn;
+ if (targets.length () == 1)
+ fn = targets[0]->decl;
+ else
+ fn = builtin_decl_implicit (BUILT_IN_UNREACHABLE);
+ if (dump_enabled_p ())
+ {
+ dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, stmt,
+ "converting indirect call to "
+ "function %s\n",
+ lang_hooks.decl_printable_name (fn, 2));
+ }
+ gimple_call_set_fndecl (call_stmt, fn);
+ /* If changing the call to __builtin_unreachable
+ or similar noreturn function, adjust gimple_call_fntype
+ too. */
+ if (gimple_call_noreturn_p (call_stmt)
+ && VOID_TYPE_P (TREE_TYPE (TREE_TYPE (fn)))
+ && TYPE_ARG_TYPES (TREE_TYPE (fn))
+ && (TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fn)))
+ == void_type_node))
+ gimple_call_set_fntype (call_stmt, TREE_TYPE (fn));
+ maybe_remove_unused_call_args (cfun, call_stmt);
+ modified = true;
+ }
+ }
+ }
+ if (modified)
+ {
+ /* When changing a call into a noreturn call, cfg cleanup
+ is needed to fix up the noreturn call. */
+ if (!was_noreturn
+ && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
+ to_fixup.safe_push (stmt);
+ /* When changing a condition or switch into one we know what
+ edge will be executed, schedule a cfg cleanup. */
+ if ((gimple_code (stmt) == GIMPLE_COND
+ && (gimple_cond_true_p (as_a <gcond *> (stmt))
+ || gimple_cond_false_p (as_a <gcond *> (stmt))))
+ || (gimple_code (stmt) == GIMPLE_SWITCH
+ && TREE_CODE (gimple_switch_index
+ (as_a <gswitch *> (stmt))) == INTEGER_CST))
+ el_todo |= TODO_cleanup_cfg;
+ /* If we removed EH side-effects from the statement, clean
+ its EH information. */
+ if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
+ {
+ bitmap_set_bit (need_eh_cleanup,
+ gimple_bb (stmt)->index);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " Removed EH side-effects.\n");
+ }
+ /* Likewise for AB side-effects. */
+ if (can_make_abnormal_goto
+ && !stmt_can_make_abnormal_goto (stmt))
+ {
+ bitmap_set_bit (need_ab_cleanup,
+ gimple_bb (stmt)->index);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " Removed AB side-effects.\n");
+ }
+ update_stmt (stmt);
+ /* In case the VDEF on the original stmt was released, value-number
+ it to the VUSE. This is to make vuse_ssa_val able to skip
+ released virtual operands. */
+ if (vdef && SSA_NAME_IN_FREE_LIST (vdef))
+ VN_INFO (vdef)->valnum = vuse;
+ }
+
+ /* Make new values available - for fully redundant LHS we
+ continue with the next stmt above and skip this. */
+ def_operand_p defp;
+ FOR_EACH_SSA_DEF_OPERAND (defp, stmt, iter, SSA_OP_DEF)
+ eliminate_push_avail (b, DEF_FROM_PTR (defp));
+}
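
Stripped of the PR43491, loop-dependence and devirtualization details, the main path of eliminate_stmt above is: look up an already-available leader for the statement's value and, if one exists, propagate it and queue the statement for removal. A minimal sketch of that core, under the assumption that statements are plain (name, value-number) pairs rather than GIMPLE:

#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct stmt { std::string lhs; int value; };  /* (defined name, value number) */

int
main ()
{
  /* c_3 recomputes the value already available as a_1.  */
  std::vector<stmt> block = { { "a_1", 10 }, { "b_2", 11 }, { "c_3", 10 } };
  std::map<int, std::string> leader;  /* value number -> available leader */
  for (const stmt &s : block)
    {
      auto it = leader.find (s.value);
      if (it == leader.end ())
        leader[s.value] = s.lhs;  /* first computation: make it available */
      else
        std::printf ("%s is redundant, replace its uses with %s\n",
                     s.lhs.c_str (), it->second.c_str ());
    }
}
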
/* Perform elimination for the basic-block B during the domwalk. */
@@ -5378,14 +5276,11 @@ eliminate_dom_walker::before_dom_children (basic_block b)
avail_stack.safe_push (NULL_TREE);
/* Skip unreachable blocks marked unreachable during the SCCVN domwalk. */
- edge_iterator ei;
- edge e;
- FOR_EACH_EDGE (e, ei, b->preds)
- if (e->flags & EDGE_EXECUTABLE)
- break;
- if (! e)
+ if (!(b->flags & BB_EXECUTABLE))
return NULL;
+ vn_context_bb = b;
+
for (gphi_iterator gsi = gsi_start_phis (b); !gsi_end_p (gsi);)
{
gphi *phi = gsi.phi ();
@@ -5397,7 +5292,7 @@ eliminate_dom_walker::before_dom_children (basic_block b)
continue;
}
- tree sprime = eliminate_avail (res);
+ tree sprime = eliminate_avail (b, res);
if (sprime
&& sprime != res)
{
@@ -5435,468 +5330,796 @@ eliminate_dom_walker::before_dom_children (basic_block b)
continue;
}
- eliminate_push_avail (res);
+ eliminate_push_avail (b, res);
gsi_next (&gsi);
}
for (gimple_stmt_iterator gsi = gsi_start_bb (b);
!gsi_end_p (gsi);
gsi_next (&gsi))
- {
- tree sprime = NULL_TREE;
- gimple *stmt = gsi_stmt (gsi);
- tree lhs = gimple_get_lhs (stmt);
- if (lhs && TREE_CODE (lhs) == SSA_NAME
- && !gimple_has_volatile_ops (stmt)
- /* See PR43491. Do not replace a global register variable when
- it is a the RHS of an assignment. Do replace local register
- variables since gcc does not guarantee a local variable will
- be allocated in register.
- ??? The fix isn't effective here. This should instead
- be ensured by not value-numbering them the same but treating
- them like volatiles? */
- && !(gimple_assign_single_p (stmt)
- && (TREE_CODE (gimple_assign_rhs1 (stmt)) == VAR_DECL
- && DECL_HARD_REGISTER (gimple_assign_rhs1 (stmt))
- && is_global_var (gimple_assign_rhs1 (stmt)))))
+ eliminate_stmt (b, &gsi);
+
+ /* Replace destination PHI arguments. */
+ edge_iterator ei;
+ edge e;
+ FOR_EACH_EDGE (e, ei, b->succs)
+ if (e->flags & EDGE_EXECUTABLE)
+ for (gphi_iterator gsi = gsi_start_phis (e->dest);
+ !gsi_end_p (gsi);
+ gsi_next (&gsi))
{
- sprime = eliminate_avail (lhs);
- if (!sprime)
- {
- /* If there is no existing usable leader but SCCVN thinks
- it has an expression it wants to use as replacement,
- insert that. */
- tree val = VN_INFO (lhs)->valnum;
- if (val != VN_TOP
- && TREE_CODE (val) == SSA_NAME
- && VN_INFO (val)->needs_insertion
- && VN_INFO (val)->expr != NULL
- && (sprime = eliminate_insert (&gsi, val)) != NULL_TREE)
- eliminate_push_avail (sprime);
- }
+ gphi *phi = gsi.phi ();
+ use_operand_p use_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, e);
+ tree arg = USE_FROM_PTR (use_p);
+ if (TREE_CODE (arg) != SSA_NAME
+ || virtual_operand_p (arg))
+ continue;
+ tree sprime = eliminate_avail (b, arg);
+ if (sprime && may_propagate_copy (arg, sprime))
+ propagate_value (use_p, sprime);
+ }
- /* If this now constitutes a copy duplicate points-to
- and range info appropriately. This is especially
- important for inserted code. See tree-ssa-copy.c
- for similar code. */
- if (sprime
- && TREE_CODE (sprime) == SSA_NAME)
+ vn_context_bb = NULL;
+
+ return NULL;
+}
+
+/* Make no longer available leaders no longer available. */
+
+void
+eliminate_dom_walker::after_dom_children (basic_block)
+{
+ tree entry;
+ while ((entry = avail_stack.pop ()) != NULL_TREE)
+ {
+ tree valnum = VN_INFO (entry)->valnum;
+ tree old = avail[SSA_NAME_VERSION (valnum)];
+ if (old == entry)
+ avail[SSA_NAME_VERSION (valnum)] = NULL_TREE;
+ else
+ avail[SSA_NAME_VERSION (valnum)] = entry;
+ }
+}
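
The unwinding above relies on the NULL_TREE markers pushed on block entry: everything above the marker was made available inside the just-left block and has to be taken out of the lattice again. A minimal sketch of that marker-based scheme; note the real walker keys the stack by SSA name and reconstructs the previous leader from the valnum, whereas the sketch simply records (value, previous leader) pairs, which is a simplification:

#include <cstdio>
#include <map>
#include <utility>
#include <vector>

/* value number -> current leader name (0 = none).  */
static std::map<int, int> avail;
/* Stack of (value, previous leader); MARKER separates the blocks.  */
static std::vector<std::pair<int, int> > avail_stack;
static const std::pair<int, int> MARKER (-1, -1);

static void
enter_block ()
{
  avail_stack.push_back (MARKER);
}

static void
push_avail (int value, int name)
{
  avail_stack.push_back (std::make_pair (value, avail[value]));
  avail[value] = name;
}

static void
leave_block ()
{
  while (avail_stack.back () != MARKER)
    {
      avail[avail_stack.back ().first] = avail_stack.back ().second;
      avail_stack.pop_back ();
    }
  avail_stack.pop_back ();  /* pop the marker itself */
}

int
main ()
{
  enter_block ();
  push_avail (10, 1);  /* _1 becomes the leader for value 10 */
  enter_block ();
  push_avail (10, 2);  /* a dominated block shadows it with _2 */
  leave_block ();
  std::printf ("leader of 10: %d\n", avail[10]);  /* back to 1 */
  leave_block ();
}
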
+
+/* Remove queued stmts and perform delayed cleanups. */
+
+unsigned
+eliminate_dom_walker::eliminate_cleanup (bool region_p)
+{
+ statistics_counter_event (cfun, "Eliminated", eliminations);
+ statistics_counter_event (cfun, "Insertions", insertions);
+
+ /* We cannot remove stmts during BB walk, especially not release SSA
+ names there as this confuses the VN machinery. The stmts ending
+ up in to_remove are either stores or simple copies.
+ Remove stmts in reverse order to make debug stmt creation possible. */
+ while (!to_remove.is_empty ())
+ {
+ bool do_release_defs = true;
+ gimple *stmt = to_remove.pop ();
+
+ /* When we are value-numbering a region we do not require exit PHIs to
+ be present so we have to make sure to deal with uses outside of the
+ region of stmts that we thought are eliminated.
+ ??? Note we may be confused by uses in dead regions we didn't run
+ elimination on. Rather than checking individual uses we accept
+ dead copies to be generated here (gcc.c-torture/execute/20060905-1.c
+ contains such an example). */
+ if (region_p)
+ {
+ if (gphi *phi = dyn_cast <gphi *> (stmt))
{
- basic_block sprime_b = gimple_bb (SSA_NAME_DEF_STMT (sprime));
- if (POINTER_TYPE_P (TREE_TYPE (lhs))
- && VN_INFO_PTR_INFO (lhs)
- && ! VN_INFO_PTR_INFO (sprime))
+ tree lhs = gimple_phi_result (phi);
+ if (!has_zero_uses (lhs))
{
- duplicate_ssa_name_ptr_info (sprime,
- VN_INFO_PTR_INFO (lhs));
- if (b != sprime_b)
- mark_ptr_info_alignment_unknown
- (SSA_NAME_PTR_INFO (sprime));
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Keeping eliminated stmt live "
+ "as copy because of out-of-region uses\n");
+ tree sprime = eliminate_avail (gimple_bb (stmt), lhs);
+ gimple *copy = gimple_build_assign (lhs, sprime);
+ gimple_stmt_iterator gsi
+ = gsi_after_labels (gimple_bb (stmt));
+ gsi_insert_before (&gsi, copy, GSI_SAME_STMT);
+ do_release_defs = false;
}
- else if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
- && VN_INFO_RANGE_INFO (lhs)
- && ! VN_INFO_RANGE_INFO (sprime)
- && b == sprime_b)
- duplicate_ssa_name_range_info (sprime,
- VN_INFO_RANGE_TYPE (lhs),
- VN_INFO_RANGE_INFO (lhs));
}
+ else if (tree lhs = gimple_get_lhs (stmt))
+ if (TREE_CODE (lhs) == SSA_NAME
+ && !has_zero_uses (lhs))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Keeping eliminated stmt live "
+ "as copy because of out-of-region uses\n");
+ tree sprime = eliminate_avail (gimple_bb (stmt), lhs);
+ gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
+ if (is_gimple_assign (stmt))
+ {
+ gimple_assign_set_rhs_from_tree (&gsi, sprime);
+ update_stmt (gsi_stmt (gsi));
+ continue;
+ }
+ else
+ {
+ gimple *copy = gimple_build_assign (lhs, sprime);
+ gsi_insert_before (&gsi, copy, GSI_SAME_STMT);
+ do_release_defs = false;
+ }
+ }
+ }
- /* Inhibit the use of an inserted PHI on a loop header when
- the address of the memory reference is a simple induction
- variable. In other cases the vectorizer won't do anything
- anyway (either it's loop invariant or a complicated
- expression). */
- if (sprime
- && TREE_CODE (sprime) == SSA_NAME
- && do_pre
- && (flag_tree_loop_vectorize || flag_tree_parallelize_loops > 1)
- && loop_outer (b->loop_father)
- && has_zero_uses (sprime)
- && bitmap_bit_p (inserted_exprs, SSA_NAME_VERSION (sprime))
- && gimple_assign_load_p (stmt))
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Removing dead stmt ");
+ print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
+ }
+
+ gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
+ if (gimple_code (stmt) == GIMPLE_PHI)
+ remove_phi_node (&gsi, do_release_defs);
+ else
+ {
+ basic_block bb = gimple_bb (stmt);
+ unlink_stmt_vdef (stmt);
+ if (gsi_remove (&gsi, true))
+ bitmap_set_bit (need_eh_cleanup, bb->index);
+ if (is_gimple_call (stmt) && stmt_can_make_abnormal_goto (stmt))
+ bitmap_set_bit (need_ab_cleanup, bb->index);
+ if (do_release_defs)
+ release_defs (stmt);
+ }
+
+ /* Removing a stmt may expose a forwarder block. */
+ el_todo |= TODO_cleanup_cfg;
+ }
+
+ /* Fixup stmts that became noreturn calls. This may require splitting
+ blocks and thus isn't possible during the dominator walk. Do this
+ in reverse order so we don't inadvertently remove a stmt we want to
+ fixup by visiting a dominating now noreturn call first. */
+ while (!to_fixup.is_empty ())
+ {
+ gimple *stmt = to_fixup.pop ();
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Fixing up noreturn call ");
+ print_gimple_stmt (dump_file, stmt, 0);
+ }
+
+ if (fixup_noreturn_call (stmt))
+ el_todo |= TODO_cleanup_cfg;
+ }
+
+ bool do_eh_cleanup = !bitmap_empty_p (need_eh_cleanup);
+ bool do_ab_cleanup = !bitmap_empty_p (need_ab_cleanup);
+
+ if (do_eh_cleanup)
+ gimple_purge_all_dead_eh_edges (need_eh_cleanup);
+
+ if (do_ab_cleanup)
+ gimple_purge_all_dead_abnormal_call_edges (need_ab_cleanup);
+
+ if (do_eh_cleanup || do_ab_cleanup)
+ el_todo |= TODO_cleanup_cfg;
+
+ return el_todo;
+}
+
+/* Eliminate fully redundant computations. */
+
+unsigned
+eliminate_with_rpo_vn (bitmap inserted_exprs)
+{
+ eliminate_dom_walker walker (CDI_DOMINATORS, inserted_exprs);
+
+ walker.walk (cfun->cfg->x_entry_block_ptr);
+ return walker.eliminate_cleanup ();
+}
+
+static unsigned
+do_rpo_vn (function *fn, edge entry, bitmap exit_bbs,
+ bool iterate, bool eliminate);
+
+void
+run_rpo_vn (vn_lookup_kind kind)
+{
+ default_vn_walk_kind = kind;
+ do_rpo_vn (cfun, NULL, NULL, true, false);
+
+ /* ??? Prune requirement of these. */
+ constant_to_value_id = new hash_table<vn_constant_hasher> (23);
+ constant_value_ids = BITMAP_ALLOC (NULL);
+
+ /* Initialize the value ids and prune out remaining VN_TOPs
+ from dead code. */
+ tree name;
+ unsigned i;
+ FOR_EACH_SSA_NAME (i, name, cfun)
+ {
+ vn_ssa_aux_t info = VN_INFO (name);
+ if (!info->visited
+ || info->valnum == VN_TOP)
+ info->valnum = name;
+ if (info->valnum == name)
+ info->value_id = get_next_value_id ();
+ else if (is_gimple_min_invariant (info->valnum))
+ info->value_id = get_or_alloc_constant_value_id (info->valnum);
+ }
+
+ /* Propagate. */
+ FOR_EACH_SSA_NAME (i, name, cfun)
+ {
+ vn_ssa_aux_t info = VN_INFO (name);
+ if (TREE_CODE (info->valnum) == SSA_NAME
+ && info->valnum != name
+ && info->value_id != VN_INFO (info->valnum)->value_id)
+ info->value_id = VN_INFO (info->valnum)->value_id;
+ }
+
+ set_hashtable_value_ids ();
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Value numbers:\n");
+ FOR_EACH_SSA_NAME (i, name, cfun)
+ {
+ if (VN_INFO (name)->visited
+ && SSA_VAL (name) != name)
{
- gimple *def_stmt = SSA_NAME_DEF_STMT (sprime);
- basic_block def_bb = gimple_bb (def_stmt);
- if (gimple_code (def_stmt) == GIMPLE_PHI
- && def_bb->loop_father->header == def_bb)
- {
- loop_p loop = def_bb->loop_father;
- ssa_op_iter iter;
- tree op;
- bool found = false;
- FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
- {
- affine_iv iv;
- def_bb = gimple_bb (SSA_NAME_DEF_STMT (op));
- if (def_bb
- && flow_bb_inside_loop_p (loop, def_bb)
- && simple_iv (loop, loop, op, &iv, true))
- {
- found = true;
- break;
- }
- }
- if (found)
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Not replacing ");
- print_gimple_expr (dump_file, stmt, 0);
- fprintf (dump_file, " with ");
- print_generic_expr (dump_file, sprime);
- fprintf (dump_file, " which would add a loop"
- " carried dependence to loop %d\n",
- loop->num);
- }
- /* Don't keep sprime available. */
- sprime = NULL_TREE;
- }
- }
+ print_generic_expr (dump_file, name);
+ fprintf (dump_file, " = ");
+ print_generic_expr (dump_file, SSA_VAL (name));
+ fprintf (dump_file, " (%04d)\n", VN_INFO (name)->value_id);
}
+ }
+ }
+}
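
The two loops above implement a simple two-pass scheme: names that are their own value get a fresh value id, then every other name inherits the id of its valnum. A minimal sketch of that, assuming value numbers are kept in a flat array indexed by SSA version and leaving out the constant and VN_TOP handling:

#include <cstdio>
#include <vector>

int
main ()
{
  /* valnum[i] is the representative name for name i; valnum[i] == i means
     name i is its own value.  Names 2 and 3 are copies of name 1 here.  */
  std::vector<int> valnum = { 0, 1, 1, 1 };
  std::vector<int> value_id (valnum.size (), 0);

  int next_id = 1;
  for (unsigned i = 0; i < valnum.size (); ++i)
    if (valnum[i] == (int) i)
      value_id[i] = next_id++;           /* fresh id for representatives */

  for (unsigned i = 0; i < valnum.size (); ++i)
    if (valnum[i] != (int) i)
      value_id[i] = value_id[valnum[i]]; /* propagate from the valnum */

  for (unsigned i = 0; i < valnum.size (); ++i)
    std::printf ("_%u -> value id %d\n", i, value_id[i]);
}
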
- if (sprime)
- {
- /* If we can propagate the value computed for LHS into
- all uses don't bother doing anything with this stmt. */
- if (may_propagate_copy (lhs, sprime))
- {
- /* Mark it for removal. */
- to_remove.safe_push (stmt);
+/* Free VN associated data structures. */
- /* ??? Don't count copy/constant propagations. */
- if (gimple_assign_single_p (stmt)
- && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
- || gimple_assign_rhs1 (stmt) == sprime))
- continue;
+void
+free_rpo_vn (void)
+{
+ free_vn_table (valid_info);
+ XDELETE (valid_info);
+ obstack_free (&vn_tables_obstack, NULL);
+ obstack_free (&vn_tables_insert_obstack, NULL);
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Replaced ");
- print_gimple_expr (dump_file, stmt, 0);
- fprintf (dump_file, " with ");
- print_generic_expr (dump_file, sprime);
- fprintf (dump_file, " in all uses of ");
- print_gimple_stmt (dump_file, stmt, 0);
- }
+ vn_ssa_aux_iterator_type it;
+ vn_ssa_aux_t info;
+ FOR_EACH_HASH_TABLE_ELEMENT (*vn_ssa_aux_hash, info, vn_ssa_aux_t, it)
+ if (info->needs_insertion)
+ release_ssa_name (info->name);
+ obstack_free (&vn_ssa_aux_obstack, NULL);
+ delete vn_ssa_aux_hash;
- eliminations++;
- continue;
- }
+ delete constant_to_value_id;
+ constant_to_value_id = NULL;
+ BITMAP_FREE (constant_value_ids);
+}
- /* If this is an assignment from our leader (which
- happens in the case the value-number is a constant)
- then there is nothing to do. */
- if (gimple_assign_single_p (stmt)
- && sprime == gimple_assign_rhs1 (stmt))
- continue;
+/* Adaptor to the elimination engine using RPO availability. */
- /* Else replace its RHS. */
- bool can_make_abnormal_goto
- = is_gimple_call (stmt)
- && stmt_can_make_abnormal_goto (stmt);
+class rpo_elim : public eliminate_dom_walker
+{
+public:
+ rpo_elim(basic_block entry_)
+ : eliminate_dom_walker (CDI_DOMINATORS, NULL), entry (entry_) {}
+ ~rpo_elim();
+
+ virtual tree eliminate_avail (basic_block, tree op);
+
+ virtual void eliminate_push_avail (basic_block, tree);
+
+ basic_block entry;
+ /* Instead of having a local availability lattice for each
+ basic-block, with availability at X defined as the union of
+ the local availabilities at X and its dominators, we turn
+ this upside down and track availability per value, since
+ values are usually made available at very few points
+ (at least one).
+ So we have a value -> vec<location, leader> map where
+ LOCATION specifies the basic-block in which LEADER is made
+ available for VALUE. We push to this vector in RPO
+ order, thus for iteration we can simply pop the last
+ entries.
+ LOCATION is the basic-block index and LEADER is its
+ SSA name version. */
+ /* ??? We'd like to use auto_vec here with embedded storage
+ but that doesn't play well until we can provide move
+ constructors and use std::move on hash-table expansion.
+ So for now this is a bit more expensive than necessary.
+ We eventually want to switch to a chaining scheme like the
+ one used for hashtable entries for unwinding, which would make
+ it possible to make the vector part of the vn_ssa_aux structure. */
+ typedef hash_map<tree, vec<std::pair<int, int> > > rpo_avail_t;
+ rpo_avail_t m_rpo_avail;
+};
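
The comment above describes the core data structure, so here is a minimal standalone sketch of it (an illustration only: plain C++ containers stand in for GCC's hash_map/vec, and a caller-supplied "dominates" callback stands in for dominated_by_p_w_unex; none of these names exist in the patch):

#include <functional>
#include <unordered_map>
#include <utility>
#include <vector>

struct avail_model
{
  /* Value id -> (block index, leader SSA version), pushed in RPO order.  */
  std::unordered_map<int, std::vector<std::pair<int, int> > > rpo_avail;

  /* Record that LEADER carries VALUE from block BB onwards.  */
  void push_avail (int value, int bb, int leader)
  {
    rpo_avail[value].push_back (std::make_pair (bb, leader));
  }

  /* Return a leader for VALUE valid at BB, or -1 if none is available.
     Entries were pushed in RPO order, so scan from the most recently
     pushed one backwards until a dominating block is found.  */
  int find_avail (int value, int bb,
                  const std::function<bool (int, int)> &dominates) const
  {
    auto it = rpo_avail.find (value);
    if (it == rpo_avail.end ())
      return -1;
    for (int i = (int) it->second.size () - 1; i >= 0; --i)
      if (it->second[i].first == bb || dominates (it->second[i].first, bb))
        return it->second[i].second;
    return -1;
  }
};

Because the last pushed entry is the closest one in RPO order, the common case is answered by a single comparison, which is what the "90% of the cases" remark in eliminate_avail below refers to.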
+/* Global RPO state for access from hooks. */
+static rpo_elim *rpo_avail;
+
+/* Hook for maybe_push_res_to_seq, lookup the expression in the VN tables. */
+
+static tree
+vn_lookup_simplify_result (gimple_match_op *res_op)
+{
+ if (!res_op->code.is_tree_code ())
+ return NULL_TREE;
+ tree *ops = res_op->ops;
+ unsigned int length = res_op->num_ops;
+ if (res_op->code == CONSTRUCTOR
+ /* ??? We're arriving here with SCCVN's view (a decomposed CONSTRUCTOR)
+ and GIMPLE's / match-and-simplify's view (CONSTRUCTOR as a GENERIC tree). */
+ && TREE_CODE (res_op->ops[0]) == CONSTRUCTOR)
+ {
+ length = CONSTRUCTOR_NELTS (res_op->ops[0]);
+ ops = XALLOCAVEC (tree, length);
+ for (unsigned i = 0; i < length; ++i)
+ ops[i] = CONSTRUCTOR_ELT (res_op->ops[0], i)->value;
+ }
+ vn_nary_op_t vnresult = NULL;
+ tree res = vn_nary_op_lookup_pieces (length, (tree_code) res_op->code,
+ res_op->type, ops, &vnresult);
+ /* If this is used from expression simplification make sure to
+ return an available expression. */
+ if (res && TREE_CODE (res) == SSA_NAME && mprts_hook && rpo_avail)
+ res = rpo_avail->eliminate_avail (vn_context_bb, res);
+ return res;
+}
+
+rpo_elim::~rpo_elim ()
+{
+ /* Release the avail vectors. */
+ for (rpo_avail_t::iterator i = m_rpo_avail.begin ();
+ i != m_rpo_avail.end (); ++i)
+ (*i).second.release ();
+}
+
+/* Return a leader for OP's value that is valid at BB. */
+
+tree
+rpo_elim::eliminate_avail (basic_block bb, tree op)
+{
+ tree valnum = SSA_VAL (op);
+ if (TREE_CODE (valnum) == SSA_NAME)
+ {
+ if (SSA_NAME_IS_DEFAULT_DEF (valnum))
+ return valnum;
+ vec<std::pair<int, int> > *av = m_rpo_avail.get (valnum);
+ if (!av || av->is_empty ())
+ return NULL_TREE;
+ int i = av->length () - 1;
+ if ((*av)[i].first == bb->index)
+ /* On tramp3d 90% of the cases are here. */
+ return ssa_name ((*av)[i].second);
+ do
+ {
+ basic_block abb = BASIC_BLOCK_FOR_FN (cfun, (*av)[i].first);
+ /* ??? During elimination we have to use availability at the
+ definition site of a use we try to replace. This
+ is required to not run into inconsistencies because
+ of dominated_by_p_w_unex behavior and removing a definition
+ while not replacing all uses.
+ ??? We could try to consistently walk dominators
+ ignoring non-executable regions. The nearest common
+ dominator of bb and abb is where we can stop walking. We
+ may also be able to "pre-compute" (bits of) the next immediate
+ (non-)dominator during the RPO walk when marking edges as
+ executable. */
+ if (dominated_by_p_w_unex (bb, abb))
+ {
+ tree leader = ssa_name ((*av)[i].second);
+ /* Prevent eliminations that break loop-closed SSA. */
+ if (loops_state_satisfies_p (LOOP_CLOSED_SSA)
+ && ! SSA_NAME_IS_DEFAULT_DEF (leader)
+ && ! flow_bb_inside_loop_p (gimple_bb (SSA_NAME_DEF_STMT
+ (leader))->loop_father,
+ bb))
+ return NULL_TREE;
if (dump_file && (dump_flags & TDF_DETAILS))
{
- fprintf (dump_file, "Replaced ");
- print_gimple_expr (dump_file, stmt, 0);
- fprintf (dump_file, " with ");
- print_generic_expr (dump_file, sprime);
- fprintf (dump_file, " in ");
- print_gimple_stmt (dump_file, stmt, 0);
+ print_generic_expr (dump_file, leader);
+ fprintf (dump_file, " is available for ");
+ print_generic_expr (dump_file, valnum);
+ fprintf (dump_file, "\n");
}
+ /* On tramp3d 99% of the _remaining_ cases succeed at
+ the first entry. */
+ return leader;
+ }
+ /* ??? Can we somehow skip to the immediate dominator
+ RPO index (bb_to_rpo)? Again, maybe not worth, on
+ tramp3d the worst number of elements in the vector is 9. */
+ }
+ while (--i >= 0);
+ }
+ else if (valnum != VN_TOP)
+ /* valnum is is_gimple_min_invariant. */
+ return valnum;
+ return NULL_TREE;
+}
- eliminations++;
- gimple *orig_stmt = stmt;
- if (!useless_type_conversion_p (TREE_TYPE (lhs),
- TREE_TYPE (sprime)))
- sprime = fold_convert (TREE_TYPE (lhs), sprime);
- tree vdef = gimple_vdef (stmt);
- tree vuse = gimple_vuse (stmt);
- propagate_tree_value_into_stmt (&gsi, sprime);
- stmt = gsi_stmt (gsi);
- update_stmt (stmt);
- if (vdef != gimple_vdef (stmt))
- VN_INFO (vdef)->valnum = vuse;
-
- /* If we removed EH side-effects from the statement, clean
- its EH information. */
- if (maybe_clean_or_replace_eh_stmt (orig_stmt, stmt))
- {
- bitmap_set_bit (need_eh_cleanup,
- gimple_bb (stmt)->index);
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " Removed EH side-effects.\n");
- }
+/* Make LEADER a leader for its value at BB. */
- /* Likewise for AB side-effects. */
- if (can_make_abnormal_goto
- && !stmt_can_make_abnormal_goto (stmt))
- {
- bitmap_set_bit (need_ab_cleanup,
- gimple_bb (stmt)->index);
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " Removed AB side-effects.\n");
- }
+void
+rpo_elim::eliminate_push_avail (basic_block bb, tree leader)
+{
+ tree valnum = VN_INFO (leader)->valnum;
+ if (valnum == VN_TOP)
+ return;
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Making available beyond BB%d ", bb->index);
+ print_generic_expr (dump_file, leader);
+ fprintf (dump_file, " for value ");
+ print_generic_expr (dump_file, valnum);
+ fprintf (dump_file, "\n");
+ }
+ bool existed;
+ vec<std::pair<int, int> > &av = m_rpo_avail.get_or_insert (valnum, &existed);
+ if (!existed)
+ {
+ new (&av) vec<std::pair<int, int> >;
+ av.reserve_exact (2);
+ }
+ av.safe_push (std::make_pair (bb->index, SSA_NAME_VERSION (leader)));
+}
- continue;
+/* Valueization hook for RPO VN plus required state. */
+
+tree
+rpo_vn_valueize (tree name)
+{
+ if (TREE_CODE (name) == SSA_NAME)
+ {
+ vn_ssa_aux_t val = VN_INFO (name);
+ if (val)
+ {
+ tree tem = val->valnum;
+ if (tem != VN_TOP && tem != name)
+ {
+ if (TREE_CODE (tem) != SSA_NAME)
+ return tem;
+ /* For all values we only valueize to an available leader
+ which means we can use SSA name info without restriction. */
+ tem = rpo_avail->eliminate_avail (vn_context_bb, tem);
+ if (tem)
+ return tem;
}
}
+ }
+ return name;
+}
- /* If the statement is a scalar store, see if the expression
- has the same value number as its rhs. If so, the store is
- dead. */
- if (gimple_assign_single_p (stmt)
- && !gimple_has_volatile_ops (stmt)
- && !is_gimple_reg (gimple_assign_lhs (stmt))
- && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
- || is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
+/* Insert on PRED_E predicates derived from CODE OPS being true besides the
+ inverted condition. */
+
+static void
+insert_related_predicates_on_edge (enum tree_code code, tree *ops, edge pred_e)
+{
+ switch (code)
+ {
+ case LT_EXPR:
+ /* a < b -> a {!,<}= b */
+ vn_nary_op_insert_pieces_predicated (2, NE_EXPR, boolean_type_node,
+ ops, boolean_true_node, 0, pred_e);
+ vn_nary_op_insert_pieces_predicated (2, LE_EXPR, boolean_type_node,
+ ops, boolean_true_node, 0, pred_e);
+ /* a < b -> ! a {>,=} b */
+ vn_nary_op_insert_pieces_predicated (2, GT_EXPR, boolean_type_node,
+ ops, boolean_false_node, 0, pred_e);
+ vn_nary_op_insert_pieces_predicated (2, EQ_EXPR, boolean_type_node,
+ ops, boolean_false_node, 0, pred_e);
+ break;
+ case GT_EXPR:
+ /* a > b -> a {!,>}= b */
+ vn_nary_op_insert_pieces_predicated (2, NE_EXPR, boolean_type_node,
+ ops, boolean_true_node, 0, pred_e);
+ vn_nary_op_insert_pieces_predicated (2, GE_EXPR, boolean_type_node,
+ ops, boolean_true_node, 0, pred_e);
+ /* a > b -> ! a {<,=} b */
+ vn_nary_op_insert_pieces_predicated (2, LT_EXPR, boolean_type_node,
+ ops, boolean_false_node, 0, pred_e);
+ vn_nary_op_insert_pieces_predicated (2, EQ_EXPR, boolean_type_node,
+ ops, boolean_false_node, 0, pred_e);
+ break;
+ case EQ_EXPR:
+ /* a == b -> ! a {<,>} b */
+ vn_nary_op_insert_pieces_predicated (2, LT_EXPR, boolean_type_node,
+ ops, boolean_false_node, 0, pred_e);
+ vn_nary_op_insert_pieces_predicated (2, GT_EXPR, boolean_type_node,
+ ops, boolean_false_node, 0, pred_e);
+ break;
+ case LE_EXPR:
+ case GE_EXPR:
+ case NE_EXPR:
+ /* Nothing besides inverted condition. */
+ break;
+ default:;
+ }
+}
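
As a worked reading of the LT_EXPR case above (the edge E and the operands a and b are hypothetical): when a < b is recorded as true on taken edge E, the four insertions register on E

  a != b -> true        a <= b -> true
  a >  b -> false       a == b -> false

so a later lookup of any of these comparisons in blocks dominated by E's destination folds to a constant even though the condition itself did not simplify.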
+
+/* Main stmt worker for RPO VN, process BB. */
+
+static unsigned
+process_bb (rpo_elim &avail, basic_block bb,
+ bool bb_visited, bool iterate_phis, bool iterate, bool eliminate,
+ bool do_region, bitmap exit_bbs)
+{
+ unsigned todo = 0;
+ edge_iterator ei;
+ edge e;
+
+ vn_context_bb = bb;
+
+ /* If we are in loop-closed SSA preserve this state. This is
+ relevant when called on regions from outside of FRE/PRE. */
+ bool lc_phi_nodes = false;
+ if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ if (e->src->loop_father != e->dest->loop_father
+ && flow_loop_nested_p (e->dest->loop_father,
+ e->src->loop_father))
{
- tree val;
- tree rhs = gimple_assign_rhs1 (stmt);
- vn_reference_t vnresult;
- val = vn_reference_lookup (lhs, gimple_vuse (stmt), VN_WALKREWRITE,
- &vnresult, false);
- if (TREE_CODE (rhs) == SSA_NAME)
- rhs = VN_INFO (rhs)->valnum;
- if (val
- && operand_equal_p (val, rhs, 0))
+ lc_phi_nodes = true;
+ break;
+ }
+
+ /* Value-number all defs in the basic-block. */
+ for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
+ gsi_next (&gsi))
+ {
+ gphi *phi = gsi.phi ();
+ tree res = PHI_RESULT (phi);
+ vn_ssa_aux_t res_info = VN_INFO (res);
+ if (!bb_visited)
+ {
+ gcc_assert (!res_info->visited);
+ res_info->valnum = VN_TOP;
+ res_info->visited = true;
+ }
+
+ /* When not iterating force backedge values to varying. */
+ visit_stmt (phi, !iterate_phis);
+ if (virtual_operand_p (res))
+ continue;
+
+ /* Eliminate. */
+ /* The interesting case for correctness of how we handle backedges
+ and availability is gcc.dg/tree-ssa/pr22230.c;
+ gcc.dg/tree-ssa/ssa-sccvn-2.c is the interesting case for optimization. */
+ tree val = res_info->valnum;
+ if (res != val && !iterate && eliminate)
+ {
+ if (tree leader = avail.eliminate_avail (bb, res))
{
- /* We can only remove the later store if the former aliases
- at least all accesses the later one does or if the store
- was to readonly memory storing the same value. */
- alias_set_type set = get_alias_set (lhs);
- if (! vnresult
- || vnresult->set == set
- || alias_set_subset_of (set, vnresult->set))
+ if (leader != res
+ /* Preserve loop-closed SSA form. */
+ && (! lc_phi_nodes
+ || is_gimple_min_invariant (leader)))
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
- fprintf (dump_file, "Deleted redundant store ");
- print_gimple_stmt (dump_file, stmt, 0);
+ fprintf (dump_file, "Replaced redundant PHI node "
+ "defining ");
+ print_generic_expr (dump_file, res);
+ fprintf (dump_file, " with ");
+ print_generic_expr (dump_file, leader);
+ fprintf (dump_file, "\n");
}
+ avail.eliminations++;
- /* Queue stmt for removal. */
- to_remove.safe_push (stmt);
- continue;
+ if (may_propagate_copy (res, leader))
+ {
+ /* Schedule for removal. */
+ avail.to_remove.safe_push (phi);
+ continue;
+ }
+ /* ??? Else generate a copy stmt. */
}
}
}
+ /* Only make defs available that are not already. But make
+ sure loop-closed SSA PHI node defs are picked up for
+ downstream uses. */
+ if (lc_phi_nodes
+ || res == val
+ || ! avail.eliminate_avail (bb, res))
+ avail.eliminate_push_avail (bb, res);
+ }
- /* If this is a control statement value numbering left edges
- unexecuted on force the condition in a way consistent with
- that. */
- if (gcond *cond = dyn_cast <gcond *> (stmt))
+ /* For empty BBs mark outgoing edges executable. For non-empty BBs
+ we do this when processing the last stmt, as it has to happen
+ before elimination, which otherwise forces GIMPLE_CONDs into
+ if (1 != 0) style when seeing non-executable edges. */
+ if (gsi_end_p (gsi_start_bb (bb)))
+ {
+ FOR_EACH_EDGE (e, ei, bb->succs)
{
- if ((EDGE_SUCC (b, 0)->flags & EDGE_EXECUTABLE)
- ^ (EDGE_SUCC (b, 1)->flags & EDGE_EXECUTABLE))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Removing unexecutable edge from ");
- print_gimple_stmt (dump_file, stmt, 0);
- }
- if (((EDGE_SUCC (b, 0)->flags & EDGE_TRUE_VALUE) != 0)
- == ((EDGE_SUCC (b, 0)->flags & EDGE_EXECUTABLE) != 0))
- gimple_cond_make_true (cond);
- else
- gimple_cond_make_false (cond);
- update_stmt (cond);
- el_todo |= TODO_cleanup_cfg;
- continue;
- }
+ if (e->flags & EDGE_EXECUTABLE)
+ continue;
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file,
+ "marking outgoing edge %d -> %d executable\n",
+ e->src->index, e->dest->index);
+ gcc_checking_assert (iterate || !(e->flags & EDGE_DFS_BACK));
+ e->flags |= EDGE_EXECUTABLE;
+ e->dest->flags |= BB_EXECUTABLE;
}
-
- bool can_make_abnormal_goto = stmt_can_make_abnormal_goto (stmt);
- bool was_noreturn = (is_gimple_call (stmt)
- && gimple_call_noreturn_p (stmt));
- tree vdef = gimple_vdef (stmt);
- tree vuse = gimple_vuse (stmt);
-
- /* If we didn't replace the whole stmt (or propagate the result
- into all uses), replace all uses on this stmt with their
- leaders. */
- bool modified = false;
- use_operand_p use_p;
- ssa_op_iter iter;
- FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
+ }
+ for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+ !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ ssa_op_iter i;
+ tree op;
+ if (!bb_visited)
{
- tree use = USE_FROM_PTR (use_p);
- /* ??? The call code above leaves stmt operands un-updated. */
- if (TREE_CODE (use) != SSA_NAME)
- continue;
- tree sprime = eliminate_avail (use);
- if (sprime && sprime != use
- && may_propagate_copy (use, sprime)
- /* We substitute into debug stmts to avoid excessive
- debug temporaries created by removed stmts, but we need
- to avoid doing so for inserted sprimes as we never want
- to create debug temporaries for them. */
- && (!inserted_exprs
- || TREE_CODE (sprime) != SSA_NAME
- || !is_gimple_debug (stmt)
- || !bitmap_bit_p (inserted_exprs, SSA_NAME_VERSION (sprime))))
+ FOR_EACH_SSA_TREE_OPERAND (op, gsi_stmt (gsi), i, SSA_OP_ALL_DEFS)
{
- propagate_value (use_p, sprime);
- modified = true;
+ vn_ssa_aux_t op_info = VN_INFO (op);
+ gcc_assert (!op_info->visited);
+ op_info->valnum = VN_TOP;
+ op_info->visited = true;
}
+
+ /* We somehow have to deal with uses that are not defined
+ in the processed region. Forcing unvisited uses to
+ varying here doesn't play well with def-use following during
+ expression simplification, so we deal with this by checking
+ the visited flag in SSA_VAL. */
}
- /* Fold the stmt if modified, this canonicalizes MEM_REFs we propagated
- into which is a requirement for the IPA devirt machinery. */
- gimple *old_stmt = stmt;
- if (modified)
+ visit_stmt (gsi_stmt (gsi));
+
+ gimple *last = gsi_stmt (gsi);
+ e = NULL;
+ switch (gimple_code (last))
{
- /* If a formerly non-invariant ADDR_EXPR is turned into an
- invariant one it was on a separate stmt. */
- if (gimple_assign_single_p (stmt)
- && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR)
- recompute_tree_invariant_for_addr_expr (gimple_assign_rhs1 (stmt));
- gimple_stmt_iterator prev = gsi;
- gsi_prev (&prev);
- if (fold_stmt (&gsi))
- {
- /* fold_stmt may have created new stmts inbetween
- the previous stmt and the folded stmt. Mark
- all defs created there as varying to not confuse
- the SCCVN machinery as we're using that even during
- elimination. */
- if (gsi_end_p (prev))
- prev = gsi_start_bb (b);
- else
- gsi_next (&prev);
- if (gsi_stmt (prev) != gsi_stmt (gsi))
- do
+ case GIMPLE_SWITCH:
+ e = find_taken_edge (bb, vn_valueize (gimple_switch_index
+ (as_a <gswitch *> (last))));
+ break;
+ case GIMPLE_COND:
+ {
+ tree lhs = vn_valueize (gimple_cond_lhs (last));
+ tree rhs = vn_valueize (gimple_cond_rhs (last));
+ tree val = gimple_simplify (gimple_cond_code (last),
+ boolean_type_node, lhs, rhs,
+ NULL, vn_valueize);
+ /* If the condition didn't simplify see if we have recorded
+ an expression from edges taken so far. */
+ if (! val || TREE_CODE (val) != INTEGER_CST)
+ {
+ vn_nary_op_t vnresult;
+ tree ops[2];
+ ops[0] = lhs;
+ ops[1] = rhs;
+ val = vn_nary_op_lookup_pieces (2, gimple_cond_code (last),
+ boolean_type_node, ops,
+ &vnresult);
+ /* Did we get a predicated value? */
+ if (! val && vnresult && vnresult->predicated_values)
{
- tree def;
- ssa_op_iter dit;
- FOR_EACH_SSA_TREE_OPERAND (def, gsi_stmt (prev),
- dit, SSA_OP_ALL_DEFS)
- /* As existing DEFs may move between stmts
- we have to guard VN_INFO_GET. */
- if (! has_VN_INFO (def))
- VN_INFO_GET (def)->valnum = def;
- if (gsi_stmt (prev) == gsi_stmt (gsi))
- break;
- gsi_next (&prev);
+ val = vn_nary_op_get_predicated_value (vnresult, bb);
+ if (val && dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Got predicated value ");
+ print_generic_expr (dump_file, val, TDF_NONE);
+ fprintf (dump_file, " for ");
+ print_gimple_stmt (dump_file, last, TDF_SLIM);
+ }
}
- while (1);
- }
- stmt = gsi_stmt (gsi);
- /* In case we folded the stmt away schedule the NOP for removal. */
- if (gimple_nop_p (stmt))
- to_remove.safe_push (stmt);
+ }
+ if (val)
+ e = find_taken_edge (bb, val);
+ if (! e)
+ {
+ /* If we didn't manage to compute the taken edge then
+ push predicated expressions for the condition itself
+ and related conditions to the hashtables. This allows
+ simplification of redundant conditions which is
+ important as early cleanup. */
+ edge true_e, false_e;
+ extract_true_false_edges_from_block (bb, &true_e, &false_e);
+ enum tree_code code = gimple_cond_code (last);
+ enum tree_code icode
+ = invert_tree_comparison (code, HONOR_NANS (lhs));
+ tree ops[2];
+ ops[0] = lhs;
+ ops[1] = rhs;
+ if (do_region
+ && bitmap_bit_p (exit_bbs, true_e->dest->index))
+ true_e = NULL;
+ if (do_region
+ && bitmap_bit_p (exit_bbs, false_e->dest->index))
+ false_e = NULL;
+ if (true_e)
+ vn_nary_op_insert_pieces_predicated
+ (2, code, boolean_type_node, ops,
+ boolean_true_node, 0, true_e);
+ if (false_e)
+ vn_nary_op_insert_pieces_predicated
+ (2, code, boolean_type_node, ops,
+ boolean_false_node, 0, false_e);
+ if (icode != ERROR_MARK)
+ {
+ if (true_e)
+ vn_nary_op_insert_pieces_predicated
+ (2, icode, boolean_type_node, ops,
+ boolean_false_node, 0, true_e);
+ if (false_e)
+ vn_nary_op_insert_pieces_predicated
+ (2, icode, boolean_type_node, ops,
+ boolean_true_node, 0, false_e);
+ }
+ /* Relax for non-integers, inverted condition handled
+ above. */
+ if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
+ {
+ if (true_e)
+ insert_related_predicates_on_edge (code, ops, true_e);
+ if (false_e)
+ insert_related_predicates_on_edge (icode, ops, false_e);
+ }
+ }
+ break;
+ }
+ case GIMPLE_GOTO:
+ e = find_taken_edge (bb, vn_valueize (gimple_goto_dest (last)));
+ break;
+ default:
+ e = NULL;
}
-
- /* Visit indirect calls and turn them into direct calls if
- possible using the devirtualization machinery. Do this before
- checking for required EH/abnormal/noreturn cleanup as devird
- may expose more of those. */
- if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
+ if (e)
{
- tree fn = gimple_call_fn (call_stmt);
- if (fn
- && flag_devirtualize
- && virtual_method_call_p (fn))
+ todo = TODO_cleanup_cfg;
+ if (!(e->flags & EDGE_EXECUTABLE))
{
- tree otr_type = obj_type_ref_class (fn);
- unsigned HOST_WIDE_INT otr_tok
- = tree_to_uhwi (OBJ_TYPE_REF_TOKEN (fn));
- tree instance;
- ipa_polymorphic_call_context context (current_function_decl,
- fn, stmt, &instance);
- context.get_dynamic_type (instance, OBJ_TYPE_REF_OBJECT (fn),
- otr_type, stmt);
- bool final;
- vec <cgraph_node *> targets
- = possible_polymorphic_call_targets (obj_type_ref_class (fn),
- otr_tok, context, &final);
- if (dump_file)
- dump_possible_polymorphic_call_targets (dump_file,
- obj_type_ref_class (fn),
- otr_tok, context);
- if (final && targets.length () <= 1 && dbg_cnt (devirt))
- {
- tree fn;
- if (targets.length () == 1)
- fn = targets[0]->decl;
- else
- fn = builtin_decl_implicit (BUILT_IN_UNREACHABLE);
- if (dump_enabled_p ())
- {
- dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, stmt,
- "converting indirect call to "
- "function %s\n",
- lang_hooks.decl_printable_name (fn, 2));
- }
- gimple_call_set_fndecl (call_stmt, fn);
- /* If changing the call to __builtin_unreachable
- or similar noreturn function, adjust gimple_call_fntype
- too. */
- if (gimple_call_noreturn_p (call_stmt)
- && VOID_TYPE_P (TREE_TYPE (TREE_TYPE (fn)))
- && TYPE_ARG_TYPES (TREE_TYPE (fn))
- && (TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fn)))
- == void_type_node))
- gimple_call_set_fntype (call_stmt, TREE_TYPE (fn));
- maybe_remove_unused_call_args (cfun, call_stmt);
- modified = true;
- }
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file,
+ "marking known outgoing %sedge %d -> %d executable\n",
+ e->flags & EDGE_DFS_BACK ? "back-" : "",
+ e->src->index, e->dest->index);
+ gcc_checking_assert (iterate || !(e->flags & EDGE_DFS_BACK));
+ e->flags |= EDGE_EXECUTABLE;
+ e->dest->flags |= BB_EXECUTABLE;
}
}
-
- if (modified)
+ else if (gsi_one_before_end_p (gsi))
{
- /* When changing a call into a noreturn call, cfg cleanup
- is needed to fix up the noreturn call. */
- if (!was_noreturn
- && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
- to_fixup.safe_push (stmt);
- /* When changing a condition or switch into one we know what
- edge will be executed, schedule a cfg cleanup. */
- if ((gimple_code (stmt) == GIMPLE_COND
- && (gimple_cond_true_p (as_a <gcond *> (stmt))
- || gimple_cond_false_p (as_a <gcond *> (stmt))))
- || (gimple_code (stmt) == GIMPLE_SWITCH
- && TREE_CODE (gimple_switch_index
- (as_a <gswitch *> (stmt))) == INTEGER_CST))
- el_todo |= TODO_cleanup_cfg;
- /* If we removed EH side-effects from the statement, clean
- its EH information. */
- if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
- {
- bitmap_set_bit (need_eh_cleanup,
- gimple_bb (stmt)->index);
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " Removed EH side-effects.\n");
- }
- /* Likewise for AB side-effects. */
- if (can_make_abnormal_goto
- && !stmt_can_make_abnormal_goto (stmt))
+ FOR_EACH_EDGE (e, ei, bb->succs)
{
- bitmap_set_bit (need_ab_cleanup,
- gimple_bb (stmt)->index);
+ if (e->flags & EDGE_EXECUTABLE)
+ continue;
if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " Removed AB side-effects.\n");
+ fprintf (dump_file,
+ "marking outgoing edge %d -> %d executable\n",
+ e->src->index, e->dest->index);
+ gcc_checking_assert (iterate || !(e->flags & EDGE_DFS_BACK));
+ e->flags |= EDGE_EXECUTABLE;
+ e->dest->flags |= BB_EXECUTABLE;
}
- update_stmt (stmt);
- if (vdef != gimple_vdef (stmt))
- VN_INFO (vdef)->valnum = vuse;
}
- /* Make new values available - for fully redundant LHS we
- continue with the next stmt above and skip this. */
- def_operand_p defp;
- FOR_EACH_SSA_DEF_OPERAND (defp, stmt, iter, SSA_OP_DEF)
- eliminate_push_avail (DEF_FROM_PTR (defp));
+ /* Eliminate. That also pushes to avail. */
+ if (eliminate && ! iterate)
+ avail.eliminate_stmt (bb, &gsi);
+ else
+ /* If not eliminating, make all not already available defs
+ available. */
+ FOR_EACH_SSA_TREE_OPERAND (op, gsi_stmt (gsi), i, SSA_OP_DEF)
+ if (! avail.eliminate_avail (bb, op))
+ avail.eliminate_push_avail (bb, op);
}
- /* Replace destination PHI arguments. */
- FOR_EACH_EDGE (e, ei, b->succs)
- if (e->flags & EDGE_EXECUTABLE)
+ /* Eliminate in destination PHI arguments. Always substitute in dest
+ PHIs, even for non-executable edges. This handles region
+ exit PHIs. */
+ if (!iterate && eliminate)
+ FOR_EACH_EDGE (e, ei, bb->succs)
for (gphi_iterator gsi = gsi_start_phis (e->dest);
- !gsi_end_p (gsi);
- gsi_next (&gsi))
+ !gsi_end_p (gsi); gsi_next (&gsi))
{
gphi *phi = gsi.phi ();
use_operand_p use_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, e);
@@ -5904,106 +6127,424 @@ eliminate_dom_walker::before_dom_children (basic_block b)
if (TREE_CODE (arg) != SSA_NAME
|| virtual_operand_p (arg))
continue;
- tree sprime = eliminate_avail (arg);
- if (sprime && may_propagate_copy (arg, sprime))
+ tree sprime;
+ if (SSA_NAME_IS_DEFAULT_DEF (arg))
+ {
+ sprime = SSA_VAL (arg);
+ gcc_assert (TREE_CODE (sprime) != SSA_NAME
+ || SSA_NAME_IS_DEFAULT_DEF (sprime));
+ }
+ else
+ /* Look for something available at the definition block of the argument.
+ This avoids inconsistencies between availability there which
+ decides if the stmt can be removed and availability at the
+ use site. The SSA property ensures that things available
+ at the definition are also available at uses. */
+ sprime = avail.eliminate_avail (gimple_bb (SSA_NAME_DEF_STMT (arg)),
+ arg);
+ if (sprime
+ && sprime != arg
+ && may_propagate_copy (arg, sprime))
propagate_value (use_p, sprime);
}
- return NULL;
+
+ vn_context_bb = NULL;
+ return todo;
}
-/* Make no longer available leaders no longer available. */
+/* Unwind state per basic-block. */
-void
-eliminate_dom_walker::after_dom_children (basic_block)
+struct unwind_state
{
- tree entry;
- while ((entry = avail_stack.pop ()) != NULL_TREE)
+ /* Times this block has been visited. */
+ unsigned visited;
+ /* Whether to handle this as iteration point or whether to treat
+ incoming backedge PHI values as varying. */
+ bool iterate;
+ void *ob_top;
+ vn_reference_t ref_top;
+ vn_phi_t phi_top;
+ vn_nary_op_t nary_top;
+};
+
+/* Unwind the RPO VN state for iteration. */
+
+static void
+do_unwind (unwind_state *to, int rpo_idx, rpo_elim &avail, int *bb_to_rpo)
+{
+ gcc_assert (to->iterate);
+ for (; last_inserted_nary != to->nary_top;
+ last_inserted_nary = last_inserted_nary->next)
{
- tree valnum = VN_INFO (entry)->valnum;
- tree old = avail[SSA_NAME_VERSION (valnum)];
- if (old == entry)
- avail[SSA_NAME_VERSION (valnum)] = NULL_TREE;
+ vn_nary_op_t *slot;
+ slot = valid_info->nary->find_slot_with_hash
+ (last_inserted_nary, last_inserted_nary->hashcode, NO_INSERT);
+ /* Predication causes the need to restore previous state. */
+ if ((*slot)->unwind_to)
+ *slot = (*slot)->unwind_to;
else
- avail[SSA_NAME_VERSION (valnum)] = entry;
+ valid_info->nary->clear_slot (slot);
+ }
+ for (; last_inserted_phi != to->phi_top;
+ last_inserted_phi = last_inserted_phi->next)
+ {
+ vn_phi_t *slot;
+ slot = valid_info->phis->find_slot_with_hash
+ (last_inserted_phi, last_inserted_phi->hashcode, NO_INSERT);
+ valid_info->phis->clear_slot (slot);
+ }
+ for (; last_inserted_ref != to->ref_top;
+ last_inserted_ref = last_inserted_ref->next)
+ {
+ vn_reference_t *slot;
+ slot = valid_info->references->find_slot_with_hash
+ (last_inserted_ref, last_inserted_ref->hashcode, NO_INSERT);
+ (*slot)->operands.release ();
+ valid_info->references->clear_slot (slot);
+ }
+ obstack_free (&vn_tables_obstack, to->ob_top);
+
+ /* Prune [rpo_idx, ] from avail. */
+ /* ??? This is O(number-of-values-in-region) which is
+ O(region-size) rather than O(iteration-piece). */
+ for (rpo_elim::rpo_avail_t::iterator i
+ = avail.m_rpo_avail.begin ();
+ i != avail.m_rpo_avail.end (); ++i)
+ {
+ while (! (*i).second.is_empty ())
+ {
+ if (bb_to_rpo[(*i).second.last ().first] < rpo_idx)
+ break;
+ (*i).second.pop ();
+ }
}
}
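
A minimal sketch of the pruning step above, reusing the illustrative containers from the earlier avail_model sketch (again not GCC code; bb_to_rpo maps a block index to its RPO position):

#include <unordered_map>
#include <utility>
#include <vector>

typedef std::unordered_map<int, std::vector<std::pair<int, int> > > avail_map;

/* Drop availability entries pushed by blocks whose RPO position is at or
   after RESTART_RPO_IDX, i.e. by the iteration piece about to be redone.  */
void
prune_avail (avail_map &rpo_avail, const std::vector<int> &bb_to_rpo,
             int restart_rpo_idx)
{
  for (avail_map::iterator i = rpo_avail.begin (); i != rpo_avail.end (); ++i)
    while (!i->second.empty ()
           && bb_to_rpo[i->second.back ().first] >= restart_rpo_idx)
      i->second.pop_back ();
}

As the ??? note says, this walks every value rather than only those recorded since the unwind point, which is the O(region-size) cost being flagged.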
-/* Eliminate fully redundant computations. */
+/* Do VN on a SEME region specified by ENTRY and EXIT_BBS in FN.
+ If ITERATE is true then treat backedges optimistically as not
+ executed and iterate. If ELIMINATE is true then perform
+ elimination, otherwise leave that to the caller. */
-unsigned int
-vn_eliminate (bitmap inserted_exprs)
+static unsigned
+do_rpo_vn (function *fn, edge entry, bitmap exit_bbs,
+ bool iterate, bool eliminate)
{
- eliminate_dom_walker el (CDI_DOMINATORS, inserted_exprs);
- el.avail.reserve (num_ssa_names);
+ unsigned todo = 0;
- el.walk (cfun->cfg->x_entry_block_ptr);
+ /* We currently do not support region-based iteration when
+ elimination is requested. */
+ gcc_assert (!entry || !iterate || !eliminate);
+ /* When iterating we need loop info up-to-date. */
+ gcc_assert (!iterate || !loops_state_satisfies_p (LOOPS_NEED_FIXUP));
- /* We cannot remove stmts during BB walk, especially not release SSA
- names there as this confuses the VN machinery. The stmts ending
- up in to_remove are either stores or simple copies.
- Remove stmts in reverse order to make debug stmt creation possible. */
- while (!el.to_remove.is_empty ())
+ bool do_region = entry != NULL;
+ if (!do_region)
{
- gimple *stmt = el.to_remove.pop ();
+ entry = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (fn));
+ exit_bbs = BITMAP_ALLOC (NULL);
+ bitmap_set_bit (exit_bbs, EXIT_BLOCK);
+ }
- if (dump_file && (dump_flags & TDF_DETAILS))
+ int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (fn) - NUM_FIXED_BLOCKS);
+ int n = rev_post_order_and_mark_dfs_back_seme (fn, entry, exit_bbs,
+ iterate, rpo);
+ /* rev_post_order_and_mark_dfs_back_seme fills RPO in reverse order. */
+ for (int i = 0; i < n / 2; ++i)
+ std::swap (rpo[i], rpo[n-i-1]);
+
+ if (!do_region)
+ BITMAP_FREE (exit_bbs);
+
+ int *bb_to_rpo = XNEWVEC (int, last_basic_block_for_fn (fn));
+ for (int i = 0; i < n; ++i)
+ bb_to_rpo[rpo[i]] = i;
+
+ unwind_state *rpo_state = XNEWVEC (unwind_state, n);
+
+ rpo_elim avail (entry->dest);
+ rpo_avail = &avail;
+
+ /* Verify we have no extra entries into the region. */
+ if (flag_checking && do_region)
+ {
+ auto_bb_flag bb_in_region (fn);
+ for (int i = 0; i < n; ++i)
{
- fprintf (dump_file, "Removing dead stmt ");
- print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
+ basic_block bb = BASIC_BLOCK_FOR_FN (fn, rpo[i]);
+ bb->flags |= bb_in_region;
+ }
+ /* We can't merge the first two loops because we cannot rely
+ on EDGE_DFS_BACK for edges not within the region. But if
+ we decide to always have the bb_in_region flag we can
+ do the checking during the RPO walk itself (but then it's
+ also easy to handle MEME conservatively). */
+ for (int i = 0; i < n; ++i)
+ {
+ basic_block bb = BASIC_BLOCK_FOR_FN (fn, rpo[i]);
+ edge e;
+ edge_iterator ei;
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ gcc_assert (e == entry || (e->src->flags & bb_in_region));
+ }
+ for (int i = 0; i < n; ++i)
+ {
+ basic_block bb = BASIC_BLOCK_FOR_FN (fn, rpo[i]);
+ bb->flags &= ~bb_in_region;
}
+ }
- gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
- if (gimple_code (stmt) == GIMPLE_PHI)
- remove_phi_node (&gsi, true);
- else
+ /* Create the VN state. For the initial size of the various hashtables
+ use a heuristic based on region size and number of SSA names. */
+ unsigned region_size = (((unsigned HOST_WIDE_INT)n * num_ssa_names)
+ / (n_basic_blocks_for_fn (fn) - NUM_FIXED_BLOCKS));
+ VN_TOP = create_tmp_var_raw (void_type_node, "vn_top");
+
+ vn_ssa_aux_hash = new hash_table <vn_ssa_aux_hasher> (region_size * 2);
+ gcc_obstack_init (&vn_ssa_aux_obstack);
+
+ gcc_obstack_init (&vn_tables_obstack);
+ gcc_obstack_init (&vn_tables_insert_obstack);
+ valid_info = XCNEW (struct vn_tables_s);
+ allocate_vn_table (valid_info, region_size);
+ last_inserted_ref = NULL;
+ last_inserted_phi = NULL;
+ last_inserted_nary = NULL;
+
+ vn_valueize = rpo_vn_valueize;
+
+ /* Initialize the unwind state and edge/BB executable state. */
+ for (int i = 0; i < n; ++i)
+ {
+ basic_block bb = BASIC_BLOCK_FOR_FN (fn, rpo[i]);
+ rpo_state[i].visited = 0;
+ bb->flags &= ~BB_EXECUTABLE;
+ bool has_backedges = false;
+ edge e;
+ edge_iterator ei;
+ FOR_EACH_EDGE (e, ei, bb->preds)
{
- basic_block bb = gimple_bb (stmt);
- unlink_stmt_vdef (stmt);
- if (gsi_remove (&gsi, true))
- bitmap_set_bit (el.need_eh_cleanup, bb->index);
- if (is_gimple_call (stmt) && stmt_can_make_abnormal_goto (stmt))
- bitmap_set_bit (el.need_ab_cleanup, bb->index);
- release_defs (stmt);
+ if (e->flags & EDGE_DFS_BACK)
+ has_backedges = true;
+ if (! iterate && (e->flags & EDGE_DFS_BACK))
+ {
+ e->flags |= EDGE_EXECUTABLE;
+ /* ??? Strictly speaking we only need to unconditionally
+ process a block when it is in an irreducible region,
+ thus when it may be reachable via the backedge only. */
+ bb->flags |= BB_EXECUTABLE;
+ }
+ else
+ e->flags &= ~EDGE_EXECUTABLE;
}
+ rpo_state[i].iterate = iterate && has_backedges;
+ }
+ entry->flags |= EDGE_EXECUTABLE;
+ entry->dest->flags |= BB_EXECUTABLE;
- /* Removing a stmt may expose a forwarder block. */
- el.el_todo |= TODO_cleanup_cfg;
+ /* As a heuristic to improve compile-time we handle only the N innermost
+ loops and the outermost one optimistically. */
+ if (iterate)
+ {
+ loop_p loop;
+ unsigned max_depth = PARAM_VALUE (PARAM_RPO_VN_MAX_LOOP_DEPTH);
+ FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
+ if (loop_depth (loop) > max_depth)
+ for (unsigned i = 2;
+ i < loop_depth (loop) - max_depth; ++i)
+ {
+ basic_block header = superloop_at_depth (loop, i)->header;
+ rpo_state[bb_to_rpo[header->index]].iterate = false;
+ edge e;
+ edge_iterator ei;
+ FOR_EACH_EDGE (e, ei, header->preds)
+ if (e->flags & EDGE_DFS_BACK)
+ e->flags |= EDGE_EXECUTABLE;
+ }
}
- /* Fixup stmts that became noreturn calls. This may require splitting
- blocks and thus isn't possible during the dominator walk. Do this
- in reverse order so we don't inadvertedly remove a stmt we want to
- fixup by visiting a dominating now noreturn call first. */
- while (!el.to_fixup.is_empty ())
+ /* Go and process all blocks, iterating as necessary. */
+ int idx = 0;
+ uint64_t nblk = 0;
+ do
{
- gimple *stmt = el.to_fixup.pop ();
+ basic_block bb = BASIC_BLOCK_FOR_FN (fn, rpo[idx]);
+
+ /* If the block has incoming backedges remember unwind state. This
+ is required even for non-executable blocks since in irreducible
+ regions we might reach them via the backedge and re-start iterating
+ from there.
+ Note we can individually mark blocks with incoming backedges to
+ not iterate where we then handle PHIs conservatively. We do that
+ heuristically to reduce compile-time for degenerate cases. */
+ if (rpo_state[idx].iterate)
+ {
+ rpo_state[idx].ob_top = obstack_alloc (&vn_tables_obstack, 0);
+ rpo_state[idx].ref_top = last_inserted_ref;
+ rpo_state[idx].phi_top = last_inserted_phi;
+ rpo_state[idx].nary_top = last_inserted_nary;
+ }
+
+ if (!(bb->flags & BB_EXECUTABLE))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Block %d: BB%d found not executable\n",
+ idx, bb->index);
+ idx++;
+ continue;
+ }
if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Processing block %d: BB%d\n", idx, bb->index);
+ nblk++;
+ todo |= process_bb (avail, bb,
+ rpo_state[idx].visited != 0,
+ rpo_state[idx].iterate,
+ iterate, eliminate, do_region, exit_bbs);
+ rpo_state[idx].visited++;
+
+ if (iterate)
{
- fprintf (dump_file, "Fixing up noreturn call ");
- print_gimple_stmt (dump_file, stmt, 0);
+ /* Verify if changed values flow over executable outgoing backedges
+ and those change destination PHI values (that's the thing we
+ can easily verify). Reduce over all such edges to the farthest
+ away PHI. */
+ int iterate_to = -1;
+ edge_iterator ei;
+ edge e;
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if ((e->flags & (EDGE_DFS_BACK|EDGE_EXECUTABLE))
+ == (EDGE_DFS_BACK|EDGE_EXECUTABLE)
+ && rpo_state[bb_to_rpo[e->dest->index]].iterate)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Looking for changed values of backedge "
+ "%d->%d destination PHIs\n",
+ e->src->index, e->dest->index);
+ vn_context_bb = e->dest;
+ gphi_iterator gsi;
+ for (gsi = gsi_start_phis (e->dest);
+ !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ bool inserted = false;
+ /* While we'd ideally just iterate on value changes
+ we CSE PHIs and do that even across basic-block
+ boundaries. So even hashtable state changes can
+ be important (which is roughly equivalent to
+ PHI argument value changes). To not excessively
+ iterate because of that we track with GF_PLF_1 whether
+ a PHI was CSEd to. */
+ bool phival_changed;
+ if ((phival_changed = visit_phi (gsi.phi (),
+ &inserted, false))
+ || (inserted && gimple_plf (gsi.phi (), GF_PLF_1)))
+ {
+ if (!phival_changed
+ && dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "PHI was CSEd and hashtable "
+ "state (changed)\n");
+ int destidx = bb_to_rpo[e->dest->index];
+ if (iterate_to == -1
+ || destidx < iterate_to)
+ iterate_to = destidx;
+ break;
+ }
+ }
+ vn_context_bb = NULL;
+ }
+ if (iterate_to != -1)
+ {
+ do_unwind (&rpo_state[iterate_to], iterate_to,
+ avail, bb_to_rpo);
+ idx = iterate_to;
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Iterating to %d BB%d\n",
+ iterate_to, rpo[iterate_to]);
+ continue;
+ }
}
- if (fixup_noreturn_call (stmt))
- el.el_todo |= TODO_cleanup_cfg;
+ idx++;
}
+ while (idx < n);
- bool do_eh_cleanup = !bitmap_empty_p (el.need_eh_cleanup);
- bool do_ab_cleanup = !bitmap_empty_p (el.need_ab_cleanup);
+ /* If statistics or dump file active. */
+ int nex = 0;
+ unsigned max_visited = 1;
+ for (int i = 0; i < n; ++i)
+ {
+ basic_block bb = BASIC_BLOCK_FOR_FN (fn, rpo[i]);
+ if (bb->flags & BB_EXECUTABLE)
+ nex++;
+ statistics_histogram_event (cfun, "RPO block visited times",
+ rpo_state[i].visited);
+ if (rpo_state[i].visited > max_visited)
+ max_visited = rpo_state[i].visited;
+ }
+ unsigned nvalues = 0, navail = 0;
+ for (rpo_elim::rpo_avail_t::iterator i = avail.m_rpo_avail.begin ();
+ i != avail.m_rpo_avail.end (); ++i)
+ {
+ nvalues++;
+ navail += (*i).second.length ();
+ }
+ statistics_counter_event (cfun, "RPO blocks", n);
+ statistics_counter_event (cfun, "RPO blocks visited", nblk);
+ statistics_counter_event (cfun, "RPO blocks executable", nex);
+ statistics_histogram_event (cfun, "RPO iterations", 10*nblk / nex);
+ statistics_histogram_event (cfun, "RPO num values", nvalues);
+ statistics_histogram_event (cfun, "RPO num avail", navail);
+ statistics_histogram_event (cfun, "RPO num lattice",
+ vn_ssa_aux_hash->elements ());
+ if (dump_file && (dump_flags & (TDF_DETAILS|TDF_STATS)))
+ {
+ fprintf (dump_file, "RPO iteration over %d blocks visited %" PRIu64
+ " blocks in total discovering %d executable blocks iterating "
+ "%d.%d times, a block was visited max. %u times\n",
+ n, nblk, nex,
+ (int)((10*nblk / nex)/10), (int)((10*nblk / nex)%10),
+ max_visited);
+ fprintf (dump_file, "RPO tracked %d values available at %d locations "
+ "and %" PRIu64 " lattice elements\n",
+ nvalues, navail, (uint64_t) vn_ssa_aux_hash->elements ());
+ }
- if (do_eh_cleanup)
- gimple_purge_all_dead_eh_edges (el.need_eh_cleanup);
+ if (eliminate)
+ {
+ /* When !iterate we already performed elimination during the RPO
+ walk. */
+ if (iterate)
+ {
+ /* Elimination for region-based VN needs to be done within the
+ RPO walk. */
+ gcc_assert (! do_region);
+ /* Note we can't use avail.walk here because that gets confused
+ by the existing availability and it will be less efficient
+ as well. */
+ todo |= eliminate_with_rpo_vn (NULL);
+ }
+ else
+ todo |= avail.eliminate_cleanup (do_region);
+ }
- if (do_ab_cleanup)
- gimple_purge_all_dead_abnormal_call_edges (el.need_ab_cleanup);
+ vn_valueize = NULL;
+ rpo_avail = NULL;
- if (do_eh_cleanup || do_ab_cleanup)
- el.el_todo |= TODO_cleanup_cfg;
+ XDELETEVEC (bb_to_rpo);
+ XDELETEVEC (rpo);
+
+ return todo;
+}
- statistics_counter_event (cfun, "Eliminated", el.eliminations);
- statistics_counter_event (cfun, "Insertions", el.insertions);
+/* Region-based entry for RPO VN. Performs value-numbering and elimination
+ on the SEME region specified by ENTRY and EXIT_BBS. */
- return el.el_todo;
+unsigned
+do_rpo_vn (function *fn, edge entry, bitmap exit_bbs)
+{
+ default_vn_walk_kind = VN_WALKREWRITE;
+ unsigned todo = do_rpo_vn (fn, entry, exit_bbs, false, true);
+ free_rpo_vn ();
+ return todo;
}
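
For orientation, a hedged usage sketch of this entry point (GCC-internal context assumed, so not runnable standalone; FN stands for whatever function is being optimized, e.g. cfun). It mirrors the setup do_rpo_vn performs itself when no region is given: the whole body is treated as a SEME region whose single exit is the EXIT block.

  edge entry = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (fn));
  bitmap exit_bbs = BITMAP_ALLOC (NULL);
  bitmap_set_bit (exit_bbs, EXIT_BLOCK);
  unsigned todo = do_rpo_vn (fn, entry, exit_bbs);
  BITMAP_FREE (exit_bbs);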
@@ -6037,17 +6578,21 @@ public:
}; // class pass_fre
unsigned int
-pass_fre::execute (function *)
+pass_fre::execute (function *fun)
{
- unsigned int todo = 0;
+ unsigned todo = 0;
- run_scc_vn (VN_WALKREWRITE);
+ /* At -O[1g] use the cheap non-iterating mode. */
+ calculate_dominance_info (CDI_DOMINATORS);
+ if (optimize > 1)
+ loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
- /* Remove all the redundant expressions. */
- todo |= vn_eliminate (NULL);
+ default_vn_walk_kind = VN_WALKREWRITE;
+ todo = do_rpo_vn (fun, NULL, NULL, optimize > 1, true);
+ free_rpo_vn ();
- scc_vn_restore_ssa_info ();
- free_scc_vn ();
+ if (optimize > 1)
+ loop_optimizer_finalize ();
return todo;
}
@@ -6059,3 +6604,5 @@ make_pass_fre (gcc::context *ctxt)
{
return new pass_fre (ctxt);
}
+
+#undef BB_EXECUTABLE
diff --git a/gcc/tree-ssa-sccvn.h b/gcc/tree-ssa-sccvn.h
index 323c85fde55..ea4efd8e23b 100644
--- a/gcc/tree-ssa-sccvn.h
+++ b/gcc/tree-ssa-sccvn.h
@@ -28,6 +28,18 @@ bool expressions_equal_p (tree, tree);
/* TOP of the VN lattice. */
extern tree VN_TOP;
+/* A predicated value. */
+struct vn_pval
+{
+ vn_pval *next;
+ /* The value of the expression this is attached to is RESULT in
+ case the expression is computed at a point dominated by one of
+ the blocks in valid_dominated_by_p. */
+ tree result;
+ unsigned n;
+ int valid_dominated_by_p[1];
+};
+
/* N-ary operations in the hashtable consist of length operands, an
opcode, and a type. Result is the value number of the operation,
and hashcode is stored to avoid having to calculate it
@@ -36,12 +48,19 @@ extern tree VN_TOP;
typedef struct vn_nary_op_s
{
vn_nary_op_s *next;
+ vn_nary_op_s *unwind_to;
/* Unique identify that all expressions with the same value have. */
unsigned int value_id;
ENUM_BITFIELD(tree_code) opcode : 16;
unsigned length : 16;
hashval_t hashcode;
- tree result;
+ unsigned predicated_values : 1;
+ union {
+ /* If ! predicated_values this is the value of the expression. */
+ tree result;
+ /* If predicated_values this is a list of values of the expression. */
+ vn_pval *values;
+ } u;
tree type;
tree op[1];
} *vn_nary_op_t;
@@ -176,36 +195,23 @@ vn_constant_eq_with_type (tree c1, tree c2)
typedef struct vn_ssa_aux
{
+ /* SSA name this vn_ssa_aux is associated with in the lattice. */
+ tree name;
/* Value number. This may be an SSA name or a constant. */
tree valnum;
/* Statements to insert if needs_insertion is true. */
gimple_seq expr;
- /* Saved SSA name info. */
- tree_ssa_name::ssa_name_info_type info;
-
/* Unique identifier that all expressions with the same value have. */
unsigned int value_id;
- /* SCC information. */
- unsigned int dfsnum;
- unsigned int low;
+ /* Whether the SSA_NAME has been processed at least once. */
unsigned visited : 1;
- unsigned on_sccstack : 1;
-
- /* Whether the SSA_NAME has been value numbered already. This is
- only saying whether visit_use has been called on it at least
- once. It cannot be used to avoid visitation for SSA_NAME's
- involved in non-singleton SCC's. */
- unsigned use_processed : 1;
/* Whether the SSA_NAME has no defining statement and thus an
insertion of such with EXPR as definition is required before
a use can be created of it. */
unsigned needs_insertion : 1;
-
- /* Whether range-info is anti-range. */
- unsigned range_info_anti_range_p : 1;
} *vn_ssa_aux_t;
enum vn_lookup_kind { VN_NOWALK, VN_WALK, VN_WALKREWRITE };
@@ -213,11 +219,7 @@ enum vn_lookup_kind { VN_NOWALK, VN_WALK, VN_WALKREWRITE };
/* Return the value numbering info for an SSA_NAME. */
bool has_VN_INFO (tree);
extern vn_ssa_aux_t VN_INFO (tree);
-extern vn_ssa_aux_t VN_INFO_GET (tree);
tree vn_get_expr_for (tree);
-void run_scc_vn (vn_lookup_kind);
-unsigned int vn_eliminate (bitmap);
-void free_scc_vn (void);
void scc_vn_restore_ssa_info (void);
tree vn_nary_op_lookup (tree, vn_nary_op_t *);
tree vn_nary_op_lookup_stmt (gimple *, vn_nary_op_t *);
@@ -250,55 +252,17 @@ bool value_id_constant_p (unsigned int);
tree fully_constant_vn_reference_p (vn_reference_t);
tree vn_nary_simplify (vn_nary_op_t);
-/* Valueize NAME if it is an SSA name, otherwise just return it. */
-
-static inline tree
-vn_valueize (tree name)
-{
- if (TREE_CODE (name) == SSA_NAME)
- {
- tree tem = VN_INFO (name)->valnum;
- return tem == VN_TOP ? name : tem;
- }
- return name;
-}
+unsigned do_rpo_vn (function *, edge, bitmap);
+void run_rpo_vn (vn_lookup_kind);
+unsigned eliminate_with_rpo_vn (bitmap);
+void free_rpo_vn (void);
-/* Get at the original range info for NAME. */
+/* Valueize NAME if it is an SSA name, otherwise just return it. This hook
+ is initialized by the RPO VN entry points. */
+extern tree (*vn_valueize) (tree);
-inline range_info_def *
-VN_INFO_RANGE_INFO (tree name)
-{
- return (VN_INFO (name)->info.range_info
- ? VN_INFO (name)->info.range_info
- : SSA_NAME_RANGE_INFO (name));
-}
+/* Context that valueization should operate on. */
+extern basic_block vn_context_bb;
-/* Whether the original range info of NAME is an anti-range. */
-
-inline bool
-VN_INFO_ANTI_RANGE_P (tree name)
-{
- return (VN_INFO (name)->info.range_info
- ? VN_INFO (name)->range_info_anti_range_p
- : SSA_NAME_ANTI_RANGE_P (name));
-}
-
-/* Get at the original range info kind for NAME. */
-
-inline value_range_type
-VN_INFO_RANGE_TYPE (tree name)
-{
- return VN_INFO_ANTI_RANGE_P (name) ? VR_ANTI_RANGE : VR_RANGE;
-}
-
-/* Get at the original pointer info for NAME. */
-
-inline ptr_info_def *
-VN_INFO_PTR_INFO (tree name)
-{
- return (VN_INFO (name)->info.ptr_info
- ? VN_INFO (name)->info.ptr_info
- : SSA_NAME_PTR_INFO (name));
-}
#endif /* TREE_SSA_SCCVN_H */
diff --git a/gcc/tree-ssa-strlen.c b/gcc/tree-ssa-strlen.c
index d0792aa38c8..d625b940c1f 100644
--- a/gcc/tree-ssa-strlen.c
+++ b/gcc/tree-ssa-strlen.c
@@ -336,7 +336,7 @@ get_stridx (tree exp)
return idx;
}
- s = string_constant (exp, &o);
+ s = string_constant (exp, &o, NULL, NULL);
if (s != NULL_TREE
&& (o == NULL_TREE || tree_fits_shwi_p (o))
&& TREE_STRING_LENGTH (s) > 0)
@@ -581,7 +581,7 @@ get_string_length (strinfo *si)
gcc_assert (is_gimple_call (stmt));
callee = gimple_call_fndecl (stmt);
- gcc_assert (callee && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL);
+ gcc_assert (callee && fndecl_built_in_p (callee, BUILT_IN_NORMAL));
lhs = gimple_call_lhs (stmt);
/* unshare_strinfo is intentionally not called here. The (delayed)
transformation of strcpy or strcat into stpcpy is done at the place
@@ -1160,7 +1160,9 @@ maybe_set_strlen_range (tree lhs, tree src, tree bound)
suggests if it's treated as a poor-man's flexible array member. */
src = TREE_OPERAND (src, 0);
bool src_is_array = TREE_CODE (TREE_TYPE (src)) == ARRAY_TYPE;
- if (src_is_array && !array_at_struct_end_p (src))
+ if (src_is_array
+ && TREE_CODE (src) != MEM_REF
+ && !array_at_struct_end_p (src))
{
tree type = TREE_TYPE (src);
if (tree size = TYPE_SIZE_UNIT (type))
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index fd24f84fb14..22558351138 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -4075,7 +4075,7 @@ handle_lhs_call (gcall *stmt, tree lhs, int flags, vec<ce_s> rhsc,
initialized and thus may point to global memory. All
builtin functions with the malloc attribute behave in a sane way. */
if (!fndecl
- || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
+ || !fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
make_constraint_from (vi, nonlocal_id);
tmpc.var = vi->id;
tmpc.offset = 0;
@@ -4729,7 +4729,7 @@ find_func_aliases_for_call (struct function *fn, gcall *t)
varinfo_t fi;
if (fndecl != NULL_TREE
- && DECL_BUILT_IN (fndecl)
+ && fndecl_built_in_p (fndecl)
&& find_func_aliases_for_builtin_call (fn, t))
return;
diff --git a/gcc/tree-ssa-ter.c b/gcc/tree-ssa-ter.c
index 4339520039b..3396a5c28df 100644
--- a/gcc/tree-ssa-ter.c
+++ b/gcc/tree-ssa-ter.c
@@ -683,7 +683,7 @@ find_replaceable_in_bb (temp_expr_table *tab, basic_block bb)
insns instead of a true call. */
if (is_gimple_call (stmt)
&& !((fndecl = gimple_call_fndecl (stmt))
- && DECL_BUILT_IN (fndecl)))
+ && fndecl_built_in_p (fndecl)))
cur_call_cnt++;
/* Increment counter if this statement sets a local
diff --git a/gcc/tree-ssa-threadedge.c b/gcc/tree-ssa-threadedge.c
index dbc0bbd772a..a2304493495 100644
--- a/gcc/tree-ssa-threadedge.c
+++ b/gcc/tree-ssa-threadedge.c
@@ -977,7 +977,7 @@ thread_around_empty_blocks (edge taken_edge,
|| TREE_CODE (cond) == CASE_LABEL_EXPR))
{
if (TREE_CODE (cond) == CASE_LABEL_EXPR)
- taken_edge = find_edge (bb, label_to_block (CASE_LABEL (cond)));
+ taken_edge = find_edge (bb, label_to_block (cfun, CASE_LABEL (cond)));
else
taken_edge = find_taken_edge (bb, cond);
@@ -1109,7 +1109,7 @@ thread_through_normal_block (edge e,
edge taken_edge;
if (TREE_CODE (cond) == CASE_LABEL_EXPR)
taken_edge = find_edge (e->dest,
- label_to_block (CASE_LABEL (cond)));
+ label_to_block (cfun, CASE_LABEL (cond)));
else
taken_edge = find_taken_edge (e->dest, cond);
diff --git a/gcc/tree-ssa-uncprop.c b/gcc/tree-ssa-uncprop.c
index 7d863a71551..98cc79e8607 100644
--- a/gcc/tree-ssa-uncprop.c
+++ b/gcc/tree-ssa-uncprop.c
@@ -184,7 +184,7 @@ associate_equivalences_with_edges (void)
for (i = 0; i < n_labels; i++)
{
tree label = gimple_switch_label (switch_stmt, i);
- basic_block bb = label_to_block (CASE_LABEL (label));
+ basic_block bb = label_to_block (cfun, CASE_LABEL (label));
if (CASE_HIGH (label)
|| !CASE_LOW (label)
diff --git a/gcc/tree-ssa-uninit.c b/gcc/tree-ssa-uninit.c
index a93610084f2..f3e42ddbd7f 100644
--- a/gcc/tree-ssa-uninit.c
+++ b/gcc/tree-ssa-uninit.c
@@ -725,7 +725,7 @@ convert_control_dep_chain_into_preds (vec<edge> *dep_chains,
for (idx = 0; idx < gimple_switch_num_labels (gs); ++idx)
{
tree tl = gimple_switch_label (gs, idx);
- if (e->dest == label_to_block (CASE_LABEL (tl)))
+ if (e->dest == label_to_block (cfun, CASE_LABEL (tl)))
{
if (!l)
l = tl;
diff --git a/gcc/tree-stdarg.c b/gcc/tree-stdarg.c
index c8594851957..f40dc5641cd 100644
--- a/gcc/tree-stdarg.c
+++ b/gcc/tree-stdarg.c
@@ -694,7 +694,7 @@ optimize_va_list_gpr_fpr_size (function *fun)
callee = gimple_call_fndecl (stmt);
if (!callee
- || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
+ || !fndecl_built_in_p (callee, BUILT_IN_NORMAL))
continue;
switch (DECL_FUNCTION_CODE (callee))
@@ -867,9 +867,8 @@ optimize_va_list_gpr_fpr_size (function *fun)
tree callee = gimple_call_fndecl (stmt);
if (callee
- && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
- && (DECL_FUNCTION_CODE (callee) == BUILT_IN_VA_START
- || DECL_FUNCTION_CODE (callee) == BUILT_IN_VA_END))
+ && (fndecl_built_in_p (callee, BUILT_IN_VA_START)
+ || fndecl_built_in_p (callee, BUILT_IN_VA_END)))
continue;
}
diff --git a/gcc/tree-streamer-in.c b/gcc/tree-streamer-in.c
index 43cd9a4cad1..8d8f8695718 100644
--- a/gcc/tree-streamer-in.c
+++ b/gcc/tree-streamer-in.c
@@ -769,7 +769,6 @@ static void
lto_input_ts_function_decl_tree_pointers (struct lto_input_block *ib,
struct data_in *data_in, tree expr)
{
- DECL_VINDEX (expr) = stream_read_tree (ib, data_in);
/* DECL_STRUCT_FUNCTION is loaded on demand by cgraph_get_body. */
DECL_FUNCTION_PERSONALITY (expr) = stream_read_tree (ib, data_in);
#ifndef ACCEL_COMPILER
@@ -820,7 +819,6 @@ lto_input_ts_type_common_tree_pointers (struct lto_input_block *ib,
TYPE_CONTEXT (expr) = stream_read_tree (ib, data_in);
/* TYPE_CANONICAL gets re-computed during type merging. */
TYPE_CANONICAL (expr) = NULL_TREE;
- TYPE_STUB_DECL (expr) = stream_read_tree (ib, data_in);
}
/* Read all pointer fields in the TS_TYPE_NON_COMMON structure of EXPR
diff --git a/gcc/tree-streamer-out.c b/gcc/tree-streamer-out.c
index a68cd4b135b..60ec1788bf3 100644
--- a/gcc/tree-streamer-out.c
+++ b/gcc/tree-streamer-out.c
@@ -657,7 +657,6 @@ static void
write_ts_function_decl_tree_pointers (struct output_block *ob, tree expr,
bool ref_p)
{
- stream_write_tree (ob, DECL_VINDEX (expr), ref_p);
/* DECL_STRUCT_FUNCTION is handled by lto_output_function. */
stream_write_tree (ob, DECL_FUNCTION_PERSONALITY (expr), ref_p);
/* Don't stream these when passing things to a different target. */
@@ -687,7 +686,9 @@ write_ts_type_common_tree_pointers (struct output_block *ob, tree expr,
stream_write_tree (ob, TYPE_CONTEXT (expr), ref_p);
/* TYPE_CANONICAL is re-computed during type merging, so no need
to stream it here. */
- stream_write_tree (ob, TYPE_STUB_DECL (expr), ref_p);
+ /* Do not stream TYPE_STUB_DECL; it is not needed by LTO but currently
+ it cannot be freed by free_lang_data without triggering ICEs in
+ langhooks. */
}
/* Write all pointer fields in the TS_TYPE_NON_COMMON structure of EXPR
diff --git a/gcc/tree-switch-conversion.c b/gcc/tree-switch-conversion.c
index 9a594a01fc4..7e4f34c71f8 100644
--- a/gcc/tree-switch-conversion.c
+++ b/gcc/tree-switch-conversion.c
@@ -78,7 +78,6 @@ switch_conversion::collect (gswitch *swtch)
unsigned int i;
edge e, e_default, e_first;
edge_iterator ei;
- basic_block first;
m_switch = swtch;
@@ -87,9 +86,8 @@ switch_conversion::collect (gswitch *swtch)
Collect the bits we can deduce from the CFG. */
m_index_expr = gimple_switch_index (swtch);
m_switch_bb = gimple_bb (swtch);
- m_default_bb
- = label_to_block (CASE_LABEL (gimple_switch_default_label (swtch)));
- e_default = find_edge (m_switch_bb, m_default_bb);
+ e_default = gimple_switch_default_edge (cfun, swtch);
+ m_default_bb = e_default->dest;
m_default_prob = e_default->probability;
m_default_count = e_default->count ();
FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
@@ -120,15 +118,9 @@ switch_conversion::collect (gswitch *swtch)
}
if (m_contiguous_range)
- {
- first = label_to_block (CASE_LABEL (gimple_switch_label (swtch, 1)));
- e_first = find_edge (m_switch_bb, first);
- }
+ e_first = gimple_switch_edge (cfun, swtch, 1);
else
- {
- first = m_default_bb;
- e_first = e_default;
- }
+ e_first = e_default;
/* See if there is one common successor block for all branch
targets. If it exists, record it in FINAL_BB.
@@ -306,8 +298,7 @@ switch_conversion::check_final_bb ()
unsigned int branch_num = gimple_switch_num_labels (m_switch);
for (unsigned int i = 1; i < branch_num; i++)
{
- tree lab = CASE_LABEL (gimple_switch_label (m_switch, i));
- if (label_to_block (lab) == bb)
+ if (gimple_switch_label_bb (cfun, m_switch, i) == bb)
{
m_reason = reason;
return false;
@@ -351,7 +342,7 @@ void
switch_conversion::gather_default_values (tree default_case)
{
gphi_iterator gsi;
- basic_block bb = label_to_block (CASE_LABEL (default_case));
+ basic_block bb = label_to_block (cfun, CASE_LABEL (default_case));
edge e;
int i = 0;
@@ -388,7 +379,7 @@ switch_conversion::build_constructors ()
for (i = 1; i < branch_num; i++)
{
tree cs = gimple_switch_label (m_switch, i);
- basic_block bb = label_to_block (CASE_LABEL (cs));
+ basic_block bb = label_to_block (cfun, CASE_LABEL (cs));
edge e;
tree high;
gphi_iterator gsi;
@@ -922,14 +913,7 @@ switch_conversion::expand (gswitch *swtch)
/* Group case labels so that we get the right results from the heuristics
that decide on the code generation approach for this switch. */
m_cfg_altered |= group_case_labels_stmt (swtch);
-
- /* If this switch is now a degenerate case with only a default label,
- there is nothing left for us to do. */
- if (gimple_switch_num_labels (swtch) < 2)
- {
- m_reason = "switch is a degenerate case";
- return;
- }
+ gcc_assert (gimple_switch_num_labels (swtch) >= 2);
collect (swtch);
@@ -1070,6 +1054,9 @@ void
jump_table_cluster::emit (tree index_expr, tree,
tree default_label_expr, basic_block default_bb)
{
+ unsigned HOST_WIDE_INT range = get_range (get_low (), get_high ());
+ unsigned HOST_WIDE_INT nondefault_range = 0;
+
/* For jump table we just emit a new gswitch statement that will
be latter lowered to jump table. */
auto_vec <tree> labels;
@@ -1086,6 +1073,39 @@ jump_table_cluster::emit (tree index_expr, tree,
unshare_expr (default_label_expr), labels);
gimple_stmt_iterator gsi = gsi_start_bb (m_case_bb);
gsi_insert_after (&gsi, s, GSI_NEW_STMT);
+
+ /* Set up even probabilities for all cases. */
+ for (unsigned i = 0; i < m_cases.length (); i++)
+ {
+ simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
+ edge case_edge = find_edge (m_case_bb, sc->m_case_bb);
+ unsigned HOST_WIDE_INT case_range
+ = sc->get_range (sc->get_low (), sc->get_high ());
+ nondefault_range += case_range;
+
+ /* case_edge->aux is number of values in a jump-table that are covered
+ by the case_edge. */
+ case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + case_range);
+ }
+
+ edge default_edge = gimple_switch_default_edge (cfun, s);
+ default_edge->probability = profile_probability::never ();
+
+ for (unsigned i = 0; i < m_cases.length (); i++)
+ {
+ simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
+ edge case_edge = find_edge (m_case_bb, sc->m_case_bb);
+ case_edge->probability
+ = profile_probability::always ().apply_scale ((intptr_t)case_edge->aux,
+ range);
+ }
+
+ /* The default edge gets the complement of the non-default value fraction. */
+ default_edge->probability
+ += profile_probability::always ().apply_scale (nondefault_range,
+ range).invert ();
+
+ switch_decision_tree::reset_out_edges_aux (s);
}
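
The hunk above distributes branch probabilities by how many case values each outgoing edge covers, with the default edge taking the complement. A minimal standalone model of that arithmetic, using plain doubles instead of profile_probability (the names below are illustrative, not GCC's):

#include <cstdint>
#include <vector>

struct model_edge { uint64_t covered_values; double probability; };

/* Give each case edge a share proportional to the values it covers;
   return the fraction of RANGE left for the default edge.  */
static double
distribute_probabilities (std::vector<model_edge> &edges, uint64_t range)
{
  uint64_t nondefault = 0;
  for (model_edge &e : edges)
    {
      e.probability = (double) e.covered_values / range;  /* apply_scale analogue */
      nondefault += e.covered_values;
    }
  return 1.0 - (double) nondefault / range;  /* default edge probability */
}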
/* Find jump tables of given CLUSTERS, where all members of the vector
@@ -1252,12 +1272,16 @@ bit_test_cluster::find_bit_tests (vec<cluster *> &clusters)
return clusters.copy ();
/* Find and build the clusters. */
- for (int end = l;;)
+ for (unsigned end = l;;)
{
int start = min[end].m_start;
if (is_beneficial (clusters, start, end - 1))
- output.safe_push (new bit_test_cluster (clusters, start, end - 1));
+ {
+ bool entire = start == 0 && end == clusters.length ();
+ output.safe_push (new bit_test_cluster (clusters, start, end - 1,
+ entire));
+ }
else
for (int i = end - 1; i >= start; i--)
output.safe_push (clusters[i]);
@@ -1407,6 +1431,7 @@ bit_test_cluster::emit (tree index_expr, tree index_type,
tree minval = get_low ();
tree maxval = get_high ();
tree range = int_const_binop (MINUS_EXPR, maxval, minval);
+ unsigned HOST_WIDE_INT bt_range = get_range (minval, maxval);
/* Go through all case labels, and collect the case labels, profile
counts, and other information we need to build the branch tests. */
@@ -1425,11 +1450,11 @@ bit_test_cluster::emit (tree index_expr, tree index_type,
test[k].mask = wi::zero (prec);
test[k].target_bb = n->m_case_bb;
test[k].label = n->m_case_label_expr;
- test[k].bits = 1;
+ test[k].bits = 0;
count++;
}
- else
- test[k].bits++;
+
+ test[k].bits += n->get_range (n->get_low (), n->get_high ());
lo = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_low (), minval));
if (n->get_high () == NULL_TREE)
@@ -1486,14 +1511,20 @@ bit_test_cluster::emit (tree index_expr, tree index_type,
/*simple=*/true, NULL_TREE,
/*before=*/true, GSI_SAME_STMT);
- /* if (idx > range) goto default */
- range = force_gimple_operand_gsi (&gsi,
+ if (m_handles_entire_switch)
+ {
+ /* if (idx > range) goto default */
+ range
+ = force_gimple_operand_gsi (&gsi,
fold_convert (unsigned_index_type, range),
/*simple=*/true, NULL_TREE,
/*before=*/true, GSI_SAME_STMT);
- tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
- basic_block new_bb = hoist_edge_and_branch_if_true (&gsi, tmp, default_bb);
- gsi = gsi_last_bb (new_bb);
+ tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
+ basic_block new_bb
+ = hoist_edge_and_branch_if_true (&gsi, tmp, default_bb,
+ profile_probability::unlikely ());
+ gsi = gsi_last_bb (new_bb);
+ }
/* csui = (1 << (word_mode) idx) */
csui = make_ssa_name (word_type_node);
@@ -1506,17 +1537,23 @@ bit_test_cluster::emit (tree index_expr, tree index_type,
gsi_insert_before (&gsi, shift_stmt, GSI_SAME_STMT);
update_stmt (shift_stmt);
+ profile_probability prob = profile_probability::always ();
+
/* for each unique set of cases:
if (const & csui) goto target */
for (k = 0; k < count; k++)
{
+ prob = profile_probability::always ().apply_scale (test[k].bits,
+ bt_range);
+ bt_range -= test[k].bits;
tmp = wide_int_to_tree (word_type_node, test[k].mask);
tmp = fold_build2 (BIT_AND_EXPR, word_type_node, csui, tmp);
tmp = force_gimple_operand_gsi (&gsi, tmp,
/*simple=*/true, NULL_TREE,
/*before=*/true, GSI_SAME_STMT);
tmp = fold_build2 (NE_EXPR, boolean_type_node, tmp, word_mode_zero);
- new_bb = hoist_edge_and_branch_if_true (&gsi, tmp, test[k].target_bb);
+ basic_block new_bb
+ = hoist_edge_and_branch_if_true (&gsi, tmp, test[k].target_bb, prob);
gsi = gsi_last_bb (new_bb);
}
@@ -1524,7 +1561,8 @@ bit_test_cluster::emit (tree index_expr, tree index_type,
gcc_assert (EDGE_COUNT (gsi_bb (gsi)->succs) == 0);
/* If nothing matched, go to the default label. */
- make_edge (gsi_bb (gsi), default_bb, EDGE_FALLTHRU);
+ edge e = make_edge (gsi_bb (gsi), default_bb, EDGE_FALLTHRU);
+ e->probability = profile_probability::always ();
}
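
The loop above assigns each bit-test branch a probability conditional on all earlier tests having failed: the weight of the cases it handles divided by the weight still remaining, which then shrinks. A small standalone model of that chain (illustrative only, plain doubles rather than profile_probability):

#include <vector>

/* BITS[k] is the number of case values handled by test k; TOTAL is the
   number of values reaching the first test.  Element k of the result is
   P(test k fires | tests 0..k-1 did not).  */
static std::vector<double>
chained_probabilities (const std::vector<unsigned> &bits, unsigned total)
{
  std::vector<double> prob;
  unsigned remaining = total;
  for (unsigned b : bits)
    {
      prob.push_back ((double) b / remaining);
      remaining -= b;   /* mirrors bt_range -= test[k].bits above */
    }
  return prob;
}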
/* Split the basic block at the statement pointed to by GSIP, and insert
@@ -1544,7 +1582,8 @@ bit_test_cluster::emit (tree index_expr, tree index_type,
basic_block
bit_test_cluster::hoist_edge_and_branch_if_true (gimple_stmt_iterator *gsip,
- tree cond, basic_block case_bb)
+ tree cond, basic_block case_bb,
+ profile_probability prob)
{
tree tmp;
gcond *cond_stmt;
@@ -1552,6 +1591,7 @@ bit_test_cluster::hoist_edge_and_branch_if_true (gimple_stmt_iterator *gsip,
basic_block new_bb, split_bb = gsi_bb (*gsip);
edge e_true = make_edge (split_bb, case_bb, EDGE_TRUE_VALUE);
+ e_true->probability = prob;
gcc_assert (e_true->src == split_bb);
tmp = force_gimple_operand_gsi (gsip, cond, /*simple=*/true, NULL,
@@ -1577,15 +1617,11 @@ bit_test_cluster::hoist_edge_and_branch_if_true (gimple_stmt_iterator *gsip,
void
switch_decision_tree::compute_cases_per_edge ()
{
- basic_block bb = gimple_bb (m_switch);
- reset_out_edges_aux ();
+ reset_out_edges_aux (m_switch);
int ncases = gimple_switch_num_labels (m_switch);
for (int i = ncases - 1; i >= 1; --i)
{
- tree elt = gimple_switch_label (m_switch, i);
- tree lab = CASE_LABEL (elt);
- basic_block case_bb = label_to_block_fn (cfun, lab);
- edge case_edge = find_edge (bb, case_bb);
+ edge case_edge = gimple_switch_edge (cfun, m_switch, i);
case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + 1);
}
}
@@ -1601,8 +1637,7 @@ switch_decision_tree::analyze_switch_statement ()
auto_vec<cluster *> clusters;
clusters.create (l - 1);
- tree default_label = CASE_LABEL (gimple_switch_default_label (m_switch));
- basic_block default_bb = label_to_block_fn (cfun, default_label);
+ basic_block default_bb = gimple_switch_default_bb (cfun, m_switch);
m_case_bbs.reserve (l);
m_case_bbs.quick_push (default_bb);
@@ -1612,18 +1647,19 @@ switch_decision_tree::analyze_switch_statement ()
{
tree elt = gimple_switch_label (m_switch, i);
tree lab = CASE_LABEL (elt);
- basic_block case_bb = label_to_block_fn (cfun, lab);
+ basic_block case_bb = label_to_block (cfun, lab);
edge case_edge = find_edge (bb, case_bb);
tree low = CASE_LOW (elt);
tree high = CASE_HIGH (elt);
profile_probability p
= case_edge->probability.apply_scale (1, (intptr_t) (case_edge->aux));
- clusters.quick_push (new simple_cluster (low, high, elt, case_bb, p));
- m_case_bbs.quick_push (case_bb);
+ clusters.quick_push (new simple_cluster (low, high, elt, case_edge->dest,
+ p));
+ m_case_bbs.quick_push (case_edge->dest);
}
- reset_out_edges_aux ();
+ reset_out_edges_aux (m_switch);
/* Find jump table clusters. */
vec<cluster *> output = jump_table_cluster::find_jump_tables (clusters);
@@ -1694,9 +1730,8 @@ switch_decision_tree::try_switch_expansion (vec<cluster *> &clusters)
return false;
/* Find the default case target label. */
- tree default_label_expr = CASE_LABEL (gimple_switch_default_label (m_switch));
- m_default_bb = label_to_block_fn (cfun, default_label_expr);
- edge default_edge = find_edge (bb, m_default_bb);
+ edge default_edge = gimple_switch_default_edge (cfun, m_switch);
+ m_default_bb = default_edge->dest;
/* Do the insertion of a case label into m_case_list. The labels are
fed to us in descending order from the sorted vector of case labels used
@@ -1962,7 +1997,9 @@ switch_decision_tree::dump_case_nodes (FILE *f, case_tree_node *root,
fprintf (f, "%*s", indent_step * indent_level, "");
root->m_c->dump (f);
root->m_c->m_prob.dump (f);
- fputs ("\n", f);
+ fputs (" subtree: ", f);
+ root->m_c->m_subtree_prob.dump (f);
+ fputs (")\n", f);
dump_case_nodes (f, root->m_right, indent_step, indent_level);
}
@@ -2008,6 +2045,34 @@ switch_decision_tree::emit_cmp_and_jump_insns (basic_block bb, tree op0,
return false_edge->dest;
}
+/* Generate code to jump to LABEL_BB if OP0 and OP1 are equal.
+ PROB is the probability of jumping to LABEL_BB.
+ BB is a basic block where the new condition will be placed. */
+
+basic_block
+switch_decision_tree::do_jump_if_equal (basic_block bb, tree op0, tree op1,
+ basic_block label_bb,
+ profile_probability prob)
+{
+ op1 = fold_convert (TREE_TYPE (op0), op1);
+
+ gcond *cond = gimple_build_cond (EQ_EXPR, op0, op1, NULL_TREE, NULL_TREE);
+ gimple_stmt_iterator gsi = gsi_last_bb (bb);
+ gsi_insert_before (&gsi, cond, GSI_SAME_STMT);
+
+ gcc_assert (single_succ_p (bb));
+
+ /* Make a new basic block where false branch will take place. */
+ edge false_edge = split_block (bb, cond);
+ false_edge->flags = EDGE_FALSE_VALUE;
+ false_edge->probability = prob.invert ();
+
+ edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
+ true_edge->probability = prob;
+
+ return false_edge->dest;
+}
+
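
do_jump_if_equal returns the block reached on the false edge, so callers can chain equality tests. This is the call pattern used by emit_case_nodes in the hunk below; p_right and p_left stand for the probabilities computed there, everything else is as in the patch:

/* Each call returns the fall-through block, so the next test continues
   from it.  */
bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
                       node->m_right->m_c->m_case_bb, p_right);
bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
                       node->m_left->m_c->m_case_bb, p_left);
/* Execution reaches BB here only if INDEX matched neither leaf.  */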
/* Emit step-by-step code to select a case for the value of INDEX.
The thus generated decision tree follows the form of the
case-node binary tree NODE, whose nodes represent test conditions.
@@ -2020,41 +2085,193 @@ switch_decision_tree::emit_case_nodes (basic_block bb, tree index,
profile_probability default_prob,
tree index_type)
{
+ profile_probability p;
+
/* If node is null, we are done. */
if (node == NULL)
return bb;
- /* Branch to a label where we will handle it later. */
- basic_block test_bb = split_edge (single_succ_edge (bb));
- redirect_edge_succ (single_pred_edge (test_bb),
- single_succ_edge (bb)->dest);
-
- profile_probability probability
- = (node->m_right
- ? node->m_right->m_c->m_subtree_prob : profile_probability::never ());
- probability = ((probability + default_prob.apply_scale (1, 2))
- / (node->m_c->m_subtree_prob + default_prob));
- bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (), GT_EXPR,
- test_bb, probability);
- default_prob = default_prob.apply_scale (1, 2);
-
- /* Value belongs to this node or to the left-hand subtree. */
- probability = node->m_c->m_prob /
- (node->m_c->m_subtree_prob + default_prob);
- bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (), GE_EXPR,
- node->m_c->m_case_bb, probability);
-
- /* Handle the left-hand subtree. */
- bb = emit_case_nodes (bb, index, node->m_left,
- default_prob, index_type);
-
- /* If the left-hand subtree fell through,
- don't let it fall into the right-hand subtree. */
- if (m_default_bb)
- emit_jump (bb, m_default_bb);
+ /* Single value case. */
+ if (node->m_c->is_single_value_p ())
+ {
+ /* Node is single valued. First see if the index expression matches
+ this node and then check our children, if any. */
+ p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
+ bb = do_jump_if_equal (bb, index, node->m_c->get_low (),
+ node->m_c->m_case_bb, p);
+ /* This case is handled at this point, so remove its weight from the
+ subtree probability. */
+ node->m_c->m_subtree_prob -= p;
+
+ if (node->m_left != NULL && node->m_right != NULL)
+ {
+ /* 1) the node has both children
+
+ If both children are single-valued cases with no
+ children, finish up all the work. This way, we can save
+ one ordered comparison. */
+
+ if (!node->m_left->has_child ()
+ && node->m_left->m_c->is_single_value_p ()
+ && !node->m_right->has_child ()
+ && node->m_right->m_c->is_single_value_p ())
+ {
+ p = (node->m_right->m_c->m_prob
+ / (node->m_c->m_subtree_prob + default_prob));
+ bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
+ node->m_right->m_c->m_case_bb, p);
+
+ p = (node->m_left->m_c->m_prob
+ / (node->m_c->m_subtree_prob + default_prob));
+ bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
+ node->m_left->m_c->m_case_bb, p);
+ }
+ else
+ {
+ /* Branch to a label where we will handle it later. */
+ basic_block test_bb = split_edge (single_succ_edge (bb));
+ redirect_edge_succ (single_pred_edge (test_bb),
+ single_succ_edge (bb)->dest);
+
+ p = ((node->m_right->m_c->m_subtree_prob
+ + default_prob.apply_scale (1, 2))
+ / (node->m_c->m_subtree_prob + default_prob));
+ bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
+ GT_EXPR, test_bb, p);
+ default_prob = default_prob.apply_scale (1, 2);
+
+ /* Handle the left-hand subtree. */
+ bb = emit_case_nodes (bb, index, node->m_left,
+ default_prob, index_type);
+
+ /* If the left-hand subtree fell through,
+ don't let it fall into the right-hand subtree. */
+ if (bb && m_default_bb)
+ emit_jump (bb, m_default_bb);
+
+ bb = emit_case_nodes (test_bb, index, node->m_right,
+ default_prob, index_type);
+ }
+ }
+ else if (node->m_left == NULL && node->m_right != NULL)
+ {
+ /* 2) the node has only right child. */
+
+ /* Here we have a right child but no left so we issue a conditional
+ branch to default and process the right child.
+
+ Omit the conditional branch to default if the right child
+ does not have any children and is single valued; it would
+ cost too much space to save so little time. */
- bb = emit_case_nodes (test_bb, index, node->m_right,
- default_prob, index_type);
+ if (node->m_right->has_child ()
+ || !node->m_right->m_c->is_single_value_p ())
+ {
+ p = (default_prob.apply_scale (1, 2)
+ / (node->m_c->m_subtree_prob + default_prob));
+ bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (),
+ LT_EXPR, m_default_bb, p);
+ default_prob = default_prob.apply_scale (1, 2);
+
+ bb = emit_case_nodes (bb, index, node->m_right, default_prob,
+ index_type);
+ }
+ else
+ {
+ /* We cannot process node->right normally
+ since we haven't ruled out the numbers less than
+ this node's value. So handle node->right explicitly. */
+ p = (node->m_right->m_c->m_subtree_prob
+ / (node->m_c->m_subtree_prob + default_prob));
+ bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
+ node->m_right->m_c->m_case_bb, p);
+ }
+ }
+ else if (node->m_left != NULL && node->m_right == NULL)
+ {
+ /* 3) just one subtree, on the left. Similar case as previous. */
+
+ if (node->m_left->has_child ()
+ || !node->m_left->m_c->is_single_value_p ())
+ {
+ p = (default_prob.apply_scale (1, 2)
+ / (node->m_c->m_subtree_prob + default_prob));
+ bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
+ GT_EXPR, m_default_bb, p);
+ default_prob = default_prob.apply_scale (1, 2);
+
+ bb = emit_case_nodes (bb, index, node->m_left, default_prob,
+ index_type);
+ }
+ else
+ {
+ /* We cannot process node->left normally
+ since we haven't ruled out the numbers less than
+ this node's value. So handle node->left explicitly. */
+ p = (node->m_left->m_c->m_subtree_prob
+ / (node->m_c->m_subtree_prob + default_prob));
+ bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
+ node->m_left->m_c->m_case_bb, p);
+ }
+ }
+ }
+ else
+ {
+ /* Node is a range. These cases are very similar to those for a single
+ value, except that we do not start by testing whether this node
+ is the one to branch to. */
+ if (node->has_child () || node->m_c->get_type () != SIMPLE_CASE)
+ {
+ /* Branch to a label where we will handle it later. */
+ basic_block test_bb = split_edge (single_succ_edge (bb));
+ redirect_edge_succ (single_pred_edge (test_bb),
+ single_succ_edge (bb)->dest);
+
+ profile_probability right_prob = profile_probability::never ();
+ if (node->m_right)
+ right_prob = node->m_right->m_c->m_subtree_prob;
+ p = ((right_prob + default_prob.apply_scale (1, 2))
+ / (node->m_c->m_subtree_prob + default_prob));
+
+ bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
+ GT_EXPR, test_bb, p);
+ default_prob = default_prob.apply_scale (1, 2);
+
+ /* Value belongs to this node or to the left-hand subtree. */
+ p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
+ bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (),
+ GE_EXPR, node->m_c->m_case_bb, p);
+
+ /* Handle the left-hand subtree. */
+ bb = emit_case_nodes (bb, index, node->m_left,
+ default_prob, index_type);
+
+ /* If the left-hand subtree fell through,
+ don't let it fall into the right-hand subtree. */
+ if (bb && m_default_bb)
+ emit_jump (bb, m_default_bb);
+
+ bb = emit_case_nodes (test_bb, index, node->m_right,
+ default_prob, index_type);
+ }
+ else
+ {
+ /* Node has no children so we check low and high bounds to remove
+ redundant tests. Only one of the bounds can exist,
+ since otherwise this node is bounded--a case tested already. */
+ tree lhs, rhs;
+ generate_range_test (bb, index, node->m_c->get_low (),
+ node->m_c->get_high (), &lhs, &rhs);
+ p = default_prob / (node->m_c->m_subtree_prob + default_prob);
+
+ bb = emit_cmp_and_jump_insns (bb, lhs, rhs, GT_EXPR,
+ m_default_bb, p);
+
+ emit_jump (bb, node->m_c->m_case_bb);
+ return NULL;
+ }
+ }
return bb;
}
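
One detail worth spelling out from the hunk above: at every comparison the default weight is halved (default_prob.apply_scale (1, 2)), so the greater-than branch gets the right subtree's weight plus half of the default weight, normalized by everything still in play. A standalone numeric model of that split (illustrative, plain doubles):

/* Probability of taking the ">" branch at a range node; DEFAULT_PROB is
   halved as a side effect, matching default_prob.apply_scale (1, 2).  */
static double
gt_branch_probability (double right_subtree_prob, double subtree_prob,
                       double &default_prob)
{
  double p = (right_subtree_prob + default_prob / 2)
             / (subtree_prob + default_prob);
  default_prob /= 2;
  return p;
}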
diff --git a/gcc/tree-switch-conversion.h b/gcc/tree-switch-conversion.h
index 4beac785f05..37ed2193724 100644
--- a/gcc/tree-switch-conversion.h
+++ b/gcc/tree-switch-conversion.h
@@ -72,6 +72,13 @@ struct cluster
/* Emit GIMPLE code to handle the cluster. */
virtual void emit (tree, tree, tree, basic_block) = 0;
+ /* Return true if a cluster handles only a single case value and the
+ value is not a range. */
+ virtual bool is_single_value_p ()
+ {
+ return false;
+ }
+
/* Return range of a cluster. If value would overflow in type of LOW,
then return 0. */
static unsigned HOST_WIDE_INT get_range (tree low, tree high)
@@ -161,6 +168,11 @@ struct simple_cluster: public cluster
gcc_unreachable ();
}
+ bool is_single_value_p ()
+ {
+ return tree_int_cst_equal (get_low (), get_high ());
+ }
+
/* Low value of the case. */
tree m_low;
@@ -329,8 +341,10 @@ This transformation was contributed by Roger Sayle, see this e-mail:
struct bit_test_cluster: public group_cluster
{
/* Constructor. */
- bit_test_cluster (vec<cluster *> &clusters, unsigned start, unsigned end)
- :group_cluster (clusters, start, end)
+ bit_test_cluster (vec<cluster *> &clusters, unsigned start, unsigned end,
+ bool handles_entire_switch)
+ :group_cluster (clusters, start, end),
+ m_handles_entire_switch (handles_entire_switch)
{}
cluster_type
@@ -396,7 +410,11 @@ struct bit_test_cluster: public group_cluster
Returns the newly created basic block. */
static basic_block hoist_edge_and_branch_if_true (gimple_stmt_iterator *gsip,
tree cond,
- basic_block case_bb);
+ basic_block case_bb,
+ profile_probability prob);
+
+ /* True when the bit test handles an entire switch statement. */
+ bool m_handles_entire_switch;
/* Maximum number of different basic blocks that can be handled by
a bit test. */
@@ -429,6 +447,12 @@ struct case_tree_node
/* Empty Constructor. */
case_tree_node ();
+ /* Return true when it has a child. */
+ bool has_child ()
+ {
+ return m_left != NULL || m_right != NULL;
+ }
+
/* Left son in binary tree. */
case_tree_node *m_left;
@@ -513,10 +537,6 @@ struct switch_decision_tree
/* Attempt to expand CLUSTERS as a decision tree. Return true when
expanded. */
bool try_switch_expansion (vec<cluster *> &clusters);
-
- /* Reset the aux field of all outgoing edges of switch basic block. */
- inline void reset_out_edges_aux ();
-
/* Compute the number of case labels that correspond to each outgoing edge of
switch statement. Record this information in the aux field of the edge.
*/
@@ -576,6 +596,15 @@ struct switch_decision_tree
basic_block label_bb,
profile_probability prob);
+ /* Generate code to jump to LABEL if OP0 and OP1 are equal in mode MODE.
+ PROB is the probability of jumping to LABEL_BB. */
+ static basic_block do_jump_if_equal (basic_block bb, tree op0, tree op1,
+ basic_block label_bb,
+ profile_probability prob);
+
+ /* Reset the aux field of all outgoing edges of switch basic block. */
+ static inline void reset_out_edges_aux (gswitch *swtch);
+
/* Switch statement. */
gswitch *m_switch;
@@ -838,9 +867,9 @@ struct switch_conversion
};
void
-switch_decision_tree::reset_out_edges_aux ()
+switch_decision_tree::reset_out_edges_aux (gswitch *swtch)
{
- basic_block bb = gimple_bb (m_switch);
+ basic_block bb = gimple_bb (swtch);
edge e;
edge_iterator ei;
FOR_EACH_EDGE (e, ei, bb->succs)
diff --git a/gcc/tree-tailcall.c b/gcc/tree-tailcall.c
index 9ebed9de524..8e9c2cbd289 100644
--- a/gcc/tree-tailcall.c
+++ b/gcc/tree-tailcall.c
@@ -476,7 +476,7 @@ find_tail_calls (basic_block bb, struct tailcall **ret)
tail_recursion = false;
func = gimple_call_fndecl (call);
if (func
- && !DECL_BUILT_IN (func)
+ && !fndecl_built_in_p (func)
&& recursive_call_p (current_function_decl, func))
{
tree arg;
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index e0d05333b45..9beb9d51f87 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -5154,7 +5154,7 @@ vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
if (dump_enabled_p ())
dump_printf (MSG_MISSED_OPTIMIZATION,
- "permutaion op not supported by target.\n");
+ "permutation op not supported by target.\n");
return false;
}
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index d5e35521cef..124a4be0a67 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -5197,6 +5197,9 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs,
if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
== INTEGER_INDUC_COND_REDUCTION)
code = induc_code;
+ else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
+ == CONST_COND_REDUCTION)
+ code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info);
else
code = MAX_EXPR;
}
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index 0a9ce24608c..0ab7bd8086c 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -3606,13 +3606,11 @@ vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
{
stmt_vec_info stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
vec_info *vinfo = stmt_info->vinfo;
- tree mask_element_type = NULL_TREE, mask_type;
int vec_index = 0;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
+ unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
unsigned int mask_element;
machine_mode mode;
- unsigned HOST_WIDE_INT nunits, const_vf;
if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
return false;
@@ -3620,22 +3618,7 @@ vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
mode = TYPE_MODE (vectype);
-
- /* At the moment, all permutations are represented using per-element
- indices, so we can't cope with variable vector lengths or
- vectorization factors. */
- if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits)
- || !vf.is_constant (&const_vf))
- return false;
-
- /* The generic VEC_PERM_EXPR code always uses an integral type of the
- same size as the vector element being permuted. */
- mask_element_type = lang_hooks.types.type_for_mode
- (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))).require (), 1);
- mask_type = get_vectype_for_scalar_type (mask_element_type);
- vec_perm_builder mask (nunits, nunits, 1);
- mask.quick_grow (nunits);
- vec_perm_indices indices;
+ poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
/* Initialize the vect stmts of NODE to properly insert the generated
stmts later. */
@@ -3669,14 +3652,53 @@ vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
bool noop_p = true;
*n_perms = 0;
- for (unsigned int j = 0; j < const_vf; j++)
+ vec_perm_builder mask;
+ unsigned int nelts_to_build;
+ unsigned int nvectors_per_build;
+ bool repeating_p = (group_size == DR_GROUP_SIZE (stmt_info)
+ && multiple_p (nunits, group_size));
+ if (repeating_p)
{
- for (int k = 0; k < group_size; k++)
+ /* A single vector contains a whole number of copies of the node, so:
+ (a) all permutes can use the same mask; and
+ (b) the permutes only need a single vector input. */
+ mask.new_vector (nunits, group_size, 3);
+ nelts_to_build = mask.encoded_nelts ();
+ nvectors_per_build = SLP_TREE_VEC_STMTS (node).length ();
+ }
+ else
+ {
+ /* We need to construct a separate mask for each vector statement. */
+ unsigned HOST_WIDE_INT const_nunits, const_vf;
+ if (!nunits.is_constant (&const_nunits)
+ || !vf.is_constant (&const_vf))
+ return false;
+ mask.new_vector (const_nunits, const_nunits, 1);
+ nelts_to_build = const_vf * group_size;
+ nvectors_per_build = 1;
+ }
+
+ unsigned int count = mask.encoded_nelts ();
+ mask.quick_grow (count);
+ vec_perm_indices indices;
+
+ for (unsigned int j = 0; j < nelts_to_build; j++)
+ {
+ unsigned int iter_num = j / group_size;
+ unsigned int stmt_num = j % group_size;
+ unsigned int i = (iter_num * DR_GROUP_SIZE (stmt_info)
+ + SLP_TREE_LOAD_PERMUTATION (node)[stmt_num]);
+ if (repeating_p)
{
- unsigned int i = (SLP_TREE_LOAD_PERMUTATION (node)[k]
- + j * DR_GROUP_SIZE (stmt_info));
- vec_index = i / nunits;
- mask_element = i % nunits;
+ first_vec_index = 0;
+ mask_element = i;
+ }
+ else
+ {
+ /* Enforced before the loop when !repeating_p. */
+ unsigned int const_nunits = nunits.to_constant ();
+ vec_index = i / const_nunits;
+ mask_element = i % const_nunits;
if (vec_index == first_vec_index
|| first_vec_index == -1)
{
@@ -3686,7 +3708,7 @@ vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
|| second_vec_index == -1)
{
second_vec_index = vec_index;
- mask_element += nunits;
+ mask_element += const_nunits;
}
else
{
@@ -3702,50 +3724,54 @@ vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
return false;
}
- gcc_assert (mask_element < 2 * nunits);
- if (mask_element != index)
- noop_p = false;
- mask[index++] = mask_element;
+ gcc_assert (mask_element < 2 * const_nunits);
+ }
+
+ if (mask_element != index)
+ noop_p = false;
+ mask[index++] = mask_element;
- if (index == nunits && !noop_p)
+ if (index == count && !noop_p)
+ {
+ indices.new_vector (mask, second_vec_index == -1 ? 1 : 2, nunits);
+ if (!can_vec_perm_const_p (mode, indices))
{
- indices.new_vector (mask, 2, nunits);
- if (!can_vec_perm_const_p (mode, indices))
+ if (dump_enabled_p ())
{
- if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION,
+ vect_location,
+ "unsupported vect permute { ");
+ for (i = 0; i < count; ++i)
{
- dump_printf_loc (MSG_MISSED_OPTIMIZATION,
- vect_location,
- "unsupported vect permute { ");
- for (i = 0; i < nunits; ++i)
- {
- dump_dec (MSG_MISSED_OPTIMIZATION, mask[i]);
- dump_printf (MSG_MISSED_OPTIMIZATION, " ");
- }
- dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
+ dump_dec (MSG_MISSED_OPTIMIZATION, mask[i]);
+ dump_printf (MSG_MISSED_OPTIMIZATION, " ");
}
- gcc_assert (analyze_only);
- return false;
+ dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
}
-
- ++*n_perms;
+ gcc_assert (analyze_only);
+ return false;
}
- if (index == nunits)
+ ++*n_perms;
+ }
+
+ if (index == count)
+ {
+ if (!analyze_only)
{
- if (!analyze_only)
- {
- tree mask_vec = NULL_TREE;
+ tree mask_vec = NULL_TREE;
- if (! noop_p)
- mask_vec = vec_perm_indices_to_tree (mask_type, indices);
+ if (! noop_p)
+ mask_vec = vect_gen_perm_mask_checked (vectype, indices);
- if (second_vec_index == -1)
- second_vec_index = first_vec_index;
+ if (second_vec_index == -1)
+ second_vec_index = first_vec_index;
+ for (unsigned int ri = 0; ri < nvectors_per_build; ++ri)
+ {
/* Generate the permute statement if necessary. */
- tree first_vec = dr_chain[first_vec_index];
- tree second_vec = dr_chain[second_vec_index];
+ tree first_vec = dr_chain[first_vec_index + ri];
+ tree second_vec = dr_chain[second_vec_index + ri];
stmt_vec_info perm_stmt_info;
if (! noop_p)
{
@@ -3771,12 +3797,12 @@ vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
SLP_TREE_VEC_STMTS (node)[vect_stmts_counter++]
= perm_stmt_info;
}
-
- index = 0;
- first_vec_index = -1;
- second_vec_index = -1;
- noop_p = true;
}
+
+ index = 0;
+ first_vec_index = -1;
+ second_vec_index = -1;
+ noop_p = true;
}
}
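
To make the repeating_p case above concrete: when the group size equals the DR group size and divides the vector length, the mask element at position J depends only on J, so one encoded mask of GROUP_SIZE patterns with three elements each covers every vector statement. A standalone sketch of the encoded prefix (illustrative only; e.g. load permutation {1, 0} with group size 2 gives {1, 0, 3, 2, 5, 4}, two patterns each stepping by 2):

#include <vector>

/* Encoded prefix of the repeating permute mask: GROUP_SIZE patterns,
   three elements per pattern, assuming DR group size == GROUP_SIZE.  */
static std::vector<unsigned>
encoded_repeating_mask (const std::vector<unsigned> &perm, unsigned group_size)
{
  std::vector<unsigned> mask;
  for (unsigned j = 0; j < group_size * 3; j++)   /* mask.encoded_nelts () */
    mask.push_back ((j / group_size) * group_size + perm[j % group_size]);
  return mask;
}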
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 8fcb1e2f8bc..8d94fcae04e 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -2961,13 +2961,10 @@ vectorizable_bswap (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
vec_info *vinfo = stmt_info->vinfo;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
unsigned ncopies;
- unsigned HOST_WIDE_INT nunits, num_bytes;
op = gimple_call_arg (stmt, 0);
vectype = STMT_VINFO_VECTYPE (stmt_info);
-
- if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits))
- return false;
+ poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
/* Multiple types in SLP are handled by creating the appropriate number of
vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
@@ -2983,11 +2980,11 @@ vectorizable_bswap (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
if (! char_vectype)
return false;
- if (!TYPE_VECTOR_SUBPARTS (char_vectype).is_constant (&num_bytes))
+ poly_uint64 num_bytes = TYPE_VECTOR_SUBPARTS (char_vectype);
+ unsigned word_bytes;
+ if (!constant_multiple_p (num_bytes, nunits, &word_bytes))
return false;
- unsigned word_bytes = num_bytes / nunits;
-
/* The encoding uses one stepped pattern for each byte in the word. */
vec_perm_builder elts (num_bytes, word_bytes, 3);
for (unsigned i = 0; i < 3; ++i)
@@ -8003,13 +8000,18 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
if (slp)
{
grouped_load = false;
- /* For SLP permutation support we need to load the whole group,
- not only the number of vector stmts the permutation result
- fits in. */
- if (slp_perm)
+ /* If an SLP permutation is from N elements to N elements,
+ and if one vector holds a whole number of N, we can load
+ the inputs to the permutation in the same way as an
+ unpermuted sequence. In other cases we need to load the
+ whole group, not only the number of vector stmts the
+ permutation result fits in. */
+ if (slp_perm
+ && (group_size != SLP_INSTANCE_GROUP_SIZE (slp_node_instance)
+ || !multiple_p (nunits, group_size)))
{
- /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
- variable VF. */
+ /* We don't yet generate such SLP_TREE_LOAD_PERMUTATIONs for
+ variable VF; see vect_transform_slp_perm_load. */
unsigned int const_vf = vf.to_constant ();
unsigned int const_nunits = nunits.to_constant ();
vec_num = CEIL (group_size * const_vf, const_nunits);
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index ead19f15996..e8eb9292506 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -478,42 +478,6 @@ set_value_range_to_null (value_range *vr, tree type)
set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
}
-
-/* If abs (min) < abs (max), set VR to [-max, max], if
- abs (min) >= abs (max), set VR to [-min, min]. */
-
-static void
-abs_extent_range (value_range *vr, tree min, tree max)
-{
- int cmp;
-
- gcc_assert (TREE_CODE (min) == INTEGER_CST);
- gcc_assert (TREE_CODE (max) == INTEGER_CST);
- gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
- gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
- min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
- max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
- if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
- {
- set_value_range_to_varying (vr);
- return;
- }
- cmp = compare_values (min, max);
- if (cmp == -1)
- min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
- else if (cmp == 0 || cmp == 1)
- {
- max = min;
- min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
- }
- else
- {
- set_value_range_to_varying (vr);
- return;
- }
- set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
-}
-
/* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
bool
@@ -538,17 +502,6 @@ vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
&& bitmap_equal_p (b1, b2)));
}
-/* Return true if VR is ~[0, 0]. */
-
-bool
-range_is_nonnull (value_range *vr)
-{
- return vr->type == VR_ANTI_RANGE
- && integer_zerop (vr->min)
- && integer_zerop (vr->max);
-}
-
-
/* Return true if VR is [0, 0]. */
static inline bool
@@ -916,14 +869,25 @@ value_ranges_intersect_p (value_range *vr0, value_range *vr1)
}
-/* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
- include the value zero, -2 if we cannot tell. */
+/* Return TRUE if *VR includes the value zero. */
-int
-range_includes_zero_p (tree min, tree max)
+bool
+range_includes_zero_p (const value_range *vr)
{
- tree zero = build_int_cst (TREE_TYPE (min), 0);
- return value_inside_range (zero, min, max);
+ if (vr->type == VR_VARYING)
+ return true;
+
+ /* Ughh, we don't know. We choose not to optimize. */
+ if (vr->type == VR_UNDEFINED)
+ return true;
+
+ tree zero = build_int_cst (TREE_TYPE (vr->min), 0);
+ if (vr->type == VR_ANTI_RANGE)
+ {
+ int res = value_inside_range (zero, vr->min, vr->max);
+ return res == 0 || res == -2;
+ }
+ return value_inside_range (zero, vr->min, vr->max) != 0;
}
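
The rewritten predicate above answers a yes/no question and errs on the side of "yes" when nothing is known. A standalone model with constant bounds (the symbolic "cannot tell" case is folded into the conservative answer; the names below are illustrative):

enum model_kind { MODEL_RANGE, MODEL_ANTI_RANGE, MODEL_VARYING, MODEL_UNDEFINED };

/* Does a range of kind KIND with bounds [MIN, MAX] include zero?  */
static bool
model_includes_zero (model_kind kind, long min, long max)
{
  if (kind == MODEL_VARYING || kind == MODEL_UNDEFINED)
    return true;                       /* unknown: assume zero is possible */
  bool zero_inside = min <= 0 && 0 <= max;
  return kind == MODEL_RANGE ? zero_inside : !zero_inside;
}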
/* Return true if *VR is known to only contain nonnegative values. */
@@ -997,6 +961,9 @@ ranges_from_anti_range (value_range *ar,
vr0->type = VR_UNDEFINED;
vr1->type = VR_UNDEFINED;
+ /* As a future improvement, we could handle ~[0, A] as: [-INF, -1] U
+ [A+1, +INF]. Not sure if this helps in practice, though. */
+
if (ar->type != VR_ANTI_RANGE
|| TREE_CODE (ar->min) != INTEGER_CST
|| TREE_CODE (ar->max) != INTEGER_CST
@@ -1034,17 +1001,17 @@ ranges_from_anti_range (value_range *ar,
static void inline
extract_range_into_wide_ints (value_range *vr,
signop sign, unsigned prec,
- wide_int *wmin, wide_int *wmax)
+ wide_int &wmin, wide_int &wmax)
{
if (range_int_cst_p (vr))
{
- *wmin = wi::to_wide (vr->min);
- *wmax = wi::to_wide (vr->max);
+ wmin = wi::to_wide (vr->min);
+ wmax = wi::to_wide (vr->max);
}
else
{
- *wmin = wi::min_value (prec, sign);
- *wmax = wi::max_value (prec, sign);
+ wmin = wi::min_value (prec, sign);
+ wmax = wi::max_value (prec, sign);
}
}
@@ -1457,7 +1424,7 @@ extract_range_from_binary_expr_1 (value_range *vr,
nullness, if both are non null, then the result is nonnull.
If both are null, then the result is null. Otherwise they
are varying. */
- if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
+ if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
set_value_range_to_nonnull (vr, expr_type);
else if (range_is_null (&vr0) && range_is_null (&vr1))
set_value_range_to_null (vr, expr_type);
@@ -1468,11 +1435,8 @@ extract_range_from_binary_expr_1 (value_range *vr,
{
/* For pointer types, we are really only interested in asserting
whether the expression evaluates to non-NULL. */
- if (range_is_nonnull (&vr0)
- || range_is_nonnull (&vr1)
- || (vr1.type == VR_RANGE
- && !symbolic_range_p (&vr1)
- && !range_includes_zero_p (vr1.min, vr1.max)))
+ if (!range_includes_zero_p (&vr0)
+ || !range_includes_zero_p (&vr1))
set_value_range_to_nonnull (vr, expr_type);
else if (range_is_null (&vr0) && range_is_null (&vr1))
set_value_range_to_null (vr, expr_type);
@@ -1483,7 +1447,7 @@ extract_range_from_binary_expr_1 (value_range *vr,
{
/* For pointer types, we are really only interested in asserting
whether the expression evaluates to non-NULL. */
- if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
+ if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
set_value_range_to_nonnull (vr, expr_type);
else if (range_is_null (&vr0) || range_is_null (&vr1))
set_value_range_to_null (vr, expr_type);
@@ -1597,8 +1561,8 @@ extract_range_from_binary_expr_1 (value_range *vr,
wide_int wmin, wmax;
wide_int vr0_min, vr0_max;
wide_int vr1_min, vr1_max;
- extract_range_into_wide_ints (&vr0, sign, prec, &vr0_min, &vr0_max);
- extract_range_into_wide_ints (&vr1, sign, prec, &vr1_min, &vr1_max);
+ extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
+ extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
if (wide_int_range_min_max (wmin, wmax, code, sign, prec,
vr0_min, vr0_max, vr1_min, vr1_max))
set_value_range (vr, VR_RANGE,
@@ -1668,109 +1632,55 @@ extract_range_from_binary_expr_1 (value_range *vr,
|| code == EXACT_DIV_EXPR
|| code == ROUND_DIV_EXPR)
{
- if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
+ wide_int dividend_min, dividend_max, divisor_min, divisor_max;
+ wide_int wmin, wmax, extra_min, extra_max;
+ bool extra_range_p;
+
+ /* Special case explicit division by zero as undefined. */
+ if (range_is_null (&vr1))
{
- /* For division, if op1 has VR_RANGE but op0 does not, something
- can be deduced just from that range. Say [min, max] / [4, max]
- gives [min / 4, max / 4] range. */
- if (vr1.type == VR_RANGE
- && !symbolic_range_p (&vr1)
- && range_includes_zero_p (vr1.min, vr1.max) == 0)
- {
- vr0.type = type = VR_RANGE;
- vr0.min = vrp_val_min (expr_type);
- vr0.max = vrp_val_max (expr_type);
- }
+ /* However, we must not eliminate a division by zero if
+ flag_non_call_exceptions. */
+ if (cfun->can_throw_non_call_exceptions)
+ set_value_range_to_varying (vr);
else
- {
- set_value_range_to_varying (vr);
- return;
- }
+ set_value_range_to_undefined (vr);
+ return;
}
- /* For divisions, if flag_non_call_exceptions is true, we must
- not eliminate a division by zero. */
- if (cfun->can_throw_non_call_exceptions
- && (vr1.type != VR_RANGE
- || range_includes_zero_p (vr1.min, vr1.max) != 0))
+ /* First, normalize ranges into constants we can handle. Note
+ that VR_ANTI_RANGE's of constants were already normalized
+ before arriving here.
+
+ NOTE: As a future improvement, we may be able to do better
+ with mixed symbolic (anti-)ranges like [0, A]. See note in
+ ranges_from_anti_range. */
+ extract_range_into_wide_ints (&vr0, sign, prec,
+ dividend_min, dividend_max);
+ extract_range_into_wide_ints (&vr1, sign, prec,
+ divisor_min, divisor_max);
+ if (!wide_int_range_div (wmin, wmax, code, sign, prec,
+ dividend_min, dividend_max,
+ divisor_min, divisor_max,
+ TYPE_OVERFLOW_UNDEFINED (expr_type),
+ TYPE_OVERFLOW_WRAPS (expr_type),
+ extra_range_p, extra_min, extra_max))
{
set_value_range_to_varying (vr);
return;
}
-
- /* For divisions, if op0 is VR_RANGE, we can deduce a range
- even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
- include 0. */
- if (vr0.type == VR_RANGE
- && (vr1.type != VR_RANGE
- || range_includes_zero_p (vr1.min, vr1.max) != 0))
- {
- tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
- int cmp;
-
- min = NULL_TREE;
- max = NULL_TREE;
- if (TYPE_UNSIGNED (expr_type)
- || value_range_nonnegative_p (&vr1))
- {
- /* For unsigned division or when divisor is known
- to be non-negative, the range has to cover
- all numbers from 0 to max for positive max
- and all numbers from min to 0 for negative min. */
- cmp = compare_values (vr0.max, zero);
- if (cmp == -1)
- {
- /* When vr0.max < 0, vr1.min != 0 and value
- ranges for dividend and divisor are available. */
- if (vr1.type == VR_RANGE
- && !symbolic_range_p (&vr0)
- && !symbolic_range_p (&vr1)
- && compare_values (vr1.min, zero) != 0)
- max = int_const_binop (code, vr0.max, vr1.min);
- else
- max = zero;
- }
- else if (cmp == 0 || cmp == 1)
- max = vr0.max;
- else
- type = VR_VARYING;
- cmp = compare_values (vr0.min, zero);
- if (cmp == 1)
- {
- /* For unsigned division when value ranges for dividend
- and divisor are available. */
- if (vr1.type == VR_RANGE
- && !symbolic_range_p (&vr0)
- && !symbolic_range_p (&vr1)
- && compare_values (vr1.max, zero) != 0)
- min = int_const_binop (code, vr0.min, vr1.max);
- else
- min = zero;
- }
- else if (cmp == 0 || cmp == -1)
- min = vr0.min;
- else
- type = VR_VARYING;
- }
- else
- {
- /* Otherwise the range is -max .. max or min .. -min
- depending on which bound is bigger in absolute value,
- as the division can change the sign. */
- abs_extent_range (vr, vr0.min, vr0.max);
- return;
- }
- if (type == VR_VARYING)
- {
- set_value_range_to_varying (vr);
- return;
- }
- }
- else if (range_int_cst_p (&vr0) && range_int_cst_p (&vr1))
+ set_value_range (vr, VR_RANGE,
+ wide_int_to_tree (expr_type, wmin),
+ wide_int_to_tree (expr_type, wmax), NULL);
+ if (extra_range_p)
{
- extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
- return;
+ value_range extra_range = VR_INITIALIZER;
+ set_value_range (&extra_range, VR_RANGE,
+ wide_int_to_tree (expr_type, extra_min),
+ wide_int_to_tree (expr_type, extra_max), NULL);
+ vrp_meet (vr, &extra_range);
}
+ return;
}
else if (code == TRUNC_MOD_EXPR)
{
@@ -1781,8 +1691,8 @@ extract_range_from_binary_expr_1 (value_range *vr,
}
wide_int wmin, wmax, tmp;
wide_int vr0_min, vr0_max, vr1_min, vr1_max;
- extract_range_into_wide_ints (&vr0, sign, prec, &vr0_min, &vr0_max);
- extract_range_into_wide_ints (&vr1, sign, prec, &vr1_min, &vr1_max);
+ extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
+ extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
wide_int_range_trunc_mod (wmin, wmax, sign, prec,
vr0_min, vr0_max, vr1_min, vr1_max);
min = wide_int_to_tree (expr_type, wmin);
@@ -1803,8 +1713,8 @@ extract_range_from_binary_expr_1 (value_range *vr,
&may_be_nonzero0, &must_be_nonzero0);
vrp_set_zero_nonzero_bits (expr_type, &vr1,
&may_be_nonzero1, &must_be_nonzero1);
- extract_range_into_wide_ints (&vr0, sign, prec, &vr0_min, &vr0_max);
- extract_range_into_wide_ints (&vr1, sign, prec, &vr1_min, &vr1_max);
+ extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
+ extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
if (code == BIT_AND_EXPR)
{
if (wide_int_range_bit_and (wmin, wmax, sign, prec,
@@ -1975,7 +1885,7 @@ extract_range_from_unary_expr (value_range *vr,
determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
if (POINTER_TYPE_P (type))
{
- if (range_is_nonnull (&vr0))
+ if (!range_includes_zero_p (&vr0))
set_value_range_to_nonnull (vr, type);
else if (range_is_null (&vr0))
set_value_range_to_null (vr, type);
@@ -2033,7 +1943,7 @@ extract_range_from_unary_expr (value_range *vr,
}
wide_int wmin, wmax;
wide_int vr0_min, vr0_max;
- extract_range_into_wide_ints (&vr0, sign, prec, &vr0_min, &vr0_max);
+ extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
if (wide_int_range_abs (wmin, wmax, sign, prec, vr0_min, vr0_max,
TYPE_OVERFLOW_UNDEFINED (type)))
set_value_range (vr, VR_RANGE,
@@ -3564,7 +3474,7 @@ find_switch_asserts (basic_block bb, gswitch *last)
for (idx = 0; idx < n; ++idx)
{
ci[idx].expr = gimple_switch_label (last, idx);
- ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
+ ci[idx].bb = label_to_block (cfun, CASE_LABEL (ci[idx].expr));
}
edge default_edge = find_edge (bb, ci[0].bb);
qsort (ci, n, sizeof (struct case_info), compare_case_labels);
@@ -6107,17 +6017,9 @@ vrp_meet_1 (value_range *vr0, const value_range *vr1)
{
/* Failed to find an efficient meet. Before giving up and setting
the result to VARYING, see if we can at least derive a useful
- anti-range. FIXME, all this nonsense about distinguishing
- anti-ranges from ranges is necessary because of the odd
- semantics of range_includes_zero_p and friends. */
- if (((saved.type == VR_RANGE
- && range_includes_zero_p (saved.min, saved.max) == 0)
- || (saved.type == VR_ANTI_RANGE
- && range_includes_zero_p (saved.min, saved.max) == 1))
- && ((vr1->type == VR_RANGE
- && range_includes_zero_p (vr1->min, vr1->max) == 0)
- || (vr1->type == VR_ANTI_RANGE
- && range_includes_zero_p (vr1->min, vr1->max) == 1)))
+ anti-range. */
+ if (range_includes_zero_p (&saved) == 0
+ && range_includes_zero_p (vr1) == 0)
{
set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
@@ -6627,10 +6529,7 @@ vrp_prop::vrp_finalize (bool warn_array_bounds_p)
continue;
if (POINTER_TYPE_P (TREE_TYPE (name))
- && ((vr->type == VR_RANGE
- && range_includes_zero_p (vr->min, vr->max) == 0)
- || (vr->type == VR_ANTI_RANGE
- && range_includes_zero_p (vr->min, vr->max) == 1)))
+ && range_includes_zero_p (vr) == 0)
set_ptr_nonnull (name);
else if (!POINTER_TYPE_P (TREE_TYPE (name)))
set_range_info (name, vr->type,
diff --git a/gcc/tree-vrp.h b/gcc/tree-vrp.h
index 0c1fb3637cf..f8c1c0c8f72 100644
--- a/gcc/tree-vrp.h
+++ b/gcc/tree-vrp.h
@@ -86,7 +86,7 @@ extern void register_edge_assert_for (tree, edge, enum tree_code,
tree, tree, vec<assert_info> &);
extern bool stmt_interesting_for_vrp (gimple *);
extern void set_value_range_to_varying (value_range *);
-extern int range_includes_zero_p (tree, tree);
+extern bool range_includes_zero_p (const value_range *);
extern bool infer_value_range (gimple *, tree, tree_code *, tree *);
extern void set_value_range_to_nonnull (value_range *, tree);
@@ -96,7 +96,6 @@ extern void set_and_canonicalize_value_range (value_range *,
enum value_range_type,
tree, tree, bitmap);
extern bool vrp_bitmap_equal_p (const_bitmap, const_bitmap);
-extern bool range_is_nonnull (value_range *);
extern tree value_range_constant_singleton (value_range *);
extern bool symbolic_range_p (value_range *);
extern int compare_values (tree, tree);
diff --git a/gcc/tree.c b/gcc/tree.c
index f00a519b302..c3ac8f36d55 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -5240,7 +5240,7 @@ need_assembler_name_p (tree decl)
{
/* Do not set assembler name on builtins. Allow RTL expansion to
decide whether to expand inline or via a regular call. */
- if (DECL_BUILT_IN (decl)
+ if (fndecl_built_in_p (decl)
&& DECL_BUILT_IN_CLASS (decl) != BUILT_IN_FRONTEND)
return false;
@@ -5339,12 +5339,7 @@ free_lang_data_in_decl (tree decl)
(DECL_CONTEXT (DECL_ABSTRACT_ORIGIN (decl))))
DECL_ABSTRACT_ORIGIN (decl) = NULL_TREE;
- /* Sometimes the C++ frontend doesn't manage to transform a temporary
- DECL_VINDEX referring to itself into a vtable slot number as it
- should. Happens with functions that are copied and then forgotten
- about. Just clear it, it won't matter anymore. */
- if (DECL_VINDEX (decl) && !tree_fits_shwi_p (DECL_VINDEX (decl)))
- DECL_VINDEX (decl) = NULL_TREE;
+ DECL_VINDEX (decl) = NULL_TREE;
}
else if (VAR_P (decl))
{
@@ -5371,10 +5366,9 @@ free_lang_data_in_decl (tree decl)
nodes and thus we can't use TREE_CHAIN in multiple lists. */
tree *nextp = &BLOCK_VARS (DECL_INITIAL (decl));
while (*nextp)
- {
- tree var = *nextp;
- if (TREE_CODE (var) == FUNCTION_DECL
- && DECL_BUILT_IN (var))
+ {
+ tree var = *nextp;
+ if (fndecl_built_in_p (var))
*nextp = TREE_CHAIN (var);
else
nextp = &TREE_CHAIN (var);
@@ -5525,9 +5519,14 @@ find_decls_types_r (tree *tp, int *ws, void *data)
fld_worklist_push (TYPE_POINTER_TO (t), fld);
fld_worklist_push (TYPE_REFERENCE_TO (t), fld);
fld_worklist_push (TYPE_NAME (t), fld);
- /* Do not walk TYPE_NEXT_PTR_TO or TYPE_NEXT_REF_TO. We do not stream
- them and thus do not and want not to reach unused pointer types
- this way. */
+ /* While we do not stream TYPE_POINTER_TO and TYPE_REFERENCE_TO
+ lists, we may look types up in these lists and use them while
+ optimizing the function body. Thus we need to free lang data
+ in them. */
+ if (TREE_CODE (t) == POINTER_TYPE)
+ fld_worklist_push (TYPE_NEXT_PTR_TO (t), fld);
+ if (TREE_CODE (t) == REFERENCE_TYPE)
+ fld_worklist_push (TYPE_NEXT_REF_TO (t), fld);
if (!POINTER_TYPE_P (t))
fld_worklist_push (TYPE_MIN_VALUE_RAW (t), fld);
/* TYPE_MAX_VALUE_RAW is TYPE_BINFO for record types. */
@@ -8990,7 +8989,7 @@ decl_function_context (const_tree decl)
C++ should really be fixed to use DECL_CONTEXT for the real context,
and use something else for the "virtual context". */
- else if (TREE_CODE (decl) == FUNCTION_DECL && DECL_VINDEX (decl))
+ else if (TREE_CODE (decl) == FUNCTION_DECL && DECL_VIRTUAL_P (decl))
context
= TYPE_MAIN_VARIANT
(TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (decl)))));
@@ -9100,7 +9099,7 @@ get_call_combined_fn (const_tree call)
return as_combined_fn (CALL_EXPR_IFN (call));
tree fndecl = get_callee_fndecl (call);
- if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
return as_combined_fn (DECL_FUNCTION_CODE (fndecl));
return CFN_LAST;
diff --git a/gcc/tree.h b/gcc/tree.h
index 5d4f034e008..4f415b7a220 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -1743,6 +1743,13 @@ extern tree maybe_wrap_with_location (tree, location_t);
#define SSA_NAME_IS_DEFAULT_DEF(NODE) \
SSA_NAME_CHECK (NODE)->base.default_def_flag
+/* Nonzero if this SSA_NAME is known to point to memory that may not
+ be written to. This is set for default defs of function parameters
+ that have a corresponding r or R specification in the function's
+ fn spec attribute. This is used by alias analysis. */
+#define SSA_NAME_POINTS_TO_READONLY_MEMORY(NODE) \
+ SSA_NAME_CHECK (NODE)->base.deprecated_flag
+
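
A hedged sketch of how a client in alias analysis might consult the new flag; the helper function below is hypothetical, only the macro comes from this patch:

/* Hypothetical helper: a load whose base pointer is known to reference
   read-only memory cannot be clobbered by any store in the function.  */
static bool
load_may_be_clobbered_p (tree base)
{
  if (TREE_CODE (base) == SSA_NAME
      && SSA_NAME_POINTS_TO_READONLY_MEMORY (base))
    return false;
  return true;   /* conservative default */
}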
/* Attributes for SSA_NAMEs for pointer-type variables. */
#define SSA_NAME_PTR_INFO(N) \
SSA_NAME_CHECK (N)->ssa_name.info.ptr_info
@@ -2995,25 +3002,11 @@ extern vec<tree, va_gc> **decl_debug_args_insert (tree);
#define DECL_STRUCT_FUNCTION(NODE) \
(FUNCTION_DECL_CHECK (NODE)->function_decl.f)
-/* In a FUNCTION_DECL, nonzero means a built in function of a
- standard library or more generally a built in function that is
- recognized by optimizers and expanders.
-
- Note that it is different from the DECL_IS_BUILTIN accessor. For
- instance, user declared prototypes of C library functions are not
- DECL_IS_BUILTIN but may be DECL_BUILT_IN. */
-#define DECL_BUILT_IN(NODE) (DECL_BUILT_IN_CLASS (NODE) != NOT_BUILT_IN)
/* For a builtin function, identify which part of the compiler defined it. */
#define DECL_BUILT_IN_CLASS(NODE) \
(FUNCTION_DECL_CHECK (NODE)->function_decl.built_in_class)
-/* For a function declaration, return true if NODE is non-null and it is
- a builtin of a CLASS with requested NAME. */
-#define DECL_BUILT_IN_P(NODE, CLASS, NAME) \
- (NODE != NULL_TREE && DECL_BUILT_IN_CLASS (NODE) == CLASS \
- && DECL_FUNCTION_CODE (NODE) == NAME)
-
/* In FUNCTION_DECL, a chain of ..._DECL nodes. */
#define DECL_ARGUMENTS(NODE) \
(FUNCTION_DECL_CHECK (NODE)->function_decl.arguments)
@@ -5848,4 +5841,44 @@ type_has_mode_precision_p (const_tree t)
return known_eq (TYPE_PRECISION (t), GET_MODE_PRECISION (TYPE_MODE (t)));
}
+/* Return true if a FUNCTION_DECL NODE is a GCC built-in function.
+
+ Note that it is different from the DECL_IS_BUILTIN accessor. For
+ instance, user declared prototypes of C library functions are not
+ DECL_IS_BUILTIN but may satisfy fndecl_built_in_p. */
+
+inline bool
+fndecl_built_in_p (const_tree node)
+{
+ return (DECL_BUILT_IN_CLASS (node) != NOT_BUILT_IN);
+}
+
+/* Return true if a FUNCTION_DECL NODE is a GCC built-in function
+ of class KLASS. */
+
+inline bool
+fndecl_built_in_p (const_tree node, built_in_class klass)
+{
+ return (fndecl_built_in_p (node) && DECL_BUILT_IN_CLASS (node) == klass);
+}
+
+/* Return true if a FUNCTION_DECL NODE is a GCC built-in function
+ of class KLASS with name equal to NAME. */
+
+inline bool
+fndecl_built_in_p (const_tree node, int name, built_in_class klass)
+{
+ return (fndecl_built_in_p (node, klass) && DECL_FUNCTION_CODE (node) == name);
+}
+
+/* Return true if a FUNCTION_DECL NODE is a GCC built-in function
+ of BUILT_IN_NORMAL class with name equal to NAME. */
+
+inline bool
+fndecl_built_in_p (const_tree node, built_in_function name)
+{
+ return (fndecl_built_in_p (node, BUILT_IN_NORMAL)
+ && DECL_FUNCTION_CODE (node) == name);
+}
+
#endif /* GCC_TREE_H */
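
The call-site conversions elsewhere in this patch follow a simple mapping onto the new overloads; a hedged illustration (the wrapper function below is made up, the macros and enumerators are GCC's):

static bool
classify_builtin (tree fndecl)
{
  if (fndecl == NULL_TREE || TREE_CODE (fndecl) != FUNCTION_DECL)
    return false;
  /* was: DECL_BUILT_IN (fndecl) */
  bool any_class = fndecl_built_in_p (fndecl);
  /* was: DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL */
  bool normal = fndecl_built_in_p (fndecl, BUILT_IN_NORMAL);
  /* was: normal class plus DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMCPY */
  bool is_memcpy = fndecl_built_in_p (fndecl, BUILT_IN_MEMCPY);
  return any_class && (normal || is_memcpy);
}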
diff --git a/gcc/ubsan.c b/gcc/ubsan.c
index 722f5702612..9bbcecc58af 100644
--- a/gcc/ubsan.c
+++ b/gcc/ubsan.c
@@ -663,7 +663,7 @@ bool
is_ubsan_builtin_p (tree t)
{
return TREE_CODE (t) == FUNCTION_DECL
- && DECL_BUILT_IN_CLASS (t) == BUILT_IN_NORMAL
+ && fndecl_built_in_p (t, BUILT_IN_NORMAL)
&& strncmp (IDENTIFIER_POINTER (DECL_NAME (t)),
"__builtin___ubsan_", 18) == 0;
}
diff --git a/gcc/varasm.c b/gcc/varasm.c
index 0d3609e2807..2180da48895 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -2401,7 +2401,7 @@ static hash_set<tree> *pending_assemble_externals_set;
static bool
incorporeal_function_p (tree decl)
{
- if (TREE_CODE (decl) == FUNCTION_DECL && DECL_BUILT_IN (decl))
+ if (TREE_CODE (decl) == FUNCTION_DECL && fndecl_built_in_p (decl))
{
const char *name;
@@ -3923,7 +3923,6 @@ output_constant_pool_2 (fixed_size_mode mode, rtx x, unsigned int align)
case MODE_UFRACT:
case MODE_ACCUM:
case MODE_UACCUM:
- case MODE_POINTER_BOUNDS:
assemble_integer (x, GET_MODE_SIZE (mode), align, 1);
break;
diff --git a/gcc/vr-values.c b/gcc/vr-values.c
index 33335f3da31..11df1023b6e 100644
--- a/gcc/vr-values.c
+++ b/gcc/vr-values.c
@@ -343,7 +343,7 @@ vr_values::vrp_stmt_computes_nonzero (gimple *stmt)
&& TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
{
value_range *vr = get_value_range (TREE_OPERAND (base, 0));
- if (range_is_nonnull (vr))
+ if (!range_includes_zero_p (vr))
return true;
}
}
@@ -1107,12 +1107,8 @@ vr_values::extract_range_basic (value_range *vr, gimple *stmt)
if (TREE_CODE (arg) == SSA_NAME)
{
value_range *vr0 = get_value_range (arg);
- /* If arg is non-zero, then ffs or popcount
- are non-zero. */
- if ((vr0->type == VR_RANGE
- && range_includes_zero_p (vr0->min, vr0->max) == 0)
- || (vr0->type == VR_ANTI_RANGE
- && range_includes_zero_p (vr0->min, vr0->max) == 1))
+ /* If arg is non-zero, then ffs or popcount are non-zero. */
+ if (range_includes_zero_p (vr0) == 0)
mini = 1;
/* If some high bits are known to be zero,
we can decrease the maximum. */
@@ -2711,7 +2707,7 @@ vr_values::vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
}
*taken_edge_p = find_edge (gimple_bb (stmt),
- label_to_block (CASE_LABEL (val)));
+ label_to_block (cfun, CASE_LABEL (val)));
if (dump_file && (dump_flags & TDF_DETAILS))
{
@@ -3764,7 +3760,8 @@ vr_values::simplify_switch_using_ranges (gswitch *stmt)
for (i = 0; i < n2; ++i)
{
e = find_edge (gimple_bb (stmt),
- label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
+ label_to_block (cfun,
+ CASE_LABEL (TREE_VEC_ELT (vec2, i))));
e->aux = (void *)-1;
}
diff --git a/gcc/wide-int-range.cc b/gcc/wide-int-range.cc
index a202b5fd503..3cdcede04cd 100644
--- a/gcc/wide-int-range.cc
+++ b/gcc/wide-int-range.cc
@@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tree.h"
+#include "function.h"
#include "fold-const.h"
#include "wide-int-range.h"
@@ -663,3 +664,72 @@ wide_int_range_abs (wide_int &min, wide_int &max,
return false;
return true;
}
+
+/* Calculate a division operation on two ranges and store the result in
+ [WMIN, WMAX] U [EXTRA_MIN, EXTRA_MAX].
+
+ If EXTRA_RANGE_P is set upon return, EXTRA_MIN/EXTRA_MAX hold
+ meaningful information, otherwise they should be ignored.
+
+ Return TRUE if we were able to successfully calculate the new range. */
+
+bool
+wide_int_range_div (wide_int &wmin, wide_int &wmax,
+ tree_code code, signop sign, unsigned prec,
+ const wide_int &dividend_min, const wide_int &dividend_max,
+ const wide_int &divisor_min, const wide_int &divisor_max,
+ bool overflow_undefined,
+ bool overflow_wraps,
+ bool &extra_range_p,
+ wide_int &extra_min, wide_int &extra_max)
+{
+ extra_range_p = false;
+
+ /* If we know we won't divide by zero, just do the division. */
+ if (!wide_int_range_includes_zero_p (divisor_min, divisor_max, sign))
+ return wide_int_range_multiplicative_op (wmin, wmax, code, sign, prec,
+ dividend_min, dividend_max,
+ divisor_min, divisor_max,
+ overflow_undefined,
+ overflow_wraps);
+
+ /* If flag_non_call_exceptions, we must not eliminate a division
+ by zero. */
+ if (cfun->can_throw_non_call_exceptions)
+ return false;
+
+ /* If we're definitely dividing by zero, there's nothing to do. */
+ if (wide_int_range_zero_p (divisor_min, divisor_max, prec))
+ return false;
+
+ /* Perform the division in 2 parts, [LB, -1] and [1, UB],
+ which will skip any division by zero.
+
+ First divide by the negative numbers, if any. */
+ if (wi::neg_p (divisor_min, sign))
+ {
+ if (!wide_int_range_multiplicative_op (wmin, wmax,
+ code, sign, prec,
+ dividend_min, dividend_max,
+ divisor_min, wi::minus_one (prec),
+ overflow_undefined,
+ overflow_wraps))
+ return false;
+ extra_range_p = true;
+ }
+ /* Then divide by the non-zero positive numbers, if any. */
+ if (wi::gt_p (divisor_max, wi::zero (prec), sign))
+ {
+ if (!wide_int_range_multiplicative_op (extra_range_p ? extra_min : wmin,
+ extra_range_p ? extra_max : wmax,
+ code, sign, prec,
+ dividend_min, dividend_max,
+ wi::one (prec), divisor_max,
+ overflow_undefined,
+ overflow_wraps))
+ return false;
+ }
+ else
+ extra_range_p = false;
+ return true;
+}
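
A standalone numeric model of the two-part division above (plain long instead of wide_int, truncating division, no overflow handling): with dividend [-8, 20] and divisor [-2, 5], the negative part [-2, -1] yields [-20, 8] and the positive part [1, 5] yields [-8, 20]; those two sub-ranges correspond to what WMIN/WMAX and EXTRA_MIN/EXTRA_MAX carry back to the caller.

/* Corner-evaluation model of dividing two constant ranges whose divisor
   does not contain zero (illustrative only).  A divisor straddling zero
   is handled by calling this twice, on [vmin, -1] and on [1, vmax].  */
static void
model_range_div (long dmin, long dmax, long vmin, long vmax,
                 long &lo, long &hi)
{
  long c[4] = { dmin / vmin, dmin / vmax, dmax / vmin, dmax / vmax };
  lo = hi = c[0];
  for (int i = 1; i < 4; i++)
    {
      lo = c[i] < lo ? c[i] : lo;
      hi = c[i] > hi ? c[i] : hi;
    }
}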
diff --git a/gcc/wide-int-range.h b/gcc/wide-int-range.h
index 41198e05b13..427ef34c6b4 100644
--- a/gcc/wide-int-range.h
+++ b/gcc/wide-int-range.h
@@ -99,6 +99,17 @@ extern bool wide_int_range_abs (wide_int &min, wide_int &max,
const wide_int &vr0_min,
const wide_int &vr0_max,
bool overflow_undefined);
+extern bool wide_int_range_div (wide_int &wmin, wide_int &wmax,
+ enum tree_code code,
+ signop sign, unsigned prec,
+ const wide_int &dividend_min,
+ const wide_int &dividend_max,
+ const wide_int &divisor_min,
+ const wide_int &divisor_max,
+ bool overflow_undefined,
+ bool overflow_wraps,
+ bool &extra_range_p,
+ wide_int &extra_min, wide_int &extra_max);
/* Return TRUE if shifting by range [MIN, MAX] is undefined behavior. */
@@ -137,4 +148,22 @@ wide_int_range_min_max (wide_int &min, wide_int &max,
return true;
}
+/* Return TRUE if 0 is within [WMIN, WMAX]. */
+
+inline bool
+wide_int_range_includes_zero_p (const wide_int &wmin, const wide_int &wmax,
+ signop sign)
+{
+ return wi::le_p (wmin, 0, sign) && wi::ge_p (wmax, 0, sign);
+}
+
+/* Return TRUE if [WMIN, WMAX] is the singleton 0. */
+
+inline bool
+wide_int_range_zero_p (const wide_int &wmin, const wide_int &wmax,
+ unsigned prec)
+{
+ return wmin == wmax && wi::eq_p (wmin, wi::zero (prec));
+}
+
#endif /* GCC_WIDE_INT_RANGE_H */
diff --git a/include/ChangeLog b/include/ChangeLog
index 6d04fa14a67..e2e115a2eed 100644
--- a/include/ChangeLog
+++ b/include/ChangeLog
@@ -1,6 +1,6 @@
2018-07-26 Martin Liska <mliska@suse.cz>
- PR lto/86548
+ PR lto/86548
* libiberty.h (make_temp_file_with_prefix): New function.
2018-05-30 Jan Hubicka <hubicka@ucw.cz>
@@ -440,8 +440,8 @@
* libiberty.h (xasprintf): Declare.
2014-12-11 Uros Bizjak <ubizjak@gmail.com>
- Ben Elliston <bje@au.ibm.com>
- Manuel Lopez-Ibanez <manu@gcc.gnu.org>
+ Ben Elliston <bje@au.ibm.com>
+ Manuel Lopez-Ibanez <manu@gcc.gnu.org>
* libiberty.h (xvasprintf): Declare.
@@ -631,7 +631,7 @@
DEMANGLE_COMPONENT_TAGGED_NAME.
2012-10-29 Sterling Augustine <saugustine@google.com>
- Cary Coutant <ccoutant@google.com>
+ Cary Coutant <ccoutant@google.com>
* dwarf2.h (dwarf_location_list_entry_type): New enum with fields
DW_LLE_GNU_end_of_list_entry, DW_LLE_GNU_base_address_selection_entry,
@@ -1268,7 +1268,7 @@
* splay-tree.h (libi_uhostptr_t, libi_shostptr_t): New types,
needed for WIN64 when a long is not wide enough for a pointer.
- (splay_tree_key, splay_tree_value): Use the new types.
+ (splay_tree_key, splay_tree_value): Use the new types.
2007-05-07 Nathan Froyd <froydnj@codesourcery.com>
@@ -1308,7 +1308,7 @@
* ansidecl.h (ATTRIBUTE_PACKED): Define.
2006-11-30 Andrew Stubbs <andrew.stubbs@st.com>
- J"orn Rennecke <joern.rennecke@st.com>
+ J"orn Rennecke <joern.rennecke@st.com>
PR driver/29931
* libiberty.h (make_relative_prefix_ignore_links): Declare.
diff --git a/intl/ChangeLog b/intl/ChangeLog
index 6150e7e250f..5dfd2ea61a1 100644
--- a/intl/ChangeLog
+++ b/intl/ChangeLog
@@ -132,8 +132,8 @@
2004-04-25 Paolo Bonzini <bonzini@gnu.org>
- * configure.ac: Point config.intl to the parent directory of
- ${top_builddir}.
+ * configure.ac: Point config.intl to the parent directory of
+ ${top_builddir}.
* configure: Regenerate.
2004-03-10 Kelley Cook <kcook@gcc.gnu.org>
diff --git a/libada/ChangeLog b/libada/ChangeLog
index 6661c4d3832..2db7cbb5021 100644
--- a/libada/ChangeLog
+++ b/libada/ChangeLog
@@ -187,11 +187,11 @@
2009-04-06 Laurent GUERBY <laurent@guerby.net>
- * Makefile.in (ADA_RTS_DIR): Define.
- * Makefile.in (gnatlib-*): Link adainclude and adalib to it.
+ * Makefile.in (ADA_RTS_DIR): Define.
+ * Makefile.in (gnatlib-*): Link adainclude and adalib to it.
2008-09-21 Laurent Guerby <laurent@guerby.net>
- Paolo Bonzini <bonzini@gnu.org>
+ Paolo Bonzini <bonzini@gnu.org>
PR ada/5911
* Makefile.in (all, install, mostlyclean, clean, distclean): Add
diff --git a/libatomic/ChangeLog b/libatomic/ChangeLog
index 41c52731415..e51e864ae10 100644
--- a/libatomic/ChangeLog
+++ b/libatomic/ChangeLog
@@ -116,7 +116,7 @@
* configure.tgt: Add RISC-V tuple.
2017-02-01 Richard Henderson <rth@redhat.com>
- Torvald Riegel <triegel@redhat.com>
+ Torvald Riegel <triegel@redhat.com>
* acinclude.m4: Add #define FAST_ATOMIC_LDST_*.
* auto-config.h.in: Regenerate.
diff --git a/libbacktrace/ChangeLog b/libbacktrace/ChangeLog
index 140bf4d155b..ea43f145c3d 100644
--- a/libbacktrace/ChangeLog
+++ b/libbacktrace/ChangeLog
@@ -881,11 +881,11 @@
2012-09-19 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
Ian Lance Taylor <iant@google.com>
- * configure.ac (GCC_HEADER_STDINT): Invoke.
- * backtrace.h: If we can't find <stdint.h>, use "gstdint.h".
- * btest.c: Don't include <stdint.h>.
- * dwarf.c: Likewise.
- * configure, aclocal.m4, Makefile.in, config.h.in: Rebuild.
+ * configure.ac (GCC_HEADER_STDINT): Invoke.
+ * backtrace.h: If we can't find <stdint.h>, use "gstdint.h".
+ * btest.c: Don't include <stdint.h>.
+ * dwarf.c: Likewise.
+ * configure, aclocal.m4, Makefile.in, config.h.in: Rebuild.
2012-09-18 Ian Lance Taylor <iant@google.com>
diff --git a/libcpp/ChangeLog b/libcpp/ChangeLog
index de6f5421b12..59052be1ba6 100644
--- a/libcpp/ChangeLog
+++ b/libcpp/ChangeLog
@@ -1,3 +1,33 @@
+2018-08-30 Nathan Sidwell <nathan@acm.org>
+
+ * include/line-map.h (enum lc_reason): Comment each member
+ separately.
+ (struct line_maps): Fix reallocator comment.
+
+2018-08-27 David Malcolm <dmalcolm@redhat.com>
+
+ PR 87091
+ * include/line-map.h (enum range_display_kind): New enum.
+ (struct location_range): Replace field "m_show_caret_p" with
+ "m_range_display_kind", converting from bool to the new enum.
+ (class rich_location): Add example of line insertion fix-it hint.
+ (rich_location::add_range): Convert param "show_caret_p" from bool
+ to enum range_display_kind and rename to "range_display_kind",
+ giving it a default of SHOW_RANGE_WITHOUT_CARET.
+ (rich_location::set_range): Likewise, albeit without a default.
+ * line-map.c (rich_location::rich_location): Update for conversion
+ of show_caret_p to tri-state enum.
+ (rich_location::add_range): Likewise.
+ (rich_location::set_range): Likewise.
+
+2018-08-24 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR bootstrap/86872
+ * line-map.c (pure_location_p): Return true if linemap_lookup
+ returns NULL.
+ (linemap_add): Set start_location to 0 if we run out of line map
+ space.
+
2018-08-20 Nathan Sidwell <nathan@acm.org>
* include/cpplib.h: Fixup some whitespace.
@@ -272,10 +302,10 @@
2018-01-18 Boris Kolpackov <boris@codesynthesis.com>
- PR other/70268
- * include/cpplib.h (cpp_callbacks::remap_filename): New callback.
- * macro.c (_cpp_builtin_macro_text): Call remap_filename for
- __FILE__ and __BASE_FILE__.
+ PR other/70268
+ * include/cpplib.h (cpp_callbacks::remap_filename): New callback.
+ * macro.c (_cpp_builtin_macro_text): Call remap_filename for
+ __FILE__ and __BASE_FILE__.
2018-01-10 Kelvin Nilsen <kelvin@gcc.gnu.org>
diff --git a/libcpp/include/line-map.h b/libcpp/include/line-map.h
index 4f0ff8719a7..fd067589b63 100644
--- a/libcpp/include/line-map.h
+++ b/libcpp/include/line-map.h
@@ -61,20 +61,14 @@ inline int compare (linenum_type lhs, linenum_type rhs)
return 0;
}
-/* Reason for creating a new line map with linemap_add. LC_ENTER is
- when including a new file, e.g. a #include directive in C.
- LC_LEAVE is when reaching a file's end. LC_RENAME is when a file
- name or line number changes for neither of the above reasons
- (e.g. a #line directive in C); LC_RENAME_VERBATIM is like LC_RENAME
- but a filename of "" is not specially interpreted as standard
- input. LC_ENTER_MACRO is when a macro expansion is about to start. */
+/* Reason for creating a new line map with linemap_add. */
enum lc_reason
{
- LC_ENTER = 0,
- LC_LEAVE,
- LC_RENAME,
- LC_RENAME_VERBATIM,
- LC_ENTER_MACRO,
+ LC_ENTER = 0, /* Begin #include. */
+ LC_LEAVE, /* Return to including file. */
+ LC_RENAME, /* Other reason for name change. */
+ LC_RENAME_VERBATIM, /* Likewise, but "" != stdin. */
+ LC_ENTER_MACRO, /* Begin macro expansion. */
/* FIXME: add support for stringize and paste. */
LC_HWM /* High Water Mark. */
};
@@ -777,8 +771,7 @@ struct GTY(()) line_maps {
may require allocating a new line_map. */
unsigned int max_column_hint;
- /* If non-null, the allocator to use when resizing 'maps'. If null,
- xrealloc is used. */
+ /* The allocator to use when resizing 'maps', defaults to xrealloc. */
line_map_realloc reallocator;
/* The allocators' function used to know the actual size it
@@ -1283,6 +1276,36 @@ typedef struct
class range_label;
+/* A hint to diagnostic_show_locus on how to print a source range within a
+ rich_location.
+
+ Typically this is SHOW_RANGE_WITH_CARET for the 0th range, and
+ SHOW_RANGE_WITHOUT_CARET for subsequent ranges,
+ but the Fortran frontend uses SHOW_RANGE_WITH_CARET repeatedly for
+ printing things like:
+
+ x = x + y
+ 1 2
+ Error: Shapes for operands at (1) and (2) are not conformable
+
+ where "1" and "2" are notionally carets. */
+
+enum range_display_kind
+{
+ /* Show the pertinent source line(s), the caret, and underline(s). */
+ SHOW_RANGE_WITH_CARET,
+
+ /* Show the pertinent source line(s) and underline(s), but don't
+ show the caret (just an underline). */
+ SHOW_RANGE_WITHOUT_CARET,
+
+ /* Just show the source lines; don't show the range itself.
+ This is for use when displaying some line-insertion fix-it hints (for
+ showing the user context on the change, for when it doesn't make sense
+ to highlight the first column on the next line). */
+ SHOW_LINES_WITHOUT_RANGE
+};
+
/* A location within a rich_location: a caret&range, with
the caret potentially flagged for display, and an optional
label. */
@@ -1291,16 +1314,7 @@ struct location_range
{
source_location m_loc;
- /* Should a caret be drawn for this range? Typically this is
- true for the 0th range, and false for subsequent ranges,
- but the Fortran frontend overrides this for rendering things like:
-
- x = x + y
- 1 2
- Error: Shapes for operands at (1) and (2) are not conformable
-
- where "1" and "2" are notionally carets. */
- bool m_show_caret_p;
+ enum range_display_kind m_range_display_kind;
/* If non-NULL, the label for this range. */
const range_label *m_label;
@@ -1567,6 +1581,18 @@ class fixit_hint;
added via
richloc.add_fixit_replace ("color");
+ Example J: fix-it hint: line insertion
+ **************************************
+
+ 3 | #include <stddef.h>
+ + |+#include <stdio.h>
+ 4 | int the_next_line;
+
+ This rich location has a single range at line 4 column 1, marked
+ with SHOW_LINES_WITHOUT_RANGE (to avoid printing a meaningless caret
   + on the "i" of int).  It has an insertion fix-it hint of the string
+ "#include <stdio.h>\n".
+
Adding a fix-it hint can fail: for example, attempts to insert content
at the transition between two line maps may fail due to there being no
source_location (aka location_t) value to express the new location.
@@ -1610,11 +1636,14 @@ class rich_location
source_location get_loc (unsigned int idx) const;
void
- add_range (source_location loc, bool show_caret_p,
+ add_range (source_location loc,
+ enum range_display_kind range_display_kind
+ = SHOW_RANGE_WITHOUT_CARET,
const range_label *label = NULL);
void
- set_range (unsigned int idx, source_location loc, bool show_caret_p);
+ set_range (unsigned int idx, source_location loc,
+ enum range_display_kind range_display_kind);
unsigned int get_num_locations () const { return m_ranges.count (); }
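The former bool is now a three-way choice, so a range can ask for a caret, an underline only, or just the source lines (as in Example J above). A self-contained sketch of how a printer might branch on the new enum, for illustration only (this is not the diagnostic-show-locus code):

#include <stdio.h>

enum range_display_kind
{
  SHOW_RANGE_WITH_CARET,
  SHOW_RANGE_WITHOUT_CARET,
  SHOW_LINES_WITHOUT_RANGE
};

/* Illustration: decide what to draw for one range.  */
static void
describe_range (enum range_display_kind kind)
{
  switch (kind)
    {
    case SHOW_RANGE_WITH_CARET:
      puts ("print source line, underline, and caret");
      break;
    case SHOW_RANGE_WITHOUT_CARET:
      puts ("print source line and underline only");
      break;
    case SHOW_LINES_WITHOUT_RANGE:
      puts ("print source line only, e.g. as context for a line-insertion fix-it");
      break;
    }
}

int
main (void)
{
  describe_range (SHOW_LINES_WITHOUT_RANGE);
  return 0;
}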
diff --git a/libcpp/line-map.c b/libcpp/line-map.c
index f0e6318e412..73d94443090 100644
--- a/libcpp/line-map.c
+++ b/libcpp/line-map.c
@@ -304,6 +304,8 @@ pure_location_p (line_maps *set, source_location loc)
return false;
const line_map *map = linemap_lookup (set, loc);
+ if (map == NULL)
+ return true;
const line_map_ordinary *ordmap = linemap_check_ordinary (map);
if (loc & ((1U << ordmap->m_range_bits) - 1))
@@ -492,6 +494,11 @@ linemap_add (struct line_maps *set, enum lc_reason reason,
}
linemap_assert (reason != LC_ENTER_MACRO);
+
+ if (start_location >= LINE_MAP_MAX_LOCATION)
+ /* We ran out of line map space. */
+ start_location = 0;
+
line_map_ordinary *map
= linemap_check_ordinary (new_linemap (set, start_location));
map->reason = reason;
@@ -1998,7 +2005,7 @@ rich_location::rich_location (line_maps *set, source_location loc,
m_seen_impossible_fixit (false),
m_fixits_cannot_be_auto_applied (false)
{
- add_range (loc, true, label);
+ add_range (loc, SHOW_RANGE_WITH_CARET, label);
}
/* The destructor for class rich_location. */
@@ -2074,18 +2081,19 @@ rich_location::override_column (int column)
/* Add the given range. */
void
-rich_location::add_range (source_location loc, bool show_caret_p,
+rich_location::add_range (source_location loc,
+ enum range_display_kind range_display_kind,
const range_label *label)
{
location_range range;
range.m_loc = loc;
- range.m_show_caret_p = show_caret_p;
+ range.m_range_display_kind = range_display_kind;
range.m_label = label;
m_ranges.push (range);
}
/* Add or overwrite the location given by IDX, setting its location to LOC,
- and setting its "should my caret be printed" flag to SHOW_CARET_P.
+ and setting its m_range_display_kind to RANGE_DISPLAY_KIND.
It must either overwrite an existing location, or add one *exactly* on
the end of the array.
@@ -2099,19 +2107,19 @@ rich_location::add_range (source_location loc, bool show_caret_p,
void
rich_location::set_range (unsigned int idx, source_location loc,
- bool show_caret_p)
+ enum range_display_kind range_display_kind)
{
/* We can either overwrite an existing range, or add one exactly
on the end of the array. */
linemap_assert (idx <= m_ranges.count ());
if (idx == m_ranges.count ())
- add_range (loc, show_caret_p);
+ add_range (loc, range_display_kind);
else
{
location_range *locrange = get_range (idx);
locrange->m_loc = loc;
- locrange->m_show_caret_p = show_caret_p;
+ locrange->m_range_display_kind = range_display_kind;
}
if (idx == 0)
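rich_location::set_range keeps its contract of either overwriting an existing slot or appending exactly one past the end; a plain-array sketch of that invariant (illustration only, with a hypothetical helper rather than the m_ranges vector):

#include <assert.h>

/* IDX must either name an existing element or be exactly one past
   the end, mirroring the linemap_assert in set_range.  */
static void
set_or_append (int *vals, unsigned *count, unsigned idx, int val)
{
  assert (idx <= *count);
  if (idx == *count)
    vals[(*count)++] = val;   /* append exactly at the end */
  else
    vals[idx] = val;          /* overwrite an existing element */
}

int
main (void)
{
  int vals[4];
  unsigned count = 0;
  set_or_append (vals, &count, 0, 10);  /* append */
  set_or_append (vals, &count, 0, 20);  /* overwrite */
  set_or_append (vals, &count, 1, 30);  /* append at end */
  return 0;
}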
diff --git a/libdecnumber/ChangeLog b/libdecnumber/ChangeLog
index 600faae8e4b..262df6b6012 100644
--- a/libdecnumber/ChangeLog
+++ b/libdecnumber/ChangeLog
@@ -274,7 +274,7 @@
2009-04-01 Ben Elliston <bje@au.ibm.com>
- * decContext.h: Include gstdint.h instead of <stdint.h>.
+ * decContext.h: Include gstdint.h instead of <stdint.h>.
2009-03-30 Ben Elliston <bje@au.ibm.com>
@@ -621,11 +621,11 @@
2006-01-02 Paolo Bonzini <bonzini@gnu.org>
- PR target/25259
- * configure.ac: Use GCC_HEADER_STDINT.
- * decContext.h: Include gstdint.h.
- * aclocal.m4: Regenerate.
- * configure: Regenerate.
+ PR target/25259
+ * configure.ac: Use GCC_HEADER_STDINT.
+ * decContext.h: Include gstdint.h.
+ * aclocal.m4: Regenerate.
+ * configure: Regenerate.
2005-12-20 Roger Sayle <roger@eyesopen.com>
@@ -681,7 +681,7 @@
2005-11-29 Ben Elliston <bje@au.ibm.com>
* decimal32.h, decimal64.h, decimal128.h: New.
- * decimal32.c, decimal64.c, decimal128.c: Likewise.
+ * decimal32.c, decimal64.c, decimal128.c: Likewise.
* decContext.c, decContext.h: Likewise.
* decUtility.c, decUtility.h: Likewise.
* decNumber.c, decNumber.h, decNumberLocal.h: Likewise.
diff --git a/libffi/ChangeLog b/libffi/ChangeLog
index d63d1cee47c..b4cae169e8e 100644
--- a/libffi/ChangeLog
+++ b/libffi/ChangeLog
@@ -462,7 +462,7 @@
* configure: Rebuild.
2012-10-30 James Greenhalgh <james.greenhalgh at arm.com>
- Marcus Shawcroft <marcus.shawcroft at arm.com>
+ Marcus Shawcroft <marcus.shawcroft at arm.com>
* README: Add details of aarch64 port.
* src/aarch64/ffi.c: New.
@@ -473,7 +473,7 @@
* Makefile.in, configure: Rebuilt.
2012-10-30 James Greenhalgh <james.greenhalgh at arm.com>
- Marcus Shawcroft <marcus.shawcroft at arm.com>
+ Marcus Shawcroft <marcus.shawcroft at arm.com>
* testsuite/lib/libffi.exp: Add support for aarch64.
* testsuite/libffi.call/cls_struct_va1.c: New.
diff --git a/libgcc/ChangeLog b/libgcc/ChangeLog
index d003fd54c6e..8a350e14e0c 100644
--- a/libgcc/ChangeLog
+++ b/libgcc/ChangeLog
@@ -1,3 +1,10 @@
+2018-08-23 Richard Earnshaw <rearnsha@arm.com>
+
+ PR target/86951
+ * config/arm/lib1funcs.asm (speculation_barrier): New function.
+ * config/arm/t-arm (LIB1ASMFUNCS): Add it to list of functions
+ to build.
+
2018-08-22 Iain Sandoe <iain@sandoe.co.uk>
* config/unwind-dw2-fde-darwin.c
@@ -62,7 +69,7 @@
2018-08-01 Martin Liska <mliska@suse.cz>
* libgcov-profiler.c (__gcov_indirect_call_profiler_v2): Do not
- check that __gcov_indirect_call_callee is non-null.
+ check that __gcov_indirect_call_callee is non-null.
2018-07-30 Christophe Lyon <christophe.lyon@linaro.org>
@@ -136,30 +143,30 @@
2018-06-07 Martin Liska <mliska@suse.cz>
* libgcov-driver.c: Rename cs_all to all and assign it from
- all_prg.
+ all_prg.
2018-06-07 Martin Liska <mliska@suse.cz>
- PR bootstrap/86057
+ PR bootstrap/86057
* libgcov-driver-system.c (replace_filename_variables): Use
- memcpy instead of mempcpy.
+ memcpy instead of mempcpy.
(allocate_filename_struct): Do not allocate filename, allocate
- prefix and set it.
+ prefix and set it.
(gcov_exit_open_gcda_file): Allocate memory for gf->filename
- here and properly copy content into it.
+ here and properly copy content into it.
* libgcov-driver.c (struct gcov_filename): Remove max_length
- field, change prefix from size_t into char *.
+ field, change prefix from size_t into char *.
(compute_summary): Do not calculate longest filename.
(gcov_do_dump): Release memory of gf.filename after each file.
* libgcov-util.c (compute_summary): Use new signature of
- compute_summary.
+ compute_summary.
(calculate_overlap): Likewise.
2018-06-05 Martin Liska <mliska@suse.cz>
PR gcov-profile/47618
* libgcov-driver-system.c (replace_filename_variables): New
- function.
+ function.
(gcov_exit_open_gcda_file): Use it.
2018-06-05 Martin Liska <mliska@suse.cz>
@@ -190,15 +197,15 @@
2018-05-30 Rasmus Villemoes <rasmus.villemoes@prevas.dk>
- * crtstuff.c: Remove declaration of _Jv_RegisterClasses.
+ * crtstuff.c: Remove declaration of _Jv_RegisterClasses.
2018-05-29 Martin Liska <mliska@suse.cz>
- PR gcov-profile/85759
+ PR gcov-profile/85759
* libgcov-driver-system.c (gcov_error): Introduce usage of
- GCOV_EXIT_AT_ERROR env. variable.
+ GCOV_EXIT_AT_ERROR env. variable.
* libgcov-driver.c (merge_one_data): Print error that we
- overwrite a gcov file with a different timestamp.
+ overwrite a gcov file with a different timestamp.
2018-05-23 Kalamatee <kalamatee@gmail.com>
@@ -782,8 +789,8 @@
config/i386/enable-execute-stack-mingw32.c
2017-08-01 Jerome Lambourg <lambourg@adacore.com>
- Doug Rupp <rupp@adacore.com>
- Olivier Hainque <hainque@adacore.com>
+ Doug Rupp <rupp@adacore.com>
+ Olivier Hainque <hainque@adacore.com>
* config.host (arm-wrs-vxworks*): Rework to handle arm-wrs-vxworks7
as well as arm-wrs-vxworks.
@@ -919,10 +926,10 @@
Matthieu Sarter <matthieu.sarter.external@atos.net>
David Edelsohn <dje.gcc@gmail.com>
- * config/rs6000/aix-unwind.h (MD_FALLBACK_FRAME_STATE_FOR): Define
- unconditionally.
- (ucontext_for): Add 64-bit AIX 6.1, 7.1, 7.2 support. Add 32-bit
- AIX 7.2 support.
+ * config/rs6000/aix-unwind.h (MD_FALLBACK_FRAME_STATE_FOR): Define
+ unconditionally.
+ (ucontext_for): Add 64-bit AIX 6.1, 7.1, 7.2 support. Add 32-bit
+ AIX 7.2 support.
2017-06-02 Olivier Hainque <hainque@adacore.com>
diff --git a/libgcc/ChangeLog.meissner b/libgcc/ChangeLog.meissner
index 730efaa3610..c19f5752247 100644
--- a/libgcc/ChangeLog.meissner
+++ b/libgcc/ChangeLog.meissner
@@ -1,3 +1,7 @@
+2018-08-30 Michael Meissner <meissner@linux.ibm.com>
+
+ Merge up to 263992.
+
2018-08-22 Michael Meissner <meissner@linux.ibm.com>
Merge up to 263784.
diff --git a/libgcc/config/arm/lib1funcs.S b/libgcc/config/arm/lib1funcs.S
index b9919aa966d..ff06d504a4c 100644
--- a/libgcc/config/arm/lib1funcs.S
+++ b/libgcc/config/arm/lib1funcs.S
@@ -1533,6 +1533,50 @@ LSYM(Lover12):
#error "This is only for ARM EABI GNU/Linux"
#endif
#endif /* L_clear_cache */
+
+#ifdef L_speculation_barrier
+ FUNC_START speculation_barrier
+#if __ARM_ARCH >= 7
+ isb
+ dsb sy
+#elif defined __ARM_EABI__ && defined __linux__
+ /* We don't have a speculation barrier directly for this
+ platform/architecture variant. But we can use a kernel
+ clear_cache service routine which will emit such instructions
+ if run on a later version of the architecture. We don't
+ really want to flush the cache, but we must give it a valid
+ address, so just clear pc..pc+1. */
+#if defined __thumb__ && !defined __thumb2__
+ push {r7}
+ mov r7, #0xf
+ lsl r7, #16
+ add r7, #2
+ adr r0, . + 4
+ add r1, r0, #1
+ mov r2, #0
+ svc 0
+ pop {r7}
+#else
+ do_push {r7}
+#ifdef __ARM_ARCH_6T2__
+ movw r7, #2
+ movt r7, #0xf
+#else
+ mov r7, #0xf0000
+ add r7, r7, #2
+#endif
+ add r0, pc, #0 /* ADR. */
+ add r1, r0, #1
+ mov r2, #0
+ svc 0
+ do_pop {r7}
+#endif /* Thumb1 only */
+#else
+#warning "No speculation barrier defined for this platform"
+#endif
+ RET
+ FUNC_END speculation_barrier
+#endif
/* ------------------------------------------------------------------------ */
/* Dword shift operations. */
/* All the following Dword shift variants rely on the fact that
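In the pre-ARMv7 fallback above, the Thumb-1 and ARM sequences differ only in how they materialize the service number in r7 before "svc 0"; both end up with 0x0f0002, which appears to be the ARM-private cacheflush call the comment refers to. The arithmetic, written out as plain C for illustration:

#include <stdio.h>

int
main (void)
{
  /* Thumb-1 sequence: mov r7, #0xf; lsl r7, #16; add r7, #2.  */
  unsigned int r7 = 0xf;
  r7 <<= 16;            /* 0x000f0000 */
  r7 += 2;              /* 0x000f0002 */

  /* ARMv6T2 sequence: movw r7, #2; movt r7, #0xf gives the same value.  */
  unsigned int r7_movw_movt = (0xf << 16) | 2;

  printf ("0x%x 0x%x\n", r7, r7_movw_movt);
  return 0;
}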
diff --git a/libgcc/config/arm/t-arm b/libgcc/config/arm/t-arm
index 9e85ac06b14..274bf2a8ef3 100644
--- a/libgcc/config/arm/t-arm
+++ b/libgcc/config/arm/t-arm
@@ -1,6 +1,6 @@
LIB1ASMSRC = arm/lib1funcs.S
LIB1ASMFUNCS = _thumb1_case_sqi _thumb1_case_uqi _thumb1_case_shi \
- _thumb1_case_uhi _thumb1_case_si
+ _thumb1_case_uhi _thumb1_case_si _speculation_barrier
HAVE_CMSE:=$(findstring __ARM_FEATURE_CMSE,$(shell $(gcc_compile_bare) -dM -E - </dev/null))
ifneq ($(shell $(gcc_compile_bare) -E -mcmse - </dev/null 2>/dev/null),)
diff --git a/libgfortran/ChangeLog b/libgfortran/ChangeLog
index 63d01e705ad..76228e26b69 100644
--- a/libgfortran/ChangeLog
+++ b/libgfortran/ChangeLog
@@ -1,3 +1,45 @@
+2018-08-25 Thomas Koenig <tkoenig@gcc.gnu.org>
+
+ PR libfortran/86704
+ * m4/matmul_internal.m4: Correct calculation of needed buffer size
+ for arrays of shape (1,n).
+ * generated/matmul_c10.c: Regenerated
+ * generated/matmul_c16.c: Regenerated
+ * generated/matmul_c4.c: Regenerated
+ * generated/matmul_c8.c: Regenerated
+ * generated/matmul_i1.c: Regenerated
+ * generated/matmul_i16.c: Regenerated
+ * generated/matmul_i2.c: Regenerated
+ * generated/matmul_i4.c: Regenerated
+ * generated/matmul_i8.c: Regenerated
+ * generated/matmul_r10.c: Regenerated
+ * generated/matmul_r16.c: Regenerated
+ * generated/matmul_r4.c: Regenerated
+ * generated/matmul_r8.c: Regenerated
+ * generated/matmulavx128_c10.c: Regenerated
+ * generated/matmulavx128_c16.c: Regenerated
+ * generated/matmulavx128_c4.c: Regenerated
+ * generated/matmulavx128_c8.c: Regenerated
+ * generated/matmulavx128_i1.c: Regenerated
+ * generated/matmulavx128_i16.c: Regenerated
+ * generated/matmulavx128_i2.c: Regenerated
+ * generated/matmulavx128_i4.c: Regenerated
+ * generated/matmulavx128_i8.c: Regenerated
+ * generated/matmulavx128_r10.c: Regenerated
+ * generated/matmulavx128_r16.c: Regenerated
+ * generated/matmulavx128_r4.c: Regenerated
+ * generated/matmulavx128_r8.c: Regenerated
+
+2018-08-23 David Edelsohn <dje.gcc@gmail.com>
+
+ * async.h (ASYNC_IO): Revert _AIX test.
+
+2018-08-22 Thomas Koenig <tkoenig@gcc.gnu.org>
+
+ * async.h: Set ASYNC_IO to zero if _AIX is defined.
+ (struct adv_cond): If ASYNC_IO is zero, the struct has no members.
+ (async_unit): If ASYNC_IO is zero, remove unneeded members.
+
2018-08-21 Nicolas Koenig <koenigni@gcc.gnu.org>
Thomas Koenig <tkoenig@gcc.gnu.org>
diff --git a/libgfortran/generated/matmul_c10.c b/libgfortran/generated/matmul_c10.c
index 7cd8a6d2a0d..462d71e23f5 100644
--- a/libgfortran/generated/matmul_c10.c
+++ b/libgfortran/generated/matmul_c10.c
@@ -317,8 +317,13 @@ matmul_c10_avx (gfc_array_c10 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -869,8 +874,13 @@ matmul_c10_avx2 (gfc_array_c10 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1421,8 +1431,13 @@ matmul_c10_avx512f (gfc_array_c10 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1987,8 +2002,13 @@ matmul_c10_vanilla (gfc_array_c10 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -2613,8 +2633,13 @@ matmul_c10 (gfc_array_c10 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
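Per the libgfortran ChangeLog entry above (PR libfortran/86704), these hunks, repeated across every generated matmul variant, size the temporary t1 from rystride instead of a_dim1 whenever aystride == 1, since for matrices of shape (1,n) the old formula could under-size the buffer. A standalone sketch comparing the two sizings; the concrete stride values below are made up for illustration, the real ones come from the gfc_array descriptors:

#include <stdio.h>

typedef long index_type;

static index_type
t1_size_old (index_type a_dim1, index_type ycount, index_type b_dim1)
{
  index_type t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
  return t1_dim > 65536 ? 65536 : t1_dim;
}

static index_type
t1_size_new (index_type a_dim1, index_type rystride, index_type aystride,
             index_type b_dim1)
{
  index_type a_sz = (aystride == 1) ? rystride : a_dim1;
  index_type t1_dim = a_sz * 256 + b_dim1;
  return t1_dim > 65536 ? 65536 : t1_dim;
}

int
main (void)
{
  /* Hypothetical (1,n) case: a single row in A gives a_dim1 == 1 and
     ycount == 1, while the result stride can be larger.  */
  printf ("old=%ld new=%ld\n",
          t1_size_old (1, 1, 8),
          t1_size_new (1, 4, 1, 8));
  return 0;
}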
diff --git a/libgfortran/generated/matmul_c16.c b/libgfortran/generated/matmul_c16.c
index d93099ea71e..2062739ce1f 100644
--- a/libgfortran/generated/matmul_c16.c
+++ b/libgfortran/generated/matmul_c16.c
@@ -317,8 +317,13 @@ matmul_c16_avx (gfc_array_c16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -869,8 +874,13 @@ matmul_c16_avx2 (gfc_array_c16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1421,8 +1431,13 @@ matmul_c16_avx512f (gfc_array_c16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1987,8 +2002,13 @@ matmul_c16_vanilla (gfc_array_c16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -2613,8 +2633,13 @@ matmul_c16 (gfc_array_c16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmul_c4.c b/libgfortran/generated/matmul_c4.c
index eeef3699dcd..91d193dca37 100644
--- a/libgfortran/generated/matmul_c4.c
+++ b/libgfortran/generated/matmul_c4.c
@@ -317,8 +317,13 @@ matmul_c4_avx (gfc_array_c4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -869,8 +874,13 @@ matmul_c4_avx2 (gfc_array_c4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1421,8 +1431,13 @@ matmul_c4_avx512f (gfc_array_c4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1987,8 +2002,13 @@ matmul_c4_vanilla (gfc_array_c4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -2613,8 +2633,13 @@ matmul_c4 (gfc_array_c4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmul_c8.c b/libgfortran/generated/matmul_c8.c
index 7a73f671938..425af85d1bb 100644
--- a/libgfortran/generated/matmul_c8.c
+++ b/libgfortran/generated/matmul_c8.c
@@ -317,8 +317,13 @@ matmul_c8_avx (gfc_array_c8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -869,8 +874,13 @@ matmul_c8_avx2 (gfc_array_c8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1421,8 +1431,13 @@ matmul_c8_avx512f (gfc_array_c8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1987,8 +2002,13 @@ matmul_c8_vanilla (gfc_array_c8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -2613,8 +2633,13 @@ matmul_c8 (gfc_array_c8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmul_i1.c b/libgfortran/generated/matmul_i1.c
index f3559163bee..0c9335d4322 100644
--- a/libgfortran/generated/matmul_i1.c
+++ b/libgfortran/generated/matmul_i1.c
@@ -317,8 +317,13 @@ matmul_i1_avx (gfc_array_i1 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -869,8 +874,13 @@ matmul_i1_avx2 (gfc_array_i1 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1421,8 +1431,13 @@ matmul_i1_avx512f (gfc_array_i1 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1987,8 +2002,13 @@ matmul_i1_vanilla (gfc_array_i1 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -2613,8 +2633,13 @@ matmul_i1 (gfc_array_i1 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmul_i16.c b/libgfortran/generated/matmul_i16.c
index c8a08eb8e60..b9334b3278f 100644
--- a/libgfortran/generated/matmul_i16.c
+++ b/libgfortran/generated/matmul_i16.c
@@ -317,8 +317,13 @@ matmul_i16_avx (gfc_array_i16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -869,8 +874,13 @@ matmul_i16_avx2 (gfc_array_i16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1421,8 +1431,13 @@ matmul_i16_avx512f (gfc_array_i16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1987,8 +2002,13 @@ matmul_i16_vanilla (gfc_array_i16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -2613,8 +2633,13 @@ matmul_i16 (gfc_array_i16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmul_i2.c b/libgfortran/generated/matmul_i2.c
index ae8a45978b0..e4246e948df 100644
--- a/libgfortran/generated/matmul_i2.c
+++ b/libgfortran/generated/matmul_i2.c
@@ -317,8 +317,13 @@ matmul_i2_avx (gfc_array_i2 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -869,8 +874,13 @@ matmul_i2_avx2 (gfc_array_i2 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1421,8 +1431,13 @@ matmul_i2_avx512f (gfc_array_i2 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1987,8 +2002,13 @@ matmul_i2_vanilla (gfc_array_i2 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -2613,8 +2633,13 @@ matmul_i2 (gfc_array_i2 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmul_i4.c b/libgfortran/generated/matmul_i4.c
index 9a3822b754a..78cf27c4fcd 100644
--- a/libgfortran/generated/matmul_i4.c
+++ b/libgfortran/generated/matmul_i4.c
@@ -317,8 +317,13 @@ matmul_i4_avx (gfc_array_i4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -869,8 +874,13 @@ matmul_i4_avx2 (gfc_array_i4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1421,8 +1431,13 @@ matmul_i4_avx512f (gfc_array_i4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1987,8 +2002,13 @@ matmul_i4_vanilla (gfc_array_i4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -2613,8 +2633,13 @@ matmul_i4 (gfc_array_i4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmul_i8.c b/libgfortran/generated/matmul_i8.c
index 88bfd62923f..cf8c401400d 100644
--- a/libgfortran/generated/matmul_i8.c
+++ b/libgfortran/generated/matmul_i8.c
@@ -317,8 +317,13 @@ matmul_i8_avx (gfc_array_i8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -869,8 +874,13 @@ matmul_i8_avx2 (gfc_array_i8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1421,8 +1431,13 @@ matmul_i8_avx512f (gfc_array_i8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1987,8 +2002,13 @@ matmul_i8_vanilla (gfc_array_i8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -2613,8 +2633,13 @@ matmul_i8 (gfc_array_i8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmul_r10.c b/libgfortran/generated/matmul_r10.c
index d4f13114734..e4309c80a6b 100644
--- a/libgfortran/generated/matmul_r10.c
+++ b/libgfortran/generated/matmul_r10.c
@@ -317,8 +317,13 @@ matmul_r10_avx (gfc_array_r10 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -869,8 +874,13 @@ matmul_r10_avx2 (gfc_array_r10 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1421,8 +1431,13 @@ matmul_r10_avx512f (gfc_array_r10 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1987,8 +2002,13 @@ matmul_r10_vanilla (gfc_array_r10 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -2613,8 +2633,13 @@ matmul_r10 (gfc_array_r10 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmul_r16.c b/libgfortran/generated/matmul_r16.c
index f56d1c3066d..1ab554660d7 100644
--- a/libgfortran/generated/matmul_r16.c
+++ b/libgfortran/generated/matmul_r16.c
@@ -317,8 +317,13 @@ matmul_r16_avx (gfc_array_r16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -869,8 +874,13 @@ matmul_r16_avx2 (gfc_array_r16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1421,8 +1431,13 @@ matmul_r16_avx512f (gfc_array_r16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1987,8 +2002,13 @@ matmul_r16_vanilla (gfc_array_r16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -2613,8 +2633,13 @@ matmul_r16 (gfc_array_r16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmul_r4.c b/libgfortran/generated/matmul_r4.c
index a5677b96969..97dba9825b1 100644
--- a/libgfortran/generated/matmul_r4.c
+++ b/libgfortran/generated/matmul_r4.c
@@ -317,8 +317,13 @@ matmul_r4_avx (gfc_array_r4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -869,8 +874,13 @@ matmul_r4_avx2 (gfc_array_r4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1421,8 +1431,13 @@ matmul_r4_avx512f (gfc_array_r4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1987,8 +2002,13 @@ matmul_r4_vanilla (gfc_array_r4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -2613,8 +2633,13 @@ matmul_r4 (gfc_array_r4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmul_r8.c b/libgfortran/generated/matmul_r8.c
index d9acdd9d92f..5e4c9500260 100644
--- a/libgfortran/generated/matmul_r8.c
+++ b/libgfortran/generated/matmul_r8.c
@@ -317,8 +317,13 @@ matmul_r8_avx (gfc_array_r8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -869,8 +874,13 @@ matmul_r8_avx2 (gfc_array_r8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1421,8 +1431,13 @@ matmul_r8_avx512f (gfc_array_r8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -1987,8 +2002,13 @@ matmul_r8_vanilla (gfc_array_r8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -2613,8 +2633,13 @@ matmul_r8 (gfc_array_r8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmulavx128_c10.c b/libgfortran/generated/matmulavx128_c10.c
index 69ad45b334c..5cb0f6ad6f3 100644
--- a/libgfortran/generated/matmulavx128_c10.c
+++ b/libgfortran/generated/matmulavx128_c10.c
@@ -282,8 +282,13 @@ matmul_c10_avx128_fma3 (gfc_array_c10 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -835,8 +840,13 @@ matmul_c10_avx128_fma4 (gfc_array_c10 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmulavx128_c16.c b/libgfortran/generated/matmulavx128_c16.c
index 0bf8ba3f599..66272fefaf9 100644
--- a/libgfortran/generated/matmulavx128_c16.c
+++ b/libgfortran/generated/matmulavx128_c16.c
@@ -282,8 +282,13 @@ matmul_c16_avx128_fma3 (gfc_array_c16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -835,8 +840,13 @@ matmul_c16_avx128_fma4 (gfc_array_c16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmulavx128_c4.c b/libgfortran/generated/matmulavx128_c4.c
index 0763d5d1707..f6e06e2e88f 100644
--- a/libgfortran/generated/matmulavx128_c4.c
+++ b/libgfortran/generated/matmulavx128_c4.c
@@ -282,8 +282,13 @@ matmul_c4_avx128_fma3 (gfc_array_c4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -835,8 +840,13 @@ matmul_c4_avx128_fma4 (gfc_array_c4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmulavx128_c8.c b/libgfortran/generated/matmulavx128_c8.c
index d440784c285..accc69c4d1a 100644
--- a/libgfortran/generated/matmulavx128_c8.c
+++ b/libgfortran/generated/matmulavx128_c8.c
@@ -282,8 +282,13 @@ matmul_c8_avx128_fma3 (gfc_array_c8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -835,8 +840,13 @@ matmul_c8_avx128_fma4 (gfc_array_c8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmulavx128_i1.c b/libgfortran/generated/matmulavx128_i1.c
index efa34614544..48b15c8074a 100644
--- a/libgfortran/generated/matmulavx128_i1.c
+++ b/libgfortran/generated/matmulavx128_i1.c
@@ -282,8 +282,13 @@ matmul_i1_avx128_fma3 (gfc_array_i1 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -835,8 +840,13 @@ matmul_i1_avx128_fma4 (gfc_array_i1 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmulavx128_i16.c b/libgfortran/generated/matmulavx128_i16.c
index 3c0e6064efe..319321eca49 100644
--- a/libgfortran/generated/matmulavx128_i16.c
+++ b/libgfortran/generated/matmulavx128_i16.c
@@ -282,8 +282,13 @@ matmul_i16_avx128_fma3 (gfc_array_i16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -835,8 +840,13 @@ matmul_i16_avx128_fma4 (gfc_array_i16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmulavx128_i2.c b/libgfortran/generated/matmulavx128_i2.c
index 1da7f25189f..4d8945b10a2 100644
--- a/libgfortran/generated/matmulavx128_i2.c
+++ b/libgfortran/generated/matmulavx128_i2.c
@@ -282,8 +282,13 @@ matmul_i2_avx128_fma3 (gfc_array_i2 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -835,8 +840,13 @@ matmul_i2_avx128_fma4 (gfc_array_i2 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmulavx128_i4.c b/libgfortran/generated/matmulavx128_i4.c
index ca7de54b9b8..acaa00a30bb 100644
--- a/libgfortran/generated/matmulavx128_i4.c
+++ b/libgfortran/generated/matmulavx128_i4.c
@@ -282,8 +282,13 @@ matmul_i4_avx128_fma3 (gfc_array_i4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -835,8 +840,13 @@ matmul_i4_avx128_fma4 (gfc_array_i4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmulavx128_i8.c b/libgfortran/generated/matmulavx128_i8.c
index 731e55d2f63..56e85167a3f 100644
--- a/libgfortran/generated/matmulavx128_i8.c
+++ b/libgfortran/generated/matmulavx128_i8.c
@@ -282,8 +282,13 @@ matmul_i8_avx128_fma3 (gfc_array_i8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -835,8 +840,13 @@ matmul_i8_avx128_fma4 (gfc_array_i8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmulavx128_r10.c b/libgfortran/generated/matmulavx128_r10.c
index 190e41cbb71..880c9d921b2 100644
--- a/libgfortran/generated/matmulavx128_r10.c
+++ b/libgfortran/generated/matmulavx128_r10.c
@@ -282,8 +282,13 @@ matmul_r10_avx128_fma3 (gfc_array_r10 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -835,8 +840,13 @@ matmul_r10_avx128_fma4 (gfc_array_r10 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmulavx128_r16.c b/libgfortran/generated/matmulavx128_r16.c
index 2994ed311fe..328e251a3a1 100644
--- a/libgfortran/generated/matmulavx128_r16.c
+++ b/libgfortran/generated/matmulavx128_r16.c
@@ -282,8 +282,13 @@ matmul_r16_avx128_fma3 (gfc_array_r16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -835,8 +840,13 @@ matmul_r16_avx128_fma4 (gfc_array_r16 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmulavx128_r4.c b/libgfortran/generated/matmulavx128_r4.c
index 4f18b5006d8..013a1804a11 100644
--- a/libgfortran/generated/matmulavx128_r4.c
+++ b/libgfortran/generated/matmulavx128_r4.c
@@ -282,8 +282,13 @@ matmul_r4_avx128_fma3 (gfc_array_r4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -835,8 +840,13 @@ matmul_r4_avx128_fma4 (gfc_array_r4 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/generated/matmulavx128_r8.c b/libgfortran/generated/matmulavx128_r8.c
index 443d7426f4f..4da59f9e69c 100644
--- a/libgfortran/generated/matmulavx128_r8.c
+++ b/libgfortran/generated/matmulavx128_r8.c
@@ -282,8 +282,13 @@ matmul_r8_avx128_fma3 (gfc_array_r8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
@@ -835,8 +840,13 @@ matmul_r8_avx128_fma4 (gfc_array_r8 * const restrict retarray,
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
diff --git a/libgfortran/io/async.h b/libgfortran/io/async.h
index 7dfbc8bd696..4ddb498069c 100644
--- a/libgfortran/io/async.h
+++ b/libgfortran/io/async.h
@@ -328,21 +328,18 @@ typedef union transfer_args
struct adv_cond
{
+#if ASYNC_IO
int pending;
__gthread_mutex_t lock;
__gthread_cond_t signal;
+#endif
};
typedef struct async_unit
{
- pthread_mutex_t lock; /* Lock for manipulating the queue structure. */
pthread_mutex_t io_lock; /* Lock for doing actual I/O. */
- struct adv_cond work;
- struct adv_cond emptysignal;
- struct st_parameter_dt *pdt;
- pthread_t thread;
- struct transfer_queue *head;
- struct transfer_queue *tail;
+ pthread_mutex_t lock; /* Lock for manipulating the queue structure. */
+ bool empty;
struct
{
int waiting;
@@ -351,7 +348,13 @@ typedef struct async_unit
struct adv_cond done;
} id;
- bool empty;
+#if ASYNC_IO
+ struct adv_cond work;
+ struct adv_cond emptysignal;
+ struct st_parameter_dt *pdt;
+ pthread_t thread;
+ struct transfer_queue *head;
+ struct transfer_queue *tail;
struct {
const char *message;
@@ -361,7 +364,7 @@ typedef struct async_unit
int family;
bool fatal_error;
} error;
-
+#endif
} async_unit;
void init_async_unit (gfc_unit *);
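
The async.h hunks above move the thread-related members of adv_cond and async_unit behind ASYNC_IO, so builds without asynchronous I/O carry only the fields they actually use. A minimal standalone sketch of that gating, using plain pthread types instead of the __gthread wrappers; ASYNC_IO handling and the demo_adv_cond name are assumptions of the sketch, not libgfortran identifiers:

    #include <pthread.h>

    #ifndef ASYNC_IO
    # define ASYNC_IO 0   /* assumption: the build defines this to 0 or 1 */
    #endif

    struct demo_adv_cond
    {
    #if ASYNC_IO
      int pending;                 /* work outstanding?              */
      pthread_mutex_t lock;        /* protects pending               */
      pthread_cond_t signal;       /* signalled when pending changes */
    #endif
    };                             /* empty (size 1 in C++) when ASYNC_IO == 0 */
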
diff --git a/libgfortran/m4/matmul_internal.m4 b/libgfortran/m4/matmul_internal.m4
index 6860f606122..2020e8a50df 100644
--- a/libgfortran/m4/matmul_internal.m4
+++ b/libgfortran/m4/matmul_internal.m4
@@ -233,8 +233,13 @@ sinclude(`matmul_asm_'rtype_code`.m4')dnl
return;
/* Adjust size of t1 to what is needed. */
- index_type t1_dim;
- t1_dim = (a_dim1 - (ycount > 1)) * 256 + b_dim1;
+ index_type t1_dim, a_sz;
+ if (aystride == 1)
+ a_sz = rystride;
+ else
+ a_sz = a_dim1;
+
+ t1_dim = a_sz * 256 + b_dim1;
if (t1_dim > 65536)
t1_dim = 65536;
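
This m4 template is the source of all the generated matmul*_*.c changes above: the scratch buffer t1 is now sized from the result stride when A is walked with unit stride, from A's leading dimension otherwise, and then capped at 65536 elements. A standalone sketch of the new computation; index_type is approximated by ptrdiff_t and t1_size is a name invented for the sketch:

    #include <cstddef>

    using index_type = std::ptrdiff_t;   /* stand-in for libgfortran's type */

    /* Compute the size of the blocking buffer t1, as in the hunk above.  */
    index_type
    t1_size (index_type aystride, index_type rystride,
             index_type a_dim1, index_type b_dim1)
    {
      const index_type a_sz = (aystride == 1) ? rystride : a_dim1;
      index_type t1_dim = a_sz * 256 + b_dim1;
      if (t1_dim > 65536)
        t1_dim = 65536;
      return t1_dim;
    }
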
diff --git a/libgo/go/reflect/type.go b/libgo/go/reflect/type.go
index 07fe4d001c0..bbbef9107c2 100644
--- a/libgo/go/reflect/type.go
+++ b/libgo/go/reflect/type.go
@@ -351,7 +351,6 @@ type mapType struct {
key *rtype // map key type
elem *rtype // map element (value) type
bucket *rtype // internal bucket structure
- hmap *rtype // internal map header
keysize uint8 // size of key slot
indirectkey uint8 // store ptr to key instead of key itself
valuesize uint8 // size of value slot
diff --git a/libgo/go/runtime/cgo_gccgo.go b/libgo/go/runtime/cgo_gccgo.go
index 05be4964500..e689b0e2616 100644
--- a/libgo/go/runtime/cgo_gccgo.go
+++ b/libgo/go/runtime/cgo_gccgo.go
@@ -47,7 +47,7 @@ func Cgocall() {
mp := getg().m
mp.ncgocall++
mp.ncgo++
- entersyscall(0)
+ entersyscall()
mp.incgo = true
}
@@ -63,7 +63,7 @@ func CgocallDone() {
// If we are invoked because the C function called _cgo_panic,
// then _cgo_panic will already have exited syscall mode.
if readgstatus(gp)&^_Gscan == _Gsyscall {
- exitsyscall(0)
+ exitsyscall()
}
}
@@ -84,7 +84,7 @@ func CgocallBack() {
lockOSThread()
- exitsyscall(0)
+ exitsyscall()
gp.m.incgo = false
if gp.m.ncgo == 0 {
@@ -134,7 +134,7 @@ func CgocallBackDone() {
}
gp.m.incgo = true
- entersyscall(0)
+ entersyscall()
if drop {
mp.dropextram = false
@@ -144,7 +144,7 @@ func CgocallBackDone() {
// _cgo_panic may be called by SWIG code to panic.
func _cgo_panic(p *byte) {
- exitsyscall(0)
+ exitsyscall()
panic(gostringnocopy(p))
}
diff --git a/libgo/go/runtime/hashmap.go b/libgo/go/runtime/hashmap.go
index aba9abd7aab..53b05b1ef77 100644
--- a/libgo/go/runtime/hashmap.go
+++ b/libgo/go/runtime/hashmap.go
@@ -311,20 +311,13 @@ func makemap_small() *hmap {
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, bucket pointed to can be used as the first bucket.
func makemap(t *maptype, hint int, h *hmap) *hmap {
- // The size of hmap should be 48 bytes on 64 bit
- // and 28 bytes on 32 bit platforms.
- if sz := unsafe.Sizeof(hmap{}); sz != 8+5*sys.PtrSize {
- println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
- throw("bad hmap size")
- }
-
if hint < 0 || hint > int(maxSliceCap(t.bucket.size)) {
hint = 0
}
// initialize Hmap
if h == nil {
- h = (*hmap)(newobject(t.hmap))
+ h = new(hmap)
}
h.hash0 = fastrand()
@@ -1210,11 +1203,6 @@ func ismapkey(t *_type) bool {
//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap {
- // Check invariants and reflects math.
- if sz := unsafe.Sizeof(hmap{}); sz != t.hmap.size {
- println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
- throw("bad hmap size")
- }
if !ismapkey(t.key) {
throw("runtime.reflect_makemap: unsupported map key type")
}
diff --git a/libgo/go/runtime/lock_futex.go b/libgo/go/runtime/lock_futex.go
index 7ddd3786624..b2c9ccb37ee 100644
--- a/libgo/go/runtime/lock_futex.go
+++ b/libgo/go/runtime/lock_futex.go
@@ -236,8 +236,8 @@ func notetsleepg(n *note, ns int64) bool {
throw("notetsleepg on g0")
}
- entersyscallblock(0)
+ entersyscallblock()
ok := notetsleep_internal(n, ns)
- exitsyscall(0)
+ exitsyscall()
return ok
}
diff --git a/libgo/go/runtime/lock_sema.go b/libgo/go/runtime/lock_sema.go
index d000b112f44..b5cce6af583 100644
--- a/libgo/go/runtime/lock_sema.go
+++ b/libgo/go/runtime/lock_sema.go
@@ -289,8 +289,8 @@ func notetsleepg(n *note, ns int64) bool {
throw("notetsleepg on g0")
}
semacreate(gp.m)
- entersyscallblock(0)
+ entersyscallblock()
ok := notetsleep_internal(n, ns, nil, 0)
- exitsyscall(0)
+ exitsyscall()
return ok
}
diff --git a/libgo/go/runtime/malloc.go b/libgo/go/runtime/malloc.go
index c8d528474c5..523989e2181 100644
--- a/libgo/go/runtime/malloc.go
+++ b/libgo/go/runtime/malloc.go
@@ -621,7 +621,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// callback.
incallback := false
if gomcache() == nil && getg().m.ncgo > 0 {
- exitsyscall(0)
+ exitsyscall()
incallback = true
}
@@ -709,7 +709,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
mp.mallocing = 0
releasem(mp)
if incallback {
- entersyscall(0)
+ entersyscall()
}
return x
}
@@ -835,7 +835,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
}
if incallback {
- entersyscall(0)
+ entersyscall()
}
return x
diff --git a/libgo/go/runtime/norace_test.go b/libgo/go/runtime/norace_test.go
index e9b39b2f455..e90128bb6d4 100644
--- a/libgo/go/runtime/norace_test.go
+++ b/libgo/go/runtime/norace_test.go
@@ -34,12 +34,12 @@ func benchmarkSyscall(b *testing.B, work, excess int) {
b.RunParallel(func(pb *testing.PB) {
foo := 42
for pb.Next() {
- runtime.Entersyscall(0)
+ runtime.Entersyscall()
for i := 0; i < work; i++ {
foo *= 2
foo /= 2
}
- runtime.Exitsyscall(0)
+ runtime.Exitsyscall()
}
_ = foo
})
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go
index 5826958dfd3..4c217cc1c0a 100644
--- a/libgo/go/runtime/proc.go
+++ b/libgo/go/runtime/proc.go
@@ -1168,7 +1168,7 @@ func kickoff() {
goexit1()
}
-func mstart1(dummy int32) {
+func mstart1() {
_g_ := getg()
if _g_ != _g_.m.g0 {
@@ -2774,7 +2774,7 @@ func entersyscallblock_handoff() {
//
//go:nosplit
//go:nowritebarrierrec
-func exitsyscall(dummy int32) {
+func exitsyscall() {
_g_ := getg()
_g_.m.locks++ // see comment in entersyscall
@@ -2984,13 +2984,13 @@ func exitsyscallclear(gp *g) {
//go:linkname syscall_entersyscall syscall.Entersyscall
//go:nosplit
func syscall_entersyscall() {
- entersyscall(0)
+ entersyscall()
}
//go:linkname syscall_exitsyscall syscall.Exitsyscall
//go:nosplit
func syscall_exitsyscall() {
- exitsyscall(0)
+ exitsyscall()
}
func beforefork() {
diff --git a/libgo/go/runtime/stubs.go b/libgo/go/runtime/stubs.go
index bda2c694ac6..1d214454ed4 100644
--- a/libgo/go/runtime/stubs.go
+++ b/libgo/go/runtime/stubs.go
@@ -199,16 +199,14 @@ func publicationBarrier()
// getcallerpc returns the program counter (PC) of its caller's caller.
// getcallersp returns the stack pointer (SP) of its caller's caller.
// argp must be a pointer to the caller's first function argument.
-// The implementation may or may not use argp, depending on
-// the architecture. The implementation may be a compiler
-// intrinsic; there is not necessarily code implementing this
-// on every platform.
+// The implementation may be a compiler intrinsic; there is not
+// necessarily code implementing this on every platform.
//
// For example:
//
// func f(arg1, arg2, arg3 int) {
// pc := getcallerpc()
-// sp := getcallersp(unsafe.Pointer(&arg1))
+// sp := getcallersp()
// }
//
// These two lines find the PC and SP immediately following
@@ -230,7 +228,7 @@ func publicationBarrier()
func getcallerpc() uintptr
//go:noescape
-func getcallersp(argp unsafe.Pointer) uintptr
+func getcallersp() uintptr
func asmcgocall(fn, arg unsafe.Pointer) int32 {
throw("asmcgocall")
@@ -309,9 +307,9 @@ func setSupportAES(v bool) {
// Here for gccgo.
func errno() int
-// Temporary for gccgo until we port proc.go.
-func entersyscall(int32)
-func entersyscallblock(int32)
+// For gccgo these are written in C.
+func entersyscall()
+func entersyscallblock()
// For gccgo to call from C code, so that the C code and the Go code
// can share the memstats variable for now.
diff --git a/libgo/go/runtime/type.go b/libgo/go/runtime/type.go
index 0ec0da41179..3c08f7e1b9d 100644
--- a/libgo/go/runtime/type.go
+++ b/libgo/go/runtime/type.go
@@ -72,7 +72,6 @@ type maptype struct {
key *_type
elem *_type
bucket *_type // internal type representing a hash bucket
- hmap *_type // internal type representing a hmap
keysize uint8 // size of key slot
indirectkey bool // store ptr to key instead of key itself
valuesize uint8 // size of value slot
diff --git a/libgo/runtime/proc.c b/libgo/runtime/proc.c
index 913ce5c014c..d8d231bae30 100644
--- a/libgo/runtime/proc.c
+++ b/libgo/runtime/proc.c
@@ -382,7 +382,7 @@ extern void kickoff(void)
__asm__(GOSYM_PREFIX "runtime.kickoff");
extern void minit(void)
__asm__(GOSYM_PREFIX "runtime.minit");
-extern void mstart1(int32)
+extern void mstart1()
__asm__(GOSYM_PREFIX "runtime.mstart1");
extern void stopm(void)
__asm__(GOSYM_PREFIX "runtime.stopm");
@@ -542,7 +542,7 @@ runtime_mstart(void *arg)
}
#endif
- mstart1(0);
+ mstart1();
// mstart1 does not return, but we need a return statement
// here to avoid a compiler warning.
@@ -621,12 +621,12 @@ makeGContext(G* gp, byte* sp, uintptr spsize) {
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.
-void runtime_entersyscall(int32) __attribute__ ((no_split_stack));
+void runtime_entersyscall() __attribute__ ((no_split_stack));
static void doentersyscall(uintptr, uintptr)
__attribute__ ((no_split_stack, noinline));
void
-runtime_entersyscall(int32 dummy __attribute__ ((unused)))
+runtime_entersyscall()
{
// Save the registers in the g structure so that any pointers
// held in registers will be seen by the garbage collector.
@@ -638,8 +638,8 @@ runtime_entersyscall(int32 dummy __attribute__ ((unused)))
// callee-saved registers to access the TLS variable g. We
// don't want to put the ucontext_t on the stack because it is
// large and we can not split the stack here.
- doentersyscall((uintptr)runtime_getcallerpc(&dummy),
- (uintptr)runtime_getcallersp(&dummy));
+ doentersyscall((uintptr)runtime_getcallerpc(),
+ (uintptr)runtime_getcallersp());
}
static void
@@ -672,15 +672,15 @@ static void doentersyscallblock(uintptr, uintptr)
// The same as runtime_entersyscall(), but with a hint that the syscall is blocking.
void
-runtime_entersyscallblock(int32 dummy __attribute__ ((unused)))
+runtime_entersyscallblock()
{
// Save the registers in the g structure so that any pointers
// held in registers will be seen by the garbage collector.
getcontext(ucontext_arg(&g->gcregs[0]));
// See comment in runtime_entersyscall.
- doentersyscallblock((uintptr)runtime_getcallerpc(&dummy),
- (uintptr)runtime_getcallersp(&dummy));
+ doentersyscallblock((uintptr)runtime_getcallerpc(),
+ (uintptr)runtime_getcallersp());
}
static void
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index 0ffcf4bde9e..0856618b499 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -268,7 +268,7 @@ void* runtime_sysAlloc(uintptr, uint64*)
void runtime_sysFree(void*, uintptr, uint64*)
__asm__ (GOSYM_PREFIX "runtime.sysFree");
void runtime_mprofinit(void);
-#define runtime_getcallersp(p) __builtin_frame_address(0)
+#define runtime_getcallersp() __builtin_frame_address(0)
void runtime_mcall(FuncVal*)
__asm__ (GOSYM_PREFIX "runtime.mcall");
int32 runtime_timediv(int64, int32, int32*)
@@ -305,12 +305,10 @@ void runtime_schedtrace(bool)
void runtime_goparkunlock(Lock*, String, byte, intgo)
__asm__ (GOSYM_PREFIX "runtime.goparkunlock");
void runtime_tsleep(int64, const char*);
-void runtime_entersyscall(int32)
+void runtime_entersyscall()
__asm__ (GOSYM_PREFIX "runtime.entersyscall");
-void runtime_entersyscallblock(int32)
+void runtime_entersyscallblock()
__asm__ (GOSYM_PREFIX "runtime.entersyscallblock");
-void runtime_exitsyscall(int32)
- __asm__ (GOSYM_PREFIX "runtime.exitsyscall");
G* __go_go(void (*pfn)(void*), void*);
int32 runtime_callers(int32, Location*, int32, bool keep_callers);
int64 runtime_nanotime(void) // monotonic time
@@ -385,7 +383,7 @@ bool runtime_notetsleepg(Note*, int64) // false - timeout
#define runtime_munmap munmap
#define runtime_madvise madvise
#define runtime_memclr(buf, size) __builtin_memset((buf), 0, (size))
-#define runtime_getcallerpc(p) __builtin_return_address(0)
+#define runtime_getcallerpc() __builtin_return_address(0)
#ifdef __rtems__
void __wrap_rtems_task_variable_add(void **);
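
The libgo changes above drop the dummy int32 parameter from entersyscall, exitsyscall, mstart1 and friends: the caller's PC and SP now come straight from the GCC builtins that the redefined runtime_getcallerpc()/runtime_getcallersp() macros expand to, instead of from the address of a dummy argument. A small sketch of that pattern; record_syscall_entry is a hypothetical sink, not a libgo function:

    extern "C" void record_syscall_entry (void *pc, void *sp);  /* hypothetical */

    #define getcallerpc() __builtin_return_address (0)
    #define getcallersp() __builtin_frame_address (0)

    __attribute__ ((noinline)) void
    enter_syscall_sketch (void)
    {
      /* No dummy parameter needed: the builtins identify this frame's
         return address and frame pointer directly.  */
      record_syscall_entry (getcallerpc (), getcallersp ());
    }
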
diff --git a/libgomp/ChangeLog b/libgomp/ChangeLog
index e2ec2e8edf4..b3fad644863 100644
--- a/libgomp/ChangeLog
+++ b/libgomp/ChangeLog
@@ -1884,7 +1884,7 @@
* config/nvptx/affinity.c: Delete to use fallback implementation.
2016-11-23 Alexander Monakov <amonakov@ispras.ru>
- Jakub Jelinek <jakub@redhat.com>
+ Jakub Jelinek <jakub@redhat.com>
Dmitry Melnik <dm@ispras.ru>
* Makefile.am (libgomp_la_SOURCES): Add atomic.c, icv.c, icv-device.c.
@@ -1986,7 +1986,7 @@
* testsuite/libgomp.hsa.c/tiling-2.c: Likewise.
2016-11-23 Martin Liska <mliska@suse.cz>
- Martin Jambor <mjambor@suse.cz>
+ Martin Jambor <mjambor@suse.cz>
* plugin/hsa.h: New file.
* plugin/hsa_ext_finalize.h: New file.
@@ -2036,7 +2036,7 @@
* testsuite/Makefile.in: Likewise.
2016-11-15 Martin Jambor <mjambor@suse.cz>
- Alexander Monakov <amonakov@ispras.ru>
+ Alexander Monakov <amonakov@ispras.ru>
* testsuite/libgomp.fortran/examples-4/device-1.f90 (e_57_1): Add
mapping clauses to target constructs.
@@ -2426,7 +2426,7 @@
2016-05-16 Martin Jambor <mjambor@suse.cz>
- * testsuite/libgomp.hsa.c/complex-align-2.c: New test.
+ * testsuite/libgomp.hsa.c/complex-align-2.c: New test.
2016-05-02 Nathan Sidwell <nathan@codesourcery.com>
diff --git a/libiberty/ChangeLog b/libiberty/ChangeLog
index 22d995f8cfb..294bcb6b261 100644
--- a/libiberty/ChangeLog
+++ b/libiberty/ChangeLog
@@ -1,3 +1,10 @@
+2018-08-23 Nathan Sidwell <nathan@acm.org>
+ Martin Liska <mliska@suse.cz>
+
+ PR driver/87056
+ * pex-unix.c (pex_unix_exec_child): Duplicate bad_fn into local
+ scopes to avoid potential clobber.
+
2018-08-20 Nathan Sidwell <nathan@acm.org>
* pex-unix.c (pex_child_error): Delete.
@@ -10,10 +17,10 @@
2018-07-26 Martin Liska <mliska@suse.cz>
- PR lto/86548
+ PR lto/86548
* make-temp-file.c (TEMP_FILE): Remove leading 'cc'.
(make_temp_file): Call make_temp_file_with_prefix with
- first argument set to NULL.
+ first argument set to NULL.
(make_temp_file_with_prefix): Support also prefix.
2018-07-19 Eli Zaretskii <eliz@gnu.org>
@@ -922,8 +929,8 @@
* functions.texi: Regenerate.
2014-12-11 Uros Bizjak <ubizjak@gmail.com>
- Ben Elliston <bje@au.ibm.com>
- Manuel Lopez-Ibanez <manu@gcc.gnu.org>
+ Ben Elliston <bje@au.ibm.com>
+ Manuel Lopez-Ibanez <manu@gcc.gnu.org>
* xvasprintf.c: New file.
* vprintf-support.h: Likewise.
diff --git a/libiberty/pex-unix.c b/libiberty/pex-unix.c
index ea5ee4c141b..703010bdeae 100644
--- a/libiberty/pex-unix.c
+++ b/libiberty/pex-unix.c
@@ -582,8 +582,6 @@ pex_unix_exec_child (struct pex_obj *obj, int flags, const char *executable,
issues. */
char **save_environ = environ;
- const char *bad_fn = NULL;
-
for (retries = 0; retries < 4; ++retries)
{
pid = vfork ();
@@ -602,62 +600,64 @@ pex_unix_exec_child (struct pex_obj *obj, int flags, const char *executable,
case 0:
/* Child process. */
- if (!bad_fn && in != STDIN_FILE_NO)
- {
- if (dup2 (in, STDIN_FILE_NO) < 0)
- bad_fn = "dup2";
- else if (close (in) < 0)
- bad_fn = "close";
- }
- if (!bad_fn && out != STDOUT_FILE_NO)
- {
- if (dup2 (out, STDOUT_FILE_NO) < 0)
- bad_fn = "dup2";
- else if (close (out) < 0)
- bad_fn = "close";
- }
- if (!bad_fn && errdes != STDERR_FILE_NO)
- {
- if (dup2 (errdes, STDERR_FILE_NO) < 0)
- bad_fn = "dup2";
- else if (close (errdes) < 0)
- bad_fn = "close";
- }
- if (!bad_fn && toclose >= 0)
- {
- if (close (toclose) < 0)
- bad_fn = "close";
- }
- if (!bad_fn && (flags & PEX_STDERR_TO_STDOUT) != 0)
- {
- if (dup2 (STDOUT_FILE_NO, STDERR_FILE_NO) < 0)
- bad_fn = "dup2";
- }
- if (!bad_fn)
- {
- if (env)
- /* NOTE: In a standard vfork implementation this clobbers
- the parent's copy of environ "too" (in reality there's
- only one copy). This is ok as we restore it below. */
- environ = (char**) env;
- if ((flags & PEX_SEARCH) != 0)
- {
- execvp (executable, to_ptr32 (argv));
- bad_fn = "execvp";
- }
- else
- {
- execv (executable, to_ptr32 (argv));
- bad_fn = "execv";
- }
- }
-
- /* Something failed, report an error. We don't use stdio
- routines, because we might be here due to a vfork call. */
{
+ const char *bad_fn = NULL;
+
+ if (!bad_fn && in != STDIN_FILE_NO)
+ {
+ if (dup2 (in, STDIN_FILE_NO) < 0)
+ bad_fn = "dup2";
+ else if (close (in) < 0)
+ bad_fn = "close";
+ }
+ if (!bad_fn && out != STDOUT_FILE_NO)
+ {
+ if (dup2 (out, STDOUT_FILE_NO) < 0)
+ bad_fn = "dup2";
+ else if (close (out) < 0)
+ bad_fn = "close";
+ }
+ if (!bad_fn && errdes != STDERR_FILE_NO)
+ {
+ if (dup2 (errdes, STDERR_FILE_NO) < 0)
+ bad_fn = "dup2";
+ else if (close (errdes) < 0)
+ bad_fn = "close";
+ }
+ if (!bad_fn && toclose >= 0)
+ {
+ if (close (toclose) < 0)
+ bad_fn = "close";
+ }
+ if (!bad_fn && (flags & PEX_STDERR_TO_STDOUT) != 0)
+ {
+ if (dup2 (STDOUT_FILE_NO, STDERR_FILE_NO) < 0)
+ bad_fn = "dup2";
+ }
+ if (!bad_fn)
+ {
+ if (env)
+ /* NOTE: In a standard vfork implementation this clobbers
+ the parent's copy of environ "too" (in reality there's
+ only one copy). This is ok as we restore it below. */
+ environ = (char**) env;
+ if ((flags & PEX_SEARCH) != 0)
+ {
+ execvp (executable, to_ptr32 (argv));
+ bad_fn = "execvp";
+ }
+ else
+ {
+ execv (executable, to_ptr32 (argv));
+ bad_fn = "execv";
+ }
+ }
+
+ /* Something failed, report an error. We don't use stdio
+ routines, because we might be here due to a vfork call. */
ssize_t retval = 0;
- int err = errno;
-
+ int eno = errno;
+
#define writeerr(s) (retval |= write (STDERR_FILE_NO, s, strlen (s)))
writeerr (obj->pname);
writeerr (": error trying to exec '");
@@ -665,7 +665,7 @@ pex_unix_exec_child (struct pex_obj *obj, int flags, const char *executable,
writeerr ("': ");
writeerr (bad_fn);
writeerr (": ");
- writeerr (xstrerror (err));
+ writeerr (xstrerror (eno));
writeerr ("\n");
#undef writeerr
@@ -677,30 +677,33 @@ pex_unix_exec_child (struct pex_obj *obj, int flags, const char *executable,
default:
/* Parent process. */
+ {
+ const char *bad_fn = NULL;
+
+ /* Restore environ. Note that the parent either doesn't run
+ until the child execs/exits (standard vfork behaviour), or
+ if it does run then vfork is behaving more like fork. In
+ either case we needn't worry about clobbering the child's
+ copy of environ. */
+ environ = save_environ;
+
+ if (!bad_fn && in != STDIN_FILE_NO)
+ if (close (in) < 0)
+ bad_fn = "close";
+ if (!bad_fn && out != STDOUT_FILE_NO)
+ if (close (out) < 0)
+ bad_fn = "close";
+ if (!bad_fn && errdes != STDERR_FILE_NO)
+ if (close (errdes) < 0)
+ bad_fn = "close";
- /* Restore environ.
- Note that the parent either doesn't run until the child execs/exits
- (standard vfork behaviour), or if it does run then vfork is behaving
- more like fork. In either case we needn't worry about clobbering
- the child's copy of environ. */
- environ = save_environ;
-
- if (!bad_fn && in != STDIN_FILE_NO)
- if (close (in) < 0)
- bad_fn = "close";
- if (!bad_fn && out != STDOUT_FILE_NO)
- if (close (out) < 0)
- bad_fn = "close";
- if (!bad_fn && errdes != STDERR_FILE_NO)
- if (close (errdes) < 0)
- bad_fn = "close";
-
- if (bad_fn)
- {
- *err = errno;
- *errmsg = bad_fn;
- return (pid_t) -1;
- }
+ if (bad_fn)
+ {
+ *err = errno;
+ *errmsg = bad_fn;
+ return (pid_t) -1;
+ }
+ }
return pid;
}
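
The pex-unix.c rewrite (PR driver/87056) gives the vfork child and the parent each their own block-scoped bad_fn instead of one shared variable declared before the vfork: with vfork the two share a stack frame, so a store made in the child could clobber the value the parent reads later. A reduced sketch of the safe shape; this is a demo program, not the libiberty code, and it mirrors the original in reporting errors with write() rather than stdio after vfork:

    #include <cstring>
    #include <sys/wait.h>
    #include <unistd.h>

    int
    main ()
    {
      pid_t pid = vfork ();
      if (pid == 0)
        {
          /* Child: bad_fn lives only in this block.  */
          const char *bad_fn = NULL;
          execlp ("true", "true", (char *) NULL);
          bad_fn = "execlp";                      /* only reached on failure */
          write (STDERR_FILENO, bad_fn, strlen (bad_fn));
          write (STDERR_FILENO, " failed\n", 8);
          _exit (127);
        }
      /* Parent: an independent bad_fn, unaffected by the child's stores.  */
      const char *bad_fn = NULL;
      if (waitpid (pid, NULL, 0) < 0)
        bad_fn = "waitpid";
      return bad_fn ? 1 : 0;
    }
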
diff --git a/libobjc/ChangeLog b/libobjc/ChangeLog
index 140c56d86df..0ca8b771d27 100644
--- a/libobjc/ChangeLog
+++ b/libobjc/ChangeLog
@@ -47,11 +47,11 @@
2017-01-18 Matthias Klose <doko@ubuntu.com>
- PR libobjc/78697
+ PR libobjc/78697
* configure.ac: Allow default for --with-target-bdw-gc-include.
* configure: Regenerate.
- PR libobjc/78698
+ PR libobjc/78698
* configure.ac: Use the libgc.la file when available.
* configure: Regenerate.
@@ -294,7 +294,7 @@
clang which seems to emit calls to it.
2011-10-08 Richard Frith-Macdonald <rfm@gnu.org>
- Nicola Pero <nicola.pero@meta-innovation.com>
+ Nicola Pero <nicola.pero@meta-innovation.com>
PR libobjc/50428
* sendmsg.c (__objc_send_initialize): If a class does not have an
@@ -2098,7 +2098,7 @@
for noreturn.
2008-09-26 Peter O'Gorman <pogma@thewrittenword.com>
- Steve Ellcey <sje@cup.hp.com>
+ Steve Ellcey <sje@cup.hp.com>
* configure: Regenerate for new libtool.
* config.h.in: Regenerate for new libtool.
@@ -2346,7 +2346,7 @@
global scope.
2005-09-04 Andrew Pinski <pinskia@physics.uc.edu>
- Rasmus Hahn <rassahah@neofonie.de>
+ Rasmus Hahn <rassahah@neofonie.de>
PR libobjc/23108
* archive.c (objc_write_type): Correct the element offset.
@@ -2357,7 +2357,7 @@
* All files: Update FSF address.
2005-08-13 Marcin Koziej <creep@desk.pl>
- Andrew Pinski <pinskia@physics.uc.edu>
+ Andrew Pinski <pinskia@physics.uc.edu>
PR libobjc/22492
* exception.c (PERSONALITY_FUNCTION): Fix the PC with finally.
diff --git a/libsanitizer/ChangeLog b/libsanitizer/ChangeLog
index 556fdfbddc4..37104d0ec6a 100644
--- a/libsanitizer/ChangeLog
+++ b/libsanitizer/ChangeLog
@@ -1,8 +1,8 @@
2018-08-02 Martin Liska <mliska@suse.cz>
- PR sanitizer/86022
+ PR sanitizer/86022
* sanitizer_common/sanitizer_linux_libcdep.cc (ThreadDescriptorSize):
- Cherry-pick compiler-rt revision 338606.
+ Cherry-pick compiler-rt revision 338606.
2018-08-01 Marek Polacek <polacek@redhat.com>
@@ -46,9 +46,9 @@
2018-06-13 Denis Khalikov <d.khalikov@partner.samsung.com>
- PR sanitizer/86090
- * configure.ac: Check for lstat and readlink.
- * configure, config.h.in: Rebuild.
+ PR sanitizer/86090
+ * configure.ac: Check for lstat and readlink.
+ * configure, config.h.in: Rebuild.
2018-05-31 Matthias Klose <doko@ubuntu.com>
@@ -199,26 +199,26 @@
2017-10-19 Jakub Jelinek <jakub@redhat.com>
* All source files: Merge from upstream 315899.
- * asan/Makefile.am (nodist_saninclude_HEADERS): Add
+ * asan/Makefile.am (nodist_saninclude_HEADERS): Add
include/sanitizer/tsan_interface.h.
- * asan/libtool-version: Bump the libasan SONAME.
+ * asan/libtool-version: Bump the libasan SONAME.
* lsan/Makefile.am (sanitizer_lsan_files): Add lsan_common_mac.cc.
(lsan_files): Add lsan_linux.cc, lsan_mac.cc and lsan_malloc_mac.cc.
- * sanitizer_common/Makefile.am (sanitizer_common_files): Add
+ * sanitizer_common/Makefile.am (sanitizer_common_files): Add
sancov_flags.cc, sanitizer_allocator_checks.cc,
sanitizer_coverage_libcdep_new.cc, sanitizer_errno.cc,
sanitizer_file.cc, sanitizer_mac_libcdep.cc and
sanitizer_stoptheworld_mac.cc. Remove sanitizer_coverage_libcdep.cc
and sanitizer_coverage_mapping_libcdep.cc.
- * tsan/Makefile.am (tsan_files): Add tsan_external.cc.
+ * tsan/Makefile.am (tsan_files): Add tsan_external.cc.
* ubsan/Makefile.am (DEFS): Add -DUBSAN_CAN_USE_CXXABI=1.
(ubsan_files): Add ubsan_init_standalone.cc and
ubsan_signals_standalone.cc.
* ubsan/libtool-version: Bump the libubsan SONAME.
- * asan/Makefile.in: Regenerate.
- * lsan/Makefile.in: Regenerate.
- * sanitizer_common/Makefile.in: Regenerate.
- * tsan/Makefile.in: Regenerate.
+ * asan/Makefile.in: Regenerate.
+ * lsan/Makefile.in: Regenerate.
+ * sanitizer_common/Makefile.in: Regenerate.
+ * tsan/Makefile.in: Regenerate.
* ubsan/Makefile.in: Regenerate.
2017-10-05 H.J. Lu <hongjiu.lu@intel.com>
@@ -1029,7 +1029,7 @@
2013-12-19 Kostya Serebryany <kcc@google.com>
- * sanitizer_common/sanitizer_platform_limits_posix.cc:
+ * sanitizer_common/sanitizer_platform_limits_posix.cc:
workaround for missing definition of EOWNERDEAD, backport
from upstream r196779.
@@ -1132,10 +1132,10 @@
2013-11-15 Kostya Serebryany <kcc@google.com>
PR sanitizer/58994
- Backport from upstream revision 194573
- * asan/asan_interceptors.cc (COMMON_INTERCEPTOR_ENTER): Fall
- back to the original functions in the common libsanitizer
- interceptors and the __cxa_atexit() interceptor on Darwin.
+ Backport from upstream revision 194573
+ * asan/asan_interceptors.cc (COMMON_INTERCEPTOR_ENTER): Fall
+ back to the original functions in the common libsanitizer
+ interceptors and the __cxa_atexit() interceptor on Darwin.
2013-11-13 Peter Bergner <bergner@vnet.ibm.com>
diff --git a/libssp/ChangeLog b/libssp/ChangeLog
index ef673a03f54..828ca1c5296 100644
--- a/libssp/ChangeLog
+++ b/libssp/ChangeLog
@@ -251,7 +251,7 @@
* configure: Regenerate.
2008-09-26 Peter O'Gorman <pogma@thewrittenword.com>
- Steve Ellcey <sje@cup.hp.com>
+ Steve Ellcey <sje@cup.hp.com>
* configure: Regenerate for new libtool.
* Makefile.in: Ditto.
diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index ec299803992..98288521abd 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,334 @@
+2018-08-30 Jonathan Wakely <jwakely@redhat.com>
+
+ * include/bits/hashtable_policy.h (__clp2): Fix calculation for LLP64
+ targets where sizeof(size_t) > sizeof(long). Avoid undefined shifts
+ of the number of bits in the type.
+ * include/std/bit (__ceil2): Avoid undefined shifts.
+ * testsuite/26_numerics/bit/bit.pow.two/ceil2.cc: Test values with
+ the most significant bit set.
+
+ * config/abi/pre/gnu.ver: Add missing exports for mingw.
+
+ * include/ext/pointer.h (_Pointer_adapter): Define operators for
+ pointer arithmetic using long long offsets.
+ * testsuite/ext/ext_pointer/1.cc: Test pointer arithmetic using
+ long long values.
+
+2018-08-29 Jonathan Wakely <jwakely@redhat.com>
+
+ PR libstdc++/31413
+ * testsuite/22_locale/time_get/get_date/wchar_t/4.cc: Check D_FMT
+ string for alternative format.
+
+2018-08-28 Jonathan Wakely <jwakely@redhat.com>
+
+ PR libstdc++/87116
+ * src/filesystem/std-path.cc (path::lexically_normal): When handling
+ a dot-dot filename, preserve an empty final component in the iteration
+ sequence.
+ [_GLIBCXX_FILESYSTEM_IS_WINDOWS]: Use preferred-separator for
+ root-directory.
+ * testsuite/27_io/filesystem/path/generation/normal.cc: Add new tests
+ for more than two adjacent dot-dot filenames.
+ [_GLIBCXX_FILESYSTEM_IS_WINDOWS]: Replace slashes with
+ preferred-separator in expected normalized strings.
+
+2018-08-25 Iain Sandoe <iain@sandoe.co.uk>
+
+ PR libstdc++/70694
+ * configure.host (OPT_LDFLAGS): Don't append
+ -fvisibility-inlines-hidden for newer Darwin.
+
+2018-08-24 Marc Glisse <marc.glisse@inria.fr>
+
+ PR libstdc++/86822
+ * libsupc++/new (operator new(size_t, nothrow_t), operator
+ new[](size_t, nothrow_t), operator new(size_t, align_val_t, nothrow_t),
+ operator new[](size_t, align_val_t, nothrow_t)): Add malloc attribute.
+
+2018-08-24 Jonathan Wakely <jwakely@redhat.com>
+
+ * include/debug/deque (std::__debug::deque): Declare.
+ * include/debug/forward_list (std::__debug::forward_list): Declare.
+ * include/debug/list (std::__debug::list): Declare.
+ * include/debug/map (std::__debug::map): Declare.
+ * include/debug/set (std::__debug::set): Declare.
+ * include/debug/unordered_map (std::__debug::unordered_map): Declare.
+ * include/debug/unordered_set (std::__debug::unordered_set): Declare.
+ * include/debug/vector (std::__debug::vector): Declare.
+ * testsuite/23_containers/deque/types/pmr_typedefs_debug.cc: New test.
+ * testsuite/23_containers/forward_list/pmr_typedefs_debug.cc: New
+ test.
+ * testsuite/23_containers/list/pmr_typedefs_debug.cc: New test.
+ * testsuite/23_containers/map/pmr_typedefs_debug.cc: New test.
+ * testsuite/23_containers/multimap/pmr_typedefs_debug.cc: New test.
+ * testsuite/23_containers/multiset/pmr_typedefs_debug.cc: New test.
+ * testsuite/23_containers/set/pmr_typedefs_debug.cc: New test.
+ * testsuite/23_containers/unordered_map/pmr_typedefs_debug.cc: New
+ test.
+ * testsuite/23_containers/unordered_multimap/pmr_typedefs_debug.cc:
+ New test.
+ * testsuite/23_containers/unordered_multiset/pmr_typedefs_debug.cc:
+ New test.
+ * testsuite/23_containers/unordered_set/pmr_typedefs_debug.cc: New
+ test.
+ * testsuite/23_containers/vector/cons/destructible_debug_neg.cc:
+ Adjust dg-error lineno.
+ * testsuite/23_containers/vector/types/pmr_typedefs_debug.cc: New
+ test.
+
+2018-08-23 Jonathan Wakely <jwakely@redhat.com>
+
+ * testsuite/util/testsuite_allocator.h (__gnu_test::memory_resource):
+ Only define when RTTI is enabled.
+
+ * include/debug/vector (__niter_base): Define for C++98.
+
+ * testsuite/25_algorithms/partial_sort_copy/debug/irreflexive_neg.cc:
+ Fix C++98 test to not use C++11 features.
+ * testsuite/25_algorithms/fill_n/2.cc: Likewise.
+
+ * scripts/check_compile: Fix comments.
+
+ * include/debug/string (insert(__const_iterator, _InIter, _InIter)):
+ [!_GLIBCXX_USE_CXX11_ABI]: Replace use of C++11-only cbegin() with
+ begin(), for C++98 compatibility.
+
+ * include/bits/basic_string.h [_GLIBCXX_USE_CXX11_ABI]
+ (basic_string::__const_iterator): Change access to protected.
+ [!_GLIBCXX_USE_CXX11_ABI] (basic_string::__const_iterator): Define
+ as typedef for iterator.
+ * include/debug/string (__const_iterator): Use typedef from base.
+ (insert(const_iterator, _CharT))
+ (replace(const_iterator, const_iterator, const basic_string&))
+ (replace(const_iterator, const_iterator, const _CharT*, size_type))
+ (replace(const_iterator, const_iterator, const CharT*))
+ (replace(const_iterator, const_iterator, size_type, _CharT))
+ (replace(const_iterator, const_iterator, _InputIter, _InputIter))
+ (replace(const_iterator, const_iterator, initializer_list<_CharT>)):
+ Change const_iterator parameters to __const_iterator.
+ (insert(iterator, size_type, _CharT)): Add C++98 overload.
+ (insert(const_iterator, _InputIterator, _InputIterator)): Change
+ const_iterator parameter to __const_iterator.
+ [!_GLIBCXX_USE_CXX11_ABI]: Add workaround for incorrect return type
+ of base's member function.
+ (insert(const_iterator, size_type, _CharT)) [!_GLIBCXX_USE_CXX11_ABI]:
+ Likewise.
+ (insert(const_iterator, initializer_list<_CharT>))
+ [!_GLIBCXX_USE_CXX11_ABI]: Likewise.
+ * testsuite/21_strings/basic_string/init-list.cc: Remove effective
+ target directive.
+
+ * testsuite/20_util/reference_wrapper/lwg2993.cc: Fix C++11 test to
+ not use C++14 feature.
+ * testsuite/23_containers/list/68222_neg.cc: Likewise.
+
+ * testsuite/21_strings/basic_string/init-list.cc:
+ Require cxx11-abi.
+ * testsuite/experimental/polymorphic_allocator/pmr_typedefs_match.cc:
+ Likewise.
+ * testsuite/experimental/polymorphic_allocator/pmr_typedefs_string.cc:
+ Likewise.
+
+ * testsuite/23_containers/deque/capacity/max_size.cc: Fix test for
+ C++98 mode.
+ * testsuite/23_containers/deque/modifiers/assign/1.cc: Likewise.
+ * testsuite/23_containers/list/modifiers/assign/1.cc: Likewise.
+ * testsuite/23_containers/vector/bool/modifiers/assign/1.cc: Likewise.
+ * testsuite/23_containers/vector/capacity/max_size.cc: Likewise.
+ * testsuite/23_containers/vector/modifiers/assign/1.cc: Likewise.
+
+2018-08-22 Jonathan Wakely <jwakely@redhat.com>
+
+ PR libstdc++/87061
+ * include/experimental/regex [!_GLIBCXX_USE_CXX11_ABI]
+ (experimental::pmr::match_results, experimental::pmr::cmatch)
+ (experimental::pmr::smatch, experimental::pmr::wcmatch)
+ (experimental::pmr::wsmatch): Do not declare for gcc4-compatible ABI,
+ because COW strings don't support C++11 allocator model.
+ * include/experimental/string [!_GLIBCXX_USE_CXX11_ABI]
+ (experimental::pmr::basic_string, experimental::pmr::string)
+ (experimental::pmr::u16string, experimental::pmr::u32string)
+ (experimental::pmr::wstring): Likewise.
+ * include/std/regex [!_GLIBCXX_USE_CXX11_ABI] (pmr::match_results)
+ (pmr::cmatch, pmr::smatch, pmr::wcmatch, pmr::wsmatch): Likewise.
+ * include/std/string [!_GLIBCXX_USE_CXX11_ABI] (pmr::basic_string)
+ (pmr::string, pmr::u16string, pmr::u32string, pmr::wstring): Likewise.
+ * testsuite/21_strings/basic_string/types/pmr_typedefs.cc: Require
+ cxx11-abi.
+ * testsuite/28_regex/match_results/pmr_typedefs.cc: Likewise.
+
+ PR libstdc++/78448
+ * include/bits/deque.tcc (deque::_M_range_initialize): Use
+ _S_check_init_len to check size.
+ (deque::_M_push_back_aux, deque::_M_push_front_aux): Throw length
+ error if size would exceed max_size().
+ * include/bits/stl_deque.h (_Deque_base::size_type): Remove typedef.
+ (_Deque_base(_Deque_base&&, const allocator_type&, size_t)): Use
+ size_t instead of size_type.
+ (deq(size_type, const allocator_type&)
+ (deq(size_type, const value_type&, const allocator_type&)
+ (deque::_M_initialize_dispatch): Use _S_check_init_len to check size.
+ (deque::max_size): Call _S_max_size.
+ (deque::_S_check_init_len, deque::_S_max_size): New functions.
+ * include/bits/stl_vector.h (vector(size_type, const allocator_type&))
+ (vector(size_type, const value_type&, const allocator_type&))
+ (vector::_M_initialize_dispatch, vector::_M_range_initialize): Use
+ _S_check_init_len to check size.
+ (vector::max_size): Call _S_max_size.
+ (vector::_M_check_len): Prevent max from being expanded as a
+ function-like macro.
+ (vector::_S_check_init_len, vector::_S_max_size): New functions.
+ * include/bits/vector.tcc (vector::_M_assign_aux): Use
+ _S_check_init_len to check size.
+ * testsuite/23_containers/deque/capacity/max_size.cc: New test.
+ * testsuite/23_containers/vector/capacity/max_size.cc: New test.
+
+2018-08-22 François Dumont <fdumont@gcc.gnu.org>
+
+ PR libstdc++/68222
+ * include/debug/safe_iterator.h
+ (_Safe_iterator<_It, _Sq, _Cat>): Add category template parameter.
+ (_Safe_iterator<>::_Const_iterator): Remove.
+ (_Safe_iterator<>::_IsConstant): New.
+ (_Safe_iterator<>::_OtherIterator): New.
+ (_Safe_iterator<_It, _Sq, _Cat>::_Safe_iterator<_MutIte>(
+ const _Safe_iterator<_MutIte, _Sq, _Cat>&)): Add _IsConstant::__value in
+ __gnu_cxx::__enable_if condition.
+ (_Safe_iterator<_It, _Sq, _Cat>::_M_get_distance_to): New.
+ (_Safe_iterator<_It, _Sq, _Cat>::_M_get_distance_from_begin): New.
+ (_Safe_iterator<_It, _Sq, _Cat>::_M_get_distance_to_end): New.
+ (_Safe_iterator<_It, _Sq, std::bidirectional_iterator_tag>): New.
+ (_Safe_iterator<_It, _Sq, _Cat>::operator--()): Move...
+ (_Safe_iterator<_It, _Sq, std::bidirectional_iterator_tag>
+ ::operator--()): ...here.
+ (_Safe_iterator<_It, _Sq, _Cat>::operator--(int)): Move...
+ (_Safe_iterator<_It, _Sq, std::bidirectional_iterator_tag>
+ ::operator--(int)): ...here.
+ (_Safe_iterator<_It, _Sq, _Cat>::_M_decrementable()): Move...
+ (_Safe_iterator<_It, _Sq, std::bidirectional_iterator_tag>
+ ::_M_decrementable()): ...here.
+ (_Safe_iterator<_It, _Sq, std::random_access_iterator_tag>): New.
+ (_Safe_iterator<_It, _Sq, _Cat>::operator[](const difference_type&)):
+ Move...
+ (_Safe_iterator<_It, _Sq, std::random_access_iterator_tag>
+ ::operator[](const difference_type&)): ...here.
+ (_Safe_iterator<_It, _Sq, _Cat>::operator+=(const difference_type&)):
+ Move...
+ (_Safe_iterator<_It, _Sq, std::random_access_iterator_tag>
+ ::operator+=(const difference_type&)): ...here.
+ (_Safe_iterator<_It, _Sq, _Cat>::operator+(const difference_type&)):
+ Move...
+ (_Safe_iterator<_It, _Sq, std::random_access_iterator_tag>
+ ::operator+(const difference_type&)): ...here.
+ (_Safe_iterator<_It, _Sq, _Cat>::operator-=(const difference_type&)):
+ Move...
+ (_Safe_iterator<_It, _Sq, std::random_access_iterator_tag>
+ ::operator-=(const difference_type&)): ...here.
+ (_Safe_iterator<_It, _Sq, _Cat>::operator-(const difference_type&)):
+ Move...
+ (_Safe_iterator<_It, _Sq, std::random_access_iterator_tag>
+ ::operator-(const difference_type&)): ...here.
+ (operator<(const _Safe_iterator<>&, const _Safe_iterator<>&)):
+ Constraint to random access iterators.
+ (operator<=(const _Safe_iterator<>&, const _Safe_iterator<>&)):
+ Likewise.
+ (operator>(const _Safe_iterator<>&, const _Safe_iterator<>&)): Likewise.
+ (operator>=(const _Safe_iterator<>&, const _Safe_iterator<>&)):
+ Likewise.
+ (operator-(const _Safe_iterator<>&, const _Safe_iterator<>&)): Likewise.
+ (operator+(const difference_type&, const _Safe_iterator<>&)): Likewise.
+ (__check_dereferenceable(const _Safe_iterator<>&)): Remove.
+ (__get_distance): Remove.
+ (__get_distance_from_begin): Remove.
+ (__get_distance_to_end): Remove.
+ (struct __is_safe_random_iterator<_Safe_iterator<>>): Remove partial
+ specialization.
+ (__base(const _Safe_iterator<>&, std::input_iterator_tag)): Remove.
+ (__base(const _Safe_iterator<>&, std::random_access_iterator_tag)): Remove.
+ (__base(const _Safe_iterator<>&)): Constraint to random access iterator.
+ * include/debug/safe_iterator.tcc
+ (_Safe_iterator<>::_M_get_distance_from_begin()): New.
+ (_Safe_iterator<>::_M_get_distance_to_end()): New.
+ (_Safe_iterator<>::_M_get_distance_to(const _Safe_iterator<>&)): New.
+ (_Safe_iterator<_It, _Seq, std::random_access_iterator_tag>
+ ::_M_valid_range): New.
+ * include/debug/safe_local_iterator.h
+ (_Safe_local_iterator<>::_Const_local_iterator): Remove.
+ (_Safe_local_iterator<>::_IsConstant): New.
+ (_Safe_local_iterator<>::_OtherIterator): New.
+ (_Safe_local_iterator<_It, _Cont>::_Safe_local_iterator<_MutIte, _Cont>(
+ const _Safe_local_iterator<_MutIte, _Seq>&)): Add _IsConstant::__value
+ in __gnu_cxx::__enable_if condition. If singular compare base iterator
+ with _MutIte rather than _It.
+ (_Safe_local_iterator<>::_S_constant): Make constexpr.
+ (_Safe_local_iterator<>::_M_get_distance_to): New.
+ (__check_dereferenceable(const _Safe_local_iterator<>&)): Remove.
+ (__get_distance(const _Safe_local_iterator<>&,
+ const _Safe_local_iterator<>&, std::input_iterator_tag)): Remove.
+ (__valid_range(const _Safe_local_iterator<>&,
+ const _Safe_local_iterator<>&)): New.
+ * include/debug/safe_local_iterator.tcc
+ (_Safe_local_iterator<>::_M_get_distance_to): New.
+ * include/debug/deque (std::__debug::deque<>): Add
+ ::__gnu_debug::_Safe_iterator<> friend declaration.
+ * include/debug/forward_list (std::__debug::forward_list<>): Likewise.
+ * include/debug/list (std::__debug::list<>): Likewise.
+ * include/debug/map.h (std::__debug::map<>): Likewise.
+ * include/debug/multimap.h (std::__debug::multimap<>): Likewise.
+ * include/debug/set.h (std::__debug::set<>): Likewise.
+ * include/debug/multiset.h (std::__debug::multiset<>): Likewise.
+ * include/debug/string (std::__debug::basic_string<>): Likewise.
+ * include/debug/unordered_map (std::__debug::unordered_map<>): Likewise
+ and add ::__gnu_debug::_Safe_local_iterator<> friend declaration.
+ (std::__debug::unordered_multimap<>): Likewise.
+ * include/debug/unordered_set (std::__debug::unordered_set<>): Likewise.
+ (std::__debug::unordered_multiset<>): Likewise.
+ * include/debug/formatter.h: Adapt.
+ * include/debug/helper_functions.h
+ (__gnu_debug::_Safe_local_iterator<>): Add declaration.
+ (__get_distance<_Ite>(_Ite, _Ite, std::random_access_iterator_tag):
+ Pass parameter by copy.
+ (__get_distance<_Ite>(_Ite, _Ite, std::input_iterator_tag): Likewise.
+ (__get_distance<_Ite>(_Ite, _Ite): Likewise.
+ (__valid_range_aux<_Integral>): Pass _Integral by copy.
+ (__valid_range<_InputIterator>): Pass _InputIterator by copy.
+ (__valid_range<>(const _Safe_iterator<>&,
+ const _Safe_iterator<>&, typename _Distance_traits<>::__type&)):
+ Declare.
+ (__valid_range(const _Safe_local_iterator<>&,
+ const _Safe_local_iterator<>&, typename _Distance_traits<>::__type&)):
+ Declare.
+ (__valid_range<>(const _Safe_iterator<>&, const _Safe_iterator<>&)):
+ Declare.
+ (__valid_range(const _Safe_local_iterator<>&, const _Safe_local_iterator<>&)):
+ Declare.
+ (__can_advance): Adapt.
+ (struct __is_safe_random_iterator<>): Remove.
+ (struct _SIter_base<>): Remove.
+ * include/debug/functions.h: Include <bits/stl_iterator.h>.
+ (__check_dereferenceable): Remove.
+ (__foreign_iterator_aux4, __foreign_iterator_aux3): Adapt.
+ (__foreign_iterator_aux2, __foreign_iterator_aux): Adapt.
+ (__foreign_iterator): Adapt.
+ * include/debug/stl_iterator.h
+ (__is_safe_random_iterator<std::reverse_iterator<>>): Remove.
+ (__base(const std::reverse_iterator<_Safe_iterator<_It, _Sq>)):
+ Constraint for random access iterators.
+ (__niter_base): Adapt.
+ * testsuite/util/testsuite_containers.h:
+ Include <bits/boost_concept_check.h>.
+ (iterator_concept_checks<_It, _Mutable, _Category>): New.
+ (citerator<_Cont>::forward_members::forward_members()): Instantiate
+ latter for container iterator and const_iterator.
+ * testsuite/23_containers/list/68222_neg.cc: New.
+ * testsuite/23_containers/vector/cons/destructible_debug_neg.cc: Adapt
+ line number.
+ * testsuite/23_containers/unordered_set/debug/debug_functions.cc:
+ (test01): Remove.
+ * testsuite/23_containers/vector/debug/debug_functions.cc (test01):
+ Remove.
+
2018-08-22 Jonathan Wakely <jwakely@redhat.com>
PR libstdc++/77854
diff --git a/libstdc++-v3/config/abi/pre/gnu.ver b/libstdc++-v3/config/abi/pre/gnu.ver
index 03b23200a1a..4766d28f708 100644
--- a/libstdc++-v3/config/abi/pre/gnu.ver
+++ b/libstdc++-v3/config/abi/pre/gnu.ver
@@ -2049,6 +2049,11 @@ GLIBCXX_3.4.26 {
# std::__throw_ios_failure(const char*, int);
_ZSt19__throw_ios_failurePKci;
+ # std::wistream::ignore(long long, unsigned short)
+ _ZNSt13basic_istreamIwSt11char_traitsIwEE6ignoreExt;
+ # std::basic_filebuf::open(const wchar_t*, openmode)
+ _ZNSt13basic_filebufI[cw]St11char_traitsI[cw]EE4openEPKwSt13_Ios_Openmode;
+
} GLIBCXX_3.4.25;
# Symbols in the support library (libsupc++) have their own tag.
diff --git a/libstdc++-v3/configure.host b/libstdc++-v3/configure.host
index caea9de9c76..155a3cdea1b 100644
--- a/libstdc++-v3/configure.host
+++ b/libstdc++-v3/configure.host
@@ -230,16 +230,15 @@ case "${host_os}" in
os_include_dir="os/newlib"
OPT_LDFLAGS="${OPT_LDFLAGS} \$(lt_host_flags)"
;;
- darwin | darwin[1-7] | darwin[1-7].*)
- # On Darwin, performance is improved if libstdc++ is single-module.
- # Up to at least 10.3.7, -flat_namespace is required for proper
- # treatment of coalesced symbols.
+ darwin[4-7] | darwin[4-7].*)
+ # For earlier Darwin, performance is improved if libstdc++ is
+ # single-module. Up to at least 10.3.7, -flat_namespace is required
+ # for proper treatment of coalesced symbols.
OPT_LDFLAGS="${OPT_LDFLAGS} -Wl,-single_module -Wl,-flat_namespace"
os_include_dir="os/bsd/darwin"
;;
- darwin[89] | darwin[89].* | darwin[1-9][0-9]* )
- # On Darwin, performance is improved if libstdc++ is single-module,
- # and on 8+ compatibility is better if not -flat_namespace.
+ darwin8 | darwin8.* )
+ # For 8+ compatibility is better if not -flat_namespace.
OPT_LDFLAGS="${OPT_LDFLAGS} -Wl,-single_module"
case "${host_cpu}" in
i[34567]86 | x86_64)
@@ -248,6 +247,10 @@ case "${host_os}" in
esac
os_include_dir="os/bsd/darwin"
;;
+ darwin*)
+ # Post Darwin8, defaults should be sufficient.
+ os_include_dir="os/bsd/darwin"
+ ;;
*djgpp*) # leading * picks up "msdosdjgpp"
os_include_dir="os/djgpp"
error_constants_dir="os/djgpp"
diff --git a/libstdc++-v3/include/bits/basic_string.h b/libstdc++-v3/include/bits/basic_string.h
index c9463989ddc..ba94b51f616 100644
--- a/libstdc++-v3/include/bits/basic_string.h
+++ b/libstdc++-v3/include/bits/basic_string.h
@@ -100,7 +100,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CXX11
/// Value returned by various member functions when they fail.
static const size_type npos = static_cast<size_type>(-1);
- private:
+ protected:
// type used for positions in insert, erase etc.
#if __cplusplus < 201103L
typedef iterator __const_iterator;
@@ -108,6 +108,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CXX11
typedef const_iterator __const_iterator;
#endif
+ private:
#if __cplusplus > 201402L
// A helper type for avoiding boiler-plate.
typedef basic_string_view<_CharT, _Traits> __sv_type;
@@ -3119,6 +3120,10 @@ _GLIBCXX_END_NAMESPACE_CXX11
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
+ protected:
+ // type used for positions in insert, erase etc.
+ typedef iterator __const_iterator;
+
private:
// _Rep: string representation
// Invariants:
diff --git a/libstdc++-v3/include/bits/deque.tcc b/libstdc++-v3/include/bits/deque.tcc
index 8724a19504b..a22948a9753 100644
--- a/libstdc++-v3/include/bits/deque.tcc
+++ b/libstdc++-v3/include/bits/deque.tcc
@@ -443,7 +443,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
std::forward_iterator_tag)
{
const size_type __n = std::distance(__first, __last);
- this->_M_initialize_map(__n);
+ this->_M_initialize_map(_S_check_init_len(__n, _M_get_Tp_allocator()));
_Map_pointer __cur_node;
__try
@@ -484,6 +484,10 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
_M_push_back_aux(const value_type& __t)
#endif
{
+ if (size() == max_size())
+ __throw_length_error(
+ __N("cannot create std::deque larger than max_size()"));
+
_M_reserve_map_at_back();
*(this->_M_impl._M_finish._M_node + 1) = this->_M_allocate_node();
__try
@@ -519,6 +523,10 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
_M_push_front_aux(const value_type& __t)
#endif
{
+ if (size() == max_size())
+ __throw_length_error(
+ __N("cannot create std::deque larger than max_size()"));
+
_M_reserve_map_at_front();
*(this->_M_impl._M_start._M_node - 1) = this->_M_allocate_node();
__try
diff --git a/libstdc++-v3/include/bits/hashtable_policy.h b/libstdc++-v3/include/bits/hashtable_policy.h
index d7497711071..66fbfbe5f21 100644
--- a/libstdc++-v3/include/bits/hashtable_policy.h
+++ b/libstdc++-v3/include/bits/hashtable_policy.h
@@ -511,8 +511,11 @@ namespace __detail
// Equivalent to return __n ? std::ceil2(__n) : 0;
if (__n < 2)
return __n;
- return 1ul << (numeric_limits<unsigned long>::digits
- - __builtin_clzl(__n - 1ul));
+ const unsigned __lz = sizeof(size_t) > sizeof(long)
+ ? __builtin_clzll(__n - 1ull)
+ : __builtin_clzl(__n - 1ul);
+ // Doing two shifts avoids undefined behaviour when __lz == 0.
+ return (size_t(1) << (numeric_limits<size_t>::digits - __lz - 1)) << 1;
}
/// Rehash policy providing power of 2 bucket numbers. Avoids modulo
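
The __clp2 change above rounds a bucket count up to a power of two without undefined behaviour: it picks the clz builtin matching size_t's width (clzll on LLP64 targets where size_t is wider than long) and splits the final shift in two so the shift count never equals the width of the type. A standalone equivalent; clp2_sketch is an illustrative name:

    #include <climits>
    #include <cstddef>

    std::size_t
    clp2_sketch (std::size_t n)               /* smallest power of two >= n */
    {
      if (n < 2)
        return n;
      const unsigned lz = sizeof (std::size_t) > sizeof (long)
                          ? __builtin_clzll (n - 1ull)
                          : __builtin_clzl (n - 1ul);
      const unsigned digits = sizeof (std::size_t) * CHAR_BIT;
      /* Shift by digits - lz - 1 (always < digits), then by 1: no single
         shift reaches the full width, even when lz == 0.  */
      return (std::size_t (1) << (digits - lz - 1)) << 1;
    }
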
diff --git a/libstdc++-v3/include/bits/stl_deque.h b/libstdc++-v3/include/bits/stl_deque.h
index 58a01c894c0..555be16dcd5 100644
--- a/libstdc++-v3/include/bits/stl_deque.h
+++ b/libstdc++-v3/include/bits/stl_deque.h
@@ -493,7 +493,6 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
public:
typedef _Alloc allocator_type;
- typedef typename _Alloc_traits::size_type size_type;
allocator_type
get_allocator() const _GLIBCXX_NOEXCEPT
@@ -535,7 +534,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
: _Deque_base(std::move(__x), typename _Alloc_traits::is_always_equal{})
{ }
- _Deque_base(_Deque_base&& __x, const allocator_type& __a, size_type __n)
+ _Deque_base(_Deque_base&& __x, const allocator_type& __a, size_t __n)
: _M_impl(__a)
{
if (__x.get_allocator() == __a)
@@ -930,7 +929,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
*/
explicit
deque(size_type __n, const allocator_type& __a = allocator_type())
- : _Base(__a, __n)
+ : _Base(__a, _S_check_init_len(__n, __a))
{ _M_default_initialize(); }
/**
@@ -943,7 +942,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
*/
deque(size_type __n, const value_type& __value,
const allocator_type& __a = allocator_type())
- : _Base(__a, __n)
+ : _Base(__a, _S_check_init_len(__n, __a))
{ _M_fill_initialize(__value); }
#else
/**
@@ -957,7 +956,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
explicit
deque(size_type __n, const value_type& __value = value_type(),
const allocator_type& __a = allocator_type())
- : _Base(__a, __n)
+ : _Base(__a, _S_check_init_len(__n, __a))
{ _M_fill_initialize(__value); }
#endif
@@ -1298,7 +1297,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
/** Returns the size() of the largest possible %deque. */
size_type
max_size() const _GLIBCXX_NOEXCEPT
- { return _Alloc_traits::max_size(_M_get_Tp_allocator()); }
+ { return _S_max_size(_M_get_Tp_allocator()); }
#if __cplusplus >= 201103L
/**
@@ -1875,10 +1874,28 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
void
_M_initialize_dispatch(_Integer __n, _Integer __x, __true_type)
{
- _M_initialize_map(static_cast<size_type>(__n));
+ _M_initialize_map(_S_check_init_len(static_cast<size_type>(__n),
+ _M_get_Tp_allocator()));
_M_fill_initialize(__x);
}
+ static size_t
+ _S_check_init_len(size_t __n, const allocator_type& __a)
+ {
+ if (__n > _S_max_size(__a))
+ __throw_length_error(
+ __N("cannot create std::deque larger than max_size()"));
+ return __n;
+ }
+
+ static size_type
+ _S_max_size(const _Tp_alloc_type& __a) _GLIBCXX_NOEXCEPT
+ {
+ const size_t __diffmax = __gnu_cxx::__numeric_traits<ptrdiff_t>::__max;
+ const size_t __allocmax = _Alloc_traits::max_size(__a);
+ return (std::min)(__diffmax, __allocmax);
+ }
+
// called by the range constructor to implement [23.1.1]/9
template<typename _InputIterator>
void
diff --git a/libstdc++-v3/include/bits/stl_vector.h b/libstdc++-v3/include/bits/stl_vector.h
index 424971a02f2..6bb75b7f8fd 100644
--- a/libstdc++-v3/include/bits/stl_vector.h
+++ b/libstdc++-v3/include/bits/stl_vector.h
@@ -459,7 +459,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
*/
explicit
vector(size_type __n, const allocator_type& __a = allocator_type())
- : _Base(__n, __a)
+ : _Base(_S_check_init_len(__n, __a), __a)
{ _M_default_initialize(__n); }
/**
@@ -472,7 +472,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
*/
vector(size_type __n, const value_type& __value,
const allocator_type& __a = allocator_type())
- : _Base(__n, __a)
+ : _Base(_S_check_init_len(__n, __a), __a)
{ _M_fill_initialize(__n, __value); }
#else
/**
@@ -486,7 +486,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
explicit
vector(size_type __n, const value_type& __value = value_type(),
const allocator_type& __a = allocator_type())
- : _Base(__n, __a)
+ : _Base(_S_check_init_len(__n, __a), __a)
{ _M_fill_initialize(__n, __value); }
#endif
@@ -872,7 +872,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
/** Returns the size() of the largest possible %vector. */
size_type
max_size() const _GLIBCXX_NOEXCEPT
- { return _Alloc_traits::max_size(_M_get_Tp_allocator()); }
+ { return _S_max_size(_M_get_Tp_allocator()); }
#if __cplusplus >= 201103L
/**
@@ -1485,7 +1485,8 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
void
_M_initialize_dispatch(_Integer __n, _Integer __value, __true_type)
{
- this->_M_impl._M_start = _M_allocate(static_cast<size_type>(__n));
+ this->_M_impl._M_start = _M_allocate(_S_check_init_len(
+ static_cast<size_type>(__n), _M_get_Tp_allocator()));
this->_M_impl._M_end_of_storage =
this->_M_impl._M_start + static_cast<size_type>(__n);
_M_fill_initialize(static_cast<size_type>(__n), __value);
@@ -1528,7 +1529,8 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
std::forward_iterator_tag)
{
const size_type __n = std::distance(__first, __last);
- this->_M_impl._M_start = this->_M_allocate(__n);
+ this->_M_impl._M_start
+ = this->_M_allocate(_S_check_init_len(__n, _M_get_Tp_allocator()));
this->_M_impl._M_end_of_storage = this->_M_impl._M_start + __n;
this->_M_impl._M_finish =
std::__uninitialized_copy_a(__first, __last,
@@ -1707,10 +1709,28 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
if (max_size() - size() < __n)
__throw_length_error(__N(__s));
- const size_type __len = size() + std::max(size(), __n);
+ const size_type __len = size() + (std::max)(size(), __n);
return (__len < size() || __len > max_size()) ? max_size() : __len;
}
+ // Called by constructors to check initial size.
+ static size_type
+ _S_check_init_len(size_type __n, const allocator_type& __a)
+ {
+ if (__n > _S_max_size(_Tp_alloc_type(__a)))
+ __throw_length_error(
+ __N("cannot create std::vector larger than max_size()"));
+ return __n;
+ }
+
+ static size_type
+ _S_max_size(const _Tp_alloc_type& __a) _GLIBCXX_NOEXCEPT
+ {
+ const size_t __diffmax = __gnu_cxx::__numeric_traits<ptrdiff_t>::__max;
+ const size_t __allocmax = _Alloc_traits::max_size(__a);
+ return (std::min)(__diffmax, __allocmax);
+ }
+
// Internal erase functions follow.
// Called by erase(q1,q2), clear(), resize(), _M_fill_assign,
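
The new _S_check_init_len/_S_max_size helpers in vector and deque (PR libstdc++/78448) cap the effective maximum at the smaller of PTRDIFF_MAX and the allocator's max_size(), and make the sized constructors validate the request before any allocation happens. A freestanding sketch of the same check; the names are illustrative, not the libstdc++ internals:

    #include <algorithm>
    #include <cstddef>
    #include <limits>
    #include <memory>
    #include <stdexcept>

    template <typename Alloc>
    std::size_t
    max_init_len (const Alloc &a)
    {
      const std::size_t diffmax  = std::numeric_limits<std::ptrdiff_t>::max ();
      const std::size_t allocmax = std::allocator_traits<Alloc>::max_size (a);
      return std::min (diffmax, allocmax);
    }

    template <typename Alloc>
    std::size_t
    check_init_len (std::size_t n, const Alloc &a)
    {
      if (n > max_init_len (a))
        throw std::length_error ("cannot create container larger than max_size()");
      return n;   /* safe to pass on to the allocating base */
    }
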
diff --git a/libstdc++-v3/include/bits/vector.tcc b/libstdc++-v3/include/bits/vector.tcc
index 86a711713b2..a1d114a0a9a 100644
--- a/libstdc++-v3/include/bits/vector.tcc
+++ b/libstdc++-v3/include/bits/vector.tcc
@@ -293,6 +293,7 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER
if (__len > capacity())
{
+ _S_check_init_len(__len, _M_get_Tp_allocator());
pointer __tmp(_M_allocate_and_copy(__len, __first, __last));
_GLIBCXX_ASAN_ANNOTATE_REINIT;
std::_Destroy(this->_M_impl._M_start, this->_M_impl._M_finish,
diff --git a/libstdc++-v3/include/debug/deque b/libstdc++-v3/include/debug/deque
index 93b82cf0cdc..ad86b5c8f38 100644
--- a/libstdc++-v3/include/debug/deque
+++ b/libstdc++-v3/include/debug/deque
@@ -31,6 +31,11 @@
#pragma GCC system_header
+#include <bits/c++config.h>
+namespace std _GLIBCXX_VISIBILITY(default) { namespace __debug {
+ template<typename _Tp, typename _Allocator> class deque;
+} } // namespace std::__debug
+
#include <deque>
#include <debug/safe_sequence.h>
#include <debug/safe_container.h>
@@ -56,6 +61,9 @@ namespace __debug
typedef typename _Base::iterator _Base_iterator;
typedef __gnu_debug::_Equal_to<_Base_const_iterator> _Equal;
+ template<typename _ItT, typename _SeqT, typename _CatT>
+ friend class ::__gnu_debug::_Safe_iterator;
+
public:
typedef typename _Base::reference reference;
typedef typename _Base::const_reference const_reference;
diff --git a/libstdc++-v3/include/debug/formatter.h b/libstdc++-v3/include/debug/formatter.h
index 383a3406d34..1f03f251488 100644
--- a/libstdc++-v3/include/debug/formatter.h
+++ b/libstdc++-v3/include/debug/formatter.h
@@ -76,7 +76,7 @@ namespace __gnu_debug
class _Safe_sequence_base;
- template<typename _Iterator, typename _Sequence>
+ template<typename _Iterator, typename _Sequence, typename _Category>
class _Safe_iterator;
template<typename _Iterator, typename _Sequence>
@@ -263,8 +263,8 @@ namespace __gnu_debug
_M_variant._M_string._M_value = __value;
}
- template<typename _Iterator, typename _Sequence>
- _Parameter(_Safe_iterator<_Iterator, _Sequence> const& __it,
+ template<typename _Iterator, typename _Sequence, typename _Category>
+ _Parameter(_Safe_iterator<_Iterator, _Sequence, _Category> const& __it,
const char* __name, _Is_iterator)
: _M_kind(__iterator), _M_variant()
{
@@ -378,10 +378,10 @@ namespace __gnu_debug
= _S_reverse_state(_M_variant._M_iterator._M_state);
}
- template<typename _Iterator, typename _Sequence>
- _Parameter(std::reverse_iterator<_Safe_iterator<_Iterator,
- _Sequence>> const& __it,
- const char* __name, _Is_iterator)
+ template<typename _Iterator, typename _Sequence, typename _Category>
+ _Parameter(std::reverse_iterator<_Safe_iterator<_Iterator, _Sequence,
+ _Category>> const& __it,
+ const char* __name, _Is_iterator)
: _Parameter(__it.base(), __name, _Is_iterator{})
{
_M_variant._M_iterator._M_type
@@ -396,10 +396,10 @@ namespace __gnu_debug
: _Parameter(__it.base(), __name, _Is_iterator{})
{ _M_variant._M_iterator._M_type = _GLIBCXX_TYPEID(__it); }
- template<typename _Iterator, typename _Sequence>
- _Parameter(std::move_iterator<_Safe_iterator<_Iterator,
- _Sequence>> const& __it,
- const char* __name, _Is_iterator)
+ template<typename _Iterator, typename _Sequence, typename _Category>
+ _Parameter(std::move_iterator<_Safe_iterator<_Iterator, _Sequence,
+ _Category>> const& __it,
+ const char* __name, _Is_iterator)
: _Parameter(__it.base(), __name, _Is_iterator{})
{
_M_variant._M_iterator._M_type
diff --git a/libstdc++-v3/include/debug/forward_list b/libstdc++-v3/include/debug/forward_list
index 75d4f63af8f..e542447badd 100644
--- a/libstdc++-v3/include/debug/forward_list
+++ b/libstdc++-v3/include/debug/forward_list
@@ -31,6 +31,11 @@
#pragma GCC system_header
+#include <bits/c++config.h>
+namespace std _GLIBCXX_VISIBILITY(default) { namespace __debug {
+ template<typename _Tp, typename _Allocator> class forward_list;
+} } // namespace std::__debug
+
#include <forward_list>
#include <debug/safe_sequence.h>
#include <debug/safe_container.h>
@@ -193,6 +198,9 @@ namespace __debug
typedef typename _Base::iterator _Base_iterator;
typedef typename _Base::const_iterator _Base_const_iterator;
+ template<typename _ItT, typename _SeqT, typename _CatT>
+ friend class ::__gnu_debug::_Safe_iterator;
+
public:
typedef typename _Base::reference reference;
typedef typename _Base::const_reference const_reference;
diff --git a/libstdc++-v3/include/debug/functions.h b/libstdc++-v3/include/debug/functions.h
index ce501f20c31..21b60df16ed 100644
--- a/libstdc++-v3/include/debug/functions.h
+++ b/libstdc++-v3/include/debug/functions.h
@@ -31,7 +31,9 @@
#include <bits/move.h> // for __addressof
#include <bits/stl_function.h> // for less
+
#if __cplusplus >= 201103L
+# include <bits/stl_iterator.h> // for __miter_base
# include <type_traits> // for is_lvalue_reference and conditional.
#endif
@@ -64,19 +66,6 @@ namespace __gnu_debug
__check_singular(const _Tp* __ptr)
{ return __ptr == 0; }
- /** Assume that some arbitrary iterator is dereferenceable, because we
- can't prove that it isn't. */
- template<typename _Iterator>
- inline bool
- __check_dereferenceable(const _Iterator&)
- { return true; }
-
- /** Non-NULL pointers are dereferenceable. */
- template<typename _Tp>
- inline bool
- __check_dereferenceable(const _Tp* __ptr)
- { return __ptr; }
-
/* Checks that [first, last) is a valid range, and then returns
* __first. This routine is useful when we can't use a separate
* assertion statement because, e.g., we are in a constructor.
@@ -95,10 +84,11 @@ namespace __gnu_debug
}
/* Handle the case where __other is a pointer to _Sequence::value_type. */
- template<typename _Iterator, typename _Sequence>
+ template<typename _Iterator, typename _Sequence, typename _Category>
inline bool
- __foreign_iterator_aux4(const _Safe_iterator<_Iterator, _Sequence>& __it,
- const typename _Sequence::value_type* __other)
+ __foreign_iterator_aux4(
+ const _Safe_iterator<_Iterator, _Sequence, _Category>& __it,
+ const typename _Sequence::value_type* __other)
{
typedef const typename _Sequence::value_type* _PointerType;
typedef std::less<_PointerType> _Less;
@@ -116,18 +106,20 @@ namespace __gnu_debug
}
/* Fallback overload for when we can't tell, assume it is valid. */
- template<typename _Iterator, typename _Sequence>
+ template<typename _Iterator, typename _Sequence, typename _Category>
inline bool
- __foreign_iterator_aux4(const _Safe_iterator<_Iterator, _Sequence>&, ...)
+ __foreign_iterator_aux4(
+ const _Safe_iterator<_Iterator, _Sequence, _Category>&, ...)
{ return true; }
/* Handle sequences with contiguous storage */
- template<typename _Iterator, typename _Sequence, typename _InputIterator>
+ template<typename _Iterator, typename _Sequence, typename _Category,
+ typename _InputIterator>
inline bool
- __foreign_iterator_aux3(const _Safe_iterator<_Iterator, _Sequence>& __it,
- const _InputIterator& __other,
- const _InputIterator& __other_end,
- std::__true_type)
+ __foreign_iterator_aux3(
+ const _Safe_iterator<_Iterator, _Sequence, _Category>& __it,
+ const _InputIterator& __other, const _InputIterator& __other_end,
+ std::__true_type)
{
if (__other == __other_end)
return true; // inserting nothing is safe even if not foreign iters
@@ -137,36 +129,46 @@ namespace __gnu_debug
}
/* Handle non-contiguous containers, assume it is valid. */
- template<typename _Iterator, typename _Sequence, typename _InputIterator>
+ template<typename _Iterator, typename _Sequence, typename _Category,
+ typename _InputIterator>
inline bool
- __foreign_iterator_aux3(const _Safe_iterator<_Iterator, _Sequence>&,
- const _InputIterator&, const _InputIterator&,
- std::__false_type)
+ __foreign_iterator_aux3(
+ const _Safe_iterator<_Iterator, _Sequence, _Category>&,
+ const _InputIterator&, const _InputIterator&,
+ std::__false_type)
{ return true; }
/** Handle debug iterators from the same type of container. */
- template<typename _Iterator, typename _Sequence, typename _OtherIterator>
+ template<typename _Iterator, typename _Sequence, typename _Category,
+ typename _OtherIterator>
inline bool
- __foreign_iterator_aux2(const _Safe_iterator<_Iterator, _Sequence>& __it,
- const _Safe_iterator<_OtherIterator, _Sequence>& __other,
- const _Safe_iterator<_OtherIterator, _Sequence>&)
+ __foreign_iterator_aux2(
+ const _Safe_iterator<_Iterator, _Sequence, _Category>& __it,
+ const _Safe_iterator<_OtherIterator, _Sequence, _Category>& __other,
+ const _Safe_iterator<_OtherIterator, _Sequence, _Category>&)
{ return __it._M_get_sequence() != __other._M_get_sequence(); }
/** Handle debug iterators from different types of container. */
- template<typename _Iterator, typename _Sequence, typename _OtherIterator,
- typename _OtherSequence>
- inline bool
- __foreign_iterator_aux2(const _Safe_iterator<_Iterator, _Sequence>& __it,
- const _Safe_iterator<_OtherIterator, _OtherSequence>&,
- const _Safe_iterator<_OtherIterator, _OtherSequence>&)
+ template<typename _Iterator, typename _Sequence, typename _Category,
+ typename _OtherIterator, typename _OtherSequence,
+ typename _OtherCategory>
+ inline bool
+ __foreign_iterator_aux2(
+ const _Safe_iterator<_Iterator, _Sequence, _Category>&,
+ const _Safe_iterator<_OtherIterator, _OtherSequence,
+ _OtherCategory>&,
+ const _Safe_iterator<_OtherIterator, _OtherSequence,
+ _OtherCategory>&)
{ return true; }
/* Handle non-debug iterators. */
- template<typename _Iterator, typename _Sequence, typename _InputIterator>
+ template<typename _Iterator, typename _Sequence, typename _Category,
+ typename _InputIterator>
inline bool
- __foreign_iterator_aux2(const _Safe_iterator<_Iterator, _Sequence>& __it,
- const _InputIterator& __other,
- const _InputIterator& __other_end)
+ __foreign_iterator_aux2(
+ const _Safe_iterator<_Iterator, _Sequence, _Category>& __it,
+ const _InputIterator& __other,
+ const _InputIterator& __other_end)
{
#if __cplusplus < 201103L
typedef _Is_contiguous_sequence<_Sequence> __tag;
@@ -181,31 +183,34 @@ namespace __gnu_debug
}
/* Handle the case where we aren't really inserting a range after all */
- template<typename _Iterator, typename _Sequence, typename _Integral>
+ template<typename _Iterator, typename _Sequence, typename _Category,
+ typename _Integral>
inline bool
- __foreign_iterator_aux(const _Safe_iterator<_Iterator, _Sequence>&,
- _Integral, _Integral,
- std::__true_type)
+ __foreign_iterator_aux(
+ const _Safe_iterator<_Iterator, _Sequence, _Category>&,
+ _Integral, _Integral, std::__true_type)
{ return true; }
/* Handle all iterators. */
- template<typename _Iterator, typename _Sequence,
+ template<typename _Iterator, typename _Sequence, typename _Category,
typename _InputIterator>
inline bool
- __foreign_iterator_aux(const _Safe_iterator<_Iterator, _Sequence>& __it,
- _InputIterator __other, _InputIterator __other_end,
- std::__false_type)
+ __foreign_iterator_aux(
+ const _Safe_iterator<_Iterator, _Sequence, _Category>& __it,
+ _InputIterator __other, _InputIterator __other_end,
+ std::__false_type)
{
return _Insert_range_from_self_is_safe<_Sequence>::__value
|| __foreign_iterator_aux2(__it, std::__miter_base(__other),
std::__miter_base(__other_end));
}
- template<typename _Iterator, typename _Sequence,
+ template<typename _Iterator, typename _Sequence, typename _Category,
typename _InputIterator>
inline bool
- __foreign_iterator(const _Safe_iterator<_Iterator, _Sequence>& __it,
- _InputIterator __other, _InputIterator __other_end)
+ __foreign_iterator(
+ const _Safe_iterator<_Iterator, _Sequence, _Category>& __it,
+ _InputIterator __other, _InputIterator __other_end)
{
typedef typename std::__is_integer<_InputIterator>::__type _Integral;
return __foreign_iterator_aux(__it, __other, __other_end, _Integral());
diff --git a/libstdc++-v3/include/debug/helper_functions.h b/libstdc++-v3/include/debug/helper_functions.h
index 2073df95191..21d969310dd 100644
--- a/libstdc++-v3/include/debug/helper_functions.h
+++ b/libstdc++-v3/include/debug/helper_functions.h
@@ -37,9 +37,14 @@
namespace __gnu_debug
{
- template<typename _Iterator, typename _Sequence>
+ template<typename _Iterator, typename _Sequence, typename _Category>
class _Safe_iterator;
+#if __cplusplus >= 201103L
+ template<typename _Iterator, typename _Sequence>
+ class _Safe_local_iterator;
+#endif
+
/** The precision to which we can calculate the distance between
* two iterators.
*/
@@ -83,13 +88,13 @@ namespace __gnu_debug
*/
template<typename _Iterator>
inline typename _Distance_traits<_Iterator>::__type
- __get_distance(const _Iterator& __lhs, const _Iterator& __rhs,
+ __get_distance(_Iterator __lhs, _Iterator __rhs,
std::random_access_iterator_tag)
{ return std::make_pair(__rhs - __lhs, __dp_exact); }
template<typename _Iterator>
inline typename _Distance_traits<_Iterator>::__type
- __get_distance(const _Iterator& __lhs, const _Iterator& __rhs,
+ __get_distance(_Iterator __lhs, _Iterator __rhs,
std::input_iterator_tag)
{
if (__lhs == __rhs)
@@ -100,7 +105,7 @@ namespace __gnu_debug
template<typename _Iterator>
inline typename _Distance_traits<_Iterator>::__type
- __get_distance(const _Iterator& __lhs, const _Iterator& __rhs)
+ __get_distance(_Iterator __lhs, _Iterator __rhs)
{ return __get_distance(__lhs, __rhs, std::__iterator_category(__lhs)); }
/** We say that integral types for a valid range, and defer to other
@@ -109,7 +114,7 @@ namespace __gnu_debug
*/
template<typename _Integral>
inline bool
- __valid_range_aux(const _Integral&, const _Integral&,
+ __valid_range_aux(_Integral, _Integral,
typename _Distance_traits<_Integral>::__type& __dist,
std::__true_type)
{
@@ -117,13 +122,12 @@ namespace __gnu_debug
return true;
}
- /** We have iterators, so figure out what kind of iterators that are
+ /** We have iterators, so figure out what kind of iterators they are
* to see if we can check the range ahead of time.
*/
template<typename _InputIterator>
inline bool
- __valid_range_aux(const _InputIterator& __first,
- const _InputIterator& __last,
+ __valid_range_aux(_InputIterator __first, _InputIterator __last,
typename _Distance_traits<_InputIterator>::__type& __dist,
std::__false_type)
{
@@ -152,61 +156,69 @@ namespace __gnu_debug
*/
template<typename _InputIterator>
inline bool
- __valid_range(const _InputIterator& __first, const _InputIterator& __last,
+ __valid_range(_InputIterator __first, _InputIterator __last,
typename _Distance_traits<_InputIterator>::__type& __dist)
{
typedef typename std::__is_integer<_InputIterator>::__type _Integral;
return __valid_range_aux(__first, __last, __dist, _Integral());
}
+ template<typename _Iterator, typename _Sequence, typename _Category>
+ bool
+ __valid_range(const _Safe_iterator<_Iterator, _Sequence, _Category>&,
+ const _Safe_iterator<_Iterator, _Sequence, _Category>&,
+ typename _Distance_traits<_Iterator>::__type&);
+
+#if __cplusplus >= 201103L
+ template<typename _Iterator,typename _Sequence>
+ bool
+ __valid_range(const _Safe_local_iterator<_Iterator, _Sequence>&,
+ const _Safe_local_iterator<_Iterator, _Sequence>&,
+ typename _Distance_traits<_Iterator>::__type&);
+#endif
+
template<typename _InputIterator>
inline bool
- __valid_range(const _InputIterator& __first, const _InputIterator& __last)
+ __valid_range(_InputIterator __first, _InputIterator __last)
{
typename _Distance_traits<_InputIterator>::__type __dist;
return __valid_range(__first, __last, __dist);
}
+ template<typename _Iterator, typename _Sequence, typename _Category>
+ bool
+ __valid_range(const _Safe_iterator<_Iterator, _Sequence, _Category>&,
+ const _Safe_iterator<_Iterator, _Sequence, _Category>&);
+
+#if __cplusplus >= 201103L
+ template<typename _Iterator, typename _Sequence>
+ bool
+ __valid_range(const _Safe_local_iterator<_Iterator, _Sequence>&,
+ const _Safe_local_iterator<_Iterator, _Sequence>&);
+#endif
+
// Fallback method, always ok.
template<typename _InputIterator, typename _Size>
inline bool
__can_advance(_InputIterator, _Size)
{ return true; }
- template<typename _Iterator, typename _Sequence, typename _Size>
+ template<typename _Iterator, typename _Sequence, typename _Category,
+ typename _Size>
bool
- __can_advance(const _Safe_iterator<_Iterator, _Sequence>&, _Size);
-
-#if __cplusplus < 201103L
- // Helper struct to detect random access safe iterators.
- template<typename _Iterator>
- struct __is_safe_random_iterator
- {
- enum { __value = 0 };
- typedef std::__false_type __type;
- };
-
- template<typename _Iterator>
- struct _Siter_base
- : std::_Iter_base<_Iterator, __is_safe_random_iterator<_Iterator>::__value>
- { };
+ __can_advance(const _Safe_iterator<_Iterator, _Sequence, _Category>&,
+ _Size);
/** Helper function to extract base iterator of random access safe iterator
- in order to reduce performance impact of debug mode. Limited to random
- access iterator because it is the only category for which it is possible
- to check for correct iterators order in the __valid_range function
- thanks to the < operator.
- */
- template<typename _Iterator>
- inline typename _Siter_base<_Iterator>::iterator_type
- __base(_Iterator __it)
- { return _Siter_base<_Iterator>::_S_base(__it); }
-#else
+ * in order to reduce performance impact of debug mode. Limited to random
+ * access iterator because it is the only category for which it is possible
+ * to check for correct iterators order in the __valid_range function
+ * thanks to the < operator.
+ */
template<typename _Iterator>
inline _Iterator
__base(_Iterator __it)
{ return __it; }
-#endif
#if __cplusplus < 201103L
template<typename _Iterator>
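
[Editorial sketch] For context on the __get_distance/__valid_range declarations above: debug mode reports a distance as a (value, precision) pair, exact for random-access iterators and only an emptiness hint for weaker categories, which is what lets __valid_range check iterator ordering cheaply where it can. A self-contained sketch of that tag dispatch (free-standing names, not the __gnu_debug ones):

#include <iterator>
#include <utility>

enum distance_precision { dp_equality, dp_sign, dp_exact };

// Random-access iterators: the distance is known exactly.
template<typename It>
std::pair<typename std::iterator_traits<It>::difference_type, distance_precision>
get_distance(It lhs, It rhs, std::random_access_iterator_tag)
{ return std::make_pair(rhs - lhs, dp_exact); }

// Weaker categories: all we can say cheaply is whether the range is empty.
template<typename It>
std::pair<typename std::iterator_traits<It>::difference_type, distance_precision>
get_distance(It lhs, It rhs, std::input_iterator_tag)
{
  if (lhs == rhs)
    return std::make_pair(0, dp_exact);
  return std::make_pair(1, dp_equality);
}

template<typename It>
std::pair<typename std::iterator_traits<It>::difference_type, distance_precision>
get_distance(It lhs, It rhs)
{
  return get_distance(lhs, rhs,
                      typename std::iterator_traits<It>::iterator_category());
}

The hunk also switches these helpers from const _Iterator& to by-value parameters, the conventional way to pass iterators.
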
diff --git a/libstdc++-v3/include/debug/list b/libstdc++-v3/include/debug/list
index 80fa3047d3a..8add1d596e0 100644
--- a/libstdc++-v3/include/debug/list
+++ b/libstdc++-v3/include/debug/list
@@ -31,6 +31,11 @@
#pragma GCC system_header
+#include <bits/c++config.h>
+namespace std _GLIBCXX_VISIBILITY(default) { namespace __debug {
+ template<typename _Tp, typename _Allocator> class list;
+} } // namespace std::__debug
+
#include <list>
#include <debug/safe_sequence.h>
#include <debug/safe_container.h>
@@ -57,6 +62,9 @@ namespace __debug
typedef __gnu_debug::_Equal_to<_Base_const_iterator> _Equal;
typedef __gnu_debug::_Not_equal_to<_Base_const_iterator> _Not_equal;
+ template<typename _ItT, typename _SeqT, typename _CatT>
+ friend class ::__gnu_debug::_Safe_iterator;
+
public:
typedef typename _Base::reference reference;
typedef typename _Base::const_reference const_reference;
diff --git a/libstdc++-v3/include/debug/map b/libstdc++-v3/include/debug/map
index 8bd513b13d3..7151e8ee832 100644
--- a/libstdc++-v3/include/debug/map
+++ b/libstdc++-v3/include/debug/map
@@ -31,6 +31,14 @@
#pragma GCC system_header
+#include <bits/c++config.h>
+namespace std _GLIBCXX_VISIBILITY(default) { namespace __debug {
+ template<typename _Key, typename _Tp, typename _Cmp, typename _Allocator>
+ class map;
+ template<typename _Key, typename _Tp, typename _Cmp, typename _Allocator>
+ class multimap;
+} } // namespace std::__debug
+
#include <map>
#include <debug/map.h>
#include <debug/multimap.h>
diff --git a/libstdc++-v3/include/debug/map.h b/libstdc++-v3/include/debug/map.h
index 23966ba968d..f4b4e8d2ad9 100644
--- a/libstdc++-v3/include/debug/map.h
+++ b/libstdc++-v3/include/debug/map.h
@@ -56,6 +56,9 @@ namespace __debug
typedef typename _Base::iterator _Base_iterator;
typedef __gnu_debug::_Equal_to<_Base_const_iterator> _Equal;
+ template<typename _ItT, typename _SeqT, typename _CatT>
+ friend class ::__gnu_debug::_Safe_iterator;
+
public:
// types:
typedef _Key key_type;
diff --git a/libstdc++-v3/include/debug/multimap.h b/libstdc++-v3/include/debug/multimap.h
index 80549848fcb..992ccf07536 100644
--- a/libstdc++-v3/include/debug/multimap.h
+++ b/libstdc++-v3/include/debug/multimap.h
@@ -56,6 +56,9 @@ namespace __debug
typedef typename _Base::iterator _Base_iterator;
typedef __gnu_debug::_Equal_to<_Base_const_iterator> _Equal;
+ template<typename _ItT, typename _SeqT, typename _CatT>
+ friend class ::__gnu_debug::_Safe_iterator;
+
public:
// types:
typedef _Key key_type;
diff --git a/libstdc++-v3/include/debug/multiset.h b/libstdc++-v3/include/debug/multiset.h
index 6e4c1b0e564..fa747ff9c36 100644
--- a/libstdc++-v3/include/debug/multiset.h
+++ b/libstdc++-v3/include/debug/multiset.h
@@ -55,6 +55,9 @@ namespace __debug
typedef typename _Base::iterator _Base_iterator;
typedef __gnu_debug::_Equal_to<_Base_const_iterator> _Equal;
+ template<typename _ItT, typename _SeqT, typename _CatT>
+ friend class ::__gnu_debug::_Safe_iterator;
+
public:
// types:
typedef _Key key_type;
diff --git a/libstdc++-v3/include/debug/safe_iterator.h b/libstdc++-v3/include/debug/safe_iterator.h
index b8256fc3a22..86211b9ca3d 100644
--- a/libstdc++-v3/include/debug/safe_iterator.h
+++ b/libstdc++-v3/include/debug/safe_iterator.h
@@ -44,14 +44,14 @@ namespace __gnu_debug
template<typename _Sequence>
struct _BeforeBeginHelper
{
- template<typename _Iterator>
+ template<typename _Iterator, typename _Category>
static bool
- _S_Is(const _Safe_iterator<_Iterator, _Sequence>&)
+ _S_Is(const _Safe_iterator<_Iterator, _Sequence, _Category>&)
{ return false; }
- template<typename _Iterator>
+ template<typename _Iterator, typename _Category>
static bool
- _S_Is_Beginnest(const _Safe_iterator<_Iterator, _Sequence>& __it)
+ _S_Is_Beginnest(const _Safe_iterator<_Iterator, _Sequence, _Category>& __it)
{ return __it.base() == __it._M_get_sequence()->_M_base().begin(); }
};
@@ -82,22 +82,30 @@ namespace __gnu_debug
* of iterators and it is being detached before _Iterator get
* destroyed. Otherwise it would result in a data race.
*/
- template<typename _Iterator, typename _Sequence>
+ template<typename _Iterator, typename _Sequence, typename _Category
+ = typename std::iterator_traits<_Iterator>::iterator_category>
class _Safe_iterator
: private _Iterator,
public _Safe_iterator_base
{
typedef _Iterator _Iter_base;
typedef _Safe_iterator_base _Safe_base;
- typedef typename _Sequence::const_iterator _Const_iterator;
typedef std::iterator_traits<_Iterator> _Traits;
+ protected:
+ typedef std::__are_same<typename _Sequence::_Base::const_iterator,
+ _Iterator> _IsConstant;
+
+ typedef typename __gnu_cxx::__conditional_type<
+ _IsConstant::__value,
+ typename _Sequence::_Base::iterator,
+ typename _Sequence::_Base::const_iterator>::__type _OtherIterator;
+
struct _Attach_single
{ };
- _Safe_iterator(const _Iterator& __i, _Safe_sequence_base* __seq,
- _Attach_single)
+ _Safe_iterator(_Iterator __i, _Safe_sequence_base* __seq, _Attach_single)
_GLIBCXX_NOEXCEPT
: _Iter_base(__i)
{ _M_attach_single(__seq); }
@@ -120,7 +128,7 @@ namespace __gnu_debug
* @pre @p seq is not NULL
* @post this is not singular
*/
- _Safe_iterator(const _Iterator& __i, const _Safe_sequence_base* __seq)
+ _Safe_iterator(_Iterator __i, const _Safe_sequence_base* __seq)
_GLIBCXX_NOEXCEPT
: _Iter_base(__i), _Safe_base(__seq, _S_constant())
{
@@ -171,10 +179,11 @@ namespace __gnu_debug
*/
template<typename _MutableIterator>
_Safe_iterator(
- const _Safe_iterator<_MutableIterator,
- typename __gnu_cxx::__enable_if<(std::__are_same<_MutableIterator,
- typename _Sequence::iterator::iterator_type>::__value),
- _Sequence>::__type>& __x) _GLIBCXX_NOEXCEPT
+ const _Safe_iterator<_MutableIterator, _Sequence,
+ typename __gnu_cxx::__enable_if<_IsConstant::__value &&
+ std::__are_same<_MutableIterator, _OtherIterator>::__value,
+ _Category>::__type>& __x)
+ _GLIBCXX_NOEXCEPT
: _Iter_base(__x.base())
{
// _GLIBCXX_RESOLVE_LIB_DEFECTS
@@ -309,93 +318,12 @@ namespace __gnu_debug
return _Safe_iterator(base()++, this->_M_sequence, _Attach_single());
}
- // ------ Bidirectional iterator requirements ------
- /**
- * @brief Iterator predecrement
- * @pre iterator is decrementable
- */
- _Safe_iterator&
- operator--() _GLIBCXX_NOEXCEPT
- {
- _GLIBCXX_DEBUG_VERIFY(this->_M_decrementable(),
- _M_message(__msg_bad_dec)
- ._M_iterator(*this, "this"));
- __gnu_cxx::__scoped_lock __l(this->_M_get_mutex());
- --base();
- return *this;
- }
-
- /**
- * @brief Iterator postdecrement
- * @pre iterator is decrementable
- */
- _Safe_iterator
- operator--(int) _GLIBCXX_NOEXCEPT
- {
- _GLIBCXX_DEBUG_VERIFY(this->_M_decrementable(),
- _M_message(__msg_bad_dec)
- ._M_iterator(*this, "this"));
- __gnu_cxx::__scoped_lock __l(this->_M_get_mutex());
- return _Safe_iterator(base()--, this->_M_sequence, _Attach_single());
- }
-
- // ------ Random access iterator requirements ------
- reference
- operator[](const difference_type& __n) const _GLIBCXX_NOEXCEPT
- {
- _GLIBCXX_DEBUG_VERIFY(this->_M_can_advance(__n)
- && this->_M_can_advance(__n+1),
- _M_message(__msg_iter_subscript_oob)
- ._M_iterator(*this)._M_integer(__n));
- return base()[__n];
- }
-
- _Safe_iterator&
- operator+=(const difference_type& __n) _GLIBCXX_NOEXCEPT
- {
- _GLIBCXX_DEBUG_VERIFY(this->_M_can_advance(__n),
- _M_message(__msg_advance_oob)
- ._M_iterator(*this)._M_integer(__n));
- __gnu_cxx::__scoped_lock __l(this->_M_get_mutex());
- base() += __n;
- return *this;
- }
-
- _Safe_iterator
- operator+(const difference_type& __n) const _GLIBCXX_NOEXCEPT
- {
- _GLIBCXX_DEBUG_VERIFY(this->_M_can_advance(__n),
- _M_message(__msg_advance_oob)
- ._M_iterator(*this)._M_integer(__n));
- return _Safe_iterator(base() + __n, this->_M_sequence);
- }
-
- _Safe_iterator&
- operator-=(const difference_type& __n) _GLIBCXX_NOEXCEPT
- {
- _GLIBCXX_DEBUG_VERIFY(this->_M_can_advance(-__n),
- _M_message(__msg_retreat_oob)
- ._M_iterator(*this)._M_integer(__n));
- __gnu_cxx::__scoped_lock __l(this->_M_get_mutex());
- base() -= __n;
- return *this;
- }
-
- _Safe_iterator
- operator-(const difference_type& __n) const _GLIBCXX_NOEXCEPT
- {
- _GLIBCXX_DEBUG_VERIFY(this->_M_can_advance(-__n),
- _M_message(__msg_retreat_oob)
- ._M_iterator(*this)._M_integer(__n));
- return _Safe_iterator(base() - __n, this->_M_sequence);
- }
-
// ------ Utilities ------
/// Determine if this is a constant iterator.
- static bool
+ static _GLIBCXX_CONSTEXPR bool
_S_constant()
- { return std::__are_same<_Const_iterator, _Safe_iterator>::__value; }
+ { return _IsConstant::__value; }
/**
* @brief Return the underlying iterator
@@ -444,10 +372,6 @@ namespace __gnu_debug
_M_incrementable() const
{ return !this->_M_singular() && !_M_is_end(); }
- // Is the iterator decrementable?
- bool
- _M_decrementable() const { return !_M_singular() && !_M_is_begin(); }
-
// Can we advance the iterator @p __n steps (@p __n may be negative)
bool
_M_can_advance(const difference_type& __n) const;
@@ -459,14 +383,23 @@ namespace __gnu_debug
bool __check_dereferenceable = true) const;
// The sequence this iterator references.
- typename
- __gnu_cxx::__conditional_type<std::__are_same<_Const_iterator,
- _Safe_iterator>::__value,
- const _Sequence*,
- _Sequence*>::__type
+ typename __gnu_cxx::__conditional_type<
+ _IsConstant::__value, const _Sequence*, _Sequence*>::__type
_M_get_sequence() const
{ return static_cast<_Sequence*>(_M_sequence); }
+ // Get distance to __rhs.
+ typename _Distance_traits<_Iterator>::__type
+ _M_get_distance_to(const _Safe_iterator& __rhs) const;
+
+ // Get distance from sequence begin up to *this.
+ typename _Distance_traits<_Iterator>::__type
+ _M_get_distance_from_begin() const;
+
+ // Get distance from *this to sequence end.
+ typename _Distance_traits<_Iterator>::__type
+ _M_get_distance_to_end() const;
+
/// Is this iterator equal to the sequence's begin() iterator?
bool
_M_is_begin() const
@@ -490,13 +423,346 @@ namespace __gnu_debug
{ return _BeforeBeginHelper<_Sequence>::_S_Is_Beginnest(*this); }
};
+ template<typename _Iterator, typename _Sequence>
+ class _Safe_iterator<_Iterator, _Sequence, std::bidirectional_iterator_tag>
+ : public _Safe_iterator<_Iterator, _Sequence, std::forward_iterator_tag>
+ {
+ typedef _Safe_iterator<_Iterator, _Sequence,
+ std::forward_iterator_tag> _Safe_base;
+
+ protected:
+ typedef typename _Safe_base::_OtherIterator _OtherIterator;
+ typedef typename _Safe_base::_Attach_single _Attach_single;
+
+ _Safe_iterator(_Iterator __i, _Safe_sequence_base* __seq, _Attach_single)
+ _GLIBCXX_NOEXCEPT
+ : _Safe_base(__i, __seq, _Attach_single())
+ { }
+
+ public:
+ /// @post the iterator is singular and unattached
+ _Safe_iterator() _GLIBCXX_NOEXCEPT { }
+
+ /**
+ * @brief Safe iterator construction from an unsafe iterator and
+ * its sequence.
+ *
+ * @pre @p seq is not NULL
+ * @post this is not singular
+ */
+ _Safe_iterator(_Iterator __i, const _Safe_sequence_base* __seq)
+ _GLIBCXX_NOEXCEPT
+ : _Safe_base(__i, __seq)
+ { }
+
+ /**
+ * @brief Copy construction.
+ */
+ _Safe_iterator(const _Safe_iterator& __x) _GLIBCXX_NOEXCEPT
+ : _Safe_base(__x)
+ { }
+
+#if __cplusplus >= 201103L
+ /** @brief Move construction. */
+ _Safe_iterator(_Safe_iterator&&) = default;
+#endif
+
+ /**
+ * @brief Converting constructor from a mutable iterator to a
+ * constant iterator.
+ */
+ template<typename _MutableIterator>
+ _Safe_iterator(
+ const _Safe_iterator<_MutableIterator, _Sequence,
+ typename __gnu_cxx::__enable_if<_Safe_base::_IsConstant::__value &&
+ std::__are_same<_MutableIterator, _OtherIterator>::__value,
+ std::bidirectional_iterator_tag>::__type>& __x)
+ _GLIBCXX_NOEXCEPT
+ : _Safe_base(__x)
+ { }
+
+#if __cplusplus >= 201103L
+ /** @brief Copy assignment. */
+ _Safe_iterator&
+ operator=(const _Safe_iterator&) = default;
+
+ /** @brief Move assignment. */
+ _Safe_iterator&
+ operator=(_Safe_iterator&&) = default;
+#else
+ /** @brief Copy assignment. */
+ _Safe_iterator&
+ operator=(const _Safe_iterator& __x)
+ {
+ _Safe_base::operator=(__x);
+ return *this;
+ }
+#endif
+
+ // ------ Input iterator requirements ------
+ /**
+ * @brief Iterator preincrement
+ * @pre iterator is incrementable
+ */
+ _Safe_iterator&
+ operator++() _GLIBCXX_NOEXCEPT
+ {
+ _Safe_base::operator++();
+ return *this;
+ }
+
+ /**
+ * @brief Iterator postincrement
+ * @pre iterator is incrementable
+ */
+ _Safe_iterator
+ operator++(int) _GLIBCXX_NOEXCEPT
+ {
+ _GLIBCXX_DEBUG_VERIFY(this->_M_incrementable(),
+ _M_message(__msg_bad_inc)
+ ._M_iterator(*this, "this"));
+ __gnu_cxx::__scoped_lock __l(this->_M_get_mutex());
+ return _Safe_iterator(this->base()++, this->_M_sequence,
+ _Attach_single());
+ }
+
+ // ------ Bidirectional iterator requirements ------
+ /**
+ * @brief Iterator predecrement
+ * @pre iterator is decrementable
+ */
+ _Safe_iterator&
+ operator--() _GLIBCXX_NOEXCEPT
+ {
+ _GLIBCXX_DEBUG_VERIFY(this->_M_decrementable(),
+ _M_message(__msg_bad_dec)
+ ._M_iterator(*this, "this"));
+ __gnu_cxx::__scoped_lock __l(this->_M_get_mutex());
+ --this->base();
+ return *this;
+ }
+
+ /**
+ * @brief Iterator postdecrement
+ * @pre iterator is decrementable
+ */
+ _Safe_iterator
+ operator--(int) _GLIBCXX_NOEXCEPT
+ {
+ _GLIBCXX_DEBUG_VERIFY(this->_M_decrementable(),
+ _M_message(__msg_bad_dec)
+ ._M_iterator(*this, "this"));
+ __gnu_cxx::__scoped_lock __l(this->_M_get_mutex());
+ return _Safe_iterator(this->base()--, this->_M_sequence,
+ _Attach_single());
+ }
+
+ // ------ Utilities ------
+
+ // Is the iterator decrementable?
+ bool
+ _M_decrementable() const
+ { return !this->_M_singular() && !this->_M_is_begin(); }
+ };
+
+ template<typename _Iterator, typename _Sequence>
+ class _Safe_iterator<_Iterator, _Sequence, std::random_access_iterator_tag>
+ : public _Safe_iterator<_Iterator, _Sequence,
+ std::bidirectional_iterator_tag>
+ {
+ typedef _Safe_iterator<_Iterator, _Sequence,
+ std::bidirectional_iterator_tag> _Safe_base;
+ typedef typename _Safe_base::_OtherIterator _OtherIterator;
+
+ typedef typename _Safe_base::_Attach_single _Attach_single;
+
+ _Safe_iterator(_Iterator __i, _Safe_sequence_base* __seq, _Attach_single)
+ _GLIBCXX_NOEXCEPT
+ : _Safe_base(__i, __seq, _Attach_single())
+ { }
+
+ public:
+ typedef typename _Safe_base::difference_type difference_type;
+ typedef typename _Safe_base::reference reference;
+
+ /// @post the iterator is singular and unattached
+ _Safe_iterator() _GLIBCXX_NOEXCEPT { }
+
+ /**
+ * @brief Safe iterator construction from an unsafe iterator and
+ * its sequence.
+ *
+ * @pre @p seq is not NULL
+ * @post this is not singular
+ */
+ _Safe_iterator(_Iterator __i, const _Safe_sequence_base* __seq)
+ _GLIBCXX_NOEXCEPT
+ : _Safe_base(__i, __seq)
+ { }
+
+ /**
+ * @brief Copy construction.
+ */
+ _Safe_iterator(const _Safe_iterator& __x) _GLIBCXX_NOEXCEPT
+ : _Safe_base(__x)
+ { }
+
+#if __cplusplus >= 201103L
+ /** @brief Move construction. */
+ _Safe_iterator(_Safe_iterator&&) = default;
+#endif
+
+ /**
+ * @brief Converting constructor from a mutable iterator to a
+ * constant iterator.
+ */
+ template<typename _MutableIterator>
+ _Safe_iterator(
+ const _Safe_iterator<_MutableIterator, _Sequence,
+ typename __gnu_cxx::__enable_if<_Safe_base::_IsConstant::__value &&
+ std::__are_same<_MutableIterator, _OtherIterator>::__value,
+ std::random_access_iterator_tag>::__type>& __x)
+ _GLIBCXX_NOEXCEPT
+ : _Safe_base(__x)
+ { }
+
+#if __cplusplus >= 201103L
+ /** @brief Copy assignment. */
+ _Safe_iterator&
+ operator=(const _Safe_iterator&) = default;
+
+ /** @brief Move assignment. */
+ _Safe_iterator&
+ operator=(_Safe_iterator&&) = default;
+#else
+ /** @brief Copy assignment. */
+ _Safe_iterator&
+ operator=(const _Safe_iterator& __x)
+ {
+ _Safe_base::operator=(__x);
+ return *this;
+ }
+#endif
+
+ // Is the iterator range [*this, __rhs) valid?
+ bool
+ _M_valid_range(const _Safe_iterator& __rhs,
+ std::pair<difference_type,
+ _Distance_precision>& __dist) const;
+
+ // ------ Input iterator requirements ------
+ /**
+ * @brief Iterator preincrement
+ * @pre iterator is incrementable
+ */
+ _Safe_iterator&
+ operator++() _GLIBCXX_NOEXCEPT
+ {
+ _Safe_base::operator++();
+ return *this;
+ }
+
+ /**
+ * @brief Iterator postincrement
+ * @pre iterator is incrementable
+ */
+ _Safe_iterator
+ operator++(int) _GLIBCXX_NOEXCEPT
+ {
+ _GLIBCXX_DEBUG_VERIFY(this->_M_incrementable(),
+ _M_message(__msg_bad_inc)
+ ._M_iterator(*this, "this"));
+ __gnu_cxx::__scoped_lock __l(this->_M_get_mutex());
+ return _Safe_iterator(this->base()++, this->_M_sequence,
+ _Attach_single());
+ }
+
+ // ------ Bidirectional iterator requirements ------
+ /**
+ * @brief Iterator predecrement
+ * @pre iterator is decrementable
+ */
+ _Safe_iterator&
+ operator--() _GLIBCXX_NOEXCEPT
+ {
+ _Safe_base::operator--();
+ return *this;
+ }
+
+ /**
+ * @brief Iterator postdecrement
+ * @pre iterator is decrementable
+ */
+ _Safe_iterator
+ operator--(int) _GLIBCXX_NOEXCEPT
+ {
+ _GLIBCXX_DEBUG_VERIFY(this->_M_decrementable(),
+ _M_message(__msg_bad_dec)
+ ._M_iterator(*this, "this"));
+ __gnu_cxx::__scoped_lock __l(this->_M_get_mutex());
+ return _Safe_iterator(this->base()--, this->_M_sequence,
+ _Attach_single());
+ }
+
+ // ------ Random access iterator requirements ------
+ reference
+ operator[](const difference_type& __n) const _GLIBCXX_NOEXCEPT
+ {
+ _GLIBCXX_DEBUG_VERIFY(this->_M_can_advance(__n)
+ && this->_M_can_advance(__n + 1),
+ _M_message(__msg_iter_subscript_oob)
+ ._M_iterator(*this)._M_integer(__n));
+ return this->base()[__n];
+ }
+
+ _Safe_iterator&
+ operator+=(const difference_type& __n) _GLIBCXX_NOEXCEPT
+ {
+ _GLIBCXX_DEBUG_VERIFY(this->_M_can_advance(__n),
+ _M_message(__msg_advance_oob)
+ ._M_iterator(*this)._M_integer(__n));
+ __gnu_cxx::__scoped_lock __l(this->_M_get_mutex());
+ this->base() += __n;
+ return *this;
+ }
+
+ _Safe_iterator
+ operator+(const difference_type& __n) const _GLIBCXX_NOEXCEPT
+ {
+ _GLIBCXX_DEBUG_VERIFY(this->_M_can_advance(__n),
+ _M_message(__msg_advance_oob)
+ ._M_iterator(*this)._M_integer(__n));
+ return _Safe_iterator(this->base() + __n, this->_M_sequence);
+ }
+
+ _Safe_iterator&
+ operator-=(const difference_type& __n) _GLIBCXX_NOEXCEPT
+ {
+ _GLIBCXX_DEBUG_VERIFY(this->_M_can_advance(-__n),
+ _M_message(__msg_retreat_oob)
+ ._M_iterator(*this)._M_integer(__n));
+ __gnu_cxx::__scoped_lock __l(this->_M_get_mutex());
+ this->base() -= __n;
+ return *this;
+ }
+
+ _Safe_iterator
+ operator-(const difference_type& __n) const _GLIBCXX_NOEXCEPT
+ {
+ _GLIBCXX_DEBUG_VERIFY(this->_M_can_advance(-__n),
+ _M_message(__msg_retreat_oob)
+ ._M_iterator(*this)._M_integer(__n));
+ return _Safe_iterator(this->base() - __n, this->_M_sequence);
+ }
+ };
+
template<typename _IteratorL, typename _IteratorR, typename _Sequence>
inline bool
operator==(const _Safe_iterator<_IteratorL, _Sequence>& __lhs,
const _Safe_iterator<_IteratorR, _Sequence>& __rhs)
_GLIBCXX_NOEXCEPT
{
- _GLIBCXX_DEBUG_VERIFY(! __lhs._M_singular() && ! __rhs._M_singular(),
+ _GLIBCXX_DEBUG_VERIFY(!__lhs._M_singular() && !__rhs._M_singular(),
_M_message(__msg_iter_compare_bad)
._M_iterator(__lhs, "lhs")
._M_iterator(__rhs, "rhs"));
@@ -513,7 +779,7 @@ namespace __gnu_debug
const _Safe_iterator<_Iterator, _Sequence>& __rhs)
_GLIBCXX_NOEXCEPT
{
- _GLIBCXX_DEBUG_VERIFY(! __lhs._M_singular() && ! __rhs._M_singular(),
+ _GLIBCXX_DEBUG_VERIFY(!__lhs._M_singular() && !__rhs._M_singular(),
_M_message(__msg_iter_compare_bad)
._M_iterator(__lhs, "lhs")
._M_iterator(__rhs, "rhs"));
@@ -530,7 +796,7 @@ namespace __gnu_debug
const _Safe_iterator<_IteratorR, _Sequence>& __rhs)
_GLIBCXX_NOEXCEPT
{
- _GLIBCXX_DEBUG_VERIFY(! __lhs._M_singular() && ! __rhs._M_singular(),
+ _GLIBCXX_DEBUG_VERIFY(!__lhs._M_singular() && !__rhs._M_singular(),
_M_message(__msg_iter_compare_bad)
._M_iterator(__lhs, "lhs")
._M_iterator(__rhs, "rhs"));
@@ -547,7 +813,7 @@ namespace __gnu_debug
const _Safe_iterator<_Iterator, _Sequence>& __rhs)
_GLIBCXX_NOEXCEPT
{
- _GLIBCXX_DEBUG_VERIFY(! __lhs._M_singular() && ! __rhs._M_singular(),
+ _GLIBCXX_DEBUG_VERIFY(!__lhs._M_singular() && !__rhs._M_singular(),
_M_message(__msg_iter_compare_bad)
._M_iterator(__lhs, "lhs")
._M_iterator(__rhs, "rhs"));
@@ -560,11 +826,13 @@ namespace __gnu_debug
template<typename _IteratorL, typename _IteratorR, typename _Sequence>
inline bool
- operator<(const _Safe_iterator<_IteratorL, _Sequence>& __lhs,
- const _Safe_iterator<_IteratorR, _Sequence>& __rhs)
+ operator<(const _Safe_iterator<_IteratorL, _Sequence,
+ std::random_access_iterator_tag>& __lhs,
+ const _Safe_iterator<_IteratorR, _Sequence,
+ std::random_access_iterator_tag>& __rhs)
_GLIBCXX_NOEXCEPT
{
- _GLIBCXX_DEBUG_VERIFY(! __lhs._M_singular() && ! __rhs._M_singular(),
+ _GLIBCXX_DEBUG_VERIFY(!__lhs._M_singular() && !__rhs._M_singular(),
_M_message(__msg_iter_order_bad)
._M_iterator(__lhs, "lhs")
._M_iterator(__rhs, "rhs"));
@@ -577,11 +845,13 @@ namespace __gnu_debug
template<typename _Iterator, typename _Sequence>
inline bool
- operator<(const _Safe_iterator<_Iterator, _Sequence>& __lhs,
- const _Safe_iterator<_Iterator, _Sequence>& __rhs)
+ operator<(const _Safe_iterator<_Iterator, _Sequence,
+ std::random_access_iterator_tag>& __lhs,
+ const _Safe_iterator<_Iterator, _Sequence,
+ std::random_access_iterator_tag>& __rhs)
_GLIBCXX_NOEXCEPT
{
- _GLIBCXX_DEBUG_VERIFY(! __lhs._M_singular() && ! __rhs._M_singular(),
+ _GLIBCXX_DEBUG_VERIFY(!__lhs._M_singular() && !__rhs._M_singular(),
_M_message(__msg_iter_order_bad)
._M_iterator(__lhs, "lhs")
._M_iterator(__rhs, "rhs"));
@@ -594,11 +864,13 @@ namespace __gnu_debug
template<typename _IteratorL, typename _IteratorR, typename _Sequence>
inline bool
- operator<=(const _Safe_iterator<_IteratorL, _Sequence>& __lhs,
- const _Safe_iterator<_IteratorR, _Sequence>& __rhs)
+ operator<=(const _Safe_iterator<_IteratorL, _Sequence,
+ std::random_access_iterator_tag>& __lhs,
+ const _Safe_iterator<_IteratorR, _Sequence,
+ std::random_access_iterator_tag>& __rhs)
_GLIBCXX_NOEXCEPT
{
- _GLIBCXX_DEBUG_VERIFY(! __lhs._M_singular() && ! __rhs._M_singular(),
+ _GLIBCXX_DEBUG_VERIFY(!__lhs._M_singular() && !__rhs._M_singular(),
_M_message(__msg_iter_order_bad)
._M_iterator(__lhs, "lhs")
._M_iterator(__rhs, "rhs"));
@@ -611,11 +883,13 @@ namespace __gnu_debug
template<typename _Iterator, typename _Sequence>
inline bool
- operator<=(const _Safe_iterator<_Iterator, _Sequence>& __lhs,
- const _Safe_iterator<_Iterator, _Sequence>& __rhs)
+ operator<=(const _Safe_iterator<_Iterator, _Sequence,
+ std::random_access_iterator_tag>& __lhs,
+ const _Safe_iterator<_Iterator, _Sequence,
+ std::random_access_iterator_tag>& __rhs)
_GLIBCXX_NOEXCEPT
{
- _GLIBCXX_DEBUG_VERIFY(! __lhs._M_singular() && ! __rhs._M_singular(),
+ _GLIBCXX_DEBUG_VERIFY(!__lhs._M_singular() && !__rhs._M_singular(),
_M_message(__msg_iter_order_bad)
._M_iterator(__lhs, "lhs")
._M_iterator(__rhs, "rhs"));
@@ -628,11 +902,13 @@ namespace __gnu_debug
template<typename _IteratorL, typename _IteratorR, typename _Sequence>
inline bool
- operator>(const _Safe_iterator<_IteratorL, _Sequence>& __lhs,
- const _Safe_iterator<_IteratorR, _Sequence>& __rhs)
+ operator>(const _Safe_iterator<_IteratorL, _Sequence,
+ std::random_access_iterator_tag>& __lhs,
+ const _Safe_iterator<_IteratorR, _Sequence,
+ std::random_access_iterator_tag>& __rhs)
_GLIBCXX_NOEXCEPT
{
- _GLIBCXX_DEBUG_VERIFY(! __lhs._M_singular() && ! __rhs._M_singular(),
+ _GLIBCXX_DEBUG_VERIFY(!__lhs._M_singular() && !__rhs._M_singular(),
_M_message(__msg_iter_order_bad)
._M_iterator(__lhs, "lhs")
._M_iterator(__rhs, "rhs"));
@@ -645,11 +921,13 @@ namespace __gnu_debug
template<typename _Iterator, typename _Sequence>
inline bool
- operator>(const _Safe_iterator<_Iterator, _Sequence>& __lhs,
- const _Safe_iterator<_Iterator, _Sequence>& __rhs)
+ operator>(const _Safe_iterator<_Iterator, _Sequence,
+ std::random_access_iterator_tag>& __lhs,
+ const _Safe_iterator<_Iterator, _Sequence,
+ std::random_access_iterator_tag>& __rhs)
_GLIBCXX_NOEXCEPT
{
- _GLIBCXX_DEBUG_VERIFY(! __lhs._M_singular() && ! __rhs._M_singular(),
+ _GLIBCXX_DEBUG_VERIFY(!__lhs._M_singular() && !__rhs._M_singular(),
_M_message(__msg_iter_order_bad)
._M_iterator(__lhs, "lhs")
._M_iterator(__rhs, "rhs"));
@@ -662,11 +940,13 @@ namespace __gnu_debug
template<typename _IteratorL, typename _IteratorR, typename _Sequence>
inline bool
- operator>=(const _Safe_iterator<_IteratorL, _Sequence>& __lhs,
- const _Safe_iterator<_IteratorR, _Sequence>& __rhs)
+ operator>=(const _Safe_iterator<_IteratorL, _Sequence,
+ std::random_access_iterator_tag>& __lhs,
+ const _Safe_iterator<_IteratorR, _Sequence,
+ std::random_access_iterator_tag>& __rhs)
_GLIBCXX_NOEXCEPT
{
- _GLIBCXX_DEBUG_VERIFY(! __lhs._M_singular() && ! __rhs._M_singular(),
+ _GLIBCXX_DEBUG_VERIFY(!__lhs._M_singular() && !__rhs._M_singular(),
_M_message(__msg_iter_order_bad)
._M_iterator(__lhs, "lhs")
._M_iterator(__rhs, "rhs"));
@@ -679,11 +959,13 @@ namespace __gnu_debug
template<typename _Iterator, typename _Sequence>
inline bool
- operator>=(const _Safe_iterator<_Iterator, _Sequence>& __lhs,
- const _Safe_iterator<_Iterator, _Sequence>& __rhs)
+ operator>=(const _Safe_iterator<_Iterator, _Sequence,
+ std::random_access_iterator_tag>& __lhs,
+ const _Safe_iterator<_Iterator, _Sequence,
+ std::random_access_iterator_tag>& __rhs)
_GLIBCXX_NOEXCEPT
{
- _GLIBCXX_DEBUG_VERIFY(! __lhs._M_singular() && ! __rhs._M_singular(),
+ _GLIBCXX_DEBUG_VERIFY(!__lhs._M_singular() && !__rhs._M_singular(),
_M_message(__msg_iter_order_bad)
._M_iterator(__lhs, "lhs")
._M_iterator(__rhs, "rhs"));
@@ -699,12 +981,15 @@ namespace __gnu_debug
// operators but also operator- must accept mixed iterator/const_iterator
// parameters.
template<typename _IteratorL, typename _IteratorR, typename _Sequence>
- inline typename _Safe_iterator<_IteratorL, _Sequence>::difference_type
- operator-(const _Safe_iterator<_IteratorL, _Sequence>& __lhs,
- const _Safe_iterator<_IteratorR, _Sequence>& __rhs)
+ inline typename _Safe_iterator<_IteratorL, _Sequence,
+ std::random_access_iterator_tag>::difference_type
+ operator-(const _Safe_iterator<_IteratorL, _Sequence,
+ std::random_access_iterator_tag>& __lhs,
+ const _Safe_iterator<_IteratorR, _Sequence,
+ std::random_access_iterator_tag>& __rhs)
_GLIBCXX_NOEXCEPT
{
- _GLIBCXX_DEBUG_VERIFY(! __lhs._M_singular() && ! __rhs._M_singular(),
+ _GLIBCXX_DEBUG_VERIFY(!__lhs._M_singular() && !__rhs._M_singular(),
_M_message(__msg_distance_bad)
._M_iterator(__lhs, "lhs")
._M_iterator(__rhs, "rhs"));
@@ -715,185 +1000,69 @@ namespace __gnu_debug
return __lhs.base() - __rhs.base();
}
- template<typename _Iterator, typename _Sequence>
- inline typename _Safe_iterator<_Iterator, _Sequence>::difference_type
- operator-(const _Safe_iterator<_Iterator, _Sequence>& __lhs,
- const _Safe_iterator<_Iterator, _Sequence>& __rhs)
+ template<typename _Iterator, typename _Sequence>
+ inline typename _Safe_iterator<_Iterator, _Sequence,
+ std::random_access_iterator_tag>::difference_type
+ operator-(const _Safe_iterator<_Iterator, _Sequence,
+ std::random_access_iterator_tag>& __lhs,
+ const _Safe_iterator<_Iterator, _Sequence,
+ std::random_access_iterator_tag>& __rhs)
_GLIBCXX_NOEXCEPT
- {
- _GLIBCXX_DEBUG_VERIFY(! __lhs._M_singular() && ! __rhs._M_singular(),
- _M_message(__msg_distance_bad)
- ._M_iterator(__lhs, "lhs")
- ._M_iterator(__rhs, "rhs"));
- _GLIBCXX_DEBUG_VERIFY(__lhs._M_can_compare(__rhs),
- _M_message(__msg_distance_different)
- ._M_iterator(__lhs, "lhs")
- ._M_iterator(__rhs, "rhs"));
- return __lhs.base() - __rhs.base();
- }
+ {
+ _GLIBCXX_DEBUG_VERIFY(!__lhs._M_singular() && !__rhs._M_singular(),
+ _M_message(__msg_distance_bad)
+ ._M_iterator(__lhs, "lhs")
+ ._M_iterator(__rhs, "rhs"));
+ _GLIBCXX_DEBUG_VERIFY(__lhs._M_can_compare(__rhs),
+ _M_message(__msg_distance_different)
+ ._M_iterator(__lhs, "lhs")
+ ._M_iterator(__rhs, "rhs"));
+ return __lhs.base() - __rhs.base();
+ }
template<typename _Iterator, typename _Sequence>
- inline _Safe_iterator<_Iterator, _Sequence>
- operator+(typename _Safe_iterator<_Iterator,_Sequence>::difference_type __n,
- const _Safe_iterator<_Iterator, _Sequence>& __i) _GLIBCXX_NOEXCEPT
+ inline _Safe_iterator<_Iterator, _Sequence, std::random_access_iterator_tag>
+ operator+(typename _Safe_iterator<_Iterator,_Sequence,
+ std::random_access_iterator_tag>::difference_type __n,
+ const _Safe_iterator<_Iterator, _Sequence,
+ std::random_access_iterator_tag>& __i)
+ _GLIBCXX_NOEXCEPT
{ return __i + __n; }
- /** Safe iterators know if they are dereferenceable. */
- template<typename _Iterator, typename _Sequence>
- inline bool
- __check_dereferenceable(const _Safe_iterator<_Iterator, _Sequence>& __x)
- { return __x._M_dereferenceable(); }
-
/** Safe iterators know how to check if they form a valid range. */
- template<typename _Iterator, typename _Sequence>
+ template<typename _Iterator, typename _Sequence, typename _Category>
inline bool
- __valid_range(const _Safe_iterator<_Iterator, _Sequence>& __first,
- const _Safe_iterator<_Iterator, _Sequence>& __last,
+ __valid_range(const _Safe_iterator<_Iterator, _Sequence,
+ _Category>& __first,
+ const _Safe_iterator<_Iterator, _Sequence,
+ _Category>& __last,
typename _Distance_traits<_Iterator>::__type& __dist)
{ return __first._M_valid_range(__last, __dist); }
- /** Safe iterators can help to get better distance knowledge. */
- template<typename _Iterator, typename _Sequence>
- inline typename _Distance_traits<_Iterator>::__type
- __get_distance(const _Safe_iterator<_Iterator, _Sequence>& __first,
- const _Safe_iterator<_Iterator, _Sequence>& __last,
- std::random_access_iterator_tag)
- { return std::make_pair(__last.base() - __first.base(), __dp_exact); }
-
- template<typename _Iterator, typename _Sequence>
- inline typename _Distance_traits<_Iterator>::__type
- __get_distance(const _Safe_iterator<_Iterator, _Sequence>& __first,
- const _Safe_iterator<_Iterator, _Sequence>& __last,
- std::input_iterator_tag)
- {
- typedef typename _Distance_traits<_Iterator>::__type _Diff;
- typedef _Sequence_traits<_Sequence> _SeqTraits;
-
- if (__first.base() == __last.base())
- return std::make_pair(0, __dp_exact);
-
- if (__first._M_is_before_begin())
- {
- if (__last._M_is_begin())
- return std::make_pair(1, __dp_exact);
-
- return std::make_pair(1, __dp_sign);
- }
-
- if (__first._M_is_begin())
- {
- if (__last._M_is_before_begin())
- return std::make_pair(-1, __dp_exact);
-
- if (__last._M_is_end())
- return _SeqTraits::_S_size(*__first._M_get_sequence());
-
- return std::make_pair(1, __dp_sign);
- }
-
- if (__first._M_is_end())
- {
- if (__last._M_is_before_begin())
- return std::make_pair(-1, __dp_exact);
-
- if (__last._M_is_begin())
- {
- _Diff __diff = _SeqTraits::_S_size(*__first._M_get_sequence());
- return std::make_pair(-__diff.first, __diff.second);
- }
-
- return std::make_pair(-1, __dp_sign);
- }
-
- if (__last._M_is_before_begin() || __last._M_is_begin())
- return std::make_pair(-1, __dp_sign);
-
- if (__last._M_is_end())
- return std::make_pair(1, __dp_sign);
-
- return std::make_pair(1, __dp_equality);
- }
-
- // Get distance from sequence begin to specified iterator.
- template<typename _Iterator, typename _Sequence>
- inline typename _Distance_traits<_Iterator>::__type
- __get_distance_from_begin(const _Safe_iterator<_Iterator, _Sequence>& __it)
- {
- typedef _Sequence_traits<_Sequence> _SeqTraits;
-
- // No need to consider before_begin as this function is only used in
- // _M_can_advance which won't be used for forward_list iterators.
- if (__it._M_is_begin())
- return std::make_pair(0, __dp_exact);
-
- if (__it._M_is_end())
- return _SeqTraits::_S_size(*__it._M_get_sequence());
-
- typename _Distance_traits<_Iterator>::__type __res
- = __get_distance(__it._M_get_sequence()->_M_base().begin(), __it.base());
-
- if (__res.second == __dp_equality)
- return std::make_pair(1, __dp_sign);
-
- return __res;
- }
-
- // Get distance from specified iterator to sequence end.
- template<typename _Iterator, typename _Sequence>
- inline typename _Distance_traits<_Iterator>::__type
- __get_distance_to_end(const _Safe_iterator<_Iterator, _Sequence>& __it)
+ template<typename _Iterator, typename _Sequence, typename _Category>
+ inline bool
+ __valid_range(const _Safe_iterator<_Iterator, _Sequence,
+ _Category>& __first,
+ const _Safe_iterator<_Iterator, _Sequence,
+ _Category>& __last)
{
- typedef _Sequence_traits<_Sequence> _SeqTraits;
-
- // No need to consider before_begin as this function is only used in
- // _M_can_advance which won't be used for forward_list iterators.
- if (__it._M_is_begin())
- return _SeqTraits::_S_size(*__it._M_get_sequence());
-
- if (__it._M_is_end())
- return std::make_pair(0, __dp_exact);
-
- typename _Distance_traits<_Iterator>::__type __res
- = __get_distance(__it.base(), __it._M_get_sequence()->_M_base().end());
-
- if (__res.second == __dp_equality)
- return std::make_pair(1, __dp_sign);
-
- return __res;
+ typename _Distance_traits<_Iterator>::__type __dist;
+ return __first._M_valid_range(__last, __dist);
}
- template<typename _Iterator, typename _Sequence, typename _Size>
+ template<typename _Iterator, typename _Sequence, typename _Category,
+ typename _Size>
inline bool
- __can_advance(const _Safe_iterator<_Iterator, _Sequence>& __it, _Size __n)
+ __can_advance(const _Safe_iterator<_Iterator, _Sequence, _Category>& __it,
+ _Size __n)
{ return __it._M_can_advance(__n); }
-#if __cplusplus < 201103L
- template<typename _Iterator, typename _Sequence>
- struct __is_safe_random_iterator<_Safe_iterator<_Iterator, _Sequence> >
- : std::__are_same<std::random_access_iterator_tag,
- typename std::iterator_traits<_Iterator>::
- iterator_category>
- { };
-#else
template<typename _Iterator, typename _Sequence>
_Iterator
- __base(const _Safe_iterator<_Iterator, _Sequence>& __it,
- std::random_access_iterator_tag)
+ __base(const _Safe_iterator<_Iterator, _Sequence,
+ std::random_access_iterator_tag>& __it)
{ return __it.base(); }
- template<typename _Iterator, typename _Sequence>
- const _Safe_iterator<_Iterator, _Sequence>&
- __base(const _Safe_iterator<_Iterator, _Sequence>& __it,
- std::input_iterator_tag)
- { return __it; }
-
- template<typename _Iterator, typename _Sequence>
- auto
- __base(const _Safe_iterator<_Iterator, _Sequence>& __it)
- -> decltype(__base(__it, std::__iterator_category(__it)))
- { return __base(__it, std::__iterator_category(__it)); }
-#endif
-
#if __cplusplus < 201103L
template<typename _Iterator, typename _Sequence>
struct _Unsafe_type<_Safe_iterator<_Iterator, _Sequence> >
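
[Editorial sketch] The restructuring above gives _Safe_iterator a third template parameter, defaulted to the wrapped iterator's category, and moves the bidirectional and random-access operations into partial specializations that each inherit from the weaker one. A stripped-down sketch of that layering (class and member names are invented for illustration; none of the attach/verify machinery is shown):

#include <cstddef>
#include <iterator>

// Primary template: only the operations every forward iterator has.
template<typename It,
         typename Cat = typename std::iterator_traits<It>::iterator_category>
class checked_iterator
{
public:
  explicit checked_iterator(It i) : it_(i) { }
  It base() const { return it_; }
  checked_iterator& operator++() { ++it_; return *this; }
protected:
  It it_;
};

// Bidirectional specialization layers decrement on top of the forward one.
template<typename It>
class checked_iterator<It, std::bidirectional_iterator_tag>
  : public checked_iterator<It, std::forward_iterator_tag>
{
  typedef checked_iterator<It, std::forward_iterator_tag> base_type;
public:
  explicit checked_iterator(It i) : base_type(i) { }
  checked_iterator& operator--() { --this->it_; return *this; }
};

// Random-access specialization adds iterator arithmetic on top of that.
template<typename It>
class checked_iterator<It, std::random_access_iterator_tag>
  : public checked_iterator<It, std::bidirectional_iterator_tag>
{
  typedef checked_iterator<It, std::bidirectional_iterator_tag> base_type;
public:
  explicit checked_iterator(It i) : base_type(i) { }
  checked_iterator& operator+=(std::ptrdiff_t n) { this->it_ += n; return *this; }
};

Because operator-- and the arithmetic operators simply do not exist on the forward-iterator instantiation, decrementing a debug forward_list iterator, for example, becomes a compile-time error rather than something only _GLIBCXX_DEBUG_VERIFY can catch at run time; the relational operators and operator- in the hunks above are likewise constrained to the random_access_iterator_tag form.
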
diff --git a/libstdc++-v3/include/debug/safe_iterator.tcc b/libstdc++-v3/include/debug/safe_iterator.tcc
index bdd95bbd8e3..2841583667f 100644
--- a/libstdc++-v3/include/debug/safe_iterator.tcc
+++ b/libstdc++-v3/include/debug/safe_iterator.tcc
@@ -31,9 +31,57 @@
namespace __gnu_debug
{
- template<typename _Iterator, typename _Sequence>
+ template<typename _Iterator, typename _Sequence, typename _Category>
+ typename _Distance_traits<_Iterator>::__type
+ _Safe_iterator<_Iterator, _Sequence, _Category>::
+ _M_get_distance_from_begin() const
+ {
+ typedef _Sequence_traits<_Sequence> _SeqTraits;
+
+ // No need to consider before_begin as this function is only used in
+ // _M_can_advance which won't be used for forward_list iterators.
+ if (_M_is_begin())
+ return std::make_pair(0, __dp_exact);
+
+ if (_M_is_end())
+ return _SeqTraits::_S_size(*_M_get_sequence());
+
+ typename _Distance_traits<_Iterator>::__type __res
+ = __get_distance(_M_get_sequence()->_M_base().begin(), base());
+
+ if (__res.second == __dp_equality)
+ return std::make_pair(1, __dp_sign);
+
+ return __res;
+ }
+
+ template<typename _Iterator, typename _Sequence, typename _Category>
+ typename _Distance_traits<_Iterator>::__type
+ _Safe_iterator<_Iterator, _Sequence, _Category>::
+ _M_get_distance_to_end() const
+ {
+ typedef _Sequence_traits<_Sequence> _SeqTraits;
+
+ // No need to consider before_begin as this function is only used in
+ // _M_can_advance which won't be used for forward_list iterators.
+ if (_M_is_begin())
+ return _SeqTraits::_S_size(*_M_get_sequence());
+
+ if (_M_is_end())
+ return std::make_pair(0, __dp_exact);
+
+ typename _Distance_traits<_Iterator>::__type __res
+ = __get_distance(base(), _M_get_sequence()->_M_base().end());
+
+ if (__res.second == __dp_equality)
+ return std::make_pair(1, __dp_sign);
+
+ return __res;
+ }
+
+ template<typename _Iterator, typename _Sequence, typename _Category>
bool
- _Safe_iterator<_Iterator, _Sequence>::
+ _Safe_iterator<_Iterator, _Sequence, _Category>::
_M_can_advance(const difference_type& __n) const
{
if (this->_M_singular())
@@ -45,7 +93,7 @@ namespace __gnu_debug
if (__n < 0)
{
std::pair<difference_type, _Distance_precision> __dist =
- __get_distance_from_begin(*this);
+ _M_get_distance_from_begin();
bool __ok = ((__dist.second == __dp_exact && __dist.first >= -__n)
|| (__dist.second != __dp_exact && __dist.first > 0));
return __ok;
@@ -53,16 +101,69 @@ namespace __gnu_debug
else
{
std::pair<difference_type, _Distance_precision> __dist =
- __get_distance_to_end(*this);
+ _M_get_distance_to_end();
bool __ok = ((__dist.second == __dp_exact && __dist.first >= __n)
|| (__dist.second != __dp_exact && __dist.first > 0));
return __ok;
}
}
- template<typename _Iterator, typename _Sequence>
+ template<typename _Iterator, typename _Sequence, typename _Category>
+ typename _Distance_traits<_Iterator>::__type
+ _Safe_iterator<_Iterator, _Sequence, _Category>::
+ _M_get_distance_to(const _Safe_iterator& __rhs) const
+ {
+ typedef typename _Distance_traits<_Iterator>::__type _Diff;
+ typedef _Sequence_traits<_Sequence> _SeqTraits;
+
+ if (this->base() == __rhs.base())
+ return std::make_pair(0, __dp_exact);
+
+ if (this->_M_is_before_begin())
+ {
+ if (__rhs._M_is_begin())
+ return std::make_pair(1, __dp_exact);
+
+ return std::make_pair(1, __dp_sign);
+ }
+
+ if (this->_M_is_begin())
+ {
+ if (__rhs._M_is_before_begin())
+ return std::make_pair(-1, __dp_exact);
+
+ if (__rhs._M_is_end())
+ return _SeqTraits::_S_size(*this->_M_get_sequence());
+
+ return std::make_pair(1, __dp_sign);
+ }
+
+ if (this->_M_is_end())
+ {
+ if (__rhs._M_is_before_begin())
+ return std::make_pair(-1, __dp_exact);
+
+ if (__rhs._M_is_begin())
+ {
+ _Diff __diff = _SeqTraits::_S_size(*this->_M_get_sequence());
+ return std::make_pair(-__diff.first, __diff.second);
+ }
+
+ return std::make_pair(-1, __dp_sign);
+ }
+
+ if (__rhs._M_is_before_begin() || __rhs._M_is_begin())
+ return std::make_pair(-1, __dp_sign);
+
+ if (__rhs._M_is_end())
+ return std::make_pair(1, __dp_sign);
+
+ return std::make_pair(1, __dp_equality);
+ }
+
+ template<typename _Iterator, typename _Sequence, typename _Category>
bool
- _Safe_iterator<_Iterator, _Sequence>::
+ _Safe_iterator<_Iterator, _Sequence, _Category>::
_M_valid_range(const _Safe_iterator& __rhs,
std::pair<difference_type, _Distance_precision>& __dist,
bool __check_dereferenceable) const
@@ -71,7 +172,7 @@ namespace __gnu_debug
return false;
/* Determine iterators order */
- __dist = __get_distance(*this, __rhs);
+ __dist = _M_get_distance_to(__rhs);
switch (__dist.second)
{
case __dp_equality:
@@ -90,6 +191,25 @@ namespace __gnu_debug
// Assume that this is a valid range; we can't check anything else.
return true;
}
+
+ template<typename _Iterator, typename _Sequence>
+ bool
+ _Safe_iterator<_Iterator, _Sequence, std::random_access_iterator_tag>::
+ _M_valid_range(const _Safe_iterator& __rhs,
+ std::pair<difference_type,
+ _Distance_precision>& __dist) const
+ {
+ if (!this->_M_can_compare(__rhs))
+ return false;
+
+ /* Determine iterators order */
+ __dist = std::make_pair(__rhs.base() - this->base(), __dp_exact);
+
+ // If range is not empty first iterator must be dereferenceable.
+ if (__dist.first > 0)
+ return this->_M_dereferenceable();
+ return __dist.first == 0;
+ }
} // namespace __gnu_debug
#endif
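
[Editorial sketch] The new random-access overload of _M_valid_range above is worth reading next to the generic one: for random-access iterators the distance is computed exactly as __rhs.base() - this->base(), and the range is valid when that distance is zero, or positive with a dereferenceable first iterator. Reduced to a free function over plain iterators (ignoring the _M_can_compare and dereferenceability plumbing, which needs the safe-sequence machinery):

#include <cstddef>

// first/last are random-access iterators into the same container.
// Reports the exact distance through dist and says whether [first, last)
// is a valid, non-reversed range.
template<typename It>
bool valid_random_access_range(It first, It last, std::ptrdiff_t& dist)
{
  dist = last - first;   // always exact here: __dp_exact in the library's terms
  if (dist > 0)
    return true;         // non-empty: the library additionally requires that
                         // `first` be dereferenceable
  return dist == 0;      // empty range is fine; negative means reversed
}

The generic _M_valid_range keeps the precision-based logic via the new _M_get_distance_to member, which is the old free __get_distance overload for safe iterators folded into the class.
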
diff --git a/libstdc++-v3/include/debug/safe_local_iterator.h b/libstdc++-v3/include/debug/safe_local_iterator.h
index f9597a6da08..854518848f9 100644
--- a/libstdc++-v3/include/debug/safe_local_iterator.h
+++ b/libstdc++-v3/include/debug/safe_local_iterator.h
@@ -51,15 +51,24 @@ namespace __gnu_debug
{
typedef _Iterator _Iter_base;
typedef _Safe_local_iterator_base _Safe_base;
- typedef typename _Sequence::const_local_iterator _Const_local_iterator;
+
typedef typename _Sequence::size_type size_type;
typedef std::iterator_traits<_Iterator> _Traits;
+ typedef std::__are_same<
+ typename _Sequence::_Base::const_local_iterator,
+ _Iterator> _IsConstant;
+
+ typedef typename __gnu_cxx::__conditional_type<_IsConstant::__value,
+ typename _Sequence::_Base::local_iterator,
+ typename _Sequence::_Base::const_local_iterator>::__type
+ _OtherIterator;
+
struct _Attach_single
{ };
- _Safe_local_iterator(const _Iterator& __i, _Safe_sequence_base* __cont,
+ _Safe_local_iterator(_Iterator __i, _Safe_sequence_base* __cont,
_Attach_single) noexcept
: _Iter_base(__i)
{ _M_attach_single(__cont); }
@@ -82,8 +91,7 @@ namespace __gnu_debug
* @pre @p seq is not NULL
* @post this is not singular
*/
- _Safe_local_iterator(const _Iterator& __i,
- const _Safe_sequence_base* __cont)
+ _Safe_local_iterator(_Iterator __i, const _Safe_sequence_base* __cont)
: _Iter_base(__i), _Safe_base(__cont, _S_constant())
{
_GLIBCXX_DEBUG_VERIFY(!this->_M_singular(),
@@ -132,16 +140,15 @@ namespace __gnu_debug
template<typename _MutableIterator>
_Safe_local_iterator(
const _Safe_local_iterator<_MutableIterator,
- typename __gnu_cxx::__enable_if<std::__are_same<
- _MutableIterator,
- typename _Sequence::local_iterator::iterator_type>::__value,
- _Sequence>::__type>& __x)
+ typename __gnu_cxx::__enable_if<_IsConstant::__value &&
+ std::__are_same<_MutableIterator, _OtherIterator>::__value,
+ _Sequence>::__type>& __x) noexcept
: _Iter_base(__x.base())
{
// _GLIBCXX_RESOLVE_LIB_DEFECTS
// DR 408. Is vector<reverse_iterator<char*> > forbidden?
_GLIBCXX_DEBUG_VERIFY(!__x._M_singular()
- || __x.base() == _Iterator(),
+ || __x.base() == _MutableIterator(),
_M_message(__msg_init_const_singular)
._M_iterator(*this, "this")
._M_iterator(__x, "other"));
@@ -272,12 +279,9 @@ namespace __gnu_debug
// ------ Utilities ------
/// Determine if this is a constant iterator.
- static bool
+ static constexpr bool
_S_constant()
- {
- return std::__are_same<_Const_local_iterator,
- _Safe_local_iterator>::__value;
- }
+ { return _IsConstant::__value; }
/**
* @brief Return the underlying iterator
@@ -326,12 +330,13 @@ namespace __gnu_debug
std::pair<difference_type,
_Distance_precision>& __dist_info) const;
+ // Get distance to __rhs.
+ typename _Distance_traits<_Iterator>::__type
+ _M_get_distance_to(const _Safe_local_iterator& __rhs) const;
+
// The sequence this iterator references.
- typename
- __gnu_cxx::__conditional_type<std::__are_same<_Const_local_iterator,
- _Safe_local_iterator>::__value,
- const _Sequence*,
- _Sequence*>::__type
+ typename __gnu_cxx::__conditional_type<
+ _IsConstant::__value, const _Sequence*, _Sequence*>::__type
_M_get_sequence() const
{ return static_cast<_Sequence*>(_M_sequence); }
@@ -396,7 +401,7 @@ namespace __gnu_debug
operator!=(const _Safe_local_iterator<_IteratorL, _Sequence>& __lhs,
const _Safe_local_iterator<_IteratorR, _Sequence>& __rhs)
{
- _GLIBCXX_DEBUG_VERIFY(! __lhs._M_singular() && ! __rhs._M_singular(),
+ _GLIBCXX_DEBUG_VERIFY(!__lhs._M_singular() && !__rhs._M_singular(),
_M_message(__msg_iter_compare_bad)
._M_iterator(__lhs, "lhs")
._M_iterator(__rhs, "rhs"));
@@ -431,13 +436,6 @@ namespace __gnu_debug
return __lhs.base() != __rhs.base();
}
- /** Safe local iterators know if they are dereferenceable. */
- template<typename _Iterator, typename _Sequence>
- inline bool
- __check_dereferenceable(const _Safe_local_iterator<_Iterator,
- _Sequence>& __x)
- { return __x._M_dereferenceable(); }
-
/** Safe local iterators know how to check if they form a valid range. */
template<typename _Iterator, typename _Sequence>
inline bool
@@ -446,49 +444,13 @@ namespace __gnu_debug
typename _Distance_traits<_Iterator>::__type& __dist_info)
{ return __first._M_valid_range(__last, __dist_info); }
- /** Safe local iterators need a special method to get distance between each
- other. */
template<typename _Iterator, typename _Sequence>
- inline std::pair<typename std::iterator_traits<_Iterator>::difference_type,
- _Distance_precision>
- __get_distance(const _Safe_local_iterator<_Iterator, _Sequence>& __first,
- const _Safe_local_iterator<_Iterator, _Sequence>& __last,
- std::input_iterator_tag)
+ inline bool
+ __valid_range(const _Safe_local_iterator<_Iterator, _Sequence>& __first,
+ const _Safe_local_iterator<_Iterator, _Sequence>& __last)
{
- if (__first.base() == __last.base())
- return { 0, __dp_exact };
-
- if (__first._M_is_begin())
- {
- if (__last._M_is_end())
- return
- {
- __first._M_get_sequence()->bucket_size(__first.bucket()),
- __dp_exact
- };
-
- return { 1, __dp_sign };
- }
-
- if (__first._M_is_end())
- {
- if (__last._M_is_begin())
- return
- {
- -__first._M_get_sequence()->bucket_size(__first.bucket()),
- __dp_exact
- };
-
- return { -1, __dp_sign };
- }
-
- if (__last._M_is_begin())
- return { -1, __dp_sign };
-
- if (__last._M_is_end())
- return { 1, __dp_sign };
-
- return { 1, __dp_equality };
+ typename _Distance_traits<_Iterator>::__type __dist_info;
+ return __first._M_valid_range(__last, __dist_info);
}
#if __cplusplus < 201103L
diff --git a/libstdc++-v3/include/debug/safe_local_iterator.tcc b/libstdc++-v3/include/debug/safe_local_iterator.tcc
index 24d8e176890..9637e9fa444 100644
--- a/libstdc++-v3/include/debug/safe_local_iterator.tcc
+++ b/libstdc++-v3/include/debug/safe_local_iterator.tcc
@@ -32,6 +32,47 @@
namespace __gnu_debug
{
template<typename _Iterator, typename _Sequence>
+ typename _Distance_traits<_Iterator>::__type
+ _Safe_local_iterator<_Iterator, _Sequence>::
+ _M_get_distance_to(const _Safe_local_iterator& __rhs) const
+ {
+ if (base() == __rhs.base())
+ return { 0, __dp_exact };
+
+ if (_M_is_begin())
+ {
+ if (__rhs._M_is_end())
+ return
+ {
+ _M_get_sequence()->bucket_size(bucket()),
+ __dp_exact
+ };
+
+ return { 1, __dp_sign };
+ }
+
+ if (_M_is_end())
+ {
+ if (__rhs._M_is_begin())
+ return
+ {
+ -_M_get_sequence()->bucket_size(bucket()),
+ __dp_exact
+ };
+
+ return { -1, __dp_sign };
+ }
+
+ if (__rhs._M_is_begin())
+ return { -1, __dp_sign };
+
+ if (__rhs._M_is_end())
+ return { 1, __dp_sign };
+
+ return { 1, __dp_equality };
+ }
+
+ template<typename _Iterator, typename _Sequence>
bool
_Safe_local_iterator<_Iterator, _Sequence>::
_M_valid_range(const _Safe_local_iterator& __rhs,
@@ -45,7 +86,7 @@ namespace __gnu_debug
/* Determine if we can order the iterators without the help of
the container */
- __dist = __get_distance(*this, __rhs);
+ __dist = _M_get_distance_to(__rhs);
switch (__dist.second)
{
case __dp_equality:
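
The exact distances reported above for bucket-local begin/end pairs come straight from the container's bucket interface; a small self-contained example of that standard API (output is illustrative, since bucket layout is implementation-defined):

    #include <cstddef>
    #include <iostream>
    #include <iterator>
    #include <unordered_set>

    int main()
    {
      std::unordered_set<int> s{1, 2, 3, 4, 5};

      // Local iterators walk a single bucket; its length is bucket_size().
      std::size_t b = s.bucket(3);
      std::cout << "bucket of 3: " << b
                << ", bucket_size: " << s.bucket_size(b) << '\n';

      // distance(begin(b), end(b)) equals bucket_size(b), which is what
      // _M_get_distance_to reports as __dp_exact for begin/end local iterators.
      std::cout << std::distance(s.begin(b), s.end(b)) << '\n';
    }
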
diff --git a/libstdc++-v3/include/debug/set b/libstdc++-v3/include/debug/set
index 08806ca5475..f5d41a20334 100644
--- a/libstdc++-v3/include/debug/set
+++ b/libstdc++-v3/include/debug/set
@@ -31,6 +31,12 @@
#pragma GCC system_header
+#include <bits/c++config.h>
+namespace std _GLIBCXX_VISIBILITY(default) { namespace __debug {
+ template<typename _Key, typename _Cmp, typename _Allocator> class set;
+ template<typename _Key, typename _Cmp, typename _Allocator> class multiset;
+} } // namespace std::__debug
+
#include <set>
#include <debug/set.h>
#include <debug/multiset.h>
diff --git a/libstdc++-v3/include/debug/set.h b/libstdc++-v3/include/debug/set.h
index 571cc474948..6f1a5070b2b 100644
--- a/libstdc++-v3/include/debug/set.h
+++ b/libstdc++-v3/include/debug/set.h
@@ -55,6 +55,9 @@ namespace __debug
typedef typename _Base::iterator _Base_iterator;
typedef __gnu_debug::_Equal_to<_Base_const_iterator> _Equal;
+ template<typename _ItT, typename _SeqT, typename _CatT>
+ friend class ::__gnu_debug::_Safe_iterator;
+
public:
// types:
typedef _Key key_type;
diff --git a/libstdc++-v3/include/debug/stl_iterator.h b/libstdc++-v3/include/debug/stl_iterator.h
index f20b000e0e5..3dbe402616c 100644
--- a/libstdc++-v3/include/debug/stl_iterator.h
+++ b/libstdc++-v3/include/debug/stl_iterator.h
@@ -52,12 +52,13 @@ namespace __gnu_debug
__can_advance(const std::reverse_iterator<_Iterator>& __it, _Size __n)
{ return __can_advance(__it.base(), -__n); }
-#if __cplusplus < 201103L
- template<typename _Iterator>
- struct __is_safe_random_iterator<std::reverse_iterator<_Iterator> >
- : __is_safe_random_iterator<_Iterator>
- { };
+ template<typename _Iterator, typename _Sequence>
+ inline std::reverse_iterator<_Iterator>
+ __base(const std::reverse_iterator<_Safe_iterator<
+ _Iterator, _Sequence, std::random_access_iterator_tag> >& __it)
+ { return std::reverse_iterator<_Iterator>(__it.base().base()); }
+#if __cplusplus < 201103L
template<typename _Iterator>
struct _Unsafe_type<std::reverse_iterator<_Iterator> >
{
@@ -75,12 +76,6 @@ namespace __gnu_debug
#else
template<typename _Iterator>
inline auto
- __base(const std::reverse_iterator<_Iterator>& __it)
- -> decltype(std::__make_reverse_iterator(__base(__it.base())))
- { return std::__make_reverse_iterator(__base(__it.base())); }
-
- template<typename _Iterator>
- inline auto
__unsafe(const std::reverse_iterator<_Iterator>& __it)
-> decltype(std::__make_reverse_iterator(__unsafe(__it.base())))
{ return std::__make_reverse_iterator(__unsafe(__it.base())); }
@@ -128,7 +123,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Iterator
__niter_base(const __gnu_debug::_Safe_iterator<
__gnu_cxx::__normal_iterator<_Iterator, _Container>,
- _Sequence>&);
+ _Sequence, std::random_access_iterator_tag>&);
_GLIBCXX_END_NAMESPACE_VERSION
}
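
The reverse_iterator overload of __base added above relies only on the documented base() relationship; a short standalone refresher using the public std::reverse_iterator API:

    #include <cassert>
    #include <iterator>
    #include <vector>

    int main()
    {
      std::vector<int> v{10, 20, 30};

      std::vector<int>::reverse_iterator r = v.rbegin();   // refers to 30
      assert(*r == 30);

      // base() is positioned one element past the element the reverse
      // iterator designates, i.e. &*r == &*(r.base() - 1).
      assert(r.base() == v.end());
      assert(*(r.base() - 1) == 30);

      // Rebuilding a reverse_iterator from the unwrapped base round-trips.
      std::vector<int>::reverse_iterator r2(r.base());
      assert(r2 == r);
    }
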
diff --git a/libstdc++-v3/include/debug/string b/libstdc++-v3/include/debug/string
index 7fa39e302ed..ca190fa6528 100644
--- a/libstdc++-v3/include/debug/string
+++ b/libstdc++-v3/include/debug/string
@@ -94,6 +94,13 @@ namespace __gnu_debug
basic_string, _Allocator, _Safe_sequence, bool(_GLIBCXX_USE_CXX11_ABI)>
_Safe;
+ template<typename _ItT, typename _SeqT, typename _CatT>
+ friend class ::__gnu_debug::_Safe_iterator;
+
+ // type used for positions in insert, erase etc.
+ typedef __gnu_debug::_Safe_iterator<
+ typename _Base::__const_iterator, basic_string> __const_iterator;
+
public:
// types:
typedef _Traits traits_type;
@@ -586,7 +593,7 @@ namespace __gnu_debug
}
iterator
- insert(const_iterator __p, _CharT __c)
+ insert(__const_iterator __p, _CharT __c)
{
__glibcxx_check_insert(__p);
typename _Base::iterator __res = _Base::insert(__p.base(), __c);
@@ -594,29 +601,51 @@ namespace __gnu_debug
return iterator(__res, this);
}
+#if __cplusplus >= 201103L
iterator
insert(const_iterator __p, size_type __n, _CharT __c)
{
__glibcxx_check_insert(__p);
+#if _GLIBCXX_USE_CXX11_ABI
typename _Base::iterator __res = _Base::insert(__p.base(), __n, __c);
+#else
+ const size_type __offset = __p.base() - _Base::cbegin();
+ _Base::insert(_Base::begin() + __offset, __n, __c);
+ typename _Base::iterator __res = _Base::begin() + __offset;
+#endif
this->_M_invalidate_all();
return iterator(__res, this);
}
+#else
+ void
+ insert(iterator __p, size_type __n, _CharT __c)
+ {
+ __glibcxx_check_insert(__p);
+ _Base::insert(__p.base(), __n, __c);
+ this->_M_invalidate_all();
+ }
+#endif
template<typename _InputIterator>
iterator
- insert(const_iterator __p,
+ insert(__const_iterator __p,
_InputIterator __first, _InputIterator __last)
{
typename __gnu_debug::_Distance_traits<_InputIterator>::__type __dist;
__glibcxx_check_insert_range(__p, __first, __last, __dist);
typename _Base::iterator __res;
+#if _GLIBCXX_USE_CXX11_ABI
if (__dist.second >= __dp_sign)
__res = _Base::insert(__p.base(), __gnu_debug::__unsafe(__first),
__gnu_debug::__unsafe(__last));
else
__res = _Base::insert(__p.base(), __first, __last);
+#else
+ const size_type __offset = __p.base() - _Base::begin();
+ _Base::insert(__p.base(), __first, __last);
+ __res = _Base::begin() + __offset;
+#endif
this->_M_invalidate_all();
return iterator(__res, this);
}
@@ -626,7 +655,13 @@ namespace __gnu_debug
insert(const_iterator __p, std::initializer_list<_CharT> __l)
{
__glibcxx_check_insert(__p);
+#if _GLIBCXX_USE_CXX11_ABI
const auto __res = _Base::insert(__p.base(), __l);
+#else
+ const size_type __offset = __p.base() - _Base::cbegin();
+ _Base::insert(_Base::begin() + __offset, __l);
+ auto __res = _Base::begin() + __offset;
+#endif
this->_M_invalidate_all();
return iterator(__res, this);
}
@@ -716,7 +751,8 @@ namespace __gnu_debug
}
basic_string&
- replace(iterator __i1, iterator __i2, const basic_string& __str)
+ replace(__const_iterator __i1, __const_iterator __i2,
+ const basic_string& __str)
{
__glibcxx_check_erase_range(__i1, __i2);
_Base::replace(__i1.base(), __i2.base(), __str);
@@ -725,7 +761,8 @@ namespace __gnu_debug
}
basic_string&
- replace(iterator __i1, iterator __i2, const _CharT* __s, size_type __n)
+ replace(__const_iterator __i1, __const_iterator __i2,
+ const _CharT* __s, size_type __n)
{
__glibcxx_check_erase_range(__i1, __i2);
__glibcxx_check_string_len(__s, __n);
@@ -735,7 +772,8 @@ namespace __gnu_debug
}
basic_string&
- replace(iterator __i1, iterator __i2, const _CharT* __s)
+ replace(__const_iterator __i1, __const_iterator __i2,
+ const _CharT* __s)
{
__glibcxx_check_erase_range(__i1, __i2);
__glibcxx_check_string(__s);
@@ -745,7 +783,8 @@ namespace __gnu_debug
}
basic_string&
- replace(iterator __i1, iterator __i2, size_type __n, _CharT __c)
+ replace(__const_iterator __i1, __const_iterator __i2,
+ size_type __n, _CharT __c)
{
__glibcxx_check_erase_range(__i1, __i2);
_Base::replace(__i1.base(), __i2.base(), __n, __c);
@@ -755,7 +794,7 @@ namespace __gnu_debug
template<typename _InputIterator>
basic_string&
- replace(iterator __i1, iterator __i2,
+ replace(__const_iterator __i1, __const_iterator __i2,
_InputIterator __j1, _InputIterator __j2)
{
__glibcxx_check_erase_range(__i1, __i2);
@@ -775,8 +814,9 @@ namespace __gnu_debug
}
#if __cplusplus >= 201103L
- basic_string& replace(iterator __i1, iterator __i2,
- std::initializer_list<_CharT> __l)
+ basic_string&
+ replace(__const_iterator __i1, __const_iterator __i2,
+ std::initializer_list<_CharT> __l)
{
__glibcxx_check_erase_range(__i1, __i2);
_Base::replace(__i1.base(), __i2.base(), __l);
diff --git a/libstdc++-v3/include/debug/unordered_map b/libstdc++-v3/include/debug/unordered_map
index e4f7c5ca733..e558f3957f7 100644
--- a/libstdc++-v3/include/debug/unordered_map
+++ b/libstdc++-v3/include/debug/unordered_map
@@ -34,6 +34,16 @@
#if __cplusplus < 201103L
# include <bits/c++0x_warning.h>
#else
+# include <bits/c++config.h>
+namespace std _GLIBCXX_VISIBILITY(default) { namespace __debug {
+ template<typename _Key, typename _Tp, typename _Hash, typename _Pred,
+ typename _Allocator>
+ class unordered_map;
+ template<typename _Key, typename _Tp, typename _Hash, typename _Pred,
+ typename _Allocator>
+ class unordered_multimap;
+} } // namespace std::__debug
+
# include <unordered_map>
#include <debug/safe_unordered_container.h>
@@ -66,6 +76,11 @@ namespace __debug
_Base_const_local_iterator;
typedef typename _Base::local_iterator _Base_local_iterator;
+ template<typename _ItT, typename _SeqT, typename _CatT>
+ friend class ::__gnu_debug::_Safe_iterator;
+ template<typename _ItT, typename _SeqT>
+ friend class ::__gnu_debug::_Safe_local_iterator;
+
public:
typedef typename _Base::size_type size_type;
typedef typename _Base::hasher hasher;
@@ -752,6 +767,11 @@ namespace __debug
typedef typename _Base::const_local_iterator _Base_const_local_iterator;
typedef typename _Base::local_iterator _Base_local_iterator;
+ template<typename _ItT, typename _SeqT, typename _CatT>
+ friend class ::__gnu_debug::_Safe_iterator;
+ template<typename _ItT, typename _SeqT>
+ friend class ::__gnu_debug::_Safe_local_iterator;
+
public:
typedef typename _Base::size_type size_type;
typedef typename _Base::hasher hasher;
@@ -768,7 +788,7 @@ namespace __debug
typedef __gnu_debug::_Safe_local_iterator<
_Base_local_iterator, unordered_multimap> local_iterator;
typedef __gnu_debug::_Safe_local_iterator<
- _Base_const_local_iterator, unordered_multimap> const_local_iterator;
+ _Base_const_local_iterator, unordered_multimap> const_local_iterator;
unordered_multimap() = default;
diff --git a/libstdc++-v3/include/debug/unordered_set b/libstdc++-v3/include/debug/unordered_set
index adafdb73a4f..a883d3204b0 100644
--- a/libstdc++-v3/include/debug/unordered_set
+++ b/libstdc++-v3/include/debug/unordered_set
@@ -34,6 +34,13 @@
#if __cplusplus < 201103L
# include <bits/c++0x_warning.h>
#else
+# include <bits/c++config.h>
+namespace std _GLIBCXX_VISIBILITY(default) { namespace __debug {
+ template<typename _Key, typename _Hash, typename _Pred, typename _Allocator>
+ class unordered_set;
+ template<typename _Key, typename _Hash, typename _Pred, typename _Allocator>
+ class unordered_multiset;
+} } // namespace std::__debug
# include <unordered_set>
#include <debug/safe_unordered_container.h>
@@ -66,6 +73,11 @@ namespace __debug
typedef typename _Base::const_local_iterator _Base_const_local_iterator;
typedef typename _Base::local_iterator _Base_local_iterator;
+ template<typename _ItT, typename _SeqT, typename _CatT>
+ friend class ::__gnu_debug::_Safe_iterator;
+ template<typename _ItT, typename _SeqT>
+ friend class ::__gnu_debug::_Safe_local_iterator;
+
public:
typedef typename _Base::size_type size_type;
typedef typename _Base::hasher hasher;
@@ -629,6 +641,11 @@ namespace __debug
_Base_const_local_iterator;
typedef typename _Base::local_iterator _Base_local_iterator;
+ template<typename _ItT, typename _SeqT, typename _CatT>
+ friend class ::__gnu_debug::_Safe_iterator;
+ template<typename _ItT, typename _SeqT>
+ friend class ::__gnu_debug::_Safe_local_iterator;
+
public:
typedef typename _Base::size_type size_type;
typedef typename _Base::hasher hasher;
@@ -645,7 +662,7 @@ namespace __debug
typedef __gnu_debug::_Safe_local_iterator<
_Base_local_iterator, unordered_multiset> local_iterator;
typedef __gnu_debug::_Safe_local_iterator<
- _Base_const_local_iterator, unordered_multiset> const_local_iterator;
+ _Base_const_local_iterator, unordered_multiset> const_local_iterator;
unordered_multiset() = default;
diff --git a/libstdc++-v3/include/debug/vector b/libstdc++-v3/include/debug/vector
index ced5520ae7e..ff9f5f47c24 100644
--- a/libstdc++-v3/include/debug/vector
+++ b/libstdc++-v3/include/debug/vector
@@ -31,6 +31,11 @@
#pragma GCC system_header
+#include <bits/c++config.h>
+namespace std _GLIBCXX_VISIBILITY(default) { namespace __debug {
+ template<typename _Tp, typename _Allocator> class vector;
+} } // namespace std::__debug
+
#include <vector>
#include <utility>
#include <debug/safe_sequence.h>
@@ -127,6 +132,9 @@ namespace __debug
typedef typename _Base::const_iterator _Base_const_iterator;
typedef __gnu_debug::_Equal_to<_Base_const_iterator> _Equal;
+ template<typename _ItT, typename _SeqT, typename _CatT>
+ friend class ::__gnu_debug::_Safe_iterator;
+
public:
typedef typename _Base::reference reference;
typedef typename _Base::const_reference const_reference;
@@ -771,9 +779,9 @@ namespace __debug
} // namespace __debug
-#if __cplusplus >= 201103L
_GLIBCXX_BEGIN_NAMESPACE_VERSION
+#if __cplusplus >= 201103L
// DR 1182.
/// std::hash specialization for vector<bool>.
template<typename _Alloc>
@@ -784,17 +792,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
operator()(const __debug::vector<bool, _Alloc>& __b) const noexcept
{ return std::hash<_GLIBCXX_STD_C::vector<bool, _Alloc>>()(__b); }
};
+#endif
template<typename _Iterator, typename _Container, typename _Sequence>
_Iterator
__niter_base(const __gnu_debug::_Safe_iterator<
__gnu_cxx::__normal_iterator<_Iterator, _Container>,
- _Sequence>& __it)
+ _Sequence, std::random_access_iterator_tag>& __it)
{ return std::__niter_base(__it.base()); }
_GLIBCXX_END_NAMESPACE_VERSION
-#endif
-
} // namespace std
namespace __gnu_debug
diff --git a/libstdc++-v3/include/experimental/regex b/libstdc++-v3/include/experimental/regex
index eb2af151245..633b396b312 100644
--- a/libstdc++-v3/include/experimental/regex
+++ b/libstdc++-v3/include/experimental/regex
@@ -44,6 +44,7 @@ namespace experimental
{
inline namespace fundamentals_v2
{
+#if _GLIBCXX_USE_CXX11_ABI
namespace pmr
{
template<typename _BidirectionalIterator>
@@ -57,7 +58,7 @@ namespace pmr
typedef match_results<wstring::const_iterator> wsmatch;
} // namespace pmr
-
+#endif
} // namespace fundamentals_v2
} // namespace experimental
_GLIBCXX_END_NAMESPACE_VERSION
diff --git a/libstdc++-v3/include/experimental/string b/libstdc++-v3/include/experimental/string
index f101255d641..5a96bf78d73 100644
--- a/libstdc++-v3/include/experimental/string
+++ b/libstdc++-v3/include/experimental/string
@@ -62,6 +62,7 @@ inline namespace fundamentals_v2
__cont.end());
}
+#if _GLIBCXX_USE_CXX11_ABI
namespace pmr
{
// basic_string using polymorphic allocator in namespace pmr
@@ -77,6 +78,7 @@ inline namespace fundamentals_v2
typedef basic_string<wchar_t> wstring;
} // namespace pmr
+#endif
} // namespace fundamentals_v2
} // namespace experimental
diff --git a/libstdc++-v3/include/ext/pointer.h b/libstdc++-v3/include/ext/pointer.h
index ee5c30dfa64..1e89c3779af 100644
--- a/libstdc++-v3/include/ext/pointer.h
+++ b/libstdc++-v3/include/ext/pointer.h
@@ -441,6 +441,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_CXX_POINTER_ARITH_OPERATOR_SET(unsigned int);
_CXX_POINTER_ARITH_OPERATOR_SET(long);
_CXX_POINTER_ARITH_OPERATOR_SET(unsigned long);
+#ifdef _GLIBCXX_USE_LONG_LONG
+ _CXX_POINTER_ARITH_OPERATOR_SET(long long);
+ _CXX_POINTER_ARITH_OPERATOR_SET(unsigned long long);
+#endif
// Mathematical Manipulators
inline _Pointer_adapter&
diff --git a/libstdc++-v3/include/std/bit b/libstdc++-v3/include/std/bit
index 0aebac28758..bc2ade75b35 100644
--- a/libstdc++-v3/include/std/bit
+++ b/libstdc++-v3/include/std/bit
@@ -195,9 +195,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__ceil2(_Tp __x) noexcept
{
constexpr auto _Nd = numeric_limits<_Tp>::digits;
- if (__x == 0)
+ if (__x == 0 || __x == 1)
return 1;
- return (_Tp)1u << (_Nd - std::__countl_zero((_Tp)(__x - 1u)));
+ const unsigned __n = _Nd - std::__countl_zero((_Tp)(__x - 1u));
+ const _Tp __y_2 = (_Tp)1u << (__n - 1u);
+ return __y_2 << 1u;
}
template<typename _Tp>
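
The <bit> hunk splits the final shift so the shift count never reaches the full width of the type, and handles __x == 1 up front, apparently so the computed count cannot underflow in that two-step form. As a point of comparison only, here is a hypothetical ceil2 for 32-bit unsigned values that avoids out-of-range shifts by bit smearing instead of counting leading zeros:

    #include <cassert>
    #include <cstdint>

    // Round x up to the next power of two without ever shifting by the
    // full type width (illustrative helper, not the libstdc++ code).
    std::uint32_t ceil2_u32(std::uint32_t x)
    {
      if (x <= 1)
        return 1;
      --x;                 // so exact powers of two map to themselves
      x |= x >> 1;
      x |= x >> 2;
      x |= x >> 4;
      x |= x >> 8;
      x |= x >> 16;
      return x + 1;        // wraps to 0 if the result is not representable
    }

    int main()
    {
      assert(ceil2_u32(1) == 1);
      assert(ceil2_u32(3) == 4);
      assert(ceil2_u32(64) == 64);
      assert(ceil2_u32(65) == 128);
    }
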
diff --git a/libstdc++-v3/include/std/regex b/libstdc++-v3/include/std/regex
index f0dfa3f68ab..fd155658353 100644
--- a/libstdc++-v3/include/std/regex
+++ b/libstdc++-v3/include/std/regex
@@ -62,7 +62,7 @@
#include <bits/regex.h>
#include <bits/regex_executor.h>
-#if __cplusplus >= 201703L
+#if __cplusplus >= 201703L && _GLIBCXX_USE_CXX11_ABI
#include <memory_resource>
namespace std _GLIBCXX_VISIBILITY(default)
{
diff --git a/libstdc++-v3/include/std/string b/libstdc++-v3/include/std/string
index d3cc03aa179..dd60df2ba6e 100644
--- a/libstdc++-v3/include/std/string
+++ b/libstdc++-v3/include/std/string
@@ -52,7 +52,7 @@
#include <bits/basic_string.h>
#include <bits/basic_string.tcc>
-#if __cplusplus >= 201703L
+#if __cplusplus >= 201703L && _GLIBCXX_USE_CXX11_ABI
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
diff --git a/libstdc++-v3/libsupc++/new b/libstdc++-v3/libsupc++/new
index 82d884d95d3..19bc1832541 100644
--- a/libstdc++-v3/libsupc++/new
+++ b/libstdc++-v3/libsupc++/new
@@ -137,9 +137,9 @@ void operator delete[](void*, std::size_t) _GLIBCXX_USE_NOEXCEPT
__attribute__((__externally_visible__));
#endif
void* operator new(std::size_t, const std::nothrow_t&) _GLIBCXX_USE_NOEXCEPT
- __attribute__((__externally_visible__));
+ __attribute__((__externally_visible__, __malloc__));
void* operator new[](std::size_t, const std::nothrow_t&) _GLIBCXX_USE_NOEXCEPT
- __attribute__((__externally_visible__));
+ __attribute__((__externally_visible__, __malloc__));
void operator delete(void*, const std::nothrow_t&) _GLIBCXX_USE_NOEXCEPT
__attribute__((__externally_visible__));
void operator delete[](void*, const std::nothrow_t&) _GLIBCXX_USE_NOEXCEPT
@@ -148,7 +148,7 @@ void operator delete[](void*, const std::nothrow_t&) _GLIBCXX_USE_NOEXCEPT
void* operator new(std::size_t, std::align_val_t)
__attribute__((__externally_visible__));
void* operator new(std::size_t, std::align_val_t, const std::nothrow_t&)
- _GLIBCXX_USE_NOEXCEPT __attribute__((__externally_visible__));
+ _GLIBCXX_USE_NOEXCEPT __attribute__((__externally_visible__, __malloc__));
void operator delete(void*, std::align_val_t)
_GLIBCXX_USE_NOEXCEPT __attribute__((__externally_visible__));
void operator delete(void*, std::align_val_t, const std::nothrow_t&)
@@ -156,7 +156,7 @@ void operator delete(void*, std::align_val_t, const std::nothrow_t&)
void* operator new[](std::size_t, std::align_val_t)
__attribute__((__externally_visible__));
void* operator new[](std::size_t, std::align_val_t, const std::nothrow_t&)
- _GLIBCXX_USE_NOEXCEPT __attribute__((__externally_visible__));
+ _GLIBCXX_USE_NOEXCEPT __attribute__((__externally_visible__, __malloc__));
void operator delete[](void*, std::align_val_t)
_GLIBCXX_USE_NOEXCEPT __attribute__((__externally_visible__));
void operator delete[](void*, std::align_val_t, const std::nothrow_t&)
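
For context on the attribute the <new> hunk adds to the nothrow forms: __malloc__ is GCC's hint that the returned pointer does not alias any other pointer live at the call and that the storage it points to contains no pointers to live objects, which helps alias analysis at call sites. A minimal, hypothetical use on a user allocator (illustration only, not part of the patch):

    #include <cstdlib>

    // Declares my_alloc as malloc-like: its result aliases nothing else.
    void* my_alloc(std::size_t n) __attribute__((__malloc__));

    void* my_alloc(std::size_t n)
    {
      return std::malloc(n);   // may return a null pointer, like nothrow new
    }
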
diff --git a/libstdc++-v3/scripts/check_compile b/libstdc++-v3/scripts/check_compile
index 76762e675ee..2f347ec9d30 100755
--- a/libstdc++-v3/scripts/check_compile
+++ b/libstdc++-v3/scripts/check_compile
@@ -1,9 +1,9 @@
#!/usr/bin/env bash
-# Script to do performance testing.
+# Script to do compilation-only testing.
# Invocation
-# check_performance SRC_DIR BUILD_DIR
+# check_compile SRC_DIR BUILD_DIR
# 1: variables
#
@@ -17,7 +17,7 @@ shift
# This has been true all along. Found out about it the hard way...
case $BASH_VERSION in
1*)
- echo 'You need bash 2.x to run check_performance. Exiting.';
+ echo 'You need bash 2.x to run check_compile. Exiting.';
exit 1 ;;
*) ;;
esac
diff --git a/libstdc++-v3/src/filesystem/std-path.cc b/libstdc++-v3/src/filesystem/std-path.cc
index f6c0b8bb0f6..f382eb3759a 100644
--- a/libstdc++-v3/src/filesystem/std-path.cc
+++ b/libstdc++-v3/src/filesystem/std-path.cc
@@ -438,7 +438,7 @@ path::lexically_normal() const
{
#ifdef _GLIBCXX_FILESYSTEM_IS_WINDOWS
// Replace each slash character in the root-name
- if (p._M_type == _Type::_Root_name)
+ if (p._M_type == _Type::_Root_name || p._M_type == _Type::_Root_dir)
{
string_type s = p.native();
std::replace(s.begin(), s.end(), L'/', L'\\');
@@ -458,7 +458,8 @@ path::lexically_normal() const
}
else if (!ret.has_relative_path())
{
- if (!ret.is_absolute())
+ // remove a dot-dot filename immediately after root-directory
+ if (!ret.has_root_directory())
ret /= p;
}
else
@@ -471,8 +472,18 @@ path::lexically_normal() const
{
// Remove the filename before the trailing slash
// (equiv. to ret = ret.parent_path().remove_filename())
- ret._M_pathname.erase(elem._M_cur->_M_pos);
- ret._M_cmpts.erase(elem._M_cur, ret._M_cmpts.end());
+
+ if (elem == ret.begin())
+ ret.clear();
+ else
+ {
+ ret._M_pathname.erase(elem._M_cur->_M_pos);
+ // Do we still have a trailing slash?
+ if (std::prev(elem)->_M_type == _Type::_Filename)
+ ret._M_cmpts.erase(elem._M_cur);
+ else
+ ret._M_cmpts.erase(elem._M_cur, ret._M_cmpts.end());
+ }
}
else // ???
ret /= p;
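
To make the dot-dot handling above concrete, a short example of the documented lexical normalization rules (expected results shown in comments; no filesystem access is involved):

    #include <filesystem>
    #include <iostream>

    int main()
    {
      namespace fs = std::filesystem;

      // Dot and dot-dot elements are folded purely lexically.
      std::cout << fs::path("foo/./bar/..").lexically_normal() << '\n';    // "foo/"
      std::cout << fs::path("foo/.///bar/../").lexically_normal() << '\n'; // "foo/"

      // A dot-dot immediately after the root-directory is removed.
      std::cout << fs::path("/..").lexically_normal() << '\n';             // "/"
    }
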
diff --git a/libstdc++-v3/testsuite/20_util/reference_wrapper/lwg2993.cc b/libstdc++-v3/testsuite/20_util/reference_wrapper/lwg2993.cc
index 0a339486ef8..fa7cc728abe 100644
--- a/libstdc++-v3/testsuite/20_util/reference_wrapper/lwg2993.cc
+++ b/libstdc++-v3/testsuite/20_util/reference_wrapper/lwg2993.cc
@@ -51,5 +51,5 @@ test02()
// error: no member 'type' because the conditional
// expression is ill-formed
- using t = std::common_type_t<std::reference_wrapper<int>, int>;
+ using t = typename std::common_type<std::reference_wrapper<int>, int>::type;
}
diff --git a/libstdc++-v3/testsuite/21_strings/basic_string/types/pmr_typedefs.cc b/libstdc++-v3/testsuite/21_strings/basic_string/types/pmr_typedefs.cc
index d20fc42901e..23956673792 100644
--- a/libstdc++-v3/testsuite/21_strings/basic_string/types/pmr_typedefs.cc
+++ b/libstdc++-v3/testsuite/21_strings/basic_string/types/pmr_typedefs.cc
@@ -17,6 +17,7 @@
// { dg-options "-std=gnu++17" }
// { dg-do compile { target c++17 } }
+// { dg-require-effective-target cxx11-abi }
#include <string>
#include <memory_resource>
diff --git a/libstdc++-v3/testsuite/22_locale/time_get/get_date/wchar_t/4.cc b/libstdc++-v3/testsuite/22_locale/time_get/get_date/wchar_t/4.cc
index 21e51d250ad..a532c93d4a1 100644
--- a/libstdc++-v3/testsuite/22_locale/time_get/get_date/wchar_t/4.cc
+++ b/libstdc++-v3/testsuite/22_locale/time_get/get_date/wchar_t/4.cc
@@ -25,6 +25,8 @@
#include <sstream>
#include <testsuite_hooks.h>
+static bool debian_date_format();
+
void test01()
{
using namespace std;
@@ -46,7 +48,7 @@ void test01()
0x5e74, L'1', L'2', 0x6708, L'1', L'7',
0x65e5 , 0x0 };
- iss.str(wstr);
+ iss.str(debian_date_format() ? wstr+2 : wstr);
iterator_type is_it01(iss);
tm time01;
tim_get.get_date(is_it01, end, iss, errorstate, &time01);
@@ -56,6 +58,26 @@ void test01()
VERIFY( time01.tm_year == 103 );
}
+#include <locale.h>
+#if __has_include(<langinfo.h>)
+# include <langinfo.h>
+#endif
+
+static bool debian_date_format()
+{
+#ifdef D_FMT
+ if (setlocale(LC_TIME, "zh_TW.UTF-8") != NULL)
+ {
+ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=31413
+ // and https://gcc.gnu.org/bugzilla/show_bug.cgi?id=71641#c2
+ if (*nl_langinfo(D_FMT) == '%')
+ return true;
+ setlocale(LC_TIME, "C");
+ }
+#endif
+ return false;
+}
+
int main()
{
test01();
diff --git a/libstdc++-v3/testsuite/23_containers/deque/capacity/max_size.cc b/libstdc++-v3/testsuite/23_containers/deque/capacity/max_size.cc
new file mode 100644
index 00000000000..1a38c4ed698
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/deque/capacity/max_size.cc
@@ -0,0 +1,146 @@
+// Copyright (C) 2018 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run }
+
+#include <deque>
+#include <stdexcept>
+#include <limits>
+#include <testsuite_hooks.h>
+
+typedef std::deque<char> test_type;
+
+typedef test_type::size_type size_type;
+typedef test_type::difference_type difference_type;
+
+const difference_type diffmax = std::numeric_limits<difference_type>::max();
+
+void
+test01()
+{
+ test_type v;
+ VERIFY( v.max_size() <= diffmax );
+}
+
+void
+test02()
+{
+ size_type n = size_type(diffmax) + 1;
+ VERIFY( n > test_type().max_size() );
+
+ try {
+ test_type v(n);
+ VERIFY( false );
+ } catch (const std::length_error&) { }
+
+ try {
+ test_type v(n, 'x');
+ VERIFY( false );
+ } catch (const std::length_error&) { }
+
+ try {
+ test_type v(n, 'x', test_type::allocator_type());
+ VERIFY( false );
+ } catch (const std::length_error&) { }
+}
+
+#ifdef __GLIBCXX_TYPE_INT_N_0
+template<typename T, typename U, bool = (sizeof(T) > sizeof(long long))>
+ struct Base_
+ {
+ typedef T difference_type;
+ typedef U size_type;
+ };
+
+template<typename T, typename U>
+ struct Base_<T, U, false>
+ {
+ typedef long long difference_type;
+ typedef unsigned long long size_type;
+ };
+
+typedef Base_<__GLIBCXX_TYPE_INT_N_0, unsigned __GLIBCXX_TYPE_INT_N_0> Base;
+#else
+struct Base
+{
+ typedef long long difference_type;
+ typedef unsigned long long size_type;
+};
+#endif
+
+// An iterator with a difference_type larger than ptrdiff_t
+struct Iter : Base
+{
+ typedef std::random_access_iterator_tag iterator_category;
+ typedef char value_type;
+ typedef const char* pointer;
+ typedef const char& reference;
+ using Base::difference_type;
+
+ Iter() : n(0) { }
+ Iter(size_type n) : n(n) { }
+
+ reference operator*() const { return value; }
+ pointer operator->() const { return &value; }
+
+ Iter& operator++() { ++n; return *this; }
+ Iter operator++(int) { Iter tmp(*this); ++n; return tmp; }
+ Iter& operator--() { --n; return *this; }
+ Iter operator--(int) { Iter tmp(*this); --n; return tmp; }
+
+ Iter& operator+=(difference_type d) { n += d; return *this; }
+ Iter& operator-=(difference_type d) { n -= d; return *this; }
+
+ difference_type operator-(const Iter& rhs) const { return n - rhs.n; }
+
+ reference operator[](difference_type d) const { return value; }
+
+ bool operator==(const Iter& rhs) const { return n == rhs.n; }
+ bool operator!=(const Iter& rhs) const { return n != rhs.n; }
+ bool operator<(const Iter& rhs) const { return n < rhs.n; }
+ bool operator>(const Iter& rhs) const { return n > rhs.n; }
+ bool operator<=(const Iter& rhs) const { return n <= rhs.n; }
+ bool operator>=(const Iter& rhs) const { return n >= rhs.n; }
+
+private:
+ size_type n;
+ static const char value = 'x';
+};
+
+Iter operator+(Iter i, Iter::difference_type n) { return i += n; }
+Iter operator+(Iter::difference_type n, Iter i) { return i += n; }
+Iter operator-(Iter::difference_type n, Iter i) { return i -= n; }
+
+void
+test03()
+{
+ Iter first, last(Iter::size_type(diffmax) + 1);
+ VERIFY( std::distance(first, last) > test_type().max_size() );
+
+ try {
+ test_type vec(first, last);
+ VERIFY(false);
+ } catch (const std::length_error&) { }
+}
+
+int
+main()
+{
+ test01();
+ test02();
+ test03();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/deque/modifiers/assign/1.cc b/libstdc++-v3/testsuite/23_containers/deque/modifiers/assign/1.cc
index fbab09b9ba2..a6668f7c199 100644
--- a/libstdc++-v3/testsuite/23_containers/deque/modifiers/assign/1.cc
+++ b/libstdc++-v3/testsuite/23_containers/deque/modifiers/assign/1.cc
@@ -27,7 +27,7 @@ int main()
{
std::deque<int> d;
- int array[] { 0, 1, 2 };
+ int array[] = { 0, 1, 2 };
input_iterator_seq seq(array, array + 3);
d.assign(seq.begin(), seq.end());
diff --git a/libstdc++-v3/testsuite/23_containers/deque/types/pmr_typedefs_debug.cc b/libstdc++-v3/testsuite/23_containers/deque/types/pmr_typedefs_debug.cc
new file mode 100644
index 00000000000..85c725ad535
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/deque/types/pmr_typedefs_debug.cc
@@ -0,0 +1,25 @@
+// Copyright (C) 2018 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++17 -D_GLIBCXX_DEBUG" }
+// { dg-do compile { target c++17 } }
+
+#include <debug/deque>
+static_assert(std::is_same_v<
+ std::pmr::deque<int>,
+ __gnu_debug::deque<int, std::pmr::polymorphic_allocator<int>>
+ >);
diff --git a/libstdc++-v3/testsuite/23_containers/forward_list/pmr_typedefs_debug.cc b/libstdc++-v3/testsuite/23_containers/forward_list/pmr_typedefs_debug.cc
new file mode 100644
index 00000000000..410ed82deed
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/forward_list/pmr_typedefs_debug.cc
@@ -0,0 +1,25 @@
+// Copyright (C) 2018 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++17 -D_GLIBCXX_DEBUG" }
+// { dg-do compile { target c++17 } }
+
+#include <debug/forward_list>
+static_assert(std::is_same_v<
+ std::pmr::forward_list<int>,
+ __gnu_debug::forward_list<int, std::pmr::polymorphic_allocator<int>>
+ >);
diff --git a/libstdc++-v3/testsuite/23_containers/list/68222_neg.cc b/libstdc++-v3/testsuite/23_containers/list/68222_neg.cc
new file mode 100644
index 00000000000..cd33762e01a
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/list/68222_neg.cc
@@ -0,0 +1,37 @@
+// Copyright (C) 2018 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do compile { target c++11 } }
+
+#include <list>
+
+void
+test01()
+{
+ // A list of int.
+ const std::list<int> nums = { 1, 2, 3, 4 };
+
+ // Grab the iterator type.
+ using list_itr_type = decltype( std::begin( nums ) );
+
+ // Confirm cend returns the same type.
+ static_assert( std::is_same< decltype( std::end( nums ) ), list_itr_type >::value, "" );
+
+ // The list's iterator type provides a well-formed non-member operator-() with valid return type (long int)
+  using subtraction_type
+ = decltype( std::declval<list_itr_type>() - std::declval<list_itr_type>() ); // { dg-error "no match for 'operator-'" }
+}
diff --git a/libstdc++-v3/testsuite/23_containers/list/modifiers/assign/1.cc b/libstdc++-v3/testsuite/23_containers/list/modifiers/assign/1.cc
index c5fde47059a..f4d32883328 100644
--- a/libstdc++-v3/testsuite/23_containers/list/modifiers/assign/1.cc
+++ b/libstdc++-v3/testsuite/23_containers/list/modifiers/assign/1.cc
@@ -27,7 +27,7 @@ int main()
{
std::list<int> l;
- int array[] { 0, 1, 2 };
+ int array[] = { 0, 1, 2 };
input_iterator_seq seq(array, array + 3);
l.assign(seq.begin(), seq.end());
diff --git a/libstdc++-v3/testsuite/23_containers/list/pmr_typedefs_debug.cc b/libstdc++-v3/testsuite/23_containers/list/pmr_typedefs_debug.cc
new file mode 100644
index 00000000000..671c12e43d1
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/list/pmr_typedefs_debug.cc
@@ -0,0 +1,25 @@
+// Copyright (C) 2018 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++17 -D_GLIBCXX_DEBUG" }
+// { dg-do compile { target c++17 } }
+
+#include <debug/list>
+static_assert(std::is_same_v<
+ std::pmr::list<int>,
+ __gnu_debug::list<int, std::pmr::polymorphic_allocator<int>>
+ >);
diff --git a/libstdc++-v3/testsuite/23_containers/map/pmr_typedefs_debug.cc b/libstdc++-v3/testsuite/23_containers/map/pmr_typedefs_debug.cc
new file mode 100644
index 00000000000..e3978a1ec53
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/map/pmr_typedefs_debug.cc
@@ -0,0 +1,26 @@
+// Copyright (C) 2018 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++17 -D_GLIBCXX_DEBUG" }
+// { dg-do compile { target c++17 } }
+
+#include <debug/map>
+static_assert(std::is_same_v<
+ std::pmr::map<int, int>,
+ __gnu_debug::map<int, int, std::less<int>,
+ std::pmr::polymorphic_allocator<std::pair<const int, int>>>
+ >);
diff --git a/libstdc++-v3/testsuite/23_containers/multimap/pmr_typedefs_debug.cc b/libstdc++-v3/testsuite/23_containers/multimap/pmr_typedefs_debug.cc
new file mode 100644
index 00000000000..cbd4d95176a
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/multimap/pmr_typedefs_debug.cc
@@ -0,0 +1,26 @@
+// Copyright (C) 2018 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++17 -D_GLIBCXX_DEBUG" }
+// { dg-do compile { target c++17 } }
+
+#include <debug/map>
+static_assert(std::is_same_v<
+ std::pmr::multimap<int, int>,
+ __gnu_debug::multimap<int, int, std::less<int>,
+ std::pmr::polymorphic_allocator<std::pair<const int, int>>>
+ >);
diff --git a/libstdc++-v3/testsuite/23_containers/multiset/pmr_typedefs_debug.cc b/libstdc++-v3/testsuite/23_containers/multiset/pmr_typedefs_debug.cc
new file mode 100644
index 00000000000..d8ff08d04b5
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/multiset/pmr_typedefs_debug.cc
@@ -0,0 +1,26 @@
+// Copyright (C) 2018 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++17 -D_GLIBCXX_DEBUG" }
+// { dg-do compile { target c++17 } }
+
+#include <debug/set>
+static_assert(std::is_same_v<
+ std::pmr::multiset<int>,
+ __gnu_debug::multiset<int, std::less<int>,
+ std::pmr::polymorphic_allocator<int>>
+ >);
diff --git a/libstdc++-v3/testsuite/23_containers/set/pmr_typedefs_debug.cc b/libstdc++-v3/testsuite/23_containers/set/pmr_typedefs_debug.cc
new file mode 100644
index 00000000000..f44f68a49a4
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/set/pmr_typedefs_debug.cc
@@ -0,0 +1,26 @@
+// Copyright (C) 2018 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++17 -D_GLIBCXX_DEBUG" }
+// { dg-do compile { target c++17 } }
+
+#include <debug/set>
+static_assert(std::is_same_v<
+ std::pmr::set<int>,
+ __gnu_debug::set<int, std::less<int>,
+ std::pmr::polymorphic_allocator<int>>
+ >);
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_map/pmr_typedefs_debug.cc b/libstdc++-v3/testsuite/23_containers/unordered_map/pmr_typedefs_debug.cc
new file mode 100644
index 00000000000..2c423bc79e2
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_map/pmr_typedefs_debug.cc
@@ -0,0 +1,26 @@
+// Copyright (C) 2018 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++17 -D_GLIBCXX_DEBUG" }
+// { dg-do compile { target c++17 } }
+
+#include <debug/unordered_map>
+static_assert(std::is_same_v<
+ std::pmr::unordered_map<int, int>,
+ __gnu_debug::unordered_map<int, int, std::hash<int>, std::equal_to<int>,
+ std::pmr::polymorphic_allocator<std::pair<const int, int>>>
+ >);
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_multimap/pmr_typedefs_debug.cc b/libstdc++-v3/testsuite/23_containers/unordered_multimap/pmr_typedefs_debug.cc
new file mode 100644
index 00000000000..30ae2a732b6
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_multimap/pmr_typedefs_debug.cc
@@ -0,0 +1,27 @@
+// Copyright (C) 2018 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++17 -D_GLIBCXX_DEBUG" }
+// { dg-do compile { target c++17 } }
+
+#include <debug/unordered_map>
+static_assert(std::is_same_v<
+ std::pmr::unordered_multimap<int, int>,
+ __gnu_debug::unordered_multimap<int, int, std::hash<int>,
+ std::equal_to<int>,
+ std::pmr::polymorphic_allocator<std::pair<const int, int>>>
+ >);
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_multiset/pmr_typedefs_debug.cc b/libstdc++-v3/testsuite/23_containers/unordered_multiset/pmr_typedefs_debug.cc
new file mode 100644
index 00000000000..5f0ed27a1d0
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_multiset/pmr_typedefs_debug.cc
@@ -0,0 +1,26 @@
+// Copyright (C) 2018 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++17 -D_GLIBCXX_DEBUG" }
+// { dg-do compile { target c++17 } }
+
+#include <debug/unordered_set>
+static_assert(std::is_same_v<
+ std::pmr::unordered_multiset<int>,
+ __gnu_debug::unordered_multiset<int, std::hash<int>, std::equal_to<int>,
+ std::pmr::polymorphic_allocator<int>>
+ >);
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_set/debug/debug_functions.cc b/libstdc++-v3/testsuite/23_containers/unordered_set/debug/debug_functions.cc
index 9fb12edbe6e..1d45e74782d 100644
--- a/libstdc++-v3/testsuite/23_containers/unordered_set/debug/debug_functions.cc
+++ b/libstdc++-v3/testsuite/23_containers/unordered_set/debug/debug_functions.cc
@@ -21,31 +21,6 @@
#include <unordered_set>
#include <testsuite_hooks.h>
-void test01()
-{
- using namespace __gnu_debug;
-
- std::unordered_set<int> u = { 0, 1, 2 };
- VERIFY( __check_dereferenceable(u.begin()) );
- auto it = u.begin();
- VERIFY( __check_dereferenceable(it) );
-
- VERIFY( __check_dereferenceable(u.cbegin()) );
- auto cit = u.begin();
- VERIFY( __check_dereferenceable(cit) );
-
- VERIFY( !__check_dereferenceable(u.end()) );
- it = u.end();
- VERIFY( !__check_dereferenceable(it) );
-
- auto bucket = u.bucket(0);
- VERIFY( __check_dereferenceable(u.begin(bucket)) );
- auto lit = u.begin(bucket);
- VERIFY( __check_dereferenceable(lit) );
-
- VERIFY( !__check_dereferenceable(u.end(bucket)) );
-}
-
void test02()
{
using namespace __gnu_debug;
@@ -84,7 +59,6 @@ void test02()
int main()
{
- test01();
test02();
return 0;
}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_set/pmr_typedefs_debug.cc b/libstdc++-v3/testsuite/23_containers/unordered_set/pmr_typedefs_debug.cc
new file mode 100644
index 00000000000..73ad1e0f01f
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_set/pmr_typedefs_debug.cc
@@ -0,0 +1,26 @@
+// Copyright (C) 2018 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++17 -D_GLIBCXX_DEBUG" }
+// { dg-do compile { target c++17 } }
+
+#include <debug/unordered_set>
+static_assert(std::is_same_v<
+ std::pmr::unordered_set<int>,
+ __gnu_debug::unordered_set<int, std::hash<int>, std::equal_to<int>,
+ std::pmr::polymorphic_allocator<int>>
+ >);
diff --git a/libstdc++-v3/testsuite/23_containers/vector/bool/modifiers/assign/1.cc b/libstdc++-v3/testsuite/23_containers/vector/bool/modifiers/assign/1.cc
index 833201b39a3..06fb2ab2d03 100644
--- a/libstdc++-v3/testsuite/23_containers/vector/bool/modifiers/assign/1.cc
+++ b/libstdc++-v3/testsuite/23_containers/vector/bool/modifiers/assign/1.cc
@@ -27,7 +27,7 @@ void test01()
std::vector<bool> bv;
- bool array[] { false, true, true };
+ bool array[] = { false, true, true };
input_iterator_seq seq(array, array + 3);
bv.assign(seq.begin(), seq.end());
diff --git a/libstdc++-v3/testsuite/23_containers/vector/capacity/max_size.cc b/libstdc++-v3/testsuite/23_containers/vector/capacity/max_size.cc
new file mode 100644
index 00000000000..34d3c4ab96e
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/vector/capacity/max_size.cc
@@ -0,0 +1,146 @@
+// Copyright (C) 2018 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run }
+
+#include <vector>
+#include <stdexcept>
+#include <limits>
+#include <testsuite_hooks.h>
+
+typedef std::vector<char> test_type;
+
+typedef test_type::size_type size_type;
+typedef test_type::difference_type difference_type;
+
+const difference_type diffmax = std::numeric_limits<difference_type>::max();
+
+void
+test01()
+{
+ test_type v;
+ VERIFY( v.max_size() <= diffmax );
+}
+
+void
+test02()
+{
+ size_type n = size_type(diffmax) + 1;
+ VERIFY( n > test_type().max_size() );
+
+ try {
+ test_type v(n);
+ VERIFY( false );
+ } catch (const std::length_error&) { }
+
+ try {
+ test_type v(n, 'x');
+ VERIFY( false );
+ } catch (const std::length_error&) { }
+
+ try {
+ test_type v(n, 'x', test_type::allocator_type());
+ VERIFY( false );
+ } catch (const std::length_error&) { }
+}
+
+#ifdef __GLIBCXX_TYPE_INT_N_0
+template<typename T, typename U, bool = (sizeof(T) > sizeof(long long))>
+ struct Base_
+ {
+ typedef T difference_type;
+ typedef U size_type;
+ };
+
+template<typename T, typename U>
+ struct Base_<T, U, false>
+ {
+ typedef long long difference_type;
+ typedef unsigned long long size_type;
+ };
+
+typedef Base_<__GLIBCXX_TYPE_INT_N_0, unsigned __GLIBCXX_TYPE_INT_N_0> Base;
+#else
+struct Base
+{
+ typedef long long difference_type;
+ typedef unsigned long long size_type;
+};
+#endif
+
+// An iterator with a difference_type larger than ptrdiff_t
+struct Iter : Base
+{
+ typedef std::random_access_iterator_tag iterator_category;
+ typedef char value_type;
+ typedef const char* pointer;
+ typedef const char& reference;
+ using Base::difference_type;
+
+ Iter() : n(0) { }
+ Iter(size_type n) : n(n) { }
+
+ reference operator*() const { return value; }
+ pointer operator->() const { return &value; }
+
+ Iter& operator++() { ++n; return *this; }
+ Iter operator++(int) { Iter tmp(*this); ++n; return tmp; }
+ Iter& operator--() { --n; return *this; }
+ Iter operator--(int) { Iter tmp(*this); --n; return tmp; }
+
+ Iter& operator+=(difference_type d) { n += d; return *this; }
+ Iter& operator-=(difference_type d) { n -= d; return *this; }
+
+ difference_type operator-(const Iter& rhs) const { return n - rhs.n; }
+
+ reference operator[](difference_type d) const { return value; }
+
+ bool operator==(const Iter& rhs) const { return n == rhs.n; }
+ bool operator!=(const Iter& rhs) const { return n != rhs.n; }
+ bool operator<(const Iter& rhs) const { return n < rhs.n; }
+ bool operator>(const Iter& rhs) const { return n > rhs.n; }
+ bool operator<=(const Iter& rhs) const { return n <= rhs.n; }
+ bool operator>=(const Iter& rhs) const { return n >= rhs.n; }
+
+private:
+ size_type n;
+ static const char value = 'x';
+};
+
+Iter operator+(Iter i, Iter::difference_type n) { return i += n; }
+Iter operator+(Iter::difference_type n, Iter i) { return i += n; }
+Iter operator-(Iter i, Iter::difference_type n) { return i -= n; }
+
+void
+test03()
+{
+ Iter first, last(Iter::size_type(diffmax) + 1);
+ VERIFY( std::distance(first, last) > test_type().max_size() );
+
+ try {
+ test_type v(first, last);
+ VERIFY( false );
+ } catch (const std::length_error&) { }
+}
+
+int
+main()
+{
+ test01();
+ test02();
+ test03();
+}
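For reference, a minimal standalone sketch of the guarantee the new max_size.cc test exercises (illustrative only, not part of the patch): constructing a vector with more elements than max_size() must fail with std::length_error, and since test01 above pins max_size() at or below the maximum of difference_type, max_size() + 1 cannot wrap around.

#include <vector>
#include <stdexcept>
#include <cassert>

int main()
{
  std::vector<char> v;
  bool threw = false;
  try {
    // Request one element more than the container can ever hold.
    std::vector<char> w(v.max_size() + 1, 'x');
  } catch (const std::length_error&) {
    threw = true;   // the failure mode the testsuite checks for
  }
  assert(threw);
}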
diff --git a/libstdc++-v3/testsuite/23_containers/vector/cons/destructible_debug_neg.cc b/libstdc++-v3/testsuite/23_containers/vector/cons/destructible_debug_neg.cc
index 5127f5105f4..587c67fe936 100644
--- a/libstdc++-v3/testsuite/23_containers/vector/cons/destructible_debug_neg.cc
+++ b/libstdc++-v3/testsuite/23_containers/vector/cons/destructible_debug_neg.cc
@@ -45,4 +45,4 @@ test02()
// { dg-error "value type is destructible" "" { target *-*-* } 0 }
// In Debug Mode the "required from here" errors come from <debug/vector>
-// { dg-error "required from here" "" { target *-*-* } 155 }
+// { dg-error "required from here" "" { target *-*-* } 163 }
diff --git a/libstdc++-v3/testsuite/23_containers/vector/debug/debug_functions.cc b/libstdc++-v3/testsuite/23_containers/vector/debug/debug_functions.cc
index ea683eeb776..acbd0d110c0 100644
--- a/libstdc++-v3/testsuite/23_containers/vector/debug/debug_functions.cc
+++ b/libstdc++-v3/testsuite/23_containers/vector/debug/debug_functions.cc
@@ -20,28 +20,6 @@
#include <vector>
#include <testsuite_hooks.h>
-void test01()
-{
- using namespace __gnu_debug;
-
- std::vector<int> v1(3, 1);
- VERIFY( __check_dereferenceable(v1.begin()) );
- std::vector<int>::iterator it = v1.begin();
- VERIFY( __check_dereferenceable(it) );
-
- VERIFY( !__check_dereferenceable(v1.end()) );
- it = v1.end();
- VERIFY( !__check_dereferenceable(it) );
-
- const volatile int* pi = 0;
- VERIFY( !__check_dereferenceable(pi) );
-
- int i;
- pi = &i;
-
- VERIFY( __check_dereferenceable(pi) );
-}
-
void test02()
{
using namespace __gnu_debug;
@@ -67,7 +45,6 @@ void test02()
int main()
{
- test01();
test02();
return 0;
}
diff --git a/libstdc++-v3/testsuite/23_containers/vector/modifiers/assign/1.cc b/libstdc++-v3/testsuite/23_containers/vector/modifiers/assign/1.cc
index ca7b125e7ca..001f204c93b 100644
--- a/libstdc++-v3/testsuite/23_containers/vector/modifiers/assign/1.cc
+++ b/libstdc++-v3/testsuite/23_containers/vector/modifiers/assign/1.cc
@@ -27,7 +27,7 @@ void test01()
std::vector<int> v;
- int array[] { 0, 1, 2 };
+ int array[] = { 0, 1, 2 };
input_iterator_seq seq(array, array + 3);
v.assign(seq.begin(), seq.end());
diff --git a/libstdc++-v3/testsuite/23_containers/vector/types/pmr_typedefs_debug.cc b/libstdc++-v3/testsuite/23_containers/vector/types/pmr_typedefs_debug.cc
new file mode 100644
index 00000000000..f0da4ca511f
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/vector/types/pmr_typedefs_debug.cc
@@ -0,0 +1,25 @@
+// Copyright (C) 2018 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-options "-std=gnu++17 -D_GLIBCXX_DEBUG" }
+// { dg-do compile { target c++17 } }
+
+#include <debug/vector>
+static_assert(std::is_same_v<
+ std::pmr::vector<int>,
+ __gnu_debug::vector<int, std::pmr::polymorphic_allocator<int>>
+ >);
diff --git a/libstdc++-v3/testsuite/25_algorithms/fill_n/2.cc b/libstdc++-v3/testsuite/25_algorithms/fill_n/2.cc
index 6e18032e1e6..fb951b5c175 100644
--- a/libstdc++-v3/testsuite/25_algorithms/fill_n/2.cc
+++ b/libstdc++-v3/testsuite/25_algorithms/fill_n/2.cc
@@ -31,7 +31,7 @@ test01()
ref.push_back(1);
ref.push_back(2);
- std::vector<std::vector<int>> vvect;
+ std::vector<std::vector<int> > vvect;
vvect.push_back(std::vector<int>());
vvect.push_back(std::vector<int>());
diff --git a/libstdc++-v3/testsuite/25_algorithms/partial_sort_copy/debug/irreflexive_neg.cc b/libstdc++-v3/testsuite/25_algorithms/partial_sort_copy/debug/irreflexive_neg.cc
index 84449d6dbd4..5da782a4550 100644
--- a/libstdc++-v3/testsuite/25_algorithms/partial_sort_copy/debug/irreflexive_neg.cc
+++ b/libstdc++-v3/testsuite/25_algorithms/partial_sort_copy/debug/irreflexive_neg.cc
@@ -31,8 +31,8 @@ bad_lower(int lhs, int rhs)
void test01()
{
- int ins[] { 0, 1, 2, 3 };
- int outs[] { 9, 9 };
+ int ins[] = { 0, 1, 2, 3 };
+ int outs[] = { 9, 9 };
std::partial_sort_copy(ins, ins + 4, outs, outs + 2, bad_lower);
}
diff --git a/libstdc++-v3/testsuite/26_numerics/bit/bit.pow.two/ceil2.cc b/libstdc++-v3/testsuite/26_numerics/bit/bit.pow.two/ceil2.cc
index 65e1569c277..e41f82c8bb8 100644
--- a/libstdc++-v3/testsuite/26_numerics/bit/bit.pow.two/ceil2.cc
+++ b/libstdc++-v3/testsuite/26_numerics/bit/bit.pow.two/ceil2.cc
@@ -55,6 +55,14 @@ test(UInt x)
static_assert( std::ceil2(UInt(3) << 64) == (UInt(4) << 64) );
}
+ constexpr UInt msb = UInt(1) << (std::numeric_limits<UInt>::digits - 1);
+ static_assert( std::ceil2( msb ) == msb );
+ // Larger values cannot be represented, so the return value is unspecified,
+ // but must still be valid in constant expressions, i.e. not undefined.
+ static_assert( std::ceil2( UInt(msb + 1) ) != 77 );
+ static_assert( std::ceil2( UInt(msb + 2) ) != 77 );
+ static_assert( std::ceil2( UInt(msb + 77) ) != 77 );
+
return true;
}
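The new assertions above pin down the boundary case of ceil2: for the value whose only set bit is the most significant one there is no larger power of two, so rounding up must return the value unchanged, while inputs beyond that must still be usable in constant expressions. A plain constexpr helper makes the boundary concrete (illustrative only, not part of the patch; the name ceil_pow2 is made up here; needs C++14 or later):

#include <cstdint>

// Naive round-up-to-power-of-two; precondition: some power of two >= x exists.
constexpr std::uint32_t ceil_pow2(std::uint32_t x)
{
  std::uint32_t p = 1;
  while (p < x)
    p <<= 1;          // keeps doubling until it reaches or passes x
  return p;
}

static_assert(ceil_pow2(3) == 4, "rounds up to the next power of two");
static_assert(ceil_pow2(64) == 64, "powers of two map to themselves");
constexpr std::uint32_t msb = std::uint32_t(1) << 31;
static_assert(ceil_pow2(msb) == msb, "the boundary case the new test covers");

int main() { }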
diff --git a/libstdc++-v3/testsuite/27_io/filesystem/path/generation/normal.cc b/libstdc++-v3/testsuite/27_io/filesystem/path/generation/normal.cc
index 6d0e2007a75..3b8311f81ad 100644
--- a/libstdc++-v3/testsuite/27_io/filesystem/path/generation/normal.cc
+++ b/libstdc++-v3/testsuite/27_io/filesystem/path/generation/normal.cc
@@ -24,7 +24,17 @@
#include <testsuite_hooks.h>
using std::filesystem::path;
-using __gnu_test::compare_paths;
+
+void
+compare_paths(path p, std::string expected)
+{
+#if defined(_WIN32) && !defined(__CYGWIN__)
+ for (auto& c : expected)
+ if (c == '/')
+ c = '\\';
+#endif
+ __gnu_test::compare_paths(p, expected);
+}
void
test01()
@@ -69,8 +79,11 @@ test03()
{"/foo" , "/foo" },
{"/foo/" , "/foo/" },
{"/foo/." , "/foo/" },
- {"/foo/bar/.." , "/foo/" },
{"/foo/.." , "/" },
+ {"/foo/../.." , "/" },
+ {"/foo/bar/.." , "/foo/" },
+ {"/foo/bar/../.." , "/" },
+ {"/foo/bar/baz/../../.." , "/" }, // PR libstdc++/87116
{"/." , "/" },
{"/./" , "/" },
@@ -88,10 +101,11 @@ test03()
{"foo/.." , "." },
{"foo/../" , "." },
{"foo/../.." , ".." },
+ {"foo/../../..", "../.." },
// with root name (OS-dependent):
#if defined(_WIN32) && !defined(__CYGWIN__)
- {"C:bar/.." , "C:." },
+ {"C:bar/.." , "C:" },
#else
{"C:bar/.." , "." },
#endif
@@ -119,10 +133,53 @@ test03()
compare_paths( path(test.input).lexically_normal(), test.normalized );
}
+void
+test04()
+{
+ // PR libstdc++/87116
+ path p = "a/b/c";
+ compare_paths( (p/"../..").lexically_normal(), "a/" );
+
+ p = "a/b/c/d/e";
+ compare_paths( (p/"..").lexically_normal(), "a/b/c/d/" );
+ compare_paths( (p/"../..").lexically_normal(), "a/b/c/" );
+ compare_paths( (p/"../../..").lexically_normal(), "a/b/" );
+ compare_paths( (p/"../../../..").lexically_normal(), "a/" );
+ compare_paths( (p/"../../../../..").lexically_normal(), "." );
+ compare_paths( (p/"../../../../../..").lexically_normal(), ".." );
+
+ p = "/a/b/c/d/e";
+ compare_paths( (p/"..").lexically_normal(), "/a/b/c/d/" );
+ compare_paths( (p/"../..").lexically_normal(), "/a/b/c/" );
+ compare_paths( (p/"../../..").lexically_normal(), "/a/b/" );
+ compare_paths( (p/"../../../..").lexically_normal(), "/a/" );
+ compare_paths( (p/"../../../../..").lexically_normal(), "/" );
+ compare_paths( (p/"../../../../../..").lexically_normal(), "/" );
+
+#if defined(_WIN32) && !defined(__CYGWIN__)
+ p = "A:b/c/d/e";
+ compare_paths( (p/"..").lexically_normal(), "A:b/c/d/" );
+ compare_paths( (p/"../..").lexically_normal(), "A:b/c/" );
+ compare_paths( (p/"../../..").lexically_normal(), "A:b/" );
+ compare_paths( (p/"../../../..").lexically_normal(), "A:" );
+ compare_paths( (p/"../../../../..").lexically_normal(), "A:.." );
+ compare_paths( (p/"../../../../../..").lexically_normal(), "A:../.." );
+
+ p = "A:/b/c/d/e";
+ compare_paths( (p/"..").lexically_normal(), "A:/b/c/d/" );
+ compare_paths( (p/"../..").lexically_normal(), "A:/b/c/" );
+ compare_paths( (p/"../../..").lexically_normal(), "A:/b/" );
+ compare_paths( (p/"../../../..").lexically_normal(), "A:/" );
+ compare_paths( (p/"../../../../..").lexically_normal(), "A:/" );
+ compare_paths( (p/"../../../../../..").lexically_normal(), "A:/" );
+#endif
+}
+
int
main()
{
test01();
test02();
test03();
+ test04();
}
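test04 above encodes the fix for PR libstdc++/87116: when lexically_normal() collapses a trailing run of ".." elements, each one has to remove a further real component instead of stopping after the first. A short standalone check of the same behavior (illustrative only, not part of the patch; POSIX-style paths, C++17):

#include <filesystem>
#include <cassert>

int main()
{
  using std::filesystem::path;
  // Each additional ".." strips one more directory (PR libstdc++/87116).
  assert( path("a/b/c/../..").lexically_normal() == path("a/") );
  assert( path("/foo/bar/baz/../../..").lexically_normal() == path("/") );
  // Leading ".." elements that cannot be collapsed are kept.
  assert( path("foo/../../..").lexically_normal() == path("../..") );
}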
diff --git a/libstdc++-v3/testsuite/28_regex/match_results/pmr_typedefs.cc b/libstdc++-v3/testsuite/28_regex/match_results/pmr_typedefs.cc
index 23aef39caf3..d97fcfeb75e 100644
--- a/libstdc++-v3/testsuite/28_regex/match_results/pmr_typedefs.cc
+++ b/libstdc++-v3/testsuite/28_regex/match_results/pmr_typedefs.cc
@@ -17,6 +17,7 @@
// { dg-options "-std=gnu++17" }
// { dg-do compile { target c++17 } }
+// { dg-require-effective-target cxx11-abi }
#include <regex>
#include <memory_resource>
diff --git a/libstdc++-v3/testsuite/experimental/polymorphic_allocator/pmr_typedefs_match.cc b/libstdc++-v3/testsuite/experimental/polymorphic_allocator/pmr_typedefs_match.cc
index c237f1e9de9..8f5f8513403 100644
--- a/libstdc++-v3/testsuite/experimental/polymorphic_allocator/pmr_typedefs_match.cc
+++ b/libstdc++-v3/testsuite/experimental/polymorphic_allocator/pmr_typedefs_match.cc
@@ -16,6 +16,7 @@
// <http://www.gnu.org/licenses/>.
// { dg-do compile { target c++14 } }
+// { dg-require-effective-target cxx11-abi }
#include <experimental/regex>
diff --git a/libstdc++-v3/testsuite/experimental/polymorphic_allocator/pmr_typedefs_string.cc b/libstdc++-v3/testsuite/experimental/polymorphic_allocator/pmr_typedefs_string.cc
index 3cf8c94cfb1..ed53ce1a89b 100644
--- a/libstdc++-v3/testsuite/experimental/polymorphic_allocator/pmr_typedefs_string.cc
+++ b/libstdc++-v3/testsuite/experimental/polymorphic_allocator/pmr_typedefs_string.cc
@@ -16,6 +16,7 @@
// <http://www.gnu.org/licenses/>.
// { dg-do compile { target c++14 } }
+// { dg-require-effective-target cxx11-abi }
#include <experimental/string>
diff --git a/libstdc++-v3/testsuite/ext/ext_pointer/1.cc b/libstdc++-v3/testsuite/ext/ext_pointer/1.cc
index 351a9775ec8..bbedc43b12e 100644
--- a/libstdc++-v3/testsuite/ext/ext_pointer/1.cc
+++ b/libstdc++-v3/testsuite/ext/ext_pointer/1.cc
@@ -180,11 +180,25 @@ void test04() {
VERIFY(bPtr3 == aPtr);
}
+// Check that long long values can be used for pointer arithmetic.
+void test05()
+{
+ A a[2] = { 1, 2 };
+ A_pointer p = a;
+ A_pointer q = p + 0ull;
+ VERIFY( p == q );
+ q += 0ll;
+ VERIFY( p == q );
+ q += 1ll;
+ VERIFY( q->i == p[1ll].i );
+}
+
int main()
{
test01();
test02();
test03();
test04();
+ test05();
return 0;
}
diff --git a/libstdc++-v3/testsuite/util/testsuite_allocator.h b/libstdc++-v3/testsuite/util/testsuite_allocator.h
index 03679aad8dc..b0fecfb59a3 100644
--- a/libstdc++-v3/testsuite/util/testsuite_allocator.h
+++ b/libstdc++-v3/testsuite/util/testsuite_allocator.h
@@ -697,7 +697,8 @@ namespace __gnu_test
};
#endif // C++11
-#if __cplusplus >= 201703L && __cpp_aligned_new
+#if __cplusplus >= 201703L
+#if __cpp_aligned_new && __cpp_rtti
// A concrete memory_resource, with error checking.
class memory_resource : public std::pmr::memory_resource
{
@@ -835,6 +836,7 @@ namespace __gnu_test
allocation_lists* lists;
};
+#endif // aligned-new && rtti
// Set the default resource, and restore the previous one on destruction.
struct default_resource_mgr
@@ -849,7 +851,7 @@ namespace __gnu_test
std::pmr::memory_resource* prev;
};
-#endif // C++17 && aligned-new
+#endif // C++17
} // namespace __gnu_test
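The guard above is split so that only the checking memory_resource depends on over-aligned new and RTTI, while default_resource_mgr stays available in any C++17 build. __cpp_rtti and __cpp_aligned_new are ordinary compiler feature-test macros, so the usual gating pattern applies (illustrative only, not part of the patch):

#include <new>
#include <cstdio>

struct Base { virtual ~Base() = default; };
struct Derived : Base { };

void probe(Base* b)
{
#if __cpp_rtti
  // dynamic_cast requires RTTI; -fno-rtti leaves __cpp_rtti undefined.
  if (dynamic_cast<Derived*>(b))
    std::puts("Derived");
#endif
#if __cpp_aligned_new
  // Over-aligned allocation requires the C++17 aligned operator new.
  void* p = ::operator new(64, std::align_val_t(64));
  ::operator delete(p, std::align_val_t(64));
#endif
  (void) b;
}

int main() { Derived d; probe(&d); return 0; }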
diff --git a/libstdc++-v3/testsuite/util/testsuite_containers.h b/libstdc++-v3/testsuite/util/testsuite_containers.h
index 89c88cc9936..759f4d6b79f 100644
--- a/libstdc++-v3/testsuite/util/testsuite_containers.h
+++ b/libstdc++-v3/testsuite/util/testsuite_containers.h
@@ -20,6 +20,7 @@
#ifndef _GLIBCXX_TESTSUITE_CONTAINERS_H
#define _GLIBCXX_TESTSUITE_CONTAINERS_H
+#include <bits/boost_concept_check.h>
#include <cassert>
#include <testsuite_container_traits.h>
@@ -191,6 +192,77 @@ namespace __gnu_test
forward_members_unordered(_Tp& container) { }
};
+ template<typename _Iterator,
+ bool _Mutable,
+ typename = typename std::iterator_traits<_Iterator>::iterator_category>
+ struct iterator_concept_checks;
+
+ template<typename _Iterator>
+ struct iterator_concept_checks<_Iterator, false,
+ std::forward_iterator_tag>
+ {
+ iterator_concept_checks()
+ {
+ using namespace __gnu_cxx;
+ __function_requires<_ForwardIteratorConcept<_Iterator>>();
+ }
+ };
+
+ template<typename _Iterator>
+ struct iterator_concept_checks<_Iterator, true,
+ std::forward_iterator_tag>
+ {
+ iterator_concept_checks()
+ {
+ using namespace __gnu_cxx;
+ __function_requires<_Mutable_ForwardIteratorConcept<_Iterator>>();
+ }
+ };
+
+ template<typename _Iterator>
+ struct iterator_concept_checks<_Iterator, false,
+ std::bidirectional_iterator_tag>
+ {
+ iterator_concept_checks()
+ {
+ using namespace __gnu_cxx;
+ __function_requires<_BidirectionalIteratorConcept<_Iterator>>();
+ }
+ };
+
+ template<typename _Iterator>
+ struct iterator_concept_checks<_Iterator, true,
+ std::bidirectional_iterator_tag>
+ {
+ iterator_concept_checks()
+ {
+ using namespace __gnu_cxx;
+ __function_requires<_Mutable_BidirectionalIteratorConcept<_Iterator>>();
+ }
+ };
+
+ template<typename _Iterator>
+ struct iterator_concept_checks<_Iterator, false,
+ std::random_access_iterator_tag>
+ {
+ iterator_concept_checks()
+ {
+ using namespace __gnu_cxx;
+ __function_requires<_RandomAccessIteratorConcept<_Iterator>>();
+ }
+ };
+
+ template<typename _Iterator>
+ struct iterator_concept_checks<_Iterator, true,
+ std::random_access_iterator_tag>
+ {
+ iterator_concept_checks()
+ {
+ using namespace __gnu_cxx;
+ __function_requires<_Mutable_RandomAccessIteratorConcept<_Iterator>>();
+ }
+ };
+
template<typename _Tp>
struct citerator
{
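The new iterator_concept_checks helper above picks the right __gnu_cxx concept check by partially specializing on iterator_traits<>::iterator_category (plus a bool selecting the mutable variant). The dispatch mechanism itself, reduced to a self-contained example using only standard headers (illustrative only, not part of the patch; category_name is an invented stand-in for the concept-check structs):

#include <iterator>
#include <forward_list>
#include <list>
#include <vector>
#include <cstdio>

template<typename Iterator,
         typename = typename std::iterator_traits<Iterator>::iterator_category>
  struct category_name
  { static const char* get() { return "forward (or weaker)"; } };

template<typename Iterator>
  struct category_name<Iterator, std::bidirectional_iterator_tag>
  { static const char* get() { return "bidirectional"; } };

template<typename Iterator>
  struct category_name<Iterator, std::random_access_iterator_tag>
  { static const char* get() { return "random access"; } };

int main()
{
  // The second template argument defaults to the iterator's category tag,
  // so the most specific partial specialization is chosen automatically.
  std::printf("%s\n", category_name<std::forward_list<int>::iterator>::get());
  std::printf("%s\n", category_name<std::list<int>::iterator>::get());
  std::printf("%s\n", category_name<std::vector<int>::iterator>::get());
}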
diff --git a/libvtv/ChangeLog b/libvtv/ChangeLog
index cd1beb9004b..77fbc8f008a 100644
--- a/libvtv/ChangeLog
+++ b/libvtv/ChangeLog
@@ -166,7 +166,7 @@
* libvtv/configure.ac : Add ACX_LT_HOST_FLAGS. Define VTV_CYGMIN.
* libvtv/configure.tgt : (x86_64-*-cygwin*, i?86-*-cygwin*,
x86_64-*-mingw*)
- (i?86-*-mingw*): Add to supported targets.
+ (i?86-*-mingw*): Add to supported targets.
* libvtv/vtv_fail.cc : Skip inclusion of execinfo.h on Cygwin and MinGW.
(log_error_message): Skip calls to backtrace and backtrace_symbols_fd
on Cygwin and MinGW.
diff --git a/lto-plugin/ChangeLog b/lto-plugin/ChangeLog
index c3a08236831..0b7ee5769ec 100644
--- a/lto-plugin/ChangeLog
+++ b/lto-plugin/ChangeLog
@@ -656,7 +656,7 @@
(onload): Don't create the temporary directory.
2009-11-04 Richard Guenther <rguenther@suse.de>
- Rafael Avila de Espindola <espindola@google.com>
+ Rafael Avila de Espindola <espindola@google.com>
* lto-plugin.c (plugin_file_info): Remove temp field.
(cleanup_handler): Don't delete temporary objects.
@@ -692,9 +692,9 @@
2009-10-19 Rafael Avila de Espindola <espindola@google.com>
- PR40790
- * configure: Regenerate.
- * configure.ac: Add AC_TYPE_UINT64_T.
+ PR40790
+ * configure: Regenerate.
+ * configure.ac: Add AC_TYPE_UINT64_T.
2009-10-16 Rafael Avila de Espindola <espindola@google.com>
@@ -935,7 +935,7 @@
2008-09-23 Rafael Espindola <espindola@google.com>
- * plugin-api.h: Moved to include.
+ * plugin-api.h: Moved to include.
2008-09-23 Rafael Espindola <espindola@google.com>