Diffstat (limited to 'tcwg_gnu-build.sh')
-rwxr-xr-x  tcwg_gnu-build.sh  530
1 file changed, 412 insertions, 118 deletions
diff --git a/tcwg_gnu-build.sh b/tcwg_gnu-build.sh
index 421f06e5..c21f84d8 100755
--- a/tcwg_gnu-build.sh
+++ b/tcwg_gnu-build.sh
@@ -10,48 +10,103 @@ scripts=$(dirname $0)
convert_args_to_variables "$@"
-obligatory_variables rr[ci_config]
+obligatory_variables rr[ci_project] rr[ci_config]
declare -A rr
-# Execution mode: baseline, bisect, jenkins-full
-rr[mode]="${rr[mode]-baseline}"
+# All configuration -- configure flags, simulator choice, pretty names,
+# etc. -- is implemented in the sourced tcwg_gnu-config.sh:
+# shellcheck source=tcwg_gnu-config.sh
+. $scripts/tcwg_gnu-config.sh
+
+# Execution mode: build or bisect.
+rr[mode]="${rr[mode]-build}"
# Set custom revision for one of the projects, and use baseline revisions
# for all other projects.
-rr[ci_project]="${rr[ci_project]-tcwg_gnu}"
rr[baseline_branch]="${rr[baseline_branch]-linaro-local/ci/${rr[ci_project]}/${rr[ci_config]}}"
-rr[update_baseline]="${rr[update_baseline]-update}"
+rr[update_baseline]="${rr[update_baseline]-ignore}"
rr[top_artifacts]="${rr[top_artifacts]-$(pwd)/artifacts}"
-# Resolve top_artifacts to absolute dir because some of the subsequent
-# processes work below pwd and would write their artifacts in a wrong
-# location
-rr[top_artifacts]=$(abs_path "${rr[top_artifacts]}")
-
-# {toolchain_name}-{toolchain_ver}-{target}-{type_of_test}
+# {toolchain_ver}-{target}[-{type_of_test}]
IFS=- read -a ci_config <<EOF
${rr[ci_config]}
EOF
# Toolchain version -- master or release
-toolchain_ver=${toolchain_ver-${ci_config[1]}}
-# type_of_test contains the type of action to perform in this test
-# campaign: bootstrap, bootstrap_lto, check_binutils, ....
-type_of_test=${type_of_test-${ci_config[3]}}
-
-case "$type_of_test" in
- *_binutils)
+toolchain_ver=${toolchain_ver-${ci_config[0]}}
+ci_target=${ci_target-${ci_config[1]}}
+# The optional type_of_test field contains the type of action to perform in
+# this test campaign: bootstrap, bootstrap_lto, ...
+type_of_test=${type_of_test-${ci_config[2]-}}
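# For illustration (hypothetical config name): with
# rr[ci_config]=master-aarch64-bootstrap_lto, the read above splits on "-"
# into ci_config=(master aarch64 bootstrap_lto), giving:
#   toolchain_ver=master
#   ci_target=aarch64
#   type_of_test=bootstrap_lto   # optional; defaults to empty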
+
+case "${rr[ci_project]}" in
+ tcwg_gcc_*|tcwg_bootstrap_*)
+ rr[target]="${rr[target]-native}"
+ rr[components]="gcc"
+ ;;
+ tcwg_binutils_*)
rr[target]="${rr[target]-native}"
rr[components]="binutils"
;;
- *bootstrap*|*gcc*)
+ tcwg_glibc_*)
rr[target]="${rr[target]-native}"
- rr[components]="binutils gcc"
+ rr[components]="glibc"
+ ;;
+ tcwg_gdb_*)
+ rr[target]="${rr[target]-native}"
+ rr[components]="gdb"
+ ;;
+ tcwg_gnu_native_*)
+ rr[target]="${rr[target]-native}"
+ rr[components]="binutils gcc linux glibc gdb"
+ ;;
+ tcwg_gnu_cross_*)
+ rr[target]="${rr[target]-$ci_target}"
+ rr[components]="binutils gcc linux glibc gdb qemu"
;;
- *_cross)
- rr[target]="${rr[target]-${ci_config[2]}}"
- rr[components]="binutils gcc glibc qemu"
+ tcwg_gnu_embed_*)
+ rr[target]="${rr[target]-$ci_target}"
+ rr[components]="binutils gcc newlib gdb qemu"
+ ;;
+ tcwg_gnu_mingw_*)
+ rr[target]="${rr[target]-$ci_target}"
+ rr[components]="binutils gcc mingw"
+ ;;
+ *) assert_with_msg "Unknown ci_project: ${rr[ci_project]}" false ;;
+esac
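# For illustration of the mapping above: ci_project=tcwg_gnu_cross_check_gcc
# selects components "binutils gcc linux glibc gdb qemu" with rr[target]
# defaulting to $ci_target, while ci_project=tcwg_gcc_check selects just
# "gcc" with a native target.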
+
+# Artifacts version
+rr[major]=4
+rr[minor]=0
+
+case "${rr[ci_project]}" in
+ tcwg_gnu_native_fast_check_gdb)
+ # Tests that run quickly and have stable results. This list is taken from
+ # Sourceware's builder:
+ # https://sourceware.org/git/?p=builder.git;a=blob;f=builder/master.cfg;h=612b3177f0e5#l2407
+ testsuites=(gdb.base/break-always.exp
+ gdb.base/break-caller-line.exp
+ gdb.base/break-entry.exp
+ gdb.base/break.exp
+ gdb.base/break-fun-addr.exp
+ gdb.base/break-idempotent.exp
+ gdb.base/break-include.exp
+ gdb.base/break-inline.exp
+ gdb.base/break-main-file-remove-fail.exp
+ gdb.base/break-on-linker-gcd-function.exp
+ gdb.base/breakpoint-in-ro-region.exp
+ gdb.base/breakpoint-shadow.exp
+ gdb.base/break-probes.exp
+ gdb.gdb/unittest.exp
+ )
+ ;;
+
+ tcwg_gnu_native_fast_check_gcc)
+ # Use a small subset of the GCC testsuite until we have made the
+ # results stable.
+ testsuites=(compile.exp
+ execute.exp
+ )
;;
- *) assert_with_msg "Unknown type_of_test: $type_of_test" false ;;
esac
runtestflags=()
@@ -80,21 +135,22 @@ trap print_traceback EXIT
default_start_at=""
default_finish_at=""
case "${rr[mode]}" in
- "baseline")
- default_finish_at="update_baseline"
- ;;
"bisect")
- case "$(print_single_updated_component):$type_of_test" in
+ case "$(print_single_updated_component):${rr[ci_project]}" in
+ *:tcwg_gnu_mingw_*) default_start_at="clean_sysroot" ;;
binutils:*) default_start_at="build_abe-binutils" ;;
- gcc:*_cross) default_start_at="build_abe-stage1" ;;
- gcc:*) default_start_at="build_abe-${type_of_test#check_}" ;;
- glibc:*) default_start_at="build_abe-glibc" ;;
+ gcc:tcwg_gnu_embed_*) default_start_at="build_abe-stage1" ;;
+ gcc:tcwg_gnu_cross_*) default_start_at="build_abe-stage1" ;;
+ gcc:tcwg_gnu_native_*) default_start_at="build_abe-gcc" ;;
+ gcc:tcwg_gcc_*) default_start_at="build_abe-gcc" ;;
+ gcc:tcwg_bootstrap_*) default_start_at="build_abe-${type_of_test#check_}" ;;
+ glibc:tcwg_glibc_*) default_start_at="build_abe-glibc" ;;
+ linux:*|glibc:*|newlib:*) default_start_at="clean_sysroot" ;;
+ gdb:*) default_start_at="build_abe-gdb" ;;
qemu:*) default_start_at="build_abe-qemu" ;;
*) assert_with_msg "Trying to bisect unknown component: $(print_single_updated_component)" false ;;
esac
- default_finish_at="check_regression"
;;
- "jenkins-full") ;;
esac
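# For illustration: when bisecting tcwg_gnu_native_check_gdb and only gcc
# has changed, print_single_updated_component prints "gcc", so the case
# above selects default_start_at="build_abe-gcc" and the bisect build
# skips the earlier build_abe-binutils step instead of redoing it.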
if [ x"$start_at" = x"default" ]; then
start_at="$default_start_at"
@@ -105,6 +161,86 @@ fi
run_step_init "$start_at" "$finish_at" "${rr[top_artifacts]}" "$verbose"
+# Initialize the sysroot directory in ABE's build/ with system headers
+# so that [native] glibc can find headers to build against.
+init_abe_sysroot ()
+{
+ (
+ set -euf -o pipefail
+
+ local abe_sysroot gnu_target
+ abe_sysroot=$(print_abe_sysroot)
+ gnu_target=$(print_gnu_target "$ci_target")
+ rm -rf "$abe_sysroot/usr/include/"
+ mkdir -p "$abe_sysroot/usr/include/"
+ rsync -a "/usr/include/" "$abe_sysroot/usr/include/"
+ # Debian/Ubuntu have arch headers under a target-specific directory.
+ rsync -a "/usr/include/$gnu_target/" "$abe_sysroot/usr/include/"
+ )
+}
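# A minimal usage sketch of init_abe_sysroot (hypothetical value for
# ci_target; assumes ABE has already been set up by the prepare_abe step
# below so that print_abe_sysroot resolves to a valid path):
#   ci_target=aarch64
#   init_abe_sysroot
#   ls "$(print_abe_sysroot)/usr/include/" | head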
+
+# Build mingw parts: headers, crt and libs.
+build_mingw ()
+{
+ (
+ set -euf -o pipefail
+ local todo="$1"
+
+ if [ "$todo" = "headers" ]; then
+ clone_repo mingw
+ fi
+
+ local abe_sysroot gnu_host gnu_target
+ abe_sysroot=$(print_abe_sysroot)
+ gnu_host=$(print_gnu_target native)
+ gnu_target=$(print_gnu_target "$ci_target")
+
+ PATH="$(pwd)/abe/builds/destdir/$gnu_host/bin:$PATH"
+ export PATH
+
+ local dir="mingw-$todo"
+ rm -rf "$dir"
+ mkdir "$dir"
+ cd "$dir"
+
+ if [ "$todo" = "headers" ]; then
+ ../mingw/mingw-w64-headers/configure \
+ --prefix="$abe_sysroot" --host="$gnu_target"
+ elif [ "$todo" = "crt" ]; then
+ ../mingw/mingw-w64-crt/configure \
+ --prefix="$abe_sysroot" --host="$gnu_target" \
+ --enable-libarm64 \
+ --disable-lib32 \
+ --disable-lib64 \
+ --disable-libarm32 \
+ --disable-shared \
+ --with-default-msvcrt=msvcrt
+ elif [ "$todo" = "libs" ]; then
+ ../mingw/configure \
+ --prefix="$abe_sysroot" --host="$gnu_target" \
+ --enable-libarm64 \
+ --disable-lib32 \
+ --disable-lib64 \
+ --disable-libarm32 \
+ --disable-shared \
+ --with-libraries=winpthreads \
+ --with-default-msvcrt=msvcrt
+ fi
+
+ make -j"$(nproc --all)"
+ make install
+
+ if [ "$todo" = "headers" ]; then
+ # ??? I'm not sure whether this is a hack or an adorable peculiarity
+ # of building a mingw toolchain. The point of this symlink is that
+ # it's easiest for the stage1 mingw GCC compiler to find headers in the
+ # mingw/ directory instead of the actual sysroot.
+ rm -rf "$abe_sysroot/../mingw"
+ ln -s "$abe_sysroot" "$abe_sysroot/../mingw"
+ fi
+ )
+}
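# build_mingw is invoked in dependency order in the tcwg_gnu_mingw_* steps
# below: "headers" right after binutils, then "crt" and "libs" once the
# stage1 compiler is on PATH. A sketch of that order:
#   build_mingw headers
#   build_mingw crt
#   build_mingw libs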
+
# Exit with code 0 if no regression compared to base-artifacts/results.
no_regression_p ()
{
@@ -119,119 +255,277 @@ no_regression_p ()
local sumfiles_base=$ref_artifacts/sumfiles
local sumfiles_new=$new_artifacts/sumfiles
+ local xfails="$sumfiles_new/xfails.xfail"
+
+ # Remove files generated here (in case we are re-generating results in
+ # round-robin-baseline.sh).
+ rm -f "$xfails"
+
if ! [ -d $sumfiles_base ]; then
return 0
elif ! [ -d $sumfiles_new ]; then
return 1
fi
- local res
-
- # We use our modified version of GCC's comparison script
- clone_or_update_repo gcc-compare-results master https://git.linaro.org/toolchain/gcc-compare-results.git
-
- # (defined by init_step in jenkins-helpers)
- # shellcheck disable=SC2154
- gcc-compare-results/compare_tests -compr none -pass-thresh 0.9 \
- $sumfiles_base $sumfiles_new \
- | tee $run_step_artifacts/results.compare1 &
- res=0 && wait $! || res=$?
-
- local xfail="gcc-compare-results/contrib/testsuite-management/flaky"
- if [ -f "$xfail/${rr[ci_config]}.xfail" ]; then
- xfail="$xfail/${rr[ci_config]}"
- fi
+ cat > "$xfails" <<EOF
+# This file contains three sections:
+# - newly detected flaky tests (if any)
+# - known flaky tests (from baseline)
+# - known failures (from baseline)
+#
+EOF
- local ignore_ERRORs_opt=""
- if [ ${#runtestflags[@]} != 0 ]; then
- # We are running a subset of the testsuite, which might generate
- # ERRORs in GCC testsuites that will have no tests to run --
- # ignore these ERRORs, because they are unstable from run to run.
- ignore_ERRORs_opt="--ignore_ERRORs"
+ local flaky_tests="$run_step_artifacts/flaky.xfail"
+ local baseline_fails="$run_step_artifacts/baseline.xfail"
+ build_abe_check_xfails "$flaky_tests" "$baseline_fails"
+
+ # Add known flaky tests and baseline_fails to the xfails.
+ #
+ # Note #1: We generate $baseline_fails without regard for flaky
+ # tests. Therefore, validate_failures in no_regression_p() will
+ # see the same tests both with and without flaky attributes.
+ # validate_failures.py uses Python sets to store results, so the first
+ # entry wins. Therefore, we need to put the lists of flaky tests before
+ # the lists of expected fails -- $baseline_fails.
+ #
+ # Note #2: Order of expired and non-expired flaky tests is less of
+ # an issue because expired entries are discarded by validate_failures
+ # before adding them to the ResultSet. Still, for uniformity, we
+ # put fresh flaky entries before older ones.
+ cat "$flaky_tests" >> "$xfails"
+ echo "# Known failures (from baseline)" >> "$xfails"
+ cat "$baseline_fails" >> "$xfails"
+
+ # Set and save result_expiry_date so that we use the same expiration date
+ # when re-generating results in round-robin-baseline.sh.
+ if [ "${rr[result_expiry_date]-}" = "" ]; then
+ rr[result_expiry_date]=$(date +%Y%m%d)
+ # finalize_manifest() will not see this declaration because we are running
+ # in a sub-shell. Update the manifest manually.
+ cat <<EOF | manifest_out
+rr[result_expiry_date]="${rr[result_expiry_date]}"
+EOF
fi
- gcc-compare-results/contrib/testsuite-management/validate_failures.py \
- --manifest=$xfail.xfail --clean_build=$sumfiles_base \
- --build_dir=$sumfiles_new $ignore_ERRORs_opt \
- | tee $run_step_artifacts/results.compare2 &
+ local validate_failures="gcc-compare-results/contrib/testsuite-management/validate_failures.py"
+ "$validate_failures" --manifest="$xfails" \
+ --expiry_date="${rr[result_expiry_date]}" \
+ --build_dir=$sumfiles_new --verbosity=1 \
+ > $run_step_artifacts/fails.sum &
+ local res
res=0 && wait $! || res=$?
if [ $res != 0 ]; then
- local reg_lines
- for i in 1 2; do
- reg_lines=$(cat $run_step_artifacts/results.compare$i | wc -l)
- reg_lines=$(($reg_lines-100))
- cat $run_step_artifacts/results.compare$i | sed -e "s/^/# /" \
- | (head -n100; cat >/dev/null) \
- > $run_step_artifacts/results.regressions
- if [ $reg_lines -gt 0 ]; then
- echo "# ... and $reg_lines more entries" \
- >> $run_step_artifacts/results.regressions
- fi
- done
-
- local res1
- gcc-compare-results/contrib/testsuite-management/validate_failures.py \
- --manifest=$xfail.xfail --clean_build=$sumfiles_base \
- --build_dir=$sumfiles_new $ignore_ERRORs_opt --verbosity=1 \
- > $run_step_artifacts/fails.sum &
- res1=0 && wait $! || res1=$?
- assert_with_msg "Result comparison should have failed" \
- [ $res1 = $res ]
-
- printf "extra_build_params=" > $run_step_artifacts/extra-bisect-params
- local exp
- while read exp; do
- printf "++testsuites %s " $exp >> $run_step_artifacts/extra-bisect-params
- done < <(cat $run_step_artifacts/fails.sum \
- | awk '/^Running .* \.\.\./ { print $2 }')
- printf "\n" >> $run_step_artifacts/extra-bisect-params
+ # Add a short marker to record the status (used by Jenkins build-name)
+ local n_regressions
+ n_regressions=$(grep -c "^[A-Z]\+:" $run_step_artifacts/fails.sum \
+ || true)
+ echo "# $n_regressions regressions" \
+ > $run_step_artifacts/results.regressions
+
+ if [ $res = 2 ]; then
+ # Result comparison found regressions (exit code 2)
+ #
+ # Exit code 1 means that the script has failed to process
+ # .sum files. This likely indicates malformed or very unusual
+ # results.
+
+ printf "extra_build_params=" > $run_step_artifacts/extra-bisect-params
+ local exp
+ while read exp; do
+ printf "++testsuites %s " $exp >> $run_step_artifacts/extra-bisect-params
+ done < <(cat $run_step_artifacts/fails.sum \
+ | awk '/^Running .* \.\.\./ { print $2 }')
+ printf "\n" >> $run_step_artifacts/extra-bisect-params
+ else
+ # validate_failures.py failed to process results. This means
+ # either a corrupted manifest, corrupted results, or a bug
+ # in validate_failures.py. In any of these cases, stop and wait
+ # for a fix.
+ return $EXTERNAL_FAIL
+ fi
fi
return $res
)
}
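# Illustration of Note #1 in no_regression_p() above (the entries and the
# attribute syntax are examples, not generated output): when the same test
# is both flaky and a known baseline failure, the flaky entry must come
# first in xfails.xfail so that its attributes win once the manifest is
# loaded into a set:
#   flaky | FAIL: gdb.base/break.exp: run to main
#   FAIL: gdb.base/break.exp: run to main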
+# Implement rr[breakup_changed_components] hook.
+tcwg_gnu_breakup_changed_components ()
+{
+ (
+ set -euf -o pipefail
+
+ local cc=""
+ case "${rr[ci_project]}" in
+ *_check_*)
+ # Changes to "foo" of check_foo projects tend to cause the most
+ # regressions.
+ # Break up the changed components into "foo" and the rest of the
+ # components to reduce the number of builds.
+ cc=$(echo "${rr[ci_project]}" | sed -e "s/.*_check_\(.*\)/\1/")
+ ;;
+ esac
+
+ if print_changed_components "\n" | grep "^$cc\$" >/dev/null; then
+ echo "$cc"
+ print_changed_components "\n" | grep -v "^$cc\$" | tr '\n' ' ' \
+ | sed -e "s/ \$//g"
+ echo
+ else
+ print_changed_components "\n"
+ fi
+ )
+}
+rr[breakup_changed_components]=tcwg_gnu_breakup_changed_components
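# For illustration: in tcwg_gnu_native_check_gdb with changed components
# "binutils gcc gdb", cc becomes "gdb", so the hook prints "gdb" on its own
# line followed by "binutils gcc", letting gdb changes be tried in a
# separate build from the rest.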
+
+# Define gnu_data[] as needed below
+settings_for_ci_project_and_config "${rr[ci_project]}" "${rr[ci_config]}"
+
run_step stop_on_fail -10 reset_artifacts
run_step stop_on_fail x prepare_abe
-case "$type_of_test" in
- build_cross)
+case "${rr[ci_project]}" in
+ tcwg_gnu_cross_build|tcwg_gnu_embed_build)
+ run_step skip_on_fail 0 true
+ run_step skip_on_fail 1 build_abe binutils
+ run_step skip_on_fail 2 build_abe stage1 -- \
+ ${gnu_data[gcc_override_configure]}
+ run_step skip_on_fail x clean_sysroot
+ case "${rr[components]}" in
+ *glibc*)
+ run_step skip_on_fail 3 build_abe linux
+ run_step skip_on_fail 4 build_abe glibc
+ ;;
+ *newlib*)
+ run_step skip_on_fail 4 build_abe newlib
+ ;;
+ esac
+ run_step skip_on_fail 5 build_abe stage2 -- \
+ ${gnu_data[gcc_override_configure]}
+ run_step skip_on_fail 6 build_abe gdb
+ run_step skip_on_fail 7 build_abe qemu
+ ;;
+ tcwg_gnu_cross_check_*|tcwg_gnu_embed_check_*)
+ run_step skip_on_fail -8 build_abe binutils
+ run_step skip_on_fail -7 build_abe stage1 -- \
+ ${gnu_data[gcc_override_configure]}
+ run_step skip_on_fail x clean_sysroot
+ case "${rr[components]}" in
+ *glibc*)
+ run_step skip_on_fail -6 build_abe linux
+ run_step skip_on_fail -5 build_abe glibc
+ ;;
+ *newlib*)
+ run_step skip_on_fail -5 build_abe newlib
+ ;;
+ esac
+ run_step skip_on_fail -4 build_abe stage2 -- \
+ ${gnu_data[gcc_override_configure]}
+ run_step skip_on_fail -3 build_abe gdb
+ run_step skip_on_fail -2 build_abe qemu
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe "check_${rr[ci_project]#*check_}" -- "${runtestflags[@]}" \
+ ${gnu_data[gcc_override_configure]} \
+ ${gnu_data[gcc_target_board_options]} \
+ ${gnu_data[qemu_cpu]}
+ ;;
+ tcwg_gnu_native_build)
run_step skip_on_fail 0 true
run_step skip_on_fail 1 build_abe binutils
- run_step skip_on_fail 2 build_abe stage1
- run_step skip_on_fail 3 build_abe linux
- run_step skip_on_fail 4 build_abe glibc
- run_step skip_on_fail 5 build_abe stage2
- run_step skip_on_fail 6 build_abe qemu
+ run_step skip_on_fail 2 build_abe gcc
+ run_step skip_on_fail x clean_sysroot
+ run_step skip_on_fail 4 build_abe linux
+ run_step skip_on_fail 5 build_abe glibc
+ run_step skip_on_fail 6 build_abe gdb
;;
- check_cross)
+ tcwg_gnu_native_check_*|tcwg_gnu_native_fast_check_*)
+ component="${rr[ci_project]#tcwg_gnu_native_}"
+ component="${component#fast_}"
+
+ declare -a abe_arguments=()
+ if [ "${component}" = check_gdb ]; then
+ abe_arguments=(--set check_buffer_workaround=gdb-read1)
+ fi
+
run_step skip_on_fail -8 build_abe binutils
- run_step skip_on_fail -7 build_abe stage1
- run_step skip_on_fail -6 build_abe linux
- run_step skip_on_fail -5 build_abe glibc
- run_step skip_on_fail -4 build_abe stage2
- run_step skip_on_fail -3 build_abe qemu
- run_step skip_on_fail 0 build_abe dejagnu
- run_step skip_on_fail 1 build_abe check_gcc -- "${runtestflags[@]}"
- ;;
- check_binutils)
- run_step skip_on_fail -2 build_abe binutils
- run_step skip_on_fail 0 build_abe dejagnu
- run_step skip_on_fail 1 build_abe check_binutils -- "${runtestflags[@]}"
- ;;
- check_gcc*|check_bootstrap*)
- run_step skip_on_fail -2 build_abe binutils
- run_step skip_on_fail -1 build_abe ${type_of_test#check_}
- run_step skip_on_fail 0 build_abe dejagnu
- run_step skip_on_fail 1 build_abe ${type_of_test} -- "${runtestflags[@]}"
- ;;
- *)
+ run_step skip_on_fail -7 build_abe gcc
+ run_step skip_on_fail x clean_sysroot
+ run_step skip_on_fail -5 build_abe linux
+ run_step skip_on_fail -4 build_abe glibc
+ run_step skip_on_fail -3 build_abe gdb
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe "$component" -- "${runtestflags[@]}" \
+ "${abe_arguments[@]}"
+ ;;
+ tcwg_bootstrap_build)
run_step skip_on_fail 0 true
run_step skip_on_fail 1 build_abe ${type_of_test}
;;
+ tcwg_bootstrap_check)
+ run_step skip_on_fail -2 build_abe ${type_of_test#check_}
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe ${type_of_test} -- "${runtestflags[@]}"
+ ;;
+ tcwg_binutils_build|tcwg_gcc_build|tcwg_gdb_build)
+ run_step skip_on_fail 0 true
+ run_step skip_on_fail 1 build_abe "${rr[components]}"
+ ;;
+ tcwg_binutils_check|tcwg_gcc_check)
+ run_step skip_on_fail -2 build_abe "${rr[components]}"
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe "check_${rr[components]}" \
+ -- "${runtestflags[@]}"
+ ;;
+ tcwg_gdb_check)
+ # GDB's testsuite has a limitation where it can only find debug info
+ # within the installation prefix. To allow it to find the
+ # distro-installed debug info for ld.so, use /usr as the prefix. We
+ # don't need to actually install GDB there though, so disable the
+ # install step.
+ run_step skip_on_fail -2 build_abe "${rr[components]}" \
+ -- --prefix /usr --disable install
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe "check_${rr[components]}" \
+ -- "${runtestflags[@]}" --set check_buffer_workaround=gdb-read1
+ ;;
+ tcwg_glibc_build)
+ run_step skip_on_fail 0 init_abe_sysroot
+ # ABE tries to copy gcc runtime libraries on glibc install, which
+ # fails when we don't build gcc. Work around this by not installing glibc.
+ run_step skip_on_fail 1 build_abe glibc -- --disable install
+ ;;
+ tcwg_glibc_check)
+ run_step skip_on_fail -3 init_abe_sysroot
+ run_step skip_on_fail -2 build_abe glibc -- --disable install
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe check_glibc \
+ -- "${runtestflags[@]}" --disable install
+ ;;
+ tcwg_gnu_mingw_build)
+ run_step skip_on_fail 0 true
+ run_step skip_on_fail x clean_sysroot
+ run_step skip_on_fail 1 build_abe binutils
+ run_step skip_on_fail 2 build_mingw headers
+ run_step skip_on_fail 3 build_abe stage1
+ run_step skip_on_fail 4 build_mingw crt
+ run_step skip_on_fail 5 build_mingw libs
+ run_step skip_on_fail 6 build_abe stage2
+ #run_step skip_on_fail 7 build_abe gdb
+ ;;
+ tcwg_gnu_mingw_check_*)
+ run_step skip_on_fail x clean_sysroot
+ run_step skip_on_fail -8 build_abe binutils
+ run_step skip_on_fail -7 build_mingw headers
+ run_step skip_on_fail -6 build_abe stage1
+ run_step skip_on_fail -5 build_mingw crt
+ run_step skip_on_fail -4 build_mingw libs
+ run_step skip_on_fail -3 build_abe stage2
+ #run_step skip_on_fail -2 build_abe gdb
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe "${rr[ci_project]#tcwg_gnu_mingw_}" \
+ -- "${runtestflags[@]}"
+ ;;
esac
run_step reset_on_fail x check_regression
-run_step stop_on_fail x update_baseline
-run_step stop_on_fail x push_baseline
trap "" EXIT