summaryrefslogtreecommitdiff
path: root/tcwg_gnu-build.sh
diff options
context:
space:
mode:
Diffstat (limited to 'tcwg_gnu-build.sh')
-rwxr-xr-xtcwg_gnu-build.sh454
1 files changed, 337 insertions, 117 deletions
diff --git a/tcwg_gnu-build.sh b/tcwg_gnu-build.sh
index 5ef04e0f..c21f84d8 100755
--- a/tcwg_gnu-build.sh
+++ b/tcwg_gnu-build.sh
@@ -13,46 +13,102 @@ convert_args_to_variables "$@"
obligatory_variables rr[ci_project] rr[ci_config]
declare -A rr
-# Execution mode: baseline, bisect, jenkins-full
-rr[mode]="${rr[mode]-baseline}"
+# All config about configure flags, simulator, pretty names, .... is
+# implemented in this file
+# shellcheck source=tcwg_gnu-config.sh
+. $scripts/tcwg_gnu-config.sh
+
+# Execution mode: build or bisect.
+rr[mode]="${rr[mode]-build}"
# Set custom revision for one of the projects, and use baseline revisions
# for all other projects.
rr[baseline_branch]="${rr[baseline_branch]-linaro-local/ci/${rr[ci_project]}/${rr[ci_config]}}"
-rr[update_baseline]="${rr[update_baseline]-update}"
+rr[update_baseline]="${rr[update_baseline]-ignore}"
rr[top_artifacts]="${rr[top_artifacts]-$(pwd)/artifacts}"
-# Resolve top_artifacts to absolute dir because some of the subsequent
-# processes work below pwd and would write their artifacts in a wrong
-# location
-rr[top_artifacts]=$(abs_path "${rr[top_artifacts]}")
-
# {toolchain_ver}-{target}[-{type_of_test}]
IFS=- read -a ci_config <<EOF
${rr[ci_config]}
EOF
# Toolchain version -- master or release
toolchain_ver=${toolchain_ver-${ci_config[0]}}
+ci_target=${ci_target-${ci_config[1]}}
# optional type_of_test contains the type of action to perform in this test
# campaign: bootstrap, bootstrap_lto, ...
type_of_test=${type_of_test-${ci_config[2]-}}
case "${rr[ci_project]}" in
- tcwg_gnu_native_*)
+ tcwg_gcc_*|tcwg_bootstrap_*)
+ rr[target]="${rr[target]-native}"
+ rr[components]="gcc"
+ ;;
+ tcwg_binutils_*)
+ rr[target]="${rr[target]-native}"
+ rr[components]="binutils"
+ ;;
+ tcwg_glibc_*)
+ rr[target]="${rr[target]-native}"
+ rr[components]="glibc"
+ ;;
+ tcwg_gdb_*)
rr[target]="${rr[target]-native}"
- rr[components]="${rr[components]-binutils gcc linux glibc gdb}"
+ rr[components]="gdb"
;;
- tcwg_gcc_*)
+ tcwg_gnu_native_*)
rr[target]="${rr[target]-native}"
- rr[components]="${rr[components]-binutils gcc}"
+ rr[components]="binutils gcc linux glibc gdb"
;;
tcwg_gnu_cross_*)
- rr[target]="${rr[target]-${ci_config[1]}}"
- rr[components]="${rr[components]-binutils gcc linux glibc gdb qemu}"
+ rr[target]="${rr[target]-$ci_target}"
+ rr[components]="binutils gcc linux glibc gdb qemu"
+ ;;
+ tcwg_gnu_embed_*)
+ rr[target]="${rr[target]-$ci_target}"
+ rr[components]="binutils gcc newlib gdb qemu"
+ ;;
+ tcwg_gnu_mingw_*)
+ rr[target]="${rr[target]-$ci_target}"
+ rr[components]="binutils gcc mingw"
;;
*) assert_with_msg "Unknown ci_project: ${rr[ci_project]}" false ;;
esac
+# Artifacts version
+rr[major]=4
+rr[minor]=0
+
+case "${rr[ci_project]}" in
+ tcwg_gnu_native_fast_check_gdb)
+ # Tests that run quickly and have stable results. This list is taken from
+ # Sourceware's builder:
+ # https://sourceware.org/git/?p=builder.git;a=blob;f=builder/master.cfg;h=612b3177f0e5#l2407
+ testsuites=(gdb.base/break-always.exp
+ gdb.base/break-caller-line.exp
+ gdb.base/break-entry.exp
+ gdb.base/break.exp
+ gdb.base/break-fun-addr.exp
+ gdb.base/break-idempotent.exp
+ gdb.base/break-include.exp
+ gdb.base/break-inline.exp
+ gdb.base/break-main-file-remove-fail.exp
+ gdb.base/break-on-linker-gcd-function.exp
+ gdb.base/breakpoint-in-ro-region.exp
+ gdb.base/breakpoint-shadow.exp
+ gdb.base/break-probes.exp
+ gdb.gdb/unittest.exp
+ )
+ ;;
+
+ tcwg_gnu_native_fast_check_gcc)
+ # Use a small subset of GCC testsuite until we have made the
+ # results stable.
+ testsuites=(compile.exp
+ execute.exp
+ )
+ ;;
+esac
+
runtestflags=()
if test_array testsuites; then
# shellcheck disable=SC2154
@@ -79,24 +135,22 @@ trap print_traceback EXIT
default_start_at=""
default_finish_at=""
case "${rr[mode]}" in
- "baseline")
- default_finish_at="update_baseline"
- ;;
"bisect")
case "$(print_single_updated_component):${rr[ci_project]}" in
+ *:tcwg_gnu_mingw_*) default_start_at="clean_sysroot" ;;
binutils:*) default_start_at="build_abe-binutils" ;;
+ gcc:tcwg_gnu_embed_*) default_start_at="build_abe-stage1" ;;
gcc:tcwg_gnu_cross_*) default_start_at="build_abe-stage1" ;;
gcc:tcwg_gnu_native_*) default_start_at="build_abe-gcc" ;;
- gcc:tcwg_gcc_check) default_start_at="build_abe-gcc" ;;
- gcc:tcwg_gcc_*bootstrap) default_start_at="build_abe-${type_of_test#check_}" ;;
- linux:*|glibc:*) default_start_at="clean_sysroot" ;;
+ gcc:tcwg_gcc_*) default_start_at="build_abe-gcc" ;;
+ gcc:tcwg_bootstrap_*) default_start_at="build_abe-${type_of_test#check_}" ;;
+ glibc:tcwg_glibc_*) default_start_at="build_abe-glibc" ;;
+ linux:*|glibc:*|newlib:*) default_start_at="clean_sysroot" ;;
gdb:*) default_start_at="build_abe-gdb" ;;
qemu:*) default_start_at="build_abe-qemu" ;;
*) assert_with_msg "Trying to bisecting unknown component: $(print_single_updated_component)" false ;;
esac
- default_finish_at="check_regression"
;;
- "jenkins-full") ;;
esac
if [ x"$start_at" = x"default" ]; then
start_at="$default_start_at"
@@ -107,6 +161,86 @@ fi
run_step_init "$start_at" "$finish_at" "${rr[top_artifacts]}" "$verbose"
+# Initialize sysroot directory in ABE's build/ with system headers
+# so that [native] glibc can find headers to build against.
+init_abe_sysroot ()
+{
+ (
+ set -euf -o pipefail
+
+ local abe_sysroot gnu_target
+ abe_sysroot=$(print_abe_sysroot)
+ gnu_target=$(print_gnu_target "$ci_target")
+ rm -rf "$abe_sysroot/usr/include/"
+ mkdir -p "$abe_sysroot/usr/include/"
+ rsync -a "/usr/include/" "$abe_sysroot/usr/include/"
+ # Debian/Ubuntu have arch headers under target-specific directory.
+ rsync -a "/usr/include/$gnu_target/" "$abe_sysroot/usr/include/"
+ )
+}
+
+# Build mingw parts: headers, crt and libs.
+build_mingw ()
+{
+ (
+ set -euf -o pipefail
+ local todo="$1"
+
+ if [ "$todo" = "headers" ]; then
+ clone_repo mingw
+ fi
+
+ local abe_sysroot gnu_host gnu_target
+ abe_sysroot=$(print_abe_sysroot)
+ gnu_host=$(print_gnu_target native)
+ gnu_target=$(print_gnu_target "$ci_target")
+
+ PATH="$(pwd)/abe/builds/destdir/$gnu_host/bin:$PATH"
+ export PATH
+
+ local dir="mingw-$todo"
+ rm -rf "$dir"
+ mkdir "$dir"
+ cd "$dir"
+
+ if [ "$todo" = "headers" ]; then
+ ../mingw/mingw-w64-headers/configure \
+ --prefix="$abe_sysroot" --host="$gnu_target"
+ elif [ "$todo" = "crt" ]; then
+ ../mingw/mingw-w64-crt/configure \
+ --prefix="$abe_sysroot" --host="$gnu_target" \
+ --enable-libarm64 \
+ --disable-lib32 \
+ --disable-lib64 \
+ --disable-libarm32 \
+ --disable-shared \
+ --with-default-msvcrt=msvcrt
+ elif [ "$todo" = "libs" ]; then
+ ../mingw/configure \
+ --prefix="$abe_sysroot" --host="$gnu_target" \
+ --enable-libarm64 \
+ --disable-lib32 \
+ --disable-lib64 \
+ --disable-libarm32 \
+ --disable-shared \
+ --with-libraries=winpthreads \
+ --with-default-msvcrt=msvcrt
+ fi
+
+ make -j"$(nproc --all)"
+ make install
+
+ if [ "$todo" = "headers" ]; then
+    # ??? I'm not sure whether this is a hack or an adorable peculiarity
+ # of building a mingw toolchain. The point of this symlink is that
+ # it's easiest for stage1 GCC mingw compiler to find headers in mingw/
+ # directory instead of the actual sysroot.
+ rm -rf "$abe_sysroot/../mingw"
+ ln -s "$abe_sysroot" "$abe_sysroot/../mingw"
+ fi
+ )
+}
+
# Exit with code 0 if no regression compared to base-artifacts/results.
no_regression_p ()
{
@@ -121,64 +255,73 @@ no_regression_p ()
local sumfiles_base=$ref_artifacts/sumfiles
local sumfiles_new=$new_artifacts/sumfiles
+ local xfails="$sumfiles_new/xfails.xfail"
+
+ # Remove files generated here (in case we are re-generating results in
+ # round-robin-baseline.sh).
+ rm -f "$xfails"
+
if ! [ -d $sumfiles_base ]; then
return 0
elif ! [ -d $sumfiles_new ]; then
return 1
fi
- local res
-
- # We use our modified version of GCC's comparison script
- clone_or_update_repo gcc-compare-results master https://git.linaro.org/toolchain/gcc-compare-results.git
-
- local ignore_ERRORs_opt=""
- if [ ${#runtestflags[@]} != 0 ] \
- || { [ x"${rr[mode]}" = x"baseline" ] \
- && [ x"${rr[update_baseline]}" = x"push" ]; }; then
- # We are running a subset of the testsuite, which might generate
- # ERRORs in GCC testsuites that will have no tests to run --
- # ignore these ERRORs, because they are unstable from run to run.
- #
- # Also, we must ignore ERRORs during bisection baseline runs
- # (rr[mode]=baseline, rr[update_baseline]=push) so that ERRORs
- # don't creep into automatically-generated flaky XFAILs.
- ignore_ERRORs_opt="--ignore_ERRORs"
- echo "NOT COMPARING PARTIAL RESULTS" \
- > $run_step_artifacts/results.compare2
- else
- # Compare results using compare_tests only when we compare complete
- # testsuites.
- # (defined by init_step in jenkins-helpers)
- # shellcheck disable=SC2154
- gcc-compare-results/compare_tests -compr none -pass-thresh 0.9 \
- $sumfiles_base $sumfiles_new \
- | tee $run_step_artifacts/results.compare2 &
- res=0 && wait $! || res=$?
- fi
+ cat > "$xfails" <<EOF
+# This file contains three sections:
+# - newly detected flaky tests (if any)
+# - known flaky tests (from baseline)
+# - known failures (from baseline)
+#
+EOF
- local xfail="gcc-compare-results/contrib/testsuite-management/flaky"
- if [ -f "$xfail/${rr[ci_project]}-${rr[ci_config]}.xfail" ]; then
- xfail="$xfail/${rr[ci_project]}-${rr[ci_config]}"
+ local flaky_tests="$run_step_artifacts/flaky.xfail"
+ local baseline_fails="$run_step_artifacts/baseline.xfail"
+ build_abe_check_xfails "$flaky_tests" "$baseline_fails"
+
+ # Add known flaky tests and baseline_fails to the xfails.
+ #
+ # Note #1: We generate $baseline_fails without regard for flaky
+ # tests. Therefore, validate_failures in no_regression_p() will
+ # see same tests with and without flaky attributes.
+# Validate_failures uses Python sets to store results, so the first
+ # entry wins. Therefore, we need to put lists of flaky tests before
+ # lists of expected fails -- $baseline_fails.
+ #
+ # Note #2: Order of expired and non-expired flaky tests is less of
+ # an issue because expired entries are discarded by validate_failures
+ # before adding them to the ResultSet. Still, for uniformity, we
+ # put fresh flaky entries before older ones.
+ cat "$flaky_tests" >> "$xfails"
+ echo "# Known failures (from baseline)" >> "$xfails"
+ cat "$baseline_fails" >> "$xfails"
+
+ # Set and save result_expiry_date so that we use the same expiration date
+ # when re-generating results in round-robin-baseline.sh.
+ if [ "${rr[result_expiry_date]-}" = "" ]; then
+ rr[result_expiry_date]=$(date +%Y%m%d)
+ # finalize_manifest() will not see declaration because we are running
+ # in a sub-shell. Update manifest manually.
+ cat <<EOF | manifest_out
+rr[result_expiry_date]="${rr[result_expiry_date]}"
+EOF
fi
- gcc-compare-results/contrib/testsuite-management/validate_failures.py \
- --manifest=$xfail.xfail --clean_build=$sumfiles_base \
- --build_dir=$sumfiles_new $ignore_ERRORs_opt \
- | tee $run_step_artifacts/results.compare &
+ local validate_failures="gcc-compare-results/contrib/testsuite-management/validate_failures.py"
+ "$validate_failures" --manifest="$xfails" \
+ --expiry_date="${rr[result_expiry_date]}" \
+ --build_dir=$sumfiles_new --verbosity=1 \
+ > $run_step_artifacts/fails.sum &
+ local res
res=0 && wait $! || res=$?
if [ $res != 0 ]; then
- local reg_lines
- reg_lines=$(cat $run_step_artifacts/results.compare | wc -l)
- reg_lines=$(($reg_lines-100))
- cat $run_step_artifacts/results.compare | sed -e "s/^/# /" \
- | (head -n100; cat >/dev/null) \
- > $run_step_artifacts/results.regressions
- if [ $reg_lines -gt 0 ]; then
- echo "# ... and $reg_lines more entries" \
- >> $run_step_artifacts/results.regressions
- fi
+ # Add a short marker to record the status (used by Jenkins build-name)
+ local n_regressions
+ n_regressions=$(grep -c "^[A-Z]\+:" $run_step_artifacts/fails.sum \
+ || true)
+ echo "# $n_regressions regressions" \
+ > $run_step_artifacts/results.regressions
if [ $res = 2 ]; then
# Result comparison found regressions (exit code 2)
@@ -186,14 +329,6 @@ no_regression_p ()
# Exit code 1 means that the script has failed to process
# .sum files. This likely indicates malformed or very unusual
# results.
- local res1
- gcc-compare-results/contrib/testsuite-management/validate_failures.py \
- --manifest=$xfail.xfail --clean_build=$sumfiles_base \
- --build_dir=$sumfiles_new $ignore_ERRORs_opt --verbosity=1 \
- > $run_step_artifacts/fails.sum &
- res1=0 && wait $! || res1=$?
- assert_with_msg "Result comparison should have failed" \
- [ $res1 = 2 ]
printf "extra_build_params=" > $run_step_artifacts/extra-bisect-params
local exp
@@ -202,19 +337,21 @@ no_regression_p ()
done < <(cat $run_step_artifacts/fails.sum \
| awk '/^Running .* \.\.\./ { print $2 }')
printf "\n" >> $run_step_artifacts/extra-bisect-params
+ else
+ # validate_failures.py failed to process results. This means
+ # either a corrupted manifest, or corrupted results, or a bug
+    # in validate_failures.py. In any case, stop and wait for
+ # a fix.
+ return $EXTERNAL_FAIL
fi
fi
- # FIXME: In our current setup dejagnu testsuites are just not stable.
- # We disable bisections until this is resolved.
- res=0
-
return $res
)
}
-# Implement rr[breakup_updated_components] hook.
-tcwg_gnu_breakup_updated_components ()
+# Implement rr[breakup_changed_components] hook.
+tcwg_gnu_breakup_changed_components ()
{
(
set -euf -o pipefail
@@ -224,48 +361,73 @@ tcwg_gnu_breakup_updated_components ()
*_check_*)
# Changes to "foo" of check_foo projects tend to cause the most
# regressions.
- # Breakup updated components into "foo" and the rest of components
+ # Breakup changed components into "foo" and the rest of components
# to reduce the number of builds.
cc=$(echo "${rr[ci_project]}" | sed -e "s/.*_check_\(.*\)/\1/")
;;
esac
- if print_updated_components "\n" | grep -q "^$cc\$"; then
+ if print_changed_components "\n" | grep "^$cc\$" >/dev/null; then
echo "$cc"
- print_updated_components "\n" | grep -v "^$cc\$" | tr '\n' ' ' | sed -e "s/ \$//g"
+ print_changed_components "\n" | grep -v "^$cc\$" | tr '\n' ' ' \
+ | sed -e "s/ \$//g"
echo
else
- print_updated_components "\n"
+ print_changed_components "\n"
fi
)
}
-rr[breakup_updated_components]=tcwg_gnu_breakup_updated_components
+rr[breakup_changed_components]=tcwg_gnu_breakup_changed_components
+
+# Define gnu_data[] as needed below
+settings_for_ci_project_and_config "${rr[ci_project]}" "${rr[ci_config]}"
run_step stop_on_fail -10 reset_artifacts
run_step stop_on_fail x prepare_abe
case "${rr[ci_project]}" in
- tcwg_gnu_cross_build)
+ tcwg_gnu_cross_build|tcwg_gnu_embed_build)
run_step skip_on_fail 0 true
run_step skip_on_fail 1 build_abe binutils
- run_step skip_on_fail 2 build_abe stage1
+ run_step skip_on_fail 2 build_abe stage1 -- \
+ ${gnu_data[gcc_override_configure]}
run_step skip_on_fail x clean_sysroot
- run_step skip_on_fail 3 build_abe linux
- run_step skip_on_fail 4 build_abe glibc
- run_step skip_on_fail 5 build_abe stage2
+ case "${rr[components]}" in
+ *glibc*)
+ run_step skip_on_fail 3 build_abe linux
+ run_step skip_on_fail 4 build_abe glibc
+ ;;
+ *newlib*)
+ run_step skip_on_fail 4 build_abe newlib
+ ;;
+ esac
+ run_step skip_on_fail 5 build_abe stage2 -- \
+ ${gnu_data[gcc_override_configure]}
run_step skip_on_fail 6 build_abe gdb
run_step skip_on_fail 7 build_abe qemu
;;
- tcwg_gnu_cross_check_*)
+ tcwg_gnu_cross_check_*|tcwg_gnu_embed_check_*)
run_step skip_on_fail -8 build_abe binutils
- run_step skip_on_fail -7 build_abe stage1
+ run_step skip_on_fail -7 build_abe stage1 -- \
+ ${gnu_data[gcc_override_configure]}
run_step skip_on_fail x clean_sysroot
- run_step skip_on_fail -6 build_abe linux
- run_step skip_on_fail -5 build_abe glibc
- run_step skip_on_fail -4 build_abe stage2
+ case "${rr[components]}" in
+ *glibc*)
+ run_step skip_on_fail -6 build_abe linux
+ run_step skip_on_fail -5 build_abe glibc
+ ;;
+ *newlib*)
+ run_step skip_on_fail -5 build_abe newlib
+ ;;
+ esac
+ run_step skip_on_fail -4 build_abe stage2 -- \
+ ${gnu_data[gcc_override_configure]}
run_step skip_on_fail -3 build_abe gdb
run_step skip_on_fail -2 build_abe qemu
- run_step skip_on_fail 0 build_abe dejagnu
- run_step skip_on_fail 1 build_abe "${rr[ci_project]#tcwg_gnu_cross_}" -- "${runtestflags[@]}"
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe "check_${rr[ci_project]#*check_}" -- "${runtestflags[@]}" \
+ ${gnu_data[gcc_override_configure]} \
+ ${gnu_data[gcc_target_board_options]} \
+ ${gnu_data[qemu_cpu]}
;;
tcwg_gnu_native_build)
run_step skip_on_fail 0 true
@@ -276,36 +438,94 @@ case "${rr[ci_project]}" in
run_step skip_on_fail 5 build_abe glibc
run_step skip_on_fail 6 build_abe gdb
;;
- tcwg_gnu_native_check_*)
+ tcwg_gnu_native_check_*|tcwg_gnu_native_fast_check_*)
+ component="${rr[ci_project]#tcwg_gnu_native_}"
+ component="${component#fast_}"
+
+ declare -a abe_arguments=()
+ if [ "${component}" = check_gdb ]; then
+ abe_arguments=(--set check_buffer_workaround=gdb-read1)
+ fi
+
run_step skip_on_fail -8 build_abe binutils
run_step skip_on_fail -7 build_abe gcc
run_step skip_on_fail x clean_sysroot
run_step skip_on_fail -5 build_abe linux
run_step skip_on_fail -4 build_abe glibc
run_step skip_on_fail -3 build_abe gdb
- run_step skip_on_fail 0 build_abe dejagnu
- run_step skip_on_fail 1 build_abe "${rr[ci_project]#tcwg_gnu_native_}" -- "${runtestflags[@]}"
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe "$component" -- "${runtestflags[@]}" \
+ "${abe_arguments[@]}"
;;
- tcwg_gcc_bootstrap)
+ tcwg_bootstrap_build)
run_step skip_on_fail 0 true
- run_step skip_on_fail 1 build_abe binutils
- run_step skip_on_fail 2 build_abe ${type_of_test}
+ run_step skip_on_fail 1 build_abe ${type_of_test}
+ ;;
+ tcwg_bootstrap_check)
+ run_step skip_on_fail -2 build_abe ${type_of_test#check_}
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe ${type_of_test} -- "${runtestflags[@]}"
+ ;;
+ tcwg_binutils_build|tcwg_gcc_build|tcwg_gdb_build)
+ run_step skip_on_fail 0 true
+ run_step skip_on_fail 1 build_abe "${rr[components]}"
+ ;;
+ tcwg_binutils_check|tcwg_gcc_check)
+ run_step skip_on_fail -2 build_abe "${rr[components]}"
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe "check_${rr[components]}" \
+ -- "${runtestflags[@]}"
;;
- tcwg_gcc_check)
- run_step skip_on_fail -2 build_abe binutils
- run_step skip_on_fail -1 build_abe gcc
- run_step skip_on_fail 0 build_abe dejagnu
- run_step skip_on_fail 1 build_abe check_gcc -- "${runtestflags[@]}"
+ tcwg_gdb_check)
+ # GDB's testsuite has a limitation where it can only find debug info
+ # within the installation prefix. To allow it to find the
+ # distro-installed debug info for ld.so, use /usr as the prefix. We
+ # don't need to actually install GDB there though, so disable the
+ # install step.
+ run_step skip_on_fail -2 build_abe "${rr[components]}" \
+ -- --prefix /usr --disable install
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe "check_${rr[components]}" \
+ -- "${runtestflags[@]}" --set check_buffer_workaround=gdb-read1
;;
- tcwg_gcc_check_bootstrap)
- run_step skip_on_fail -2 build_abe binutils
- run_step skip_on_fail -1 build_abe ${type_of_test#check_}
- run_step skip_on_fail 0 build_abe dejagnu
- run_step skip_on_fail 1 build_abe ${type_of_test} -- "${runtestflags[@]}"
+ tcwg_glibc_build)
+ run_step skip_on_fail 0 init_abe_sysroot
+ # ABE tries to copy gcc runtime libraries on glibc install, which
+ # fails when we don't build gcc. Workaround by not installing glibc.
+ run_step skip_on_fail 1 build_abe glibc -- --disable install
+ ;;
+ tcwg_glibc_check)
+ run_step skip_on_fail -3 init_abe_sysroot
+ run_step skip_on_fail -2 build_abe glibc -- --disable install
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe check_glibc \
+ -- "${runtestflags[@]}" --disable install
+ ;;
+ tcwg_gnu_mingw_build)
+ run_step skip_on_fail 0 true
+ run_step skip_on_fail x clean_sysroot
+ run_step skip_on_fail 1 build_abe binutils
+ run_step skip_on_fail 2 build_mingw headers
+ run_step skip_on_fail 3 build_abe stage1
+ run_step skip_on_fail 4 build_mingw crt
+ run_step skip_on_fail 5 build_mingw libs
+ run_step skip_on_fail 6 build_abe stage2
+ #run_step skip_on_fail 7 build_abe gdb
+ ;;
+ tcwg_gnu_mingw_check_*)
+ run_step skip_on_fail x clean_sysroot
+ run_step skip_on_fail -8 build_abe binutils
+ run_step skip_on_fail -7 build_mingw headers
+ run_step skip_on_fail -6 build_abe stage1
+ run_step skip_on_fail -5 build_mingw crt
+ run_step skip_on_fail -4 build_mingw libs
+ run_step skip_on_fail -3 build_abe stage2
+ #run_step skip_on_fail -2 build_abe gdb
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe "${rr[ci_project]#tcwg_gnu_mingw_}" \
+ -- "${runtestflags[@]}"
;;
esac
run_step reset_on_fail x check_regression
-run_step stop_on_fail x update_baseline
-run_step stop_on_fail x push_baseline
trap "" EXIT