#!/bin/bash

set -euf -o pipefail

scripts=$(dirname $0)
# shellcheck source=jenkins-helpers.sh
. $scripts/jenkins-helpers.sh
# shellcheck source=round-robin.sh
. $scripts/round-robin.sh

convert_args_to_variables "$@"

obligatory_variables rr[ci_config]
declare -A rr

# Execution mode: baseline, bisect, jenkins-full
rr[mode]="${rr[mode]-baseline}"

# Set custom revision for one of the projects, and use baseline revisions
# for all other projects.
rr[ci_project]="${rr[ci_project]-tcwg_gnu}"
rr[baseline_branch]="${rr[baseline_branch]-linaro-local/ci/${rr[ci_project]}/${rr[ci_config]}}"
rr[update_baseline]="${rr[update_baseline]-update}"
rr[top_artifacts]="${rr[top_artifacts]-$(pwd)/artifacts}"
# Resolve top_artifacts to an absolute dir because some of the subsequent
# processes work below pwd and would write their artifacts in the wrong
# location.
rr[top_artifacts]=$(abs_path "${rr[top_artifacts]}")

# {toolchain_name}-{toolchain_ver}-{target}-{type_of_test}
IFS=- read -a ci_config </dev/null) \
                > $run_step_artifacts/results.regressions
            if [ $reg_lines -gt 0 ]; then
                echo "# ... and $reg_lines more entries" \
                    >> $run_step_artifacts/results.regressions
            fi
        done

        local res1
        gcc-compare-results/contrib/testsuite-management/validate_failures.py \
            --manifest=$xfail.xfail --clean_build=$sumfiles_base \
            --build_dir=$sumfiles_new $ignore_ERRORs_opt --verbosity=1 \
            > $run_step_artifacts/fails.sum &
        res1=0 && wait $! || res1=$?
        assert_with_msg "Result comparison should have failed" \
            [ $res1 = $res ]

        printf "extra_build_params=" > $run_step_artifacts/extra-bisect-params
        local exp
        while read exp; do
            printf "++testsuites %s " $exp >> $run_step_artifacts/extra-bisect-params
        done < <(cat $run_step_artifacts/fails.sum \
                     | awk '/^Running .* \.\.\./ { print $2 }')
        printf "\n" >> $run_step_artifacts/extra-bisect-params
    fi

    return $res
    )
}

run_step stop_on_fail -10 reset_artifacts
run_step stop_on_fail x prepare_abe
case "$type_of_test" in
    build_cross)
        run_step skip_on_fail 0 true
        run_step skip_on_fail 1 build_abe binutils
        run_step skip_on_fail 2 build_abe stage1
        run_step skip_on_fail 3 build_abe linux
        run_step skip_on_fail 4 build_abe glibc
        run_step skip_on_fail 5 build_abe stage2
        run_step skip_on_fail 6 build_abe qemu
        ;;
    check_cross)
        run_step skip_on_fail -8 build_abe binutils
        run_step skip_on_fail -7 build_abe stage1
        run_step skip_on_fail -6 build_abe linux
        run_step skip_on_fail -5 build_abe glibc
        run_step skip_on_fail -4 build_abe stage2
        run_step skip_on_fail -3 build_abe qemu
        run_step skip_on_fail 0 build_abe dejagnu
        run_step skip_on_fail 1 build_abe check_gcc -- "${runtestflags[@]}"
        ;;
    check_binutils)
        run_step skip_on_fail -2 build_abe binutils
        run_step skip_on_fail 0 build_abe dejagnu
        run_step skip_on_fail 1 build_abe check_binutils -- "${runtestflags[@]}"
        ;;
    check_gcc*|check_bootstrap*)
        run_step skip_on_fail -2 build_abe binutils
        run_step skip_on_fail -1 build_abe ${type_of_test#check_}
        run_step skip_on_fail 0 build_abe dejagnu
        run_step skip_on_fail 1 build_abe ${type_of_test} -- "${runtestflags[@]}"
        ;;
    *)
        run_step skip_on_fail 0 true
        run_step skip_on_fail 1 build_abe ${type_of_test}
        ;;
esac
run_step reset_on_fail x check_regression
run_step stop_on_fail x update_baseline
run_step stop_on_fail x push_baseline

trap "" EXIT
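
# Usage sketch (illustrative only): this script is normally driven by the
# Linaro CI harness. The option spelling below assumes the "--var value"
# convention handled by convert_args_to_variables in jenkins-helpers.sh,
# and the ci_config value is hypothetical:
#
#   ./tcwg_gnu-build.sh \
#       --rr[ci_config] gnu-master-aarch64-check_gcc \
#       --rr[mode] baseline
#
# rr[ci_config] is the only mandatory setting (see obligatory_variables
# above); the remaining rr[*] keys fall back to the "${rr[key]-default}"
# defaults assigned at the top of the script.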