path: root/tcwg_gnu-build.sh
blob: 8daedce22193d1e06f30e6bf975b832828dd0b08 (plain)
#!/bin/bash
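# Build and test driver for the tcwg_gnu CI configurations: builds the
# configured components with ABE, optionally runs their testsuites, and
# checks the results for regressions against the current baseline
# (orchestrated by the round-robin.sh helpers sourced below).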

set -euf -o pipefail

scripts=$(dirname "$0")
# shellcheck source=jenkins-helpers.sh
. "$scripts/jenkins-helpers.sh"
# shellcheck source=round-robin.sh
. "$scripts/round-robin.sh"

convert_args_to_variables "$@"
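# Note: convert_args_to_variables (from jenkins-helpers.sh) turns the
# command-line arguments into shell variables and array entries.  Judging
# from the "++testsuites" hints generated for bisect runs below, arguments
# are expected in forms along the lines of "--var value" and
# "++array value"; the exact syntax is defined by the helper, not here.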

obligatory_variables rr[ci_config]
declare -A rr

# Execution mode: baseline, bisect, jenkins-full
rr[mode]="${rr[mode]-baseline}"

# A custom revision can be set for one of the projects (via the
# per-component branch handling below); baseline revisions are used for all
# other projects.
rr[ci_project]="${rr[ci_project]-tcwg_gnu}"
rr[baseline_branch]="${rr[baseline_branch]-linaro-local/ci/${rr[ci_project]}/${rr[ci_config]}}"
rr[update_baseline]="${rr[update_baseline]-update}"
rr[top_artifacts]="${rr[top_artifacts]-$(pwd)/artifacts}"

# Resolve top_artifacts to an absolute directory because some of the
# subsequent processes work below pwd and would otherwise write their
# artifacts to the wrong location.
rr[top_artifacts]=$(abs_path "${rr[top_artifacts]}")

# ci_config has the form {toolchain_name}-{toolchain_ver}-{target}-{type_of_test}.
IFS=- read -r -a ci_config <<EOF
${rr[ci_config]}
EOF
# Toolchain version -- master or release
toolchain_ver=${toolchain_ver-${ci_config[1]}}
# type_of_test selects the action to perform in this test campaign:
# bootstrap, bootstrap_lto, check_binutils, etc.
type_of_test=${type_of_test-${ci_config[3]}}
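# For example (hypothetical config), ci_config=gnu-master-aarch64-check_gcc
# splits into (gnu master aarch64 check_gcc), giving toolchain_ver=master
# and type_of_test=check_gcc.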

case "$type_of_test" in
    *_binutils)
	rr[target]="${rr[target]-native}"
	rr[components]="binutils"
	;;
    *bootstrap*|*gcc*)
	rr[target]="${rr[target]-native}"
	rr[components]="gcc"
	;;
    *_cross)
	rr[target]="${rr[target]-${ci_config[2]}}"
	rr[components]="binutils gcc glibc qemu"
	;;
    *) assert_with_msg "Unknown type_of_test: $type_of_test" false ;;
esac
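# rr[components] lists the projects involved in this configuration; it
# drives the per-component branch/url handling below and, presumably, which
# sources the round-robin.sh machinery checks out and rebuilds.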

runtestflags=()
if test_array testsuites; then
    # shellcheck disable=SC2154
    for i in "${testsuites[@]}"; do
	runtestflags+=(--set "runtestflags=$i")
    done
fi
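# For example (hypothetical value), passing "++testsuites gcc.dg/dg.exp" on
# the command line would append "--set runtestflags=gcc.dg/dg.exp" to the
# check_* steps below, restricting the run to that testsuite.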

# Use baseline branches by default.
for c in ${rr[components]}; do
    rr[${c}_branch]=${rr[${c}_branch]-baseline}
    if [ x"${rr[${c}_branch]}" != x"baseline" ]; then
	obligatory_variables rr[${c}_url]
    fi
done
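# This is the "custom revision for one of the projects" mechanism mentioned
# near the top: a component can be pinned to a specific branch or SHA via
# <component>_branch, in which case a matching <component>_url must be
# supplied as well.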

start_at="${start_at-default}"
finish_at="${finish_at-default}"
verbose="${verbose-true}"
verbose2="${verbose2-false}"

if $verbose2; then set -x; fi

trap print_traceback EXIT
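# print_traceback dumps a bash call trace if anything below exits
# unexpectedly; the trap is cleared at the very end of the script once all
# steps have completed.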

# Set start and finish steps for different modes.
default_start_at=""
default_finish_at=""
case "${rr[mode]}" in
    "baseline")
	default_finish_at="update_baseline"
	;;
    "bisect")
	case "$(print_single_updated_component):$type_of_test" in
	    binutils:*) default_start_at="build_abe-binutils" ;;
	    gcc:*_cross) default_start_at="build_abe-stage1" ;;
	    gcc:*) default_start_at="build_abe-${type_of_test#check_}" ;;
	    glibc:*) default_start_at="build_abe-glibc" ;;
	    qemu:*) default_start_at="build_abe-qemu" ;;
	    *) assert_with_msg "Trying to bisect unknown component: $(print_single_updated_component)" false ;;
	esac
	default_finish_at="check_regression"
	;;
    "jenkins-full") ;;
esac
if [ x"$start_at" = x"default" ]; then
    start_at="$default_start_at"
fi
if [ x"$finish_at" = x"default" ]; then
    finish_at="$default_finish_at"
fi

run_step_init "$start_at" "$finish_at" "${rr[top_artifacts]}" "$verbose"

# Exit with code 0 if there is no regression compared to base-artifacts/results.
no_regression_p ()
{
    (
    set -euf -o pipefail

    no_build_regression_p "$@"

    local ref_artifacts=$1
    local new_artifacts=$2

    local sumfiles_base=$ref_artifacts/sumfiles
    local sumfiles_new=$new_artifacts/sumfiles

    if ! [ -d $sumfiles_base ]; then
	return 0
    elif ! [ -d $sumfiles_new ]; then
	return 1
    fi

    local res

    # We use our modified version of GCC's comparison script
    clone_or_update_repo gcc-compare-results master https://git.linaro.org/toolchain/gcc-compare-results.git

    # (defined by init_step in jenkins-helpers)
    # shellcheck disable=SC2154
    gcc-compare-results/compare_tests -compr none -pass-thresh 0.9 \
				      $sumfiles_base $sumfiles_new \
	| tee $run_step_artifacts/results.compare1 &
    res=0 && wait $! || res=$?
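    # The comparison runs as a background pipeline, and
    # "res=0 && wait $! || res=$?" captures its exit status without tripping
    # "set -e"; the same pattern is used for the other comparisons below.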

    local xfail="gcc-compare-results/contrib/testsuite-management/flaky"
    if [ -f "$xfail/${rr[ci_config]}.xfail" ]; then
	xfail="$xfail/${rr[ci_config]}"
    fi

    local ignore_ERRORs_opt=""
    if [ ${#runtestflags[@]} != 0 ]; then
	# We are running a subset of the testsuite, which can generate ERRORs
	# in GCC testsuites that end up with no tests to run -- ignore these
	# ERRORs because they are unstable from run to run.
	ignore_ERRORs_opt="--ignore_ERRORs"
    fi

    gcc-compare-results/contrib/testsuite-management/validate_failures.py \
	--manifest=$xfail.xfail --clean_build=$sumfiles_base \
	--build_dir=$sumfiles_new $ignore_ERRORs_opt \
	| tee $run_step_artifacts/results.compare2 &
    res=0 && wait $! || res=$?

    if [ $res != 0 ]; then
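	# Produce a "#"-prefixed copy of the comparison output in
	# results.regressions, truncated to the first 100 lines, plus a note on
	# how many lines were dropped.  (The loop writes the file with ">", so
	# it ends up holding the output of the last comparison,
	# results.compare2.)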
	local reg_lines
	for i in 1 2; do
	    reg_lines=$(cat $run_step_artifacts/results.compare$i | wc -l)
	    reg_lines=$(($reg_lines-100))
	    cat $run_step_artifacts/results.compare$i | sed -e "s/^/# /" \
		| (head -n100; cat >/dev/null) \
		      > $run_step_artifacts/results.regressions
	    if [ $reg_lines -gt 0 ]; then
		echo "# ... and $reg_lines more entries" \
		     >> $run_step_artifacts/results.regressions
	    fi
	done

	local res1
	gcc-compare-results/contrib/testsuite-management/validate_failures.py \
	    --manifest=$xfail.xfail --clean_build=$sumfiles_base \
	    --build_dir=$sumfiles_new $ignore_ERRORs_opt --verbosity=1 \
	    > $run_step_artifacts/fails.sum &
	res1=0 && wait $! || res1=$?
	assert_with_msg "Result comparison should have failed" \
			[ $res1 = $res ]

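	# Record the .exp testsuites that contained failures so that a
	# follow-up bisect run can restrict itself to them via "++testsuites"
	# arguments (cf. the runtestflags handling near the top).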
	printf "extra_build_params=" > $run_step_artifacts/extra-bisect-params
	local exp
	while read exp; do
	    printf "++testsuites %s " $exp >> $run_step_artifacts/extra-bisect-params
	done < <(cat $run_step_artifacts/fails.sum \
		     | awk '/^Running .* \.\.\./ { print $2 }')
	printf "\n" >> $run_step_artifacts/extra-bisect-params
    fi

    return $res
    )
}

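# run_step comes from round-robin.sh: the first argument selects how a
# failure is handled (stop_on_fail, skip_on_fail, reset_on_fail), the second
# appears to be the score recorded for the step in the results file ("x"
# for unscored steps), and the rest is the command to run.  The
# check_regression step is what ends up calling no_regression_p() above.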
run_step stop_on_fail -10 reset_artifacts
run_step stop_on_fail x prepare_abe
case "$type_of_test" in
    build_cross)
	run_step skip_on_fail 0 true
	run_step skip_on_fail 1 build_abe binutils
	run_step skip_on_fail 2 build_abe stage1
	run_step skip_on_fail 3 build_abe linux
	run_step skip_on_fail 4 build_abe glibc
	run_step skip_on_fail 5 build_abe stage2
	run_step skip_on_fail 6 build_abe qemu
	;;
    check_cross)
	run_step skip_on_fail -8 build_abe binutils
	run_step skip_on_fail -7 build_abe stage1
	run_step skip_on_fail -6 build_abe linux
	run_step skip_on_fail -5 build_abe glibc
	run_step skip_on_fail -4 build_abe stage2
	run_step skip_on_fail -3 build_abe qemu
	run_step skip_on_fail 0 build_abe dejagnu
	run_step skip_on_fail 1 build_abe check_gcc -- "${runtestflags[@]}"
	;;
    check_binutils)
	run_step skip_on_fail -2 build_abe binutils
	run_step skip_on_fail 0 build_abe dejagnu
	run_step skip_on_fail 1 build_abe check_binutils -- "${runtestflags[@]}"
	;;
    check_gcc*|check_bootstrap*)
	run_step skip_on_fail -2 build_abe binutils
	run_step skip_on_fail -1 build_abe ${type_of_test#check_}
	run_step skip_on_fail 0 build_abe dejagnu
	run_step skip_on_fail 1 build_abe ${type_of_test} -- "${runtestflags[@]}"
	;;
    *)
	run_step skip_on_fail 0 true
	run_step skip_on_fail 1 build_abe ${type_of_test}
	;;
esac
run_step reset_on_fail x check_regression
run_step stop_on_fail x update_baseline
run_step stop_on_fail x push_baseline

trap "" EXIT