author	Laurent Alfonsi <laurent.alfonsi@linaro.org>	2023-08-21 09:55:38 +0200
committer	Laurent Alfonsi <laurent.alfonsi@linaro.org>	2023-08-30 14:33:55 +0200
commit	ac27d7b761816ec8ae78781bfdca2f73e2db0066 (patch)
tree	861710f455ca00d8917cf553f540ad6b178ed7b5 /tcwg_bmk-build.sh
parent	0c21bd770fa791de0160c26335801f91b959c5ba (diff)
tcwg_bmk-build.sh: Merge no_regression_p/compare_results routines
Change-Id: I061cfd24226478de89c5a8884cc2e024cb642f42
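
For reference, the merged routine condenses to the control flow below (a minimal sketch reconstructed from the two hunks that follow; no_build_regression_p, assert_with_msg, and the $run_step_top_artifacts/$run_step_artifacts variables are helpers and globals defined elsewhere in tcwg_bmk-build.sh, and the metric comparison is elided):

no_regression_p ()
{
    (
    set -euf -o pipefail

    # First gate: fail here if the build itself regressed.
    no_build_regression_p "$@"

    # The score is the last non-comment, non-empty line of ./results.
    local score
    score=$(grep -v -E "^#|^$" "$run_step_top_artifacts/results" | tail -n1)

    # A negative score means benchmarking failed, so the metric data is
    # not valid; report "no regression" and return.
    if [ "$score" -lt 0 ]; then
        return 0
    fi

    # ... metric comparison (formerly compare_results) runs here and
    # creates results.regressions when a metric has regressed ...

    # Final gate: the marker file decides the exit status.
    if [ -f "$run_step_artifacts/results.regressions" ]; then
        return 1
    fi
    return 0
    )
}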
Diffstat (limited to 'tcwg_bmk-build.sh')
-rwxr-xr-x	tcwg_bmk-build.sh	64
1 file changed, 27 insertions(+), 37 deletions(-)
diff --git a/tcwg_bmk-build.sh b/tcwg_bmk-build.sh
index 8bb412a8..2be9de12 100755
--- a/tcwg_bmk-build.sh
+++ b/tcwg_bmk-build.sh
@@ -339,14 +339,36 @@ benchmark ()
)
}
-# Compare results obtained from metric data between $1 and $2
-# and generate results-vs-prev/compare-results.csv
-compare_results ()
+
+# Exit with code 0 if no regression compared to base-artifacts/.
+# Inspect build results in ./results and performance results in ./results_id.
+no_regression_p ()
{
(
set -euf -o pipefail
- local metric_id="$1"
+ # check score-based regression
+ no_build_regression_p "$@"
+
+ # At this stage, there's no score-based regression.
+ # We are now checking metric-based regression.
+ local score
+ score=$(grep -v -E "^#|^$" $run_step_top_artifacts/results | tail -n1)
+ # If the score is negative, then benchmarking failed and the results aren't valid.
+ if [ $score -lt 0 ]; then
+ return 0
+ fi
+
+ assert_with_msg "Benchmarking succeeded, but results_id is missing" \
+ [ -f $run_step_top_artifacts/results_id ]
+
+ # Make sure there is no stray results.regressions file, which we use
+ # as a failure marker.
+ assert ! [ -f $run_step_artifacts/results.regressions ]
+
+ # At this stage,
+ # - new/results_id should exist (score >= 0)
+ # - ref/results_id might not exist (e.g. baseline score < 0)
local compare_opts=""
case "${rr[target]}:$cflags" in
@@ -459,42 +481,10 @@ compare_results ()
fi
done
+ # The return status relies on the presence of the results.regressions file.
if [ -f $run_step_artifacts/results.regressions ]; then
assert_with_msg "Found a regression while comparing the build against itself" \
[ "$ref_results" != "$new_results" ]
- fi
- )
-}
-
-# Exit with code 0 if no regression compared to base-artifacts/.
-no_regression_p ()
-{
- (
- set -euf -o pipefail
-
- # check score-based regression
- no_build_regression_p "$@"
-
- # At this stage, there's no score-based regression.
- # We are now checking metric-based regression.
- local score
- score=$(grep -v -E "^#|^$" $run_step_top_artifacts/results | tail -n1)
- # if score is negative, then benchmarking failed and results aren't valid.
- if [ $score -lt 0 ]; then
- return 0
- fi
-
- # Make sure there is no stray results.regression file, which we use
- # as failure marker.
- assert ! [ -f $run_step_artifacts/results.regressions ]
-
- # At this stage,
- # - artifacts/annex/bmk-data should exist (score>0)
- # - base-artifacts/annex/bmk-data should exist if update_baseline!=init
- # we call compare_results to generate the csv, and check metric regressions.
- compare_results "$metric_id"
-
- if [ -f $run_step_artifacts/results.regressions ]; then
return 1
fi
return 0
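
The score extraction above is easy to check in isolation. With a hypothetical results file (contents invented for illustration), the grep drops comment and empty lines, and tail -n1 keeps the last remaining one:

$ cat results
# benchmark scores, one per step (hypothetical contents)
build: ok

-3
$ grep -v -E "^#|^$" results | tail -n1
-3

A negative value such as -3 takes the early-return branch in no_regression_p: benchmarking failed, so the metric comparison is skipped and the function reports no regression.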