Diffstat (limited to 'lnt-utils.sh')
-rw-r--r--  lnt-utils.sh  455
1 file changed, 455 insertions(+), 0 deletions(-)
diff --git a/lnt-utils.sh b/lnt-utils.sh
new file mode 100644
index 00000000..5ad22600
--- /dev/null
+++ b/lnt-utils.sh
@@ -0,0 +1,455 @@
+#!/usr/bin/env bash
+
+# ==============================================================================
+# is_first_lnt_entry: tracks whether the next test entry is the first one;
+# used by both report_header() and report_test_entry().
+declare is_first_lnt_entry
+
+report_header()
+{
+ local machine_name="$1"
+ local date_time="$2"
+ shift 2
+ local run_infos=("$@")
+ declare -g is_first_lnt_entry
+
+ cat <<-EOF
+ {
+ "Machine": {
+ "Info": {},
+ "Name": "$machine_name"
+ },
+ "Run": {
+ "Info": {
+EOF
+
+ local i tag val
+ for i in "${run_infos[@]}"; do
+ # Split on the first colon; parameter expansion preserves any
+ # whitespace in the value, unlike the unquoted echo|cut it replaces.
+ tag=${i%%:*}
+ val=${i#*:}
+ echo " \"$tag\": \"$val\","
+ done
+
+ cat <<-EOF
+ "__report_version__": "1"
+ },
+ "Start Time": "$date_time"
+ },
+ "Tests": [
+ EOF
+ is_first_lnt_entry=true
+}
+
+report_footer()
+{
+ cat <<-EOF
+ ]
+ }
+ EOF
+}
+
+report_test_entry()
+{
+ local name="$1"
+ local value="$2"
+ declare -g is_first_lnt_entry
+
+ $is_first_lnt_entry || echo " ,"
+
+ cat <<-EOF
+ {
+ "Data": [
+ $value
+ ],
+ "Info": {},
+ "Name": "$name"
+ }
+ EOF
+ is_first_lnt_entry=false
+}
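+
+# A minimal usage sketch of the three report_* helpers above (hypothetical
+# machine name, date, and run-info values):
+#   report_header "my-machine" "2024-01-01 00:00:00" "tag:my_suite"
+#   report_test_entry "my_suite.foo.execution" "42"
+#   report_test_entry "my_suite.bar.execution" "43"
+#   report_footer
+# report_test_entry prepends a comma before every entry except the first
+# (tracked via is_first_lnt_entry), keeping the emitted JSON array valid.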
+
+get_current_component_url()
+{
+ local c=$1
+
+ local url rev
+ url=$(get_current_git "${c}_url")
+ rev=$(get_current_git "${c}_rev")
+
+ if [[ "$url" =~ git://sourceware.org/git/ ]]; then
+ url="${url#git://sourceware.org/git/}"
+ url="https://sourceware.org/git/?p=$url"
+ echo "$url;a=commit;h=$rev"
+ elif [[ "$url" =~ https://github.com/ ]] \
+ || [[ "$url" =~ https://gitlab.com/ ]]; then
+ echo "${url%.git}/commit/$rev"
+ elif [[ "$url" =~ https://git.linaro.org/ ]]; then
+ echo "${url}/commit/?id=$rev"
+ else
+ echo "$url ($rev)"
+ fi
+}
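+
+# For example, a hypothetical component at git://sourceware.org/git/binutils-gdb.git
+# with rev deadbeef maps to:
+#   https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=deadbeef
+# while GitHub and GitLab URLs map to ${url%.git}/commit/$rev.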
+
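+# get_component_changes prints "(unchanged)" when the baseline and current
+# revisions of component $1 match, and "(+ N commits)" otherwise.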
+get_component_changes()
+{
+ local c=${1:?}
+ local base_rev cur_rev
+ # In "init" mode, no component_changes annotation
+ if [ x"$(get_current_manifest "{rr[update_baseline]}")" != x"init" ]; then
+ base_rev=$(get_baseline_git "${c}_rev")
+ cur_rev=$(get_current_git "${c}_rev")
+ if [ "$base_rev" = "$cur_rev" ]; then
+ echo "(unchanged)"
+ else
+ echo "(+ $(git -C "$c" rev-list --count "$base_rev..$cur_rev" || echo "??") commits)"
+ fi
+ fi
+}
+
+get_describe_pad()
+{
+ local nb_changed_components=0 describe_pad="" c describe
+
+ for c in $(get_current_manifest "{rr[components]}"); do
+ local base_rev="" cur_rev
+ # in "init" mode, the base_rev will be empty, and considered as
+ # different than cur_rev. the results of the function will be as
+ # if every component changed.
+ [ "$(get_current_manifest "{rr[update_baseline]}")" = "init" ] || \
+ base_rev=$(get_baseline_git ${c}_rev || true)
+ cur_rev=$(get_current_git ${c}_rev)
+ if [ "$base_rev" != "$cur_rev" ]; then
+ nb_changed_components=$((nb_changed_components+1))
+ if [ $nb_changed_components = 1 ]; then
+ describe=$(describe_sha1 "$c" "$cur_rev" false)
+ describe_pad=$(
+ echo "$describe" \
+ | sed 's/\(.*\)-\(.*\)-\(.*\)$/\1 \2 \3/' \
+ | awk '{ $2 = sprintf("%05d", $2); print $1"-"$2"-"$3}'
+ )
+ else
+ describe_pad="$nb_changed_components-changed-components"
+ fi
+ fi
+ done
+
+ if [ $nb_changed_components = 0 ]; then
+ echo "no-change"
+ else
+ echo "$describe_pad"
+ fi
+}
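+
+# For example, get_describe_pad rewrites a hypothetical describe string
+# "v2.38-123-gdeadbee" as "v2.38-00123-gdeadbee"; zero-padding the commit
+# count keeps describe-based run orders sorting consistently as text.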
+
+# ==============================================================================
+
+# Create an LNT JSON report for the given sumfiles.
+generate_lnt_gnu_check_report()
+{
+ local build_url=${1:?}
+ local ci_project=${2:?} # tcwg_gnu_native_check_gcc
+ local ci_config=${3:?} # master-aarch64
+ local results_date=${4:?}
+ local jira_key=${5:?}
+ local results_summary="$6"
+ local sumfiles_dir=${7:?}
+ local output_file=${8:?}
+
+ if ! [ -f "$results_summary" ]; then
+ # FIXME: generate dashboards for build-only configurations
+ return
+ fi
+
+ local -a sumfiles
+ readarray -t -d '' sumfiles < <(find "$sumfiles_dir" -name '*.sum' -print0)
+ # Do not generate any LNT report if there are no sumfiles.
+ [ ${#sumfiles[@]} = 0 ] && return
+
+ local lnt_testsuite=${ci_project}
+ local machine_name=${ci_config}
+
+ local build_number
+ build_number=$(basename "$build_url")
+
+ (
+ # Generate a header with useful info for the report
+ additional_run_infos=("tag:$lnt_testsuite" "test_url:$build_url")
+ additional_run_infos+=("run_order:$(printf "%04d" "$build_number") ($(get_describe_pad))")
+ [ "$jira_key" != "-" ] && additional_run_infos+=("regression:https://linaro.atlassian.net/browse/$jira_key")
+
+ for c in $(get_current_manifest "{rr[components]}"); do
+ additional_run_infos+=("git_${c}:$(get_current_component_url $c) $(get_component_changes)")
+ done
+
+ report_header "$machine_name" "$results_date" "${additional_run_infos[@]}"
+
+ local field_name field_value
+
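+ # Extract the fails/flaky/all totals from the validate_failures
+ # summary: each matching "# of ..." line is stripped of its leading
+ # non-digit text, leaving only the count.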
+ for field in fails flaky all; do
+ field_value=$(grep "^# of.* $field" "$results_summary" \
+ | sed -e "s/[^0-9]*//")
+ # tcwg_test_gcc_check.TOTAL.fails.nb_fails
+ field_name="TOTAL.$field.nb_$field"
+ report_test_entry "$lnt_testsuite.$field_name" "$field_value"
+ done
+
+ # Below loops scan .sum files and calculate stats for various test
+ # states.
+ # FIXME: consider whether to remove them and rely solely on statistics
+ # generated by validate_failures.py (aka $results_summary) above.
+
+ # Disable xtrace; this function is too verbose.
+ set +x
+
+ # An entry and a total value must be generated for each of these
+ # fields (even if the field is missing in the sum file).
+ local all_fields=(
+ FAIL UNRESOLVED UNTESTED ERROR XPASS OTHER
+ KFAIL XFAIL PASS UNSUPPORTED
+ good bad
+ )
+
+ for field in "${all_fields[@]}"; do
+ eval "total_$field=0"
+ done
+
+ for sumfile in "${sumfiles[@]}"; do
+ suite_name=$(basename "$sumfile" .sum) # gcc, g++, libatomic, ...
+ fields=("${all_fields[@]}")
+
+ for field in "${fields[@]}"; do
+ eval "$field=0"
+ done
+
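+ # Count every DejaGnu result line ("FAIL: ...", "PASS: ...", etc.) in
+ # the sumfile, bucketing unknown kinds under OTHER and classifying
+ # each kind as good or bad.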
+ while read -r kind; do
+ [[ ! " ${fields[*]} " =~ " $kind " ]] && kind="OTHER"
+ eval "(( $kind+=1 ))"
+ eval "(( total_$kind+=1 ))"
+ case "$kind" in
+ KFAIL|XFAIL|PASS|UNSUPPORTED)
+ (( good+=1 ))
+ (( total_good+=1 ))
+ ;;
+ FAIL|UNRESOLVED|UNTESTED|ERROR|XPASS|OTHER)
+ (( bad+=1 ))
+ (( total_bad+=1 ))
+ ;;
+ *)
+ ;;
+ esac
+ done < <(grep -E '^[A-Z]+:' "$sumfile" | sed 's/:.*//')
+
+ for field in "${fields[@]}"; do
+ # tcwg_test_gcc_check.gcc.FAIL.nb_FAIL
+ field_name="$suite_name.$field.nb_$field"
+ report_test_entry "$lnt_testsuite.$field_name" "${!field}"
+ done
+ done
+
+ for field in "${all_fields[@]}"; do
+ # tcwg_test_gcc_check.TOTAL.FAIL.nb_FAIL
+ field_name="TOTAL.$field.nb_$field"
+ report_test_entry "$lnt_testsuite.$field_name" "$(eval "echo \$total_$field")"
+ done
+
+ report_footer
+ ) > "$output_file"
+}
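+
+# A hypothetical invocation of the above (all argument values are examples):
+#   generate_lnt_gnu_check_report "https://ci.example.org/job/123" \
+#     tcwg_gnu_native_check_gcc master-aarch64 "2024-01-01 00:00:00" - \
+#     results.summary sumfiles/ lnt-report.json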
+
+
+# ==============================================================================
+
+# Create an LNT JSON report for the given benchmark results.
+generate_lnt_bmk_report()
+{
+ local build_url=${1:?}
+ local ci_project=${2:?} # tcwg_bmk-code_speed-cpu2017rate
+ local ci_config=${3:?} # llvm-aarch64-master-O3
+ local results_date=${4:?}
+ local jira_key=${5:?}
+ local size_csv=${6:?}
+ local perf_csv=${7:?}
+ local status_csv=${8:?}
+ local variability_avg_csv=${9:?}
+ local variability_max_csv=${10:?}
+ local compare_results_internal_csv=${11:?}
+ local output_file=${12:?}
+
+ local lnt_testsuite=${ci_project}
+ local machine_name=${ci_config}
+
+ local build_number
+ build_number=$(basename "$build_url")
+
+ [ -f "$size_csv" ] || return
+ [ -f "$perf_csv" ] || return
+
+ (
+ # Generate a header with useful info for the report
+ additional_run_infos=("tag:$lnt_testsuite" "test_url:$build_url")
+ additional_run_infos+=("run_order:$(printf "%04d" "$build_number") ($(get_describe_pad))")
+ [ "$jira_key" != "-" ] && additional_run_infos+=("regression:$jira_key")
+
+ for c in $(get_current_manifest "{rr[components]}"); do
+ additional_run_infos+=("git_${c}:$(get_current_component_url $c) $(get_component_changes)")
+ done
+
+ report_header "$machine_name" "$results_date" "${additional_run_infos[@]}"
+
+ # Disable xtrace locally; this function is too verbose.
+ set +x
+
+ ## Inspect size.csv
+ # 531.deepsjeng_r,deepsjeng_r_base.default,90926
+ while IFS="," read -r benchmark symbol size; do
+ [[ "$symbol" == *base.default ]] || continue
+
+ report_test_entry "$lnt_testsuite.$benchmark.code_size" "$size"
+
+ done < <(tail -n +2 "$size_csv" | tr -d '\r')
+
+ ## Inspect perf.csv
+ # 531.deepsjeng_r,deepsjeng_r_base.default,8692,na
+ local -A execution_time
+ while IFS="," read -r benchmark symbol sample _dso; do
+ [[ "$symbol" == *base.default ]] || continue
+
+ execution_time[$symbol]="$sample"
+ report_test_entry "$lnt_testsuite.$benchmark.execution" "$sample"
+
+ done < <(tail -n +2 "$perf_csv" | tr -d '\r')
+
+ ## Inspect bmk-specific-variability-avg.csv
+ # 531.deepsjeng_r,deepsjeng_r_base.default,0.96,0.0,,
+ if [ -f "$variability_avg_csv" ]; then
+ # shellcheck disable=SC2034
+ while IFS="," read -r benchmark symbol sample_var sve_var vect_var; do
+ [[ "$symbol" == *base.default ]] || continue
+
+ report_test_entry "$lnt_testsuite.$benchmark (avg) (percentage).execution_variation" "$sample_var"
+
+ if [ "${execution_time[$symbol]+abc}" ]; then
+ local var
+ var=$(echo "$sample_var * ${execution_time[$symbol]} / 100" | bc)
+ report_test_entry "$lnt_testsuite.$benchmark (avg).execution_variation" "$var"
+ report_test_entry "$lnt_testsuite.$benchmark.execution_variation" "$var"
+ fi
+ done < <(tail -n +2 "$variability_avg_csv" | tr -d '\r')
+ fi
+
+ ## Inspect bmk-specific-variability-max.csv
+ # 531.deepsjeng_r,deepsjeng_r_base.default,0.96,0.0,,
+ if [ -f "$variability_max_csv" ]; then
+ # shellcheck disable=SC2034
+ while IFS="," read -r benchmark symbol sample_var sve_var vect_var; do
+ [[ "$symbol" == *base.default ]] || continue
+
+ report_test_entry "$lnt_testsuite.$benchmark (max) (percentage).execution_variation" "$sample_var"
+
+ if [ "${execution_time[$symbol]+abc}" ]; then
+ local var
+ var=$(echo "$sample_var * ${execution_time[$symbol]} / 100" | bc)
+ report_test_entry "$lnt_testsuite.$benchmark (max).execution_variation" "$var"
+ fi
+ done < <(tail -n +2 "$variability_max_csv" | tr -d '\r')
+ fi
+
+ ## Inspect compare-results-internal.csv (header fields listed below)
+ if [ -f "$compare_results_internal_csv" ]; then
+
+ # benchmark,symbol,rel_sample,rel_status,rel_size,rel_num_vect_loops,rel_num_sve_loops,rel_symbol_md5sum,
+ # sample_x,sample_y,status_x,status_y,size_x,size_y,num_vect_loops_x,
+ # num_vect_loops_y,num_sve_loops_x,num_sve_loops_y,symbol_md5sum_x,symbol_md5sum_y
+
+ local first_line=true
+ local idx_bmk idx_symb idx_sample_x idx_sample_y idx_md5sum_x idx_md5sum_y
+
+ get_header_index()
+ {
+ local lookup=$1; shift
+ local -a arr=("$@")
+ local i
+ for i in "${!arr[@]}"; do
+ if [ "${arr[$i]}" == "$lookup" ]; then
+ echo "$i"
+ return
+ fi
+ done
+ }
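+ # E.g. get_header_index "sample_x" "${line[@]}" prints the zero-based
+ # column index of "sample_x" in the csv header, or nothing if absent.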
+
+ # shellcheck disable=SC2034
+ while IFS="," read -a line; do
+ if $first_line; then
+ first_line=false
+ idx_bmk=$(get_header_index "benchmark" "${line[@]}")
+ idx_symb=$(get_header_index "symbol" "${line[@]}")
+ idx_sample_x=$(get_header_index "sample_x" "${line[@]}")
+ idx_sample_y=$(get_header_index "sample_y" "${line[@]}")
+ idx_md5sum_x=$(get_header_index "symbol_md5sum_x" "${line[@]}")
+ idx_md5sum_y=$(get_header_index "symbol_md5sum_y" "${line[@]}")
+ if [ -z "$idx_bmk" ] || [ -z "$idx_symb" ] ||
+ [ -z "$idx_sample_x" ] || [ -z "$idx_sample_y" ] || [ -z "$idx_md5sum_x" ] || [ -z "$idx_md5sum_y" ]; then
+ # cannot parse this csv file
+ break
+ fi
+ fi
+ [[ "${line[$idx_symb]}" == *base.default ]] || continue
+ [ "${line[$idx_md5sum_y]}" == "${line[$idx_md5sum_x]}" ] || continue
+ [ "${line[$idx_md5sum_x]}" != "-1" ] || continue
+
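+ # Identical md5sums mean the symbol's code did not change, so any
+ # sample delta between the two runs is unexplained run-to-run
+ # variation.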
+ local var
+ var=$(echo "(${line[$idx_sample_y]}-${line[$idx_sample_x]})" | bc)
+ report_test_entry "$lnt_testsuite.${line[$idx_bmk]} (unexplained diff vs previous).execution_variation" "${var/#-/}"
+
+ var=$(echo "100 * (${line[$idx_sample_y]}-${line[$idx_sample_x]}) / (${line[$idx_sample_y]})" | bc -l)
+ report_test_entry "$lnt_testsuite.${line[$idx_bmk]} (unexplained diff vs previous) (percentage).execution_variation" "$(printf "%.2f" "${var/#-/}")"
+
+ done < <(cat "$compare_results_internal_csv" | tr -d '\r')
+ fi
+
+ local score_success_compile=0
+ local score_success_execution=0
+ local score_total_test=0
+
+ ## Inspect status.csv
+ # 531.deepsjeng_r,deepsjeng_r_base.default,failed-to-build
+ while IFS="," read -r benchmark symbol status; do
+ [[ "$symbol" == *base.default ]] || continue
+
+ local compile_success execution_success
+ case "$status" in
+ success)
+ compile_success=1
+ execution_success=1
+ ;;
+ failed-to-run)
+ compile_success=1
+ execution_success=0
+ ;;
+ failed-to-build)
+ compile_success=0
+ execution_success=0
+ ;;
+ *)
+ assert_with_msg "unknown status \"$status\" in $status_csv"
+ ;;
+ esac
+
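+ # The success flags are inverted on report: 0 means success and
+ # 1 means failure.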
+ report_test_entry "$lnt_testsuite.$benchmark.compile_status" "$((compile_success==0))"
+ report_test_entry "$lnt_testsuite.$benchmark.execution_status" "$((execution_success==0))"
+
+ score_success_compile=$((score_success_compile+compile_success))
+ score_success_execution=$((score_success_execution+execution_success))
+
+ score_total_test=$((score_total_test+1))
+
+ done < <(tail -n +2 "$status_csv" | tr -d '\r')
+
+ report_test_entry "$lnt_testsuite.nb_compile_successful.score" "$score_success_compile"
+ report_test_entry "$lnt_testsuite.nb_execution_successful.score" "$score_success_execution"
+
+ report_test_entry "$lnt_testsuite.nb_total_tests.score" "$score_total_test"
+
+ report_footer
+ ) > "$output_file"
+}
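+
+# A hypothetical invocation of the above (all argument values are examples):
+#   generate_lnt_bmk_report "https://ci.example.org/job/456" \
+#     tcwg_bmk-code_speed-cpu2017rate llvm-aarch64-master-O3 \
+#     "2024-01-01 00:00:00" - size.csv perf.csv status.csv \
+#     variability-avg.csv variability-max.csv compare-results-internal.csv \
+#     lnt-report.json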
+
+
+# ==============================================================================