diff options
Diffstat (limited to 'dashboard-generate-squad.sh')
-rwxr-xr-x | dashboard-generate-squad.sh | 422 |
1 file changed, 422 insertions, 0 deletions
#!/bin/bash

# dashboard-generate-squad.sh
#
# Generate SQUAD dashboard input from a round-robin CI build's artifacts:
# per-benchmark json files (functional / metrics / metadata), a one-line
# annotation file, and a dashboard-push-squad.sh script that uploads them
# to qa-reports.linaro.org.
#
# Required --options (parsed by convert_args_to_variables):
#   top_artifacts    : directory holding the build's artifacts
#   baseline_branch  : base-artifacts branch for this configuration
#   components       : components whose revisions go into metadata
#   run_date         : datetime recorded in metadata
# Optional:
#   verbose, squad_mode, relative_results

set -e

# shellcheck source=jenkins-helpers.sh
. "$(dirname "$0")/jenkins-helpers.sh"
# shellcheck source=round-robin.sh
. "$(dirname "$0")/round-robin.sh"

verbose=false
squad_mode=
relative_results=false

# Process args
convert_args_to_variables "$@"

obligatory_variables top_artifacts baseline_branch components run_date

declare top_artifacts baseline_branch components run_date verbose squad_mode relative_results

if $verbose ; then
    set -x
fi

# -------------------- CREATE DASHBOARD FILE -------------------
# Writing dashboard files : json & cmd.
# Reads the global dbd[] dashboard-info map prepared by the main procedure.
# Runs in a subshell so "return" and variable changes stay contained.
create_dashboard_files()
{
    (
    local dashboard_dir=${dbd[dashboard_dir]}
    local check_regression_dir=${dbd[check_regression_dir]}
    local envlist=()
    local squad_mode_ext=""
    [ x"$squad_mode" != x"" ] && squad_mode_ext="-$squad_mode"

    squaddir=$dashboard_dir/squad$squad_mode_ext
    results_csv_file=results$squad_mode_ext/results-brief.csv

    # Maps "func/<field>/<bench>" and "metrics/<metric>/<bench>" -> value.
    declare -A project_results

    echo " * create_dashboard_files ($results_csv_file)"

    if [ "${dbd[project_kind]}" == "bmk" ]; then

        # BMK PROJECTS
        func_fields=(build run)
        metric_fields=(perf size vect)

        local results_csv=$top_artifacts/$results_csv_file
        local nbtest=0 nbpass=0 nbfail=0 nbskip=0

        # -----------------------------
        # Parse results-vs-prev-brief.csv
        local header_verified=false
        # NOTE(review): the absolute columns asserted here are
        # "csv-results-0/..." while the parsing loop below reads
        # "csv-results-1/results:sample" etc. -- confirm which result set
        # the absolute values are meant to come from.
        local -a header_used=("benchmark" "symbol"
                              "csv-results-1/results:rel_sample"
                              "csv-results-1/results:rel_size"
                              "csv-results-1/results:rel_num_vect_loops"
                              "csv-results-0/results:sample"
                              "csv-results-0/results:size"
                              "csv-results-0/results:num_vect_loops")

        if [ ! -f "$results_csv" ]; then
            echo ""
            echo "WARNING: no $results_csv_file file. No data to store in dashboard."
            echo ""
            return
        fi

        # read the first line
        {
            IFS=,
            declare -A map
            read -ra headers

            # Check if necessary columns exists
            if ! $header_verified; then
                for h in "${header_used[@]}"; do
                    assert_with_msg "ERROR: $results_csv_file header doesn't contain $h field" \
                        [[ "${IFS}${headers[*]}${IFS}" =~ "${IFS}${h}${IFS}" ]]
                done
            fi

            while IFS=, read -ra values; do
                # Re-key this row's cells by column name.
                for i in "${!headers[@]}"; do
                    map["${headers[i]}"]=${values[i]}
                done

                local bench symb
                bench=${map[benchmark]}
                symb=${map[symbol]}

                local rel_sample rel_size rel_vect
                rel_sample=${map[csv-results-1/results:rel_sample]}
                rel_size=${map[csv-results-1/results:rel_size]}
                rel_vect=${map[csv-results-1/results:rel_num_vect_loops]}

                local sample1 size1 vect1
                sample1=${map[csv-results-1/results:sample]}
                size1=${map[csv-results-1/results:size]}
                vect1=${map[csv-results-1/results:num_vect_loops]}

                # Skip all "Mean" values other than "Mean,mean"
                if [ "$bench" == "Mean" ] && [ "${symb// /}" != "mean" ]; then
                    continue;
                fi

                if [[ -v project_results["func/build/$bench"] ]] \
                       && [ "${project_results["func/build/$bench"]}" != "pass" ]; then
                    : # do nothing if fail is already
                else
                    case $sample1 in
                        999999999)
                            # Magic sample value: benchmark failed to build.
                            project_results["func/build/$bench"]="fail" ; ((nbfail+=1))
                            project_results["func/run/$bench"]="skip" ; ((nbskip+=1))

                            # If failing to build, all metrics unknown (-1)
                            vect1=-1 ; size1=-1 ; sample1=-1
                            rel_vect="n/a" ; rel_size="n/a" ; rel_sample="n/a"
                            ;;
                        888888888)
                            # Magic sample value: built OK, but failed to run.
                            project_results["func/build/$bench"]="pass" ; ((nbpass+=1))
                            project_results["func/run/$bench"]="fail" ; ((nbfail+=1))

                            # If failing to run, run metrics unknown (-1)
                            sample1=-1
                            rel_sample="n/a"
                            ;;
                        *)
                            project_results["func/build/$bench"]="pass" ; ((nbpass+=1));
                            project_results["func/run/$bench"]="pass" ; ((nbpass+=1));
                            ;;
                    esac

                    if $relative_results; then
                        # Store metrics as percent delta vs baseline
                        # (csv holds 100-based ratios).
                        [ "$rel_vect" != "n/a" ] && project_results["metrics/vect/$bench"]="$((rel_vect-100))"
                        [ "$rel_size" != "n/a" ] && project_results["metrics/size/$bench"]="$((rel_size-100))"
                        [ "$rel_sample" != "n/a" ] && project_results["metrics/perf/$bench"]="$((rel_sample-100))"
                    else
                        # Store absolute metrics; -1 means "unknown", skip it.
                        [ "$vect1" != "-1" ] && project_results["metrics/vect/$bench"]="$vect1"
                        [ "$size1" != "-1" ] && project_results["metrics/size/$bench"]="$size1"
                        [ "$sample1" != "-1" ] && project_results["metrics/perf/$bench"]="$sample1"
                    fi

                    envlist+=("$bench")
                    ((nbtest+=2))
                fi

                # echo " $bench : ${project_results["func/build/$bench"]}, ${project_results["func/run/$bench"]}, "\
                #      "${project_results["metrics/perf/$bench"]}, ${project_results["metrics/size/$bench"]}, ${project_results["metrics/vect/$bench"]}, $md5_1"

            done
            unset IFS
        } < "$results_csv"

        echo " [$nbtest test : $nbpass pass, $nbfail fail, $nbskip skip]"

    elif [ "${dbd[project_kind]}" == "kernel" ]; then
        # KERNEL PROJECTS
        :
        # NOTE(review): second argument "true" makes this assert always
        # pass, so the message can never fire -- presumably a placeholder
        # until kernel support is implemented; confirm intent.
        assert_with_msg "dashboard creation for kernel projects not implemented yet" true
        func_fields=(build run)
        metric_fields=(score)

    else # ${dbd[project_kind]}=="other"
        # OTHER PROJECTS : basic.
        # using score only.
        func_fields=(build run)
        metric_fields=(score)
        # one single bench called "test"
        project_results["func/build/test"]="pass"
        project_results["func/run/test"]="pass"
        project_results["metrics/score/test"]="${dbd['score']}"

        # FIX: was envlist=("score"), which did not match the
        # project_results keys above ("*/test") and produced empty json.
        envlist=("test")
    fi

    # get benchs sorted to have 001.Mean first
    mapfile -t envlist < <(printf "%s\n" "${envlist[@]}" | sort -n)

    # -----------------------------
    # Write json files
    # for each bench :
    #   results-functional.json, results-metrics.json, results-metadata.json
    rm -rf "$squaddir"
    mkdir -p "$squaddir"

    echo " - writing json for squad"

    # Generate 3 results files : functional, metrics, metadata
    #
    for bench in "${envlist[@]}"; do

        # echo " - writing $squaddir/$bench"
        mkdir -p "$squaddir/$bench"

        # results-functional.json : for now, only @func_fields=(build run)
        resfile=$squaddir/$bench/results-functional.json
        echo "{" > "$resfile"
        if [ "${func_fields[*]}" == "build run" ]; then
            echo " \"build\" : \"${project_results["func/build/$bench"]}\"," >>"$resfile"
            echo " \"run\" : \"${project_results["func/run/$bench"]}\"" >>"$resfile"
        fi
        echo "}" >> "$resfile"

        # results-metrics.json : @metric_fields=(perf size vect) / (score)
        local comma=","
        local metrics_to_show=()

        resfile=$squaddir/$bench/results-metrics.json
        echo "{" > "$resfile"
        for metric in "${metric_fields[@]}"; do
            [[ -v project_results["metrics/$metric/$bench"] ]] && metrics_to_show+=("$metric")
        done
        for metric in "${metrics_to_show[@]}"; do
            # No trailing comma after the last entry, to keep json valid.
            [ "$metric" == "${metrics_to_show[-1]}" ] && comma=""
            echo " \"$metric\" : \"${project_results["metrics/$metric/$bench"]}\"$comma" >>"$resfile"
        done
        echo "}" >> "$resfile"

        # results-metadata.json
        resfile=$squaddir/$bench/results-metadata.json
        local base_artifacts_url=https://git-us.linaro.org/toolchain/ci/base-artifacts.git

        # blanks below are used to order displayed metadata table (dirty!)
        cat > "$resfile" << EOF
{
 " job_status": "${dbd['job_status']//\"/\\\"}",
 " details": "$base_artifacts_url/plain/notify/mail-body.txt?h=${dbd['base-artifacts_branch']}&id=${dbd['base-artifacts_sha1']}",
 "datetime": "${dbd['datetime']}",
 "build_url": "${dbd['master_job_url']}",
 "build_log": "${dbd['master_job_url']}console",
EOF

        # Only emit version entries for components present in this build.
        [[ -v dbd['binutils_rev'] ]] && echo " \"version_binutils\":\"${dbd['binutils_rev']}\"," >> "$resfile"
        [[ -v dbd['gcc_rev'] ]] && echo " \"version_gcc\": \"${dbd['gcc_rev']}\"," >> "$resfile"
        [[ -v dbd['glibc_rev'] ]] && echo " \"version_glibc\": \"${dbd['glibc_rev']}\"," >> "$resfile"
        [[ -v dbd['llvm_rev'] ]] && echo " \"version_llvm\": \"${dbd['llvm_rev']}\"," >> "$resfile"
        [[ -v dbd['linux_rev'] ]] && echo " \"version_linux\": \"${dbd['linux_rev']}\"," >> "$resfile"
        [[ -v dbd['qemu_rev'] ]] && echo " \"version_qemu\": \"${dbd['qemu_rev']}\"," >> "$resfile"

        cat >> "$squaddir/$bench/results-metadata.json" << EOF
 "artifact_results": "$base_artifacts_url/tree/?h=${dbd['base-artifacts_branch']}&id=${dbd['base-artifacts_sha1']}"
}
EOF

    done

    # -----------------------------
    # Generate one annotation file : A single word summary
    # Status is displayed in job_status of metadata.
    #
    resfile=$squaddir/summary-annotation.txt
    touch "$resfile"
    if [ -f "$dashboard_dir/../mail-subject.txt" ]; then
        mail_subject=$(cat "$dashboard_dir/../mail-subject.txt")
        case "$mail_subject" in
            *"grew in size"*) echo "Regression(size)" >> "$resfile" ;;
            *"reduced in size"*) echo "Improvement(size)" >> "$resfile" ;;
            *"slowed down"*) echo "Regression(speed)" >> "$resfile" ;;
            *"speeds up"*) echo "Improvement(speed)" >> "$resfile" ;;
            *"reduced by"*) echo "Regression(vect)" >> "$resfile" ;;
            *"increased up by"*) echo "Improvement(vect)" >> "$resfile" ;;
            *"failed to build"*) echo "Regression(build failed)" >> "$resfile" ;;
            *"built OK, but failed to run"*) echo "Improvement(run still failed)" >> "$resfile" ;;
            *"failed to run"*) echo "Regression(run failed)" >> "$resfile" ;;
            *"No change"*) ;; # No annotation
            *) ;; # No annotation
        esac
    fi

    # -----------------------------
    # Generate one command file to push all benches
    #
    local pushcmdfile=$squaddir/dashboard-push-squad.sh
    rm -f "$pushcmdfile"

    echo " - generating cmd to push results"

    local squad_server prj grp bld squad_url
    local results_results results_metrics results_metadata

    squad_server=https://qa-reports.linaro.org/
    # TCWG_SQUAD_TOKEN is defined in credentials
    grp="${FORCE_SQUAD_GRP-${dbd[ci_project]}}"
    prj="${FORCE_SQUAD_PJT-${dbd[ci_config]}}"
    # Jenkins build number is the 6th '/'-separated field of the job url.
    bld="$(echo "${dbd[master_job_url]}" | cut -d/ -f6)"

    # FIX: the reachability check used "wget -q -o /dev/null", which sends
    # the *log* to /dev/null (redundant with -q) and still saves the page
    # into the cwd; -O /dev/null discards the downloaded document instead.
    # NOTE(review): base_artifacts_rev below is deliberately unescaped, so
    # "git -C base-artifacts" runs at *generation* time -- confirm.
    cat > "$pushcmdfile" << EOF
#!/bin/bash
cd \$(dirname \$0)

set -ex

squad_server=$squad_server

if ! wget -q -O /dev/null $squad_server/$grp/$prj/; then
  echo "WARNING: No project under $squad_server/$grp/$prj/"
  exit 0
fi
if [ ! -v TCWG_SQUAD_TOKEN ]; then
  echo "ERROR: No TCWG_SQUAD_TOKEN defined in your environment"
  exit 1
fi

top_artifacts=\$(pwd)/../..
base_artifacts_rev=$(git -C base-artifacts rev-parse ${rr[baseline_branch]})
sed -i -e "s|#BASE-ARTIFACTS-REV#|\$base_artifacts_rev|" */results-metadata.json

echo "Uploading results to $squad_server/$grp/$prj/$bld"

set +x

EOF

    # If there's no squad project specified, let push cmd file as empty.
    if [ x"$grp" == x"" ]; then
        # FIX: the closing \" was missing, which left an unterminated
        # string (syntax error) in the generated push script.
        echo "echo \"WARNING : Nowhere to push results. grp is empty.\"" >> "$pushcmdfile"
        echo "WARNING : Nowhere to push results. grp is empty."
    else
        for bench in "${envlist[@]}"; do

            squad_url=$squad_server/api/submit/$grp/$prj/$bld/$bench

            # result files
            results_results="$bench/results-functional.json"
            results_metrics="$bench/results-metrics.json"
            results_metadata="$bench/results-metadata.json"

            cat >> "$pushcmdfile" << EOF
echo "pushing $squad_url"
curl --silent --header "Authorization: Token \$TCWG_SQUAD_TOKEN" \\
  --form tests=@$results_results \\
  --form metrics=@$results_metrics \\
  --form metadata=@$results_metadata \\
  $squad_url

EOF
        done

        # if annotation file empty, do not push it.
        if [ x"$(cat "$squaddir/summary-annotation.txt")" != x"" ]; then
            cat >> "$pushcmdfile" << EOF
# Add annotation for this build
api_of_this_build=\$(curl -s \$squad_server/$grp/$prj/build/$bld/ | \\
  grep 'api view of this build' | sed -e 's|.*<a href="\(.*\)">.*|\1|')

curl --header "Authorization: Token \$TCWG_SQUAD_TOKEN" --data "description=$(cat $squaddir/summary-annotation.txt)&build=\$api_of_this_build" \$squad_server/api/annotations/

EOF
        fi
    fi

    chmod a+x "$pushcmdfile"
    )
}


# -------------------- MAIN PROCEDURE -------------------
set -euf -o pipefail

# For a short time manifests used "debug" array, so we need
# to declare it when sourcing these manifests.
# shellcheck disable=SC2034
declare -A debug

# Source manifest
manifest=""
declare check_regression_dir
[ -f "$top_artifacts/manifest.sh" ] && manifest=manifest.sh
[ -f "$top_artifacts/jenkins/manifest.sh" ] && manifest=jenkins/manifest.sh
[ x"$manifest" == x"" ] && error "Manifest not found"
# shellcheck disable=SC1090
source "$top_artifacts/$manifest"

# override by top_artifacts in this context
rr[top_artifacts]=$top_artifacts

# set useful infos for dashboard
#
declare -A dbd

dbd[master_job_url]="${BUILD_URL-$(pwd)}"

dbd[ci_project]="${rr[ci_project]}"
dbd[ci_config]="${rr[ci_config]}"

dbd[base-artifacts_branch]="$baseline_branch"

if [ -d "$top_artifacts/.git" ]; then
    dbd[base-artifacts_sha1]="$(git -C "$top_artifacts" show --no-patch --pretty=%h)"
else
    # Placeholder; substituted with the real rev by dashboard-push-squad.sh.
    dbd[base-artifacts_sha1]='#BASE-ARTIFACTS-REV#'
fi

dbd[datetime]=$run_date

# Derive the project kind from the baseline branch name.
case $baseline_branch in
    */tcwg_kernel*) dbd[project_kind]="kernel" ;;
    */tcwg_bmk*) dbd[project_kind]="bmk" ;;
    *) dbd[project_kind]="other" ;;
esac

dbd[check_regression_dir]=$(find "$top_artifacts"/ -maxdepth 2 -name "*-check_regression")

dbd[dashboard_dir]=$top_artifacts/notify/dashboard

dbd[is_regression_build]=false
[ -f "${dbd[check_regression_dir]}"/results.regressions ] && dbd[is_regression_build]=true

# Last non-comment line of the results file is the overall score.
dbd['score']=$(grep -v '^#' "$top_artifacts/results" | tail -1)

dbd[job_status]="Success"
[ -f "$top_artifacts/notify/mail-subject.txt" ] && dbd[job_status]="$(cat "$top_artifacts/notify/mail-subject.txt")"

for c in $components; do
    dbd[${c}_rev]="${rr[${c}_rev]}"
done

# Best-effort: results_id only exists for benchmark builds.
dbd[benchmark_logs]=bkp-01:/home/tcwg-benchmark/results-$(cat "$top_artifacts/results_id" || echo '<unknown>')

# ----------------------------
# Generate dashboard files : json files, push cmd file
create_dashboard_files