#!/bin/bash

set -e

# shellcheck source=jenkins-helpers.sh
. "$(dirname $0)/jenkins-helpers.sh"
# shellcheck source=round-robin.sh
. "$(dirname $0)/round-robin.sh"

verbose=false

# Process args
convert_args_to_variables "$@"

obligatory_variables top_artifacts baseline_branch components run_date
declare top_artifacts baseline_branch components run_date verbose

if $verbose ; then
    set -x
fi

# --------------------CREATE DASHBOARD FILE -------------------
# invoked by create_dashboard_dir
# Writing dashboard files : json & cmd
#
# Reads the global dbd[] map (filled in by the main procedure below) and
# writes, under ${dbd[dashboard_dir]}/squad :
#   - per-bench results-functional.json / results-metrics.json /
#     results-metadata.json
#   - summary-annotation.txt : one-line summary derived from mail-subject.txt
#   - dashboard-push-squad.sh : generated script uploading it all to SQUAD
# Body runs in a subshell so locals and the IFS tweak cannot leak out.
create_dashboard_files()
{
    (
    local dashboard_dir=${dbd[dashboard_dir]}
    local check_regression_dir=${dbd[check_regression_dir]}
    local bench
    local envlist=()
    declare -A project_results

    echo " * create_dashboard_files"

    if [ ${dbd[project_kind]} == "bmk" ]; then
        # BMK PROJECTS
        func_fields=(build run)
        metric_fields=(perf size vect)

        local results_csv=$check_regression_dir/results-brief.csv
        local symb rel_sample rel_size rel_vect rel_md5 sample0 sample1 size0 size1 vect0 vect1 md5_0 md5_1
        local nbtest=0 nbpass=0 nbfail=0 nbskip=0

        # -----------------------------
        # Parse results-brief.csv
        local header_verified=false
        local header_ref=("benchmark" "symbol" \
            "csv-results-1/results:rel_sample" \
            "csv-results-1/results:rel_size" \
            "csv-results-1/results:rel_num_vect_loops" \
            "csv-results-1/results:rel_symbol_md5sum" \
            "csv-results-0/results:sample" \
            "csv-results-1/results:sample" \
            "csv-results-0/results:size" \
            "csv-results-1/results:size" \
            "csv-results-0/results:num_vect_loops" \
            "csv-results-1/results:num_vect_loops" \
            "csv-results-0/results:symbol_md5sum" \
            "csv-results-1/results:symbol_md5sum")

        if [ ! -f "$results_csv" ]; then
            echo ""
            echo "WARNING: no results-brief.csv file. No data to store in dashboard."
            echo ""
            return
        fi

        IFS=','
        while read bench symb rel_sample rel_size rel_vect rel_md5 sample0 sample1 size0 size1 vect0 vect1 md5_0 md5_1; do

            # First CSV line must be the expected header; bail out otherwise.
            if ! $header_verified; then
                header=("$bench" "$symb" "$rel_sample" "$rel_size" "$rel_vect" "$rel_md5" "$sample0" "$sample1" "$size0" "$size1" "$vect0" "$vect1" "$md5_0" "$md5_1")
                if [ "${header[*]}" != "${header_ref[*]}" ]; then
                    echo ""
                    echo "ERROR: results.csv header to make sure format is conform to expected format"
                    echo " ${header_ref[*]}"
                    echo " ${header[*]}"
                    echo ""
                    return
                fi
                header_verified=true
                continue
            fi

            if [ -v project_results["func/build/$bench"] ] && [ ${project_results["func/build/$bench"]} != "pass" ]; then
                : # do nothing if fail is already
            else
                # sample1 sentinel values: 999999999 = build failed,
                # 888888888 = built OK but run failed (see branches below).
                case $sample1 in
                    999999999)
                        project_results["func/build/$bench"]="fail" ; ((nbfail+=1))
                        project_results["func/run/$bench"]="skip" ; ((nbskip+=1))
                        # If failing to build, all metrics unknown (-1)
                        vect1=-1 ; size1=-1 ; sample1=-1
                        ;;
                    888888888)
                        project_results["func/build/$bench"]="pass" ; ((nbpass+=1))
                        project_results["func/run/$bench"]="fail" ; ((nbfail+=1))
                        [ $vect1 != "-1" ] && project_results["metrics/vect/$bench"]="$vect1"
                        [ $size1 != "-1" ] && project_results["metrics/size/$bench"]="$size1"
                        # If failing to run, run metrics unknown (-1)
                        sample1=-1
                        ;;
                    *)
                        project_results["func/build/$bench"]="pass" ; ((nbpass+=1));
                        project_results["func/run/$bench"]="pass" ; ((nbpass+=1));
                        ;;
                esac
                # Record whichever metrics are known (redundant with the
                # 888888888 arm above for vect/size, but harmless).
                [ $vect1 != "-1" ] && project_results["metrics/vect/$bench"]="$vect1"
                [ $size1 != "-1" ] && project_results["metrics/size/$bench"]="$size1"
                [ $sample1 != "-1" ] && project_results["metrics/perf/$bench"]="$sample1"
                envlist+=("$bench")
                ((nbtest+=2))
            fi

            # echo " $bench : ${project_results["func/build/$bench"]}, ${project_results["func/run/$bench"]}, "\
            #      "${project_results["metrics/perf/$bench"]}, ${project_results["metrics/size/$bench"]}, ${project_results["metrics/vect/$bench"]}, $md5_1"

        done < "$results_csv"
        unset IFS

        echo " [$nbtest test : $nbpass pass, $nbfail fail, $nbskip skip]"

    elif [ ${dbd[project_kind]} == "kernel" ]; then
        # KERNEL PROJECTS :
        assert_with_msg "dashboard creation for kernel projects not implemented yet" true
        func_fields=(build run)
        metric_fields=(score)
    else # ${dbd[project_kind]}=="other"
        # OTHER PROJECTS : basic.
        # using score only.
        func_fields=(build run)
        metric_fields=(score)
        # one single bench called "test"
        project_results["func/build/test"]="pass"
        project_results["func/run/test"]="pass"
        project_results["metrics/score/test"]="${dbd['score']}"
        envlist=("score")
    fi

    # get benchs sorted to have 001.Mean first
    mapfile -t envlist < <(printf "%s\n" "${envlist[@]}" | sort -n)

    # -----------------------------
    # Write json files
    # for each bench :
    #   results-functional.json, results-metrics.json, results-metadata.json

    squaddir=$dashboard_dir/squad
    rm -rf $squaddir
    mkdir -p $squaddir

    echo " - writing json for squad"

    # Generate 3 results files : functional, metrics, metadata
    #
    for bench in "${envlist[@]}"; do

        # echo " - writing $squaddir/$bench # ${project_results["func/*/$bench"]}"
        mkdir -p $squaddir/$bench

        # results-functional.json : for now, only @func_fields=(build run)
        resfile=$squaddir/$bench/results-functional.json
        echo "{" > "$resfile"
        if [ "${func_fields[*]}" == "build run" ]; then
            echo " \"build\" : \"${project_results["func/build/$bench"]}\"," >>"$resfile"
            echo " \"run\" : \"${project_results["func/run/$bench"]}\"" >>"$resfile"
        fi
        echo "}" >> "$resfile"

        # results-metrics.json : @metric_fields=(perf size vect) / (score)
        # NOTE(review): if perf/size are emitted but vect is absent, the file
        # keeps a trailing comma (invalid strict JSON) — confirm SQUAD accepts
        # this, or reorder so the last emitted entry has no comma.
        resfile=$squaddir/$bench/results-metrics.json
        echo "{" > "$resfile"
        if [ "${metric_fields[*]}" == "perf size vect" ]; then
            [ -v project_results["metrics/perf/$bench"] ] && \
                echo " \"perf\" : \"${project_results["metrics/perf/$bench"]}\"," >>"$resfile"
            [ -v project_results["metrics/size/$bench"] ] && \
                echo " \"size\" : \"${project_results["metrics/size/$bench"]}\"," >>"$resfile"
            [ -v project_results["metrics/vect/$bench"] ] && \
                echo " \"vect\" : \"${project_results["metrics/vect/$bench"]}\"" >>"$resfile"
        elif [ "${metric_fields[*]}" == "score" ]; then
            # Score projects register their single bench under key "test"
            # (see the "other" branch above), even though envlist=("score").
            [ -v project_results["metrics/score/test"] ] && \
                echo " \"score\" : \"${project_results["metrics/score/test"]}\"" >>"$resfile"
        fi
        echo "}" >> "$resfile"

        # results-metadata.json
        resfile=$squaddir/$bench/results-metadata.json
        local base_artifacts_url=https://git-us.linaro.org/toolchain/ci/base-artifacts.git
        # blanks below are used to order displayed metadata table (dirty!)
        cat > $resfile << EOF
{
" job_status": "${dbd['job_status']}",
" details": "$base_artifacts_url/plain/mail/mail-body.txt?h=${dbd['base-artifacts_branch']}&id=${dbd['base-artifacts_sha1']}",
"datetime": "${dbd['datetime']}",
"build_url": "${dbd['master_job_url']}",
"build_log": "${dbd['master_job_url']}console",
EOF
        # Per-component revisions, only for components actually present.
        [ -v dbd['binutils_rev'] ] && \
            echo " \"version_binutils\":\"${dbd['binutils_rev']}\"," >> "$resfile"
        [ -v dbd['gcc_rev'] ] && \
            echo " \"version_gcc\": \"${dbd['gcc_rev']}\"," >> "$resfile"
        [ -v dbd['glibc_rev'] ] && \
            echo " \"version_glibc\": \"${dbd['glibc_rev']}\"," >> "$resfile"
        [ -v dbd['llvm_rev'] ] && \
            echo " \"version_llvm\": \"${dbd['llvm_rev']}\"," >> "$resfile"
        [ -v dbd['linux_rev'] ] && \
            echo " \"version_linux\": \"${dbd['linux_rev']}\"," >> "$resfile"
        [ -v dbd['qemu_rev'] ] && \
            echo " \"version_qemu\": \"${dbd['qemu_rev']}\"," >> "$resfile"
        cat >> $squaddir/$bench/results-metadata.json << EOF
"artifact_results": "$base_artifacts_url/tree/?h=${dbd['base-artifacts_branch']}&id=${dbd['base-artifacts_sha1']}"
}
EOF
    done

    # -----------------------------
    # Generate one annotation file : A single word summary
    # Status is displayed in job_status of metadata.
    #
    resfile=$squaddir/summary-annotation.txt
    touch $resfile
    if [ -f $dashboard_dir/../mail/mail-subject.txt ]; then
        mail_subject=$(cat $dashboard_dir/../mail/mail-subject.txt)
        # Map the mail subject wording onto a one-word annotation.
        case "$mail_subject" in
            *"grew in size"*)                echo "Regression(size)" >> $resfile ;;
            *"reduced in size"*)             echo "Improvement(size)" >> $resfile ;;
            *"slowed down"*)                 echo "Regression(speed)" >> $resfile ;;
            *"speeds up"*)                   echo "Improvement(speed)" >> $resfile ;;
            *"reduced by"*)                  echo "Regression(vect)" >> $resfile ;;
            *"increased up by"*)             echo "Improvement(vect)" >> $resfile ;;
            *"failed to build"*)             echo "Regression(build failed)" >> $resfile ;;
            *"built OK, but failed to run"*) echo "Improvement(run still failed)" >> $resfile ;;
            *"failed to run"*)               echo "Regression(run failed)" >> $resfile ;;
            *"No change"*)                   echo "No change" >> $resfile ;;
            *) ;; # No annotation
        esac
    fi

    # -----------------------------
    # Generate one command file to push all benches
    #
    local pushcmdfile=$squaddir/dashboard-push-squad.sh
    rm -f $pushcmdfile

    echo " - generating cmd to push results"

    local squad_server prj grp bld squad_url
    local results_results results_metrics results_metadata

    squad_server=https://qa-reports.linaro.org/
    # TCWG_SQUAD_TOKEN is defined in credentials
    # SQUAD_GRP pass through env by job definition
    grp="${FORCE_SQUAD_GRP-$SQUAD_GRP}"
    # prj/bld are the 5th/6th path components of the jenkins job url.
    prj="$(echo ${dbd[master_job_url]}|cut -d/ -f5)"
    bld="$(echo ${dbd[master_job_url]}|cut -d/ -f6)"

    # NOTE(review): 'wget -q -o /dev/null' sends the *log* to /dev/null but
    # still saves the fetched page into the cwd; '-O /dev/null' is probably
    # what was intended — confirm before changing the generated script.
    cat > $pushcmdfile << EOF
#!/bin/bash
cd \$(dirname \$0)
set -ex

squad_server=$squad_server
if ! wget -q -o /dev/null $squad_server/$grp/$prj/; then
  echo "WARNING: No project under $squad_server/$grp/$prj/"
  exit 0
fi
if [ ! -v TCWG_SQUAD_TOKEN ]; then
  echo "ERROR: No TCWG_SQUAD_TOKEN defined in your environment"
  exit 1
fi

top_artifacts=\$(pwd)/../..
if [ -f \$top_artifacts/git/base-artifacts_rev ]; then
  base_artifacts_rev=\$(cat \$top_artifacts/git/base-artifacts_rev)
  sed -i -e "s|#BASE-ARTIFACTS-REV#|\$base_artifacts_rev|" */results-metadata.json
fi

echo "Uploading results to $squad_server/$grp/$prj/$bld"
set +x
EOF

    # If there's no squad project specified, let push cmd file as empty.
    if [ x"$grp" == x"" ]; then
        local var="SQUAD_GRP"
        [ -v FORCE_SQUAD_GRP ] && var="FORCE_SQUAD_GRP"
        echo "echo \"WARNING : Nowhere to push results $var='${FORCE_SQUAD_GRP-$SQUAD_GRP}'\"" >> $pushcmdfile
        echo "WARNING : Nowhere to push results $var='${FORCE_SQUAD_GRP-$SQUAD_GRP}'"
    else
        for bench in "${envlist[@]}"; do

            squad_url=$squad_server/api/submit/$grp/$prj/$bld/$bench

            # result files
            results_results="$bench/results-functional.json"
            results_metrics="$bench/results-metrics.json"
            results_metadata="$bench/results-metadata.json"

            cat >> $pushcmdfile << EOF
echo "pushing $squad_url"
curl --silent --header "Authorization: Token \$TCWG_SQUAD_TOKEN" \\
  --form tests=@$results_results \\
  --form metrics=@$results_metrics \\
  --form metadata=@$results_metadata \\
  $squad_url
EOF
        done

        # if annotation file empty, do not push it.
        # FIX: the sed below previously read 's|.*.*|\1|' — a \1
        # back-reference with no capture group, which makes sed fail at
        # runtime and aborts the generated script under its 'set -ex'.
        # Reconstructed as extracting the href of the 'api view of this
        # build' anchor; NOTE(review): verify the pattern against the
        # actual SQUAD build page markup.
        if [ x"$(cat $squaddir/summary-annotation.txt)" != x"" ]; then
            cat >> $pushcmdfile << EOF

# Add annotation for this build
api_of_this_build=\$(curl -s \$squad_server/$grp/$prj/build/$bld/ | \\
  grep 'api view of this build' | sed -e 's|.*<a href="\(.*\)">.*|\1|')
curl --header "Authorization: Token \$TCWG_SQUAD_TOKEN" --data "description=$(cat $squaddir/summary-annotation.txt)&build=\$api_of_this_build" \$squad_server/api/annotations/
EOF
        fi
    fi

    chmod a+x $pushcmdfile
    )
}

# --------------------MAIN PROCEDURE -------------------

set -euf -o pipefail

# For a short time manifests used "debug" array, so we need
# to declare it when sourcing these manifests.
# shellcheck disable=SC2034 declare -A debug # Source manifest manifest="" declare check_regression_dir [ -f "$top_artifacts/manifest.sh" ] && manifest=manifest.sh [ -f "$top_artifacts/jenkins/manifest.sh" ] && manifest=jenkins/manifest.sh [ x"$manifest" == x"" ] && error "Manifest not found" # shellcheck disable=SC1090 source $top_artifacts/$manifest # override by top_artifacts in this context rr[top_artifacts]=$top_artifacts # set useful infos for dashboard # declare -A dbd dbd[master_job_url]="${BUILD_URL-$(pwd)}" dbd[base-artifacts_branch]="$baseline_branch" dbd[base-artifacts_sha1]="$(git -C $top_artifacts show --no-patch --pretty=%h || echo '#BASE-ARTIFACTS-REV#')" dbd[datetime]=$run_date case $baseline_branch in */tcwg_kernel*) dbd[project_kind]="kernel" ;; */tcwg_bmk*) dbd[project_kind]="bmk" ;; *) dbd[project_kind]="other" ;; esac dbd[check_regression_dir]=$(find $top_artifacts/ -maxdepth 2 -name "*-check_regression") dbd[dashboard_dir]=$top_artifacts/dashboard dbd[is_regression_build]=false [ -f ${dbd[check_regression_dir]}/results.regressions ] && dbd[is_regression_build]=true dbd['score']=$(grep -v '^#' $top_artifacts/results | tail -1) dbd[job_status]="Success" [ -f $top_artifacts/mail/mail-subject.txt ] && dbd[job_status]="$(cat $top_artifacts/mail/mail-subject.txt)" for c in $components; do dbd[${c}_rev]="${rr[${c}_rev]}" done dbd[benchmark_logs]=bkp-01:/home/tcwg-benchmark/results-$(cat $top_artifacts/results_id || echo '') # ---------------------------- # Generate dashboard files : json files, push cmd file create_dashboard_files