Diffstat:
-rw-r--r--  .editorconfig | 17
-rwxr-xr-x  MakeRelease.job | 7
-rwxr-xr-x  abe-bisect.sh | 2
-rwxr-xr-x  build_llvm_package.bat | 203
-rw-r--r--  ci-autotest.sh | 66
-rw-r--r--  cimonitor-configs/CI-TCWG-ARM.yaml | 31
-rw-r--r--  cimonitor-configs/CI-TCWG-BISECT.yaml | 58
-rw-r--r--  cimonitor-configs/CI-TCWG.yaml | 118
-rw-r--r--  cimonitor-configs/DEVS-TCWG.yaml | 70
-rw-r--r--  cimonitor-configs/LNT.yaml | 28
-rw-r--r--  cimonitor-configs/TCWG_BMK.yaml | 73
-rw-r--r--  cimonitor-configs/help.html | 25
-rw-r--r--  cimonitor-configs/sorting-table-css/.prettierrc.js | 7
-rwxr-xr-x  cimonitor-configs/sorting-table-css/CODE_OF_CONDUCT.md | 46
-rwxr-xr-x  cimonitor-configs/sorting-table-css/CONTRIBUTING.md | 5
-rwxr-xr-x  cimonitor-configs/sorting-table-css/LICENSE | 24
-rwxr-xr-x  cimonitor-configs/sorting-table-css/PULL_REQUEST_TEMPLATE.md | 13
-rwxr-xr-x  cimonitor-configs/sorting-table-css/README.md | 302
-rw-r--r--  cimonitor-configs/sorting-table-css/example.css | 138
-rw-r--r--  cimonitor-configs/sorting-table-css/example.css.map | 1
-rw-r--r--  cimonitor-configs/sorting-table-css/example.min.css | 1
-rw-r--r--  cimonitor-configs/sorting-table-css/example.min.css.map | 1
-rw-r--r--  cimonitor-configs/sorting-table-css/example.scss | 42
-rw-r--r--  cimonitor-configs/sorting-table-css/sortable-base.css | 45
-rw-r--r--  cimonitor-configs/sorting-table-css/sortable-base.css.map | 1
-rw-r--r--  cimonitor-configs/sorting-table-css/sortable-base.min.css | 1
-rw-r--r--  cimonitor-configs/sorting-table-css/sortable-base.min.css.map | 1
-rw-r--r--  cimonitor-configs/sorting-table-css/sortable-base.scss | 68
-rw-r--r--  cimonitor-configs/sorting-table-css/sortable.css | 82
-rw-r--r--  cimonitor-configs/sorting-table-css/sortable.css.map | 1
-rwxr-xr-x  cimonitor-configs/sorting-table-css/sortable.js | 128
-rw-r--r--  cimonitor-configs/sorting-table-css/sortable.min.css | 1
-rw-r--r--  cimonitor-configs/sorting-table-css/sortable.min.css.map | 1
-rwxr-xr-x  cimonitor-configs/sorting-table-css/sortable.min.js | 2
-rwxr-xr-x  cimonitor-configs/sorting-table-css/sortable.scss | 44
-rw-r--r--  cimonitor-configs/v1-test.yaml | 67
-rwxr-xr-x  dashboard-generate-squad.sh | 422
-rwxr-xr-x  dashboard-push-one-branch.sh | 179
-rwxr-xr-x  docker-run.sh | 5
-rw-r--r--  downstream_patches/Makefile.defaults-Atomic-creation-of-fortran-sources-spec2k6.patch | 30
-rw-r--r--  downstream_patches/Makefile.defaults-Atomic-creation-of-fortran-sources.patch | 31
-rw-r--r--  downstream_patches/llvm-vect-metric.diff | 57
-rwxr-xr-x  generate-cimonitor-dashboard.py | 1037
-rw-r--r--  jenkins-helpers.sh | 1221
-rwxr-xr-x  jenkins.sh | 20
-rw-r--r--  lnt-utils.sh | 359
-rwxr-xr-x  precommit-ssh-apply.sh | 128
-rwxr-xr-x  precommit-test.sh | 59
-rwxr-xr-x  pw-apply.sh | 164
-rwxr-xr-x  pw-helpers.sh | 400
-rwxr-xr-x  pw-report.sh | 86
-rwxr-xr-x  pw-trigger.sh | 112
-rwxr-xr-x  round-robin-baseline.sh | 624
-rwxr-xr-x  round-robin-bisect.sh | 824
-rwxr-xr-x  round-robin-notify.sh | 2397
-rw-r--r--  round-robin.sh | 1420
-rwxr-xr-x  start-container-docker.sh | 415
-rwxr-xr-x  start-container-qemu.sh | 4
-rwxr-xr-x  tcwg-benchmark-bare.sh | 228
-rwxr-xr-x  tcwg-benchmark-results-compare.sh | 27
-rw-r--r--  tcwg-benchmark-results.broken-list | 102
-rwxr-xr-x  tcwg-benchmark-results.sh | 154
-rwxr-xr-x  tcwg-benchmark.sh | 363
-rwxr-xr-x  tcwg-buildfarm.sh | 19
-rwxr-xr-x  tcwg-cleanup-stale-containers.sh | 25
-rwxr-xr-x  tcwg-cleanup-stale-results.sh | 192
-rwxr-xr-x  tcwg-cleanup-stale-workspaces.sh | 6
-rwxr-xr-x  tcwg-dev-build.sh | 18
-rwxr-xr-x  tcwg-generate-source-cache.sh | 23
-rwxr-xr-x  tcwg-llvm-build.sh | 2
-rwxr-xr-x  tcwg-llvm-release.bat | 23
-rwxr-xr-x  tcwg-llvm-release.sh | 50
-rwxr-xr-x  tcwg-lnt/create-server.sh | 158
-rwxr-xr-x  tcwg-lnt/lnt-check.sh | 73
-rw-r--r--  tcwg-lnt/tcwg-lnt-01/config | 39
-rw-r--r--  tcwg-lnt/tcwg-lnt-01/tcwg_bmk.yaml.in | 55
-rw-r--r--  tcwg-lnt/tcwg-lnt-01/tcwg_check.yaml.in | 55
-rw-r--r--  tcwg-lnt/tcwg-lnt-02/config | 26
-rw-r--r--  tcwg-lnt/tcwg-lnt-02/tcwg_check.yaml.in | 55
-rw-r--r--  tcwg-lnt/tcwg-lnt-03/config | 39
-rw-r--r--  tcwg-lnt/tcwg-lnt-03/tcwg_bmk.yaml.in | 55
-rw-r--r--  tcwg-lnt/tcwg-lnt-03/tcwg_check.yaml.in | 55
-rwxr-xr-x  tcwg-report-ci-status.sh | 53
-rwxr-xr-x  tcwg-report-stale-rr-jobs.sh | 643
-rwxr-xr-x  tcwg-start-container.sh | 89
-rwxr-xr-x  tcwg-update-bmk-containers.sh | 44
-rwxr-xr-x  tcwg-update-host-containers.sh | 20
-rwxr-xr-x  tcwg-update-llvmbot-containers.sh | 72
-rwxr-xr-x  tcwg-update-lnt-results.sh | 93
-rwxr-xr-x  tcwg-update-tested.sh | 17
-rwxr-xr-x  tcwg-upstream2gerrit.sh | 147
-rwxr-xr-x  tcwg-wip/push-results-to-squad.sh | 364
-rwxr-xr-x  tcwg-wip/tcwg-convert-interesting-commits.sh | 219
-rwxr-xr-x  tcwg_aosp-build.sh | 494
-rwxr-xr-x  tcwg_bmk-build.sh | 800
-rw-r--r--  tcwg_bmk-config.sh | 74
-rwxr-xr-x  tcwg_chromium-build.sh | 202
-rwxr-xr-x  tcwg_dummy-build.sh | 39
-rwxr-xr-x  tcwg_gnu-build.sh | 523
-rw-r--r--  tcwg_gnu-config.sh | 177
-rwxr-xr-x  tcwg_kernel-build.sh | 179
-rwxr-xr-x  update_components_revs.sh | 47
-rwxr-xr-x  wrappers/count-wrapper.sh | 14
-rwxr-xr-x  wrappers/install-wrappers.sh | 79
-rwxr-xr-x  wrappers/shadow-ar.sh | 246
-rwxr-xr-x  wrappers/shadow-cc.sh | 345
-rwxr-xr-x  wrappers/shadow-strip.sh | 195
107 files changed, 15905 insertions(+), 2826 deletions(-)
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 00000000..531465e1
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,17 @@
+# This file helps editors auto-configure whitespace settings.
+#
+# See here for more information about the format and editor support:
+#
+# https://editorconfig.org/
+
+root = true
+
+[*]
+end_of_line = lf
+insert_final_newline = true
+tab_width = 8
+
+# Shell
+[*.{sh,conf}]
+indent_style = tab
+indent_size = 4
diff --git a/MakeRelease.job b/MakeRelease.job
index a60f8a0b..9432c3bd 100755
--- a/MakeRelease.job
+++ b/MakeRelease.job
@@ -22,6 +22,7 @@ usage() {
echo " --logsdir XXX - Specify where to upload the logs"
echo " --canadian - Perform a Canadian-cross build too"
echo " --buildnumber XXX - Specify build number"
+ echo " --check XXX - Specify package to test"
echo " --help"
exit $ret
}
@@ -63,8 +64,9 @@ manifest_src=
toolchain_config=
artifacts_top="releases"
manifest_validation=true
+check=()
-getopt -o h -l target:,release_name:,fileserver:,workspace:,toolchainconfig:,manifest:,glibc:,gcc:,binutils:,help,abedir:,binariesdir:,logsdir:,canadian,buildnumber:,artifacts_top:,manifest_validation: -Q
+getopt -o h -l target:,release_name:,fileserver:,workspace:,toolchainconfig:,manifest:,glibc:,gcc:,binutils:,help,abedir:,binariesdir:,logsdir:,canadian,buildnumber:,artifacts_top:,manifest_validation:,check: -Q
while test $# -gt 0; do
case $1 in
--abedir) abe_dir=$2 ; shift ;;
@@ -83,6 +85,7 @@ while test $# -gt 0; do
--buildnumber) buildnumber=$2 ; shift ;;
--artifacts_top) artifacts_top=$2; shift ;;
--manifest_validation) manifest_validation=$2; shift ;;
+ --check) check=("--check" "$2"); shift ;;
-h|--help) usage 0 ;;
--) break ;;
*) usage ;;
@@ -188,7 +191,7 @@ fi
# build the mingw32 compiler only if the previous cross-compiler build was
# successful.
if test ${abe_ret} -eq 0; then
- $CONFIG_SHELL ${abe_dir}/abe.sh --list-artifacts ${user_workspace}/artifacts2.txt ${update} --release ${release} --tarbin ${srcs} $target_opt ${host} --build all ${extra} >> ${logfile}
+ $CONFIG_SHELL ${abe_dir}/abe.sh --list-artifacts ${user_workspace}/artifacts2.txt ${update} --release ${release} --tarbin ${srcs} $target_opt ${host} --build all ${extra} "${check[@]}" >> ${logfile}
abe_ret=$?
manifests+=( "$(read_var ${user_workspace}/artifacts2.txt manifest)" )
fi
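The new `--check` option is collected into a Bash array rather than a plain string, so it can be forwarded verbatim, and so that nothing at all is forwarded when the option was never given. A minimal standalone sketch of that idiom (`some-command` is a placeholder, not part of the job above):

```sh
#!/bin/bash
# An empty array expands to zero words under "${check[@]}", so the
# option is only passed along when it was supplied on the command line.
check=()
while test $# -gt 0; do
    case $1 in
        --check) check=("--check" "$2"); shift ;;
    esac
    shift
done
# Appends nothing when --check was absent.
some-command "${check[@]}"
```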
diff --git a/abe-bisect.sh b/abe-bisect.sh
index 3ec83261..5fefb465 100755
--- a/abe-bisect.sh
+++ b/abe-bisect.sh
@@ -60,7 +60,7 @@ git-new-workdir ./snapshots/gcc.git gcc-bisect-dir
cd gcc-bisect-dir || exit
git bisect reset
-git reset --hard
+git reset -q --hard
git pull
git bisect start --no-checkout
diff --git a/build_llvm_package.bat b/build_llvm_package.bat
new file mode 100755
index 00000000..dbbb69d4
--- /dev/null
+++ b/build_llvm_package.bat
@@ -0,0 +1,203 @@
+@echo on
+setlocal
+
+REM Script for building the LLVM installer on Windows,
+REM used for the weekly snapshots at http://www.llvm.org/builds.
+REM
+REM Usage: build_llvm_package.bat <revision> [test]
+
+REM Prerequisites:
+REM
+REM Visual Studio 2019, CMake, Ninja, GNUWin32, SWIG, Python 3,
+REM NSIS with the strlen_8192 patch,
+REM Visual Studio 2019 SDK and Nuget (for the clang-format plugin),
+REM Perl (for the OpenMP run-time), 7Zip.
+REM
+REM
+REM For LLDB, SWIG version <= 3.0.8 needs to be used to work around
+REM https://github.com/swig/swig/issues/769
+
+
+REM You need to modify the paths below:
+set vsdevcmd=C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\Common7\Tools\VsDevCmd.bat
+
+set python32_dir=C:\Users\%USERNAME%\AppData\Local\Programs\Python\Python36-32
+set python64_dir=C:\Users\%USERNAME%\AppData\Local\Programs\Python\Python36
+
+for /f "usebackq" %%i in (`PowerShell ^(Get-Date^).ToString^('yyyyMMdd'^)`) do set datestamp=%%i
+
+set revision=%1
+
+if "%2" == "test" (
+ set git_ref=%revision%
+ set package_version=%revision:~0,8%
+) else (
+ set git_ref=llvmorg-%revision%
+ set package_version=%revision%
+)
+
+REM Extract the release version from a revision of the kind x.y.z-rcN
+REM FIXME: Handle other kinds of revisions
+REM FIXME: Error out if we get something inappropriate.
+for /f "delims=-" %%i in ("%revision%") do (
+ set release_version=%%i
+)
+
+set clang_format_vs_version=13.0.0.%datestamp%
+set build_dir=llvm_package_%package_version%
+
+echo Revision: %revision%
+echo Package version: %package_version%
+echo Clang format plugin version: %clang_format_vs_version%
+echo Build dir: %build_dir%
+echo.
+REM pause
+
+mkdir %build_dir%
+cd %build_dir%
+
+echo Checking out %git_ref%
+curl -L https://github.com/llvm/llvm-project/archive/%git_ref%.zip -o src.zip || exit /b
+7z x src.zip || exit /b
+mv llvm-project-* llvm-project || exit /b
+
+REM Setting CMAKE_CL_SHOWINCLUDES_PREFIX to work around PR27226.
+set cmake_flags=^
+ -DCMAKE_BUILD_TYPE=Release ^
+ -DCMAKE_TRY_COMPILE_CONFIGURATION=Release ^
+ -DLLVM_ENABLE_ASSERTIONS=ON ^
+ -DLLVM_INSTALL_TOOLCHAIN_ONLY=ON ^
+ -DLLVM_BUILD_LLVM_C_DYLIB=ON ^
+ -DCMAKE_INSTALL_UCRT_LIBRARIES=ON ^
+ -DPACKAGE_VERSION=%package_version% ^
+ -DCMAKE_CL_SHOWINCLUDES_PREFIX="Note: including file: " ^
+ -DLLVM_DEFAULT_TARGET_TRIPLE=aarch64-pc-windows-msvc ^
+ -DLLVM_HOST_TRIPLE=aarch64-pc-windows-msvc ^
+ -DLLVM_TARGET_ARCH=AArch64 ^
+ -DCLANG_DEFAULT_LINKER=lld
+
+REM TODO: Run the "check-all" tests.
+
+set "VSCMD_START_DIR=%CD%"
+call "%vsdevcmd%" -host_arch=x86 -arch=arm64
+@echo on
+set CC=clang-cl
+set CXX=clang-cl
+mkdir build32_stage0
+cd build32_stage0
+REM Note that we only enable compiler-rt so we can build the builtins, which are
+REM needed in the next stage when building flang
+cmake -GNinja %cmake_flags% ^
+ -DLLVM_ENABLE_PROJECTS="clang;clang-tools-extra;lld;compiler-rt" ^
+ -DLLVM_TARGETS_TO_BUILD="AArch64" ^
+ -DCMAKE_C_FLAGS="-fms-compatibility-version=19.20" ^
+ -DCMAKE_CXX_FLAGS="-fms-compatibility-version=19.20" ^
+  -DCOMPILER_RT_BUILD_BUILTINS=ON ^
+  -DCOMPILER_RT_BUILD_CRT=OFF ^
+  -DCOMPILER_RT_BUILD_SANITIZERS=OFF ^
+  -DCOMPILER_RT_BUILD_XRAY=OFF ^
+  -DCOMPILER_RT_BUILD_LIBFUZZER=OFF ^
+  -DCOMPILER_RT_BUILD_PROFILE=OFF ^
+  -DCOMPILER_RT_BUILD_MEMPROF=OFF ^
+  -DCOMPILER_RT_BUILD_ORC=OFF ^
+  -DCOMPILER_RT_BUILD_GWP_ASAN=OFF ^
+ ..\llvm-project\llvm || exit /b
+ninja all || ninja all || ninja all || exit /b
+REM ninja check || ninja check || ninja check || exit /b
+REM ninja check-clang || ninja check-clang || ninja check-clang || exit /b
+REM ninja check-lld || ninja check-lld || ninja check-lld || exit /b
+REM ninja check-sanitizer || ninja check-sanitizer || ninja check-sanitizer || exit /b
+REM ninja check-clang-tools || ninja check-clang-tools || ninja check-clang-tools || exit /b
+REM ninja check-clangd || ninja check-clangd || ninja check-clangd || exit /b
+cd..
+
+mkdir build32
+cd build32
+set CC=..\build32_stage0\bin\clang-cl
+set CXX=..\build32_stage0\bin\clang-cl
+set builtins=%cd%\..\build32_stage0\lib\clang\%release_version%\lib\windows\clang_rt.builtins-aarch64.lib
+set "LLDB_USE_LLDB_SERVER=1"
+cmake -GNinja %cmake_flags% ^
+ -DLLVM_ENABLE_PROJECTS="clang;clang-tools-extra;flang;lld;lldb;compiler-rt" ^
+ -DCMAKE_C_FLAGS="-fms-compatibility-version=19.20" ^
+ -DCMAKE_CXX_FLAGS="-fms-compatibility-version=19.20" ^
+ -DCOMPILER_RT_BUILD_SANITIZERS=OFF ^
+ -DCOMPILER_RT_BUILD_MEMPROF=OFF ^
+ -DCOMPILER_RT_BUILD_XRAY=OFF ^
+ -DLLDB_ENABLE_PYTHON=ON ^
+ -DLLDB_RELOCATABLE_PYTHON=ON ^
+ -DLLDB_EMBED_PYTHON_HOME=OFF ^
+ -DCMAKE_EXE_LINKER_FLAGS=%builtins% ^
+ -DCMAKE_SHARED_LINKER_FLAGS=%builtins% ^
+ -DCMAKE_STATIC_LINKER_FLAGS=%builtins% ^
+ -DLLDB_TEST_USER_ARGS="--skip-category=watchpoint" ^
+ -DCMAKE_MODULE_LINKER_FLAGS=%builtins% ^
+ ..\llvm-project\llvm || exit /b
+ninja all || ninja all || ninja all || exit /b
+ninja check-lldb || ninja check-lldb || ninja check-lldb
+ninja check-flang || ninja check-flang || ninja check-flang
+REM ninja check || ninja check || ninja check || exit /b
+REM ninja check-clang || ninja check-clang || ninja check-clang || exit /b
+REM ninja check-lld || ninja check-lld || ninja check-lld || exit /b
+REM ninja check-sanitizer || ninja check-sanitizer || ninja check-sanitizer || exit /b
+REM ninja check-clang-tools || ninja check-clang-tools || ninja check-clang-tools || exit /b
+REM ninja check-clangd || ninja check-clangd || ninja check-clangd || exit /b
+ninja package || exit /b
+
+rename LLVM-%package_version%-win64.exe LLVM-%package_version%-woa64.exe || exit /b
+7z x LLVM-%package_version%-woa64.exe -orepack || exit /b
+rmdir /s /q repack\$PLUGINSDIR || exit /b
+del repack\Uninstall.exe || exit /b
+7z a LLVM-%package_version%-woa64.zip .\repack\* -mx9 || exit /b
+cd ..
+
+exit /b
+
+REM The plug-in is built separately as it uses a statically linked clang-format.exe.
+mkdir build_vsix
+cd build_vsix
+REM Having VSSDKINSTALL set makes devenv *not* find the SDK for some reason.
+set VSSDKINSTALL=
+set CC=..\build32_stage0\bin\clang-cl
+set CXX=..\build32_stage0\bin\clang-cl
+cmake -GNinja %cmake_flags% -DLLVM_USE_CRT_RELEASE=MT -DBUILD_CLANG_FORMAT_VS_PLUGIN=ON -DPYTHON_HOME=%python32_dir% -DPYTHON_EXECUTABLE=%python32_dir%\python.exe ..\llvm-project\llvm || exit /b
+ninja clang_format_vsix || exit /b
+copy ..\llvm-project\llvm\tools\clang\tools\clang-format-vs\ClangFormat\bin\Release\ClangFormat.vsix ClangFormat-r%revision%.vsix
+cd ..
+
+
+set "VSCMD_START_DIR=%CD%"
+call "%vsdevcmd%" -arch=amd64
+set CC=
+set CXX=
+mkdir build64_stage0
+cd build64_stage0
+cmake -GNinja %cmake_flags% -DPYTHON_HOME=%python64_dir% -DPYTHON_EXECUTABLE=%python64_dir%\python.exe ..\llvm-project\llvm || exit /b
+ninja all || ninja all || ninja all || exit /b
+ninja check || ninja check || ninja check || exit /b
+ninja check-clang || ninja check-clang || ninja check-clang || exit /b
+ninja check-lld || ninja check-lld || ninja check-lld || exit /b
+ninja check-sanitizer || ninja check-sanitizer || ninja check-sanitizer || exit /b
+ninja check-clang-tools || ninja check-clang-tools || ninja check-clang-tools || exit /b
+ninja check-clangd || ninja check-clangd || ninja check-clangd || exit /b
+cd..
+
+mkdir build64
+cd build64
+set CC=..\build64_stage0\bin\clang-cl
+set CXX=..\build64_stage0\bin\clang-cl
+cmake -GNinja %cmake_flags% -DPYTHON_HOME=%python64_dir% -DPYTHON_EXECUTABLE=%python64_dir%\python.exe ..\llvm-project\llvm || exit /b
+ninja all || ninja all || ninja all || exit /b
+ninja check || ninja check || ninja check || exit /b
+ninja check-clang || ninja check-clang || ninja check-clang || exit /b
+ninja check-lld || ninja check-lld || ninja check-lld || exit /b
+ninja check-sanitizer || ninja check-sanitizer || ninja check-sanitizer || exit /b
+ninja check-clang-tools || ninja check-clang-tools || ninja check-clang-tools || exit /b
+ninja check-clangd || ninja check-clangd || ninja check-clangd || exit /b
+ninja package || exit /b
+
+7z x LLVM-%package_version%-win64.exe -orepack
+rmdir /s /q repack\$PLUGINSDIR
+del repack\Uninstall.exe
+7z a LLVM-%package_version%-win64.zip .\repack\* -mx9
+cd ..
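The `for /f "delims=-"` loop near the top of this script keeps only the text before the first `-`, turning a revision such as `13.0.0-rc2` into the release version `13.0.0` (hence the FIXME about other revision formats). For readers more at home in this repo's shell scripts, a hedged Bash equivalent of the same extraction:

```sh
#!/bin/bash
# Strip everything from the first "-" onwards: 13.0.0-rc2 -> 13.0.0.
revision="13.0.0-rc2"
release_version=${revision%%-*}
echo "$release_version"   # prints 13.0.0
```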
diff --git a/ci-autotest.sh b/ci-autotest.sh
new file mode 100644
index 00000000..32aa3202
--- /dev/null
+++ b/ci-autotest.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+
+# ci_autotest harness to be reworked.
+#
+#
+
+
+# Function to check whether a known guilty commit is included in the current sources.
+ci_autotest_check_guilty_commit()
+{
+ (
+ set -euf -o pipefail
+
+ local current_stage=$1
+
+ # shellcheck disable=SC2154
+ if [ x"${rr[ci_autotest]}" != x"$current_stage" ]; then
+ return
+ fi
+
+ # get guilty commits
+ rm -rf guilty_commits
+ scp -r -q \
+ bkp-01.tcwglab:/home/tcwg-buildslave/ci_autotest/${rr[ci_project]}/${rr[ci_config]}/guilty_commits \
+ guilty_commits
+
+ local -A guilty_commits
+ for depfile in $(find guilty_commits -name '*_rev' || true); do
+ dep=${depfile#guilty_commits/}
+ dep=${dep%_rev}
+ guilty_commits[$dep]=$(cat "$depfile")
+ done
+
+ # check if any guilty commits is included
+ for dep in "${!guilty_commits[@]}"; do
+ if ! [ -f "${rr[top_artifacts]}/git/${dep}_rev" ]; then
+ echo "CI_AUTOTEST : Cannot check if $dep guilty commit is included. $dep not tracked"
+ continue
+ fi
+
+ dep_dir=${dep}
+ if [ -f "${rr[top_artifacts]}/git/${dep}_dir" ]; then
+ dep_dir=$(get_current_git ${dep}_dir)
+ fi
+ if ! [ -d "$dep_dir" ]; then
+ echo "CI_AUTOTEST : Cannot check if $dep guilty commit is included. Cannot find the sources"
+ continue
+ fi
+
+ # check if the guilty commit is an ancestor of the currently checked-out HEAD
+ if git -C "$dep_dir" merge-base --is-ancestor ${guilty_commits[$dep]} HEAD >& /dev/null; then
+ echo "CI_AUTOTEST : Found guilty commit included in [$dep] : ${guilty_commits[$dep]}"
+ # found a regression
+ return 1
+ fi
+ done
+
+ echo "CI_AUTOTEST : Didn't find any of the guilty commits"
+ for dep in "${!guilty_commits[@]}"; do
+ echo " - [$dep] : ${guilty_commits[$dep]}"
+ done
+ return 0
+ )
+}
+
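The heart of the check above is `git merge-base --is-ancestor A B`, which exits 0 exactly when commit A is reachable from commit B. A standalone sketch of how the function uses it (the repository path and commit hash below are made-up examples):

```sh
#!/bin/bash
# Exit status 0 means $guilty is an ancestor of HEAD, i.e. the guilty
# commit is part of the currently checked-out history.
guilty=1a2b3c4d                      # hypothetical commit hash
if git -C /path/to/repo merge-base --is-ancestor "$guilty" HEAD; then
    echo "guilty commit is included"
else
    echo "guilty commit is not included"
fi
```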
diff --git a/cimonitor-configs/CI-TCWG-ARM.yaml b/cimonitor-configs/CI-TCWG-ARM.yaml
new file mode 100644
index 00000000..f2d126fe
--- /dev/null
+++ b/cimonitor-configs/CI-TCWG-ARM.yaml
@@ -0,0 +1,31 @@
+format:
+ server: ci.linaro.org
+ filename: "CI-TCWG-ARM.html"
+ links:
+ - LNT
+
+ details_table:
+ columns:
+ - project
+ - status
+ - last_build
+ - last_success
+ - last_fail
+ #- all_runs
+ #- last_regressed
+ - last_forced
+ - statistics
+ #- last_bisect
+ #- useful_links
+ lines:
+ - "tcwg_bootstrap_build--master-.*-bootstrap-build"
+ - "tcwg_bootstrap_check--master-.*-check_bootstrap-build"
+ - "tcwg_gnu_cross_build--master-.*-build"
+ - "tcwg_gnu_cross_check_binutils--master-.*-build"
+ - "tcwg_gnu_cross_check_gcc--master-.*-build"
+ - "tcwg_gnu_embed_build--master-.*-build"
+ - "tcwg_gnu_embed_check_binutils--master-.*-build"
+ - "tcwg_gnu_embed_check_gcc--master-.*-build"
+
+pattern:
+ - tcwg
\ No newline at end of file
diff --git a/cimonitor-configs/CI-TCWG-BISECT.yaml b/cimonitor-configs/CI-TCWG-BISECT.yaml
new file mode 100644
index 00000000..c0c0c263
--- /dev/null
+++ b/cimonitor-configs/CI-TCWG-BISECT.yaml
@@ -0,0 +1,58 @@
+format:
+ server: ci.linaro.org
+ filename: "tcwg-bisect.html"
+
+ links:
+ - tcwg
+
+ summary_table_last_run:
+ columns:
+ - last_run_date
+ - "#success"
+ - "#failure"
+ - "#regressed"
+ lines:
+ - anytime
+ - last-week
+ - 6-days-ago
+ - 5-days-ago
+ - 4-days-ago
+ - 3-days-ago
+ - 2-days-ago
+ - 1-days-ago
+ - 0-days-ago
+
+ summary_table_all_runs:
+ columns:
+ - last_run_date
+ - "#success"
+ - "#failure"
+ - "#regressed"
+ - "#forced"
+ - "#reducing"
+ - "#aborted"
+ lines:
+ - anytime
+ - last-week
+ - 6-days-ago
+ - 5-days-ago
+ - 4-days-ago
+ - 3-days-ago
+ - 2-days-ago
+ - 1-days-ago
+ - 0-days-ago
+
+ details_table:
+ columns:
+ - project
+ - status
+ - last_build
+ - last_success
+ - last_fail
+ - last_forced
+ - statistics
+ lines:
+ - "@pattern@.*-bisect"
+
+pattern:
+ - tcwg
diff --git a/cimonitor-configs/CI-TCWG.yaml b/cimonitor-configs/CI-TCWG.yaml
new file mode 100644
index 00000000..242451b5
--- /dev/null
+++ b/cimonitor-configs/CI-TCWG.yaml
@@ -0,0 +1,118 @@
+format:
+ server: ci.linaro.org
+ filename: "@pattern@.html"
+ links:
+ - tcwg
+ - tcwg-bisect
+ - CI-TCWG-ARM
+ - LNT
+ - "@pattern@"
+
+ summary_table_last_run:
+ columns:
+ - last_run_date
+ - "#success"
+ - "#failure"
+ - "#regressed"
+ lines:
+ - anytime
+ - last-week
+ - 6-days-ago
+ - 5-days-ago
+ - 4-days-ago
+ - 3-days-ago
+ - 2-days-ago
+ - 1-days-ago
+ - 0-days-ago
+
+ summary_table_all_runs:
+ columns:
+ - last_run_date
+ - "#success"
+ - "#failure"
+ - "#regressed"
+ - "#forced"
+ - "#reducing"
+ - "#aborted"
+ lines:
+ - anytime
+ - last-week
+ - 6-days-ago
+ - 5-days-ago
+ - 4-days-ago
+ - 3-days-ago
+ - 2-days-ago
+ - 1-days-ago
+ - 0-days-ago
+
+ details_table:
+ columns:
+ - project
+ - status
+ - last_build
+ - notify_verif
+ - last_success
+ - last_fail
+ #- all_runs
+ #- last_regressed
+ - last_forced
+ - artifact_version
+ - statistics
+ #- last_bisect
+ #- useful_links
+ lines:
+ - "@pattern@.*-build"
+
+pattern:
+ - tcwg
+
+ - tcwg_bmk
+ - tcwg_bmk-code_size
+ - tcwg_bmk-code_size-coremark
+ - tcwg_bmk-code_size-spec2k6
+ - tcwg_bmk-code_speed
+ - tcwg_bmk-code_speed-coremark
+ - tcwg_bmk-code_speed-spec2k6
+ - tcwg_bmk-fujitsu_speed
+ - tcwg_bmk-fujitsu_speed-cpu2017rate
+ - tcwg_bmk-sve_speed
+ - tcwg_bmk-sve_speed-cpu2017rate
+ - tcwg_bmk-vect_speed
+ - tcwg_bmk-vect_speed-cpu2017
+ - tcwg_bmk-vect_speed-spec2k6
+
+ - tcwg_aosp
+ - tcwg_aosp-code_size
+
+ - tcwg_binutils
+ - tcwg_binutils_build
+ - tcwg_binutils_check
+
+ - tcwg_bootstrap
+ - tcwg_bootstrap_build
+ - tcwg_bootstrap_check
+
+ - tcwg_gcc
+ - tcwg_gcc_build
+ - tcwg_gcc_check
+
+ - tcwg_gdb
+ - tcwg_gdb_build
+ - tcwg_gdb_check
+
+ - tcwg_glibc
+ - tcwg_glibc_build
+ - tcwg_glibc_check
+
+ - tcwg_gnu
+ - tcwg_gnu_embed
+ - tcwg_gnu_cross
+ - tcwg_gnu_native
+ - tcwg_gnu_native_check
+ - tcwg_gnu_native_fast_check
+ - tcwg_gnu_woa
+
+ - tcwg_kernel
+ - tcwg_kernel--gnu
+ - tcwg_kernel--llvm
+
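These YAML files are templates: each entry under `pattern:` is substituted for the `@pattern@` placeholder in `filename` and in the `details_table` line regexes, yielding one dashboard page per pattern. The expansion is presumably performed by `generate-cimonitor-dashboard.py`; the loop below is only a hypothetical shell illustration of the idea:

```sh
#!/bin/bash
# Hypothetical sketch: render one concrete config per pattern entry.
mkdir -p out
for pattern in tcwg tcwg_bmk tcwg_gcc_check; do
    sed "s/@pattern@/$pattern/g" CI-TCWG.yaml > "out/$pattern.yaml"
done
```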
diff --git a/cimonitor-configs/DEVS-TCWG.yaml b/cimonitor-configs/DEVS-TCWG.yaml
new file mode 100644
index 00000000..6167b76e
--- /dev/null
+++ b/cimonitor-configs/DEVS-TCWG.yaml
@@ -0,0 +1,70 @@
+format:
+ server: ci.linaro.org
+ filename: "@pattern@.html"
+ links:
+ - "@pattern@"
+
+ summary_table_last_run:
+ columns:
+ - last_run_date
+ - nb_success
+ - nb_failure
+ - nb_forced
+ lines:
+ - anytime
+ - last-week
+ - 6-days-ago
+ - 5-days-ago
+ - 4-days-ago
+ - 3-days-ago
+ - 2-days-ago
+ - 1-days-ago
+ - 0-days-ago
+
+ details_table:
+ columns:
+ - project
+ - status
+ - since_last_build
+ - since_last_success
+ - since_last_fail
+ - since_last_force
+ - last_forced_infos
+ - useful_links
+ lines:
+ - "@pattern@.*-build"
+
+pattern:
+ - tcwg
+
+ - tcwg_bmk
+ - tcwg_bmk-code_size
+ - tcwg_bmk-code_size-coremark
+ - tcwg_bmk-code_size-spec2k6
+ - tcwg_bmk-code_speed
+ - tcwg_bmk-code_speed-coremark
+ - tcwg_bmk-code_speed-spec2k6
+ - tcwg_bmk-fujitsu_speed
+ - tcwg_bmk-fujitsu_speed-cpu2017rate
+ - tcwg_bmk-sve_speed
+ - tcwg_bmk-sve_speed-cpu2017rate
+ - tcwg_bmk-vect_speed
+ - tcwg_bmk-vect_speed-cpu2017
+ - tcwg_bmk-vect_speed-spec2k6
+
+ - tcwg_kernel
+ - tcwg_kernel-gnu
+ - tcwg_kernel-llvm
+
+ - tcwg_gcc
+ - tcwg_gcc_bootstrap
+ - tcwg_gcc_check
+ - tcwg_gnu
+ - tcwg_gnu_cross_build
+ - tcwg_gnu_cross_check
+ - tcwg_gnu_native_check
+ - tcwg_gnu_native_check_binutils
+ - tcwg_gnu_native_check_gcc
+ - tcwg_gnu_native_check_gdb
+ - tcwg_gnu_native_fast_check_gdb
+
diff --git a/cimonitor-configs/LNT.yaml b/cimonitor-configs/LNT.yaml
new file mode 100644
index 00000000..2d33af4c
--- /dev/null
+++ b/cimonitor-configs/LNT.yaml
@@ -0,0 +1,28 @@
+filename: "LNT.html"
+lnt_dashboard:
+ binutils:
+ - aarch64 http://llvm.validation.linaro.org:38500/db_default/v4/tcwg_binutils_check/graph?plot.1034706317292025484.11=231404492126674122.1034706317292025484.11&plot.28257213379791955.12=231404492126674122.28257213379791955.12
+ - arm http://llvm.validation.linaro.org:38500/db_default/v4/tcwg_binutils_check/graph?plot.1034706317292025484.11=1000400360521891559.1034706317292025484.11&plot.28257213379791955.12=1000400360521891559.28257213379791955.12
+ - trend http://llvm.validation.linaro.org:38500/db_default/v4/tcwg_binutils_check/latest_runs_report
+ - details http://llvm.validation.linaro.org:38500/db_default/v4/tcwg_binutils_check/recent_activity
+
+ gcc:
+ - aarch64 http://llvm.validation.linaro.org:38500/db_default/v4/tcwg_gcc_check/graph?plot.1034706317292025484.11=231404492126674122.1034706317292025484.11&plot.28257213379791955.12=231404492126674122.28257213379791955.12
+ - arm http://llvm.validation.linaro.org:38500/db_default/v4/tcwg_gcc_check/graph?plot.1034706317292025484.11=1000400360521891559.1034706317292025484.11&plot.28257213379791955.12=1000400360521891559.28257213379791955.12
+ - trend http://llvm.validation.linaro.org:38500/db_default/v4/tcwg_gcc_check/latest_runs_report
+ - details http://llvm.validation.linaro.org:38500/db_default/v4/tcwg_gcc_check/recent_activity
+
+ gdb:
+ - aarch64 http://llvm.validation.linaro.org:38500/db_default/v4/tcwg_gdb_check/graph?plot.1034706317292025484.11=231404492126674122.1034706317292025484.11&plot.28257213379791955.12=231404492126674122.28257213379791955.12
+ - arm http://llvm.validation.linaro.org:38500/db_default/v4/tcwg_gdb_check/graph?plot.1034706317292025484.11=1000400360521891559.1034706317292025484.11&plot.28257213379791955.12=1000400360521891559.28257213379791955.12
+ - trend http://llvm.validation.linaro.org:38500/db_default/v4/tcwg_gdb_check/latest_runs_report
+ - details http://llvm.validation.linaro.org:38500/db_default/v4/tcwg_gdb_check/recent_activity
+
+ glibc:
+ - aarch64 http://llvm.validation.linaro.org:38500/db_default/v4/tcwg_glibc_check/graph?plot.1034706317292025484.11=231404492126674122.1034706317292025484.11&plot.28257213379791955.12=231404492126674122.28257213379791955.12
+ - arm http://llvm.validation.linaro.org:38500/db_default/v4/tcwg_glibc_check/graph?plot.1034706317292025484.11=1000400360521891559.1034706317292025484.11&plot.28257213379791955.12=1000400360521891559.28257213379791955.12
+ - trend http://llvm.validation.linaro.org:38500/db_default/v4/tcwg_glibc_check/latest_runs_report
+ - details http://llvm.validation.linaro.org:38500/db_default/v4/tcwg_glibc_check/recent_activity
+
+ all projects:
+ - list http://llvm.validation.linaro.org:38500/
diff --git a/cimonitor-configs/TCWG_BMK.yaml b/cimonitor-configs/TCWG_BMK.yaml
new file mode 100644
index 00000000..acc5d782
--- /dev/null
+++ b/cimonitor-configs/TCWG_BMK.yaml
@@ -0,0 +1,73 @@
+format:
+ server: ci.linaro.org
+ filename: "@pattern@.html"
+ summary_table:
+ columns:
+ - timelaps
+ - nb_success
+ - nb_failure
+ - nb_reducing
+ - nb_bisected
+ - nb_forced
+ lines:
+ - any-date
+ - day-6
+ - day-5
+ - day-4
+ - day-3
+ - day-2
+ - day-1
+ - day-0
+
+ links:
+ - "@pattern@"
+
+ details_table:
+ columns:
+ - project
+ - status
+ - since_last_build
+ - since_last_fail
+ - since_last_force
+ - useful_links
+ - last_title
+ lines:
+ - "@pattern@.*-build"
+
+pattern:
+ - tcwg
+
+ - tcwg_bmk
+ - tcwg_bmk-code_size
+ - tcwg_bmk-code_size-coremark
+ - tcwg_bmk-code_size-spec2k6
+ - tcwg_bmk-code_speed
+ - tcwg_bmk-code_speed-coremark
+ - tcwg_bmk-code_speed-spec2k6
+ - tcwg_bmk-fujitsu_speed
+ - tcwg_bmk-fujitsu_speed-cpu2017rate
+ - tcwg_bmk-sve_speed
+ - tcwg_bmk-sve_speed-cpu2017rate
+ - tcwg_bmk-vect_speed
+ - tcwg_bmk-vect_speed-cpu2017
+ - tcwg_bmk-vect_speed-spec2k6
+
+ - tcwg_kernel
+ - tcwg_kernel-gnu
+ - tcwg_kernel-llvm
+
+ - tcwg_gcc
+ - tcwg_gcc_bootstrap
+ - tcwg_gcc_check
+ - tcwg_gnu
+ - tcwg_gnu_cross_build
+ - tcwg_gnu_cross_check
+ - tcwg_gnu_native_check
+ - tcwg_gnu_native_check_binutils
+ - tcwg_gnu_native_check_gcc
+ - tcwg_gnu_native_check_gdb
+ - tcwg_gnu_native_fast_check_gdb
+
+
+
+
diff --git a/cimonitor-configs/help.html b/cimonitor-configs/help.html
new file mode 100644
index 00000000..4111003c
--- /dev/null
+++ b/cimonitor-configs/help.html
@@ -0,0 +1,25 @@
+<!DOCTYPE html>
+<html>
+<body>
+
+<h1>Help</h1>
+
+<h2>Possible columns that can be used:</h2>
+
+<p><u><b>Project</b></u> : name of the project </p>
+
+<p><u><b>Status</b></u> : Status of the last build </p>
+
+<p><u><b>Last_build</b></u> : Last build date and name </p>
+
+<p><u><b>Last_success</b></u> : Last successful build date and name </p>
+
+<p><u><b>Last_fail</b></u> : Last failed build date and name </p>
+
+<p><u><b>Last_forced</b></u> : Last forced build date and name </p>
+
+<p><u><b>Statistics</b></u> : Statistics over the last builds of this project. Counts the number of runs that regressed, were forced, failed, or succeeded.
+</p>
+
+</body>
+</html>
diff --git a/cimonitor-configs/sorting-table-css/.prettierrc.js b/cimonitor-configs/sorting-table-css/.prettierrc.js
new file mode 100644
index 00000000..24df7e84
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/.prettierrc.js
@@ -0,0 +1,7 @@
+module.exports = {
+ singleQuote: true,
+ semi: false,
+ trailingComma: 'all',
+ tabWidth: 2,
+ printWidth: 120,
+}
diff --git a/cimonitor-configs/sorting-table-css/CODE_OF_CONDUCT.md b/cimonitor-configs/sorting-table-css/CODE_OF_CONDUCT.md
new file mode 100755
index 00000000..b2e87d70
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at jonas@earendel.se. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/cimonitor-configs/sorting-table-css/CONTRIBUTING.md b/cimonitor-configs/sorting-table-css/CONTRIBUTING.md
new file mode 100755
index 00000000..89ad0196
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/CONTRIBUTING.md
@@ -0,0 +1,5 @@
+# Contributing
+
+I am grateful for any and all contributions.
+
+If it's a minor thing I guess it's easier to open an issue, but if you prefer creating a fork, go ahead! :)
diff --git a/cimonitor-configs/sorting-table-css/LICENSE b/cimonitor-configs/sorting-table-css/LICENSE
new file mode 100755
index 00000000..cf1ab25d
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/LICENSE
@@ -0,0 +1,24 @@
+This is free and unencumbered software released into the public domain.
+
+Anyone is free to copy, modify, publish, use, compile, sell, or
+distribute this software, either in source code form or as a compiled
+binary, for any purpose, commercial or non-commercial, and by any
+means.
+
+In jurisdictions that recognize copyright laws, the author or authors
+of this software dedicate any and all copyright interest in the
+software to the public domain. We make this dedication for the benefit
+of the public at large and to the detriment of our heirs and
+successors. We intend this dedication to be an overt act of
+relinquishment in perpetuity of all present and future rights to this
+software under copyright law.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+For more information, please refer to <http://unlicense.org>
diff --git a/cimonitor-configs/sorting-table-css/PULL_REQUEST_TEMPLATE.md b/cimonitor-configs/sorting-table-css/PULL_REQUEST_TEMPLATE.md
new file mode 100755
index 00000000..0674d91e
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,13 @@
+# Pull request template
+
+## Purpose
+
+_Describe the problem or feature in addition to a link to the issues._
+
+## Approach
+
+_How does this change address the problem?_
+
+## Fixes
+
+_List of resolved issues._
diff --git a/cimonitor-configs/sorting-table-css/README.md b/cimonitor-configs/sorting-table-css/README.md
new file mode 100755
index 00000000..f4143d6e
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/README.md
@@ -0,0 +1,302 @@
+# sortable - a tiny, vanilla JS table sorter
+
+Makes any table with **class="sortable"**, er, sortable. That is, the user can click on a table header and change the sorting of the table rows.
+
+Just include the JavaScript and it will work. No function calls needed; everything is done with an **eventListener**.
+(The CSS is not strictly needed, but makes it ~pretty and user-friendly.)
+
+- [sortable - a tiny, vanilla JS table sorter](#sortable---a-tiny-vanilla-js-table-sorter)
+ - [Factoids](#factoids)
+ - [...with a little help from my friends](#with-a-little-help-from-my-friends)
+ - [Demo](#demo)
+ - [A basic example](#a-basic-example)
+ - [Non-sortable field](#non-sortable-field)
+ - [...using `class` and `css`](#using-class-and-css)
+ - [...using `css` only](#using-css-only)
+ - [Indicators/arrows on the left side](#indicatorsarrows-on-the-left-side)
+ - [Note about css/scss](#note-about-cssscss)
+ - [Sort on value other than the one shown](#sort-on-value-other-than-the-one-shown)
+ - [Alternative sorting](#alternative-sorting)
+ - [Specify which column should be sorted](#specify-which-column-should-be-sorted)
+ - [Ascending sort](#ascending-sort)
+ - [Sort on load](#sort-on-load)
+
+## Factoids
+
+- **921 bytes** minified. (541 bytes gzipped)
+
+- Works with **JavaScript-generated tables**. (since we are using an eventListener)
+
+- **Lightning fast**. _Huge_ tables will make it slow and may freeze the browser, especially for mobiles, so you know...
+
+- Requires **thead** and **tbody**.
+
+- **cross browser**, ie9+ (I think, there have been a _whole_ bunch of changes since I last tested it on ie9 🤷)
+
+- ~~eventListeners attached to the rows _WILL_ be removed~~
+
+- eventListeners are no longer removed! 😊
+
+- NOT tested with React, Angular, Vue, etc.
+
+- Works with [Svelte](https://svelte.dev/)!
+
+### ...with a little help from my friends
+
+- `table` > `class="sortable asc"` lets you [sort ascending](#ascending-sort) as default.
+  Thanks [Nikita Dunajevs](https://github.com/dunajevs)!
+
+- `data-sort-alt` in `tbody` > `td` allows for [alternative sorting](#alternative-sorting) while holding `shift` or `alt`. Thanks [wodny](https://github.com/wodny)!
+
+- `data-sort-col` in `thead` > `th` allows you to [specify which column should be sorted](#specify-which-column-should-be-sorted), in case you are using `colspan`, for instance. Thanks [Nick Kocharhook](https://github.com/nk9)!
+
+- **Nested elements** inside `th` now works. Thanks [mxve](https://github.com/mxve)!
+
+- [Sort on load](#sort-on-load) example. Thanks [Christian Petersson](https://github.com/Issen007) and [Abit Salihu](https://github.com/abitsalihu)!
+
+- Thanks to [chatcoda](https://github.com/chatcoda) for the `<td></td>` / `<td>0</td>` sorting bug fix!
+
+## Demo
+
+You can find a simple demo at <https://tofsjonas.github.io/sortable/>
+
+## A basic example
+
+```html
+<table class="sortable">
+ <thead>
+ <tr>
+ <th><span>Role</span></th>
+ <th>Name</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <td>Genius</td>
+ <td>Rick</td>
+ </tr>
+ <tr>
+ <td><a href="javascript:alert('Inline javascript works!');">Sidekick</a></td>
+ <td>Morty</td>
+ </tr>
+ </tbody>
+</table>
+<link href="https://cdn.jsdelivr.net/gh/tofsjonas/sortable/sortable.min.css" rel="stylesheet" />
+<script src="https://cdn.jsdelivr.net/gh/tofsjonas/sortable/sortable.min.js"></script>
+```
+
+_(The `span` is just there to prove that elements inside `th` work)_
+
+## Non-sortable field
+
+### ...using `class` and `css`
+
+If you wish to disable sorting for a specific field, the easiest way is to add a class to it, like so:
+
+```html
+<tr>
+ <th class="no-sort">Role</th>
+ <th>Name</th>
+</tr>
+```
+
+and then use CSS to block clicks, like so:
+
+```css
+.sortable th.no-sort {
+ pointer-events: none;
+}
+```
+
+### ...using `css` only
+
+This is a bit trickier, but it doesn't require any changes to the html, so I guess it could be worth it in some cases.
+
+```css
+/* the first column in every sortable table should not be sortable */
+.sortable th:nth-child(1) {
+ pointer-events: none;
+}
+
+/* the seventh column in the second .sortable table should not be sortable */
+.sortable:nth-of-type(2) th:nth-child(7) {
+ pointer-events: none;
+}
+```
+
+## Indicators/arrows on the left side
+
+If you have text that is aligned on the right side, you may want to have the arrows on the left side.
+
+This is solved by adding a class to the css and using `::before` instead of `::after`.
+
+(You can of course use a pure css solution, without class names - just like with the [non-sortable field](#non-sortable-field) - but _that_ I will leave for you to figure out.)
+
+```css
+.sortable th.indicator-left::after {
+ content: '';
+}
+.sortable th.indicator-left::before {
+ margin-right: 3px;
+ content: '▸';
+}
+/* etc. */
+```
+
+> _Full example: [CSS](https://github.com/tofsjonas/sortable/blob/main/sortable-base.css), [SCSS](https://github.com/tofsjonas/sortable/blob/main/sortable-base.scss)_
+
+## Note about css/scss
+
+The `css/scss` in this repo was only ever meant as an example. It was never intended to be actually _used_.
+
+That said, if you're feeling lazy, here are two stylesheets you can use:
+
+```html
+<!-- This will add arrows only -->
+<link href="https://cdn.jsdelivr.net/gh/tofsjonas/sortable/sortable-base.min.css" rel="stylesheet" />
+
+<!-- This will make it look like the tables in the example, with arrows, striped rows etc. -->
+<link href="https://cdn.jsdelivr.net/gh/tofsjonas/sortable/sortable.min.css" rel="stylesheet" />
+```
+
+## Sort on value other than the one shown
+
+Using the `data-sort` attribute in `tbody` > `td`, you can have one visible value and one sortable value.
+This is useful when you have, for instance, sizes like kB, MB, GB, etc.
+
+```html
+<table class="sortable">
+ <thead>
+ <tr>
+ <th>Movie Name</th>
+ <th>Size</th>
+ <th>Release date</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <td>Zack Snyder's Justice League</td>
+ <td data-sort="943718400">900MB</td>
+ <td data-sort="20210318">03/18/2021</td>
+ </tr>
+ <tr>
+ <td>The Sound of Music</td>
+ <td data-sort="1610612736">1.5GB</td>
+ <td data-sort="19651209">12/09/1965</td>
+ </tr>
+ </tbody>
+</table>
+```
+
+## Alternative sorting
+
+If you click on a table header while holding **shift** or **alt**, the alternative
+`data-sort-alt` attribute will override `data-sort`.
+
+```html
+<table class="sortable">
+ <thead>
+ <tr>
+ <th>Movie Name</th>
+ <th>Size</th>
+ <th>Release date</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <td>Something</td>
+ <td data-sort-alt="c" data-sort="a">A</td>
+ <td data-sort-alt="b" data-sort="c">B</td>
+ <td data-sort-alt="a" data-sort="b">C</td>
+ </tr>
+ <tr>
+ <td>Something else</td>
+ <td data-sort-alt="e" data-sort="f">D</td>
+ <td data-sort-alt="f" data-sort="e">E</td>
+ <td data-sort-alt="d" data-sort="d">F</td>
+ </tr>
+ </tbody>
+</table>
+```
+
+## Specify which column should be sorted
+
+Using the `data-sort-col` attribute in `thead` > `th`, you can sort on a different column than the one that was clicked, for instance when you want to use colspans. Like so:
+
+```html
+<thead>
+ <tr>
+ <th></th>
+ <th>Category</th>
+ <th class="show_name">Show</th>
+ <th colspan="2">Overall</th>
+ <th colspan="2" data-sort-col="5">On Our Dates</th>
+ <th data-sort-col="7">First Sold Out</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td class="tags">&nbsp;</td>
+ <td class="category">Comedy</td>
+ <td class="show_name">Show 1</td>
+ <td class="ratio all" data-sort="72">18/25</td>
+ <td class="pct all">72%</td>
+ <td class="ratio ours" data-sort="75">3/4</td>
+ <td class="pct ours">75%</td>
+ <td>2022-07-30</td>
+ </tr>
+ ...
+</tbody>
+```
+
+## Ascending sort
+
+By adding `asc` to `table`, the default sorting direction will be **ascending** instead of descending.
+
+```html
+<table class="sortable asc">
+ <thead>
+ ...
+ </thead>
+ <tbody>
+ ...
+ </tbody>
+</table>
+```
+
+## Sort on load
+
+If you wish to sort a table on load, I would recommend doing something like this:
+
+```html
+<table class="sortable">
+ <thead>
+ <tr>
+ <th>Movie Name</th>
+ <th id="movie-size">Size</th>
+ <th>Release date</th>
+ </tr>
+ </thead>
+ <tbody>
+ ...
+ </tbody>
+</table>
+
+<script>
+ window.addEventListener('load', function () {
+ const el = document.getElementById('movie-size')
+ // without id:
+ // const el = document.querySelector('.sortable th:first-child')
+ // const el = document.querySelector('.sortable th:nth-child(2)')
+ // const el = document.querySelectorAll('.sortable')[3].querySelector('th:nth-child(7)')
+ // etc.
+ if (el) {
+ el.click()
+ }
+ })
+</script>
+```
+
+Combine this with `<table class="sortable asc">` to reverse the sort order. Or do `el.click()` twice!
+
+[![jsdelivr](https://data.jsdelivr.com/v1/package/gh/tofsjonas/sortable/badge)](https://www.jsdelivr.com/package/gh/tofsjonas/sortable)
diff --git a/cimonitor-configs/sorting-table-css/example.css b/cimonitor-configs/sorting-table-css/example.css
new file mode 100644
index 00000000..7e503fb8
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/example.css
@@ -0,0 +1,138 @@
+@charset "UTF-8";
+
+.failure { color: red; }
+
+.sortable th {
+ cursor: pointer;
+}
+.sortable th.no-sort {
+ pointer-events: none;
+}
+.sortable th::after, .sortable th::before {
+ transition: color 0.1s ease-in-out;
+ font-size: 1.2em;
+ color: transparent;
+}
+.sortable th::after {
+ margin-left: 3px;
+ content: "▸";
+}
+.sortable th:hover::after {
+ color: inherit;
+}
+.sortable th.dir-d::after {
+ color: inherit;
+ content: "▾";
+}
+.sortable th.dir-u::after {
+ color: inherit;
+ content: "▴";
+}
+.sortable th.indicator-left::after {
+ content: "";
+}
+.sortable th.indicator-left::before {
+ margin-right: 3px;
+ content: "▸";
+}
+.sortable th.indicator-left:hover::before {
+ color: inherit;
+}
+.sortable th.indicator-left.dir-d::before {
+ color: inherit;
+ content: "▾";
+}
+.sortable th.indicator-left.dir-u::before {
+ color: inherit;
+ content: "▴";
+}
+
+.success { color: green; }
+.none { color: black; }
+.failure { color: red; }
+.aborted { color: darkred; }
+.diag { color: purple; }
+
+
+.sortable {
+ --stripe-color: #c2c2c2;
+ --th-color: #fff;
+ --th-bg: #404040;
+ --td-color: #000;
+ --td-on-stripe-color: #000;
+ border-spacing: 0;
+}
+.sortable tbody tr:nth-child(odd) {
+ background-color: var(--stripe-color);
+ color: var(--td-on-stripe-color);
+}
+.sortable th {
+ background: var(--th-bg);
+ color: var(--th-color);
+ font-weight: normal;
+ text-align: left;
+ text-transform: capitalize;
+ vertical-align: baseline;
+ white-space: nowrap;
+}
+.sortable td {
+ color: var(--td-color);
+}
+.sortable td,
+.sortable th {
+ padding: 5px;
+}
+.sortable td:first-child,
+.sortable th:first-child {
+ border-top-left-radius: 4px;
+}
+.sortable td:last-child,
+.sortable th:last-child {
+ border-top-right-radius: 4px;
+}
+
+body {
+ font-size: 14px;
+}
+
+
+p {
+ line-height: 1.7em;
+}
+
+code {
+ font-family: monospace;
+ background: #eee;
+ padding: 5px;
+ border-radius: 2px;
+}
+
+* {
+ box-sizing: border-box;
+ font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", sans-serif;
+ -webkit-font-smoothing: antialiased;
+ -moz-osx-font-smoothing: grayscale;
+}
+
+.sortable:nth-of-type(4) th:nth-child(7),
+th.no-sort {
+ background: pink;
+ color: red;
+ pointer-events: none;
+}
+
+.sortable:nth-of-type(4) th:nth-child(7)::after {
+ color: red;
+ content: "(also not sortable)";
+ font-size: 0.9em;
+ display: block;
+}
+
+.lefty td:nth-child(2),
+.lefty th:nth-child(2) {
+ width: 80px;
+ text-align: right;
+}/*# sourceMappingURL=example.css.map */
+
+
+.failure { color: red; }
diff --git a/cimonitor-configs/sorting-table-css/example.css.map b/cimonitor-configs/sorting-table-css/example.css.map
new file mode 100644
index 00000000..e5d7a9cf
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/example.css.map
@@ -0,0 +1 @@
+{"version":3,"sources":["example.css","sortable-base.scss","sortable.scss","example.scss"],"names":[],"mappings":"AAAA,gBAAgB;ACCd;EACE,eAAA;ADCJ;ACCI;EACE,oBAAA;ADCN;ACCI;EAEE,kCAAA;EACA,gBAAA;EACA,kBAAA;ADAN;ACGI;EACE,gBAAA;EACA,YAAA;ADDN;ACIM;EACE,cAAA;ADFR;ACOM;EACE,cAAA;EACA,YAAA;ADLR;ACUM;EACE,cAAA;EACA,YAAA;ADRR;ACYM;EACE,WAAA;ADVR;ACYM;EACE,iBAAA;EACA,YAAA;ADVR;ACcQ;EACE,cAAA;ADZV;ACiBQ;EACE,cAAA;EACA,YAAA;ADfV;ACoBQ;EACE,cAAA;EACA,YAAA;ADlBV;;AE1CA;EACE,uBAAA;EACA,gBAAA;EACA,gBAAA;EACA,gBAAA;EACA,0BAAA;EAEA,iBAAA;AF4CF;AExCM;EACE,qCAAA;EACA,gCAAA;AF0CR;AEtCE;EACE,wBAAA;EACA,sBAAA;EACA,mBAAA;EACA,gBAAA;EACA,0BAAA;EACA,wBAAA;EACA,mBAAA;AFwCJ;AEtCE;EACE,sBAAA;AFwCJ;AEtCE;;EAEE,aAAA;AFwCJ;AEtCI;;EACE,2BAAA;AFyCN;AEtCI;;EACE,4BAAA;AFyCN;;AGhFA;EACE,eAAA;AHmFF;;AGhFA;EACE,kBAAA;AHmFF;;AGhFA;EACE,sBAAA;EACA,gBAAA;EACA,YAAA;EACA,kBAAA;AHmFF;;AGhFA;EACE,sBAAA;EACA,8JAAA;EAEA,mCAAA;EACA,kCAAA;AHkFF;;AGhFA;;EAEE,gBAAA;EACA,UAAA;EACA,oBAAA;AHmFF;;AGjFA;EACE,UAAA;EACA,8BAAA;EACA,gBAAA;EACA,cAAA;AHoFF;;AGjFE;;EAEE,WAAA;EACA,iBAAA;AHoFJ","file":"example.css"} \ No newline at end of file
diff --git a/cimonitor-configs/sorting-table-css/example.min.css b/cimonitor-configs/sorting-table-css/example.min.css
new file mode 100644
index 00000000..86aa6c1c
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/example.min.css
@@ -0,0 +1 @@
+.sortable th{cursor:pointer}.sortable th.no-sort{pointer-events:none}.sortable th::after,.sortable th::before{transition:color .1s ease-in-out;font-size:1.2em;color:rgba(0,0,0,0)}.sortable th::after{margin-left:3px;content:"â–¸"}.sortable th:hover::after{color:inherit}.sortable th.dir-d::after{color:inherit;content:"â–¾"}.sortable th.dir-u::after{color:inherit;content:"â–´"}.sortable th.indicator-left::after{content:""}.sortable th.indicator-left::before{margin-right:3px;content:"â–¸"}.sortable th.indicator-left:hover::before{color:inherit}.sortable th.indicator-left.dir-d::before{color:inherit;content:"â–¾"}.sortable th.indicator-left.dir-u::before{color:inherit;content:"â–´"}.sortable{--stripe-color: #e4e4e4;--th-color: #fff;--th-bg: #808080;--td-color: #000;--td-on-stripe-color: #000;border-spacing:0}.sortable tbody tr:nth-child(odd){background-color:var(--stripe-color);color:var(--td-on-stripe-color)}.sortable th{background:var(--th-bg);color:var(--th-color);font-weight:normal;text-align:left;text-transform:capitalize;vertical-align:baseline;white-space:nowrap}.sortable td{color:var(--td-color)}.sortable td,.sortable th{padding:10px}.sortable td:first-child,.sortable th:first-child{border-top-left-radius:4px}.sortable td:last-child,.sortable th:last-child{border-top-right-radius:4px}body{font-size:14px}p{line-height:1.7em}code{font-family:monospace;background:#eee;padding:5px;border-radius:2px}*{box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI","Roboto","Oxygen","Ubuntu","Cantarell","Fira Sans","Droid Sans","Helvetica Neue",sans-serif;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.sortable:nth-of-type(4) th:nth-child(7),th.no-sort{background:pink;color:red;pointer-events:none}.sortable:nth-of-type(4) th:nth-child(7)::after{color:red;content:"(also not sortable)";font-size:.9em;display:block}.lefty td:nth-child(2),.lefty th:nth-child(2){width:80px;text-align:right}/*# sourceMappingURL=example.min.css.map */ \ No newline at end of file
diff --git a/cimonitor-configs/sorting-table-css/example.min.css.map b/cimonitor-configs/sorting-table-css/example.min.css.map
new file mode 100644
index 00000000..c748cb85
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/example.min.css.map
@@ -0,0 +1 @@
+{"version":3,"sources":["example.min.css","sortable-base.scss","sortable.scss","example.scss"],"names":[],"mappings":"AAAA,aCCE,cACE,CAAA,qBAEA,mBACE,CAAA,yCAEF,gCAEE,CAAA,eACA,CAAA,mBACA,CAAA,oBAGF,eACE,CAAA,WACA,CAAA,0BAGA,aACE,CAAA,0BAKF,aACE,CAAA,WACA,CAAA,0BAKF,aACE,CAAA,WACA,CAAA,mCAIF,UACE,CAAA,oCAEF,gBACE,CAAA,WACA,CAAA,0CAIA,aACE,CAAA,0CAKF,aACE,CAAA,WACA,CAAA,0CAKF,aACE,CAAA,WACA,CAAA,UC5DV,uBACE,CAAA,gBACA,CAAA,gBACA,CAAA,gBACA,CAAA,0BACA,CAAA,gBAEA,CAAA,kCAII,oCACE,CAAA,+BACA,CAAA,aAIN,uBACE,CAAA,qBACA,CAAA,kBACA,CAAA,eACA,CAAA,yBACA,CAAA,uBACA,CAAA,kBACA,CAAA,aAEF,qBACE,CAAA,0BAEF,YAEE,CAAA,kDAEA,0BACE,CAAA,gDAGF,2BACE,CAAA,KCvCN,cACE,CAAA,EAGF,iBACE,CAAA,KAGF,qBACE,CAAA,eACA,CAAA,WACA,CAAA,iBACA,CAAA,EAGF,qBACE,CAAA,mJACA,CAAA,kCAEA,CAAA,iCACA,CAAA,oDAEF,eAEE,CAAA,SACA,CAAA,mBACA,CAAA,gDAEF,SACE,CAAA,6BACA,CAAA,cACA,CAAA,aACA,CAAA,8CAGA,UAEE,CAAA,gBACA","file":"example.min.css"} \ No newline at end of file
diff --git a/cimonitor-configs/sorting-table-css/example.scss b/cimonitor-configs/sorting-table-css/example.scss
new file mode 100644
index 00000000..2dfb47ea
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/example.scss
@@ -0,0 +1,42 @@
+@import 'sortable.scss';
+body {
+ font-size: 14px;
+}
+
+p {
+ line-height: 1.7em;
+}
+
+code {
+ font-family: monospace;
+ background: #eee;
+ padding: 5px;
+ border-radius: 2px;
+}
+
+* {
+ box-sizing: border-box;
+ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu', 'Cantarell', 'Fira Sans',
+ 'Droid Sans', 'Helvetica Neue', sans-serif;
+ -webkit-font-smoothing: antialiased;
+ -moz-osx-font-smoothing: grayscale;
+}
+.sortable:nth-of-type(4) th:nth-child(7),
+th.no-sort {
+ background: pink;
+ color: red;
+ pointer-events: none;
+}
+.sortable:nth-of-type(4) th:nth-child(7)::after {
+ color: red;
+ content: '(also not sortable)';
+ font-size: 0.9em;
+ display: block;
+}
+.lefty {
+ td:nth-child(2),
+ th:nth-child(2) {
+ width: 80px;
+ text-align: right;
+ }
+}
diff --git a/cimonitor-configs/sorting-table-css/sortable-base.css b/cimonitor-configs/sorting-table-css/sortable-base.css
new file mode 100644
index 00000000..39d50784
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/sortable-base.css
@@ -0,0 +1,45 @@
+@charset "UTF-8";
+.sortable th {
+ cursor: pointer;
+}
+.sortable th.no-sort {
+ pointer-events: none;
+}
+.sortable th::after, .sortable th::before {
+ transition: color 0.1s ease-in-out;
+ font-size: 1.2em;
+ color: transparent;
+}
+.sortable th::after {
+ margin-left: 3px;
+ content: "▸";
+}
+.sortable th:hover::after {
+ color: inherit;
+}
+.sortable th.dir-d::after {
+ color: inherit;
+ content: "▾";
+}
+.sortable th.dir-u::after {
+ color: inherit;
+ content: "▴";
+}
+.sortable th.indicator-left::after {
+ content: "";
+}
+.sortable th.indicator-left::before {
+ margin-right: 3px;
+ content: "▸";
+}
+.sortable th.indicator-left:hover::before {
+ color: inherit;
+}
+.sortable th.indicator-left.dir-d::before {
+ color: inherit;
+ content: "▾";
+}
+.sortable th.indicator-left.dir-u::before {
+ color: inherit;
+ content: "▴";
+}/*# sourceMappingURL=sortable-base.css.map */
\ No newline at end of file
diff --git a/cimonitor-configs/sorting-table-css/sortable-base.css.map b/cimonitor-configs/sorting-table-css/sortable-base.css.map
new file mode 100644
index 00000000..d6b8cdc4
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/sortable-base.css.map
@@ -0,0 +1 @@
+{"version":3,"sources":["sortable-base.css","sortable-base.scss"],"names":[],"mappings":"AAAA,gBAAgB;ACCd;EACE,eAAA;ADCJ;ACCI;EACE,oBAAA;ADCN;ACCI;EAEE,kCAAA;EACA,gBAAA;EACA,kBAAA;ADAN;ACGI;EACE,gBAAA;EACA,YAAA;ADDN;ACIM;EACE,cAAA;ADFR;ACOM;EACE,cAAA;EACA,YAAA;ADLR;ACUM;EACE,cAAA;EACA,YAAA;ADRR;ACYM;EACE,WAAA;ADVR;ACYM;EACE,iBAAA;EACA,YAAA;ADVR;ACcQ;EACE,cAAA;ADZV;ACiBQ;EACE,cAAA;EACA,YAAA;ADfV;ACoBQ;EACE,cAAA;EACA,YAAA;ADlBV","file":"sortable-base.css"} \ No newline at end of file
diff --git a/cimonitor-configs/sorting-table-css/sortable-base.min.css b/cimonitor-configs/sorting-table-css/sortable-base.min.css
new file mode 100644
index 00000000..3a2dde2c
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/sortable-base.min.css
@@ -0,0 +1 @@
+.sortable th{cursor:pointer}.sortable th.no-sort{pointer-events:none}.sortable th::after,.sortable th::before{transition:color .1s ease-in-out;font-size:1.2em;color:rgba(0,0,0,0)}.sortable th::after{margin-left:3px;content:"â–¸"}.sortable th:hover::after{color:inherit}.sortable th.dir-d::after{color:inherit;content:"â–¾"}.sortable th.dir-u::after{color:inherit;content:"â–´"}.sortable th.indicator-left::after{content:""}.sortable th.indicator-left::before{margin-right:3px;content:"â–¸"}.sortable th.indicator-left:hover::before{color:inherit}.sortable th.indicator-left.dir-d::before{color:inherit;content:"â–¾"}.sortable th.indicator-left.dir-u::before{color:inherit;content:"â–´"}/*# sourceMappingURL=sortable-base.min.css.map */ \ No newline at end of file
diff --git a/cimonitor-configs/sorting-table-css/sortable-base.min.css.map b/cimonitor-configs/sorting-table-css/sortable-base.min.css.map
new file mode 100644
index 00000000..df0a5ba9
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/sortable-base.min.css.map
@@ -0,0 +1 @@
+{"version":3,"sources":["sortable-base.min.css","sortable-base.scss"],"names":[],"mappings":"AAAA,aCCE,cACE,CAAA,qBAEA,mBACE,CAAA,yCAEF,gCAEE,CAAA,eACA,CAAA,mBACA,CAAA,oBAGF,eACE,CAAA,WACA,CAAA,0BAGA,aACE,CAAA,0BAKF,aACE,CAAA,WACA,CAAA,0BAKF,aACE,CAAA,WACA,CAAA,mCAIF,UACE,CAAA,oCAEF,gBACE,CAAA,WACA,CAAA,0CAIA,aACE,CAAA,0CAKF,aACE,CAAA,WACA,CAAA,0CAKF,aACE,CAAA,WACA","file":"sortable-base.min.css"} \ No newline at end of file
diff --git a/cimonitor-configs/sorting-table-css/sortable-base.scss b/cimonitor-configs/sorting-table-css/sortable-base.scss
new file mode 100644
index 00000000..ee059b58
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/sortable-base.scss
@@ -0,0 +1,68 @@
+.sortable {
+ th {
+ cursor: pointer;
+
+ &.no-sort {
+ pointer-events: none;
+ }
+ &::after,
+ &::before {
+ transition: color 0.1s ease-in-out;
+ font-size: 1.2em;
+ color: transparent;
+ }
+
+ &::after {
+ margin-left: 3px;
+ content: '\025B8';
+ }
+ &:hover {
+ &::after {
+ color: inherit;
+ }
+ }
+
+ &.dir-d {
+ &::after {
+ color: inherit;
+ content: '\025BE';
+ }
+ }
+
+ &.dir-u {
+ &::after {
+ color: inherit;
+ content: '\025B4';
+ }
+ }
+ &.indicator-left {
+ &::after {
+ content: '';
+ }
+ &::before {
+ margin-right: 3px;
+ content: '\025B8';
+ }
+
+ &:hover {
+ &::before {
+ color: inherit;
+ }
+ }
+
+ &.dir-d {
+ &::before {
+ color: inherit;
+ content: '\025BE';
+ }
+ }
+
+ &.dir-u {
+ &::before {
+ color: inherit;
+ content: '\025B4';
+ }
+ }
+ }
+ }
+}
diff --git a/cimonitor-configs/sorting-table-css/sortable.css b/cimonitor-configs/sorting-table-css/sortable.css
new file mode 100644
index 00000000..e6d321a5
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/sortable.css
@@ -0,0 +1,82 @@
+@charset "UTF-8";
+.sortable th {
+ cursor: pointer;
+}
+.sortable th.no-sort {
+ pointer-events: none;
+}
+.sortable th::after, .sortable th::before {
+ transition: color 0.1s ease-in-out;
+ font-size: 1.2em;
+ color: transparent;
+}
+.sortable th::after {
+ margin-left: 3px;
+ content: "â–¸";
+}
+.sortable th:hover::after {
+ color: inherit;
+}
+.sortable th.dir-d::after {
+ color: inherit;
+ content: "â–¾";
+}
+.sortable th.dir-u::after {
+ color: inherit;
+ content: "â–´";
+}
+.sortable th.indicator-left::after {
+ content: "";
+}
+.sortable th.indicator-left::before {
+ margin-right: 3px;
+ content: "â–¸";
+}
+.sortable th.indicator-left:hover::before {
+ color: inherit;
+}
+.sortable th.indicator-left.dir-d::before {
+ color: inherit;
+ content: "â–¾";
+}
+.sortable th.indicator-left.dir-u::before {
+ color: inherit;
+ content: "â–´";
+}
+
+.sortable {
+ --stripe-color: #e4e4e4;
+ --th-color: #fff;
+ --th-bg: #808080;
+ --td-color: #000;
+ --td-on-stripe-color: #000;
+ border-spacing: 0;
+}
+.sortable tbody tr:nth-child(odd) {
+ background-color: var(--stripe-color);
+ color: var(--td-on-stripe-color);
+}
+.sortable th {
+ background: var(--th-bg);
+ color: var(--th-color);
+ font-weight: normal;
+ text-align: left;
+ text-transform: capitalize;
+ vertical-align: baseline;
+ white-space: nowrap;
+}
+.sortable td {
+ color: var(--td-color);
+}
+.sortable td,
+.sortable th {
+ padding: 10px;
+}
+.sortable td:first-child,
+.sortable th:first-child {
+ border-top-left-radius: 4px;
+}
+.sortable td:last-child,
+.sortable th:last-child {
+ border-top-right-radius: 4px;
+}/*# sourceMappingURL=sortable.css.map */
\ No newline at end of file
diff --git a/cimonitor-configs/sorting-table-css/sortable.css.map b/cimonitor-configs/sorting-table-css/sortable.css.map
new file mode 100644
index 00000000..4e0279f5
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/sortable.css.map
@@ -0,0 +1 @@
+{"version":3,"sources":["sortable.css","sortable-base.scss","sortable.scss"],"names":[],"mappings":"AAAA,gBAAgB;ACCd;EACE,eAAA;ADCJ;ACCI;EACE,oBAAA;ADCN;ACCI;EAEE,kCAAA;EACA,gBAAA;EACA,kBAAA;ADAN;ACGI;EACE,gBAAA;EACA,YAAA;ADDN;ACIM;EACE,cAAA;ADFR;ACOM;EACE,cAAA;EACA,YAAA;ADLR;ACUM;EACE,cAAA;EACA,YAAA;ADRR;ACYM;EACE,WAAA;ADVR;ACYM;EACE,iBAAA;EACA,YAAA;ADVR;ACcQ;EACE,cAAA;ADZV;ACiBQ;EACE,cAAA;EACA,YAAA;ADfV;ACoBQ;EACE,cAAA;EACA,YAAA;ADlBV;;AE1CA;EACE,uBAAA;EACA,gBAAA;EACA,gBAAA;EACA,gBAAA;EACA,0BAAA;EAEA,iBAAA;AF4CF;AExCM;EACE,qCAAA;EACA,gCAAA;AF0CR;AEtCE;EACE,wBAAA;EACA,sBAAA;EACA,mBAAA;EACA,gBAAA;EACA,0BAAA;EACA,wBAAA;EACA,mBAAA;AFwCJ;AEtCE;EACE,sBAAA;AFwCJ;AEtCE;;EAEE,aAAA;AFwCJ;AEtCI;;EACE,2BAAA;AFyCN;AEtCI;;EACE,4BAAA;AFyCN","file":"sortable.css"} \ No newline at end of file
diff --git a/cimonitor-configs/sorting-table-css/sortable.js b/cimonitor-configs/sorting-table-css/sortable.js
new file mode 100755
index 00000000..4622eb60
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/sortable.js
@@ -0,0 +1,128 @@
+/**
+ * sortable 1.5.1 (or something, I always forget to update this)
+ *
+ * Makes html tables sortable, ie9+
+ *
+ * Styling is done in css.
+ *
+ * Copyleft 2017 Jonas Earendel
+ *
+ * This is free and unencumbered software released into the public domain.
+ *
+ * Anyone is free to copy, modify, publish, use, compile, sell, or
+ * distribute this software, either in source code form or as a compiled
+ * binary, for any purpose, commercial or non-commercial, and by any
+ * means.
+ *
+ * In jurisdictions that recognize copyright laws, the author or authors
+ * of this software dedicate any and all copyright interest in the
+ * software to the public domain. We make this dedication for the benefit
+ * of the public at large and to the detriment of our heirs and
+ * successors. We intend this dedication to be an overt act of
+ * relinquishment in perpetuity of all present and future rights to this
+ * software under copyright law.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * For more information, please refer to <http://unlicense.org>
+ *
+ */
+
+// sort is super fast, even with huge tables, so that is probably not the issue
+// Not solved with documentFragment, same issue... :(
+// My guess is that it is simply too much to hold in memory, since
+// it freezes even before sortable is called if the table is too big in index.html
+
+document.addEventListener('click', function (e) {
+ try {
+ // allows for elements inside TH
+ function findElementRecursive(element, tag) {
+ return element.nodeName === tag ? element : findElementRecursive(element.parentNode, tag)
+ }
+
+ var descending_th_class = ' dir-d '
+ var ascending_th_class = ' dir-u '
+ var ascending_table_sort_class = 'asc'
+ var regex_dir = / dir-(u|d) /
+ var regex_table = /\bsortable\b/
+ var alt_sort = e.shiftKey || e.altKey
+ var element = findElementRecursive(e.target, 'TH')
+ var tr = findElementRecursive(element, 'TR')
+ var table = findElementRecursive(tr, 'TABLE')
+
+ function reClassify(element, dir) {
+ element.className = element.className.replace(regex_dir, '') + dir
+ }
+
+ function getValue(element) {
+ // If you aren't using data-sort and want to make it just the tiniest bit smaller/faster
+ // comment this line and uncomment the next one
+ var value =
+ (alt_sort && element.getAttribute('data-sort-alt')) || element.getAttribute('data-sort') || element.innerText
+ return value
+ // return element.innerText
+ }
+ if (regex_table.test(table.className)) {
+ var column_index
+ var nodes = tr.cells
+
+ // Reset thead cells and get column index
+ for (var i = 0; i < nodes.length; i++) {
+ if (nodes[i] === element) {
+ column_index = element.getAttribute('data-sort-col') || i
+ } else {
+ reClassify(nodes[i], '')
+ }
+ }
+
+ var dir = descending_th_class
+
+ // Check if we're sorting ascending or descending
+ if (
+ element.className.indexOf(descending_th_class) !== -1 ||
+ (table.className.indexOf(ascending_table_sort_class) !== -1 &&
+ element.className.indexOf(ascending_th_class) == -1)
+ ) {
+ dir = ascending_th_class
+ }
+
+ // Update the `th` class accordingly
+ reClassify(element, dir)
+
+ // Extract all table rows
+ var org_tbody = table.tBodies[0]
+
+ // Get the array rows in an array, so we can sort them...
+ var rows = [].slice.call(org_tbody.rows, 0)
+
+ var reverse = dir === ascending_th_class
+
+ // Sort them using Array.prototype.sort()
+ rows.sort(function (a, b) {
+ var x = getValue((reverse ? a : b).cells[column_index])
+ var y = getValue((reverse ? b : a).cells[column_index])
+ var bool = x.length && y.length && !isNaN(x - y) ? x - y : x.localeCompare(y)
+ return bool
+ })
+
+ // Make a clone without content
+ var clone_tbody = org_tbody.cloneNode()
+
+ // Fill it with the sorted values
+ while (rows.length) {
+ clone_tbody.appendChild(rows.splice(0, 1)[0])
+ }
+
+ // And finally replace the unsorted table with the sorted one
+ table.replaceChild(clone_tbody, org_tbody)
+ }
+ } catch (error) {
+ // console.log(error)
+ }
+})
diff --git a/cimonitor-configs/sorting-table-css/sortable.min.css b/cimonitor-configs/sorting-table-css/sortable.min.css
new file mode 100644
index 00000000..59abc454
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/sortable.min.css
@@ -0,0 +1 @@
+.sortable th{cursor:pointer}.sortable th.no-sort{pointer-events:none}.sortable th::after,.sortable th::before{transition:color .1s ease-in-out;font-size:1.2em;color:rgba(0,0,0,0)}.sortable th::after{margin-left:3px;content:"▸"}.sortable th:hover::after{color:inherit}.sortable th.dir-d::after{color:inherit;content:"▾"}.sortable th.dir-u::after{color:inherit;content:"▴"}.sortable th.indicator-left::after{content:""}.sortable th.indicator-left::before{margin-right:3px;content:"▸"}.sortable th.indicator-left:hover::before{color:inherit}.sortable th.indicator-left.dir-d::before{color:inherit;content:"▾"}.sortable th.indicator-left.dir-u::before{color:inherit;content:"▴"}.sortable{--stripe-color: #e4e4e4;--th-color: #fff;--th-bg: #808080;--td-color: #000;--td-on-stripe-color: #000;border-spacing:0}.sortable tbody tr:nth-child(odd){background-color:var(--stripe-color);color:var(--td-on-stripe-color)}.sortable th{background:var(--th-bg);color:var(--th-color);font-weight:normal;text-align:left;text-transform:capitalize;vertical-align:baseline;white-space:nowrap}.sortable td{color:var(--td-color)}.sortable td,.sortable th{padding:10px}.sortable td:first-child,.sortable th:first-child{border-top-left-radius:4px}.sortable td:last-child,.sortable th:last-child{border-top-right-radius:4px}/*# sourceMappingURL=sortable.min.css.map */
\ No newline at end of file
diff --git a/cimonitor-configs/sorting-table-css/sortable.min.css.map b/cimonitor-configs/sorting-table-css/sortable.min.css.map
new file mode 100644
index 00000000..ec7420b5
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/sortable.min.css.map
@@ -0,0 +1 @@
+{"version":3,"sources":["sortable.min.css","sortable-base.scss","sortable.scss"],"names":[],"mappings":"AAAA,aCCE,cACE,CAAA,qBAEA,mBACE,CAAA,yCAEF,gCAEE,CAAA,eACA,CAAA,mBACA,CAAA,oBAGF,eACE,CAAA,WACA,CAAA,0BAGA,aACE,CAAA,0BAKF,aACE,CAAA,WACA,CAAA,0BAKF,aACE,CAAA,WACA,CAAA,mCAIF,UACE,CAAA,oCAEF,gBACE,CAAA,WACA,CAAA,0CAIA,aACE,CAAA,0CAKF,aACE,CAAA,WACA,CAAA,0CAKF,aACE,CAAA,WACA,CAAA,UC5DV,uBACE,CAAA,gBACA,CAAA,gBACA,CAAA,gBACA,CAAA,0BACA,CAAA,gBAEA,CAAA,kCAII,oCACE,CAAA,+BACA,CAAA,aAIN,uBACE,CAAA,qBACA,CAAA,kBACA,CAAA,eACA,CAAA,yBACA,CAAA,uBACA,CAAA,kBACA,CAAA,aAEF,qBACE,CAAA,0BAEF,YAEE,CAAA,kDAEA,0BACE,CAAA,gDAGF,2BACE","file":"sortable.min.css"} \ No newline at end of file
diff --git a/cimonitor-configs/sorting-table-css/sortable.min.js b/cimonitor-configs/sorting-table-css/sortable.min.js
new file mode 100755
index 00000000..629fa509
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/sortable.min.js
@@ -0,0 +1,2 @@
+document.addEventListener("click",function(b){try{var p=function(a){return v&&a.getAttribute("data-sort-alt")||a.getAttribute("data-sort")||a.innerText},q=function(a,c){a.className=a.className.replace(w,"")+c},f=function(a,c){return a.nodeName===c?a:f(a.parentNode,c)},w=/ dir-(u|d) /,v=b.shiftKey||b.altKey,e=f(b.target,"TH"),r=f(e,"TR"),g=f(r,"TABLE");if(/\bsortable\b/.test(g.className)){var l,d=r.cells;for(b=0;b<d.length;b++)d[b]===e?l=e.getAttribute("data-sort-col")||b:q(d[b],"");d=" dir-d ";if(-1!==
+e.className.indexOf(" dir-d ")||-1!==g.className.indexOf("asc")&&-1==e.className.indexOf(" dir-u "))d=" dir-u ";q(e,d);var m=g.tBodies[0],n=[].slice.call(m.rows,0),t=" dir-u "===d;n.sort(function(a,c){var h=p((t?a:c).cells[l]),k=p((t?c:a).cells[l]);return h.length&&k.length&&!isNaN(h-k)?h-k:h.localeCompare(k)});for(var u=m.cloneNode();n.length;)u.appendChild(n.splice(0,1)[0]);g.replaceChild(u,m)}}catch(a){}});
\ No newline at end of file
diff --git a/cimonitor-configs/sorting-table-css/sortable.scss b/cimonitor-configs/sorting-table-css/sortable.scss
new file mode 100755
index 00000000..69965354
--- /dev/null
+++ b/cimonitor-configs/sorting-table-css/sortable.scss
@@ -0,0 +1,44 @@
+@import 'sortable-base.scss';
+
+.sortable {
+ --stripe-color: #e4e4e4;
+ --th-color: #fff;
+ --th-bg: #808080;
+ --td-color: #000;
+ --td-on-stripe-color: #000;
+
+ border-spacing: 0;
+
+ tbody {
+ tr {
+ &:nth-child(odd) {
+ background-color: var(--stripe-color);
+ color: var(--td-on-stripe-color);
+ }
+ }
+ }
+ th {
+ background: var(--th-bg);
+ color: var(--th-color);
+ font-weight: normal;
+ text-align: left;
+ text-transform: capitalize;
+ vertical-align: baseline;
+ white-space: nowrap;
+ }
+ td {
+ color: var(--td-color);
+ }
+ td,
+ th {
+ padding: 10px;
+
+ &:first-child {
+ border-top-left-radius: 4px;
+ }
+
+ &:last-child {
+ border-top-right-radius: 4px;
+ }
+ }
+}
diff --git a/cimonitor-configs/v1-test.yaml b/cimonitor-configs/v1-test.yaml
new file mode 100644
index 00000000..23801106
--- /dev/null
+++ b/cimonitor-configs/v1-test.yaml
@@ -0,0 +1,67 @@
+format:
+ server: ci.linaro.org
+ filename: "@pattern@.html"
+ links:
+ - "@pattern@"
+
+ summary_table_last_run:
+ columns:
+ - last_run_date
+ - "#success"
+ - "#failure"
+ - "#regressed"
+ - "#forced"
+ - "#reducing"
+ - "#bisected"
+ - "#aborted"
+ lines:
+ - anytime
+ - last-week
+ - 6-days-ago
+ - 5-days-ago
+ - 4-days-ago
+ - 3-days-ago
+ - 2-days-ago
+ - 1-days-ago
+ - 0-days-ago
+
+ summary_table_all_runs:
+ columns:
+ - last_run_date
+ - "#success"
+ - "#failure"
+ - "#regressed"
+ - "#forced"
+ - "#reducing"
+ - "#bisected"
+ - "#aborted"
+ lines:
+ - anytime
+ - last-week
+ - 6-days-ago
+ - 5-days-ago
+ - 4-days-ago
+ - 3-days-ago
+ - 2-days-ago
+ - 1-days-ago
+ - 0-days-ago
+
+ details_table:
+ columns:
+ - project
+ - status
+ - last_build
+ - notify_verif
+ - last_success
+ - last_fail
+ - last_forced
+ - nb_force
+ - last_regressed
+ - artifact_version
+ lines:
+ - "tcwg_bmk-code_size-cpu2017rate--gnu-arm-master-Os-build"
+ - "tcwg_bmk-code_size-coremark--gnu_eabi-arm_eabi-master-O2-build"
+ - "tcwg_bmk-code_size-cpu2017rate--gnu-arm-master-O2_LTO-build"
+
+pattern:
+ - v1-test
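+# "@pattern@" in the format section above is substituted with each entry of
+# this list, so this config generates "v1-test.html".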
diff --git a/dashboard-generate-squad.sh b/dashboard-generate-squad.sh
new file mode 100755
index 00000000..dd6ecced
--- /dev/null
+++ b/dashboard-generate-squad.sh
@@ -0,0 +1,422 @@
+#!/bin/bash
+
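+# Create the squad dashboard files (json results + push script) from a build's
+# artifacts. Example invocation (argument values are illustrative only):
+#   dashboard-generate-squad.sh --top_artifacts artifacts \
+#     --baseline_branch linaro-local/ci/tcwg_bmk_gnu_apm/gnu-master-aarch64-spec2k6-Os_LTO \
+#     --components "binutils gcc glibc" --run_date "$(date +%Y%m%d)"
+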
+set -e
+
+# shellcheck source=jenkins-helpers.sh
+. "$(dirname $0)/jenkins-helpers.sh"
+# shellcheck source=round-robin.sh
+. "$(dirname $0)/round-robin.sh"
+
+verbose=false
+squad_mode=
+relative_results=false
+
+# Process args
+convert_args_to_variables "$@"
+
+obligatory_variables top_artifacts baseline_branch components run_date
+
+declare top_artifacts baseline_branch components run_date verbose squad_mode relative_results
+
+if $verbose ; then
+ set -x
+fi
+
+# --------------------CREATE DASHBOARD FILE -------------------
+# Writing dashboard files : json & cmd
+create_dashboard_files()
+{
+ (
+ local dashboard_dir=${dbd[dashboard_dir]}
+ local check_regression_dir=${dbd[check_regression_dir]}
+ local envlist=()
+ local squad_mode_ext=""
+ [ x"$squad_mode" != x"" ] && squad_mode_ext="-$squad_mode"
+
+ squaddir=$dashboard_dir/squad$squad_mode_ext
+ results_csv_file=results$squad_mode_ext/results-brief.csv
+
+ declare -A project_results
+
+ echo " * create_dashboard_files ($results_csv_file)"
+
+ if [ ${dbd[project_kind]} == "bmk" ]; then
+
+ # BMK PROJECTS
+ func_fields=(build run)
+ metric_fields=(perf size vect)
+
+ local results_csv=$top_artifacts/$results_csv_file
+ local nbtest=0 nbpass=0 nbfail=0 nbskip=0
+
+ # -----------------------------
+ # Parse results-vs-prev-brief.csv
+ local header_verified=false
+ local -a header_used=("benchmark" "symbol"
+ "csv-results-1/results:rel_sample"
+ "csv-results-1/results:rel_size"
+ "csv-results-1/results:rel_num_vect_loops"
+ "csv-results-0/results:sample"
+ "csv-results-0/results:size"
+ "csv-results-0/results:num_vect_loops")
+
+ if [ ! -f "$results_csv" ]; then
+ echo ""
+ echo "WARNING: no $results_csv_file file. No data to store in dashboard."
+ echo ""
+ return
+ fi
+
+ # read the header line, then iterate over the data rows
+ {
+ IFS=,
+ declare -A map
+ read -ra headers
+
+ # Check that the necessary columns exist
+ if ! $header_verified; then
+ for h in "${header_used[@]}"; do
+ assert_with_msg "ERROR: $results_csv_file header doesn't contain $h field" \
+ [[ "${IFS}${headers[*]}${IFS}" =~ "${IFS}${h}${IFS}" ]]
+ done
+ fi
+
+ while IFS=, read -ra values; do
+ for i in "${!headers[@]}"; do
+ map["${headers[i]}"]=${values[i]}
+ done
+
+ local bench symb
+ bench=${map[benchmark]}
+ symb=${map[symbol]}
+
+ local rel_sample rel_size rel_vect
+ rel_sample=${map[csv-results-1/results:rel_sample]}
+ rel_size=${map[csv-results-1/results:rel_size]}
+ rel_vect=${map[csv-results-1/results:rel_num_vect_loops]}
+
+ local sample1 size1 vect1
+ sample1=${map[csv-results-1/results:sample]}
+ size1=${map[csv-results-1/results:size]}
+ vect1=${map[csv-results-1/results:num_vect_loops]}
+
+ # Skip all "Mean" values other than "Mean,mean"
+ if [ $bench == "Mean" ] && [ ${symb// /} != "mean" ]; then
+ continue;
+ fi
+
+ if [[ -v project_results["func/build/$bench"] ]] && [ ${project_results["func/build/$bench"]} != "pass" ]; then
+ : # do nothing if a failure is already recorded
+ else
+ case $sample1 in
+ 999999999)
+ project_results["func/build/$bench"]="fail" ; ((nbfail+=1))
+ project_results["func/run/$bench"]="skip" ; ((nbskip+=1))
+
+ # If failing to build, all metrics unknown (-1)
+ vect1=-1 ; size1=-1 ; sample1=-1
+ rel_vect="n/a" ; rel_size="n/a" ; rel_sample="n/a"
+ ;;
+ 888888888)
+ project_results["func/build/$bench"]="pass" ; ((nbpass+=1))
+ project_results["func/run/$bench"]="fail" ; ((nbfail+=1))
+
+ # If failing to run, run metrics unknown (-1)
+ sample1=-1
+ rel_sample="n/a"
+ ;;
+ *)
+ project_results["func/build/$bench"]="pass" ; ((nbpass+=1));
+ project_results["func/run/$bench"]="pass" ; ((nbpass+=1));
+ ;;
+ esac
+
+ if $relative_results; then
+ [ $rel_vect != "n/a" ] && project_results["metrics/vect/$bench"]="$((rel_vect-100))"
+ [ $rel_size != "n/a" ] && project_results["metrics/size/$bench"]="$((rel_size-100))"
+ [ $rel_sample != "n/a" ] && project_results["metrics/perf/$bench"]="$((rel_sample-100))"
+ else
+ [ $vect1 != "-1" ] && project_results["metrics/vect/$bench"]="$vect1"
+ [ $size1 != "-1" ] && project_results["metrics/size/$bench"]="$size1"
+ [ $sample1 != "-1" ] && project_results["metrics/perf/$bench"]="$sample1"
+ fi
+
+ envlist+=("$bench")
+ ((nbtest+=2))
+ fi
+
+ # echo " $bench : ${project_results["func/build/$bench"]}, ${project_results["func/run/$bench"]}, "\
+ # "${project_results["metrics/perf/$bench"]}, ${project_results["metrics/size/$bench"]}, ${project_results["metrics/vect/$bench"]}, $md5_1"
+
+ done
+ unset IFS
+ } < "$results_csv"
+
+ echo " [$nbtest test : $nbpass pass, $nbfail fail, $nbskip skip]"
+
+ elif [ ${dbd[project_kind]} == "kernel" ]; then
+ # KERNEL PROJECTS
+ :
+ assert_with_msg "dashboard creation for kernel projects not implemented yet" true
+ func_fields=(build run)
+ metric_fields=(score)
+
+ else # ${dbd[project_kind]}=="other"
+ # OTHER PROJECTS : basic.
+ # using score only.
+ func_fields=(build run)
+ metric_fields=(score)
+ # one single bench called "test"
+ project_results["func/build/test"]="pass"
+ project_results["func/run/test"]="pass"
+ project_results["metrics/score/test"]="${dbd['score']}"
+
+ envlist=("score")
+ fi
+
+ # get benches sorted so that 001.Mean comes first
+ mapfile -t envlist < <(printf "%s\n" "${envlist[@]}" | sort -n)
+
+ # -----------------------------
+ # Write json files
+ # for each bench :
+ # results-functional.json, results-metrics.json, results-metadata.json
+ rm -rf $squaddir
+ mkdir -p $squaddir
+
+ echo " - writing json for squad"
+
+ # Generate 3 results files : functional, metrics, metadata
+ #
+ for bench in "${envlist[@]}"; do
+
+ # echo " - writing $squaddir/$bench # ${project_results["func/*/$bench"]}"
+ mkdir -p $squaddir/$bench
+
+ # results-functional.json : for now, only @func_fields=(build run)
+ resfile=$squaddir/$bench/results-functional.json
+ echo "{" > "$resfile"
+ if [ "${func_fields[*]}" == "build run" ]; then
+ echo " \"build\" : \"${project_results["func/build/$bench"]}\"," >>"$resfile"
+ echo " \"run\" : \"${project_results["func/run/$bench"]}\"" >>"$resfile"
+ fi
+ echo "}" >> "$resfile"
+
+ # results-metrics.json : @metric_fields=(perf size vect) / (score)
+ local comma=","
+ local metrics_to_show=()
+
+ resfile=$squaddir/$bench/results-metrics.json
+ echo "{" > "$resfile"
+ for metric in "${metric_fields[@]}"; do
+ [[ -v project_results["metrics/$metric/$bench"] ]] && metrics_to_show+=("$metric")
+ done
+ for metric in "${metrics_to_show[@]}"; do
+ [ $metric == ${metrics_to_show[-1]} ] && comma=""
+ echo " \"$metric\" : \"${project_results["metrics/$metric/$bench"]}\"$comma" >>"$resfile"
+ done
+ echo "}" >> "$resfile"
+ # results-metadata.json
+ resfile=$squaddir/$bench/results-metadata.json
+ local base_artifacts_url=https://git-us.linaro.org/toolchain/ci/base-artifacts.git
+
+ # blanks below are used to order displayed metadata table (dirty!)
+ cat > $resfile << EOF
+{
+ " job_status": "${dbd['job_status']//\"/\\\"}",
+ " details": "$base_artifacts_url/plain/notify/mail-body.txt?h=${dbd['base-artifacts_branch']}&id=${dbd['base-artifacts_sha1']}",
+ "datetime": "${dbd['datetime']}",
+ "build_url": "${dbd['master_job_url']}",
+ "build_log": "${dbd['master_job_url']}console",
+EOF
+
+ [[ -v dbd['binutils_rev'] ]] && echo " \"version_binutils\":\"${dbd['binutils_rev']}\"," >> "$resfile"
+ [[ -v dbd['gcc_rev'] ]] && echo " \"version_gcc\": \"${dbd['gcc_rev']}\"," >> "$resfile"
+ [[ -v dbd['glibc_rev'] ]] && echo " \"version_glibc\": \"${dbd['glibc_rev']}\"," >> "$resfile"
+ [[ -v dbd['llvm_rev'] ]] && echo " \"version_llvm\": \"${dbd['llvm_rev']}\"," >> "$resfile"
+ [[ -v dbd['linux_rev'] ]] && echo " \"version_linux\": \"${dbd['linux_rev']}\"," >> "$resfile"
+ [[ -v dbd['qemu_rev'] ]] && echo " \"version_qemu\": \"${dbd['qemu_rev']}\"," >> "$resfile"
+
+ cat >> $squaddir/$bench/results-metadata.json << EOF
+ "artifact_results": "$base_artifacts_url/tree/?h=${dbd['base-artifacts_branch']}&id=${dbd['base-artifacts_sha1']}"
+}
+EOF
+
+ done
+
+ # -----------------------------
+ # Generate one annotation file : a single-word summary
+ # Status is displayed in job_status of metadata.
+ #
+ resfile=$squaddir/summary-annotation.txt
+ touch $resfile
+ if [ -f $dashboard_dir/../mail-subject.txt ]; then
+ mail_subject=$(cat $dashboard_dir/../mail-subject.txt)
+ case "$mail_subject" in
+ *"grew in size"*) echo "Regression(size)" >> $resfile ;;
+ *"reduced in size"*) echo "Improvement(size)" >> $resfile ;;
+ *"slowed down"*) echo "Regression(speed)" >> $resfile ;;
+ *"speeds up"*) echo "Improvement(speed)" >> $resfile ;;
+ *"reduced by"*) echo "Regression(vect)" >> $resfile ;;
+ *"increased up by"*) echo "Improvement(vect)" >> $resfile ;;
+ *"failed to build"*) echo "Regression(build failed)" >> $resfile ;;
+ *"built OK, but failed to run"*) echo "Improvement(run still failed)" >> $resfile ;;
+ *"failed to run"*) echo "Regression(run failed)" >> $resfile ;;
+ *"No change"*) ;; # No annotation
+ *) ;; # No annotation
+ esac
+ fi
+
+ # -----------------------------
+ # Generate one command file to push all benches
+ #
+ local pushcmdfile=$squaddir/dashboard-push-squad.sh
+ rm -f $pushcmdfile
+
+ echo " - generating cmd to push results"
+
+ local squad_server prj grp bld squad_url
+ local results_results results_metrics results_metadata
+
+ squad_server=https://qa-reports.linaro.org/
+ # TCWG_SQUAD_TOKEN is defined in credentials
+ grp="${FORCE_SQUAD_GRP-${dbd[ci_project]}}"
+ prj="${FORCE_SQUAD_PJT-${dbd[ci_config]}}"
+ bld="$(echo ${dbd[master_job_url]}|cut -d/ -f6)"
+
+ cat > $pushcmdfile << EOF
+#!/bin/bash
+cd \$(dirname \$0)
+
+set -ex
+
+squad_server=$squad_server
+
+if ! wget -q -o /dev/null $squad_server/$grp/$prj/; then
+ echo "WARNING: No project under $squad_server/$grp/$prj/"
+ exit 0
+fi
+if [ ! -v TCWG_SQUAD_TOKEN ]; then
+ echo "ERROR: No TCWG_SQUAD_TOKEN defined in your environment"
+ exit 1
+fi
+
+top_artifacts=\$(pwd)/../..
+base_artifacts_rev=$(git -C base-artifacts rev-parse ${rr[baseline_branch]})
+sed -i -e "s|#BASE-ARTIFACTS-REV#|\$base_artifacts_rev|" */results-metadata.json
+
+echo "Uploading results to $squad_server/$grp/$prj/$bld"
+
+set +x
+
+EOF
+
+ # If there's no squad project specified, only put a warning in the push cmd file.
+ if [ x"$grp" == x"" ]; then
+ echo "echo \"WARNING : Nowhere to push results. grp is empty.\"" >> $pushcmdfile
+ echo "WARNING : Nowhere to push results. grp is empty."
+ else
+ for bench in "${envlist[@]}"; do
+
+ squad_url=$squad_server/api/submit/$grp/$prj/$bld/$bench
+
+ # result files
+ results_results="$bench/results-functional.json"
+ results_metrics="$bench/results-metrics.json"
+ results_metadata="$bench/results-metadata.json"
+
+ cat >> $pushcmdfile << EOF
+echo "pushing $squad_url"
+curl --silent --header "Authorization: Token \$TCWG_SQUAD_TOKEN" \\
+ --form tests=@$results_results \\
+ --form metrics=@$results_metrics \\
+ --form metadata=@$results_metadata \\
+ $squad_url
+
+EOF
+ done
+
+ # if annotation file empty, do not push it.
+ if [ x"$(cat $squaddir/summary-annotation.txt)" != x"" ]; then
+ cat >> $pushcmdfile << EOF
+# Add annotation for this build
+api_of_this_build=\$(curl -s \$squad_server/$grp/$prj/build/$bld/ | \\
+ grep 'api view of this build' | sed -e 's|.*<a href="\(.*\)">.*|\1|')
+
+curl --header "Authorization: Token \$TCWG_SQUAD_TOKEN" --data "description=$(cat $squaddir/summary-annotation.txt)&build=\$api_of_this_build" \$squad_server/api/annotations/
+
+EOF
+ fi
+ fi
+
+ chmod a+x $pushcmdfile
+ )
+}
+
+
+# --------------------MAIN PROCEDURE -------------------
+set -euf -o pipefail
+
+# For a short time manifests used a "debug" array, so we need
+# to declare it when sourcing those manifests.
+# shellcheck disable=SC2034
+declare -A debug
+
+# Source manifest
+manifest=""
+declare check_regression_dir
+[ -f "$top_artifacts/manifest.sh" ] && manifest=manifest.sh
+[ -f "$top_artifacts/jenkins/manifest.sh" ] && manifest=jenkins/manifest.sh
+[ x"$manifest" == x"" ] && error "Manifest not found"
+# shellcheck disable=SC1090
+source $top_artifacts/$manifest
+
+# override by top_artifacts in this context
+rr[top_artifacts]=$top_artifacts
+
+# set useful infos for dashboard
+#
+declare -A dbd
+
+dbd[master_job_url]="${BUILD_URL-$(pwd)}"
+
+dbd[ci_project]="${rr[ci_project]}"
+dbd[ci_config]="${rr[ci_config]}"
+
+dbd[base-artifacts_branch]="$baseline_branch"
+
+if [ -d $top_artifacts/.git ]; then
+ dbd[base-artifacts_sha1]="$(git -C $top_artifacts show --no-patch --pretty=%h)"
+else
+ dbd[base-artifacts_sha1]='#BASE-ARTIFACTS-REV#'
+fi
+
+dbd[datetime]=$run_date
+
+case $baseline_branch in
+ */tcwg_kernel*) dbd[project_kind]="kernel" ;;
+ */tcwg_bmk*) dbd[project_kind]="bmk" ;;
+ *) dbd[project_kind]="other" ;;
+esac
+
+dbd[check_regression_dir]=$(find $top_artifacts/ -maxdepth 2 -name "*-check_regression")
+
+dbd[dashboard_dir]=$top_artifacts/notify/dashboard
+
+dbd[is_regression_build]=false
+[ -f ${dbd[check_regression_dir]}/results.regressions ] && dbd[is_regression_build]=true
+
+dbd['score']=$(grep -v '^#' $top_artifacts/results | tail -1)
+
+dbd[job_status]="Success"
+[ -f $top_artifacts/notify/mail-subject.txt ] && dbd[job_status]="$(cat $top_artifacts/notify/mail-subject.txt)"
+
+for c in $components; do
+ dbd[${c}_rev]="${rr[${c}_rev]}"
+done
+
+dbd[benchmark_logs]=bkp-01:/home/tcwg-benchmark/results-$(cat $top_artifacts/results_id || echo '<unknown>')
+
+# ----------------------------
+# Generate dashboard files : json files, push cmd file
+create_dashboard_files
diff --git a/dashboard-push-one-branch.sh b/dashboard-push-one-branch.sh
new file mode 100755
index 00000000..19343fea
--- /dev/null
+++ b/dashboard-push-one-branch.sh
@@ -0,0 +1,179 @@
+#!/bin/bash
+
+# This script iterates over a base-artifacts BRANCH and, for each base-artifacts commit:
+# - recreates the dashboard directory if necessary
+# - pushes the results to squad
+#
+# Example to push a base-artifacts branch :
+# dashboard-push-one-branch.sh --top_artifacts base-artifacts --baseline_branch linaro-local/ci/tcwg_bmk_gnu_apm/gnu-master-aarch64-spec2k6-Os_LTO
+#
+# This requires the squad project to exist
+
+set -e
+scripts=$(dirname $0)
+
+dryrun=false
+interactive=false
+
+# shellcheck source=jenkins-helpers.sh
+. "$(dirname $0)/jenkins-helpers.sh"
+# shellcheck source=round-robin.sh
+. "$(dirname $0)/round-robin.sh"
+
+convert_args_to_variables "$@"
+
+obligatory_variables top_artifacts baseline_branch
+
+declare top_artifacts baseline_branch
+
+# rebuild component list from the artifacts state.
+get_components()
+{
+ (
+ local components=""
+
+ manifest=""
+ [ -f "$top_artifacts/manifest.sh" ] && manifest=manifest.sh
+ [ -f "$top_artifacts/jenkins/manifest.sh" ] && manifest=jenkins/manifest.sh
+ [ x"$manifest" == x"" ] && error "Manifest not found"
+ # shellcheck disable=SC1090
+ source $top_artifacts/$manifest
+
+ for c in binutils gcc glibc llvm linux; do
+ if [ -f $top_artifacts/git/${c}_rev ] || [[ -v rr[${c}_rev] ]] ; then
+ components="$components $c"
+ fi
+ done
+ echo $components
+ )
+}
+
+# ----------------------- RECREATE FIRST RESULTS -----------------------------
+generate_csv_if_necessary()
+{
+ (
+ echo "* generate_csv_if_necessary ..."
+
+ # Return early if results-vs-{first,prev}/results-brief.csv already exist
+ local check_regression_dir
+ check_regression_dir=$(find $top_artifacts/ -maxdepth 2 -name "*-check_regression")
+ if [ -f $top_artifacts/results-vs-first/results-brief.csv ] &&
+ [ -f $top_artifacts/results-vs-prev/results-brief.csv ]; then
+ return;
+ fi
+
+ # If no NN-check_regression dir exists, create XX-check_regression
+ # and regenerate the results-brief.csv files.
+ if [ x"$check_regression_dir" == x"" ]; then
+ check_regression_dir="$top_artifacts/XX-check_regression"
+ mkdir -p $check_regression_dir
+ fi
+
+ local compare_opts
+ case $baseline_branch in
+ *arm_eabi*_LTO*) compare_opts="--num_symbols 0 --entry_threshold 10 --has_perf_logs no" ;;
+ *_LTO*) compare_opts="--num_symbols 0 --entry_threshold 10" ;;
+ *arm_eabi*) compare_opts="--has_perf_logs no" ;;
+ esac
+
+ local first_rev
+ first_rev=$(git -C $top_artifacts rev-list HEAD | tail -n 1)
+ git -C $top_artifacts show $first_rev:results_id > "$top_artifacts/results_id.first"
+
+ # if baseline is empty, we need to compare against itself
+ if [ ! -f $baseline_artifacts/results_id ]; then
+ cp $top_artifacts/results_id $baseline_artifacts/results_id
+ fi
+
+ local pid1 pid2
+ # Compare vs first run
+ $scripts/tcwg-benchmark-results.sh \
+ --results_ref "$(cat $top_artifacts/results_id.first)" ++results "$(cat $top_artifacts/results_id)" \
+ --top_artifacts "$top_artifacts/results-vs-first" --verbose true $compare_opts \
+ > $top_artifacts/results-vs-first/tcwg-benchmark-results.log 2>&1 &
+ pid1=$!
+
+ # Compare vs previous run
+ $scripts/tcwg-benchmark-results.sh \
+ --results_ref "$(cat $baseline_artifacts/results_id)" ++results "$(cat $top_artifacts/results_id)" \
+ --top_artifacts "$top_artifacts/results-vs-prev" --verbose true $compare_opts \
+ > $top_artifacts/results-vs-prev/tcwg-benchmark-results.log 2>&1 &
+ pid2=$!
+
+ local res=0
+ wait $pid1 || res=$?
+ wait $pid2 || res=$?
+ if [ $res != 0 ]; then
+ echo "Failure while comparing run results."
+ fi
+ )
+}
+
+# --------------------------- MAIN PROCEDURE --------------------------------
+gitserver=https://git-us.linaro.org/toolchain/ci/base-artifacts.git
+top_artifacts=top_artifacts
+baseline_artifacts=current_baseline
+
+baseline_branch=${baseline_branch#refs/heads/}
+
+if [ ! -d $top_artifacts ]; then
+ git clone --single-branch --branch empty $gitserver $top_artifacts
+fi
+
+# Create empty dir to imitate baseline
+rm -rf $baseline_artifacts
+mkdir $baseline_artifacts
+
+echo "# base_artifact : fetch $baseline_branch"
+git -C $top_artifacts reset -q --hard
+git -C $top_artifacts fetch -q origin $baseline_branch
+git -C $top_artifacts branch -f $baseline_branch FETCH_HEAD
+git -C $top_artifacts clean -d -f
+git -C $top_artifacts checkout -q $baseline_branch
+
+# Dump the list of commits that will be processed
+echo "----------------------------- $baseline_branch -----------------"
+git -C $top_artifacts log $baseline_branch --graph --oneline
+
+for sha1 in $(git -C $top_artifacts log $baseline_branch --pretty=%H | tac); do
+ (
+ echo "--------------------------------------------------------------------------------------------------"
+ # Dump the commit currently being processed
+ git -C $top_artifacts show --no-patch --oneline $sha1
+
+ git -C $top_artifacts reset -q --hard
+ git -C $top_artifacts checkout -q -f $sha1
+
+ ans="y"
+ if $interactive; then
+ read -p "Do you want to add this results (y/n) ?" ans
+ fi
+
+ if [ "$ans" == "y" ]; then
+ declare -A rr
+ rr[top_artifacts]=$top_artifacts
+ rr[baseline_branch]=$baseline_branch
+ rr[components]=$(get_components)
+
+ # - create results-*.csv
+ generate_csv_if_necessary
+
+ # - create dashboard-*.sh scripts
+ # - run dashboard-generate.sh
+ create_dashboard_dir
+
+ # - run dashboard-push.sh
+ if ! $dryrun; then
+ $top_artifacts/dashboard/dashboard-push.sh
+ else
+ echo "Results NOT pushed"
+ fi
+ fi
+
+ rm -rf ${baseline_artifacts:?}/*
+ mv ${top_artifacts:?}/* $baseline_artifacts/
+
+ # clean artifacts dir
+ git -C $top_artifacts clean -d -f
+ )
+done
diff --git a/docker-run.sh b/docker-run.sh
index b021b9bf..6e8f6025 100755
--- a/docker-run.sh
+++ b/docker-run.sh
@@ -35,4 +35,7 @@ else
JENKINS_FLOCK=""
fi
-${prefix}container_exec "$@"
+${prefix}container_exec "$@" &
+res=0 && wait $! || res=$?
+
+exit $res
diff --git a/downstream_patches/Makefile.defaults-Atomic-creation-of-fortran-sources-spec2k6.patch b/downstream_patches/Makefile.defaults-Atomic-creation-of-fortran-sources-spec2k6.patch
new file mode 100644
index 00000000..76e081b3
--- /dev/null
+++ b/downstream_patches/Makefile.defaults-Atomic-creation-of-fortran-sources-spec2k6.patch
@@ -0,0 +1,30 @@
+From bdf863eb6885b0a0bfe8c311d3aa9e3850738031 Mon Sep 17 00:00:00 2001
+From: Laurent Alfonsi <laurent.alfonsi@linaro.org>
+Date: Thu, 19 Jan 2023 14:03:50 +0000
+Subject: [PATCH] Makefile.defaults: Atomic creation of fortran sources files
+
+---
+ benchspec/Makefile.defaults | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/benchspec/Makefile.defaults b/benchspec/Makefile.defaults
+index 4787ea4c..4b5e0dff 100644
+--- a/benchspec/Makefile.defaults
++++ b/benchspec/Makefile.defaults
+@@ -384,9 +384,12 @@ ifdef NEEDATFILE
+ $(ECHO) $@ >> objectnames
+ endif
+
++TMPDIR:=$(shell mktemp -d)
++
+ # Pre-processed FORTRAN90
+ %.fppized.f90: %.F90
+- specperl $(SPEC)/bin/specpp $(FINAL_FPPFLAGS) $< -o $(addsuffix .fppized.f90,$(basename $<))
++ specperl $(SPEC)/bin/specpp $(FINAL_FPPFLAGS) $< -o $(TMPDIR)/$(addsuffix .fppized.f90,$(basename $<)) && \
++ mv $(TMPDIR)/$(addsuffix .fppized.f90,$(basename $<)) $(addsuffix .fppized.f90,$(basename $<))
+
+ %$(OBJ): %.fppized.f90
+ $(FC) $(FOBJOPT) $(FINAL_FFLAGS) $<
+--
+2.25.1
+
diff --git a/downstream_patches/Makefile.defaults-Atomic-creation-of-fortran-sources.patch b/downstream_patches/Makefile.defaults-Atomic-creation-of-fortran-sources.patch
new file mode 100644
index 00000000..57ee5d8f
--- /dev/null
+++ b/downstream_patches/Makefile.defaults-Atomic-creation-of-fortran-sources.patch
@@ -0,0 +1,31 @@
+From d285ae0fabb454ac8ee59a26bb127a571fc92749 Mon Sep 17 00:00:00 2001
+From: Laurent Alfonsi <laurent.alfonsi@linaro.org>
+Date: Mon, 21 Nov 2022 15:56:30 +0000
+Subject: [PATCH] Makefile.defaults: Atomic creation of fortran sources
+ files
+
+---
+ benchspec/Makefile.defaults | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/benchspec/Makefile.defaults b/benchspec/Makefile.defaults
+index 426b00de..bf02edf0 100644
+--- a/benchspec/Makefile.defaults
++++ b/benchspec/Makefile.defaults
+@@ -405,9 +405,12 @@ ifdef NEEDATFILE
+ $(file >>$(OBJNAMES),$@)
+ endif
+
++TMPDIR:=$(shell mktemp -d)
++
+ # Pre-processed FORTRAN90
+ %.fppized.f90: %.F90
+- $(SPEC)/bin/specperl $(SPEC)/bin/harness/specpp $(FINAL_FPPFLAGS) $< -o $(addsuffix .fppized.f90,$(basename $<))
++ $(SPEC)/bin/specperl $(SPEC)/bin/harness/specpp $(FINAL_FPPFLAGS) $< -o $(TMPDIR)/$(addsuffix .fppized.f90,$(basename $<)) && \
++ mv $(TMPDIR)/$(addsuffix .fppized.f90,$(basename $<)) $(addsuffix .fppized.f90,$(basename $<))
+
+ %$(OBJ): %.fppized.f90
+ $(FC) $(FOBJOPT) $(FINAL_FFLAGS) $<
+--
+2.25.1
+
diff --git a/downstream_patches/llvm-vect-metric.diff b/downstream_patches/llvm-vect-metric.diff
new file mode 100644
index 00000000..574fd8a8
--- /dev/null
+++ b/downstream_patches/llvm-vect-metric.diff
@@ -0,0 +1,57 @@
+diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+index 0807d2a7e5a2..da8a7df58ac8 100644
+--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
++++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+@@ -149,6 +149,7 @@
+ #include <string>
+ #include <tuple>
+ #include <utility>
++#include <fstream>
+
+ using namespace llvm;
+
+@@ -432,6 +433,28 @@ static std::optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE,
+ return std::nullopt;
+ }
+
++/// Log vect metric to <srcfile>.vect.csv
++static void logVectMetric(Function& F, unsigned loopsVectorizedBefore,
++ unsigned loopsVectorized)
++{
++ auto fname = F.getParent()->getSourceFileName() + ".vect.csv";
++ bool writeHeader = false;
++ std::ifstream tmp_f(fname);
++ if (!tmp_f)
++ writeHeader = true;
++ else
++ tmp_f.close();
++
++ std::ofstream vectStatsFile;
++ vectStatsFile.open(fname, std::ios_base::app);
++ if (writeHeader)
++ vectStatsFile << "symbol,num_vect_loops" << "\n";
++
++ unsigned loopsVectorizedFunc = loopsVectorized - loopsVectorizedBefore;
++ vectStatsFile << F.getName().str() << "," << loopsVectorizedFunc << "\n";
++ vectStatsFile.close();
++}
++
+ /// Return a vector containing interleaved elements from multiple
+ /// smaller input vectors.
+ static Value *interleaveVectors(IRBuilderBase &Builder, ArrayRef<Value *> Vals,
+@@ -10360,6 +10383,7 @@ LoopVectorizeResult LoopVectorizePass::runImpl(
+
+ LoopsAnalyzed += Worklist.size();
+
++ unsigned loopsVectorizedBefore = LoopsVectorized.getValue();
+ // Now walk the identified inner loops.
+ while (!Worklist.empty()) {
+ Loop *L = Worklist.pop_back_val();
+@@ -10380,6 +10404,7 @@ LoopVectorizeResult LoopVectorizePass::runImpl(
+ }
+ }
+
++ logVectMetric(F, loopsVectorizedBefore, LoopsVectorized.getValue());
+ // Process each loop nest in the function.
+ return LoopVectorizeResult(Changed, CFGChanged);
+ }
diff --git a/generate-cimonitor-dashboard.py b/generate-cimonitor-dashboard.py
new file mode 100755
index 00000000..920a3d25
--- /dev/null
+++ b/generate-cimonitor-dashboard.py
@@ -0,0 +1,1037 @@
+#!/usr/bin/python3
+
+# Usage :
+# generate-cimonitor-dashboard.py cimonitor-configs/TCWG.yaml /public_html/
+#
+
+import sys
+import json
+import os
+import yaml
+import datetime
+import re
+import tempfile
+import traceback
+
+scripts_dir=os.path.dirname(sys.argv[0])
+
+import argparse
+parser = argparse.ArgumentParser(description='Generate ci monitor html files',
+ prog='generate-cimonitor-dashboard.py',
+ usage='%(prog)s -o public_html [config_files]+')
+parser.add_argument('-o', '--output', type=str, nargs=1,
+ help='Output directory of html generated files')
+parser.add_argument('configs', type=str, nargs='+',
+ help='Yaml config files to describe html format')
+
+
+########################################################################################################################
+# Basic, low level functions
+def nice_print(data):
+ json_formatted_str = json.dumps(data, indent=4)
+ print(json_formatted_str)
+
+def download_and_open(url):
+ fd, tmpnm=tempfile.mkstemp()
+ os.system("wget " + "-o /dev/null " + "-O " + tmpnm + " " + url)
+ return os.fdopen(fd, 'r'), tmpnm
+
+def close_and_remove(tmpf, tmpnm):
+ tmpf.close()
+ os.remove(tmpnm)
+
+dt_now=datetime.datetime.now()
+def days_since(timestamp):
+ return round(((int(dt_now.strftime('%s')) - round(timestamp/1000)) / 3600) / 24)
+def time_since(timestamp):
+ nhours=round((int(dt_now.strftime('%s')) - round(timestamp/1000)) / 3600)
+ ndays=int(nhours / 24)
+ nhours=round( nhours - ndays*24 )
+ return "%02ddays %02dhrs ago"%(ndays, nhours)
+
+########################################################################################################################
+## CONFIG FILE
+ci_url="https://ci.linaro.org/"
+ci_url_view=ci_url+"view/"
+ci_url_job=ci_url+"job/"
+ci={}
+
+"""
+Read and load yaml config file as it is."""
+def get_config(config_name):
+ with open(config_name, 'r') as file:
+ config = yaml.safe_load(file)
+ config_sanity_check(config)
+ # nice_print(config)
+ all_configs=config_instantiate_on_all_pattern(config)
+ # nice_print(all_configs)
+ return all_configs
+
+def config_sanity_check(config):
+ # TO BE DONE
+ # nice_print(config)
+ if 'format' not in config:
+ assert("format not exists")
+
+def config_instantiate_on_all_pattern(config):
+ all_configs=[]
+
+ # Assume no pattern means LNT (static) dashboard, keep as-is
+ if 'pattern' not in config:
+ all_configs.append(config)
+ return all_configs
+
+ all_jobs=get_ci_page("https://ci.linaro.org", request="?tree=jobs[name]")
+ for pattern in config['pattern']:
+ config_fmt=config['format']
+ instantiated_config={}
+ instantiated_config['filename']=re.sub("@pattern@", pattern, config_fmt['filename'])
+
+ # summary_tables
+ if 'summary_table_all_runs' in config_fmt:
+ instantiated_config['summary_table_all_runs']=config_fmt['summary_table_all_runs']
+ if 'summary_table_last_run' in config_fmt:
+ instantiated_config['summary_table_last_run']=config_fmt['summary_table_last_run']
+
+ # links
+ if 'links' in config_fmt:
+ instantiated_config['links']=[]
+ for j in range(0, len(config_fmt['links'])):
+ if re.search('@pattern@', config_fmt['links'][j]):
+ # Get matching pattern on all config[pattern] list
+ link_pattern=re.sub("@pattern@", pattern, config_fmt['links'][j])
+ for pat in config['pattern']:
+ if link_pattern==pat or not re.search(pattern, pat):
+ continue
+
+ if re.search(link_pattern+"-", pat): char="-"
+ elif re.search(link_pattern+"_", pat): char="_"
+
+ # print("ref=%s tst=%s (char=%s)" %(pattern,pat,char))
+ if pat.count(char)>link_pattern.count(char)+1:
+ continue
+ instantiated_config['links'].append(pat)
+
+ else:
+ instantiated_config['links'].append( config_fmt['links'][j] )
+
+ # details_table
+ instantiated_config['details_table']={}
+ instantiated_config['details_table']['columns']=config_fmt['details_table']['columns']
+ instantiated_config['details_table']['lines']=[]
+ for j in range(0, len(config_fmt['details_table']['lines'])):
+ if re.search('@pattern@', config_fmt['details_table']['lines'][j]) or re.search(r'\*', config_fmt['details_table']['lines'][j]):
+ # Get matching pattern on all CI jobs
+ if pattern=="tcwg": pattern="tcwg_"
+ pjt_pattern = re.sub("@pattern@", pattern, config_fmt['details_table']['lines'][j])
+ for job in all_jobs['jobs']:
+ if re.search("^"+pjt_pattern+"$", job['name']):
+ instantiated_config['details_table']['lines'].append( job['name'] )
+ else:
+ instantiated_config['details_table']['lines'].append( config_fmt['details_table']['lines'][j] )
+
+ all_configs.append(instantiated_config)
+
+ return all_configs
+
+
+########################################################################################################################
+## COMPUTE MESSAGE ROUTINES
+"""
+Compute best message to display using internal ci-status representation
+- compute_smart_status()
+- compute_smart_diag()
+- compute_color()
+"""
+
+########################
+# compute_smart_status
+"""
+compute_smart_status()
+
+The status reported can be any stage of the RR algorithm :
+ init / success / reducing / bisecting / forced / failure
+"""
+def compute_smart_status(build):
+ ret_attr={'text':"-", 'hlink':"", 'class':"", 'color':""}
+
+ if not build['result']:
+ return ret_attr
+
+ # default status (Success, failure, aborted)
+ ret_attr['text']=build['result']
+ ret_attr['color']=compute_color(build, ret_attr['text'])
+
+ # refine with , displayName, nb_components
+ displayname=build['displayName']
+ components=re.sub("^#[0-9]*(.*-)R.*",r'\1',displayname)
+ nb_components=components.count("-")-1
+
+ if re.search(r'.*-force', displayname):
+ ret_attr['text']="FORCED"
+ elif re.search(r'.*-init', displayname):
+ ret_attr['text']="INIT"
+ elif re.search(r'.*-trigger-bisect', displayname):
+ ret_attr['text']="BISECTING"
+ elif re.search(r'slowed down|grew in size|vect reduced|sve reduced|failed to build', displayname):
+ ret_attr['text']="REGRESSED"
+ elif nb_components==1 and 'actions' in build:# and 'parameters' in build['actions']:
+ params=[]
+ for act in build['actions']:
+ if 'parameters' in act:
+ params=act['parameters']
+ for param in params:
+ if re.sub("-(.*)-",r'\1_git', components) == param['name'] and \
+ param['value'] != "default":
+ ret_attr['text']="REDUCING"
+ break
+
+ build['rr_status']=ret_attr
+ return ret_attr
+
+########################
+# compute_smart_diag
+"""
+compute_smart_diag()
+
+It mainly reads the project's lastBuild, artifacts/results, and console log to compute the best diag for this build
+"""
+def compute_smart_diag(project, build):
+ ret_attr={'text':"-", 'hlink':"", 'class':"", 'color':""}
+ #ret_attr['hlink']=ci_url_job+project['project_name']+"/lastCompletedBuild"+"/artifact/artifacts/results"+"/*view*/"
+ if 'result' in build and build['result'] == "SUCCESS":
+ file, tmpf = download_and_open(ci_url_job+project['project_name']+"/"+str(build['number'])+"/artifact/artifacts/results-vs-prev/csv-results-1/status.csv")
+ nb_failed=0
+ nb_passed=0
+ if not file:
+ return ret_attr
+ ret_attr={'text':"none", 'hlink':"", 'class':"", 'color':""}
+ for items in file:
+ if re.search("failed-to-build", items) or re.search("failed-to-run", items):
+ nb_failed=nb_failed+1
+ elif re.search("success", items):
+ nb_passed=nb_passed+1
+ close_and_remove(file, tmpf)
+ if nb_failed!=0 or nb_passed!=0:
+ ret_attr['text']="%d fails out of %d"% (nb_failed, nb_failed+nb_passed)
+ return ret_attr
+
+ # return diag if found
+ file, tmpf = download_and_open(ci_url_job+project['project_name']+"/"+str(build['number'])+"/artifact/artifacts/results")
+ last_step="-"
+ for items in file:
+ if re.search("Benchmarking infra is offline", items):
+ ret_attr['text']="Board is offline"
+ ret_attr['class']='diag'
+ break
+ elif re.search("slowed down", items):
+ ret_attr['text']="slowed down"
+ ret_attr['class']='diag'
+ break
+ elif re.search("speeds up", items):
+ ret_attr['text']="speeds up"
+ ret_attr['class']='diag'
+ break
+ elif re.search("grew in size", items):
+ ret_attr['text']="grew in size"
+ ret_attr['class']='diag'
+ break
+ elif re.search(r'vect reduced|sve reduced', items):
+ ret_attr['text']="vect/sve reduced"
+ ret_attr['class']='diag'
+ break
+
+ elif re.search("build errors in logs", items):
+ ret_attr['text']="Build errors in : "+last_step
+ break
+
+ elif re.search("internal compiler error", items):
+ ret_attr['text']="ICE in : "+last_step
+ break
+
+ elif re.search(r'^# .*(reset_artifacts|build_abe|build_bmk_llvm|benchmark|linux_n_obj)', items):
+ last_step=re.sub("^# ","", items)
+ last_step=re.sub(" --.*","", last_step)
+ last_step=re.sub(":","", last_step)
+ ret_attr['text']=last_step
+ close_and_remove(file, tmpf)
+
+ file, tmpf = download_and_open(ci_url_job+project['project_name']+"/"+str(build['number'])+"/consoleText")
+ build_machine=""
+ for items in file:
+ if re.search("No space left on device", items):
+ ret_attr['text']="No space left on device"
+ ret_attr['class']='diag'
+ break
+ elif re.search(r'java.*Exception', items):
+ ret_attr['text']="Java Exception"
+ ret_attr['class']='diag'
+ break
+ elif re.search("Build timed out", items):
+ ret_attr['text']="Build timed out"
+ ret_attr['class']='diag'
+ break
+ elif re.search("STOPPING at .* due to failure", items):
+ ret_attr['text']=re.sub("STOPPING at (.*) due to failure\n",r'\1', items)
+ ret_attr['class']='diag'
+ break
+ elif re.search("CARRYING ON after failure in .*", items):
+ ret_attr['text']=re.sub("CARRYING ON after failure in (.*)\n",r'\1', items)
+ ret_attr['text']=re.sub("build_abe ",'', ret_attr['text'])
+ ret_attr['class']='diag'
+ break
+ elif re.search("^ERROR: .* failed", items):
+ ret_attr['text']=re.sub("ERROR: (.*) failed\n",r'\1', items)
+ ret_attr['class']='diag'
+ break
+ close_and_remove(file, tmpf)
+
+ if ret_attr['color'] == "":
+ ret_attr['color']=compute_color(build, ret_attr['text'])
+
+ if ret_attr['text'] != "-":
+ return ret_attr
+
+ # Otherwise return the last step
+ ret_attr['text']=last_step
+ return ret_attr
+
+
+"""
+compute_color()
+
+Choose best color
+"""
+def compute_color(pjt_info_build, text):
+ # CI failure
+ if re.search(r'ABORTED', text):
+ return "purple"
+ elif re.search(r'Board is offline|Java Exception|No space left on device', text):
+ return "purple"
+
+ # failure (normal flow)
+ elif re.search(r'FAILURE', text):
+ return "red"
+
+ elif re.search(r'REGRESSED|REDUCING|BISECTING', text):
+ return "#E57373" # lightred
+
+ # Success (normal flow)
+ elif re.search(r'FORCED|INIT|SUCCESS', text):
+ return "green"
+
+ elif not pjt_info_build or not pjt_info_build['result']:
+ return ""
+
+ elif re.search(r'ABORTED', pjt_info_build['result']):
+ return "purple"
+ elif re.search(r'SUCCESS', pjt_info_build['result']):
+ return "green"
+
+ return ""
+
+
+"""
+get_artifact_format_version()
+
+Get version of artifact format <major>.<minor>
+"""
+def get_artifact_format_version(project_name):
+ file, tmpf = download_and_open(ci_url_job+project_name+"/artifact/artifacts/manifest.sh")
+ major="-"
+ minor="-"
+ for items in file:
+ if re.search("rr\[major\]=", items):
+ major=re.sub("rr\[major\]=\"([0-9]*)\"",r'\1', items.strip())
+ elif re.search("rr\[minor\]=", items):
+ minor=re.sub("rr\[minor\]=\"([0-9]*)\"",r'\1', items.strip())
+ close_and_remove(file, tmpf)
+ return "%s.%s" % (major,minor)
+
+########################################################################################################################
+## BUILD CI STATUS REPRESENTATION
+"""
+Get info from CI server and build ci-status representation
+- get_ci_page()
+- get_ci_project_infos()
+- get_ci_project_attribute()
+- get_ci_project()
+- get_ci()
+"""
+
+"""
+get_ci_page()
+
+Download CI page as json.
+"""
+ci_pages={}
+def get_ci_page(url, request=""):
+ global ci_pages
+ if url+request in ci_pages:
+ return ci_pages[url+request]
+ else:
+ #print(".", end='')
+ try:
+ print("wget "+url)
+ os.system("wget " + "-o /dev/null " + "-O /tmp/download.json " + url + "/api/json" + request)
+ f = open('/tmp/download.json')
+ ci_pages[url+request]=json.load(f)
+ f.close()
+ except:
+ ci_pages[url+request]={}
+ return ci_pages[url+request]
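+# e.g. get_ci_page(ci_url_job+"some-job", request="?tree=builds[number]")
+# (job name hypothetical) downloads and caches that job's JSON API page.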
+
+
+"""
+get_ci_project_infos()
+
+Retrieve the information from a given project out of the CI server
+"""
+def get_ci_project_infos(pjt_name):
+ ci_pjt={}
+ usual_requests="?tree=number,result,timestamp,displayName,building,inQueue"
+ usual_requests+=",builds[number,building,result,timestamp,displayName,actions[parameters[name,value]]]"
+ usual_requests+=",actions[causes[upstreamUrl,upstreamBuild],parameters[name,value]]"
+
+ ci_pjt=get_ci_page(ci_url_job+pjt_name, request=usual_requests)
+ ci_pjt['project_name']=pjt_name
+
+ if 'builds' not in ci_pjt: return ci_pjt
+
+ for bld in ci_pjt['builds']:
+ if not bld['result']:
+ continue
+
+ if 'lastCompletedBuild' not in ci_pjt:
+ ci_pjt['lastCompletedBuild']=bld
+ if 'lastSuccessfulBuild' not in ci_pjt and bld['result']=='SUCCESS':
+ ci_pjt['lastSuccessfulBuild']=bld
+ if 'lastFailedBuild' not in ci_pjt and not bld['result']=='SUCCESS':
+ ci_pjt['lastFailedBuild']=bld
+ if 'lastRegressedBuild' not in ci_pjt and re.search(r'.*-trigger-bisect', bld['displayName']):
+ ci_pjt['lastRegressedBuild']=bld
+
+ bisect_name=re.sub("-build$", r'-bisect', pjt_name)
+ bld2=get_ci_page(ci_url_job+bisect_name, request=usual_requests)
+ # May need to check if -bisect/lastBuild bld is the upstream of -build/lastRegressedBuild
+ ci_pjt['lastRegressedBuild']['bisectJob']={}
+ ci_pjt['lastRegressedBuild']['bisectJob']['project_name']=bisect_name
+ if 'inQueue' in bld2:
+ ci_pjt['lastRegressedBuild']['bisectJob']['inQueue']=bld2['inQueue']
+ if 'builds' in bld2 and len(bld2['builds']) > 0 and bld2['builds'][0]:
+ ci_pjt['lastRegressedBuild']['bisectJob']['building']=bld2['builds'][0]['building']
+ ci_pjt['lastRegressedBuild']['bisectJob']['displayName']=bld2['builds'][0]['displayName']
+ ci_pjt['lastRegressedBuild']['bisectJob']['result']=bld2['builds'][0]['result']
+ ci_pjt['lastRegressedBuild']['bisectJob']['timestamp']=bld2['builds'][0]['timestamp']
+ if 'lastForcedBuild' not in ci_pjt and re.match('.*-force', bld['displayName']):
+ ci_pjt['lastForcedBuild']=bld
+
+ bld2=get_ci_page(ci_url_job+pjt_name+"/"+str(bld['number']), request=usual_requests)
+ if not bld2: continue
+ for action in bld2['actions']:
+ if 'causes' in action:
+ for cause in action['causes']:
+ if 'upstreamUrl' in cause:
+ ci_pjt['lastForcedBuild']['upstreamUrl']=cause['upstreamUrl']
+ ci_pjt['lastForcedBuild']['upstreamBuild']=cause['upstreamBuild']
+ bld3=get_ci_page(ci_url+cause['upstreamUrl']+str(cause['upstreamBuild']), request=usual_requests)
+ if bld3:
+ ci_pjt['lastForcedBuild']['upstreamBuildName']=bld3['displayName']
+ compute_smart_status(bld)
+
+ return ci_pjt
+
+
+"""
+get_ci_project_attribute()
+
+Get one info from the CI server (1 project - 1 attribute)
+"""
+def get_ci_project_attribute(pjt_infos, attr_name):
+ ret_attr={'text':"-", 'hlink':"", 'class':"", 'color':""}
+
+ #if True:
+ try:
+ if attr_name=="project":
+ ret_attr['text']=pjt_infos['project_name']
+ ret_attr['hlink']=ci_url_job+pjt_infos['project_name']
+
+ elif attr_name=="display_name" or attr_name=="last_title":
+ ret_attr['text']=pjt_infos['lastCompletedBuild']['displayName']
+ ret_attr['hlink']=ci_url_job+pjt_infos['project_name']+"/"+str(pjt_infos['lastCompletedBuild']['number'])
+
+ elif attr_name=="build_number":
+ ret_attr['text']=pjt_infos['lastCompletedBuild']['number']
+ ret_attr['hlink']=ci_url_job+pjt_infos['project_name']+"/"+str(pjt_infos['lastCompletedBuild']['number'])
+
+ elif attr_name=="pure_status":
+ ret_attr=pjt_infos['lastCompletedBuild']['rr_status']
+
+ elif attr_name=="diag":
+ ret_attr=compute_smart_diag( pjt_infos, pjt_infos['lastCompletedBuild'] )
+
+ elif attr_name=="status":
+ ret_status=pjt_infos['lastCompletedBuild']['rr_status']
+ ret_diag=compute_smart_diag( pjt_infos, pjt_infos['lastCompletedBuild'] )
+ if ret_diag['text'] != "-":
+ ret_attr['text']=ret_status['text']+" ("+ret_diag['text']+")"
+ else:
+ ret_attr['text']=ret_status['text']
+ ret_attr['color']=compute_color(pjt_infos['lastCompletedBuild'], ret_attr['text'])
+ ret_attr['text']="<input type=\"checkbox\">" + ret_attr['text']
+
+ elif attr_name=="since_last_build":
+ ret_attr['text']=time_since(pjt_infos['lastCompletedBuild']['timestamp'])
+
+ elif attr_name=="since_last_success":
+ ret_attr['text']=time_since(pjt_infos['lastSuccessfulBuild']['timestamp'])
+
+ elif attr_name=="since_last_fail":
+ ret_attr['text']=time_since(pjt_infos['lastFailedBuild']['timestamp'])
+
+ elif attr_name=="since_last_force":
+ ret_attr['text']=time_since(pjt_infos['lastForcedBuild']['timestamp'])
+
+ elif attr_name=="since_last_regressed":
+ ret_attr['text']=time_since(pjt_infos['lastRegressedBuild']['timestamp'])
+
+ elif attr_name=="nb_force":
+ total=0
+ forced=0
+ for bld in pjt_infos['builds']:
+ total=total+1
+ if re.match('.*-force', bld['displayName']):
+ forced=forced+1
+ ret_attr['text']="%02d%% (%d/%d)"% (int(forced/total*100), forced, total)
+
+ elif attr_name=="nb_fail":
+ total=0
+ failed=0
+ #nice_print(pjt_infos['builds'])
+ for bld in pjt_infos['builds']:
+ total=total+1
+ if not bld['result']=='SUCCESS':
+ failed=failed+1
+ ret_attr['text']="%.2f%% (%d out of %d)"% (forced/total, forced, total)
+
+ # Useful links
+ elif attr_name=="useful_links":
+ lnk=ci_url_job+pjt_infos['project_name']+"/"+str(pjt_infos['lastCompletedBuild']['number'])+"/artifact/artifacts/results/*view*/"
+ ret_attr['text']="<a href="+lnk+">res</a>"
+
+ lnk=ci_url_job+pjt_infos['project_name']+"/"+str(pjt_infos['lastCompletedBuild']['number'])+"/console"
+ ret_attr['text']+=" / <a href="+lnk+">console</a>"
+
+ elif attr_name=="last_build":
+ if 'lastCompletedBuild' not in pjt_infos:
+ return {'text':"never", 'color':"#90A4AE", 'hlink':"", 'class':""}
+ ret_attr['text']=time_since(pjt_infos['lastCompletedBuild']['timestamp'])
+
+ lnk=ci_url_job+pjt_infos['project_name']+"/"+str(pjt_infos['lastCompletedBuild']['number'])
+ ret_attr['text']+=" : <a href="+lnk+">"+str(pjt_infos['lastCompletedBuild']['displayName'])+"</a>"
+
+ elif attr_name=="last_success":
+ if 'lastSuccessfulBuild' not in pjt_infos:
+ return {'text':"never", 'color':"#90A4AE", 'hlink':"", 'class':""}
+ ret_attr['text']=time_since(pjt_infos['lastSuccessfulBuild']['timestamp'])
+
+ lnk=ci_url_job+pjt_infos['project_name']+"/"+str(pjt_infos['lastSuccessfulBuild']['number'])
+ ret_attr['text']+=" : <a href="+lnk+">"+str(pjt_infos['lastSuccessfulBuild']['displayName'])+"</a>"
+
+ elif attr_name=="last_fail":
+ if 'lastFailedBuild' not in pjt_infos:
+ return {'text':"never", 'color':"#90A4AE", 'hlink':"", 'class':""}
+ ret_attr['text']=time_since(pjt_infos['lastFailedBuild']['timestamp'])
+
+ lnk=ci_url_job+pjt_infos['project_name']+"/"+str(pjt_infos['lastFailedBuild']['number'])
+ ret_attr['text']+=" : <a href="+lnk+">"+str(pjt_infos['lastFailedBuild']['displayName'])+"</a>"
+
+ elif attr_name=="last_forced":
+ if 'lastForcedBuild' not in pjt_infos:
+ return {'text':"never", 'color':"#90A4AE", 'hlink':"", 'class':""}
+ ret_attr['text']=time_since(pjt_infos['lastForcedBuild']['timestamp'])
+
+ lnk=ci_url_job+pjt_infos['project_name']+"/"+str(pjt_infos['lastForcedBuild']['number'])
+ ret_attr['text']+=" : <a href="+lnk+">"+str(pjt_infos['lastForcedBuild']['displayName'])+"</a>"
+
+ lnk=ci_url+pjt_infos['lastForcedBuild']['upstreamUrl']+str(pjt_infos['lastForcedBuild']['upstreamBuild'])
+ ret_attr['text']+=" (<a href="+lnk+">bisect</a> : "
+
+ bldname=pjt_infos['lastForcedBuild']['upstreamBuildName']
+ if re.match(r'.*spurious|.*baseline', bldname):
+ bldname=re.sub("#([0-9]*)-(.*)-(.*)", r'\2-\3', bldname)
+ lnk=False
+ else:
+ compon=re.sub("#([0-9]*)-(.*)-(.*)", r'\2', bldname)
+ sha1=re.sub("#([0-9]*)-(.*)-(.*)", r'\3', bldname)
+ bldname = compon+"-"+sha1[0:7]
+ if not re.match('tcwg_bmk.*speed', pjt_infos['project_name']):
+ lnk="https://git.linaro.org/toolchain/ci/interesting-commits.git/tree/%s/sha1/%s"%(compon, sha1)
+
+ if lnk:
+ ret_attr['text']+="<a href="+lnk+">"+bldname+"</a>)"
+ else:
+ ret_attr['text']+=bldname+")"
+
+ elif attr_name=="last_regressed":
+ if 'lastRegressedBuild' not in pjt_infos:
+ return {'text':"never", 'color':"#90A4AE", 'hlink':"", 'class':""}
+ ret_attr['text']=time_since(pjt_infos['lastRegressedBuild']['timestamp'])
+
+ total=0
+ matched=0
+ for bld in pjt_infos['builds']:
+ total=total+1
+ if re.match('.*-trigger-bisect', bld['displayName']):
+ matched=matched+1
+ ret_attr['text']+="(%d/%d) : "% (matched, total)
+
+ lnk=ci_url_job+pjt_infos['project_name']+"/"+str(pjt_infos['lastRegressedBuild']['number'])
+ ret_attr['text']+="<a href="+lnk+">"+str(pjt_infos['lastRegressedBuild']['displayName'])+"</a>"
+
+ elif attr_name=="last_bisect":
+ if 'lastRegressedBuild' not in pjt_infos or 'bisectJob' not in pjt_infos['lastRegressedBuild'] or \
+ not pjt_infos['lastRegressedBuild']['bisectJob']:
+ return {'text':"never", 'color':"#90A4AE", 'hlink':"", 'class':""}
+ bisect_job=pjt_infos['lastRegressedBuild']['bisectJob']
+ ret_attr['text']=time_since(bisect_job['timestamp'])
+
+ lnk=ci_url_job+bisect_job['project_name']
+ state=""
+ #nice_print(pjt_infos['lastRegressedBuild'])
+ if 'inQueue' in bisect_job and bisect_job['inQueue']:
+ state="(waiting)"
+ elif 'building' in bisect_job and bisect_job['building']:
+ lnk+="/lastBuild"
+ state="(running)"
+ elif not bisect_job['result']=="SUCCESS":
+ lnk+="/lastBuild"
+ state="(failed)"
+ elif re.match('.*-spurious|.*-advance-baseline', bisect_job['displayName']):
+ lnk+="/lastBuild"
+ state="(spurious)"
+ elif re.match('#[0-9]*-(.*)-[a-z0-9]{40}$', bisect_job['displayName']):
+ lnk+="/lastBuild"
+ state="(ok)"
+ else:
+ lnk+="/lastBuild"
+ #print(bisect_job)
+ state="("+bisect_job['displayName']+")"
+ ret_attr['text']+="<a href="+lnk+">bisect"+state+"</a> / "
+
+ ret_attr['text']+="<a href="+lnk+">"+str(bisect_job['displayName'])+"</a>"
+
+ elif attr_name=="statistics":
+ (tot, fail, forc, trig, pas)=(0, 0, 0, 0, 0)
+ #nice_print(pjt_infos['builds'])
+ for bld in pjt_infos['builds']:
+ tot=tot+1
+ if re.match('.*-force', bld['displayName']):
+ forc=forc+1
+ elif re.match('.*-trigger-bisect', bld['displayName']):
+ trig=trig+1
+ elif bld['result']=='FAILURE' or bld['result']=='ABORTED':
+ fail=fail+1
+ elif bld['result']=='SUCCESS':
+ pas=pas+1
+
+ ret_attr['text']="regressed(%02d%%) force(%02d%%) fail(%02d%%) ok(%02d%%) nbruns=%d"\
+ %(int(100*trig/tot), int(100*forc/tot), int(100*fail/tot), int(100*pas/tot), tot)
+
+ elif attr_name=="artifact_version":
+ if 'lastSuccessfulBuild' not in pjt_infos:
+ return {'text':"--", 'color':"#90A4AE", 'hlink':"", 'class':""}
+
+ # get_artifact_version
+ ret_attr['text']=get_artifact_format_version(pjt_infos['project_name']+"/"+str(pjt_infos['lastSuccessfulBuild']['number']))
+
+ lnk=ci_url_job+pjt_infos['project_name']+"/"+str(pjt_infos['lastSuccessfulBuild']['number'])+"/artifact/artifacts/99-rewrite/more"
+ res=os.system("wget -o /dev/null -O /tmp/more "+lnk)
+ if res == 0:
+ ret_attr['text']+=" (not-completed)"
+
+ elif attr_name=="result_file":
+ lnk=ci_url_job+pjt_infos['project_name']+"/"+str(pjt_infos['lastCompletedBuild']['number'])+"/artifact/artifacts/results/*view*/"
+ ret_attr['text']="<a href="+lnk+">res</a>"
+ elif attr_name=="console":
+ lnk=ci_url_job+pjt_infos['project_name']+"/"+str(pjt_infos['lastCompletedBuild']['number'])+"/console"
+ ret_attr['text']="<a href="+lnk+">console</a>"
+
+ # TODO
+ elif attr_name=="notify_verif":
+ lnk=ci_url_job+pjt_infos['project_name']+"/"+str(pjt_infos['lastCompletedBuild']['number'])+"/artifact/artifacts/"
+ ret_attr['text']="<a href="+lnk+">artifacts</a>/"
+ lnk=ci_url_job+pjt_infos['project_name']+"/"+str(pjt_infos['lastCompletedBuild']['number'])+"/artifact/artifacts/results/*view*/"
+ ret_attr['text']+="<a href="+lnk+">results</a>/ "
+ lnk=ci_url_job+pjt_infos['project_name']+"/"+str(pjt_infos['lastCompletedBuild']['number'])+"/artifact/artifacts/jenkins/mail-body.txt/*view*/"
+ ret_attr['text']+="<a href="+lnk+">email</a>/"
+ lnk=ci_url_job+pjt_infos['project_name']+"/"+str(pjt_infos['lastCompletedBuild']['number'])+"/artifact/artifacts/jenkins/jira-body.txt/*view*/"
+ ret_attr['text']+="<a href="+lnk+">jira</a>/ "
+ lnk=ci_url_job+pjt_infos['project_name']+"/"+str(pjt_infos['lastCompletedBuild']['number'])+"/artifact/artifacts/jenkins/notify.log/*view*/"
+ ret_attr['text']+="<a href="+lnk+">log</a>/"
+ lnk=ci_url_job+pjt_infos['project_name']+"/"+str(pjt_infos['lastCompletedBuild']['number'])+"/artifact/artifacts/results-vs-prev/csv-results-1/status.csv/*view*/"
+ ret_attr['text']+="<a href="+lnk+">status</a>"
+
+ elif attr_name=="bmk_job":
+ file, tmpf = download_and_open(ci_url_job+pjt_infos['project_name']+"/lastCompletedBuild"+"/artifact/artifacts/results_id")
+ for items in file:
+ ret_attr['text']=re.sub(".*/","", items)
+ ret_attr['hlink']=ci_url_job+"tcwg-benchmark"+"/"+ret_attr['text']
+ close_and_remove(file, tmpf)
+
+ elif attr_name=="components":
+ ret_attr['text']=re.sub("^#[0-9]*-(.*)-R.*",r'\1',pjt_infos['lastCompletedBuild']['displayName'])
+
+ elif attr_name=="failing_step":
+            ret_attr=compute_smart_diag(pjt_infos, pjt_infos['lastCompletedBuild'])
+
+ except:
+ ret_attr['text']="-"
+ traceback.print_exc()
+
+ return ret_attr
+
+"""
+get_ci_project()
+
+Get ci-status internal representation for a project (PJT).
+Iterates over the CONFIG columns, and requests the info for each one.
+"""
+def get_ci_project(config, ci, pjt_name):
+ ci_project={}
+ ci_project['data']=get_ci_project_infos(pjt_name)
+ for attr in config['details_table']['columns']:
+ ci_project[attr] = get_ci_project_attribute(ci_project['data'], attr)
+ return ci_project
+
+
+def count_jobs(ci, requested_status, summary_style, mindays, maxdays):
+ nb=0
+ for job in ci:
+ if not 'data' in ci[job] or not 'lastCompletedBuild' in ci[job]['data']:
+ continue
+
+ if summary_style=='summary_table_last_run':
+ bld=ci[job]['data']['lastCompletedBuild']
+ if 'timestamp' not in bld or not bld['rr_status']['text']:
+ continue
+ days=days_since(bld['timestamp'])
+ if days>=mindays and days<=maxdays and re.search(requested_status, bld['rr_status']['text']):
+ nb=nb+1
+
+ elif summary_style=='summary_table_all_runs':
+ for bld in ci[job]['data']['builds']:
+ #nice_print(bld)
+ if 'timestamp' not in bld:
+ continue
+ if 'rr_status' not in bld or not bld['rr_status']['text']:
+ continue
+ days=days_since(bld['timestamp'])
+ if days>=mindays and days<=maxdays and re.search(requested_status, bld['rr_status']['text']):
+ nb=nb+1
+
+ #if nb==0: return "-"
+ #else: return nb
+ return nb
+
+"""
+get_ci_summary_laps()
+"""
+def get_ci_summary_laps(ci, timelaps, nb_of, summary_style):
+ ret_attr={'text':"-", 'hlink':"", 'class':"", 'color':""}
+
+ mindays=0
+ maxdays=9999
+ if re.search(r'([0-9]*)-days-ago$', timelaps):
+ mindays=maxdays=int(re.sub("([0-9]*)-days-ago$",r'\1', timelaps))
+ if re.search(r'last-([0-9]*)-days', timelaps):
+ maxdays=int(re.sub("last-([0-9]*)-days",r'\1', timelaps))
+ if re.search(r'last-week', timelaps):
+ maxdays=7
+
+ #try:
+ if nb_of=="last_run_date":
+ ret_attr['text']=timelaps
+ ret_attr['color']="blue"
+ elif nb_of[0]=="#":
+ ret_attr['text']=count_jobs(ci, nb_of[1:].upper(), summary_style, mindays, maxdays)
+ ret_attr['color']=compute_color(None, nb_of[1:].upper())
+ #except:
+ # ret_attr['text']="-"
+
+ return ret_attr
+
+
+"""
+get_ci()
+
+Get ci-status internal representation, main routine.
+Iterates over the CONFIG projects, and requests the info for each one.
+"""
+def get_ci(config):
+ ci={}
+ # LNT dashboard does not have a details_table
+ if 'details_table' not in config:
+ return ci
+
+ for line in config['details_table']['lines']:
+ all_jobs=get_ci_page("https://ci.linaro.org", request="/api/json?tree=jobs[name]")
+ for job in all_jobs['jobs']:
+ if re.search(line, job['name']):
+ ci[job['name']]=get_ci_project(config, ci, job['name'])
+
+ if 'summary_table_last_run' in config:
+ summary={}
+ for timelaps in config['summary_table_last_run']['lines']:
+ summary[timelaps]={}
+ for nb_of in config['summary_table_last_run']['columns']:
+ summary[timelaps][nb_of]=get_ci_summary_laps(ci, timelaps, nb_of, 'summary_table_last_run')
+ ci['summary_table_last_run']=summary
+
+ if 'summary_table_all_runs' in config:
+ summary={}
+ for timelaps in config['summary_table_all_runs']['lines']:
+ summary[timelaps]={}
+ for nb_of in config['summary_table_all_runs']['columns']:
+ summary[timelaps][nb_of]=get_ci_summary_laps(ci, timelaps, nb_of, 'summary_table_all_runs')
+ ci['summary_table_all_runs']=summary
+
+ return ci
+
+
+
+
+########################################################################################################################
+## DUMP HTML
+
+"""
+dump html routines
+- dump_html_one_line
+- dump_html
+"""
+
+html_header = """<html>
+<style>
+ table, td, th {
+ border-collapse: collapse;
+ }
+ tbody tr:nth-child(even) td {
+ background-color: #ededed;
+ }
+</style>
+<head>
+<title>CI Infrastructure Status - %s</title>
+<link rel="stylesheet" type="text/css" href="sorting-table-css/example.css" />
+</head>
+<body>
+<h1>CI Infrastructure Status - %s</h1>
+"""
+
+
+html_footer = """
+ <script>
+  // Guard: not every generated page has a '.massive' table.
+  var table = document.querySelector('.massive')
+  if (table) {
+    var tbody = table.tBodies[0]
+    var rows = [].slice.call(tbody.rows, 0)
+    var fragment = document.createDocumentFragment()
+
+    for (var k = 0; k < 50; k++) {
+      for (var i = 0; i < rows.length; i++) {
+        fragment.appendChild(rows[i].cloneNode(true))
+      }
+    }
+    tbody.innerHTML = ''
+    tbody.appendChild(fragment)
+  }
+ </script>
+ <!-- <script type="text/javascript" src="sortable.js"></script> -->
+ <script src="sorting-table-css/sortable.js"></script>
+ <script>
+ function prepareAdvancedTable() {
+ function convertSizeToBytes(str) {
+ var matches = str.match(/^([0-9.]+)(\w+)$/)
+ if (matches) {
+ var vals = {
+ kB: 1, // 1024 B
+ KiB: 1,// 1024 B
+ MB: 2, // 1024 * 1024 B
+ GB: 3, // 1024 * 1024 * 1024 B
+ TB: 4, // 1024 * 1024 * 1024 *1024 B
+ }
+ return (matches[1] || 0) * Math.pow(1024, vals[matches[2]])
+ }
+ return str
+ }
+
+    var size_table = document.querySelector('.advanced-table')
+    // Guard: skip when the page has no '.advanced-table'.
+    if (!size_table) return
+    var rows = size_table.tBodies[0].rows
+ for (let i = 0; i < rows.length; i++) {
+ const date_element = rows[i].cells[2]
+ const size_element = rows[i].cells[1]
+ date_element.setAttribute('data-sort', date_element.innerText.replace(/(\d+)\/(\d+)\/(\d+)/, '$3$1$2'))
+ size_element.setAttribute('data-sort', convertSizeToBytes(size_element.innerText))
+ }
+ }
+ prepareAdvancedTable()
+ </script>
+</body>
+</html>
+"""
+
+def dump_html_one_line(f, config_table, ci_pjt):
+ f.write(" <tr>\n")
+ for attr in config_table['columns']:
+ f.write(" <td>")
+ if ci_pjt[attr]['color']:
+ f.write("<font color=\""+ci_pjt[attr]['color']+"\">")
+ if ci_pjt[attr]['hlink']:
+ f.write("<a href='"+ci_pjt[attr]['hlink']+"'>")
+ f.write(str(ci_pjt[attr]['text']))
+ if ci_pjt[attr]['hlink']:
+ f.write("</a>")
+ if ci_pjt[attr]['color']:
+ f.write("</font>")
+ f.write("</td>\n");
+ f.write(" </tr>\n")
+
+def dump_html_util(config_name, output_dirname):
+ print("# Copy yaml : "+ config_name)
+ os.system("cp " + config_name + " " + output_dirname)
+
+ print("# Copy html : help.html")
+ os.system("cp "+scripts_dir+"/cimonitor-configs/help.html " + output_dirname)
+
+ print("# Copy css : sorting-table-css")
+ os.system("cp -ar "+scripts_dir+"/cimonitor-configs/sorting-table-css " + output_dirname)
+
+def dump_html(config, ci):
+ print("# Emit html : "+ config['filename'])
+ os.system("mkdir -p "+os.path.dirname(output_dirname+"/"+config['filename']))
+ f = open(output_dirname+"/"+config['filename'], 'w')
+
+ f.write(html_header % (os.path.basename(config['filename']), os.path.basename(config['filename'])))
+ f.write("<p> date = "+str(datetime.datetime.now())+"</p>\n")
+ f.write("<p> config = <a href="+os.path.basename(config_name)+">"+os.path.basename(config_name)+"</a></p>\n")
+
+ # LNT dashboard (upstream projects health)
+ if 'lnt_dashboard' in config:
+ f.write("<h2>LNT dashboard (upstream projects health)</h2>\n")
+ f.write("<ul>\n")
+ for project in config['lnt_dashboard']:
+ f.write(" <h1>"+project+"</h1>\n")
+ for entry in config['lnt_dashboard'][project]:
+ text=entry.split()
+ f.write(" <li><a href=\""+text[1]+"\">"+text[0]+"</a></li>\n")
+ f.write("</ul>\n\n")
+
+ # LINKS
+ if 'links' in config:
+ f.write("<h2> LINKS </h2>\n")
+ f.write("<ul>\n")
+ for lnk in config['links']:
+ f.write(" <li><a href=\""+lnk+".html\">"+lnk+".html</a></li>\n")
+ f.write("</ul>\n\n")
+
+ # SUMMARY_TABLE (Last run)
+ if 'summary_table_last_run' in config:
+ f.write("<h2> SUMMARY_TABLE (Last completed build only)</h2>\n")
+ f.write("<p> statistics on last-completed build of all jenkins job</p>\n")
+ f.write("<table border=1 cellspacing=1 cellpadding=3 class=\"sortable\">\n")
+ f.write(" <thead>\n")
+ for col in config['summary_table_last_run']['columns']:
+ f.write(" <th>"+col+"</th>\n")
+ f.write(" </thead>\n")
+ for laps in config['summary_table_last_run']['lines']:
+ dump_html_one_line(f, config['summary_table_last_run'], ci['summary_table_last_run'][laps])
+ f.write("</table>\n\n")
+
+ # SUMMARY_TABLE (All runs)
+ if 'summary_table_all_runs' in config:
+ f.write("<h2> SUMMARY_TABLE (All completed runs)</h2>\n")
+ f.write("<p> statistics on all completed builds of all jenkins job</p>\n")
+ f.write("<table border=1 cellspacing=1 cellpadding=3 class=\"sortable\">\n")
+ f.write(" <thead>\n")
+ for col in config['summary_table_all_runs']['columns']:
+ f.write(" <th>"+col+"</th>\n")
+ f.write(" </thead>\n")
+ for laps in config['summary_table_all_runs']['lines']:
+ dump_html_one_line(f, config['summary_table_all_runs'], ci['summary_table_all_runs'][laps])
+ f.write("</table>\n\n")
+
+ # DETAILS_TABLE
+ if 'details_table' in config:
+ f.write("<h2> DETAILS_TABLE </h2>\n")
+ f.write("<p> help for the table format : <a href=\"help.html\">here</a>")
+ f.write("<table border=1 cellspacing=1 cellpadding=3 class=\"sortable\">\n")
+ f.write(" <thead>\n")
+ for col in config['details_table']['columns']:
+ f.write(" <th>"+col+"</th>\n")
+ f.write(" </thead>\n")
+
+ for pjt in config['details_table']['lines']:
+ dump_html_one_line(f, config['details_table'], ci[pjt])
+
+ f.write("</table>\n")
+
+ f.write("<p> time to build html = "+str(datetime.datetime.now() - dt_now)+"</p>\n")
+
+ f.write(html_footer)
+ f.close()
+
+########################################################################################################################
+## BASIC ASCII DUMP ROUTINES
+
+"""
+Basic ascii dump routines
+- dump_ascii_one_line
+- dump_ascii
+"""
+def dump_ascii_fmt(k):
+ if k=="project":
+ return "%70s,"
+ else:
+ return "%20s,"
+
+def dump_ascii_one_line(config_table, ci, pjt_name):
+ for attr in config_table['columns']:
+ fmt=dump_ascii_fmt(attr)
+ print(fmt % (ci[pjt_name][attr]['text']), end='')
+ print("")
+
+
+def dump_ascii(config, ci):
+ i=0
+ print("\n\n=== DETAILS TABLE - %s" %(config['filename']))
+
+ # Array header
+ for col in config['details_table']['columns']:
+ fmt=dump_ascii_fmt(col)
+        print(fmt % (col), end='')
+ print("")
+
+ for pjt in config['details_table']['lines']:
+ dump_ascii_one_line(config['details_table'], ci, pjt)
+
+
+########################################################################################################################
+## MAIN PROCEDURE
+
+if __name__ == "__main__":
+ args = parser.parse_args(sys.argv)
+
+ output_dirname=args.output[0]
+ if not os.path.isdir(output_dirname):
+ parser.print_help()
+ exit(1)
+
+ for config_name in args.configs[1:]:
+
+ all_configs=get_config(config_name)
+
+ for config in all_configs:
+ ci=get_ci(config)
+ dump_html(config, ci)
+ # dump_ascii(config, ci)
+
+ dump_html_util(config_name, output_dirname)
+ print("# Time to generated all html files : " + str(datetime.datetime.now() - dt_now))
diff --git a/jenkins-helpers.sh b/jenkins-helpers.sh
index 7feb3edf..a412e803 100644
--- a/jenkins-helpers.sh
+++ b/jenkins-helpers.sh
@@ -29,12 +29,12 @@ abs_path ()
assert_with_msg ()
{
(
- set -euf -o pipefail
+ set -euf -o pipefail +x
local failure_message=$1
shift
- eval "$@" || (echo "$failure_message" && exit 1)
+ eval "$*" || (echo "$failure_message" >&2 && exit 1)
)
}
@@ -42,9 +42,9 @@ assert_with_msg ()
assert ()
{
(
- set -euf -o pipefail
+ set -euf -o pipefail +x
- eval "$@"
+ eval "$*"
)
}
@@ -78,6 +78,9 @@ fresh_dir ()
done
done
+ # Make sure we can delete the files
+ find "$dir" "${find_opts[@]}" -type d -exec chmod +rwx {} \;
+ chmod -R +rw "$dir"
find "$dir" "${find_opts[@]}" -delete
)
}
@@ -123,7 +126,7 @@ print_node_with_least_containers ()
# Re. --random-sort below: shuffle node list to mitigate races
# when starting multiple containers at the same time
- testers=$(print_nodes_in_labels ${tester_labels[*]} | sort --random-sort)
+ testers=$(print_nodes_in_labels "${tester_labels[@]}" | sort --random-sort)
for tester in $testers; do
ret=0
tester_host=$(print_host_for_node $tester "ignore_fail")
@@ -153,8 +156,8 @@ print_arch_for_label ()
case $label in
tcwg-x86_64-*) echo amd64 ;;
tcwg-x86_32-*) echo i386 ;;
- tcwg-amp_64-*|tcwg-apm_64-*|tcwg-d05_64-*|tcwg-lc_64*|tcwg-sq_64-*|tcwg-thx1_64-*|tcwg-tx1_64-*) echo arm64 ;;
- tcwg-amp_32-*|tcwg-apm_32-*|tcwg-d05_32-*|tcwg-sq_32-*|tcwg-tk1_32-*|tcwg-tx1_32-*) echo armhf ;;
+ tcwg-amp_64-*|tcwg-apm_64-*|tcwg-armv8_64|tcwg-d05_64-*|tcwg-lc_64*|tcwg-sq_64-*|tcwg-thx1_64-*|tcwg-tx1_64-*) echo arm64 ;;
+ tcwg-amp_32-*|tcwg-apm_32-*|tcwg-armv7|tcwg-armv8_32|tcwg-d05_32-*|tcwg-sq_32-*|tcwg-tk1_32-*|tcwg-tx1_32-*) echo armhf ;;
*) echo "ERROR: Unsupported label: $label" >&2; exit 1 ;;
esac
)
@@ -185,7 +188,7 @@ print_host_for_node ()
# .ssh/config (in dockerfiles.git/tcwg-base/tcwg-buildslave/).
for suffix in "" ".tcwglab"; do
host="$1$suffix"
- if timeout 30s ssh "$host" true >& /dev/null; then
+ if timeout 30s ssh "$host" true &> /dev/null; then
break
fi
host=""
@@ -249,12 +252,55 @@ print_tester_label_for_target ()
# for cross-testing. This means we no longer test on armv7
# hardware.
aarch64-linux*) echo "tcwg-apm_64-test" ;;
- armv8l-linux*) echo "tcwg-apm_32-test" ;;
- arm-linux*) echo "tcwg-apm_32-test" ;;
+ armv8l-linux*) echo "tcwg-armv8_32" ;;
+ arm-linux*) echo "tcwg-armv7" ;;
esac
)
}
+# Print number of busy executors on a jenkins node
+# $1: node
+print_number_of_busy_executors ()
+{
+ (
+ set -euf -o pipefail
+ local node="$1"
+
+ local json
+ json=$(mktemp)
+ # shellcheck disable=SC2064
+ trap "rm $json" EXIT
+
+ curl -s "https://ci.linaro.org/computer/$node/api/json?depth=1" > "$json"
+
+ local n n_busy idle
+
+ n=$(jq -r ".numExecutors" < "$json")
+ n_busy="$n"
+
+ while [ "$n" -gt "0" ]; do
+ n=$(($n - 1))
+ idle=$(jq -r ".executors[$n].idle" < "$json")
+ if [ "$idle" = "true" ]; then
+ n_busy=$(($n_busy - 1))
+ fi
+ done
+
+ n=0
+ while true; do
+ idle=$(jq -r ".oneOffExecutors[$n].idle" < "$json")
+ if [ "$idle" = "null" ]; then
+ break
+ elif [ "$idle" != "true" ]; then
+ n_busy=$(($n_busy + 1))
+ fi
+ n=$(($n + 1))
+ done
+
+ echo "$n_busy"
+ )
+}
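+# Example (sketch; the node name is hypothetical):
+#   busy=$(print_number_of_busy_executors tcwg-x86_64-dev-01)
+#   echo "tcwg-x86_64-dev-01 has $busy busy executor(s)"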
+
# Run command on remote machine in given directory via ssh on a given port
# "$1" -- <host>[:<port>[:<dir>[:<ssh_opts>[:<env>]]]]
# "$2, $3, etc" -- command and its arguments
@@ -467,6 +513,9 @@ clone_or_update_repo_no_checkout ()
if git -C $ref_dir rev-parse --git-dir >/dev/null 2>&1; then
refopt="--reference $ref_dir"
break
+ elif [ -d $ref_dir ]; then
+ refopt="--reference-if-able $ref_dir"
+ break
fi
done
;;
@@ -477,14 +526,21 @@ clone_or_update_repo_no_checkout ()
if ! git -C "$dir" status >/dev/null 2>&1; then
# Git repo doesn't exist or is corrupted. Make a new clone.
rm -rf "$dir"
+ fi
- local single_branch_opt=""
- if [ x"$single_branch" != x"" ]; then
- single_branch_opt="--single-branch --branch $single_branch"
- fi
+ if [ -d "$dir" ] && [ x"$refopt" != x"" ] \
+ && [ "$(du -s "$dir/.git" | cut -f 1)" -gt $((1024*1024)) ]; then
+ # Current clone has grown above 1GB, and we have a reference repo,
+ # which should cut down the size significantly.
+ # Redo the clone to save disk space.
+ # PS: Unfortunately, I could not find a way to make the current clone
+ # use new objects from the reference repo without a full re-clone.
+ # Fortunately, a new clone with a reference repo is quick.
+ rm -rf "$dir"
+ fi
- run_with_timeout_and_retry 1h 3 git clone $refopt $single_branch_opt "$url" "$dir"
- else
+    # This is not a real loop; it's just easier to skip parts with "break".
+ while [ -d "$dir" ]; do
# Clean up the clone (this is supposed to re-share objects from
# reference clone and keep the size of the clone minimal).
# It's possible that previous GC process was interrupted and left
@@ -495,46 +551,86 @@ clone_or_update_repo_no_checkout ()
# Also, prune all loose objects to avoid "git gc --auto" failing
# and creating .git/gc.log to warn us.
rm -f "$dir/.git/gc.log"
- git -C "$dir" gc --auto --force --prune=all
+ # Do not detach into background for GC. Running in the background may
+ # cause a failure during bisect's rsync, which may see some of
+        # the files disappearing mid-rsync.
+ git -C "$dir" config gc.autoDetach false
+ if ! git -C "$dir" gc --auto --force --prune=all; then
+ # "git gc" can fail due to corrupted packs.
+ rm -rf "$dir"
+ break
+ fi
+
# Delete stale locks -- especially .git/refs/remotes/REMOTE/BRANCH.lock
- # These occur when builds are aborted during "git remote update" or similar.
+ # These occur when builds are aborted during "git remote update" or
+ # similar.
find "$dir/.git" -name "*.lock" -delete
- fi
- git_set_remote "$dir" "$remote" "$url" "$single_branch"
+ # Recover from any previous am/cherry-pick/rebase.
+ # In pre-commit CI we apply patches with "git am", which can fail
+ # and leave clone in a bad state.
+ local i
+ for i in am cherry-pick rebase; do
+ git -C "$dir" "$i" --abort &>/dev/null || true
+ done
- local refspec
- if [ x"$single_branch" = x"" ]; then
- run_with_timeout_and_retry 1h 3 git -C "$dir" remote update -p "$remote"
- refspec="+refs/changes/*:refs/changes/*"
- else
- refspec="+refs/heads/$single_branch:refs/remotes/$remote/$single_branch"
- fi
- run_with_timeout_and_retry 1h 3 git -C "$dir" fetch -q $remote $refspec --prune
+ break
+ done
+
+ local fresh_clone=false
+ while true; do
+ if ! [ -d "$dir" ]; then
+ local single_branch_opt=""
+ if [ x"$single_branch" != x"" ]; then
+ single_branch_opt="--single-branch --branch $single_branch"
+ fi
+
+ run_with_timeout_and_retry 1h 3 git clone \
+ $refopt $single_branch_opt "$url" "$dir"
+ fresh_clone=true
+ fi
+
+ git_set_remote "$dir" "$remote" "$url" "$single_branch"
+
+ local refspec
+ if [ x"$single_branch" = x"" ]; then
+ run_with_timeout_and_retry 1h 3 git -C "$dir" remote update -p \
+ "$remote" 2>/dev/null
+ refspec="+refs/changes/*:refs/changes/*"
+ else
+ refspec="+refs/heads/$single_branch:refs/remotes/$remote/$single_branch"
+ fi
+
+ if ! run_with_timeout_and_retry 1h 3 git -C "$dir" fetch -q \
+ $remote $refspec --prune; then
+ # "git fetch --prune" can fail due to running out of memory space
+ # on 32-bit architectures on big repos. Remove the repo and retry
+ # with a fresh clone.
+ if $fresh_clone; then
+ return 1
+ fi
+
+ rm -rf "$dir"
+ continue
+ fi
+
+ break
+ done
)
}
-# Clone or update a git repo
+# Checkout branch/ref/SHA1 in a git repo
# $1 -- repo directory
# $2 -- ref to checkout
-# $3 -- master git repo
-# $4 -- optional reference git repo (to speedup initial cloning)
-# $5 -- optional single-branch to reduce fetching from remote repo
-# $6 -- optional name of remote (default is "origin")
-clone_or_update_repo ()
+# $3 -- name of the git remote
+git_checkout ()
{
(
set -euf -o pipefail
local dir="$1"
local ref="$2"
- local url="$3"
- local reference="${4-auto}"
- local single_branch="${5-}"
- local remote="${6-origin}"
-
- clone_or_update_repo_no_checkout "$dir" "$url" "$reference" \
- "$single_branch" "$remote"
+ local remote="$3"
git_clean "$dir"
# Convert git branch/tag names into SHA1
@@ -545,35 +641,29 @@ clone_or_update_repo ()
)
}
-# Print baseline git repo
-# $1 -- project name
-# $3 -- whether to make the new remote read-only or read-write.
-print_baseline_repo ()
+# Clone or update a git repo
+# $1 -- repo directory
+# $2 -- ref to checkout
+# $3 -- master git repo
+# $4 -- optional reference git repo (to speedup initial cloning)
+# $5 -- optional single-branch to reduce fetching from remote repo
+# $6 -- optional name of remote (default is "origin")
+clone_or_update_repo ()
{
(
set -euf -o pipefail
local dir="$1"
- local read_only="$2"
-
- local repo
- case "$dir" in
- binutils) repo=binutils-gdb.git ;;
- llvm) repo=llvm-project.git ;;
- *) repo=$dir.git ;;
- esac
+ local ref="$2"
+ local url="$3"
+ local reference="${4-auto}"
+ local single_branch="${5-}"
+ local remote="${6-origin}"
- # Use git-us.l.o to avoid delays between review.l.o and git.l.o
- local url="git-us.linaro.org/toolchain/ci/$repo"
- if $read_only; then
- url="https://$url"
- else
- # Use gitolite access. Gerrit's ssh access verifies pushed commits,
- # which can slow-down server on big pushes.
- url="ssh://$url"
- fi
+ clone_or_update_repo_no_checkout "$dir" "$url" "$reference" \
+ "$single_branch" "$remote"
- echo "$url"
+ git_checkout "$dir" "$ref" "$remote"
)
}
@@ -649,27 +739,28 @@ untar_url ()
 # Wait until the ssh server is ready to accept connections
# $1: host
-# $2: port
-# $3: retry count (optional)
+# $2: retry count; use "" for the default
+# $3+: ssh options
# Returns 0 on success, 1 in case of error
wait_for_ssh_server ()
{
(
set -euf -o pipefail
local session_host="$1"
- local session_port="$2"
- local count="${3-20}"
+ local count="${2:-20}"
+ shift 2
+ local -a session_opts=("$@")
while [ $count -gt 0 ]
do
- timeout 30s ssh -p $session_port $session_host true && break
+ timeout 30s ssh "${session_opts[@]}" $session_host true && break
echo "SSH server not ready, waiting....."
sleep 5
count=$((count - 1))
done
if [ $count -eq 0 ]; then
- echo "ERROR: SSH server did not respond ($session_host:$session_port)"
+ echo "ERROR: SSH server did not respond (ssh ${session_opts[*]} $session_host)"
return 1
fi
return 0
@@ -706,17 +797,24 @@ print_memory_limit ()
local memlimit="$4"
local memory
case "$task" in
- build)
- # 2GB per compilation core, with 4GB minimum and
- # half of total system RAM maximum.
- memory=$(( 2000 * $weight * $nproc ))
-
- memlimit=$(( $memlimit / 2 ))
+ build|precommit)
if [ "$memlimit" -lt "4000" ]; then
- # Don't limit memory on machines with less than 8GB RAM.
+ # Don't limit memory on machines with less than 4GB RAM.
memory="unlimited"
else
- # Use at most half of RAM
+ # We want to have at least 2GB of RAM for every core. E.g.,
+ # on a machine with 32 cores and 128GB RAM we can run
+ # 2 concurrent builds, while on a 32-core machine with
+ # 64GB RAM we can run only 1 build at a time.
+ # Note that number of concurrent builds is controlled by number
+ # of node executors in jenkins.
+ memory=$(( 2000 * $weight * $nproc ))
+
+ # Also, trim 5% off total RAM to have a bit of RAM reserved
+ # for processes on the bare machine, which really helps when
+ # build container goes into swap.
+ memlimit=$(( $memlimit * 95 / 100 ))
+
if [ "$memory" -gt "$memlimit" ]; then
memory="$memlimit"
fi
@@ -744,79 +842,110 @@ print_pids_limit ()
local task="$1"
local weight="$2"
local pids
- pids=$(( $weight * 5000 )) # 5000 processes per executor
+
+ # On startup of GCC's guality tests we have $NCPUs guality_check$PID.exe
+ # processes, each of which forks into GDB with ($NCPUs+1) threads.
+ # This means that on a 160-core system we need around 30k PID limit.
+ pids=$(nproc --all)
+ pids=$(( pids * (pids + 1) + 5000 ))
+ pids=$(( pids * weight ))
+
+ # Make sure we are using at most half of system PID limit
+ local pid_max_2
+ pid_max_2=$(cat /proc/sys/kernel/pid_max)
+ pid_max_2=$(( pid_max_2 / 2 ))
+
+ if [ $pids -gt $pid_max_2 ]; then
+ pids=$pid_max_2
+ fi
+
echo "$pids"
)
}
-# Print default bind mounts for $task
+# Print default bind and volume mounts for $task and $job
# $1: task
-print_bind_mounts ()
+# $2: job
+# $3: Suffix to be appended to the volume names (e.g., -$container_arch-$distro)
+# $4+: ssh command
+print_mounts ()
{
(
set -euf -o pipefail
local task="$1"
- local ssh="$2"
- local -a bind_mounts
+ local job="$2"
+ local suffix="$3"
+ shift 3
+ local ssh=("$@")
+
+ if [ "${WORKSPACE+set}" = "set" ]; then
+ case $task in
+ bench|build)
+ echo "$WORKSPACE:$WORKSPACE"
+ ;;
+ precommit)
+ # Note the difference between "-v $WORKSPACE:$WORKSPACE" above
+ # and "-v $WORKSPACE" here. In the above case $WORKSPACE is
+ # bind-mounted from the host; but in this case a scratch volume
+ # is created and mounted inside container.
+ echo "$WORKSPACE"
+ # The only reason why we are bind-mounting base-artifacts/
+ # is that it can be very big. Rsync-ing tens of gigs back and
+ # forth can take as much time as the actual pre-commit test.
+ # As a nice side-effect having base-artifacts/ read-only checks
+ # that our build scripts don't try to modify it by mistake.
+ echo "$WORKSPACE/base-artifacts:$WORKSPACE/base-artifacts:ro"
+ ;;
+ esac
+ fi
case $task in
- bench|build)
- if [ x"${WORKSPACE+set}" = x"set" ]; then
- bind_mounts+=("$WORKSPACE")
- fi
+ build|precommit)
+ echo /home/tcwg-buildslave/snapshots-ref:/home/tcwg-buildslave/snapshots-ref:ro
+ ;;
+ bench)
+ echo /home/shared/git:/home/shared/git:ro
;;
- esac
-
- case $task in
- build) bind_mounts+=(/home/tcwg-buildslave/snapshots-ref:ro) ;;
- bench) bind_mounts+=(/home/shared/git:ro) ;;
esac
local key
- for key in $($ssh find /etc/ssh/ -name "ssh_host_*_key" \
- -o -name "ssh_host_*_key.pub"); do
- bind_mounts+=("$key:ro")
+ for key in $("${ssh[@]}" find /etc/ssh/ -name "ssh_host_*_key" \
+ -o -name "ssh_host_*_key.pub"); do
+ echo "$key:$key:ro"
done
- echo "${bind_mounts[@]:+${bind_mounts[@]}}"
- )
-}
-
-# Print default volume mounts for $job
-# $1: job
-# $2: Suffix to be appended to the volume names (e.g., -$container_arch-$distro)
-print_volume_mounts ()
-{
- (
- set -euf -o pipefail
- local job="$1"
- local suffix="$2"
-
- local -a mounts
- local volume_id
-
case "$job" in
tcwg_*-*)
# Add ccache volume for tcwg_* jobs.
# These jobs depend on ccache for fast rebuilds of LLVM and GCC with
# the host compiler.
- # tcwg_* jobs use per-executor WORKSPACES, and ccache uses separate
- # cache entries for different paths. Therefore we need to use
- # separate caches for different $WORKSPACES. Otherwise we get
- # a lot of cache polution on high-executor machines, e.g., for
- # tcwg_bmk builds on tcwg-x86_64-dev-01 node.
local prefix
if [ x"${WORKSPACE+set}" = x"set" ]; then
prefix=$(basename $WORKSPACE)
else
prefix=$(echo $job | cut -d- -f 1)
fi
+ # tcwg_* jobs use per-executor WORKSPACES, and we configure ccache
+ # to use CCACHE_BASEDIR=$WORKSPACE so that ccache sees same paths
+ # for builds on different executors.
+ # Strip "_$EXECUTOR_NUMBER" from the job/workspace ID.
+ prefix="${prefix%_[0-9]*}"
+
+ local volume_id
volume_id=$(print_docker_name "$prefix$suffix")
- mounts+=(ccache-"$volume_id":"$HOME"/.ccache)
+
+ local readonly=""
+ if [ "$task" = "precommit" ]; then
+ readonly=":ro"
+ fi
+ echo "ccache-$volume_id:$HOME/.ccache$readonly"
;;
esac
+
case "$job" in
tcwg_bmk*)
+ assert_with_msg "Precommit benchmarking requires more thought" \
+ [ "$task" != "precommit" ]
# Add scratch mount for tcwg-benchmark's $HOME.
# tcwg_bmk-* jobs trigger tcwg-benchmark jenkins jobs, which
# then ssh to the build container to compile benchmark objects
@@ -824,10 +953,9 @@ print_volume_mounts ()
# parameter -- see tcwg_bmk-build.sh:benchmark()).
# This generates a fair bit of disk trafic on /home/tcwg-benchmark,
# and it's best to use docker scratch volume, rather than overlayfs.
- mounts+=(/home/tcwg-benchmark)
+ echo /home/tcwg-benchmark
;;
esac
- echo "${mounts[@]:+${mounts[@]}}"
)
}
@@ -851,9 +979,11 @@ __manifest_filename=("/dev/null")
# Set new file name for manifest
# $1: File name
+# $2: Optional true/false on whether to start a new manifest
manifest_push ()
{
local filename="$1"
+ local clean="${2-true}"
# Resolve absolute path to manifest.
local dir
@@ -862,7 +992,9 @@ manifest_push ()
dir=$(cd "$dir"; pwd)
__manifest_filename=("$dir/$(basename "$filename")" "${__manifest_filename[@]}")
- rm -f "${__manifest_filename[0]}"
+ if $clean; then
+ rm -f "${__manifest_filename[0]}"
+ fi
}
# Return to previous manifest filename
@@ -881,17 +1013,356 @@ manifest_out ()
cat >> "${__manifest_filename[0]}"
}
+# Fetch and print value from manifest
+# $1: Manifest file
+# $2: Variable to fetch
+# $3: Whether to ignore lack of the variable or lack of the manifest
+get_manifest ()
+{
+ (
+ set +x
+ set -euf -o pipefail
+ local manifest="$1"
+ local var="$2"
+ local strict="${3-true}"
+
+    # Empty result if no manifest found (update_baseline=init, for instance)
+ if ! [ -f "$manifest" ]; then
+ if $strict; then
+ return 1
+ fi
+ return 0
+ fi
+
+ # Unwrap $var down to variable name that we can unset.
+ local name="$var"
+ # {name} -> name
+ name=$(echo "$name" | sed -e 's/^{\(.*\)}$/\1/')
+ # Strip "+, -, :+, :-" suffixes
+ name=$(echo "$name" | sed -e 's/[-+:].*$//')
+
+ # remove any existing declarations of $name
+ unset "$name"
+
+ # FIXME: manifest should declare "rr" itself
+ declare -A rr
+
+ # shellcheck disable=SC1090
+ source "$manifest"
+
+ if ! $strict; then
+ # Do not complain about unbound variables
+ set +u
+ fi
+
+ eval echo "\$$var"
+ )
+}
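+# Example (sketch; assumes rr[ci_project] was recorded in the manifest):
+#   ci_project=$(get_manifest base-artifacts/manifest.sh "{rr[ci_project]}")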
+
+# Fetch and print value from manifest of a baseline build
+# $1: Variable to fetch.
+get_baseline_manifest ()
+{
+ get_manifest base-artifacts/manifest.sh "$1" false
+}
+
+# Fetch and print value from manifest of the current build
+# $1: Variable to fetch.
+get_current_manifest ()
+{
+ get_manifest "${rr[top_artifacts]}/manifest.sh" "$1"
+}
+
+get_baseline_git ()
+{
+ (
+ set -euf -o pipefail
+
+ local base_artifacts="base-artifacts"
+
+ assert_with_msg "ERROR: No $1 in baseline git" \
+ [ -f "$base_artifacts/git/$1" ]
+ cat "$base_artifacts/git/$1"
+ )
+}
+
+get_current_git ()
+{
+ (
+ set -euf -o pipefail
+ assert_with_msg "ERROR: No $1 in current git" \
+ [ -f "${rr[top_artifacts]}/git/$1" ]
+ cat "${rr[top_artifacts]}/git/$1"
+ )
+}
+
+set_current_git ()
+{
+ (
+ set -euf -o pipefail
+ mkdir -p ${rr[top_artifacts]}/git
+ cat > "${rr[top_artifacts]}/git/$1"
+ )
+}
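+# Example usage of the git/ helpers (sketch; assumes a "gcc" component):
+#   git -C gcc rev-parse HEAD | set_current_git gcc_rev
+#   new_rev=$(get_current_git gcc_rev)
+#   old_rev=$(get_baseline_git gcc_rev)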
+
+# returns the date of the last component ($1) commit
+get_baseline_component_date ()
+{
+ (
+ set -euf -o pipefail
+ local base_artifacts="base-artifacts"
+ assert_with_msg "ERROR: No $1 in current git" \
+ [ -f "$base_artifacts/git/${1}_rev" ]
+
+ git -C "$1" show --no-patch --pretty=%ct "$(cat "$base_artifacts/git/${1}_rev")"
+ )
+}
+
+get_current_component_date ()
+{
+ (
+ set -euf -o pipefail
+ assert_with_msg "ERROR: No $1 in current git" \
+ [ -f "${rr[top_artifacts]}/git/${1}_rev" ]
+
+ git -C "$1" show --no-patch --pretty=%ct "$(cat "${rr[top_artifacts]}/git/${1}_rev")"
+ )
+}
+
+# Print round-robin components that are being updated in this build
+# (the ones using non-baseline branches).
+print_updated_components ()
+{
+ (
+ set -euf -o pipefail
+
+ local c delim=""
+ for c in ${rr[components]}; do
+ if [ x"${rr[${c}_git]}" != x"baseline" ]; then
+ echo -ne "$delim$c"
+ delim=" "
+ fi
+ done
+ echo
+ )
+}
+
+# Print the single round-robin component being updated in this build.
+# Print nothing if multiple components are being updated.
+print_single_updated_component ()
+{
+ (
+ set -euf -o pipefail
+
+ local -a updated_components
+ IFS=" " read -r -a updated_components <<< "$(print_updated_components)"
+
+ if [ ${#updated_components[@]} -eq 1 ]; then
+ echo "${updated_components[0]}"
+ fi
+ )
+}
+
+# Print round-robin components that have new commits in this build
+# compared to the baseline.
+# This expects all components to be cloned and checked out at appropriate revs.
+# During bisect we have only a single component updated by definition, and
+# it is guaranteed to have clone_repo() called for it.
+print_changed_components ()
+{
+ (
+ set -euf -o pipefail
+
+ local c delim=""
+ for c in $(print_updated_components); do
+ if [ x"$(get_current_git ${c}_rev)" \
+ != x"$(get_baseline_git ${c}_rev)" ]; then
+ echo -ne "$delim$c"
+ delim=${1- }
+ fi
+ done
+ echo
+ )
+}
+
+# Breakup changed components into $culprit and the rest of components.
+# This will reduce the number of builds when $culprit is responsible for
+# majority of regressions.
+breakup_changed_components ()
+{
+ (
+ set -euf -o pipefail
+
+ local culprit="${1-}"
+
+ if [ "$culprit" = "" ] \
+ || ! print_changed_components "\n" \
+ | grep "^$culprit\$" >/dev/null; then
+ print_changed_components "\n"
+ else
+ echo "$culprit"
+ print_changed_components "\n" | grep -v "^$culprit\$" | tr '\n' ' ' \
+ | sed -e "s/ \$//g"
+ echo
+ fi
+ )
+}
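+# Example (sketch): if the changed components are "binutils gcc gdb", then
+#   breakup_changed_components gcc
+# prints "gcc" on the first line and "binutils gdb" on the second.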
+
+# Fetch paths from git history
+# $1 -- number of versions to fetch; if none of the paths are present in
+# a particular revision that revision doesn't count towards this number;
+# positive values will fetch the most recent N revisions starting from
+# most recent to less recent;
+# negative values will fetch the oldest -N revisions starting from oldest
+# to less old.
+# As a special case "0" will fetch you all revisions in "positive" order,
+# and "-0" will fetch you all revisions in "negative" order.
+# $2 -- git repo
+# $3 -- paths in git repo; can be files or directories, only the 1st path existing
+# in a revision is fetched (useful for renamed/moved files)
+#
+# This function fetches files into a temporary directory (pointed to by the first
+# line of output) and prints out paths under that temporary directory for subsequent
+# fetches of ${paths[@]}" from appropriate revisions.
+# Once one of the paths is found in a given revision, we check it out and
+# move on to the next revision.
+get_git_history ()
+{
+ (
+ set -euf -o pipefail
+
+ local n_revs="$1"
+ local repo="$2"
+ shift 2
+ local -a paths=("$@")
+
+ local -a rev_list_cmd
+ rev_list_cmd=(git -C "$repo" rev-list)
+ if [ "$n_revs" = "-0" ] || [ "$n_revs" -lt "0" ]; then
+ rev_list_cmd+=(--reverse)
+ n_revs=$((-$n_revs))
+ fi
+ rev_list_cmd+=(HEAD -- "${paths[@]}")
+
+ local rev tmp_root
+ tmp_root=$(mktemp -d)
+ echo "$tmp_root"
+
+ while read rev; do
+ local tmp_dir found path
+ tmp_dir="$tmp_root/$rev"
+ mkdir "$tmp_dir"
+
+ found=false
+ for path in "${paths[@]}"; do
+ git -C "$repo" archive "$rev" -- "$path" | tar -x -C "$tmp_dir" &
+ # "git archive" fails when $path was deleted in $rev.
+ if wait $!; then
+ found=true
+ break
+ fi
+ done
+
+ if $found; then
+ echo "$tmp_dir/$path"
+ n_revs=$(($n_revs-1))
+ if [ $n_revs = 0 ]; then
+ break
+ fi
+ else
+ rm -r "$tmp_dir"
+ fi
+ done < <("${rev_list_cmd[@]}")
+ )
+}
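+# Example (sketch; the repo and path are hypothetical):
+#   hist=$(get_git_history 5 base-artifacts 00-sumfiles/gcc.sum)
+#   tmp_root=$(echo "$hist" | head -n1)
+#   # ... process the remaining lines (one checked-out path per revision) ...
+#   rm -rf "$tmp_root"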
+
+convert_arg_var ()
+{
+ declare -g "$1=$2"
+ cat <<EOF | manifest_out
+declare -g "$1=$2"
+EOF
+}
+
+convert_arg_arr ()
+{
+ if ! test_array $1; then
+ declare -ag $1
+ cat <<EOF | manifest_out
+declare -ga $1
+EOF
+ fi
+ eval "$1+=(\"$2\")"
+ cat <<EOF | manifest_out
+$1+=("$2")
+EOF
+}
+
+convert_arg_declare ()
+{
+ local name="$1"
+
+ case "$name" in
+ *"["*"]")
+ local arr="${1%\[*\]}"
+ if ! test_array $arr; then
+ declare -Ag $arr
+ cat <<EOF | manifest_out
+declare -gA $arr
+EOF
+ fi
+ ;;
+ *)
+ declare -g "$name"
+ cat <<EOF | manifest_out
+declare -g $name
+EOF
+ ;;
+ esac
+}
+
+convert_arg_set ()
+{
+ eval "$1=\"$2\""
+ cat <<EOF | manifest_out
+$1="$2"
+EOF
+}
+
+convert_arg_assarr ()
+{
+ convert_arg_declare "$1"
+ convert_arg_set "$1" "$2"
+}
+
+convert_arg_source ()
+{
+ assert_with_msg "ERROR: manifest/include does not exist: $1" \
+ [ -f "$1" ]
+ # shellcheck disable=SC1090
+ source "$1"
+ echo "# Start of include $1" | manifest_out
+ cat "$1" | manifest_out
+ echo "# End of include $1" | manifest_out
+}
+
# Process "--var value" and "++arr elem" arguments and define corresponding
# variables and arrays.
# "--var value" defines shell variable "$var" to "value".
+# "__var value" defines shell variable "$var" to "value", but doesn't store
+# it to the manifest. This is useful for passing secrets.
# "++arr elem" defines shell array "$arr[@]" and adds "elem" to it.
# "==arr[key] value" defines shell associative array "$arr[@]" and sets
# "${arr[key]}" to "value".
# "@@ file" sources file.
-# "%% file" starts manifest in file. Also see "^^ true".
-# "^^ true/false %% manifest" whether to reproduce the build using manifest.
-# If "true" -- source manifest instead of generating it, then discard
-# all following options at to separator "--".
+# "@@artifacts_var dir" defines artifacts directory and sources the manifest in
+# from dir/manifest.sh. This is useful for reproducing
+# builds.
+# "%%artifacts_var dir" defines artifacts directory and starts manifest in
+# dir/manifest.sh. Also see "^^ true".
+# "^^ true/false %%artifacts_var dir" whether to reproduce the build using manifest.
+# If "true" -- source dir/manifest.sh instead of generating it, then discard
+# all following options up to separator "--".
# If "false" -- do nothing and proceed as usual.
#
# Shell array $CONVERTED_ARGS is set to the arguments processed.
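+#
+# Example invocation (a sketch; the option names are hypothetical):
+#   convert_args_to_variables %%artifacts artifacts --verbose true \
+#       ++components gcc ==rr[mode] build
+# starts the manifest in artifacts/manifest.sh and sets $artifacts, then
+# defines $verbose, appends "gcc" to ${components[@]}, and sets ${rr[mode]}.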
@@ -911,73 +1382,91 @@ convert_args_to_variables ()
break
;;
"--"*)
- name="${1#--}"
+ assert_with_msg "ERROR: Parameter value not provided for $1." \
+ [ $# -ge 2 ]
+ convert_arg_var "${1#--}" "$2"
+ num=2
+ ;;
+ "__"*)
+ assert_with_msg "ERROR: Parameter value not provided for $1." \
+ [ $# -ge 2 ]
+ name="${1#__}"
+ # FIXME: Can we add "set +x" here?
declare -g "$name=$2"
- cat <<EOF | manifest_out
-declare -g "$name=$2"
-EOF
num=2
;;
"++"*)
- name="${1#++}"
- if ! test_array $name; then
- declare -ag $name
- cat <<EOF | manifest_out
-declare -ga $name
-EOF
- fi
- eval "$name+=(\"$2\")"
- cat <<EOF | manifest_out
-$name+=("$2")
-EOF
+ assert_with_msg "ERROR: Parameter value not provided for $1." \
+ [ $# -ge 2 ]
+ convert_arg_arr "${1#++}" "$2"
num=2
;;
"=="*)
- name="${1#==}"
- arr="${name%\[*\]}"
- if ! test_array $arr; then
- declare -Ag $arr
- cat <<EOF | manifest_out
-declare -gA $arr
-EOF
- fi
- if [ $# -lt 2 ]; then
- echo "ERROR: Parameter value not provided for $1."
- exit 1
- fi
- eval "$name=\"$2\""
- cat <<EOF | manifest_out
-$name="$2"
-EOF
+ assert_with_msg "ERROR: Parameter value not provided for $1." \
+ [ $# -ge 2 ]
+ convert_arg_assarr "${1#==}" "$2"
num=2
;;
"@@")
- # shellcheck disable=SC1090
- source "$2"
- echo "# Start of include $2" | manifest_out
- cat "$2" | manifest_out
- echo "# End of include $2" | manifest_out
+ assert_with_msg "ERROR: Parameter value not provided for $1." \
+ [ $# -ge 2 ]
+ convert_arg_source "$2"
+ num=2
+ ;;
+ "@@"*)
+ # TODO: It should be possible to simplify handling of "^^"
+ # now that we have @@artifacts dir.
+ assert_with_msg "ERROR: Parameter value not provided for $1." \
+ [ $# -ge 2 ]
+
+ name="${1#@@}"
+
+ # FIXME: This should not be necessary since manifests should
+ # "declare -Ag rr" themselves, but current manifests don't
+ # do that, due to "declare -A rr" in round-robin.sh. That
+ # declaration makes convert_arg_declare think that rr was
+ # already added to the manifest.
+ convert_arg_declare "$name"
+
+ convert_arg_source "$2/manifest.sh"
+ manifest_push "$2/manifest.sh" false
+
+ # Builds are supposed to be re-runnable from different
+ # directories, so do not put artifacts directory into manifest.
+ eval "$name=\"$2\""
+
num=2
;;
- "%%")
- manifest_push "$2"
+ "%%"*)
+ assert_with_msg "ERROR: Parameter value not provided for $1." \
+ [ $# -ge 2 ]
+ manifest_push "$2/manifest.sh"
cat <<EOF | manifest_out
-# Start option processing
-jenkins_scripts_rev=$(git -C "$(dirname "$0")" rev-parse HEAD)
+declare -g "jenkins_scripts_rev=$(git -C "$(dirname "$0")" rev-parse HEAD)"
+# Artifacts directory
+EOF
+ name="${1#%%}"
+ convert_arg_declare "$name"
+
+ # Builds are supposed to be re-runnable from different
+ # directories, so do not put artifacts directory into manifest.
+ eval "$name=\"$2\""
+
+ cat <<EOF | manifest_out
+# Recording parameters to manifest: $2/manifest.sh
EOF
num=2
;;
"^^")
+ assert_with_msg "ERROR: Parameter value not provided for $1." \
+ [ $# -ge 4 ]
if [ x"$2" = x"true" ]; then
- # Check that we have a manifest to reproduce
- if [ x"$3" != x"%%" ] || [ ! -f "$4" ]; then
- echo "ERROR: '^^ true' must be followed by '%% <MANIFEST>'"
- exit 1
- fi
-
- # Source the manifest for reproduction.
- # shellcheck disable=SC1090
- source "$4"
+ name="${3#%%}"
+ case "$name" in
+ *"["*"]") convert_arg_assarr "$name" "$4" ;;
+ *) convert_arg_var "$name" "$4" ;;
+ esac
+ convert_arg_source "$4/manifest.sh"
# Skip processing all following arguments.
num=0
@@ -1004,9 +1493,6 @@ EOF
done
done
eval "SHIFT_CONVERTED_ARGS=$total"
- cat <<EOF | manifest_out
-# Processed $total options
-EOF
}
# Check that varible names in "$@" are set
@@ -1016,10 +1502,21 @@ obligatory_variables ()
(
set -euf -o pipefail
for i in "$@"; do
- if eval "[ x\"\${$i+set}\" != x\"set\" ]"; then
- echo "ERROR: required parameter $i not set"
- exit 1
- fi
+ case "$i" in
+ *"["*"]")
+ if eval "[ x\"\${$i+set}\" != x\"set\" ]"; then
+ echo "ERROR: required parameter $i not set"
+ exit 1
+ fi
+ ;;
+ *)
+ if [[ "$(declare -p "$i" 2>/dev/null)" \
+ != "declare "* ]]; then
+ echo "ERROR: required parameter $i not set"
+ exit 1
+ fi
+ ;;
+ esac
done
)
}
@@ -1043,14 +1540,25 @@ print_gnu_target ()
set -euf -o pipefail
local target="$1"
- if [ x"$target" = x"native" ]; then
- target=$(uname -m)
- fi
case "$target" in
"aarch64") target="aarch64-linux-gnu" ;;
- "arm_eabi") target="arm-eabi" ;;
+ arm*_eabi) target="arm-eabi" ;;
+ thumb*_eabi) target="arm-eabi" ;;
"arm"*) target="arm-linux-gnueabihf" ;;
+ "woa64") target="aarch64-w64-mingw32" ;;
"x86_64") target="x86_64-linux-gnu" ;;
+ "native")
+ case "$(uname -m)" in
+ "aarch64") target="aarch64-unknown-linux-gnu" ;;
+ "armv7l") target="armv7l-unknown-linux-gnueabihf" ;;
+ "armv8l") target="armv8l-unknown-linux-gnueabihf" ;;
+ "x86_64") target="x86_64-pc-linux-gnu" ;;
+ *)
+ echo "ERROR: Unknown native target $(uname -m)" >&2
+ exit 1
+ ;;
+ esac
+ ;;
*) echo "ERROR: Unknown target $target" >&2; exit 1 ;;
esac
echo "$target"
@@ -1103,54 +1611,15 @@ print_kernel_target ()
git_clean () {
(
set -euf -o pipefail
-
- fresh_dir "$1" "$1/.git/*"
- git -C "$1" reset --hard
- )
-}
-
-# Add git remote pointing to linaro's git repo/mirrors with writable
-# toolchain/ci/* repo. Deduce repo's URL from URL of existing
-# "origin" git remote.
-# $1: Git clone directory (must have "origin" remote configured)
-# $2: Name of the new remote.
-# $3: Whether to make the new remote read-only or read-write.
-git_init_linaro_local_remote ()
-{
- (
- set -euf -o pipefail
local dir="$1"
- local remote="$2"
- local read_only="$3"
-
- local origin_url
- local new_url
- origin_url=$(git -C "$dir" remote get-url origin)
-
- # Figure out mirror repo on linaro's servers.
- case "$origin_url" in
- *"kernel.org/"*"/linux"*)
- new_url="toolchain/ci/linux.git"
- ;;
- *"linaro.org/toolchain/gcc-compare-results.git")
- new_url="toolchain/gcc-compare-results.git"
- ;;
- *)
- new_url="toolchain/ci/$(basename $origin_url)"
- ;;
- esac
+ shift
- # Use git-us.l.o to avoid delays between review.l.o and git.l.o
- new_url="git-us.linaro.org/$new_url"
- if $read_only; then
- new_url="https://$new_url"
- else
- # Use gitolite access. Gerrit's ssh access verifies pushed commits,
- # which can slow-down server on big pushes.
- new_url="ssh://$new_url"
+ fresh_dir "$dir" "$dir/.git/*"
+ if ! git -C "$dir" reset -q --hard "$@"; then
+ # "git reset" may fail if index gets corrupted -- remove it and retry.
+ rm -f "$dir/.git/index"
+ git -C "$dir" reset -q --hard "$@"
fi
-
- git_set_remote "$dir" "$remote" "$new_url"
)
}
@@ -1177,6 +1646,8 @@ git_push ()
# Initialize run_step state
# $1: Step to start execution at (or "" to start at the very first step)
+# Appending "+" to the step name, e.g., "__start_at reset_artifacts+"
+# makes us start on the step right AFTER the specified step.
# $2: Step to finish execution at (or "" to run till the very end)
# $3: Top artifact directory
# $4: Whether to enable "set -x" verbosity for execution steps.
@@ -1202,7 +1673,33 @@ finishing at step \"$run_step_finish_at\""
run_step_top_artifacts=$(cd "$run_step_top_artifacts"; pwd)
rm -f $run_step_top_artifacts/console.log
+ rm -f $run_step_top_artifacts/console.log.xz
rm -f $run_step_top_artifacts/results
+
+ # If no manifest file was provided, supply a default one.
+ if [ ${#__manifest_filename[@]} -eq 1 ]; then
+ manifest_push "$run_step_top_artifacts/manifest.sh"
+ fi
+}
+
+# Patch environment for subsequent steps. This works by generating
+# a source-able file patch-env.sh in the artifacts of the current step.
+# Run_step() then sources this file to update the environment.
+# Note that we build walls around individual steps on purpose. This allows
+# us to SKIP several initial steps during bisect builds, and have a clear
+# record of environment modifications in artifacts/NN-step/patch-env.sh
+# scripts, which could be applied in correct order.
+#
+# $@: parameters in the format that convert_args_to_variables() understands.
+run_step_patch_env ()
+{
+ # !!! Each step is limited to a single invocation of run_step_patch_env()
+ # !!! due to manifest_push() re-writing the manifest.
+ assert_with_msg "patch-env.sh manifest already exists" \
+ ! [ -e $run_step_artifacts/patch-env.sh ]
+ manifest_push $run_step_artifacts/patch-env.sh
+ convert_args_to_variables "$@"
+ manifest_pop
}
# Run execution step and handle its failure as requested
@@ -1215,8 +1712,7 @@ finishing at step \"$run_step_finish_at\""
# Step commands have $run_step_artifacts pointing to artifact directory
# for current step.
 # 3. logging -- dump stdout and stderr output of step commands
-# into per-step console.log files, and, also, into the top-level
-# console.log file.
+# into per-step console.log files
# 4. result handling -- output provided success result to artifacts/results
# for successful steps. Special value "x" means to let the step itself
# update artifacts/results. Results are written to artifacts/results
@@ -1244,6 +1740,12 @@ run_step ()
step=("$@")
+ if [ "$success_result" != "x" ]; then
+ cat >> $run_step_top_artifacts/results <<EOF
+# ${step[@]}:
+EOF
+ fi
+
pretty_step="$1"
shift
while [ $# -gt 0 ]; do
@@ -1257,13 +1759,19 @@ run_step ()
run_step_count=$(($run_step_count+1))
+ local full_step_name
+ full_step_name=$(printf "%02d" $run_step_count)-$pretty_step
+ # This is used when accessing the workspace
+ run_step_artifacts=$run_step_top_artifacts/$full_step_name
+
# Start running steps if:
# the current step is the starting step OR
# we haven't run any steps yet and
# there is no set starting step
- if [ x"$pretty_step" = x"$run_step_start_at" ] || \
- ( [ x"$run_step_start_at" = x"" ] && \
- [ x"$run_step_prev_step" = x"" ] ); then
+ if [ "$pretty_step" = "$run_step_start_at" ] \
+ || [ "${run_step_prev_step}+" = "$run_step_start_at" ] \
+ || ( [ "$run_step_start_at" = "" ] \
+ && [ "$run_step_prev_step" = "" ] ); then
run_step_active=true
fi
@@ -1271,7 +1779,7 @@ run_step ()
local skip=false
case "$run_step_status:$run_mode" in
0:*) ;;
- $EXTERNAL_FAIL:stop_on_fail)
+ "$EXTERNAL_FAIL:stop_on_fail")
echo "STOPPING before ${step[*]} due to previous external failure"
return $EXTERNAL_FAIL
;;
@@ -1291,14 +1799,10 @@ run_step ()
esac
if ! $skip; then
- local full_step_name
- full_step_name=$(printf "%02d" $run_step_count)-$pretty_step
- # This is used when accessing the workspace
- run_step_artifacts=$run_step_top_artifacts/$full_step_name
local log_url=""
if [ -v BUILD_URL ]; then
# Link to jenkins, valid once the job has finished
- log_url="(${BUILD_URL}artifact/artifacts/$full_step_name/console.log)"
+ log_url="(${BUILD_URL}artifact/artifacts/$full_step_name/console.log.xz)"
fi
rm -rf "$run_step_artifacts"
@@ -1306,17 +1810,53 @@ run_step ()
echo "RUNNING ${step[*]}; see tail -f $run_step_artifacts/console.log" $log_url
run_step_status=0
- eval "if $run_step_verbose; then set -x; else set +x; fi; ${step[*]}" 2>&1 | ts -s "%T" | tee -a $run_step_top_artifacts/console.log > $run_step_artifacts/console.log &
- wait $! || run_step_status=$?
+ # We are running "${step[@]}" in a sub-shell, so that any
+ # modifications to environment will be lost.
+ # The steps can modify environment for subsequent steps by using
+ # run_step_patch_env().
+ # We redirect stdout and stderr of "${step[@]} to a pipe, which
+ # is connected to timestamping console. Piping "step | ts -s"
+ # directly causes weird issue with failed exit code always being
+ # "1" instead of, e.g., 125.
+
+ local pipe step_pid ts_pid
+ pipe=$(mktemp -u)
+ mkfifo "$pipe"
+
+ (
+ if $run_step_verbose; then
+ set -x
+ else
+ set +x
+ fi
+ "${step[@]}"
+ ) &> "$pipe" &
+ step_pid=$!
+
+ ts -s "%T" < "$pipe" > $run_step_artifacts/console.log &
+ ts_pid=$!
+
+ wait $step_pid || run_step_status=$?
+ wait $ts_pid
+ rm "$pipe"
+
+ xz $run_step_artifacts/console.log
+
+ if [ x"$success_result" != x"x" ] \
+ && [ x"$run_step_status" != x"0" ]; then
+ cat >> $run_step_top_artifacts/results <<EOF
+# FAILED
+EOF
+ fi
case "$run_step_status:$run_mode" in
0:*) ;;
- $EXTERNAL_FAIL:stop_on_fail|$EXTERNAL_FAIL:reset_on_fail)
+ "$EXTERNAL_FAIL:stop_on_fail"|"$EXTERNAL_FAIL:reset_on_fail")
echo "STOPPING at ${step[*]} due to external failure"
return $EXTERNAL_FAIL
;;
*:stop_on_fail|*:reset_on_fail)
- echo "STOPPING at ${step[*]} due to internal failure"
+ echo "STOPPING at ${step[*]} due to failure"
return $INTERNAL_FAIL
;;
*:skip_on_fail)
@@ -1330,13 +1870,17 @@ run_step ()
echo "SKIPPING ${step[*]}"
fi
- if [ x"$run_step_status" = x"0" ] && [ x"$success_result" != x"x" ]; then
+ if [ x"$success_result" != x"x" ] && [ x"$run_step_status" = x"0" ]; then
cat >> $run_step_top_artifacts/results <<EOF
-# ${step[@]}:
$success_result
EOF
fi
+ if [ -f $run_step_artifacts/patch-env.sh ]; then
+ # shellcheck disable=SC1090
+ source $run_step_artifacts/patch-env.sh
+ fi
+
if [ x"$pretty_step" = x"$run_step_finish_at" ]; then
run_step_active=false
fi
@@ -1368,7 +1912,7 @@ print_traceback ()
{
local exit_status=$?
case $exit_status in
- $INTERNAL_FAIL|$EXTERNAL_FAIL) ;;
+ "$INTERNAL_FAIL"|"$EXTERNAL_FAIL") ;;
*)
echo "ERROR Traceback (most recent call last):"
# Show most recent calls last
@@ -1388,3 +1932,186 @@ print_traceback ()
;;
esac
}
+
+# Print destination sub-directory of interesting-commit.git for ...
+# $1: component
+# $2: sha1 of the commit
+# $3: ci_project
+# $4: ci_config
+interesting_subdir ()
+{
+ local dir="$1/sha1" # $component/sha1
+ if [ $# -ge 2 ]; then dir="$dir/$2"; fi # /$sha1
+ if [ $# -ge 3 ]; then dir="$dir/$3"; fi # /$ci_project
+ if [ $# -ge 4 ]; then dir="$dir/$4"; fi # /$ci_config
+ echo "$dir"
+}
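+# Example (sketch; arguments are hypothetical):
+#   interesting_subdir gcc 1234abcd tcwg_gnu_cross_check_gcc master-aarch64
+# prints "gcc/sha1/1234abcd/tcwg_gnu_cross_check_gcc/master-aarch64".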
+
+# Print user-friendly "git describe" of a given commit
+# $1: Component (gcc, llvm, etc.)
+# $2: Commit hash
+# $3: If "true", never fail to describe and print out something sensible.
+# Otherwise return empty string on failure.
+describe_sha1 ()
+{
+ local component="$1"
+ local sha1="$2"
+ local anything="$3"
+
+ local -a match=()
+ case "$component" in
+ gcc) match=(--match "basepoints/*" --match "releases/*") ;;
+ binutils) match=(--match "binutils*") ;;
+ gdb) match=(--match "gdb*") ;;
+ newlib) match=(--match "newlib*") ;;
+ esac
+
+ if ! git -C "$component" describe "${match[@]}" $sha1 2>/dev/null \
+ && $anything; then
+ echo "$component#$(git -C "$component" rev-parse --short $sha1)"
+ fi
+}
+
+# To avoid committing unwanted files into git (e.g., raw benchmarking
+# data) we implement "annex" support. Files in base-artifacts/annex
+# can be either symlinks to directories or regular files containing
+# rsync-able urls.
+#
+# Here we convert directory symlinks into tarballs, upload to bkp-01
+# and replace symlinks with files pointing to their uploaded location.
+#
+# In git_annex_download we do the opposite: download and extract tarball
+# into a temporary directory, and replace the file with a symlink
+# to that directory.
+#
+# The end result is that during a build base-artifacts/annex/bmk-data
+# is a symlink with directory-like behavior. Outside of a build
+# base-artifacts/ repo contains a regular file pointing to a tarball
+# on a private fileserver.
+#
+# FIXME: We do not automatically remove annex tarballs when trimming
+# or rewriting history. We rely on tcwg-cleanup-stale-results.sh for that.
+#
+# $1: git repo
+# $2: annex directory inside the repo
+# $3: tarball name prefix
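+#
+# A minimal usage sketch (hypothetical prefix; see push_baseline() in
+# round-robin-baseline.sh for the real call):
+#   git_annex_upload base-artifacts annex "tcwg_bmk/1234-"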
+git_annex_upload ()
+{
+ (
+ set -euf -o pipefail
+ local repo="$1"
+ local annex_dir="$2"
+ local pretty_id="$3"
+
+ if ! [ -d "$repo/$annex_dir" ]; then
+ return 0
+ fi
+
+ local n_cpus=0
+ if [ "$(getconf LONG_BIT)" = "32" ]; then
+ # XZ allocates a few hundred megabytes per thread, which can easily
+ # exhaust the virtual memory of armhf containers on 160-core machines.
+ # Limit xz parallelism to 8.
+ n_cpus=$(nproc --all)
+ if [ "$n_cpus" -gt "8" ]; then
+ n_cpus=8
+ fi
+ fi
+
+ # Convert annex symlinks to remote links
+ local symlink dir md5 remote_path newlink
+ while IFS= read -r -d '' symlink; do
+ dir=$(readlink "$repo/$annex_dir/$symlink")
+
+ # Generate MD5 hash of the contents of the annex: find all files
+ # and generate md5sum for each of them, and then generate md5sum
+ # of that list.
+ # We avoid using md5sum of the tarball because using tar with
+ # multi-threaded xz compression may produce different tarballs.
+ md5=$(cd "$dir"; find -L -type f -print0 | xargs -0 md5sum | sort \
+ | md5sum - | awk '{ print $1 }')
+
+ remote_path="$HOME/$repo/$annex_dir/${pretty_id}$md5.tar.xz"
+ newlink="bkp-01.tcwglab:$remote_path"
+
+ # Check if bkp-01.tcwglab already has an annexed tarball with our data.
+ # When re-writing history in round-robin-baseline.sh we download and
+ # re-upload the same data multiple times. This optimization saves
+ # compression and upload time.
+ if ! ssh -n bkp-01.tcwglab test -f "$remote_path"; then
+ local tarball
+ tarball=$(mktemp --suffix=.tar.xz)
+ chmod 0644 "$tarball"
+ # We have a local link to the annex -- make it remote.
+ XZ_OPT=-T$n_cpus tar cJf "$tarball" -C "$dir" .
+
+ ssh -n bkp-01.tcwglab mkdir -p "$(dirname "$remote_path")"
+ rsync -a "$tarball" "$newlink"
+
+ rm "$tarball"
+ fi
+
+ # In normal builds, files inside $dir will be owned by tcwg-benchmark,
+ # so we will fail to delete them. Still, try to delete the directory
+ # to avoid running out of disk space when re-writing history.
+ rm -rf "$dir" &>/dev/null || true
+
+ git -C "$repo" rm "$annex_dir/$symlink"
+ # If $symlink is the last file in $annex_dir, then "git rm" will
+ # remove the directory as well. Re-create it.
+ mkdir -p "$repo/$annex_dir"
+ echo "$newlink" > "$repo/$annex_dir/$symlink"
+ git -C "$repo" add "$annex_dir/$symlink"
+ done < <(cd "$repo/$annex_dir"; find . -type l -print0)
+
+ # Update the commit with the new links.
+ git -C "$repo" commit --amend -C HEAD
+ )
+}
+
+# $1: git repo
+# $2: annex directory inside the repo
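+#
+# Usage sketch: after "git_annex_download base-artifacts annex",
+# base-artifacts/annex/bmk-data is again a symlink to a temporary
+# directory holding the downloaded and extracted data.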
+git_annex_download ()
+{
+ (
+ set -euf -o pipefail
+ local repo="$1"
+ local annex_dir="$2"
+
+ # FIXME: Remove workaround for old-style bmk-data after history rewrite.
+ if [ -f "$repo/results_id" ]; then
+ local link dir
+ link=$(cat "$repo/results_id")
+
+ dir=$(mktemp -d)
+ rsync -a --del "bkp-01.tcwglab:/home/tcwg-benchmark/results-$link/" \
+ "$dir/"
+
+ rm -rf "${repo:?}/$annex_dir"
+ mkdir "$repo/$annex_dir"
+
+ ln -s "$dir" "$repo/$annex_dir/bmk-data"
+ fi
+
+ if ! [ -d "$repo/$annex_dir" ]; then
+ return 0
+ fi
+
+ # Resolve annex links to local symlinks.
+ # See round-robin-baseline.sh:push_baseline() for details.
+ local linkfile link tarball dir
+ while IFS= read -r -d '' linkfile; do
+ link=$(cat "$repo/$annex_dir/$linkfile")
+
+ tarball=$(mktemp --suffix=.tar.xz)
+ rsync -a "$link" "$tarball"
+
+ dir=$(mktemp -d)
+ tar xf "$tarball" -C "$dir"
+ rm "$tarball"
+
+ rm "$repo/$annex_dir/$linkfile"
+ ln -s "$dir" "$repo/$annex_dir/$linkfile"
+ done < <(cd "$repo/$annex_dir"; find . -type f -print0)
+ )
+}
diff --git a/jenkins.sh b/jenkins.sh
index bb3d7e0f..aace08ef 100755
--- a/jenkins.sh
+++ b/jenkins.sh
@@ -197,7 +197,7 @@ something_to_upload=true
orig_parameters=( "$@" )
-getopt -o s:g:w:o:l:rt:b:h -l override:,gcc-branch:,snapshots:,gitrepo:,abedir:,workspace:,options:,logserver:,logname:,languages:,runtests,target:,testcontainer:,bootstrap,help,excludecheck:,norebuild,extraconfig:,send-results-to: -Q -- "$@"
+getopt -o s:g:w:o:l:rt:b:h -l override:,gcc-branch:,snapshots:,gitrepo:,abedir:,workspace:,options:,logserver:,logname:,languages:,runtests,target:,testcontainer:,bootstrap,help,excludecheck:,norebuild,extraconfig:,send-results-filter:,send-results-to: -Q -- "$@"
while test $# -gt 0; do
case $1 in
--gcc-branch) change="$change gcc=$2"; shift ;;
@@ -216,6 +216,7 @@ while test $# -gt 0; do
-r|--runtests) runtests="true" ;;
-b|--bootstrap) try_bootstrap="true" ;;
--excludecheck) excludecheck_opt="$excludecheck_opt --excludecheck $2"; shift ;;
+ --send-results-filter) change="${change} --send-results-filter $2"; shift ;;
--send-results-to) change="${change} --send-results-to $2"; shift ;;
--norebuild) rebuild=false ;;
-h|--help) usage 0 ;;
@@ -286,7 +287,8 @@ while [ x"$logserver" != x"" ]; do
;;
*)
echo "ERROR: Unexpected status of logs: $log_status"
- exit 1
+ status=1
+ exit $status
;;
esac
@@ -425,8 +427,9 @@ fi
# We used to delete *.sum files, but this should not be necessary. This
# check is transitional, and will be removed later.
if [ -n "$(find ${user_workspace} -name \*.sum)" ]; then
- echo "Found *.sum files, but workspace should be empty!"
- exit 1
+ echo "Found *.sum files, but workspace should be empty!"
+ status=1
+ exit $status
fi
if test x"${try_bootstrap}" = xtrue; then
@@ -461,7 +464,8 @@ if test $ret -gt 0; then
echo "================= TAIL OF LOG: BEGIN ================="
tail -n 50 build.out
echo "================= TAIL OF LOG: FINISH ================="
- exit 1
+ status=1
+ exit $status
fi
# if runtests is true, then run make check after the build completes
@@ -476,7 +480,8 @@ if $runtests; then
ls -l /dev/pts
echo "running: grep devpts /proc/mounts"
grep devpts /proc/mounts
- exit 1
+ status=1
+ exit $status
fi
check="--check all"
@@ -490,7 +495,8 @@ if $runtests; then
echo "================= TAIL OF LOG: BEGIN ================="
tail -n 50 check.out
echo "================= TAIL OF LOG: FINISH ================="
- exit 1
+ status=1
+ exit $status
fi
fi
diff --git a/lnt-utils.sh b/lnt-utils.sh
new file mode 100644
index 00000000..1081021d
--- /dev/null
+++ b/lnt-utils.sh
@@ -0,0 +1,359 @@
+#!/usr/bin/env bash
+
+# ==============================================================================
+# is_first_lnt_entry : is used in both report_header() and report_test_entry()
+declare is_first_lnt_entry
+
+report_header()
+{
+ local machine_name="$1"
+ local date_time="$2"
+ shift 2
+ local run_infos=("$@")
+ declare -g is_first_lnt_entry
+
+ cat <<-EOF
+ {
+ "Machine": {
+ "Info": {},
+ "Name": "$machine_name"
+ },
+ "Run": {
+ "Info": {
+EOF
+
+ for i in "${run_infos[@]}"; do
+ tag=$(echo $i | cut -d: -f1)
+ val=$(echo $i | cut -d: -f2-)
+ echo " \"$tag\": \"$val\","
+ done
+
+ cat <<-EOF
+ "__report_version__": "1"
+ },
+ "Start Time": "$date_time"
+ },
+ "Tests": [
+ EOF
+ is_first_lnt_entry=true
+}
+
+report_footer()
+{
+ cat <<-EOF
+ ]
+ }
+ EOF
+}
+
+report_test_entry()
+{
+ local name="$1"
+ local value="$2"
+ declare -g is_first_lnt_entry
+
+ $is_first_lnt_entry || echo " ,"
+
+ cat <<-EOF
+ {
+ "Data": [
+ $value
+ ],
+ "Info": {},
+ "Name": "$name"
+ }
+ EOF
+ is_first_lnt_entry=false
+}
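+
+# Taken together, the helpers above emit an LNT "report version 1" JSON
+# document on stdout. A minimal sketch (hypothetical names and values):
+#   report_header my-machine "2024-01-01 00:00:00" "tag:my_suite"
+#   report_test_entry "my_suite.foo.execution" 42
+#   report_footer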
+
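+# Print a browsable commit URL for component $1 at its current revision.
+# For example (hypothetical revision), "git://sourceware.org/git/binutils-gdb.git"
+# maps to "https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=<rev>".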
+get_current_component_url()
+{
+ c=$1
+
+ local url
+ url=$(get_current_git ${c}_url)
+ rev=$(get_current_git ${c}_rev)
+
+ if [[ "$url" =~ git://sourceware.org/git/ ]]; then
+ url="${url#git://sourceware.org/git/}"
+ url="https://sourceware.org/git/?p=$url"
+ echo "$url;a=commit;h=$rev"
+ elif [[ "$url" =~ https://github.com/ ]] \
+ || [[ "$url" =~ https://gitlab.com/ ]]; then
+ echo "${url%.git}/commit/$rev"
+ elif [[ "$url" =~ https://git.linaro.org/ ]]; then
+ echo "${url}/commit/?id=$rev"
+ else
+ echo "$url ($rev)"
+ fi
+}
+
+get_component_changes()
+{
+ local base_rev cur_rev
+ # In "init" mode, no component_changes annotation
+ if [ x"$(get_current_manifest "{rr[update_baseline]}")" != x"init" ]; then
+ base_rev=$(get_baseline_git ${c}_rev)
+ cur_rev=$(get_current_git ${c}_rev)
+ if [ "$base_rev" = "$cur_rev" ]; then
+ echo "(unchanged)"
+ else
+ echo "(+ $(git -C $c rev-list --count $base_rev..$cur_rev || echo "??") commits)"
+ fi
+ fi
+}
+
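+# Print a fixed-width variant of "git describe" for the changed component,
+# suitable for LNT run_order sorting. For example (hypothetical describe
+# output), "basepoints/gcc-14-123-g1234abc" is padded to
+# "basepoints/gcc-14-00123-g1234abc"; when more than one component changed,
+# "<N>-changed-components" is printed instead.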
+get_describe_pad()
+{
+ local nb_changed_components=0 describe_pad=""
+
+ for c in $(get_current_manifest "{rr[components]}"); do
+ local base_rev="" cur_rev
+ # in "init" mode, the base_rev will be empty, and considered as
+ # different than cur_rev. the results of the function will be as
+ # if every component changed.
+ [ "$(get_current_manifest "{rr[update_baseline]}")" = "init" ] || \
+ base_rev=$(get_baseline_git ${c}_rev || true)
+ cur_rev=$(get_current_git ${c}_rev)
+ if [ "$base_rev" != "$cur_rev" ]; then
+ nb_changed_components=$((nb_changed_components+1))
+ if [ $nb_changed_components = 1 ]; then
+ describe=$(describe_sha1 "$c" "$cur_rev" false)
+ describe_pad=$(
+ echo "$describe" \
+ | sed 's/\(.*\)-\(.*\)-\(.*\)$/\1 \2 \3/' \
+ | awk '{ $2 = sprintf("%05d", $2); print $1"-"$2"-"$3}'
+ )
+ else
+ describe_pad="$nb_changed_components-changed-components"
+ fi
+ fi
+ done
+
+ if [ $nb_changed_components = 0 ]; then
+ echo "no-change"
+ else
+ echo "$describe_pad"
+ fi
+}
+
+# ==============================================================================
+
+# Create an LNT JSON report for the given sumfiles.
+generate_lnt_gnu_check_report()
+{
+ local build_url=${1:?}
+ local ci_project=${2:?} # tcwg_gnu_native_check_gcc
+ local ci_config=${3:?} # master-aarch64
+ local results_date=${4:?}
+ local jira_key=${5:?}
+ local sumfiles_dir=${6:?}
+ local output_file=${7:?}
+
+ local -a sumfiles
+ readarray -t -d '' sumfiles < <(find "$sumfiles_dir" -name '*.sum' -print0)
+ # Do not generate any LNT report if there are no sumfiles.
+ [ ${#sumfiles[@]} = 0 ] && return
+
+ local lnt_testsuite=${ci_project}
+ local machine_name=${ci_config}
+
+ local build_number
+ build_number=$(basename "$build_url")
+
+ (
+ # Generate a header with useful info for the report.
+ additional_run_infos=("tag:$lnt_testsuite" "test_url:$build_url")
+ additional_run_infos+=("run_order:$(printf "%04d" "$build_number") ($(get_describe_pad))")
+ [ "$jira_key" != "-" ] && additional_run_infos+=("regression:https://linaro.atlassian.net/browse/$jira_key")
+
+ # Disable traces; this function is too verbose.
+ set +x
+
+ for c in $(get_current_manifest "{rr[components]}"); do
+ additional_run_infos+=("git_${c}:$(get_current_component_url $c) $(get_component_changes)")
+ done
+
+ report_header "$machine_name" "$results_date" "${additional_run_infos[@]}"
+
+ # An entry and a total value must be generated for each of these fields
+ # (even if the field is missing in the sum file).
+ local all_fields=(
+ FAIL UNRESOLVED UNTESTED ERROR XPASS OTHER
+ KFAIL XFAIL PASS UNSUPPORTED
+ good bad
+ )
+
+ for field in "${all_fields[@]}"; do
+ eval "total_$field=0"
+ done
+
+ for sumfile in "${sumfiles[@]}"; do
+ suite_name=$(basename "$sumfile" .sum) # gcc, g++, libatomic, ...
+ fields=("${all_fields[@]}")
+
+ for field in "${fields[@]}"; do
+ eval "$field=0"
+ done
+
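+ # Lines of interest in a .sum file look like "PASS: gcc.dg/some-test.c"
+ # (hypothetical test name); the grep/sed pipeline below keeps only the
+ # kind (PASS, FAIL, XFAIL, ...).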
+ while read -r kind; do
+ [[ ! ${fields[*]} =~ $kind ]] && kind="OTHER"
+ eval "(( $kind+=1 ))"
+ eval "(( total_$kind+=1 ))"
+ case "$kind" in
+ KFAIL|XFAIL|PASS|UNSUPPORTED)
+ eval "(( good+=1 ))"
+ eval "(( total_good+=1 ))"
+ ;;
+ FAIL|UNRESOLVED|UNTESTED|ERROR|XPASS|OTHER)
+ eval "(( bad+=1 ))"
+ eval "(( total_bad+=1 ))"
+ ;;
+ *)
+ ;;
+ esac
+ done < <(grep -E '^[A-Z]+:' "$sumfile" | sed 's/:.*//')
+
+ for field in "${fields[@]}"; do
+ # tcwg_test_gcc_check.gcc.FAIL.nb_FAIL
+ field_name="$suite_name.$field.nb_$field"
+ report_test_entry "$lnt_testsuite.$field_name" "$(eval "echo \$$field")"
+ done
+ done
+
+ for field in "${all_fields[@]}"; do
+ # tcwg_test_gcc_check.TOTAL.FAIL.nb_FAIL
+ field_name="TOTAL.$field.nb_$field"
+ report_test_entry "$lnt_testsuite.$field_name" "$(eval "echo \$total_$field")"
+ done
+
+ report_footer
+ ) > "$output_file"
+}
+
+
+# ==============================================================================
+
+# Create an LNT JSON report for the given benchmark results.
+generate_lnt_bmk_report()
+{
+ local build_url=${1:?}
+ local ci_project=${2:?} # tcwg_bmk-code_speed-cpu2017rate
+ local ci_config=${3:?} # llvm-aarch64-master-O3
+ local results_date=${4:?}
+ local jira_key=${5:?}
+ local size_csv=${6:?}
+ local perf_csv=${7:?}
+ local status_csv=${8:?}
+ local variability_csv=${9:?}
+ local output_file=${10:?}
+
+ local lnt_testsuite=${ci_project}
+ local machine_name=${ci_config}
+
+ local build_number
+ build_number=$(basename "$build_url")
+
+ [ -f "$size_csv" ] || return
+ [ -f "$perf_csv" ] || return
+
+ (
+ # Generate a header with useful info for the report.
+ additional_run_infos=("tag:$lnt_testsuite" "test_url:$build_url")
+ additional_run_infos+=("run_order:$(printf "%04d" "$build_number") ($(get_describe_pad))")
+ [ "$jira_key" != "-" ] && additional_run_infos+=("regression:$jira_key")
+
+ for c in $(get_current_manifest "{rr[components]}"); do
+ additional_run_infos+=("git_${c}:$(get_current_component_url $c) $(get_component_changes)")
+ done
+
+ report_header "$machine_name" "$results_date" "${additional_run_infos[@]}"
+
+ # Disable traces locally; this section is too verbose.
+ set +x
+
+ ## Inspect size.csv
+ # 531.deepsjeng_r,deepsjeng_r_base.default,90926
+ while IFS="," read -r benchmark symbol size; do
+ [[ "$symbol" == *base.default ]] || continue
+
+ report_test_entry "$lnt_testsuite.$benchmark.code_size" "$size"
+
+ done < <(tail -n +2 "$size_csv" | tr -d '\r')
+
+ ## Inspect perf.csv
+ # 531.deepsjeng_r,deepsjeng_r_base.default,8692,na
+ local -A execution_time
+ while IFS="," read -r benchmark symbol sample _dso; do
+ [[ "$symbol" == *base.default ]] || continue
+
+ execution_time[$symbol]="$sample"
+ report_test_entry "$lnt_testsuite.$benchmark.execution" "$sample"
+
+ done < <(tail -n +2 "$perf_csv" | tr -d '\r')
+
+ ## Inspect bmk-specific-variability.csv
+ # 531.deepsjeng_r,deepsjeng_r_base.default,0.96,0.0,,
+ if [ -f "$variability_csv" ]; then
+ # shellcheck disable=SC2034
+ while IFS="," read -r benchmark symbol sample_var sve_var vect_var; do
+ [[ "$symbol" == *base.default ]] || continue
+
+ if [ "${execution_time[$symbol]+abc}" ]; then
+ local var
+
+ var=$(echo "$sample_var * ${execution_time[$symbol]} / 100" | bc)
+ report_test_entry "$lnt_testsuite.$benchmark.execution_variation" "$var"
+ fi
+ done < <(tail -n +2 "$variability_csv" | tr -d '\r')
+ fi
+
+ score_success_compile=0
+ score_success_execution=0
+ score_total_test=0
+
+ ## Inspect status.csv
+ # 531.deepsjeng_r,deepsjeng_r_base.default,failed-to-build
+ while IFS="," read -r benchmark symbol status; do
+ [[ "$symbol" == *base.default ]] || continue
+
+ local compile_success execution_success
+ case "$status" in
+ success)
+ compile_success=1
+ execution_success=1
+ ;;
+ failed-to-run)
+ compile_success=1
+ execution_success=0
+ ;;
+ failed-to-build)
+ compile_success=0
+ execution_success=0
+ ;;
+ *)
+ assert_with_msg "unknown status in $status_csv"
+ ;;
+ esac
+
+ report_test_entry "$lnt_testsuite.$benchmark.compile_status" "$((compile_success==0))"
+ report_test_entry "$lnt_testsuite.$benchmark.execution_status" "$((execution_success==0))"
+
+ score_success_compile=$((score_success_compile+compile_success))
+ score_success_execution=$((score_success_execution+execution_success))
+
+ score_total_test=$((score_total_test+1))
+
+ done < <(tail -n +2 "$status_csv" | tr -d '\r')
+
+ report_test_entry "$lnt_testsuite.nb_compile_successful.score" "$score_success_compile"
+ report_test_entry "$lnt_testsuite.nb_execution_successful.score" "$score_success_execution"
+
+ report_test_entry "$lnt_testsuite.nb_total_tests.score" "$score_total_test"
+
+ report_footer
+ ) > "$output_file"
+}
+
+
+# ==============================================================================
diff --git a/precommit-ssh-apply.sh b/precommit-ssh-apply.sh
new file mode 100755
index 00000000..e9603370
--- /dev/null
+++ b/precommit-ssh-apply.sh
@@ -0,0 +1,128 @@
+#!/bin/bash
+
+set -euf -o pipefail +x
+
+scripts=$(dirname $0)
+# shellcheck source=jenkins-helpers.sh
+. $scripts/jenkins-helpers.sh
+# shellcheck source=pw-helpers.sh
+. $scripts/pw-helpers.sh
+
+convert_args_to_variables "$@"
+
+obligatory_variables pw_url
+declare pw_url
+
+fetch_only="${fetch_only-false}"
+series_dir="${series_dir-}"
+verbose="${verbose-true}"
+
+if $verbose; then
+ set -x
+fi
+
+case "$pw_url" in
+ "ssh://"*)
+ # ssh://ssh_host:path/to/maildir[#num_patch]
+ # Note that CI will ssh to $ssh_host as tcwg-buildslave user.
+ true
+ ;;
+ *) assert_with_msg "pw_url does not match ssh://*" false ;;
+esac
+
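+# For example (hypothetical host and maildir path),
+# pw_url="ssh://dev-01.tcwglab:patches/series-42#1" rsyncs the maildir
+# patches/series-42 from dev-01.tcwglab and selects the patch at HEAD~1
+# of the applied series (index 0 being the last patch).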
+series_url=$(echo "$pw_url" | cut -d/ -f 3- | sed -e "s/#.*//")
+case "$series_url" in
+ *".patch") ;;
+ *"/") ;;
+ *) series_url="$series_url/" ;;
+esac
+
+num_patch=$(echo "$pw_url" | cut -s -d# -f 2)
+if [ "$num_patch" = "" ]; then
+ num_patch="0"
+fi
+
+if [ "$series_dir" = "" ]; then
+ series_dir=$(mktemp -d)
+ trap 'rm -rf "$series_dir"' EXIT
+fi
+rsync -a "$series_url" "$series_dir/new/"
+
+if $fetch_only; then
+ exit 0
+fi
+
+obligatory_variables project pw_dir build_url patch_submitter
+declare project pw_dir build_url patch_submitter
+
+url=$(get_baseline_git "${project}_url")
+rev=$(get_baseline_git "${project}_rev")
+echo "Fetching baseline $url#$rev"
+clone_or_update_repo "$project" "$rev" "$url" > /dev/null
+
+# Create a state file for this patch. This is sourced by pw-report.sh to avoid
+# passing a bunch of parameters on the command line.
+mkdir -p "$pw_dir"
+cat > "$pw_dir/$project" <<EOF
+pw[project]='$project'
+pw[${project}]='$project'
+pw[${project}_patch_id]='$num_patch'
+pw[${project}_check_cmd]='echo REPORT'
+pw[${project}_build_url]='$build_url'
+pw[${project}_patch_url]='$pw_url'
+pw[${project}_patch_message_id]='no_message_id'
+pw[${project}_patch_submitter]='$patch_submitter'
+EOF
+
+prev_head=$(git -C "$project" rev-parse HEAD)
+
+apply_series_with_retry "$project" $prev_head am "$series_dir/" "$series_url" &
+res=0 && wait $! || res=$?
+patches_applied=$(git -C "$project" rev-list --count HEAD "^$prev_head")
+
+if [ "$patches_applied" -le "$num_patch" ]; then
+ apply_result="fail"
+ if [ "$res" = "0" ] && [ "$patches_applied" = "0" ]; then
+ # "series apply" finished successfully, but no patch was applied;
+ # this means that the patch is already merged.
+ apply_result="merged"
+ fi
+
+ cat > "$pw_dir/mail-recipients.txt" <<EOF
+$patch_submitter
+EOF
+ cat > "$pw_dir/mail-subject.txt" <<EOF
+[Linaro-TCWG-CI] $pw_url failed to apply
+EOF
+ cat > "$pw_dir/mail-body.txt" <<EOF
+${build_url}artifact/artifacts/jenkins/precommit-ssh-apply.log/*view*/
+
+Patch series $pw_url applied fewer patches than expected: $patches_applied vs expected $(($num_patch + 1))
+EOF
+ if [ "$apply_result" = "merged" ]; then
+ cat >> "$pw_dir/mail-body.txt" <<EOF
+
+Most likely the patch series is now merged into mainline
+EOF
+ fi
+
+ if [ $res != 0 ]; then
+ exit $res
+ fi
+
+ exit 5
+fi
+
+git -C "$project" checkout --detach HEAD~$num_patch
+
+# Some debug traces, to check which files were modified and their time stamps
+(
+ set +e
+ cd "$project"
+ git status
+ git status -s | awk '{print $NF;}' | while IFS='' read -r line
+ do
+ ls -l "$line"
+ done
+)
diff --git a/precommit-test.sh b/precommit-test.sh
new file mode 100755
index 00000000..249eae94
--- /dev/null
+++ b/precommit-test.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+set -euf -o pipefail
+
+scripts=$(dirname $0)
+# shellcheck source=jenkins-helpers.sh
+. $scripts/jenkins-helpers.sh
+
+convert_args_to_variables "$@"
+
+obligatory_variables project pw_url
+declare project pw_url
+
+dryrun="${dryrun-true}"
+job_filter="${job_filter-.*}"
+notify="${notify-$USER@linaro.org}"
+num_patches="${num_patches-0}"
+verbose="${verbose-false}"
+
+if $verbose; then
+ set -x
+fi
+
+series_dir=$(mktemp -d)
+$scripts/precommit-ssh-apply.sh --pw_url "$pw_url" --fetch_only true \
+ --series_dir "$series_dir" --verbose "$verbose"
+if ! [ "$num_patches" -gt "0" ]; then
+ num_patches=$(find "$series_dir" -type f | wc -l)
+fi
+rm -rf "$series_dir"
+
+jenkins_query=(ssh -p2222 -l "$USER@linaro.org" ci.linaro.org)
+
+jenkins_run=()
+if $dryrun; then
+ jenkins_run+=(echo DRYRUN)
+fi
+jenkins_run+=("${jenkins_query[@]}")
+
+readarray -t job_list < <("${jenkins_query[@]}" list-jobs \
+ | grep -e "tcwg_.*-precommit" \
+ | grep -e "$job_filter")
+
+for ci_job in "${job_list[@]}"; do
+ if ! "${jenkins_query[@]}" get-job "$ci_job" \
+ | grep "^ *<name>${project}_git</name>\$" >/dev/null; then
+ continue
+ fi
+
+ echo "START: $ci_job: $num_patches builds"
+
+ num="$num_patches"
+ while [ "$num" -gt "0" ]; do
+ num=$(($num - 1))
+ "${jenkins_run[@]}" build "$ci_job" \
+ -p "${project}_git=$pw_url#$num" \
+ -p "notify=$notify"
+ done
+done
diff --git a/pw-apply.sh b/pw-apply.sh
new file mode 100755
index 00000000..176ee1b1
--- /dev/null
+++ b/pw-apply.sh
@@ -0,0 +1,164 @@
+#!/bin/bash
+
+set -euf -o pipefail +x
+
+scripts=$(dirname $0)
+# shellcheck source=jenkins-helpers.sh
+. $scripts/jenkins-helpers.sh
+# shellcheck source=pw-helpers.sh
+. $scripts/pw-helpers.sh
+
+convert_args_to_variables "$@"
+
+obligatory_variables ci_bot project pw_url pw_token pw_dir build_url
+declare ci_bot project pw_url pw_token pw_dir build_url
+
+verbose="${verbose-true}"
+
+if $verbose; then
+ set -x
+fi
+
+case "$pw_url" in
+ "pw://series/"*)
+ series_id=$(echo "$pw_url" | cut -d/ -f 4)
+ ;;
+ *) assert_with_msg "pw_url does not match pw://series/*" false ;;
+esac
+
+retrigger=false
+case "$pw_url/" in
+ "pw://series/"*"/retrigger/"*) retrigger=true ;;
+esac
+
+url=$(get_baseline_git "${project}_url")
+rev=$(get_baseline_git "${project}_rev")
+echo "Fetching baseline $url#$rev"
+clone_or_update_repo "$project" "$rev" "$url" > /dev/null
+
+# BE CAREFUL WITH $pw_token
+# shellcheck disable=SC2064
+trap "pw_deinit $project" EXIT
+(set +x; pw_init "$project" "$pw_token")
+
+if ! pw_series_complete_p "$project" "$series_id"; then
+ echo "ERROR: Series $series_id is not complete"
+ exit 2
+fi
+
+# Look for the first untested patch in the series.
+num_patch=0
+while true; do
+ patch_id=$(pw_get_patch_from_series \
+ "$project" "$series_id" "$num_patch" || true)
+ if [ "$patch_id" = "" ] || $retrigger; then
+ break
+ fi
+
+ check_state=$(pw_patch_check_state "$patch_id" "$ci_bot")
+ if [ "$check_state" = "pending" ]; then
+ break
+ fi
+
+ num_patch=$(($num_patch + 1))
+done
+
+if [ "$patch_id" = "" ]; then
+ echo "ERROR: All patches in series $series_id are already tested"
+ exit 3
+fi
+
+# Fetch and set up glibc-cicd.git. We use check.py to send status updates
+# to the patchwork instance.
+clone_or_update_repo glibc-cicd main \
+ https://gitlab.com/djdelorie/glibc-cicd.git > /dev/null
+(
+ # BE CAREFUL WITH $pw_token
+ set +x;
+ if [ "$pw_token" != "" ]; then
+ cat >> glibc-cicd/cicd-config.py <<EOF
+
+patchwork_token = "$pw_token"
+EOF
+ fi
+)
+pw_check_cmd=(glibc-cicd/check.py --patch_id "$patch_id" --context "$ci_bot")
+
+patch_url="https://patchwork.sourceware.org/patch/$patch_id"
+patch_message_id=$(pw_get_patch_data "$project" "$patch_id" "Message ID")
+patch_submitter=$(pw_get_patch_data "$project" "$patch_id" "Submitter" \
+ | sed -e "s/.*(\(.*\)).*/\1/")
+
+# Create a state file for this patch. This is sourced by pw-report.sh to avoid
+# passing a bunch of parameters on the command line.
+mkdir -p "$pw_dir"
+cat > "$pw_dir/$project" <<EOF
+pw[project]='$project'
+pw[${project}]='$project'
+pw[${project}_patch_id]='$patch_id'
+pw[${project}_check_cmd]='${pw_check_cmd[*]}'
+pw[${project}_build_url]='$build_url'
+pw[${project}_patch_url]='$patch_url'
+pw[${project}_patch_message_id]='$patch_message_id'
+pw[${project}_patch_submitter]='$patch_submitter'
+EOF
+
+# Below calls to pw-report.sh will run their own pw_init/pw_deinit
+pw_deinit "$project"
+trap "" EXIT
+
+(
+ set +x
+ $scripts/pw-report.sh --check triggered --result pass --pw_dir "$pw_dir" \
+ __pw_token "$pw_token"
+)
+
+prev_head=$(git -C "$project" rev-parse HEAD)
+
+apply_series_with_retry "$project" $prev_head pw "$series_id" &
+res=0 && wait $! || res=$?
+patches_applied=$(git -C "$project" rev-list --count HEAD "^$prev_head")
+
+if [ "$patches_applied" -le "$num_patch" ]; then
+ apply_result="fail"
+ if [ "$res" = "0" ] && [ "$patches_applied" = "0" ]; then
+ # "series apply" finished successfully, but no patch was applied;
+ # this means that the patch is already merged.
+ apply_result="merged"
+ fi
+
+ (
+ set +x
+ $scripts/pw-report.sh --check apply --result "$apply_result" \
+ --pw_dir "$pw_dir" __pw_token "$pw_token"
+ )
+
+ if [ $res != 0 ]; then
+ exit $res
+ fi
+
+ echo "WARNING: patch series applied fewer patches than expected"
+ echo "WARNING: applied $patches_applied vs expected $(($num_patch + 1))"
+ echo "WARNING: most likely the patch series is now merged into mainline"
+
+ exit 5
+fi
+
+git -C "$project" checkout --detach HEAD~$num_patch
+
+# Some debug traces, to check which files were modified and their time stamps
+(
+ set +e
+ cd "$project"
+ git status
+ git status -s | awk '{print $NF;}' | while IFS='' read -r line
+ do
+ ls -l "$line"
+ done
+)
+
+(
+ set +x
+ $scripts/pw-report.sh --check apply --result pass --pw_dir "$pw_dir" \
+ __pw_token "$pw_token"
+)
diff --git a/pw-helpers.sh b/pw-helpers.sh
new file mode 100755
index 00000000..d2ee4ca2
--- /dev/null
+++ b/pw-helpers.sh
@@ -0,0 +1,400 @@
+#!/bin/bash
+
+# Helper functions for accessing patchwork (pw) API. Almost all access
+# is implemented via git-pw's commands and its YAML output.
+
+# Scripts that use below functions:
+# - pw-trigger.sh -- looks for new patch series to test in patchwork
+# and creates trigger-* files for jenkins.
+# - pw-apply.sh -- fetches and applies a series to a local git clone.
+# - pw-report.sh -- sends "check" feedback back to patchwork.
+#
+# The general workflow is:
+# 1. At the end of successful post-commit testing, after baseline was
+# updated, pw-trigger.sh generates trigger-* files for jenkins. This
+# populates jenkins queue with jobs for testing patches posted since
+# the last successful post-commit build. Pw-trigger.sh looks at the state
+# of the latest "check", and triggers builds for all "pending" patches.
+#
+# 2. Jenkins starts a build for a given patch series and applies it to
+# the local git checkout -- this is done with pw-apply.sh. As soon
+# as pw-apply.sh has a patch ID, it calls pw-report.sh to add a "pending"
+# check to patchwork indicating that testing has started.
+#
+# 3. If the patch applied successfully, pw-apply.sh generates a state file
+# (artifacts/jenkins/<project>), which has information necessary for
+# pw-report.sh to send subsequent "check" feedback to patchwork.
+#
+# 4. The build proceeds as a normal post-commit build.
+#
+# 5. Once the build finishes, pw-report.sh is called to send the final
+# "check" -- whether patch passed or failed testing. Only the final
+# pw-report.sh sets check state to something other than "pending".
+# Therefore, if for whatever reason the final pw-report.sh does not run,
+# patch testing will be retriggered on the next round.
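+#
+# For example, a trigger file created in step 1 contains a single line of
+# the form "<project>_git=pw://series/<series-id>" (see pw-trigger.sh),
+# which jenkins passes to pw-apply.sh as its pw_url parameter.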
+
+# Initialize git-pw in $project.
+# $1 -- existing git clone of $project
+# $2 -- PW API token; without it modifications fail
+pw_init ()
+{
+ (
+ set -euf -o pipefail
+ local project="$1"
+
+ git -C "$project" config pw.server \
+ "https://patchwork.sourceware.org/api/1.2/"
+ git -C "$project" config pw.project "$project"
+
+ pw_clear_cache
+
+ # BE CAREFUL WITH $token
+ set +x
+ local token="$2"
+ if [ "$token" != "" ]; then
+ git -C "$project" config pw.token "$token"
+ fi
+ )
+}
+
+# De-initialize git-pw in $project.
+# $1 -- existing git clone of $project
+pw_deinit ()
+{
+ (
+ set -euf -o pipefail
+ local project="$1"
+
+ rm -rf "/tmp/pw-yaml-cache-$$"
+
+ # ignore error if pw.token is not set.
+ git -C "$project" config --unset pw.token || true
+ )
+}
+
+# Clear pw_yaml cache.
+pw_clear_cache ()
+{
+ (
+ set -euf -o pipefail
+
+ rm -rf "/tmp/pw-yaml-cache-$$"
+ mkdir "/tmp/pw-yaml-cache-$$"
+ )
+}
+
+# Get specified piece of data from git-pw yaml output.
+# This is reasonably unstable and relies heavily on git-pw's yaml format and
+# field names not changing.
+#
+# $1 -- $project git directory
+# $2 -- git-pw section: series, patch, etc.
+# $3 -- identifier of object in the section; usually series or patch ID.
+# $4, $5, $6 -- find the object whose field $4 has value $5, and return the
+# value of field $6 from that entry: if (data.$4 == $5) return data.$6;
+# A special match_value ".*" selects the first entry that has match_field
+# set, regardless of its value.
+# $7 -- optional value that stops the search for an object with
+# match_field==match_value. This is necessary to avoid going "outside"
+# of our data of interest and matching a random object that happens to
+# have similarly named fields.
+# $8 -- optional index of the entry to match; the first N-1 matching
+# entries will be skipped.
+#
+# Note: we match entries starting from the tail, since that is where
+# the interesting stuff is most of the time.
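+#
+# For example (see pw_series_complete_p below),
+#   pw_yaml_get "$project" series "$series_id" property Complete value
+# prints the value of the series' "Complete" property, e.g. "True".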
+pw_yaml_get ()
+{
+ (
+ set -euf -o pipefail
+ local project="$1"
+ local section="$2"
+ local id="$3"
+ local match_field="$4"
+ local match_value="$5"
+ local get_field="$6"
+ local match_stop="${7-}"
+ local match_num="${8-0}"
+
+ # Reduce noise in the logs
+ set +x
+
+ local -a cmd
+ case "$id" in
+ "--owner"*)
+ # shellcheck disable=SC2206
+ cmd=(list $id)
+ ;;
+ *)
+ cmd=(show "$id")
+ ;;
+ esac
+
+ local -a git_cmd
+ git_cmd=(git -C "$project" pw "$section" "${cmd[@]}" -f yaml)
+
+ local yaml
+ yaml=$(IFS="_"; echo "${git_cmd[*]}" | tr "/" "_")
+ yaml="/tmp/pw-yaml-cache-$$/$yaml"
+ if [ -f "$yaml" ]; then
+ touch "$yaml"
+ else
+ # Time out if PW throttles the connection; otherwise this would
+ # hang indefinitely.
+ timeout 1m "${git_cmd[@]}" > "$yaml" &
+ local res
+ res=0 && wait $! || res=$?
+ if [ $res != 0 ]; then
+ rm -f "$yaml"
+ return $res
+ fi
+ fi
+
+ local len
+ len=$(shyaml get-length < "$yaml")
+ while [ "$len" -gt "0" ]; do
+ len=$(($len - 1))
+ if [ "$match_value" != ".*" ]; then
+ if shyaml get-value "$len.$match_field" < "$yaml" \
+ | grep "$match_value" >/dev/null; then
+ if [ "$match_num" = "0" ]; then
+ shyaml get-value "$len.$get_field" < "$yaml"
+ return 0
+ fi
+ match_num=$(($match_num - 1))
+ fi
+ else
+ # Special case for $match_value == ".*".
+ # Only check that $match_field exists, and don't look at the value.
+ # This handles empty values without involving grep, which can't
+ # match the EOF generated by an empty value.
+ if shyaml get-value "$len.$match_field" < "$yaml" >/dev/null; then
+ if [ "$match_num" = "0" ]; then
+ shyaml get-value "$len.$get_field" < "$yaml"
+ return 0
+ fi
+ match_num=$(($match_num - 1))
+ fi
+ fi
+ if [ "$match_stop" != "" ] \
+ && shyaml get-value "$len.$match_field" < "$yaml" \
+ | grep "$match_stop" >/dev/null; then
+ return 1
+ fi
+ done
+
+ assert_with_msg "Missing $match_field == $match_value" false
+ )
+}
+
+# Return true if patch series is complete.
+pw_series_complete_p ()
+{
+ (
+ set -euf -o pipefail
+ local project="$1"
+ local series_id="$2"
+
+ local value
+ value=$(pw_yaml_get "$project" series "$series_id" property Complete value)
+ if [ "$value" = "True" ]; then
+ return 0
+ fi
+ return 1
+ )
+}
+
+# Return patch ID at specified index from series.
+# $2 -- series ID
+# $3 -- index of the patch *from the end* of series; "0" should always
+# work.
+pw_get_patch_from_series ()
+{
+ (
+ set -euf -o pipefail
+ local project="$1"
+ local id="$2"
+ local num="$3"
+
+ local patch_id
+ patch_id=$(pw_yaml_get "$project" series "$id" property ".*" \
+ value Patches "$num" | cut -d" " -f 1)
+ if [ "$patch_id" = "" ]; then
+ return 1
+ fi
+ echo "$patch_id"
+ )
+}
+
+# Fetch patch entry data
+# $2 -- patch ID
+# $3 -- data field
+pw_get_patch_data ()
+{
+ (
+ set -euf -o pipefail
+ local project="$1"
+ local patch_id="$2"
+ local field="$3"
+
+ pw_yaml_get "$project" patch "$patch_id" property "$field" value
+ )
+}
+
+# Fetch current state of $patch_id's check for $ci_bot configuration.
+# Prints out "pending/warning/fail/success"; with no matching checks
+# prints out "pending".
+pw_patch_check_state ()
+{
+ (
+ set -euf -o pipefail
+ local patch_id="$1"
+ local ci_owner_bot="$2"
+
+ # Split $ci_owner_bot into [optional] $ci_owner and $ci_bot.
+ local ci_owner ci_bot
+ ci_owner="$(echo "$ci_owner_bot" | cut -s -d/ -f1)"
+ ci_bot="$(echo "$ci_owner_bot" | cut -s -d/ -f2)"
+ if [ "$ci_bot" = "" ]; then
+ ci_owner="linaro-tcwg-bot"
+ ci_bot="$ci_owner_bot"
+ fi
+
+ local json1 json2
+ json1=$(mktemp)
+ json2=$(mktemp)
+ # shellcheck disable=SC2064
+ trap "rm $json1 $json2" EXIT
+
+ curl -s \
+ "https://patchwork.sourceware.org/api/1.2/patches/$patch_id/checks/" \
+ > "$json1"
+
+ local i="-1" cur_date="0" cur_state="pending"
+ local username context date
+ while true; do
+ i=$(($i + 1))
+
+ jq -r ".[$i]" < "$json1" > "$json2"
+ if [ "$(cat "$json2")" = "null" ]; then
+ break
+ fi
+
+ username=$(jq -r ".user.username" < "$json2")
+ if [ "$username" != "$ci_owner" ]; then
+ continue
+ fi
+
+ context=$(jq -r ".context" < "$json2")
+ if [ "$context" != "$ci_bot" ]; then
+ continue
+ fi
+
+ date=$(jq -r ".date" < "$json2")
+ date=$(date -d "$date" +%s)
+ if [ "$cur_date" -le "$date" ]; then
+ cur_date="$date"
+ cur_state=$(jq -r ".state" < "$json2")
+ fi
+ done
+
+ echo "$cur_state"
+ )
+}
+
+# Apply a patch series
+# $1: project
+# $2: method (either 'am' for plain git, or 'pw' for patchwork interface)
+# $3: series dir (for 'am') or ID (for 'pw')
+# $4: "patch paths" option (eg -p0 or -p1, passed to 'git am')
+# $5: optional series_url when $2='am'
+apply_series()
+{
+ (
+ set -euf -o pipefail
+ local project="$1"
+ local method="$2"
+ local series_id="$3"
+ local patch_paths="$4"
+ local series_url=""
+ local res=0
+ local subcommand=""
+ local series_name=""
+
+ case "$method" in
+ am)
+ subcommand="am"
+ series_url="$5"
+ series_name="$series_url"
+ ;;
+ pw)
+ subcommand="pw series apply"
+ series_name="$series_id"
+ ;;
+ *)
+ echo "ERROR: method $method not supported by apply_series()"
+ return 4
+ ;;
+ esac
+
+ # Apply the whole series and then roll back to the desired patch.
+ if ! git -C "$project" $subcommand "$series_id" "$patch_paths"; then
+ echo "WARNING: Series $series_name did not apply cleanly"
+ # "git am" sometimes detects email text as a patch, and complains that it
+ # has no actual code changes. Workaround this by skipping empty patches.
+ res=4
+ patch_file="$project/.git/rebase-apply/patch"
+ while [ "$res" = "4" ] \
+ && [ -f "$patch_file" ] && ! [ -s "$patch_file" ]; do
+ # The patch is empty, so skip it.
+ res=0
+ if ! git -C "$project" am --skip; then
+ res=4
+ fi
+ done
+ fi
+ return $res
+ )
+}
+
+# Apply a patch series first with -p1, retry with -p0 if needed
+# $1: project
+# $2: prev_head
+# $3: method (either 'am' for plain git, or 'pw' for patchwork interface)
+# $4: series dir (for 'am') or ID (for 'pw')
+# $5: optional series_url when $3='am'
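+#
+# A typical invocation (see pw-apply.sh) runs this in the background so
+# that "set -e" in the caller does not abort on failure, and collects
+# the status afterwards:
+#   apply_series_with_retry "$project" "$prev_head" pw "$series_id" &
+#   res=0 && wait $! || res=$?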
+apply_series_with_retry()
+{
+ (
+ set -euf -o pipefail
+ local project="$1"
+ local prev_head="$2"
+ local method="$3"
+ local series_id="$4"
+ local series_url=""
+ local res=0
+
+ if [ "$method" = "am" ]; then
+ series_url="$5"
+ fi
+
+ # Try to apply patches with -p1 first
+ apply_series "$project" $method "$series_id" "-p1" "$series_url" &
+ res=0 && wait $! || res=$?
+
+ # It can happen that the patch series was merged between the trigger and
+ # this build. Make sure that we have applied enough patches to test
+ # something interesting.
+ patches_applied=$(git -C "$project" rev-list --count HEAD "^$prev_head")
+
+ # If we couldn't apply any patch, retry with -p0
+ if [ "$res" != "0" ] && [ "$patches_applied" = "0" ]; then
+ # Restore a clean state
+ git -C "$project" am --abort || true
+ git -C "$project" checkout --detach $prev_head
+ apply_series "$project" $method "$series_id" "-p0" "$series_url" &
+ res=0 && wait $! || res=$?
+ fi
+
+ return $res
+ )
+}
diff --git a/pw-report.sh b/pw-report.sh
new file mode 100755
index 00000000..7b3d462e
--- /dev/null
+++ b/pw-report.sh
@@ -0,0 +1,86 @@
+#!/bin/bash
+
+set -euf -o pipefail +x
+
+scripts=$(dirname $0)
+# shellcheck source=jenkins-helpers.sh
+. $scripts/jenkins-helpers.sh
+# shellcheck source=pw-helpers.sh
+. $scripts/pw-helpers.sh
+
+convert_args_to_variables "$@"
+
+obligatory_variables check result pw_dir pw_token
+declare check result pw_dir pw_token
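+
+# Typical invocation (see pw-apply.sh); the token is passed with the
+# "__pw_token" convention understood by convert_args_to_variables:
+#   pw-report.sh --check triggered --result pass --pw_dir "$pw_dir" \
+#     __pw_token "$pw_token"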
+
+verbose="${verbose-true}"
+
+if ! [ -d "$pw_dir" ]; then
+ exit 0
+fi
+
+while IFS= read -r -d '' pw_file; do
+ (
+ declare -A pw
+ # shellcheck disable=SC1090
+ source "$pw_file"
+
+ project="${pw[project]}"
+ build_url="${pw[${project}_build_url]}"
+
+ # BE CAREFUL WITH $pw_token
+ set +x
+ # shellcheck disable=SC2064
+ trap "pw_deinit $project" EXIT
+ pw_init "$project" "$pw_token"
+
+ if $verbose; then
+ set -x
+ fi
+
+ case "$check-$result" in
+ triggered-*)
+ desc="Test started"
+ url="${build_url}console"
+ state="pending"
+ ;;
+ apply-fail)
+ desc="Patch failed to apply"
+ url="${build_url}artifact/artifacts/jenkins/pw-apply.log"
+ state="fail"
+ ;;
+ apply-merged)
+ desc="Patch is already merged"
+ url="${build_url}artifact/artifacts/jenkins/pw-apply.log"
+ # We can test patches hours after they have been
+ # submitted, by which time they may already be merged
+ # into the master branch. Mark such cases as WARNING.
+ state="warning"
+ ;;
+ apply-pass)
+ desc="Patch applied"
+ url="${build_url}console"
+ state="pending"
+ ;;
+ test-fail)
+ desc="Testing failed"
+ url="${build_url}artifact/artifacts/artifacts.precommit/notify/mail-body.txt"
+ state="fail"
+ ;;
+ test-pass)
+ desc="Testing passed"
+ url="${build_url}artifact/artifacts/artifacts.precommit/notify/mail-body.txt"
+ state="success"
+ ;;
+ test-ignore)
+ # Testing failed due to a sporadic failure. Allow retriggering
+ # testing of this patch in the next round.
+ desc="Build did not complete; will be retriggered"
+ url="${build_url}artifact/artifacts/artifacts.precommit/"
+ state="pending"
+ ;;
+ esac
+ ${pw[${project}_check_cmd]} --state "$state" \
+ --description "$desc" --url "$url"
+ )
+done < <(find "$pw_dir" -type f -print0)
diff --git a/pw-trigger.sh b/pw-trigger.sh
new file mode 100755
index 00000000..bddf3753
--- /dev/null
+++ b/pw-trigger.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+
+set -euf -o pipefail +x
+
+scripts=$(dirname $0)
+# shellcheck source=jenkins-helpers.sh
+. $scripts/jenkins-helpers.sh
+# shellcheck source=pw-helpers.sh
+. $scripts/pw-helpers.sh
+
+convert_args_to_variables "$@"
+
+obligatory_variables ci_bot project pw_token out_dir
+declare ci_bot project pw_token out_dir
+
+verbose="${verbose-true}"
+
+if $verbose; then
+ set -x
+fi
+
+# Print a list of prerequisite bots to wait for before triggering $ci_bot.
+print_prereq_bots ()
+{
+ (
+ set -euf -o pipefail
+ local ci_bot="$1"
+
+ # Wait for "build" bots to succeed before triggering "check" bot.
+ case "$ci_bot" in
+ tcwg_*_check--*)
+ echo "$ci_bot" | sed -e "s/_check--/_build--/"
+ ;;
+ esac
+
+ # Wait for apply_patch to succeed before triggering glibc builds.
+ case "$ci_bot" in
+ tcwg_glibc_*--*)
+ echo "redhat-pt-bot/TryBot-apply_patch"
+ ;;
+ esac
+ )
+}
+
+# BE CAREFUL WITH $pw_token
+# shellcheck disable=SC2064
+trap "pw_deinit $project" EXIT
+(set +x; pw_init "$project" "$pw_token")
+
+yaml=$(mktemp)
+# shellcheck disable=SC2064
+trap "pw_deinit $project; rm -f $yaml" EXIT
+
+readarray -t prereq_bots < <(print_prereq_bots "$ci_bot")
+
+git -C "$project" pw series list -f yaml > "$yaml"
+mkdir -p "$out_dir"
+
+len=$(shyaml get-length < "$yaml")
+i=$len
+j=0
+# Go through series with the oldest first.
+while [ "$i" -gt "0" ]; do
+ i=$(($i - 1))
+ series_id=$(shyaml get-value $i.id < "$yaml")
+
+ if ! pw_series_complete_p "$project" "$series_id"; then
+ continue
+ fi
+
+ # Look for an untested patch in the series.
+ num_patch=0
+ while true; do
+ patch_id=$(pw_get_patch_from_series \
+ "$project" "$series_id" "$num_patch" || true)
+ if [ "$patch_id" = "" ]; then
+ break
+ fi
+
+ check_state=$(pw_patch_check_state "$patch_id" "$ci_bot")
+ if [ "$check_state" = "pending" ]; then
+ break
+ fi
+ num_patch=$(($num_patch + 1))
+ done
+
+ if [ "$patch_id" = "" ]; then
+ continue
+ fi
+
+ # Check that prerequisite bots finished OK.
+ check_state="success"
+ for prereq_bot in "${prereq_bots[@]}"; do
+ check_state=$(pw_patch_check_state "$patch_id" "$prereq_bot")
+ if [ "$check_state" != "success" ]; then
+ break
+ fi
+ done
+ if [ "$check_state" != "success" ]; then
+ echo "WARNING: Waiting for $patch_id:$prereq_bot to succeed."
+ continue
+ fi
+
+ j=$(($j + 1))
+ cat > "$out_dir/trigger-precommit-$project-$j-$series_id" <<EOF
+${project}_git=pw://series/$series_id
+EOF
+done
+
+rm "$yaml"
+
+echo "Processed $len series and created $j pre-commit triggers"
diff --git a/round-robin-baseline.sh b/round-robin-baseline.sh
new file mode 100755
index 00000000..eff32441
--- /dev/null
+++ b/round-robin-baseline.sh
@@ -0,0 +1,624 @@
+#!/bin/bash
+
+set -euf -o pipefail
+
+scripts=$(dirname $0)
+# shellcheck source=jenkins-helpers.sh
+. $scripts/jenkins-helpers.sh
+
+convert_args_to_variables "$@"
+
+obligatory_variables rr[top_artifacts] rr[update_baseline]
+declare -A rr
+
+push_base_artifacts="${push_base_artifacts-false}"
+rewrite_base_artifacts="${rewrite_base_artifacts-false}"
+rewrite_num="${rewrite_num-1}"
+commit_artifacts="${commit_artifacts-true}"
+verbose="${verbose-true}"
+max_removed_revs="${max_removed_revs-10%}"
+skip_annex_downloads="${skip_annex_downloads-false}"
+
+if $rewrite_base_artifacts; then
+ obligatory_variables build_script
+ declare build_script
+fi
+
+# To enable rewrite:
+# - set rewrite_base_artifacts to true, and
+# - set rewrite_num to N+1 to rewrite N oldest revisions (0 for all)
+
+# To rewrite local base-artifacts (e.g., for testing of round-robin-notify.sh
+# or bmk-scripts):
+# 1. Increase rr[minor] or rr[major] in your local build script.
+# 2. Do a baseline build (to clone all repos and check out dependencies of
+# round-robin-notify.sh).
+# ~/jenkins-scripts/tcwg_bmk-build.sh '%%rr[top_artifacts]' artifacts \
+# '==rr[ci_project]' CI_PROJECT '==rr[ci_config]' CI_CONFIG
+# 2a. If you know that you have all dependencies already present, then just
+# copy latest artifacts from base-artifacts's HEAD, and edit manifest
+# manually to increase rr[minor] or rr[major].
+# rsync -a --del --exclude /.git base-artifacts/ artifacts/
+# vi artifacts/manifest.sh
+# 3. Run this script with "__push_base_artifacts false",
+# "__commit_artifacts false" and "__rewrite_num 0".
+# ~/jenkins-scripts/round-robin-baseline.sh \
+# '@@rr[top_artifacts]' artifacts __build_script tcwg_bmk-build.sh \
+# __push_base_artifacts false __commit_artifacts false \
+# __rewrite_base_artifacts true __rewrite_num 0
+# 4. Note that the above will not change upstream base-artifacts, but notify
+# logic may push to interesting commits and/or update jira cards.
+
+# The patch version represents the version of the generated notification files.
+# Upgrading it will automatically enable the rewrite process below.
+rr[patch]=0
+
+if $verbose; then
+ set -x
+fi
+
+# Trim history of base-artifacts to keep repo size manageable.
+trim_base_artifacts ()
+{
+ (
+ set -euf -o pipefail
+
+ # - For the last 100 builds: keep everything
+ # - For the next 100 builds: keep essential artifacts
+ # -- NN-<step> directories are non-essential, the rest -- jenkins/,
+ # dashboard/, etc. -- are essential.
+ # - For the rest of the builds: keep only "update_baseline==force" builds
+ local old_commit
+ old_commit=$(git -C base-artifacts rev-parse --verify HEAD~100 \
+ 2>/dev/null || true)
+
+ if [ "$old_commit" = "" ]; then
+ return 0
+ fi
+
+ local head
+ head=$(git -C base-artifacts rev-parse HEAD)
+
+ # Remove step directories (start with a number) from $old_commit
+ # and older.
+ git -C base-artifacts checkout --detach $old_commit
+ git -C base-artifacts filter-repo --force \
+ --invert-paths --path-regex '^[0-9].*' \
+ --refs HEAD
+
+ local new_old_commit
+ new_old_commit=$(git -C base-artifacts rev-parse HEAD)
+
+ # Walk through even older history (starting with new_old_commit~100)
+ # and leave only commits that have update_baseline=={force,init}.
+ local child orig_parent new_parent
+ child=$(git -C base-artifacts rev-parse --verify HEAD~100 \
+ 2>/dev/null || true)
+
+ while [ "$child" != "" ]; do
+ git -C base-artifacts checkout --detach $child
+
+ orig_parent=$(git -C base-artifacts rev-parse --verify HEAD^ \
+ 2>/dev/null || true)
+ # Find new_parent -- commit that has update_baseline!=onsuccess.
+ new_parent=""
+ while true; do
+ new_parent=$(git -C base-artifacts rev-parse --verify HEAD^ \
+ 2>/dev/null || true)
+ if [ "$new_parent" = "" ]; then
+ break
+ fi
+
+ git -C base-artifacts checkout --detach $new_parent
+
+ local u_b
+ u_b=$(get_baseline_manifest "{rr[update_baseline]}")
+ if [ "$u_b" != "onsuccess" ]; then
+ break
+ fi
+ done
+
+ # Replace $orig_parent with $new_parent, and update new_old_commit.
+ if [ "$new_parent" != "$orig_parent" ]; then
+ # Note that if $new_parent is empty, then $child will become
+ # the root commit.
+ git -C base-artifacts replace --force --graft $child $new_parent
+ git -C base-artifacts checkout --detach $new_old_commit
+ git -C base-artifacts filter-repo --force --refs HEAD
+ git -C base-artifacts replace --delete $child
+ new_old_commit=$(git -C base-artifacts rev-parse HEAD)
+ fi
+
+ # Proceed to the next commit in history.
+ child="$new_parent"
+ done
+
+ git -C base-artifacts checkout --detach $head
+
+ # Reparent history on the new version of $old_commit.
+ if [ "$old_commit" != "$new_old_commit" ]; then
+ git -C base-artifacts replace --force $old_commit $new_old_commit
+ git -C base-artifacts filter-repo --force --refs HEAD
+ git -C base-artifacts replace --delete $old_commit
+ fi
+ )
+}
+
+# Commit current result and artifacts to the baseline repository
+update_baseline ()
+{
+ (
+ set -euf -o pipefail
+
+ # Rsync current artifacts. Make sure to use rsync's -I option since
+ # quite often the size and timestamp of artifacts/results will be the
+ # same as base-artifacts/results due to "git reset --hard HEAD^" below.
+ # This would cause rsync's "quick check" heuristic to skip the
+ # "results" file.
+ # !!! From this point on, logs and other artifacts won't be included
+ # in base-artifacts.git repo (though they will be uploaded to jenkins).
+ rsync -aI --del --exclude /.git ${rr[top_artifacts]}/ base-artifacts/
+
+ local amend=""
+
+ if [ x"${rr[update_baseline]}" = x"init" ]; then
+ amend="--amend"
+ fi
+
+ local msg_title="${rr[update_baseline]}"
+ if [ x"${BUILD_URL+set}" = x"set" ]; then
+ # Add build number
+ msg_title="$msg_title: #$(basename "$BUILD_URL")"
+ fi
+ msg_title="$msg_title: $(grep -v "^#" ${rr[top_artifacts]}/results | tail -n1)"
+ msg_title="$msg_title: [TCWG CI] ${BUILD_URL-$(pwd)}"
+
+ git -C base-artifacts add .
+ git -C base-artifacts commit $amend -m "$msg_title
+
+Results :
+$(cat ${rr[top_artifacts]}/results | sed -e 's/^/ | /')
+
+check_regression status : ${rr[no_regression_result]}
+"
+ )
+}
+
+rewrite_single_revision ()
+{
+ (
+ set -euf -o pipefail
+ local old_commit="$1"
+ local log_prefix="$2"
+
+ local orig_head
+ orig_head=$(git -C base-artifacts rev-parse HEAD)
+
+ # Return to $orig_head in case of an error.
+ # shellcheck disable=SC2064
+ trap "echo CLEANUP; git -C base-artifacts reset --hard $orig_head" EXIT
+
+ local -a fixup_opts=()
+
+ echo "Rewriting revision $old_commit :"
+ echo " $(git -C base-artifacts show --no-patch --oneline $old_commit)"
+ echo ""
+
+ # Verify that parent of $old_commit is reasonable:
+ # 1. it has manifest.sh;
+ # 2. TODO: maybe check that git_annex_download() succeeds for
+ # base-artifacts? Thinking about it, that should have been
+ # verified on the previous call of rewrite_single_revision()
+ # that processed the parent revision.
+ #
+ # At least in one case we had history starting with an empty commit, which
+ # wasn't amended in update_baseline(). Scanning of history in
+ # rewrite_base_artifacts() ignored the empty commit because it had no
+ # manifest.sh file. The loop below can be used to remove unwanted weird
+ # commits from history.
+ local old_parent="$old_commit"
+ while true; do
+ old_parent=$(git -C base-artifacts rev-parse --verify "$old_parent^" \
+ 2>/dev/null || true)
+ if [ "$old_parent" = "" ]; then
+ # We reached beginning of history.
+ break
+ fi
+
+ git -C base-artifacts checkout --detach "$old_parent"
+ if ! [ -f base-artifacts/manifest.sh ]; then
+ # Remove commits with no manifest.sh .
+ continue
+ fi
+
+ break
+ done
+
+ if [ "$old_parent" != "" ]; then
+ git_annex_download base-artifacts annex
+ else
+ # Initialize a new baseline when base-artifacts is empty.
+ fixup_opts+=("==rr[update_baseline]" init)
+ # FIXME: Move empty.git to bkp.tcwglab.
+ git -C base-artifacts fetch \
+ https://git-us.linaro.org/toolchain/ci/base-artifacts/empty.git \
+ refs/heads/empty
+ git -C base-artifacts checkout --detach FETCH_HEAD
+ fi
+
+ local old_artifacts="${rr[top_artifacts]}/99-rewrite/artifacts.old"
+
+ # Fetch artifacts/ of old build
+ rm -rf "$old_artifacts"
+ mkdir "$old_artifacts"
+ git -C base-artifacts archive "$old_commit" | tar x -C "$old_artifacts"
+
+ # FIXME: Remove workarounds for out-dated files:
+ # Remove .gitignore that ignores annex/bmk-data symlink.
+ rm -f "$old_artifacts/.gitignore"
+ # Remove results_id now that we use annex/bmk-data . Note that we have
+ # fetched the results pointed to by results_id in git_annex_download().
+ rm -f "$old_artifacts/results_id"
+
+ # Fetch old rr values before they are re-written
+ local old_manifest="$old_artifacts/manifest.sh"
+ local -A old
+ old[major]=$(get_manifest "$old_manifest" "{rr[major]-0}")
+ old[minor]=$(get_manifest "$old_manifest" "{rr[minor]-0}")
+ old[patch]=$(get_manifest "$old_manifest" "{rr[patch]-0}")
+ old[notify]=$(get_manifest "$old_manifest" "{notify-}")
+ old[update_baseline]=$(get_manifest "$old_manifest" \
+ "{rr[update_baseline]-}")
+ old[ci_project]=$(get_manifest "$old_manifest" "{rr[ci_project]-}")
+ old[ci_config]=$(get_manifest "$old_manifest" "{rr[ci_config]-}")
+
+ # Download the annex, unless the user explicitly asked to skip it.
+ local res force_remove=false
+ if ! $skip_annex_downloads; then
+ git_annex_download "$old_artifacts" annex &
+ res=0 && wait $! || res=$?
+ if [ $res != 0 ]; then
+ # Something has happened to the annex'ed files. Remove the result.
+ force_remove=true
+ fi
+ fi
+
+ case "${old[major]}.${old[minor]}" in
+ "0."*)
+ # FIXME: Work around old/renamed names of ci_project/ci_config.
+ # This is, mostly, for tcwg_bmk_tx1 and tcwg_bmk_tk1 projects.
+ if [ "${old[ci_project]}" != "${rr[ci_project]}" ]; then
+ fixup_opts+=("==rr[ci_project]" "${rr[ci_project]}")
+ fi
+ if [ "${old[ci_config]}" != "${rr[ci_config]}" ]; then
+ fixup_opts+=("==rr[ci_config]" "${rr[ci_config]}")
+ fi
+
+ # FIXME: Remove old result with no git/ information.
+ # We switched to storing git information in the artifacts/git/
+ # directory a long time ago, so it isn't worth the effort to
+ # work around such cases. Just remove the result.
+ # Note that this will remove the result even when only minor
+ # (not major) version is increased.
+ if ! [ -d "$old_artifacts/git" ]; then
+ force_remove=true
+ fi
+ ;;
+ esac
+
+ res=0
+ # If major and minor are the same, the check_regression stage is
+ # already up-to-date; just append the patch version to the manifest.
+ if [ "${rr[major]-0}.${rr[minor]-0}" == "${old[major]}.${old[minor]}" ]; then
+ echo "rr[patch]=${rr[patch]}" | manifest_out
+ else
+ # otherwise run the check_regression stage
+ $scripts/$build_script \
+ @@rr[top_artifacts] "$old_artifacts" __start_at check_regression \
+ "${fixup_opts[@]}" &
+ res=0 && wait $! || res=$?
+ fi
+
+ if [ $res != 0 ]; then
+ assert_with_msg "check_regression() failed on forced update_baseline" \
+ [ "${old[update_baseline]}" = "onsuccess" ]
+
+ if [ "${rr[major]-0}" -gt "${old[major]}" ]; then
+ # $build_script [somewhat expectedly] failed to process old results,
+ # so remove it from history.
+ # In this case $new_old_commit will be set to $old_commit's parent,
+ # so $old_commit will be removed from history.
+ force_remove=true
+ fi
+ fi
+
+ if $force_remove; then
+ res=1
+ fi
+
+ if [ $res = 0 ]; then
+ local -a notify_opts=()
+
+ case "${old[major]}.${old[minor]}" in
+ "0."*)
+ # FIXME: Work around the possible lack of "$notify" in v0.*
+ # manifests.
+ # Remove once there are no configurations with v0.0 manifests.
+ case "${old[notify]}":"${old[update_baseline]}" in
+ "":"force") notify_opts=(--notify onregression) ;;
+ "":*) notify_opts=(--notify ignore) ;;
+ esac
+ ;;
+ esac
+
+ $scripts/round-robin-notify.sh \
+ @@rr[top_artifacts] "$old_artifacts" __post_mail false \
+ __post_jira_comment false "${notify_opts[@]}" \
+ __build_script "$build_script" \
+ __verbose "$verbose" &> "$log_prefix-notify.log"
+
+ (
+ unset rr
+ manifest_pop
+ declare -A rr
+ convert_args_to_variables @@rr[top_artifacts] "$old_artifacts"
+ update_baseline
+
+ local repo1="${rr[baseline_branch]#linaro-local/ci/}"
+ git_annex_upload base-artifacts annex \
+ "$repo1/$(basename "${BUILD_URL-0}")-"
+ )
+
+ git -C base-artifacts diff "$old_commit" "HEAD" -- manifest.sh \
+ &> "$log_prefix-manifest.diff"
+ git -C base-artifacts diff "$old_commit" "HEAD" -- notify/ \
+ &> "$log_prefix-notify.diff"
+ git -C base-artifacts diff --stat -p "$old_commit" "HEAD" -- \
+ ':(exclude)manifest.sh' ':(exclude)notify/' \
+ &> "$log_prefix-other.diff"
+ elif $force_remove; then
+ touch "$log_prefix.removed"
+ # Above "git_annex_download base-artifacts annex" may have changed
+ # files in base-artifacts/annex/ directory. Restore to prestine
+ # state to avoid failure in "git -C base-artifacts checkout" below.
+ git_clean base-artifacts
+ else
+ # $build_script [unexpectedly] failed to process old results,
+ # so fail and notify developers (by sending error-mail).
+ assert_with_msg "$build_script failed to process $old_commit" false
+ fi
+
+ local new_old_commit
+ new_old_commit=$(git -C base-artifacts rev-parse HEAD)
+
+ assert_with_msg "Rewritten commit did not change" \
+ [ "$old_commit" != "$new_old_commit" ]
+
+ # Reparent history on the new version of $old_commit.
+ trap "" EXIT
+ git -C base-artifacts checkout --detach $orig_head
+ git -C base-artifacts replace --force $old_commit $new_old_commit
+ git -C base-artifacts filter-repo --force --refs HEAD
+ git -C base-artifacts replace --delete $old_commit
+ )
+}
+
+declare -g rewrite_base_artifacts_first=true
+# Update history of base-artifacts
+rewrite_base_artifacts ()
+{
+ (
+ set -euf -o pipefail
+
+ set +x
+
+ local n_rev=0 total_revs=-1
+
+ # Scan manifests in base-artifacts history for an entry to rewrite.
+ local manifest history_root="" old_revision=""
+ local -A old
+ while read -r manifest; do
+ total_revs=$(($total_revs + 1))
+ if [ "$history_root" = "" ]; then
+ history_root="$manifest"
+ continue
+ elif [ "$old_revision" != "" ]; then
+ # Continue reading from get_git_history() to have it finish
+ # gracefully.
+ continue
+ fi
+ n_rev=$(($n_rev + 1))
+
+ old[major]=$(get_manifest "$manifest" "{rr[major]-0}")
+ old[minor]=$(get_manifest "$manifest" "{rr[minor]-0}")
+ old[patch]=$(get_manifest "$manifest" "{rr[patch]-0}")
+
+ assert_with_msg "rr[minor] should be less than 100" [ "${rr[minor]-0}" -lt 100 ]
+ assert_with_msg "rr[patch] should be less than 100" [ "${rr[patch]-0}" -lt 100 ]
+
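+ # Versions are packed as major*10000 + minor*100 + patch, so, e.g.,
+ # v1.2.3 compares as 10203.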
+ if [ "$(( rr[major]*100*100 + rr[minor]*100 + rr[patch] ))" -gt \
+ "$(( old[major]*100*100 + old[minor]*100 + old[patch] ))" ]; then
+ # Found old entry to update;
+ # directory name of $manifest is the revision
+ old_revision=$(basename "$(dirname "$manifest")")
+ fi
+ done < <(get_git_history -0 base-artifacts manifest.sh)
+
+ if $verbose; then
+ set -x
+ fi
+
+ rm -rf "$history_root"
+
+ if [ "$old_revision" = "" ]; then
+ return 0
+ fi
+
+ local rewrite_top="${rr[top_artifacts]}/99-rewrite"
+
+ if $rewrite_base_artifacts_first; then
+ change_tag="v${old[major]}.${old[minor]}.${old[patch]}_to_v${rr[major]-0}.${rr[minor]-0}.${rr[patch]-0}"
+ if [ "${BUILD_URL-}" != "" ]; then
+ change_tag="$change_tag-$(basename "$BUILD_URL")"
+ fi
+
+ local backup_branch
+ backup_branch=$(echo "${rr[baseline_branch]}" \
+ | sed -e "s#linaro-local/ci/#linaro-local/$change_tag/#")
+ if $push_base_artifacts; then
+ local repo="${rr[baseline_branch]#linaro-local/ci/}"
+ repo="ssh://bkp.tcwglab/home/tcwg-buildslave/base-artifacts/$repo.git"
+
+ git -C base-artifacts push --force \
+ "$repo" "HEAD:refs/heads/$backup_branch"
+ else
+ git -C base-artifacts branch --force "$backup_branch" HEAD
+ fi
+
+ rm -rf "$rewrite_top"
+ fi
+
+ local log_prefix="$rewrite_top/$rewrite_num-$n_rev-$total_revs"
+ mkdir -p "$(dirname "$log_prefix")"
+
+ echo -e "\n"" Rewriting: $(git -C base-artifacts show --no-patch --oneline $old_revision)""\n"
+
+ rewrite_single_revision "$old_revision" "$log_prefix" \
+ &> "$log_prefix-rewrite.log"
+
+ # Rescan base-artifacts again for another entry to update.
+ touch "$rewrite_top/more"
+ )
+}
+
+# Push base-artifacts, or skip the push.
+# The first push, which happens outside of the rewrite process, always runs.
+# Subsequent pushes may be skipped if the previous push is still running.
+# This is an optimization to avoid re-pushing histories during rewrite,
+# only for them to be discarded moments later.
+declare -g push_baseline_pid=0
+declare -g push_baseline_skipped=0
+push_baseline ()
+{
+ if [ "$push_baseline_pid" != "0" ]; then
+ if ! ps -p "$push_baseline_pid" >/dev/null; then
+ wait "$push_baseline_pid"
+ push_baseline_pid=0
+ fi
+
+ if [ "$push_baseline_pid" != "0" ]; then
+ push_baseline_skipped=$(($push_baseline_skipped + 1))
+ return 0
+ fi
+ fi
+
+ push_baseline_skipped=0
+
+ local repo1="${rr[baseline_branch]#linaro-local/ci/}"
+ repo="ssh://bkp.tcwglab/home/tcwg-buildslave/base-artifacts/$repo1.git"
+
+ (
+ set -euf -o pipefail
+
+ git_annex_upload base-artifacts annex "$repo1/$(basename "${BUILD_URL-0}")-"
+
+ if ! git ls-remote --heads "$repo" &>/dev/null; then
+ ssh bkp.tcwglab git init --bare \
+ "/home/tcwg-buildslave/base-artifacts/$repo1.git"
+ fi
+ )
+
+ git -C base-artifacts push --force \
+ "$repo" "HEAD:refs/heads/${rr[baseline_branch]}" &
+ push_baseline_pid=$!
+}
+
+if $commit_artifacts; then
+ update_baseline
+fi
+
+# Compute the maximum number of revisions that we accept to remove.
+# Removing more than expected is suspicious, so stop the rewriting process.
+declare nb_revs nb_removed_revs
+nb_revs=$(git -C base-artifacts rev-list --count HEAD)
+nb_revs=$((nb_revs<rewrite_num ? nb_revs : rewrite_num))
+if [[ "$max_removed_revs" =~ .*% ]]; then
+ # If max_removed_revs is expressed as a percentage of the total revisions,
+ # convert it into a number of revisions.
+ max_removed_revs=${max_removed_revs/\%/ / 100}
+ max_removed_revs=$((nb_revs * $max_removed_revs))
+fi
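+# For example, with max_removed_revs="10%" and nb_revs=200 the substitution
+# turns "10%" into "10 / 100", so $((nb_revs * 10 / 100)) yields 20.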
+
+while true; do
+ if $push_base_artifacts; then
+ if $rewrite_base_artifacts_first; then
+ # Trimming base-artifacts takes a lot of time on big histories,
+ # and it doesn't really do anything on repeat trimmings during
+ # history rewrite. Therefore, trim only on the first iteration
+ # of this loop.
+ trim_base_artifacts
+ fi
+ push_baseline
+ if $rewrite_base_artifacts_first; then
+ # We create a backup copy of the branch when rewriting the first
+ # revision. If the initial push is not done by that time, the backup
+ # would push a duplicate copy of the baseline, thus slowing down
+ # the initial push. Therefore, wait for the initial push here.
+ wait "$push_baseline_pid"
+ push_baseline_pid=0
+ fi
+ fi
+
+ if $rewrite_base_artifacts; then
+ rewrite_num=$(($rewrite_num - 1))
+ if [ "$rewrite_num" = "0" ]; then
+ break
+ fi
+
+ rm -f "${rr[top_artifacts]}/99-rewrite/more"
+ rewrite_base_artifacts &
+ res=0 && wait $! || res=$?
+ rewrite_base_artifacts_first=false
+
+ if [ "$res" != "0" ]; then
+ echo "WARNING: failed rewriting base-artifacts"
+ if [ -d ${rr[top_artifacts]}/jenkins ]; then
+ echo "maxim.kuvyrkov@linaro.org, laurent.alfonsi@linaro.org" \
+ > artifacts/jenkins/error-mail-recipients.txt
+ echo -e "${BUILD_URL-}\nWARNING: failed rewriting base-artifacts" \
+ >> artifacts/jenkins/error-mail-body.txt
+ fi
+ break
+ fi
+
+ if [ -f "${rr[top_artifacts]}/99-rewrite/more" ]; then
+
+ nb_removed_revs="$(find ${rr[top_artifacts]}/99-rewrite/ -maxdepth 1 \
+ -name '*.removed' | wc -l)"
+
+ if [ "$nb_removed_revs" -gt "$max_removed_revs" ]; then
+ echo "WARNING: Too many revisions removed. Aborting."
+ if [ -d ${rr[top_artifacts]}/jenkins ]; then
+ echo "maxim.kuvyrkov@linaro.org, laurent.alfonsi@linaro.org" \
+ > artifacts/jenkins/error-mail-recipients.txt
+ echo -e "${BUILD_URL-}\nWARNING: Too many revisions removed while "\
+ "rewriting base-artifacts" >> artifacts/jenkins/error-mail-body.txt
+ fi
+ break
+ fi
+
+ # Push current version and search for another revision to update.
+ continue
+ fi
+ fi
+
+ break
+done
+
+if [ "$push_baseline_pid" != "0" ]; then
+ wait "$push_baseline_pid"
+ push_baseline_pid=0
+ if [ "$push_baseline_skipped" != "0" ]; then
+ # Do the final push, which was previously skipped.
+ push_baseline
+ fi
+fi
diff --git a/round-robin-bisect.sh b/round-robin-bisect.sh
index 8ded4ea6..1abba2f9 100755
--- a/round-robin-bisect.sh
+++ b/round-robin-bisect.sh
@@ -6,12 +6,7 @@ scripts=$(dirname $0)
# shellcheck source=jenkins-helpers.sh
. $scripts/jenkins-helpers.sh
-# Relative artifacts are used for generation of manifests and reproduction
-# instructions.
-rel_artifacts=artifacts
-artifacts=$(pwd)/$rel_artifacts
-
-fresh_dir $artifacts "$artifacts/manifests/*" "$artifacts/jenkins/*"
+declare -A rr
# Process bisect-only args
convert_args_to_variables "$@"
@@ -20,16 +15,39 @@ shift "$SHIFT_CONVERTED_ARGS"
obligatory_variables bad_git build_script current_project
declare bad_git build_script current_project
+# Relative artifacts are used for generation of manifests and reproduction
+# instructions.
+rel_artifacts="${rel_artifacts-artifacts}"
+artifacts=$(pwd)/$rel_artifacts
+
+fresh_dir $artifacts \
+ "$artifacts/manifest.sh" \
+ "$artifacts/build-parameters/manifest.sh" \
+ "$artifacts/jenkins/*"
+
BUILD_URL="${BUILD_URL:-$(pwd)}"
replay_log="${replay_log-}"
reproduce_bisect="${reproduce_bisect:-false}"
# Process build args and record them in build-parameters.sh
-convert_args_to_variables ^^ $reproduce_bisect %% $artifacts/manifests/build-parameters.sh "$@"
+convert_args_to_variables ^^ $reproduce_bisect %%build_parameters $artifacts/build-parameters "$@"
$reproduce_bisect || manifest_pop
+# Account for "^^ false %%foo bar" options
+SHIFT_CONVERTED_ARGS=$(($SHIFT_CONVERTED_ARGS-4))
+shift "$SHIFT_CONVERTED_ARGS"
-obligatory_variables rr[ci_project] rr[ci_config]
-declare -A rr
+obligatory_variables build_parameters rr[ci_project] rr[ci_config]
+declare build_parameters
+
+# Process build args and record them in build-parameters.sh
+convert_args_to_variables ^^ $reproduce_bisect %%baseline_parameters $artifacts/baseline-parameters "$@"
+$reproduce_bisect || manifest_pop
+# Account for "^^ false %%foo bar" options
+SHIFT_CONVERTED_ARGS=$(($SHIFT_CONVERTED_ARGS-4))
+shift "$SHIFT_CONVERTED_ARGS"
+
+obligatory_variables baseline_parameters
+declare baseline_parameters
verbose="${verbose-true}"
@@ -42,6 +60,15 @@ touch $artifacts/jenkins/build-name
trap print_traceback EXIT
+# Exit with success
+exit_0 () {
+ # Clean up the bisect/ directory, which holds a full [unneeded] copy of the build.
+ chmod -R +rwx bisect/
+ rm -rf bisect/
+ trap "" EXIT
+ exit 0
+}
+
bad_url="${bad_git%#*}"
bad_branch="${bad_git#*#}"
@@ -51,72 +78,40 @@ rebase_workaround_opts=()
case "${rr[ci_project]}/${rr[ci_config]}:$current_project" in
tcwg_kernel/*-next-*:linux)
# Workaround linux-next/master rebasing on top of linux-next/stable.
- # Search for regressions against linux-mainline:master (aka linux-next:stable).
- clone_or_update_repo $current_project stable $bad_url
- # Just in case linux-next:stable has advanced between the build and bisect jobs,
- # use merge base between linux-next:stable and $bad_git.
- bad_rev="${bad_rev-$(git_rev_parse_long $current_project $bad_branch)}"
- linux_next_stable="${linux_next_stable-$(git -C $current_project merge-base HEAD $bad_rev)}"
- cat <<EOF | manifest_out
-declare -g linux_next_stable=$linux_next_stable
-EOF
- echo "Rebase workaround: forcing linux baseline to $linux_next_stable"
+ # Search for regressions between linux-next:stable and
+ # linux-next:master.
+ echo "Rebase workaround: forcing linux baseline to linux-next:stable"
rebase_workaround=true
rebase_workaround_opts+=("==rr[linux_git]"
- "$bad_url#$linux_next_stable")
+ "$bad_url#stable")
;;
esac
# Build baseline that we are going to re-use to speed-up bisection.
# (This also confirms that infrastructure is OK.)
-echo "Testing baseline (should be success)"
+echo "Testing baseline revision (expecting success)"
$build_script \
^^ $reproduce_bisect \
- %% $rel_artifacts/manifests/build-baseline.sh \
- @@ $rel_artifacts/manifests/build-parameters.sh \
- ==rr[mode] baseline \
- ==rr[update_baseline] push \
- ==rr[top_artifacts] "$rel_artifacts/build-baseline" \
+ %%rr[top_artifacts] "$rel_artifacts/build-baseline" \
+ @@ $build_parameters/manifest.sh \
+ @@ $baseline_parameters/manifest.sh \
+ ==rr[mode] build \
+ ==rr[update_baseline] force \
--verbose "$verbose" \
"${rebase_workaround_opts[@]}"
+# Establish results in build-baseline as the baseline to compare test builds
+# against.
+$scripts/round-robin-baseline.sh \
+ @@rr[top_artifacts] "$rel_artifacts/build-baseline" \
+ __base_artifacts base-artifacts
+
baseline_rev="${baseline_rev-$(git -C ${current_project} rev-parse HEAD)}"
cat <<EOF | manifest_out
declare -g baseline_rev=$baseline_rev
EOF
ln -f -s "build-baseline" "$artifacts/build-$baseline_rev"
-ln -f -s "build-baseline.sh" "$artifacts/manifests/build-$baseline_rev.sh"
-
-case "${rr[ci_project]}/${rr[ci_config]}" in
- tcwg_gnu/*-check_*|tcwg_cross/*-check_*)
- (
- # Build up lists of flaky tests. We do this by comparing
- # just-created baseline vs sumfiles in base-artifacts.
- fails=$(find $rel_artifacts/build-baseline \
- -path "*-check_regression/fails.sum")
- xfail_short="contrib/testsuite-management/flaky/${rr[ci_config]}.xfail"
- xfail="gcc-compare-results/$xfail_short"
-
- if ! [ -f "$fails" ] || ! [ -f "$xfail" ]; then
- exit
- fi
-
- (
- echo
- echo "# From $BUILD_URL:"
- cat "$fails" | sed -e "s/^\([A-Z]\+: \)/flaky \| \1/"
- ) >> "$xfail"
-
- git -C gcc-compare-results add "$xfail_short"
- git -C gcc-compare-results commit -m "From $BUILD_URL"
- git -C gcc-compare-results review -s
- git -C gcc-compare-results push gerrit HEAD:refs/heads/master
- ) &
- res=0 && wait $! || res=$?
- # Ignore any failures in the above.
- ;;
-esac
mkdir -p ./bisect
@@ -127,25 +122,24 @@ baseline_exclude=(
)
rsync -a --del --delete-excluded "${baseline_exclude[@]}" ./ ./bisect/baseline/
-cd $current_project
-
mkdir $artifacts/git-logs
# Make sure the sources are clean before bisecting
-git reset --hard
+git -C $current_project reset -q --hard
if [ -f "$replay_log" ]; then
cp "$replay_log" $artifacts/git-logs/bisect_replay.sh
- git bisect replay $artifacts/git-logs/bisect_replay.sh
+ git -C $current_project bisect replay $artifacts/git-logs/bisect_replay.sh
else
- git bisect start
+ git -C $current_project bisect start
fi
# Hard-link BISECT_LOG inside $artifacts to guarantee its upload to jenkins.
-ln -f "$(pwd)"/.git/BISECT_LOG $artifacts/git-logs/bisect_log
+ln -f "$(pwd)"/$current_project/.git/BISECT_LOG $artifacts/git-logs/bisect_log
-if ! git bisect log | grep -q "^git bisect .* $baseline_rev\$"; then
- git bisect good $baseline_rev
+if ! git -C $current_project bisect log \
+ | grep "^git bisect .* $baseline_rev\$" >/dev/null; then
+ git -C $current_project bisect good $baseline_rev
fi
# Bisect script.
@@ -177,11 +171,11 @@ set -euf -o pipefail
rev=\$(git rev-parse HEAD)
-if git bisect log | grep -q "^git bisect bad \$rev\\\$"; then
+if git bisect log | grep "^git bisect bad \$rev\\\$" >/dev/null; then
exit 1
-elif git bisect log | grep -q "^git bisect skip \$rev\\\$"; then
+elif git bisect log | grep "^git bisect skip \$rev\\\$" >/dev/null; then
exit 125
-elif git bisect log | grep -q "^git bisect good \$rev\\\$"; then
+elif git bisect log | grep "^git bisect good \$rev\\\$" >/dev/null; then
exit 0
fi
@@ -192,21 +186,47 @@ rsync -a --del ${baseline_exclude[@]} ./bisect/baseline/ ./
$build_script \
^^ $reproduce_bisect \
- %% $rel_artifacts/manifests/build-\$rev.sh \
- @@ $rel_artifacts/manifests/build-parameters.sh \
+ %%rr[top_artifacts] $rel_artifacts/build-\$rev \
+ @@ $rel_artifacts/build-parameters/manifest.sh \
==rr[mode] bisect \
+ ==rr[update_baseline] ignore \
==rr[${current_project}_git] "$bad_url#\$rev" \
- ==rr[top_artifacts] $rel_artifacts/build-\$rev \
--verbose "$verbose" &
res=0 && wait \$! || res=\$?
-git -C $current_project reset --hard
+git -C $current_project reset -q --hard
if [ x"\$res" != x"0" ]; then
if [ -f $rel_artifacts/build-\$rev/trigger-build-$current_project ]; then
exit 1
else
- exit 125
+ # The build failed due to an uninteresting problem -- a prerequisite
+ # failed to build or the benchmarking harness went down. We mark such
+ # revisions "skipped", but only up to a point. If we skip more revisions
+ # in a row than half the number of steps necessary to finish the bisect,
+ # then we mark such a "skipped" revision as "bad".
+
+ # Number of "git bisect skip" in a row
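+ # The awk script counts the trailing run of "skip" entries: any other
+ # "git bisect" entry in the log resets the counter.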
+ n_skips=\$(git -C $current_project bisect log | awk '
+BEGIN { n_skips=0 }
+/git bisect skip/ { n_skips++; next }
+/git bisect/ { n_skips=0 }
+END { print n_skips }
+')
+ revs_left=\$(git -C $current_project bisect view --pretty=%H | wc -l)
+ # Half the number of steps to finish the bisect
+ n_steps_2=\$(echo "n_steps=l(\$revs_left)/l(2); scale=0; n_steps/2" | bc -l)
+ if [ \$n_steps_2 -lt 2 ]; then
+ # Avoid skipping revisions at the end of the bisect.
+ n_steps_2=2
+ fi
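+ # Example: with ~1000 revisions left, about 10 bisection steps remain,
+ # so roughly 5 consecutive skips are tolerated (and never fewer than 2,
+ # per the clamp above).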
+ if [ \$n_skips -le \$n_steps_2 ]; then
+ exit 125
+ else
+ # We had several skips in a row and still have many revisions to bisect.
+ # Mark this one "bad" to progress the bisect.
+ exit 1
+ fi
fi
else
exit 0
@@ -215,9 +235,17 @@ EOF
chmod +x $artifacts/test.sh
# Fetch $bad_branch/$bad_rev from $bad_url
-prev_rev=$(git rev-parse HEAD)
-clone_or_update_repo . "$bad_branch" "$bad_url"
-bad_rev="${bad_rev-$(git_rev_parse_long . $bad_branch)}"
+prev_rev=$(git -C $current_project rev-parse HEAD)
+# Note: avoid using clone_or_update_repo() here, for two reasons. First,
+# it can, potentially, delete and re-clone the repo. Deleting the repo
+# would be bad, since we would lose bisect state initialized by the above
+# "git bisect" commands. Second, it calls git_checkout(), which does a
+# more thorough job of cleaning up the repo directory than the
+# "git reset -q --hard" in ./test.sh. The logic here is that we want
+# the "bad" build to run in the same environment as the "test" builds.
+git -C "$current_project" fetch "$bad_url" "$bad_branch"
+git -C "$current_project" checkout --detach FETCH_HEAD
+bad_rev="${bad_rev-$(git -C "$current_project" rev-parse HEAD)}"
cat <<EOF | manifest_out
declare -g bad_rev=$bad_rev
EOF
@@ -225,150 +253,62 @@ EOF
if [ x"$baseline_rev" = x"$bad_rev" ]; then
echo "WARNING: Detected regression with no change in sources of $current_project"
sed -i -e "s/\$/-no_change/" $artifacts/jenkins/build-name
- trap "" EXIT
- exit 1
+ res=0
+else
+ # Confirm regression in $bad_rev vs $baseline_rev.
+ echo "Testing bad revision (expecting failure)"
+ git -C $current_project checkout --detach $bad_rev
+ cd $current_project
+ $artifacts/test.sh &
+ res=0 && wait $! || res=$?
+ cd ..
fi
-# Confirm regression in $bad_rev vs $baseline_rev.
-git checkout --detach $bad_rev
-$artifacts/test.sh &
-res=0 && wait $! || res=$?
# Restore revision previously checked out. Otherwise "git bisect run"
# below will not use replay info.
-git checkout --detach $prev_rev
+git -C $current_project checkout --detach $prev_rev
if [ x"$res" = x"0" ]; then
if $rebase_workaround; then
echo "Rebase workaround: no regression between $baseline_rev and $bad_rev"
sed -i -e "s/\$/-bad_rev-good/" $artifacts/jenkins/build-name
- project_name="${rr[ci_project]}/${rr[ci_config]}:$current_project"
- case $project_name in
- tcwg_kernel/llvm-*-next-*:linux)
- cat > $artifacts/trigger-build-rebase <<EOF
-llvm_git=baseline
-EOF
- ;;
- tcwg_kernel/gnu-*-next-*:linux)
- cat > $artifacts/trigger-build-rebase <<EOF
-binutils_git=baseline
-gcc_git=baseline
-EOF
- ;;
- *) assert_with_msg "Unknown project name: $project_name" false ;;
- esac
cat >> $artifacts/trigger-build-rebase <<EOF
linux_git=$bad_url#$baseline_rev
-update_baseline=reset
+update_baseline=force
EOF
else
+ # The build for $bad_rev succeeded, which can be due to a number
+ # of things:
+ # - Regressions in speed benchmarking are not 100% stable,
+ # - Something has changed in the build environment,
+ # - Underlying hardware has changed,
+ # - Something entirely different.
+ # In all these cases we recover by rebuilding from baseline sources
+ # and updating the baseline.
+ # If the baseline reset->build->bisect->reset cycle happens again and
+ # again, then this is a scripting or infrastructure problem, which we
+ # detect by an unusually high ratio of forced builds in the CI dashboard.
echo "WARNING: build for bad_rev $bad_rev succeeded"
sed -i -e "s/\$/-spurious/" $artifacts/jenkins/build-name
- # Regressions in speed benchmarking are not stable,
- # so retry with resetting baseline artifacts.
- # Retry with default parameters for other cases.
- case "${rr[ci_project]}/${rr[ci_config]}" in
- tcwg_bmk*/gnu*-O[23]*)
- cat > $artifacts/trigger-build-reset <<EOF
-binutils_git=baseline
-gcc_git=baseline
-glibc_git=baseline
-update_baseline=reset
-EOF
- ;;
- tcwg_bmk*/llvm-*-O[23]*)
- cat > $artifacts/trigger-build-reset <<EOF
-binutils_git=baseline
-gcc_git=baseline
-glibc_git=baseline
-llvm_git=baseline
-update_baseline=reset
+ cat > $artifacts/trigger-build-reset <<EOF
+update_baseline=force
EOF
- ;;
- esac
fi
- echo > $artifacts/jenkins/mail-recipients.txt
- trap "" EXIT
- exit 0
+ exit_0
elif [ x"$res" = x"125" ]; then
# We have confirmed a regression, but not what we have been triggered
# to bisect.
echo "WARNING: build for bad_rev $bad_rev showed uninteresting regression"
sed -i -e "s/\$/-uninteresting/" $artifacts/jenkins/build-name
- echo > $artifacts/jenkins/mail-recipients.txt
- trap "" EXIT
- exit 0
+ exit_0
fi
-if ! git bisect log | grep -q "^git bisect .* $bad_rev\$"; then
- git bisect bad $bad_rev
+if ! git -C $current_project bisect log \
+ | grep "^git bisect .* $bad_rev\$" >/dev/null; then
+ git -C $current_project bisect bad $bad_rev
ln -f -s "build-$bad_rev" "$artifacts/build-bad"
- ln -f -s "build-$bad_rev.sh" "$artifacts/manifests/build-bad.sh"
fi
-# Clone interesting-commits.git repo, which contains a list of SHA1s
-# that might cut down bisection time. Mostly, these are first_bad and
-# last_good commits.
-interesting_commits="../bisect/interesting-commits"
-
-interesting_commits_url=https://git-us.linaro.org/toolchain/ci/interesting-commits.git
-interesting_commits_branch=linaro-local/ci/${rr[ci_project]}
-if ! git ls-remote --heads $interesting_commits_url \
- | grep -q ".* refs/heads/$interesting_commits_branch"; then
- interesting_commits_branch=empty
-fi
-interesting_commits_rev=${interesting_commits_rev-$interesting_commits_branch}
-clone_or_update_repo $interesting_commits $interesting_commits_rev $interesting_commits_url auto $interesting_commits_branch
-interesting_commits_rev=$(git -C $interesting_commits rev-parse HEAD)
-cat <<EOF | manifest_out
-declare -g interesting_commits_rev=$interesting_commits_rev
-EOF
-
-# Add SHA1 commit $1 to interesting-commits and push the repo.
-# If $2 is "regression" then record current configuration as having regressed
-# at this commit.
-# Ignore failures (since this is cache handling).
-push_interesting_commit ()
-{
- declare -g push_interesting_commit_result=0
- (
- set -euf -o pipefail
-
- local sha1="$1"
- local kind="$2"
- local -a configs
-
- if ! grep -q "^$sha1" $interesting_commits/$current_project; then
- echo "$sha1" >> $interesting_commits/$current_project
- fi
-
- if [ x"$kind" = x"regression" ]; then
- mapfile -t configs < <(grep "^$sha1" $interesting_commits/$current_project | sed -e "s/^$sha1 *//")
- configs+=("${rr[ci_project]}"/"${rr[ci_config]}")
- mapfile -t configs < <(echo "${configs[@]}" | tr ' ' '\n' | sort -u)
- sed -i -e "s#^$sha1.*\$#$sha1 ${configs[*]}#" $interesting_commits/$current_project
- fi
-
- git -C $interesting_commits add .
- if [ x"$(git -C $interesting_commits status --short)" = x"" ]; then
- # No file has changed. We've been here before...
- # E.g., this is a re-occuring regression in linux-next.
- exit 125
- fi
- git -C $interesting_commits commit -m "Add $kind $sha1 from $BUILD_URL
-
-${configs[*]}" &
- local res=0 && wait $! || res=$?
-
- if [ x"$res" = x"0" ]; then
- # Interesting-commits.git do not have .gitreview, so it's
- # simpler to push via gitolite.
- git_init_linaro_local_remote $interesting_commits baseline false
- git_push $interesting_commits baseline linaro-local/ci/${rr[ci_project]}
- fi
- ) &
- wait $! || push_interesting_commit_result=$?
-}
-
# Print first_bad revision (if detected)
get_first_bad ()
{
@@ -378,7 +318,8 @@ get_first_bad ()
 # explicitly set "+o pipefail".
set -euf +o pipefail
- git bisect log | tail -n1 | grep "^# first bad commit:" \
+ git -C $current_project bisect log | tail -n1 \
+ | grep "^# first bad commit:" \
| sed -e "s/^# first bad commit: \[\([0-9a-f]*\)\].*/\1/"
)
}
@@ -395,63 +336,160 @@ print_tested_revs ()
set -euf +o pipefail
local kind="${1-[a-z]*}"
- git bisect log | grep "^git bisect $kind " | sed -e "s/^git bisect $kind //"
+ git -C $current_project bisect log | grep "^git bisect $kind " \
+ | sed -e "s/^git bisect $kind //"
)
}
-# Try to reduce bisection range by testing regressions (and their parents)
-# identified in other configurations.
-touch $interesting_commits/$current_project
-# Generate list of commits inside the bisection range.
-commits_to_test=$artifacts/git-logs/commits_to_test
-git bisect view --pretty=%H > $commits_to_test
+# Print ratio at which commit splits current bisection range, or "-1" if
+# the commit is outside of bisection range. The ideal split is 50/50 --
+# right in the middle.
+# $1: Commit SHA1
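+# Example: with 200 commits in range and the commit at line 160, the split
+# is 100*160/200 = 80, which is folded to 100-80 = 20, i.e. a 20/80 split.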
+print_sha1_split ()
+{
+ local sha1="$1"
-# Record commits in the bisect range. These are commits_to_test plus
-# commits that have been tested.
-commits_in_range=$artifacts/git-logs/commits_in_range
-cp $commits_to_test $commits_in_range
-print_tested_revs >> $commits_in_range
+ local line
+ line=$(grep -n "^$sha1\$" $commits_to_test | cut -d":" -f 1)
+ if [ x"$line" = x"" ]; then
+ echo "-1"
+ return
+ fi
+
+ # Skip revisions that were already tested. Good revisions are filtered
+ # out in the above $commits_to_test check, and here we filter out
+ # "bad" and "skip" revisions.
+ if git -C $current_project bisect log \
+ | grep "^git bisect .* $sha1\$" >/dev/null; then
+ echo "-1"
+ return
+ fi
-# This loop can generate lots of console noise.
-set +x
-while [ x"$(get_first_bad </dev/null)" = x"" ] && read -a arr; do
+ line=$((100 * $line / $(cat $commits_to_test | wc -l)))
+ if [ $line -gt 50 ]; then
+ line=$((100 - $line))
+ fi
+ echo "$line"
+}
+
+# Try to reduce bisection range by testing regressions (and their parents)
+# identified in other configurations.
+print_interesting_commit ()
+{
(
set -euf -o pipefail
- sha1="${arr[0]}"
+ # Generate list of commits inside the bisection range.
+ git -C $current_project bisect view --pretty=%H > $commits_to_test
- # Ignore commits outside of bisection range.
- if ! grep -q "^$sha1\$" $commits_to_test; then
- exit 0
+ # Bisecting linux-next.git regressions is difficult enough due to how
+ # the tree is constructed, so we prefer to not use interesting-commits
+ # when $rebase_workaround is true. This makes linux-next bisects as
+ # natural as they can be.
+ if $rebase_workaround; then
+ return
fi
- # Skip revisions that were already tested. Good revisions are filtered
- # out in the above $commits_to_test check, and here we filter out
- # "bad" and "skip" revisions.
- if git bisect log | grep -q "^git bisect .* $sha1\$"; then
- exit 0
+ # Clone interesting-commits.git repo, which contains a list of SHA1s
+ # that might cut down bisection time. Mostly, these are first_bad and
+ # last_good commits.
+ local icommits="bisect/interesting-commits"
+ clone_or_update_repo $icommits master \
+ https://git-us.linaro.org/toolchain/ci/interesting-commits.git \
+ auto master >/dev/null 2>&1
+
+ local project_dir
+ project_dir=$icommits/$(interesting_subdir $current_project)
+
+ if ! [ -d "$project_dir" ]; then
+ return
fi
+ # The loop below can generate lots of console noise.
+ set +x
+
+ # Find the interesting commit that splits the bisection range best.
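+ # Each subdirectory of $project_dir is named after a first_bad sha1 and
+ # may contain last_good files recorded by other jobs; both the sha1
+ # itself and its recorded last_good parents are candidates.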
+ local sha1 prev_sha1 best_split=-1 best_sha1=""
+ while read sha1; do
+ while read prev_sha1; do
+ split=$(print_sha1_split "$prev_sha1")
+ if [ $split -gt $best_split ]; then
+ best_split=$split
+ best_sha1=$prev_sha1
+ fi
+ done < <(echo "$sha1"
+ cd "$project_dir/$sha1"
+ find -name last_good -print0 | xargs -0 cat | sort -u)
+ done < <(cd "$project_dir"; ls)
+
if $verbose; then set -x; fi
- git checkout --detach $sha1
+ if [ "$best_sha1" = "" ]; then
+ # We didn't find an interesting sha1, so use a stock recommendation by
+ # git. Note that we want to remain in the "try-interesting-commits"
+ # loop (rather than switching to "git bisect run") in the hopes that
+ # some other job will add a new entry to interesting-commits while
+ # we are testing the stock revisions.
+ best_sha1=$(git -C $current_project bisect next | tail -n1 \
+ | sed -e "s/^\[\([0-9a-f]\+\)\].*\$/\1/")
+ # Ensure that best_sha1 is indeed a sha1. There seems to be no way
+ # to tell "git bisect next" to print only the sha1 without extra
+ # annotations, so we have to parse the string with "sed".
+ if ! echo "$best_sha1" | grep '^[0-9a-f]\+$' &>/dev/null; then
+ best_sha1=""
+ fi
+ fi
+ echo "$best_split $best_sha1"
+ )
+}
+
+commits_to_test=$artifacts/git-logs/commits_to_test
+
+IFS=" " read -r split sha1 <<< "$(print_interesting_commit)"
+
+# Record commits in the initial bisect range. These are commits_to_test plus
+# commits that have been tested.
+commits_in_range=$artifacts/git-logs/commits_in_range
+cp $commits_to_test $commits_in_range
+print_tested_revs >> $commits_in_range
+
+while [ x"$sha1" != x"" ] \
+ && [ x"$(get_first_bad </dev/null)" = x"" ]; do
+ if [ "$split" = "-1" ]; then
+ echo "Trying $sha1 stock recommendation"
+ else
+ echo "Trying $sha1 interesting commit, which splits bisections range" \
+ "at ${split}%"
+ fi
+
+ git -C $current_project checkout --detach $sha1
+ cd $current_project
$artifacts/test.sh &
res=0 && wait $! || res=$?
+ cd ..
if [ x"$res" = x"0" ]; then
- git bisect good
+ git -C $current_project bisect good || break
elif [ x"$res" = x"125" ]; then
- git bisect skip
+ # It may happen that we get to the point where only skipped commits
+ # are left to test, and any new "git bisect skip" will return an error.
+ # In this case, break out of this loop and let "git bisect run" below
+ # handle it. [We also add "|| break" to the good and bad cases
+ # for symmetry.]
+ git -C $current_project bisect skip || break
else
- git bisect bad
+ git -C $current_project bisect bad || break
fi
- git bisect view --pretty=%H > $commits_to_test
- ) </dev/null
-done < <(cat $interesting_commits/$current_project)
-if $verbose; then set -x; fi
+
+ IFS=" " read -r split sha1 <<< "$(print_interesting_commit)"
+done
if [ x"$(get_first_bad)" = x"" ]; then
+ # Run stock "git bisect run" for corner cases like $rebase_workaround.
+ # Most of the time we have bisected the failure down to first_bad above.
+ cd $current_project
git bisect run $artifacts/test.sh &
res=0 && wait $! || res=$?
+ cd ..
if [ x"$res" = x"0" ]; then
assert_with_msg "Didn't find first bad commit!" [ x"$(get_first_bad)" != x"" ]
@@ -459,15 +497,20 @@ if [ x"$(get_first_bad)" = x"" ]; then
fi
first_bad=$(get_first_bad)
-reset_rev="$first_bad"
-notify_devs=true
-notify_author=""
+
+if [ x"$first_bad" != x"" ] \
+ && ! [ -f $artifacts/build-$first_bad/trigger-build-$current_project ]; then
+ # First_bad is not a real or "interesting" regression. Perhaps it was
+ # a "skipped" commit marked as "bad.
+ first_bad=""
+fi
+
if [ x"$first_bad" != x"" ]; then
# "git bisect run" succeeded. Check whether this is an actual regression
# or bisection artifact.
last_good=""
bad_last_good=""
- for sha1 in $(git rev-parse $first_bad^@); do
+ for sha1 in $(git -C $current_project rev-parse $first_bad^@); do
# It seems that git-bisect assumes parent commit as "good" on
# the basis of one of its children being "good". Therefore we
# can have a situation when we have parent P with children C1 and C2,
@@ -483,7 +526,7 @@ if [ x"$first_bad" != x"" ]; then
if ! grep -q "^$sha1\$" $commits_in_range; then
child_tested_good=false
for tested_good in $(print_tested_revs good); do
- if git merge-base --is-ancestor $sha1 $tested_good; then
+ if git -C $current_project merge-base --is-ancestor $sha1 $tested_good; then
child_tested_good=true
break
fi
@@ -494,9 +537,11 @@ if [ x"$first_bad" != x"" ]; then
fi
echo "Testing first_bad's parent $sha1 (hoping for success)"
- git checkout --detach "$sha1"
+ git -C $current_project checkout --detach "$sha1"
+ cd $current_project
$artifacts/test.sh &
res=0 && wait $! || res=$?
+ cd ..
if [ x"$res" = x"0" ]; then
last_good=$sha1
break
@@ -504,30 +549,20 @@ if [ x"$first_bad" != x"" ]; then
bad_last_good=$sha1
done
- if [ x"$last_good" != x"" ]; then
- # We have successfully identified a bad commit with a good parent!
- # Add both $last_good and $first_bad to interesting commits.
- push_interesting_commit $last_good "last-good"
- push_interesting_commit $first_bad "regression"
- if [ x"$push_interesting_commit_result" = x"125" ]; then
- notify_devs=false
- fi
- else
+ if [ x"$last_good" = x"" ]; then
assert_with_msg "Broken bisection range" [ x"$bad_last_good" != x"" ]
- # All parents of $first_bad tested bad, so retrigger bisection with
- # a reduced bisection range.
- cat > $artifacts/trigger-bisect <<EOF
-current_project=$current_project
-bad_git=$bad_url#$bad_last_good
-EOF
- sed -i -e "s/\$/-retry-bisect/" $artifacts/jenkins/build-name
- # Don't send any emails.
- echo > $artifacts/jenkins/mail-recipients.txt
- trap "" EXIT
- exit 0
+ # All parents of $first_bad tested bad. This is, essentially, the
+ # same situation as "git bisect" failing, which we handle below.
+ first_bad=""
fi
+fi
+
+reset_rev=""
+if [ x"$first_bad" != x"" ]; then
+ reset_rev="$first_bad"
else
- # "Git bisect" didn't find the first_bad commit.
+ # "Git bisect" didn't find the first_bad commit or this commit points to
+ # an uninteresting regression.
# Possible reasons include:
# - We have marked the regressing commit as "skip", which can happen
# to tcwg_bmk* projects when benchmarking infra has a problem.
@@ -536,6 +571,9 @@ else
# We want to reset baseline to HEAD in this case, so that we catch most
# of the commits that introduced change in the result metric.
#
+ # - We have marked a skipped commit as bad to advance the bisect, even
+ # though the failure was uninteresting.
+ #
# So, to make at least some progress on narrowing down the regression ...
# - look for the last commit that tested "good". If it's not $baseline_rev,
# then we have narrowed down the bisection range somewhat. Therefore,
@@ -543,8 +581,8 @@ else
# - one to ADVANCE baseline to the last GOOD commit, and
# - another to expose the regression again.
#
- # - If $baseline_rev is the only good commit, then we have got outselves
- # to a tricky situation: we can't find the regression, and can't make
+ # - If $baseline_rev is the only good commit, then we have got ourselves
+ # into a tricky situation: we can't find the regression and can't make
# a good build, which would advance the baseline. To resolve this,
# trigger two builds:
# - one to RESET baseline to the last BAD commit, and
@@ -557,23 +595,40 @@ else
[ x"$last_good" != x"" ]
if [ x"$last_good" != x"$baseline_rev" ]; then
- # $reset_rev=="" due to $first_bad==""
- cat > $artifacts/trigger-build-1-advance <<EOF
-${current_project}_git=$bad_url#$last_good
-EOF
sed -i -e "s/\$/-advance-baseline/" $artifacts/jenkins/build-name
else
- reset_rev=$(print_tested_revs bad | tail -n1)
+ # Reset baseline to the earliest bad revision with a true regression.
+ # For this we look through tested-bad revisions in reverse order.
+ for reset_rev in $(print_tested_revs bad | tac); do
+ if [ -f $artifacts/build-$reset_rev/trigger-build-$current_project ]; then
+ break
+ fi
+ done
sed -i -e "s/\$/-reset-baseline/" $artifacts/jenkins/build-name
fi
- # Don't send any emails.
- notify_devs=false
fi
-# Bisect if officially over.
-cd ..
+# Bisect is officially over.
# Create trigger-build-* files for subsequent builds.
+
+# Advance the baseline to the $last_good revision. Don't trigger an
+# unnecessary build for $baseline_rev itself.
+if [ x"$last_good" != x"$baseline_rev" ]; then
+ assert_with_msg "last_good should not be empty" [ x"$last_good" != x"" ]
+ cat > $artifacts/trigger-build-1-last-good <<EOF
+${current_project}_git=$bad_url#$last_good
+update_baseline=force
+EOF
+ # Note that even though the $last_good build should succeed, we force
+ # it to update the baseline. Otherwise, should the last-good build fail,
+ # it would start a bisect job, which would trigger some more builds.
+ # The problem is that, as soon as our first-bad build resets the baseline,
+ # all those builds triggered by the 2nd bisect would operate "in the past".
+ # Therefore, we either need to handle builds "from the past", or, easier,
+ # just force the last-good build to always succeed.
+fi
+
if ! [ -f $artifacts/build-$bad_rev/trigger-build-$current_project ]; then
# This can happen *only* when replaying bisects.
# Otherwise $bad_rev is always tested second to $baseline_rev.
@@ -599,242 +654,33 @@ if [ x"$reset_rev" != x"" ]; then \
# Reset baseline to the regressed commit so that we will catch subsequent
# regressions (worse than $bad_rev).
cp $artifacts/build-$reset_rev/trigger-build-$current_project \
- $artifacts/trigger-build-1-reset
- echo "update_baseline=reset" >> $artifacts/trigger-build-1-reset
+ $artifacts/trigger-build-2-reset
+ echo "update_baseline=force" >> $artifacts/trigger-build-2-reset
+
+ if [ "$reset_rev" = "$first_bad" ]; then
+ # We have identified a single-commit regression, so notify developers
+ # about it.
+ echo "notify=onregression" >> $artifacts/trigger-build-2-reset
+ fi
fi
# Trigger master build now instead of waiting for next timed SCM trigger.
+# Make sure the git specification is as it was passed to the bisect
+# (i.e., the master branch, not a specific SHA1).
cp $artifacts/build-$bad_rev/trigger-build-$current_project \
- $artifacts/trigger-build-2-default
+ $artifacts/trigger-build-3-default
+sed -i -e "s%^\(${current_project}_git\)=.*\$%\1=$bad_git%" \
+ $artifacts/trigger-build-3-default
# Save BISECT_* logs
-find "$current_project" -path "$current_project/.git/BISECT_*" -print0 | xargs -0 -I@ mv @ $artifacts/git-logs/
-
-# Remove any fail-safe email body
-rm -f $artifacts/jenkins/mail-body.txt
+find "$current_project" -path "$current_project/.git/BISECT_*" -print0 \
+ | xargs -0 -I@ mv @ $artifacts/git-logs/
if [ x"$first_bad" != x"" ]; then
- mkdir -p $artifacts/jenkins
sed -i -e "s/\$/-$first_bad/" $artifacts/jenkins/build-name
ln -f -s "build-$first_bad" "$artifacts/build-first_bad"
- ln -f -s "build-$first_bad.sh" "$artifacts/manifests/build-first_bad.sh"
-
- good_name="last_good"
- good_sha1="$last_good"
- bad_name="first_bad"
- bad_sha1="$first_bad"
-
ln -f -s "build-$last_good" "$artifacts/build-last_good"
- ln -f -s "build-$last_good.sh" "$artifacts/manifests/build-last_good.sh"
-
- occurences="$(cat $current_project/$interesting_commits/$current_project | grep "^$first_bad" | sed -e "s/^$first_bad *//" | tr ' ' '\n' | sed "s#^# - #")"
- if [ "$(echo "$occurences" | wc -l)" -le 1 ]; then
- git_log_level="medium"
- notify_author=$(git -C $current_project log --pretty="%an <%ae>" -n 1 \
- "$first_bad")
- else
- git_log_level="short"
- fi
-
- cat >> $artifacts/jenkins/mail-body.txt <<EOF
-Successfully identified regression in *$current_project* in CI configuration ${rr[ci_project]}/${rr[ci_config]}. So far, this commit has regressed CI configurations:
-$occurences
-
-Culprit:
-<cut>
-$(git -C $current_project log --pretty=$git_log_level -n 1 $first_bad)
-</cut>
-
-EOF
-else
- good_name="baseline_rev"
- good_sha1="$baseline_rev"
- bad_name="bad"
- bad_sha1="$bad_rev"
- cat >> $artifacts/jenkins/mail-body.txt <<EOF
-Could not identify regression in *$current_project* in CI configuration ${rr[ci_project]}/${rr[ci_config]}. See 'Bisect log' in the links below for bisection details.
-
-EOF
-fi
-
-if [ x"${TCWG_JIRA_TOKEN+set}" = x"set" ] && [ x"$first_bad" != x"" ]; then
- case "${rr[ci_project]}/${rr[ci_config]}:$current_project" in
- tcwg_kernel/gnu-*:linux) jira_card="GNU-681" ;;
- tcwg_kernel/gnu-*:*) jira_card="GNU-680" ;;
- tcwg_kernel/llvm-*:linux) jira_card="LLVM-647" ;;
- tcwg_kernel/llvm-*:*) jira_card="LLVM-646" ;;
- tcwg_bmk_*/gnu*-O[23]*) jira_card="GNU-689" ;;
- tcwg_bmk_*/gnu*) jira_card="GNU-686" ;;
- tcwg_bmk_*/llvm-*O[23]*) jira_card="LLVM-651" ;;
- tcwg_bmk_*/llvm-*) jira_card="LLVM-650" ;;
- tcwg_binutils/*) jira_card="GNU-692" ;;
- tcwg_cross/*) jira_card="GNU-692" ;;
- tcwg_gnu/*) jira_card="GNU-692" ;;
- # Catch-all case for when project/config IDs change, so that we
- # won't miss notifications. Forward all that to GNU-692.
- *) jira_card="GNU-692" ;;
- esac
-
- if $notify_devs && [ x"$jira_card" != x"" ]; then
- cat > $artifacts/jenkins/jira-body.txt <<EOF
-[$jira_card]
-$(cat $artifacts/jenkins/mail-body.txt)
-
-Details: ${BUILD_URL}artifact/$rel_artifacts/jenkins/mail-body.txt/*view*/
-Even more details: ${BUILD_URL}artifact/$rel_artifacts/
-EOF
- if ! [ -f $HOME/.jipdate.yml ]; then
- cat > $HOME/.jipdate.yml <<EOF
-server:
- url: https://linaro.atlassian.net
- token: #TCWG_JIRA_TOKEN#
-text-editor: False
-username: team-toolchain+tcwg-jira@linaro.org
-EOF
- fi
- sed -i -e "s/#TCWG_JIRA_TOKEN#/$TCWG_JIRA_TOKEN/" $HOME/.jipdate.yml
- echo y | jipdate.py -f $artifacts/jenkins/jira-body.txt
- fi
-fi
-
-cat >> $artifacts/jenkins/mail-body.txt <<EOF
-Results regressed to (for $bad_name == $bad_sha1)
-$(cat $artifacts/build-$bad_sha1/results)
-
-from (for $good_name == $good_sha1)
-$(cat $artifacts/build-$good_sha1/results)
-
-EOF
-
-good_build_artifacts="Artifacts of $good_name build: ${BUILD_URL}artifact/$rel_artifacts/build-$good_sha1/"
-bad_build_artifacts="Artifacts of $bad_name build: ${BUILD_URL}artifact/$rel_artifacts/build-$bad_sha1/"
-
-# Benchmark builds have a results ID that is used to download the results
-if test -f "$artifacts/build-$good_sha1/results_id"; then
- cat >> $artifacts/jenkins/mail-body.txt <<EOF
-$good_build_artifacts
-Results ID of $good_name: $(cat $artifacts/build-$good_sha1/results_id)
-$bad_build_artifacts
-Results ID of $bad_name: $(cat $artifacts/build-$bad_sha1/results_id)
-EOF
-# Os vs Os LTO builds have two result IDs per job
-elif test -f "$artifacts/build-$good_sha1/results_id-1"; then
- cat >> $artifacts/jenkins/mail-body.txt <<EOF
-$good_build_artifacts
-Results IDs of $good_name:
-$(cat $artifacts/build-$good_sha1/results_id-1)
-$(cat $artifacts/build-$good_sha1/results_id-2)
-$bad_build_artifacts
-Results ID of $bad_name:
-$(cat $artifacts/build-$bad_sha1/results_id-1)
-$(cat $artifacts/build-$bad_sha1/results_id-2)
-EOF
-else
- cat >> $artifacts/jenkins/mail-body.txt <<EOF
-$good_build_artifacts
-$bad_build_artifacts
-EOF
-fi
-
-cat >> $artifacts/jenkins/mail-body.txt <<EOF
-Build top page/logs: ${BUILD_URL}
-
-Configuration details:
-$(cat $artifacts/manifests/build-baseline.sh | grep '_git]' | grep -v '="no_')
-
-Reproduce builds:
-<cut>
-mkdir investigate-$current_project-$bad_sha1
-cd investigate-$current_project-$bad_sha1
-
-git clone https://git.linaro.org/toolchain/jenkins-scripts
-
-mkdir -p $rel_artifacts/manifests
-curl -o $rel_artifacts/manifests/build-baseline.sh ${BUILD_URL}artifact/$rel_artifacts/manifests/build-baseline.sh --fail
-curl -o $rel_artifacts/manifests/build-parameters.sh ${BUILD_URL}artifact/$rel_artifacts/manifests/build-parameters.sh --fail
-curl -o $rel_artifacts/test.sh ${BUILD_URL}artifact/$rel_artifacts/test.sh --fail
-chmod +x $rel_artifacts/test.sh
-
-# Reproduce the baseline build (build all pre-requisites)
-$build_script @@ $rel_artifacts/manifests/build-baseline.sh
-
-# Save baseline build state (which is then restored in $rel_artifacts/test.sh)
-mkdir -p ./bisect
-rsync -a --del --delete-excluded ${baseline_exclude[@]} ./ ./bisect/baseline/
-
-cd $current_project
-
-# Reproduce $bad_name build
-git checkout --detach $bad_sha1
-../$rel_artifacts/test.sh
-
-# Reproduce $good_name build
-git checkout --detach $good_sha1
-../$rel_artifacts/test.sh
-
-cd ..
-</cut>
-
-History of pending regressions and results: https://git.linaro.org/toolchain/ci/base-artifacts.git/log/?h=linaro-local/ci/${rr[ci_project]}/${rr[ci_config]}
-
-Artifacts: ${BUILD_URL}artifact/$rel_artifacts/
-Build log: ${BUILD_URL}consoleText
-EOF
-
-if [ x"$first_bad" != x"" ]; then
- cat >> $artifacts/jenkins/mail-body.txt <<EOF
-
-Full commit (up to 1000 lines):
-<cut>
-$(git -C $current_project show --stat --patch $first_bad | head -n 1000)
-</cut>
-EOF
-fi
-
-# Set mail recipients last to preserve catch-error value from .yaml file.
-# Email developers.
-CI_MAIL_RECIPIENTS=("tcwg-validation@linaro.org")
-case "$notify_author@${rr[ci_project]}/${rr[ci_config]}:$current_project" in
- ""@*/*:*) ;;
- *@tcwg_gnu/*-check_bootstrap*:*) ;; # We are building up list of flaky tests
- *@tcwg_cross/*-check_cross:*) ;; # We are building up list of flaky tests
- *@tcwg_gnu/*:*)
- CI_MAIL_RECIPIENTS+=("$notify_author")
- CI_MAIL_RECIPIENTS+=("linaro-toolchain@lists.linaro.org")
- ;;
- *@tcwg_cross/*:*)
- CI_MAIL_RECIPIENTS+=("$notify_author")
- CI_MAIL_RECIPIENTS+=("linaro-toolchain@lists.linaro.org")
- ;;
- *@tcwg_kernel/llvm-*:linux)
- CI_MAIL_RECIPIENTS+=("$notify_author")
- CI_MAIL_RECIPIENTS+=("linaro-toolchain@lists.linaro.org")
- CI_MAIL_RECIPIENTS+=("clang-built-linux@googlegroups.com")
- CI_MAIL_RECIPIENTS+=("arnd@linaro.org")
- ;;
- *@tcwg_kernel/llvm-*:llvm)
- CI_MAIL_RECIPIENTS+=("$notify_author")
- CI_MAIL_RECIPIENTS+=("linaro-toolchain@lists.linaro.org")
- CI_MAIL_RECIPIENTS+=("clang-built-linux@googlegroups.com")
- ;;
- *@tcwg_bmk*/*:*)
- # Don't notify patch authors until we improve benchmarking email
- # reporting.
- #CI_MAIL_RECIPIENTS+=("$notify_author")
- CI_MAIL_RECIPIENTS+=("linaro-toolchain@lists.linaro.org")
- ;;
- *@*/*:*)
- CI_MAIL_RECIPIENTS+=("linaro-toolchain@lists.linaro.org")
- ;;
-esac
-if $notify_devs; then
- (
- IFS=","
- cat > $artifacts/jenkins/mail-recipients.txt <<EOF
-${CI_MAIL_RECIPIENTS[*]}
-EOF
- )
fi
-trap "" EXIT
+exit_0
diff --git a/round-robin-notify.sh b/round-robin-notify.sh
new file mode 100755
index 00000000..318b1db6
--- /dev/null
+++ b/round-robin-notify.sh
@@ -0,0 +1,2397 @@
+#!/bin/bash
+
+set -euf -o pipefail
+
+scripts=$(dirname $0)
+
+# shellcheck source=jenkins-helpers.sh
+. $scripts/jenkins-helpers.sh
+
+# DEBUG
+echo -e "\n$0 $*\n"
+
+convert_args_to_variables "$@"
+shift "$SHIFT_CONVERTED_ARGS"
+
+obligatory_variables rr[top_artifacts] notify build_script
+declare -A rr
+declare notify build_script
+
+generate_all="${generate_all-true}"
+generate_jira="${generate_jira-$generate_all}"
+generate_mail="${generate_mail-$generate_all}"
+generate_jenkins_html="${generate_jenkins_html-$generate_all}"
+generate_dashboard="${generate_dashboard-$generate_all}"
+generate_lnt="${generate_lnt-$generate_all}"
+post_all="${post_all-true}"
+post_jira_comment="${post_jira_comment-$post_all}"
+post_jira_card="${post_jira_card-$post_all}"
+post_mail="${post_mail-$post_all}"
+post_gcc_testresults="${post_gcc_testresults-$post_mail}"
+post_icommits="${post_icommits-$post_all}"
+post_dashboard="${post_dashboard-$post_all}"
+
+dryrun="${dryrun-false}"
+icommits="interesting-commits"
+stage="${stage-full}"
+verbose="${verbose-false}"
+
+# Generate notification files based on current and previous artifacts, which
+# are assumed to be from two consecutive builds.
+#
+# Notes:
+# 1. Current artifacts are in $top_artifacts. All files from $top_artifacts
+# are accessible read-only, and round-robin-notify.sh writes its files
+# into $top_artifacts/notify/.
+# 2. Files in "numbered" directories NN-* should not be used, since they
+# will eventually be removed.
+# 3. Previous artifacts are in base-artifacts/, which is a git repository.
+# All files from base-artifacts/ are accessible read-only.
+# 4. Round-robin-notify.sh can assume that files in $top_artifacts/ and
+# in base-artifacts/ have been generated by the latest version of scripts.
+# Developers will run round-robin-rewrite.sh when the format of files changes
+# to bring "history" results up-to-date with what current scripts expect.
+# 5. Round-robin-notify.sh can assume that check_regression() step was run
+# just before invocation of round-robin-notify.sh.
+
+if $verbose; then
+ set -x
+fi
+
+if $dryrun; then
+ dryrun="echo DRYRUN: "
+else
+ dryrun=""
+fi
+
+
+#========================================================================================
+#
+# SETUP/FINALIZE PROCEDURES
+#
+#========================================================================================
+
+# Sets the following global variables:
+declare top_artifacts ci_project ci_config notify_email
+setup_notify_environment ()
+{
+ echo "# ${FUNCNAME[0]}"
+
+ # setup global variables
+ top_artifacts="${rr[top_artifacts]}"
+ ci_project=$(get_current_manifest "{rr[ci_project]}")
+ ci_config=$(get_current_manifest "{rr[ci_config]}")
+
+ # Debug dump
+ echo "# Debug traces :"
+ echo "# Baseline : $(get_baseline_manifest BUILD_URL)"
+ echo "# Using dir : base-artifacts"
+ echo "# Artifacts : $(get_current_manifest BUILD_URL)"
+ echo "# Using dir : $top_artifacts"
+ echo ""
+
+ mkdir -p "$top_artifacts/notify"
+
+ case "$notify" in
+ *"@"*)
+ # Normally pw[PROJECT_patch_submitter] will have been set to
+ # "$notify" by precommit-ssh-apply.sh or pw-apply.sh. Manually
+ # started jobs where ${component}_git doesn't start with "ssh://" or
+ # "pw://" are an exception though, because then those scripts aren't
+ # called and thus the $pw array will be empty. Let's save the value
+ # here in case it's needed later.
+ notify_email="$notify"
+ notify="precommit"
+ ;;
+ esac
+
+ declare -Ag pw
+ if [ "$notify" = "precommit" ]; then
+ obligatory_variables pw_dir
+ declare -g pw_dir
+
+ local pw_file
+ while IFS= read -r -d '' pw_file; do
+ # shellcheck disable=SC1090
+ source "$pw_file"
+ done < <(find "$pw_dir" -type f -print0)
+ fi
+}
+
+# Exit with 0 status if given component has a single commit compared to baseline.
+# $1: component
+single_commit_p ()
+{
+ (
+ set -euf -o pipefail
+
+ local c="$1"
+
+ local base_rev cur_rev sha1
+ base_rev=$(get_baseline_git "${c}_rev")
+ cur_rev=$(get_current_git "${c}_rev")
+
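+ # "$cur_rev^@" expands to all parents of $cur_rev; if the baseline
+ # revision is among them, the build contains exactly one new commit.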
+ for sha1 in $(git -C "$c" rev-parse "$cur_rev^@"); do
+ if [ "$sha1" = "$base_rev" ]; then
+ # We have a single-commit build
+ return 0
+ fi
+ done
+
+ return 1
+ )
+}
+
+# check_source_changes : Looks at base-artifacts and artifacts to compute the
+# changes for the toolchain source files.
+# This procedure updates the following global variables for the rest of the notify pass.
+declare change_kind changed_single_component last_good first_bad
+declare -a changed_components
+
+check_source_changes ()
+{
+ echo "# ${FUNCNAME[0]}"
+
+ # Set ${changed_components[@]} unless this is an "init" build. For "init"
+ # builds we have no baseline_git data, so leave changed_components empty.
+ if [ "$(get_current_manifest "{rr[update_baseline]}")" != "init" ]; then
+ IFS=" " read -r -a changed_components <<< "$(print_changed_components)"
+ else
+ changed_components=()
+ fi
+
+ local c base_rev cur_rev c_commits
+ if [ ${#changed_components[@]} = 0 ]; then
+ change_kind="no_change"
+ changed_single_component=""
+ elif [ ${#changed_components[@]} = 1 ]; then
+ changed_single_component="${changed_components[0]}"
+ first_bad="$(get_current_git ${changed_single_component}_rev)"
+ last_good="$(get_baseline_git ${changed_single_component}_rev)"
+
+ # single_commit_p() depends on the $c repo being present and parseable.
+ local res
+ git -C "$changed_single_component" rev-parse --verify "HEAD" \
+ &>/dev/null &
+ res=0 && wait $! || res=$?
+ assert_with_msg "Cannot parse HEAD in repo $changed_single_component" \
+ [ $res = 0 ]
+
+ if single_commit_p "$changed_single_component"; then
+ change_kind="single_commit"
+ else
+ change_kind="single_component"
+ fi
+ else
+ change_kind="multiple_components"
+ changed_single_component=""
+ fi
+
+ # Debug dump
+ echo "# Debug traces :"
+ echo "# change_kind=$change_kind : ${changed_components[*]}"
+ for c in "${changed_components[@]}"; do
+ base_rev=$(get_baseline_git ${c}_rev)
+ cur_rev=$(get_current_git ${c}_rev)
+ c_commits=$(git -C $c rev-list --count $base_rev..$cur_rev || echo "??")
+ echo "# rev for $c : $base_rev..$cur_rev ($c_commits commits)"
+ done
+ echo ""
+}
+
+# Pointers to print routines.
+declare print_commits_f print_result_f print_config_f print_last_icommit_f
+
+setup_stages_to_run ()
+{
+ # If everything is fine, there is no reason to report to Jira & icommits.
+ if [ "$notify" = "onregression" ] \
+ && { [ "${rr[no_regression_result]}" = "0" ] \
+ || [ "$change_kind" != "single_commit" ]; }; then
+ notify=ignore
+ elif [ "$notify" = "precommit" ] \
+ && [ "${rr[no_regression_result]}" = "0" ]; then
+ notify=ignore
+ fi
+
+ if [ "$notify" = "ignore" ] || [ "$notify" = "precommit" ]; then
+ post_jira_comment=false
+ post_jira_card=false
+ post_icommits=false
+ # Even in "ignore" mode (successful build with no regression),
+ # we want to post gcc_testresults if relevant.
+ if [ "$notify" = "ignore" ]; then
+ post_mail=false
+ fi
+ if [ "$notify" = "precommit" ]; then
+ post_gcc_testresults=false
+ fi
+ fi
+
+ # Disable dashboard generation as results-vs-first is now disabled
+ generate_dashboard=false
+ post_dashboard=false
+
+ # Pointers to print routines.
+ print_commits_f=print_commits
+ print_result_f=print_result
+ print_config_f=print_config
+ print_last_icommit_f=print_last_icommit
+ generate_extra_details_f=generate_extra_details
+ case "$ci_project" in
+ tcwg_binutils*|tcwg_bootstrap*|tcwg_gcc*|tcwg_gdb*|tcwg_glibc*|tcwg_gnu*)
+ print_result_f=gnu_print_result
+ generate_extra_details_f=gnu_generate_extra_details
+ print_config_f=gnu_print_config
+ ;;
+ tcwg_bmk*)
+ print_result_f=bmk_print_result
+ print_config_f=bmk_print_config
+ generate_extra_details_f=bmk_generate_extra_details
+ ;;
+ *)
+ # By default keep generic ones set above
+ ;;
+ esac
+}
+
+release_notification_files ()
+{
+ echo "# ${FUNCNAME[0]}"
+ if ! [ -d $top_artifacts/jenkins ]; then
+ return
+ fi
+
+ local f
+ for f in mail-body.txt mail-subject.txt mail-recipients.txt; do
+ # Copy the file if it exists and is not empty.
+ # This is important for mail-recipients.txt, which may be empty.
+ if [ -s $top_artifacts/notify/$f ]; then
+ cp $top_artifacts/notify/$f $top_artifacts/jenkins/$f
+ fi
+ done
+
+ echo "... Done"
+}
+
+release_gcc_testresults_files ()
+{
+ echo "# ${FUNCNAME[0]}"
+ if ! [ -d $top_artifacts/jenkins ]; then
+ return
+ fi
+
+ # Send GCC test results only if the baseline was updated more than
+ # 1 day ago, so that we send at most one such email per day.
+ if [ -f $top_artifacts/testresults/testresults-mail-body.txt ]; then
+ base_d=$(get_baseline_manifest "{rr[gcc_testresults_date]}")
+ cur_d=$(get_current_component_date gcc || true)
+ if [ x"$cur_d" = x"" ]; then
+ return
+ fi
+ if [ x"$base_d" = x"" ]; then
+ base_d=0
+ fi
+
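+ # 86400 seconds == 24 hours.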
+ if [ $(( $cur_d - $base_d )) -ge 86400 ]; then
+ # The current date will become the baseline for the next build
+ # if the current baseline occurred more than one day ago.
+ cp $top_artifacts/testresults/testresults-mail-subject.txt \
+ $top_artifacts/testresults/testresults-mail-body.txt $top_artifacts/jenkins/
+ echo "gcc-testresults@gcc.gnu.org" > $top_artifacts/jenkins/testresults-mail-recipients.txt
+ cat <<EOF | manifest_out
+rr[gcc_testresults_date]=$cur_d
+EOF
+ else
+ # Otherwise, record the baseline date as the current
+ # gcc_testresults_date. Failing that, the next build will
+ # compare its date with 0 and may send its results more
+ # often than every day.
+ cat <<EOF | manifest_out
+rr[gcc_testresults_date]=$base_d
+EOF
+ fi
+ fi
+
+ echo "... Done"
+}
+
+#========================================================================================
+#
+# GENERATE EXTRA DETAILS
+#
+#========================================================================================
+gnu_generate_extra_details()
+{
+ (
+ set -euf -o pipefail
+
+ # Extract 'configure' and 'make' lines for steps where it makes sense.
+ local tmpfile
+ tmpfile=$(mktemp)
+
+ echo > $top_artifacts/notify/configure-make.txt
+
+ while read step_console
+ do
+ artifact_dir="$(dirname ${step_console})"
+ (
+ # We want to accept that 'xzgrep' can have zero matches below.
+ set +o pipefail
+ xzgrep RUN: ${step_console} | sed 's/.* RUN: //' > $tmpfile
+ )
+
+ grep /configure $tmpfile > $tmpfile-configure || true
+ grep "^make " $tmpfile > $tmpfile-make || true
+ if [ -s $tmpfile-configure ] || [ -s $tmpfile-make ]; then
+ echo "# $(basename $artifact_dir)" >> $top_artifacts/notify/configure-make.txt
+ cat $tmpfile-configure $tmpfile-make >> $top_artifacts/notify/configure-make.txt
+ echo >> $top_artifacts/notify/configure-make.txt
+ fi
+ done < <(set +f ; find $top_artifacts/[0-9][0-9]-* -name console.log.xz)
+
+ rm -f $tmpfile $tmpfile-configure $tmpfile-make
+
+ [ -s $top_artifacts/notify/configure-make.txt ] || rm -f $top_artifacts/notify/configure-make.txt
+
+ if ! [ -d $top_artifacts/sumfiles ]; then
+ return 0
+ fi
+
+ # Compare results using compare_tests for information purposes.
+ # It works well only when we compare complete testsuites.
+ # shellcheck disable=SC2154
+ gcc-compare-results/compare_tests -compr none -pass-thresh 0.9 \
+ base-artifacts/sumfiles \
+ "$top_artifacts/sumfiles" \
+ > $top_artifacts/notify/results.compare.txt &
+ wait $! || true
+ )
+}
+
+bmk_generate_extra_details()
+{
+ (
+ set -euf -o pipefail
+ local artifacts_mail_dir
+
+ artifacts_mail_dir=$top_artifacts/notify
+
+ $scripts/../bmk-scripts/output-bmk-results.py \
+ --compare_results $top_artifacts/results-vs-prev/compare-results-internal.csv \
+ --variability_file $top_artifacts/results-vs-prev/bmk-specific-variability-max.csv \
+ --variability_file_data "max" \
+ --run_step_dir "$artifacts_mail_dir/" \
+ --metric "${rr[metric_id]}" --mode "${rr[mode]}" \
+ --details verbose > "$artifacts_mail_dir/output-bmk-results.log" &
+
+ local res=0 && wait $! || res=$?
+ assert_with_msg "ERROR while trying to regenerate bmk-data results. Aborting.." [ $res = 0 ]
+ )
+}
+
+generate_extra_details()
+{
+ (
+ # nothing to do
+ true
+ )
+}
+
+#========================================================================================
+#
+# PRINT ROUTINES
+#
+#========================================================================================
+
+dump_model_only=${dump_model_only-false}
+
+#==========================================
+# *GENERIC* PRINT ROUTINES
+#==========================================
+
+###### PRINT COMMITS
+# --link : link to the commit (only if single_commit)
+# --oneline : commit title
+# --short : commit log
+print_commits()
+{
+ (
+ set -euf -o pipefail
+ $dump_model_only && echo "<<${FUNCNAME[0]} $*>>" && return
+ local print_arg=$1
+
+ if [ "$change_kind" = "no_change" ]; then
+ echo "baseline build"
+ return 0
+ fi
+
+ local more_lines
+ if [ "$change_kind" = "single_commit" ]; then
+ local c="${changed_single_component}"
+
+ if [ "$print_arg" = "--link" ]; then
+ local url
+ url=$(get_baseline_git ${c}_url)
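+ # Map the repo URL to its web front-end commit page, e.g. a (hypothetical)
+ # git://sourceware.org/git/gcc.git becomes
+ # https://sourceware.org/git/?p=gcc.git;a=commitdiff;h=$first_bad.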
+ if [[ "$url" =~ git://sourceware.org/git/ ]]; then
+ url="${url#git://sourceware.org/git/}"
+ url="https://sourceware.org/git/?p=$url"
+ echo "$url;a=commitdiff;h=$first_bad"
+ elif [[ "$url" =~ https://github.com/ ]] \
+ || [[ "$url" =~ https://gitlab.com/ ]]; then
+ echo "${url%.git}/commit/$first_bad"
+ elif [[ "$url" =~ https://git.linaro.org/ ]]; then
+ echo "${url}/commit/?id=$first_bad"
+ else
+ echo "See in $url"
+ fi
+
+ return 0
+ fi
+
+ local describe
+ if [ "${pw[$c]-}" = "" ]; then
+ describe=$(describe_sha1 "$c" "$first_bad" true)
+ # Remove the leading "basepoints/"
+ describe=$(echo "$describe" | sed 's,^basepoints/,,')
+ else
+ describe="$c patch #${pw[${c}_patch_id]}"
+ fi
+
+ if [ "$print_arg" = "--oneline" ]; then
+ echo "$describe"
+ return 0
+ fi
+
+ if [ "${pw[$c]-}" = "" ]; then
+ echo "commit $describe"
+ else
+ echo "$c patch ${pw[${c}_patch_url]}"
+ fi
+ local tmpfile
+ tmpfile=$(mktemp)
+ git -C "$c" log -n1 "$first_bad" | tail -n +2 > "$tmpfile"
+ head -n 10 $tmpfile
+ more_lines=$(($(cat "$tmpfile" | wc -l) - 10))
+ if [ $more_lines -gt 0 ]; then
+ echo "... $more_lines lines of the commit log omitted."
+ fi
+ rm $tmpfile
+
+ if [ "${pw[$c]-}" != "" ]; then
+ echo "... applied on top of baseline commit:"
+ git -C $c log -n1 --oneline $last_good || true
+ fi
+ return 0
+ fi
+
+ if [ "$change_kind" = "single_component" ] \
+ || [ "$change_kind" = "multiple_components" ]; then
+ local new_commits c base_rev cur_rev c_commits components
+
+ local commits_or_patches
+ if [ "${pw[project]-}" != "" ]; then
+ commits_or_patches="patches"
+ else
+ commits_or_patches="commits"
+ fi
+
+ new_commits=0
+ for c in "${changed_components[@]}"; do
+ base_rev=$(get_baseline_git ${c}_rev)
+ cur_rev=$(get_current_git ${c}_rev)
+ c_commits=$(git -C $c rev-list --count $base_rev..$cur_rev \
+ || echo 0)
+ new_commits=$(($new_commits + $c_commits))
+ done
+ components=$(echo "${changed_components[@]}" | tr ' ' ',')
+
+ echo "$new_commits $commits_or_patches in $components"
+
+ if [ "$print_arg" = "--oneline" ]; then
+ return 0
+ fi
+
+ for c in "${changed_components[@]}"; do
+ base_rev=$(get_baseline_git ${c}_rev)
+ cur_rev=$(get_current_git ${c}_rev)
+ c_commits=$(git -C $c rev-list --count $base_rev..$cur_rev \
+ || echo 0)
+
+ if [ "${pw[$c]-}" != "" ]; then
+ echo "Patchwork URL: ${pw[${c}_patch_url]}"
+ fi
+
+ git -C $c log -n 5 --oneline $base_rev..$cur_rev || true
+ if [ $c_commits -gt 5 ]; then
+ echo "... and $(($c_commits-5)) more $commits_or_patches in $c"
+ fi
+
+ if [ "${pw[$c]-}" != "" ]; then
+ echo "... applied on top of baseline commit:"
+ git -C $c log -n1 --oneline $base_rev || true
+ fi
+ done
+
+ return 0
+ fi
+ )
+}
+
+###### PRINT the RESULTS of this build
+# --oneline : either success or failure
+# --short : change of results.txt file between baseline and artifact
+# --long : same as --short
+print_result()
+{
+ $dump_model_only && echo "<<${FUNCNAME[0]} $*>>" && return
+
+ local print_arg=$1
+ case "$print_arg" in
+ --oneline)
+ if [ "${rr[no_regression_result]}" = "0" ]; then
+ echo "Success"
+ else
+ echo "Failure"
+ fi
+ ;;
+ --short|--long)
+ echo "Results changed to"
+ cat $top_artifacts/results
+ echo ""
+ echo "From"
+ cat base-artifacts/results
+ ;;
+ esac
+}
+
+###### PRINT LAST ICOMMIT
+# Extract the last status from icommit
+# --entry : print the output directory of the icommit entry
+# --status : print the icommit status
+# --reproduction_instructions_link : print the link to the reproduction instructions
+print_last_icommit ()
+{
+ (
+ $dump_model_only && echo "<<${FUNCNAME[0]}>>" && return
+ set -euf -o pipefail
+ local print_arg="$1"
+ shift 1
+
+ if [ x"$change_kind" != x"single_commit" ]; then
+ return 0
+ fi
+
+ local isubdir
+ isubdir=$(interesting_subdir "$changed_single_component" "$first_bad" "$@")
+
+ case "$print_arg" in
+ --entry)
+ echo "$icommits/$isubdir"
+ ;;
+ --status)
+ cat "$icommits/$isubdir/status.txt"
+ ;;
+ --reproduction_instructions_link)
+ print_icommits_link "$isubdir/reproduction_instructions.txt"
+ ;;
+ esac
+ )
+}
+
+# Print link url to interesting-commits.git repo for "$path".
+print_icommits_link ()
+{
+ (
+ set -euf -o pipefail
+ local path="$1"
+
+ local url="https://git-us.linaro.org/toolchain/ci/interesting-commits.git"
+ echo "$url/plain/$path"
+ )
+}
+
+# CHECK_IF_FIRST_REPORT : Check if this is the first report in icommits
+# Result stored in first_icommit_to_report global variable
+check_if_first_report()
+{
+ declare -g first_icommit_to_report
+
+ first_icommit_to_report=false
+ if [ x"$change_kind" != x"single_commit" ]; then
+ return
+ fi
+
+ local isubdir
+ isubdir=$(interesting_subdir "$changed_single_component" "$first_bad")
+ if ! [ -f "$icommits/$isubdir/first_url" ]; then
+ return
+ fi
+
+ local first_url
+ first_url=$(cat "$icommits/$isubdir/first_url")
+ if [ "$first_url" = "$(get_current_manifest BUILD_URL)" ]; then
+ first_icommit_to_report=true
+ elif [ "$notify" = "onregression" ]; then
+ # Send out emails for post-commit "onregression" reports only for
+ # the first detection.
+ post_mail=false
+ fi
+}
+
+###### PRINT the configuration of this build
+# --oneline : target short name (eg: arm/aarch64)
+# --short : short "pretty" version suitable for summary
+# --long : full details
+print_config()
+{
+ $dump_model_only && echo "<<${FUNCNAME[0]} $*>>" && return
+
+ local print_arg=$1
+ case "$print_arg" in
+ --oneline)
+ case "$ci_config" in
+ *arm*) echo "arm" ;;
+ *aarch64*) echo "aarch64" ;;
+ *) echo "$ci_config" ;;
+ esac
+ ;;
+ --short|--long)
+ echo "CI config $ci_project/$ci_config"
+ ;;
+ esac
+}
+
+print_artifacts_url ()
+{
+ (
+ set -euf -o pipefail
+
+ local url
+ url="$(get_current_manifest BUILD_URL)artifact/artifacts"
+ if [ "${pw[project]-}" != "" ]; then
+ url="$url/artifacts.precommit"
+ fi
+ echo "$url/$*"
+ )
+}
+
+#==========================================
+# *GNU* PRINT ROUTINES
+#==========================================
+
+###### PRINT the configuration of this build
+# --oneline : target short name (eg: arm/aarch64)
+# --short : short "pretty" version suitable for summary
+# --long : full details
+gnu_print_config()
+{
+ (
+ $dump_model_only && echo "<<${FUNCNAME[0]} $*>>" && return
+
+ # shellcheck source=tcwg_gnu-config.sh
+ . $scripts/tcwg_gnu-config.sh
+
+ settings_for_ci_project_and_config "$ci_project" "$ci_config"
+
+ local print_arg=$1
+ case "$print_arg" in
+ --oneline)
+ print_config "$print_arg"
+ ;;
+ --short)
+ echo "${gnu_data[pretty_project]} ${gnu_data[pretty_config]}"
+ ;;
+ --long)
+ echo "CI config $ci_project ${gnu_data[long_config]}"
+ ;;
+ esac
+ )
+}
+
+# Most of these hooks map to the generic implementation.
+gnu_print_result()
+{
+ $dump_model_only && echo "<<${FUNCNAME[0]} $*>>" && return
+ local print_arg="$1"
+
+ if ! [ -d $top_artifacts/sumfiles ]; then
+ print_result "$@"
+ return 0
+ fi
+
+ # By this point, gnu_generate_extra_details should already have been called.
+
+ local validate_failures="gcc-compare-results/contrib/testsuite-management/validate_failures.py"
+ local xfails="$top_artifacts/sumfiles/xfails.xfail"
+
+ if ! [ -f "$xfails" ]; then
+ return 0
+ fi
+
+ "$validate_failures" --manifest="$xfails" \
+ --expiry_date="${rr[result_expiry_date]}" \
+ --build_dir="$top_artifacts/sumfiles" --verbosity=1 \
+ > $top_artifacts/notify/regressions.sum &
+ wait $! || true
+ "$validate_failures" --inverse_match --manifest="$xfails" \
+ --expiry_date="${rr[result_expiry_date]}" \
+ --build_dir="$top_artifacts/sumfiles" --verbosity=1 \
+ > $top_artifacts/notify/progressions.sum &
+ wait $! || true
+
+ local n_regressions n_progressions pass_fail=PASS
+ if [ "${rr[no_regression_result]}" != "0" ]; then
+ pass_fail=FAIL
+ fi
+ n_regressions=$(grep -c "^[A-Z]\+:" \
+ $top_artifacts/notify/regressions.sum || true)
+ n_progressions=$(grep -c "^[A-Z]\+:" \
+ $top_artifacts/notify/progressions.sum || true)
+
+ printf "%s" "$pass_fail"
+ if [ "$n_regressions" != "0" ]; then
+ printf ": %s regressions" "$n_regressions"
+ else
+ rm $top_artifacts/notify/regressions.sum
+ fi
+ if [ "$n_progressions" != "0" ]; then
+ printf ": %s progressions" "$n_progressions"
+ else
+ rm $top_artifacts/notify/progressions.sum
+ fi
+ printf "\n"
+
+ if [ "$print_arg" = "--oneline" ]; then
+ return 0
+ fi
+
+ local length=10 outfile n_lines
+ if [ "$print_arg" = "--long" ]; then
+ # "head -n -0" prints all lines except the last zero, i.e. the whole file.
+ length=-0
+ fi
+
+ for outfile in regressions.sum progressions.sum; do
+ if ! [ -f $top_artifacts/notify/$outfile ]; then
+ continue
+ fi
+
+ echo
+ echo "$outfile:"
+ n_lines=$(grep -v "Results Summary" $top_artifacts/notify/$outfile \
+ | wc -l)
+ n_lines=$(($n_lines - $length))
+ head -n$length $top_artifacts/notify/$outfile \
+ | grep -v "Results Summary"
+ if [ $n_lines -gt 0 ] && [ $length != -0 ]; then
+ echo "... and $n_lines more entries"
+ fi
+ done
+
+ cat <<EOF
+
+You can find the failure logs in *.log.1.xz files in
+ - $(print_artifacts_url 00-sumfiles/)
+The full lists of regressions and progressions as well as configure and make commands are in
+ - $(print_artifacts_url notify/)
+The list of [ignored] baseline and flaky failures are in
+ - $(print_artifacts_url sumfiles/xfails.xfail)
+EOF
+}
+
+
+#==========================================
+# *BMK* PRINT ROUTINES
+#==========================================
+
+# print_result: either the oneline version or short/long.
+# Examples:
+# --oneline :
+#   644.nab_s slowed down by 4%
+# --short/long :
+#   the following benchmarks slowed down by more than 3%:
+#   - 644.nab_s slowed down by 4% from 112963 to 117189 perf samples
+bmk_print_result()
+{
+ (
+ set -euf -o pipefail
+
+ $dump_model_only && echo "<<${FUNCNAME[0]} $*>>" && return
+
+ local print_arg=$1
+
+ artifacts_mail_dir=$top_artifacts/notify
+
+ if [ "$stage" != "full" ]; then
+ return
+ fi
+
+ ## Prepare data
+
+ # By this point, bmk_generate_extra_details should already have been called.
+
+ # If there's at least one regression, don't bother with improvements.
+ local improved_or_regressed
+ if [ -f $artifacts_mail_dir/exe.regression ] || [ -f $artifacts_mail_dir/symbol.regression ]; then
+ improved_or_regressed=regression
+ else
+ improved_or_regressed=improvement
+ fi
+
+ declare -A changed_by_msg
+ changed_by_msg[size-regression]="grew in size by"
+ changed_by_msg[size-improvement]="reduced in size by"
+ changed_by_msg[sample-regression]="slowed down by"
+ changed_by_msg[sample-improvement]="sped up by"
+ changed_by_msg[num_vect_loops-regression]="reduced the number of vect loops by"
+ changed_by_msg[num_vect_loops-improvement]="increased the number of vect loops by"
+ changed_by_msg[num_sve_loops-regression]="reduced the number of sve instructions by"
+ changed_by_msg[num_sve_loops-improvement]="increased the number of sve instructions by"
+ changed_by=${changed_by_msg[${rr[metric_id]}-$improved_or_regressed]}
+
+ # FIXME: Remove hard-coded thresholds
+ # thresholds
+ case ${rr[metric_id]} in
+ size)
+ exe_threshold=1 # We use 1% tolerance for binary size
+ symbol_threshold=10 # and 10% tolerance for symbol size.
+ ;;
+ sample)
+ # Reduce thresholds when bisecting to avoid considering borderline
+ # regressions as spurious. This should break cycles of build and
+ # bisect jobs triggering each other on borderline regressions.
+ exe_threshold=3
+ symbol_threshold=15
+ ;;
+ num_vect_loops|num_sve_loops)
+ exe_threshold=0
+ symbol_threshold=0
+ ;;
+ *) assert false ;;
+ esac
+
+ # Now print result
+ case "$print_arg" in
+ --oneline)
+ assert_with_msg "Builds with infra problems should never get here" \
+ [ "${rr[no_regression_result]}" != "$EXTERNAL_FAIL" ]
+
+ # Generate readable oneline diag
+ local metric bmk symbol short_diag long_diag
+ if [ -f $artifacts_mail_dir/exe.$improved_or_regressed ]; then
+ # shellcheck disable=SC2034
+ IFS=, read metric bmk symbol short_diag long_diag < <(head -n1 $artifacts_mail_dir/exe.$improved_or_regressed)
+ elif [ -f $artifacts_mail_dir/symbol.$improved_or_regressed ]; then
+ # shellcheck disable=SC2034
+ IFS=, read metric bmk symbol short_diag long_diag < <(head -n1 $artifacts_mail_dir/symbol.$improved_or_regressed)
+ else
+ short_diag="No change"
+ fi
+ echo "$short_diag"
+ ;;
+
+ --short|--long)
+ # The following exe regressed/improved:
+ if [ -f $artifacts_mail_dir/exe.$improved_or_regressed ]; then
+ sort -gr -o $artifacts_mail_dir/exe.$improved_or_regressed \
+ $artifacts_mail_dir/exe.$improved_or_regressed
+
+ echo "the following benchmarks $changed_by more than ${exe_threshold}%:"
+ local metric exe symbol short_diag long_diag
+ while IFS=, read metric exe symbol short_diag long_diag; do
+ echo "- $long_diag"
+ if [ -f $artifacts_mail_dir/$exe.symbols-$improved_or_regressed ]; then
+ while IFS=, read metric bmk symbol short_diag long_diag; do
+ echo " - $long_diag"
+ done < $artifacts_mail_dir/$exe.symbols-$improved_or_regressed
+ # Delete $exe.symbols-$improved_or_regressed so that it
+ # doesn't show up in the symbol loop below.
+ rm $artifacts_mail_dir/$exe.symbols-$improved_or_regressed
+ fi
+ done < $artifacts_mail_dir/exe.$improved_or_regressed
+ fi
+
+ # The following functions regressed/improved:
+ if [ -f $artifacts_mail_dir/symbol.$improved_or_regressed ]; then
+ echo "the following hot functions $changed_by more than ${symbol_threshold}% (but their benchmarks $changed_by less than ${exe_threshold}%):"
+ local metric bmk symbol short_diag long_diag
+ # shellcheck disable=SC2034
+ while IFS=, read metric bmk symbol short_diag long_diag; do
+ echo "- $long_diag"
+ done < $artifacts_mail_dir/symbol.$improved_or_regressed
+ fi
+
+ if ! [ -f $artifacts_mail_dir/exe.$improved_or_regressed ] && ! [ -f $artifacts_mail_dir/symbol.$improved_or_regressed ]; then
+ echo "No change"
+ fi
+ ;;
+ esac
+ )
+}
+
+###### PRINT the configuration of this build
+# --oneline : target short name (eg: arm/aarch64)
+# --short : short "pretty" version suitable for summary
+# --long : full details
+bmk_print_config()
+{
+ # shellcheck source=tcwg_bmk-config.sh
+ . $scripts/tcwg_bmk-config.sh
+
+ $dump_model_only && echo "<<${FUNCNAME[0]} $*>>" && return
+
+ # ${ci_project}--${ci_config} format is :
+ # 'tcwg_bmk-#{PROFILE_NAME}-#{BMK}--#{TOOLCHAIN}-#{TARGET}-{toolchain_ver}-{cflags}'
+ IFS=- read -a ci_pjt_cfg <<EOF
+$ci_project--$ci_config
+EOF
+ local toolchain target cflags
+ toolchain="${ci_pjt_cfg[4]}"
+ target="${ci_pjt_cfg[5]}"
+ cflags="${ci_pjt_cfg[7]}"
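+ # E.g. a hypothetical "tcwg_bmk-code_speed-spec2k6--gnu-aarch64-master-O2"
+ # splits on '-' into: tcwg_bmk code_speed spec2k6 '' gnu aarch64 master O2,
+ # so fields 4, 5 and 7 yield toolchain=gnu, target=aarch64, cflags=O2.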
+
+ local compiler="" libc="" linker="" version="" bmk_flags="" hw=""
+ bmk_flags=$(echo "$cflags" | sed -e "s/_/ -/g")
+
+ local print_arg=$1
+ case "$print_arg" in
+ --oneline)
+ case "$ci_config" in
+ *arm*) echo "arm $bmk_flags" ;;
+ *aarch64*) echo "aarch64 $bmk_flags" ;;
+ *) echo "$ci_config" ;;
+ esac
+ return 0
+ ;;
+ --short)
+ print_config "$print_arg"
+ return 0
+ ;;
+ esac
+
+ # --long as default
+
+ local bmk_suite publish_save_temps
+ bmk_suite=""
+ publish_save_temps=false
+ case "$(tcwg_bmk_benchs)" in
+ coremark)
+ bmk_suite="EEMBC CoreMark"
+ ;;
+ spec2k6|4*)
+ bmk_suite="SPEC CPU2006"
+ publish_save_temps=true
+ ;;
+ spec2017*|5*|6*)
+ bmk_suite="SPEC CPU2017"
+ publish_save_temps=true
+ ;;
+ esac
+
+ cat <<EOF
+The reproducer instructions below can be used to re-build both "first_bad" and "last_good" cross-toolchains used in this bisection. Naturally, the scripts will fail when triggering benchmarking jobs if you don't have access to Linaro TCWG CI.
+EOF
+
+ # Copy save-temps tarballs to artifacts, so that they are accessible.
+ # We can publish pre-processed source only for benchmarks derived from
+ # open-source projects.
+ # Note that we include save-temps artifacts for successful builds so that
+ # "last_good" build has the artifacts.
+ if $publish_save_temps; then
+ mkdir -p ${top_artifacts}/top-artifacts
+ local s_t
+ while read s_t; do
+ case "$s_t" in
+ 400.perlbench*|500.perlbench*|600.perlbench*) ;;
+ 401.bzip2*) ;;
+ 403.gcc*|502.gcc*|602.gcc*) ;;
+ 435.gromacs*) ;;
+ 436.cactusADM*|507.cactuBSSN*|607.cactuBSSN*) ;;
+ 445.gobmk*) ;;
+ 454.calculix*) ;;
+ 456.hmmer*) ;;
+ 462.libquantum*) ;;
+ 465.tonto*) ;;
+ 481.wrf*|521.wrf*|621.wrf*) ;;
+ 482.sphinx3*) ;;
+ 483.xalanc*) ;;
+ 505.mcf*|605.mcf*)
+ # 429.mcf is not present in redistributable_sources/ :-|
+ ;;
+ 511.povray*) ;;
+ 525.x264*|625.x264*)
+ # 464.h264ref is not present in redistributable_sources/ :-|
+ ;;
+ 526.blender*) ;;
+ 527.cam4*|627.cam4*) ;;
+ 538.imagick*|638.imagick*) ;;
+ 544.nab*|644.nab*) ;;
+ 554.roms*|654.roms*) ;;
+ 557.xz*|657.xz*) ;;
+ 628.pop2) ;;
+ *)
+ # Can't redistribute benchmark sources.
+ continue
+ ;;
+ esac
+ mkdir -p ${top_artifacts}/top-artifacts/save-temps
+ cp "$s_t" ${top_artifacts}/top-artifacts/save-temps/
+ done < <(find results-1 -path "save.*.temps/*.tar.xz")
+ fi
+
+ if [ -d ${top_artifacts}/top-artifacts/save-temps/ ]; then
+ cat <<EOF
+
+For your convenience, we have uploaded tarballs with pre-processed source and assembly files at:
+- First_bad save-temps: \$FIRST_BAD_ARTIFACTS/save-temps/
+- Last_good save-temps: \$LAST_GOOD_ARTIFACTS/save-temps/
+- Baseline save-temps: \$BASELINE_ARTIFACTS/save-temps/
+EOF
+ fi
+
+ case "$toolchain" in
+ gnu)
+ compiler="GCC"
+ libc="Glibc"
+ linker="GNU Linker"
+ ;;
+ gnu_eabi)
+ compiler="GCC"
+ libc="Newlib"
+ linker="GNU LD"
+ ;;
+ llvm)
+ compiler="Clang"
+ libc="Glibc"
+ linker="LLVM Linker"
+ ;;
+ esac
+ case "$ci_config" in
+ *-master-*) version="tip of trunk" ;;
+ *-release-*) version="latest release branch" ;;
+ esac
+ case "$(tcwg_bmk_hw)" in
+ *apm*) hw="APM Mustang 8x X-Gene1" ;;
+ *tk1*) hw="NVidia TK1 4x Cortex-A15" ;;
+ *tx1*) hw="NVidia TX1 4x Cortex-A57" ;;
+ *stm32*) hw="STMicroelectronics STM32L476RGTx 1x Cortex-M4" ;;
+ *fx*) hw="Fujitsu FX700 48x AA64" ;;
+ *qc*) hw="Qualcomm 8x AA64" ;;
+ *) hw="<unknown>" ;;
+ esac
+
+ cat <<EOF
+
+Configuration:
+- Benchmark: $bmk_suite
+- Toolchain: $compiler + $libc + $linker
+- Version: all components were built from their $version
+- Target: $(print_gnu_target $target)
+- Compiler flags: $bmk_flags
+- Hardware: $hw
+
+This benchmarking CI is work-in-progress, and we welcome feedback and suggestions at linaro-toolchain@lists.linaro.org . Our improvement plans include adding support for SPEC CPU2017 benchmarks and providing "perf report/annotate" data behind these reports.
+EOF
+}
+
+#========================================================================================
+#
+# INTERESTING_COMMITS PROCEDURES
+#
+#========================================================================================
+
+# Print out merged version of status-summary.txt files at a given subdir level.
+# As an initial approximation, just pick a summary with the biggest number.
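+# E.g. given hypothetical summaries "FAIL: 2 regressions" and
+# "FAIL: 7 regressions", the one reporting 7 is printed.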
+merge_status_summary ()
+{
+ (
+ set -euf -o pipefail
+ local subdir="$1"
+
+ local cur_file cur best="" best_file
+
+ while read -r cur_file; do
+ # Extract a number " N " or " N%"
+ cur=$(sed -e "s/.* \([0-9]\+\)[ %].*/\1/" "$cur_file")
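+ # On the first iteration "$best" is empty, so the numeric test fails
+ # and the negation takes the branch, initializing best/best_file.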
+ if ! [ "$cur" -le "$best" ]; then
+ best="$cur"
+ best_file="$cur_file"
+ fi
+ done < <(find "$subdir" -mindepth 2 -maxdepth 2 \
+ -name "status-summary.txt" | sort)
+
+ cat "$best_file"
+ )
+}
+
+# update_interesting_commits : update the local icommits repository with
+# data from the new run.
+# For a single_commit regression, this may also lead to updating the
+# first_report variable.
+#
+# Interesting-commits store regression history in the following hierarchy:
+# 1. At the top level we have COMPONENT directories.
+# 2. At the 2nd level we have SHA1 directories.
+# 2a. We also have "git describe" symlinks to SHA1 directories for convenience.
+# 3. At the 3rd level we have CI_PROJECT directories.
+# 4. At the 4th level we have CI_CONFIG directories, and ...
+# 4a. ... status.txt, which contains status of changes from SHA1 across all
+# CI_CONFIGs in CI_PROJECT.
+# 5. At the 5th level we have per-build files last_good, details.txt, etc.
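+#
+# For illustration, one entry might look like this (hypothetical SHA1,
+# project and config):
+#   gcc/sha1/0123abcd.../tcwg_gnu_cross/status.txt
+#   gcc/sha1/0123abcd.../tcwg_gnu_cross/master-aarch64/last_good
+#   gcc/gcc-14-1234-g0123abcd -> sha1/0123abcd... ("git describe" symlink)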
+update_interesting_commits ()
+{
+ echo "# ${FUNCNAME[0]}"
+
+ local stage="$1"
+ local jira_key="$2"
+
+ # We have successfully identified a bad commit with a good parent!
+ # Add $first_bad to interesting commits.
+
+ local subdir3 subdir4 subdir5
+ # Commit top-level dir
+ subdir3=$(interesting_subdir "$changed_single_component" "$first_bad")
+ # Commit project-level dir
+ subdir4=$(interesting_subdir "$changed_single_component" "$first_bad" \
+ "$ci_project")
+ # Commit config-level dir
+ subdir5=$(interesting_subdir "$changed_single_component" "$first_bad" \
+ "$ci_project" "$ci_config")
+
+ if ! [ -d "$icommits/$subdir3" ]; then
+ mkdir -p $icommits/$subdir3
+ get_current_manifest BUILD_URL > $icommits/$subdir3/first_url
+ git -C $icommits add $subdir3/first_url
+ fi
+
+ # Update interesting-commits/$component/$first_bad/$ci_project/$ci_config
+ mkdir -p "$icommits/$subdir5"
+ echo "$(get_current_manifest BUILD_URL)artifact/artifacts" > "$icommits/$subdir5/build_url"
+ echo "$last_good" > "$icommits/$subdir5/last_good"
+ git -C "$icommits" add "$subdir5/build_url" "$subdir5/last_good"
+
+ if [ "$stage" != "full" ]; then
+ return
+ fi
+
+ # $icommit/<comp>/sha1/<sha1>/.../status-summary.txt
+ $print_result_f --oneline > "$icommits/$subdir5/status-summary.txt"
+ merge_status_summary $icommits/$subdir4 \
+ > "$icommits/$subdir4/status-summary.txt"
+ merge_status_summary $icommits/$subdir3 \
+ > "$icommits/$subdir3/status-summary.txt"
+ git -C "$icommits" add \
+ "$subdir5/status-summary.txt" \
+ "$subdir4/status-summary.txt" \
+ "$subdir3/status-summary.txt"
+
+ $print_result_f --long > "$icommits/$subdir5/details.txt"
+ if [ -f $top_artifacts/notify/configure-make.txt ]; then
+ cat $top_artifacts/notify/configure-make.txt >> "$icommits/$subdir5/details.txt"
+ fi
+
+ # $icommit/<comp>/sha1/<sha1>/<ci_project>/<ci_config>/status.txt
+ (
+ cat "$icommits/$subdir5/status-summary.txt"
+ print_icommits_link "$subdir5/details.txt"
+ cat $icommits/$subdir5/build_url
+ ) | sed "s/^/* /" > "$icommits/$subdir5/status.txt"
+
+ git -C "$icommits" add "$subdir5/details.txt" "$subdir5/status.txt"
+
+ # FIXME: Remove transitional workaround for summary.txt -> status.txt
+ if [ -f "$icommits/$subdir5/summary.txt" ]; then
+ git -C "$icommits" rm "$subdir5/summary.txt"
+ fi
+
+ # $icommit/<comp>/sha1/<sha1>/<ci_project>/<ci_config>/reproduction_instructions.txt
+ local bad_artifacts_url good_artifacts_url
+ bad_artifacts_url="$(get_current_manifest BUILD_URL)artifact/artifacts"
+ good_artifacts_url="$(get_baseline_manifest BUILD_URL)artifact/artifacts"
+ cat > $icommits/$subdir5/reproduction_instructions.txt << EOF
+mkdir -p investigate-$changed_single_component-$first_bad
+cd investigate-$changed_single_component-$first_bad
+
+# Fetch scripts
+git clone https://git.linaro.org/toolchain/jenkins-scripts
+
+# Fetch manifests for bad and good builds
+mkdir -p bad/artifacts good/artifacts
+curl -o bad/artifacts/manifest.sh $bad_artifacts_url/manifest.sh --fail
+curl -o good/artifacts/manifest.sh $good_artifacts_url/manifest.sh --fail
+
+# Reproduce bad build
+(cd bad; ../jenkins-scripts/${build_script} @@rr[top_artifacts] artifacts)
+# Reproduce good build
+(cd good; ../jenkins-scripts/${build_script} @@rr[top_artifacts] artifacts)
+EOF
+ git -C "$icommits" add $subdir5/reproduction_instructions.txt
+
+ # $icommit/<comp>/sha1/<sha1>/<ci_project>/status.txt
+ local ci_config
+ while read ci_config; do
+ # FIXME: Remove transitional workaround for summary.txt -> status.txt
+ if [ -f "$icommits/$subdir4/$ci_config/summary.txt" ]; then
+ echo "* $ci_config"
+ cat $icommits/$subdir4/$ci_config/summary.txt | sed "s/^/** /"
+ continue
+ fi
+
+ if ! [ -f "$icommits/$subdir4/$ci_config/status.txt" ]; then
+ continue
+ fi
+
+ echo "* $ci_config"
+ cat $icommits/$subdir4/$ci_config/status.txt | sed "s/^/*/"
+ done < <(cd $icommits/$subdir4; ls) > "$icommits/$subdir4/status.txt"
+ git -C "$icommits" add "$subdir4/status.txt"
+
+ # $icommit/<comp>/sha1/<sha1>/status.txt
+ local ci_project
+ while read ci_project; do
+ if ! [ -f "$icommits/$subdir3/$ci_project/status.txt" ]; then
+ continue
+ fi
+
+ echo "* $ci_project"
+ cat $icommits/$subdir3/$ci_project/status.txt | sed "s/^/*/"
+ done < <(cd $icommits/$subdir3; ls) > "$icommits/$subdir3/status.txt"
+ git -C "$icommits" add "$subdir3/status.txt"
+
+ # $icommit/<comp>/sha1/<sha1>/commit-log.txt
+ $print_commits_f --short > "$icommits/$subdir3/commit-log.txt"
+ git -C "$icommits" add "$subdir3/commit-log.txt"
+
+ if $generate_jira; then
+ local jira_dir="$subdir3/jira"
+
+ if [ -f "$icommits/$jira_dir/key" ]; then
+ assert_with_msg "Should not have created multiple jira cards" \
+ [ "$jira_key" = "" ]
+ jira_key=$(cat "$icommits/$jira_dir/key")
+ fi
+
+ # Recreate jira/ dir with up-to-date info.
+ if [ -e "$icommits/$jira_dir" ]; then
+ git -C "$icommits" rm -rf "$jira_dir"
+ fi
+ mkdir "$icommits/$jira_dir"
+
+ if [ "$jira_key" != "" ]; then
+ echo "$jira_key" > "$icommits/$jira_dir/key"
+ git -C "$icommits" add "$jira_dir/key"
+ fi
+
+ # Keep jira/summary in sync with mail-subject.txt
+ echo "$($print_commits_f --oneline):" \
+ "$(cat "$icommits/$subdir3/status-summary.txt")" \
+ > "$icommits/$jira_dir/summary"
+ git -C "$icommits" add "$jira_dir/summary"
+
+ # FIXME: Unify format with email
+ # We stop posting updates to jira cards once they are closed to avoid
+ # spamming developers about resolved issues. Therefore, jira data in
+ # interesting-commits.git repo may be more up-to-date than in
+ # the actual jira cards. We add a link to jira/yaml in the card
+ # description to make it easy for developers to look at the latest
+ # info for closed cards.
+ cat > "$icommits/$jira_dir/description" <<EOF
+Commit: $($print_commits_f --link)
+$(cat "$icommits/$subdir3/commit-log.txt")
+
+$(cat "$icommits/$subdir3/status.txt")
+
+Latest data: $(print_icommits_link "$jira_dir/yaml")
+EOF
+ git -C "$icommits" add "$jira_dir/description"
+
+ update_jira_card
+ fi
+
+ # Generate a $describe_sha1 symlink
+ local describe
+ describe=$(describe_sha1 "$changed_single_component" "$first_bad" false)
+ if [ "$describe" != "" ]; then
+ local d
+ d=$(dirname "$describe")
+ mkdir -p $icommits/$changed_single_component/$d
+ local symlink=""
+ while [ "$d" != "." ]; do
+ symlink="../$symlink"
+ d=$(dirname "$d")
+ done
+ symlink="${symlink}sha1/$first_bad"
+ # ??? For reasons not yet understood, overwriting the symlink with
+ # "ln -sf" may create a second [broken] symlink.
+ rm -f $icommits/$changed_single_component/$describe
+ ln -s $symlink $icommits/$changed_single_component/$describe
+ git -C $icommits add $changed_single_component/$describe
+ fi
+}
+
+
+# Generate entry in interesting-commits.git and, if $post_icommits,
+# push it. This is applicable only for single-commit case.
+# This is called twice -- first time with $stage == init, and second time
+# with $stage == full.
+# During "init" stage we fetch existing entries and create a very basic entry
+# if there isn't one. This allows us to determine in check_if_first_report()
+# whether this is the first build to discover $first_bad as an interesting
+# commit.
+post_interesting_commits ()
+{
+ (
+ set -euf -o pipefail
+ echo "# ${FUNCNAME[0]}"
+
+ local stage="$1"
+
+ if [ "$change_kind" != "single_commit" ]; then
+ return
+ fi
+
+ # Clone the interesting-commits.git repo, which contains regression
+ # summaries for SHA1s. These are the "first_bad" commits.
+ clone_or_update_repo $icommits master \
+ https://git-us.linaro.org/toolchain/ci/interesting-commits.git \
+ auto master
+
+ if ! $post_icommits; then
+ dryrun="echo DRYRUN: "
+ fi
+
+ local jira_dir jira_key=""
+ jira_dir=$(interesting_subdir "$changed_single_component" "$first_bad")
+ jira_dir="$jira_dir/jira"
+ if [ "$stage" = "full" ] && $post_jira_card && $first_icommit_to_report \
+ && [ "$dryrun" = "" ]; then
+ if ! [ -f "$icommits/$jira_dir/key" ]; then
+ # Create jira card for this interesting commit.
+ # We first create the card,
+ # then add a link to it in interesting-commits.git,
+ # then we update the card's content every time we change entry in
+ # interesting-commits.
+ jira_key=$(create_jira_card)
+ else
+ echo "WARNING: jira card already exists $icommits/$jira_dir/key:" \
+ "$(cat "$icommits/$jira_dir/key")"
+ fi
+ fi
+
+ # Regenerate the interesting-commits entry and try to push it upstream.
+ # - If the push fails, update icommits and retry.  This can happen when
+ #   another job pushed to interesting-commits just before this one.
+ # - If the push succeeds, we consider round-robin-notify.sh successful.
+ while true; do
+ # Reset to master branch
+ git -C $icommits remote update -p
+ git_clean $icommits refs/remotes/origin/master
+
+ update_interesting_commits "$stage" "$jira_key"
+
+ # Commit changes (if any) to local clone of interesting-commits.git
+ git -C $icommits commit \
+ -m "Add entry $first_bad from $(get_current_manifest BUILD_URL)" \
+ || break
+
+ $dryrun git -C $icommits push \
+ ssh://git-us.linaro.org/toolchain/ci/interesting-commits.git \
+ HEAD:refs/heads/master &
+ if wait $!; then
+ # Push successful : stop here
+ break
+ fi
+ # Push failed. update icommit and retry pushing
+ done
+ )
+}
+
+#========================================================================================
+#
+# JIRA RELATED PROCEDURES
+#
+#========================================================================================
+
+print_jira_template_card ()
+{
+ # Catch-all case for when project/config IDs change, so that we
+ # won't miss notifications. Forward all that to GNU-692.
+ local jira_card="GNU-692"
+ case "$ci_project/$ci_config:$changed_single_component" in
+ tcwg_kernel/gnu-*:linux) jira_card="GNU-681" ;;
+ tcwg_kernel/gnu-*:*) jira_card="GNU-680" ;;
+ tcwg_kernel/llvm-*:linux) jira_card="LLVM-647" ;;
+ tcwg_kernel/llvm-*:*) jira_card="LLVM-646" ;;
+ tcwg_bmk-*_speed*/gnu*) jira_card="GNU-689" ;;
+ tcwg_bmk-*_size*/gnu*) jira_card="GNU-686" ;;
+ tcwg_bmk-*_vect*/gnu*) jira_card="GNU-988" ;;
+ tcwg_bmk-*_sve*/gnu*) jira_card="GNU-988" ;;
+ tcwg_bmk-*_speed*/llvm*) jira_card="LLVM-651" ;;
+ tcwg_bmk-*_size*/llvm*) jira_card="LLVM-650" ;;
+ tcwg_bmk-*_vect*/llvm*) jira_card="LLVM-1013" ;;
+ tcwg_bmk-*_sve*/llvm*) jira_card="LLVM-1013" ;;
+ tcwg_aosp-*/*) jira_card="LLVM-1014" ;;
+ esac
+ echo "$jira_card"
+}
+
+# Create jira card for this interesting commit.
+# Link to this card is stored in $icommits/.../jira/key
+create_jira_card ()
+{
+ (
+ set -euf -o pipefail
+
+ local template project parent assignee yaml
+ template=$(print_jira_template_card)
+ project="${template%%-*}"
+ parent=$(jipsearch -j "key=$template" -s parent:key \
+ | sed -e "s/.* , //")
+ assignee=$(jipsearch -j "key=$template" -s assignee:emailAddress \
+ | sed -e "s/.* , //" || true)
+ if [ "$assignee" = "" ]; then
+ # The template card is unassigned, so use parent's assignee.
+ assignee=$(jipsearch -j "key=$parent" -s assignee:emailAddress \
+ | sed -e "s/.* , //")
+ fi
+
+ yaml=$(mktemp)
+ # shellcheck disable=SC2064
+ trap "rm $yaml" EXIT
+
+ cat > $yaml <<EOF
+- Project: $project
+ IssueType: Sub-task
+ Parent: $parent
+ Summary: $changed_single_component:$first_bad
+ AssigneeEmail: $assignee
+EOF
+ local key
+ key=$(jipcreate -f $yaml | sed -e "s#.*/##")
+ echo "$key"
+ )
+}
+
+# Print the existing jira card number for this interesting commit.
+print_jira_card_key ()
+{
+ (
+ set -euf -o pipefail
+
+ local jira_dir
+ jira_dir=$(interesting_subdir "$changed_single_component" "$first_bad")
+ jira_dir="$jira_dir/jira"
+
+ if ! [ -f "$icommits/$jira_dir/key" ]; then
+ return 0
+ fi
+
+ cat "$icommits/$jira_dir/key"
+ )
+}
+
+# Update jira card for this interesting commit.
+# Link to this card is stored in $icommits/.../jira/key
+update_jira_card ()
+{
+ (
+ set -euf -o pipefail
+ echo "# ${FUNCNAME[0]}"
+
+ local jira_dir
+ jira_dir=$(interesting_subdir "$changed_single_component" "$first_bad")
+ jira_dir="$jira_dir/jira"
+
+ local -a components=()
+ case "$changed_single_component" in
+ binutils) components+=(Binutils) ;;
+ gcc) components+=(GCC) ;;
+ gdb) components+=(GDB) ;;
+ glibc) components+=(Glibc) ;;
+ linux) components+=(Linux) ;;
+ llvm) components+=(LLVM) ;;
+ newlib) components+=(Newlib) ;;
+ qemu) components+=(QEMU) ;;
+ *) components+=(CI) ;;
+ esac
+ (IFS=","; echo "${components[*]}") > "$icommits/$jira_dir/components"
+ git -C "$icommits" add "$jira_dir/components"
+
+ local commit_date
+ commit_date=$(git -C "$changed_single_component" log -n1 \
+ --pretty="%cd" --date=iso "$first_bad")
+ date -d "$commit_date" +%Y-%m-%d > "$icommits/$jira_dir/startdate"
+ git -C "$icommits" add "$jira_dir/startdate"
+
+ local key project
+ key=$(print_jira_card_key)
+ if [ -z "$key" ]; then
+ echo "WARNING: no existing jira card $icommits/$jira_dir/key"
+ return 0
+ fi
+
+ project="${key%%-*}"
+
+ local yaml="$icommits/$jira_dir/yaml"
+
+ cat > "$yaml" <<EOF
+- Project: $project
+ IssueType: Sub-task
+ Key: $key
+ Summary: |
+EOF
+ # Summary can have spaces and other special-to-yaml symbols;
+ # quote using " |"
+ sed -e "s/^/ /" "$icommits/$jira_dir/summary" >> "$yaml"
+ cat >> "$yaml" <<EOF
+ Components: $(cat "$icommits/$jira_dir/components")
+ Start date: $(cat "$icommits/$jira_dir/startdate")
+ Description: |
+EOF
+ sed -e "s/^/ /" "$icommits/$jira_dir/description" >> "$yaml"
+ git -C "$icommits" add "$jira_dir/yaml"
+ )
+}
+
+# Generate notify/jira/* files
+generate_jira_dir()
+{
+ (
+ set -euf -o pipefail
+
+ local icommit_entry jira_key=""
+ icommit_entry=$($print_last_icommit_f --entry)
+
+ if [ "$icommit_entry" != "" ] && [ -d "$icommit_entry/jira" ]; then
+ rsync -a "$icommit_entry/jira/" "$top_artifacts/notify/jira/"
+ if [ -f "$top_artifacts/notify/jira/key" ]; then
+ jira_key=$(cat "$top_artifacts/notify/jira/key")
+ fi
+ else
+ mkdir -p "$top_artifacts/notify/jira"
+ fi
+
+ if [ "$jira_key" != "" ]; then
+ cat > $top_artifacts/notify/jira/comment-card.txt <<EOF
+[$jira_key]
+$($print_result_f --oneline)
+Details: $(print_artifacts_url notify/mail-body.txt/*view*/)
+EOF
+ cat > $top_artifacts/notify/jira/comment-template.txt <<EOF
+[$(print_jira_template_card)]
+https://linaro.atlassian.net/browse/$jira_key
+$($print_result_f --oneline)
+Details: $(print_artifacts_url notify/mail-body.txt/*view*/)
+EOF
+ else
+ cat > $top_artifacts/notify/jira/comment-template.txt << EOF
+[$(print_jira_template_card)]
+$($print_result_f --oneline)
+Details: $(print_artifacts_url notify/mail-body.txt/*view*/)
+EOF
+ fi
+ )
+}
+
+# Post update to Jira
+post_to_jira ()
+{
+ (
+ set -euf -o pipefail
+ echo "# ${FUNCNAME[0]}"
+
+ local post_card_comment=$post_jira_comment
+ local post_template_comment=$post_jira_comment
+
+ if $post_jira_card && [ -f $top_artifacts/notify/jira/yaml ]; then
+ local key status
+ key=$(print_jira_card_key)
+ status=$(jipsearch -j "key=$key" -s status:name \
+ | sed -e "s/.* , //")
+
+ # Do not update closed cards to avoid spamming developers about
+ # updating resolved issues. Note that this also skips posting
+ # comments below.
+ case "$status" in
+ "Closed")
+ post_card_comment=false
+ # In this case we may still post a comment to the template
+ # card if $post_jira_comment is true.
+ ;;
+ *)
+ $dryrun jipcreate -f $top_artifacts/notify/jira/yaml
+ post_template_comment=false
+ ;;
+ esac
+ fi
+
+ if $post_card_comment \
+ && [ -f $top_artifacts/notify/jira/comment-card.txt ]; then
+ echo y | $dryrun jipdate \
+ -f $top_artifacts/notify/jira/comment-card.txt
+ fi
+
+ if $post_template_comment \
+ && [ -f $top_artifacts/notify/jira/comment-template.txt ]; then
+ echo y | $dryrun jipdate \
+ -f $top_artifacts/notify/jira/comment-template.txt
+ fi
+ )
+}
+
+
+#========================================================================================
+#
+# MAIL RELATED PROCEDURES
+#
+#========================================================================================
+# Model for mail-recipient.txt (may contain some of the following):
+# <author>, <compiler-mailing-list>, <linaro-toolchain-mailing-list>, <tcwg-validation-mailing-list>
+# (generate_mail_recipients : Generic)
+#
+# Model for mail-subject.txt :
+# [Linaro-TCWG-CI] <diag> after <changes> (generate_mail_subject : Generic)
+#
+# Model for mail-body.txt :
+# <mail regression details> (generate_mail_body_regression : Specific to the kind of project)
+#
+# <reproduction instructions details> (generate_mail_body_reproduction_instructions : Generic)
+#
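+# A hypothetical mail-subject.txt following the model above:
+#   [Linaro-TCWG-CI] gcc-14-1234-g0123abcdef: FAIL: 2 regressions on aarch64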
+
+print_mail_recipients ()
+{
+ (
+ set -euf -o pipefail
+
+ local c="$changed_single_component"
+ if [ "$c" = "" ]; then
+ echo "bcc:tcwg-validation@linaro.org"
+ return 0
+ fi
+
+ local -A emails
+ emails["tcwg-validation@linaro.org"]=bcc
+ emails["author"]=cc
+ emails["committer"]=to
+
+ case "$ci_project/$ci_config:$c" in
+ *_fast_*/*:*)
+ # FIXME: testing maintainer-mode
+ emails["author"]=no
+ emails["committer"]=no
+ emails["christophe.lyon@linaro.org"]=to
+ ;;
+ tcwg_aosp-*/*:*)
+ # FIXME: stabilize and enable notifications.
+ emails["author"]=no
+ emails["committer"]=no
+ emails["antoine.moynault@linaro.org"]=to
+ ;;
+ tcwg_bmk-*/*:*)
+ # FIXME: stabilize and enable notifications.
+ emails["author"]=no
+ emails["committer"]=no
+ emails["maxim.kuvyrkov@linaro.org"]=to
+ ;;
+ tcwg_kernel/llvm-*:linux|tcwg_kernel/*:llvm)
+ emails["author"]=no
+ emails["committer"]=no
+ emails["llvm@lists.linux.dev"]=to_postcommit
+ ;;
+ tcwg_kernel/*:linux)
+ emails["author"]=no
+ emails["committer"]=no
+ emails["linaro-kernel@lists.linaro.org"]=to_postcommit
+ ;;
+ tcwg_kernel/*:*)
+ emails["author"]=no
+ emails["committer"]=no
+ emails["linaro-toolchain@lists.linaro.org"]=to_postcommit
+ ;;
+ */*:gcc)
+ emails["gcc-regression@gcc.gnu.org"]=cc_postcommit
+ ;;
+ */*:gdb)
+ emails["gdb-testers@sourceware.org"]=cc_postcommit
+ ;;
+ */*:*)
+ emails["linaro-toolchain@lists.linaro.org"]=cc_postcommit
+ ;;
+ esac
+
+ local c email base_rev cur_rev
+
+ # CC: author
+ base_rev=$(get_baseline_git "${c}_rev")
+ cur_rev=$(get_current_git "${c}_rev")
+ while read -r email; do
+ emails["$email"]="${emails[author]}"
+ done < <(git -C "$c" log --pretty='%ae' "$base_rev..$cur_rev" || true)
+
+
+ local precommit_postcommit=postcommit
+ if [ "$notify" != precommit ]; then
+ # TO: committer
+ base_rev=$(get_baseline_git "${c}_rev")
+ cur_rev=$(get_current_git "${c}_rev")
+ while read -r email; do
+ # shellcheck disable=SC2034
+ emails["$email"]="${emails[committer]}"
+ done < <(git -C "$c" log --pretty='%ce' "$base_rev..$cur_rev" || true)
+ else
+ precommit_postcommit=precommit
+
+ if [ "${notify_email-}" = "" ]; then
+ # TO: precommit submitter
+ # Note that for precommit testing "git log" will specify
+ # tcwg-buildslave@linaro.org as committer, so use patchwork submitter
+ # instead.
+ notify_email="${pw[${c}_patch_submitter]-}"
+ fi
+
+ if [ "${notify_email-}" != "" ]; then
+ emails["$notify_email"]="${emails[committer]}"
+ fi
+ fi
+
+ unset "emails[author]" "emails[committer]"
+
+ local type
+ local -a recipients=()
+ for email in "${!emails[@]}"; do
+ type="${emails[$email]}"
+ case "$precommit_postcommit:$type" in
+ precommit:to_precommit) type=to ;;
+ precommit:to_postcommit) type=no ;;
+ precommit:cc_precommit) type=cc ;;
+ precommit:cc_postcommit) type=no ;;
+ postcommit:to_precommit) type=no ;;
+ postcommit:to_postcommit) type=to ;;
+ postcommit:cc_precommit) type=no ;;
+ postcommit:cc_postcommit) type=cc ;;
+ esac
+
+ case "$type" in
+ no) ;;
+ to) recipients+=("$email") ;;
+ *) recipients+=("$type:$email") ;;
+ esac
+ done
+
+ (IFS=","; echo "${recipients[*]}")
+ )
+}
+
+##### Model for GNU SPECIFIC mail-body-regression.txt regression details
+#
+# After commit <>
+#
+# The following .. slowed down / grew in size ..
+#
+# The configuration is ...
+#
+
+##### Generates mail/mail-body.txt file
+print_mail_body()
+{
+ local bad_artifacts_url good_artifacts_url
+ bad_artifacts_url="$(get_current_manifest BUILD_URL)artifact/artifacts"
+ good_artifacts_url="$(get_baseline_manifest BUILD_URL)artifact/artifacts"
+
+ local key=""
+ # We create a Jira card only for single-commit regressions in
+ # post-commit CI
+ if [ "$change_kind" = "single_commit" ] \
+ && [ "${pw[project]-}" = "" ]; then
+ key=$(print_jira_card_key)
+ if [ -z "$key" ]; then
+ key=$(print_jira_template_card)
+ fi
+ fi
+
+ cat << EOF
+Dear contributor, our automatic CI has detected problems related to your \
+patch(es). Please find some details below. If you have any questions, \
+please follow up on linaro-toolchain@lists.linaro.org mailing list, Libera's \
+#linaro-tcwg channel, or ping your favourite Linaro toolchain developer \
+on the usual project channel.
+
+We appreciate that it might be difficult to find the necessary logs or \
+reproduce the issue locally. If you can't get what you need from our \
+CI within minutes, let us know and we will be happy to help.
+
+EOF
+
+ if [ "$key" != "" ]; then
+ cat <<EOF
+We track this report status in https://linaro.atlassian.net/browse/$key , \
+please let us know if you are looking at the problem and/or when you have a fix.
+
+EOF
+ fi
+
+ cat <<EOF
+In $($print_config_f --short) after:
+
+$($print_commits_f --short | sed -e 's/^/ | /')
+
+$($print_result_f --short)
+
+The configuration of this build is:
+$($print_config_f --long)
+
+-----------------8<--------------------------8<--------------------------8<--------------------------
+The information below can be used to reproduce a debug environment:
+
+Current build : $bad_artifacts_url
+Reference build : $good_artifacts_url
+
+EOF
+
+ # FIXME: Remove this warning when we enable maintainer-mode in
+ # production.
+ if [ "${pw[project]-}" != "" ]; then
+ cat <<EOF
+Warning: we do not enable maintainer-mode or automatically update
+generated files, which may lead to failures if the patch modifies
+the master files.
+
+EOF
+ fi
+
+ if [ "$change_kind" != "single_commit" ] \
+ || [ "${pw[project]-}" != "" ]; then
+ return
+ fi
+
+ cat <<EOF
+Reproduce last good and first bad builds: $($print_last_icommit_f --reproduction_instructions_link "$ci_project" "$ci_config")
+
+Full commit : $($print_commits_f --link)
+
+List of configurations that regressed due to this commit :
+$($print_last_icommit_f --status)
+
+EOF
+}
+
+# Generate notify/mail-*.txt files
+generate_mail_files()
+{
+ (
+ set -euf -o pipefail
+
+ print_mail_recipients > $top_artifacts/notify/mail-recipients.txt
+
+ # Keep jira/summary in sync with mail-subject.txt
+ echo "[Linaro-TCWG-CI]" \
+ "$($print_commits_f --oneline): $($print_result_f --oneline) on $($print_config_f --oneline)" \
+ > $top_artifacts/notify/mail-subject.txt
+
+ print_mail_body > $top_artifacts/notify/mail-body.txt
+ )
+}
+
+print_readme_header()
+{
+ (
+ set -euf -o pipefail
+
+ local text_type="$1"
+
+ local msg="How to browse artifacts of this build"
+ case $text_type in
+ html)
+ cat <<EOF
+<!DOCTYPE html>
+<html>
+<body>
+<font color="black">
+<h2>$msg</h2>
+EOF
+ ;;
+ txt)
+ cat <<EOF
+$msg
+
+EOF
+ ;;
+ esac
+ )
+}
+
+# print MSG as a link, if appropriate
+# $1: type of output (html/txt)
+# $2: message (directory or file name)
+# $3: directory where $2 resides, used to determine the file type
+print_readme_link()
+{
+ (
+ set -euf -o pipefail
+
+ local text_type="$1"
+ local msg="$2"
+ local home="$3"
+
+ case $text_type in
+ html)
+ # The Jenkins webserver needs a /*view*/ decoration so that
+ # text files are displayed in the browser.
+ view=""
+ if [ -f "$home/$msg" ]; then
+ if file "$home/$msg" | grep -qw text ; then
+ view="/*view*/"
+ fi
+ fi
+ echo -n "<a href=\"$msg$view\">$msg</a>"
+ ;;
+ txt)
+ echo -n "$msg"
+ ;;
+ esac
+ )
+}
+
+print_readme_footer()
+{
+ (
+ set -euf -o pipefail
+
+ local text_type="$1"
+ case $text_type in
+ html)
+ cat << EOF
+</body>
+</html>
+EOF
+ ;;
+ esac
+ )
+}
+
+# Provide some hints to users, to help them find their way in our
+# artifacts.
+# $1: type of text (txt, html)
+generate_readme()
+{
+ (
+ set -euf -o pipefail
+
+ local text_type="$1"
+
+ local gnu_text=false
+ case "$ci_project" in
+ *check*)
+ case "$ci_project" in
+ *gnu*|*gcc*|*binutils*|*gdb*|*bootstrap*)
+ gnu_text=true
+ ;;
+ esac
+ ;;
+ esac
+
+ local list_start=""
+ local list_end=""
+ local list_item="- "
+ local new_parag=""
+
+ if [ "$text_type" = "html" ]; then
+ list_start="<ul>"
+ list_end="</ul>"
+ list_item="<li>"
+ new_parag="<p>"
+ fi
+
+ print_readme_header $text_type
+
+ cat << EOF
+The artifact directories contain a lot of information related to the
+results of this build.
+$new_parag
+Directories starting with a number contain the logs of each step of
+the build. More synthetic information is available in other directories,
+as described below:
+$new_parag
+$list_start
+EOF
+
+ if [ -d $top_artifacts/00-sumfiles ]; then
+ cat <<EOF
+$list_item$(print_readme_link "$text_type" "00-sumfiles/" "") contains .log and possibly .sum files generated by the
+  build. Files with a .0 suffix contain the results of the initial full
+  testsuite run; files with .1, .2, etc. contain logs restricted to
+  the parts (.exp) of the testsuite where we detected regressions.
+  The suffixes .1, .2, ... count how many times this subset of the
+  testsuite was re-run in order to identify flaky tests. The last one
+  contains what is considered the results of this build.
+
+EOF
+ fi
+
+ cat <<EOF
+$list_item$(print_readme_link "$text_type" "git/" "") contains the revision and repository of each toolchain
+ component built
+
+$list_item$(print_readme_link "$text_type" "jenkins/" "") contains information useful for the CI maintainers
+
+$list_item$(print_readme_link "$text_type" "notify/" "") contains the material used to build various
+ notifications/reports (emails, Jira, LNT, ...)
+EOF
+
+ if $gnu_text; then
+ cat <<EOF
+
+$list_item$(print_readme_link "$text_type" "sumfiles/" "") contains the .sum files produced by this build.
+EOF
+ fi
+
+ cat <<EOF
+$list_end
+$new_parag
+If you received a notification about one of your patches causing
+problems, the information you received is in $(print_readme_link "$text_type" "notify/" "") and has
+links to other artifacts from this directory.
+EOF
+
+ if $gnu_text; then
+ local regressions=""
+ if [ -f $top_artifacts/notify/regressions.sum ]; then
+ regressions="$(print_readme_link "$text_type" "notify/regressions.sum" "$top_artifacts") and "
+ fi
+ cat <<EOF
+$new_parag
+If you are investigating such a problem, you are probably primarily
+interested in:
+$new_parag
+$list_start
+$list_item$regressions$(print_readme_link "$text_type" "notify/results.compare.txt" "$top_artifacts") (regression report).
+
+EOF
+ if [ -d $top_artifacts/00-sumfiles ]; then
+ cat <<EOF
+$list_item$(print_readme_link "$text_type" "00-sumfiles/" "") contains .log files with detailed errors, which may
+  save you from reproducing the problem on your machine.
+EOF
+ fi
+
+ cat <<EOF
+$list_end
+EOF
+ fi
+
+ # Print the list of files below top_artifacts; some users find
+ # this useful.
+ cat <<EOF
+$new_parag
+List of files below:
+$new_parag
+$list_start
+EOF
+
+ while read -r cur_file; do
+ echo "$list_item$(print_readme_link "$text_type" "$cur_file" "$top_artifacts")"
+ done < <(cd $top_artifacts ; find . -type f | sort)
+
+ cat <<EOF
+$list_end
+EOF
+
+ print_readme_footer $text_type
+ )
+}
+
+# Procedure to generate a nice html to publish in jenkins job
+generate_jenkins_html_files()
+{
+ (
+ set -euf -o pipefail
+
+ echo "# ${FUNCNAME[0]}"
+ if ! $generate_jenkins_html; then
+ echo "... Skipping"
+ return
+ fi
+
+ case "$ci_project" in
+ tcwg_bmk-*)
+ (
+ status_file="$top_artifacts/results-vs-prev/csv-results-1/status.csv"
+ if [ -f $status_file ]; then
+ # status is one of: success, failed-to-build or failed-to-run
+ nb_succeed=$(sort -u $status_file | grep -c ",success$")
+ nb_failed=$(sort -u $status_file | grep -c ",failed-to-")
+ title="$nb_succeed benchmarks succeeded"
+ if [ "$nb_failed" != "0" ]; then
+ title+=", <FONT COLOR=\"orange\">$nb_failed failed<FONT COLOR=\"black\">"
+ fi
+ cat << EOF
+ <!DOCTYPE html>
+ <html>
+ <body>
+
+ <h2>Status of this run : $title</h2>
+
+ <FONT COLOR="orange">
+EOF
+
+ sort -u $status_file | grep ",failed-to-" | cut -d, -f1,3 | \
+ sed -e 's|\(.*\),\(.*\)|<h3> - \1 : \2</h3>|'
+
+ cat << EOF
+ <FONT COLOR="black">
+
+ </body>
+ </html>
+EOF
+ fi
+ ) > $top_artifacts/jenkins/status.html
+ ;;
+ *)
+ # no implementation
+ echo "... Skipping"
+ return
+ ;;
+ esac
+ ) &
+ wait $! || true
+
+ generate_readme html > $top_artifacts/README.html
+ generate_readme txt > $top_artifacts/README.txt
+}
+
+
+#========================================================================================
+#
+# DASHBOARD RELATED PROCEDURES
+#
+#========================================================================================
+# Calculate a reasonable date to associate with the current results /
+# artifacts, and print this date in manifest.sh.
+calculate_results_date ()
+{
+ (
+ set -euf -o pipefail
+
+ local c base_d cur_d results_date=0
+
+ # Firstly, set results_date to the max of commit dates of all components.
+ for c in $(get_current_manifest "{rr[components]}"); do
+ base_d=$(get_baseline_component_date ${c} || true)
+ cur_d=$(get_current_component_date ${c} || true)
+ if [ x"$base_d" != x"" ]; then
+ if [ x"$cur_d" = x"" ] || [ $cur_d -lt $base_d ]; then
+ cur_d="$base_d"
+ fi
+ fi
+ if [ x"$cur_d" = x"" ]; then
+ continue
+ fi
+
+ if [ $cur_d -gt $results_date ]; then
+ results_date="$cur_d"
+ fi
+ done
+
+ assert_with_msg "Failed to produce results_date" [ $results_date -gt 0 ]
+
+ base_d=$(get_baseline_manifest "{rr[results_date]}")
+
+ # Normally there's a rr[results_date] in baseline manifest.
+ # The rr[results_date] was useless at one point (no dashboard for a period
+ # of time) and disappeared from the manifest for that time. It is now useful
+ # to have it back because we are setting up another dashboard backend.
+ # Asserting on existing results would block any new results, so the
+ # assertion is disabled for now.
+ #
+ # TODO:
+ # Once all base-artifacts history are rewritten with the results_date,
+ # we will re-enable this assertion.
+ #assert_with_msg "Missing rr[results_date] from baseline manifest" \
+ # [ "$base_d" != "" ]
+
+ if [ "$base_d" != "" ]; then
+ if [ $results_date -gt $base_d ]; then
+ # Average between our current results_date and baseline date.
+ # The reason behind average is to spread out dates between bursts
+ # of builds, which can occur when reducing a regression.
+ results_date=$((($results_date + $base_d) / 2))
+ elif [ $results_date -eq $base_d ]; then
+ # If the dates are equal, then no point in taking average.
+ # Instead just add some arbitrary, but reasonable, amount.
+ results_date=$(($results_date + 600))
+ else
+ # If the baseline date is in the future (e.g., because git commit
+ # dates are weird in one of the components), then add some
+ # arbitrary, but reasonable, amount.
+ results_date=$(($base_d + 600))
+ fi
+ fi
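+ # E.g. with hypothetical epoch values base_d=1000 and an initial
+ # results_date=2000, the stored results_date becomes (2000 + 1000) / 2 = 1500.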
+
+ # Save results_date in the manifest so that we can fetch it as $base_d
+ # above for the next build.
+ rr[results_date]="$results_date"
+ cat <<EOF | manifest_out
+rr[results_date]="$results_date"
+EOF
+ )
+}
+
+generate_dashboard_squad ()
+{
+ local results_date
+
+ echo "# ${FUNCNAME[0]}"
+ if ! $generate_dashboard; then
+ echo "... Skipping"
+ return
+ fi
+
+ results_date="${rr[results_date]}"
+ results_date=$(date --utc --iso-8601=seconds --date="@$results_date")
+
+ $scripts/dashboard-generate-squad.sh \
+ --top_artifacts "$top_artifacts" \
+ --baseline_branch "$(get_current_manifest "{rr[baseline_branch]}")" \
+ --components "$(get_current_manifest "{rr[components]}")" \
+ --run_date "$results_date" \
+ --relative_results true \
+ --squad_mode "vs-first"
+ echo "... Done"
+}
+
+post_dashboard_squad ()
+{
+ echo "# ${FUNCNAME[0]}"
+ if ! $post_dashboard; then
+ echo "... Skipping"
+ return
+ fi
+
+ if ! [ -d $top_artifacts/notify/dashboard/squad-vs-first ]; then
+ return
+ fi
+
+ $dryrun $top_artifacts/notify/dashboard/squad-vs-first/dashboard-push-squad.sh
+ echo "... Done"
+}
+
+generate_lnt_report()
+{
+ (
+ set -euf -o pipefail
+ local results_date
+
+ echo "# ${FUNCNAME[0]}"
+ if ! $generate_lnt; then
+ echo "... Skipping"
+ return
+ fi
+
+ # shellcheck source=lnt-utils.sh
+ . $scripts/lnt-utils.sh
+
+ results_date="$(get_current_manifest "{rr[results_date]}")"
+ results_date=$(date +"%Y-%m-%d %H:%M:%S" --date "@$results_date")
+
+ local jira_key="-"
+ if [ -f "$top_artifacts/notify/jira/key" ]; then
+ jira_key=$(cat "$top_artifacts/notify/jira/key")
+ fi
+
+ case "$ci_project" in
+ tcwg_binutils*|tcwg_bootstrap*|tcwg_gcc*|tcwg_gdb*|tcwg_glibc*|tcwg_gnu*)
+ generate_lnt_gnu_check_report \
+ "$(get_current_manifest BUILD_URL)" "$ci_project" "$ci_config" \
+ "$results_date" "$jira_key" \
+ $top_artifacts/sumfiles \
+ $top_artifacts/notify/lnt_report.json
+ ;;
+ tcwg_bmk-*)
+ local cc cur_rev describe
+ case "${rr[toolchain]}" in
+ llvm) cc=llvm ;;
+ gnu) cc=gcc ;;
+ *) false ;;
+ esac
+ cur_rev=$(get_current_git ${cc}_rev)
+ describe=$(describe_sha1 "${cc}" "$cur_rev" false)
+ generate_lnt_bmk_report \
+ "$(get_current_manifest BUILD_URL)" "$ci_project" "$ci_config" \
+ "$results_date" "$jira_key" \
+ $top_artifacts/results-vs-prev/csv-results-1/size.csv \
+ $top_artifacts/results-vs-prev/csv-results-1/perf.csv \
+ $top_artifacts/results-vs-prev/csv-results-1/status.csv \
+ $top_artifacts/results-vs-prev/bmk-specific-variability-max.csv \
+ $top_artifacts/notify/lnt_report.json
+ ;;
+ *)
+ # no lnt support
+ echo "... Skipping"
+ return
+ ;;
+ esac
+ ) &
+ wait $! || true
+}
+
+#========================================================================================
+#
+# MAIN FLOW
+#
+#========================================================================================
+
+# setup the environment to run notify stage
+setup_notify_environment
+check_source_changes
+setup_stages_to_run
+
+# Initialize icommits
+post_interesting_commits init
+
+if [ "$stage" != "full" ]; then
+ echo "Init stage ran successfully."
+ exit 0
+fi
+
+$generate_extra_details_f
+calculate_results_date
+
+check_if_first_report
+
+# Update entry with full information
+post_interesting_commits full
+
+if $generate_jira; then
+ generate_jira_dir
+fi
+
+echo "# print all notification files"
+if $generate_mail; then
+ generate_mail_files
+fi
+
+if $generate_jenkins_html; then
+ generate_jenkins_html_files
+fi
+
+# generate and post Dashboard
+echo "# generate dashboard"
+generate_dashboard_squad
+post_dashboard_squad
+
+generate_lnt_report
+
+if $post_mail; then
+ release_notification_files
+fi
+
+if $post_gcc_testresults; then
+ release_gcc_testresults_files
+fi
+
+# Update jira card description
+post_to_jira
+
+echo "Full stage ran successfully."
diff --git a/round-robin.sh b/round-robin.sh
index feeafae3..d3d73574 100644
--- a/round-robin.sh
+++ b/round-robin.sh
@@ -4,41 +4,56 @@
. "$(dirname $0)"/jenkins-helpers.sh
# Round-Robin associative array.
+# FIXME: This should be declared when starting a new manifest with
+# %%rr[top_artifacts].
declare -gA rr
+# Major and minor versions of the manifest. These are used in
+# round-robin-baseline.sh to determine when historic result in base-artifacts/
+# needs to be regenerated.
+# Mismatch in major numbers means that the historic result may not be
+# compatible with the current scripts, and it is OK to drop it.
+# Mismatch in minor numbers means that the historic result should be
+# compatible with the current scripts, and it should be updated normally.
+# In most cases we will be increasing minor numbers to trigger regeneration of
+# reports in interesting-commits.git and updating jira cards.
+# Mismatch in patch numbers means that the historic results are fully compatible
+# with the current scripts. However, the generated notify files are no longer
+# aligned with one of the backends (e.g. the dashboard) and need to be regenerated.
+#rr[major]="0"
+#rr[minor]="0"
+#rr[patch]="0"
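+#
+# E.g. a hypothetical change that only reshuffles the notify/ file layout
+# would bump rr[patch], while one that invalidates historic results would
+# bump rr[major].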
+
# PROJECT's git url#branch or url#SHA1 revision parsable by git rev-parse.
# A special value "baseline" means that PROJECT is not being updated
# in this build, and its baseline branch should be used.
-# In a successful build "update_baseline" step will update baseline
+# After a successful build round-robin-baseline.sh will update baseline
# branches of all PROJECTs to the current values, thus setting
# a baseline for the next build.
#rr[PROJECT_git]
-# PROJECT's git SHA1 revision. These are mostly used in manifests.
+# PROJECT's git SHA1 revision. This is used in manifest and overrides
+# any branch setting in ${rr[PROJECT_git]}.
#rr[PROJECT_rev]
-# Baseline branch name for current configuration. These branches should
-# be present in all above git repos (if ${rr[init_configuration]} is false).
+# Baseline branch name for current configuration.
#rr[baseline_branch]="${rr[ci_project]}/${rr[ci_config]}"
-# Run mode: bisect or non-bisect. In bisect mode we do a couple things
-# slightly differently (e.g., don't touch repo in clone_repo() ).
+# Run mode: build or bisect. In bisect mode we do a couple things
+# slightly differently (e.g., don't touch repo in clone_repo() ), and
+# it is allowed to have only a single component updated.
#rr[mode]="$mode"
# How to handle baseline:
-# - init: use "empty" results for base-artifacts, which will make current
-# build successful. Push our artifacts as the one and only entry.
-# - update: update baseline branches of base-artifacts and components' repos
+# - onsuccess: update baseline branches of base-artifacts and components' repos
# on success (generate trigger files for bisection on failure).
-# - reset: ignore failures in check_regression(), which will make current
+# - force: ignore failures in check_regression(), which will make current
# build successful. Push our artifacts to the top of base-artifacts/.
-# - push: push results from current build (whatever they are, regressions are
-# ignored) as new commit to base-artifacts, and update baseline
-# branches. This is useful for user projects, e.g., to generate
-# historical results for several toolchain versions.
-# - rebase: treat results of this build as historically eldest, and
-# rebase base-artifacts commits on top of this build's artifacts.
-#rr[update_baseline]=update/reset/init/rebase
+# - init: use "empty" results for base-artifacts, which will make current
+# build successful. Push our artifacts as the one and only entry.
+# - ignore: Do not affect baseline. Useful for developer testing.
+#rr[update_baseline]=onsuccess/force/init/ignore
+
# Target architecture to build for: arm or aarch64.
#rr[target]="$target"
@@ -51,51 +66,18 @@ declare -gA rr
# shellcheck disable=SC2154
rr[no_regression_p]=no_regression_p
-# Hook to break up updated component (see print_updated_components) into
+# Hook to break up changed component (see print_changed_components) into
# smaller sets: print one set per line. By default, breakup into singletons.
# shellcheck disable=SC2154
-rr[breakup_updated_components]=breakup_updated_components
-
-# Print round-robin components that are being updated in this build
-# (the ones using non-baseline branches).
-print_updated_components ()
-{
- (
- set -euf -o pipefail
-
- local delim=""
- local c
- for c in ${rr[components]}; do
- if [ x"${rr[${c}_git]}" != x"baseline" ]; then
- echo -ne "$delim$c"
- delim=${1- }
- fi
- done
- echo
- )
-}
-
-# By default, print each component on its own line.
-breakup_updated_components ()
-{
- print_updated_components "\n"
-}
+rr[breakup_changed_components]=breakup_changed_components
-# Print the single round-robin component being updated in this build.
-# Print nothing if multiple components are being updated.
-print_single_updated_component ()
-{
- (
- set -euf -o pipefail
+# Abe's repository and branch to use for the build.
+rr[abe_repo]="https://git-us.linaro.org/toolchain/abe.git"
+rr[abe_branch]="master"
- local update_components
- IFS=" " read -r -a updated_components <<< "$(print_updated_components)"
-
- if [ ${#updated_components[@]} -eq 1 ]; then
- echo "${updated_components[0]}"
- fi
- )
-}
+# Host compiler defaults to /usr/bin/gcc and g++
+rr[host_cc]="/usr/bin/gcc"
+rr[host_c++]="/usr/bin/g++"
# Reset artifacts to an empty state. ${rr[top_artifacts]}/results is the most
# important artifact, since it records the metric of how successful the build
@@ -113,24 +95,66 @@ reset_artifacts ()
fresh_dir $run_step_top_artifacts \
$run_step_top_artifacts/console.log \
$run_step_artifacts/console.log \
+ $run_step_top_artifacts/manifest.sh \
"$run_step_top_artifacts/jenkins/*"
+ local branch repo1 repo
+ branch="${rr[baseline_branch]}"
+ repo1="${branch#linaro-local/ci/}"
+ repo="ssh://bkp.tcwglab/home/tcwg-buildslave/base-artifacts/$repo1.git"
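+    # E.g., a hypothetical baseline_branch
+    # "linaro-local/ci/tcwg_gnu_check/master-aarch64" maps to the repo
+    # "ssh://bkp.tcwglab/home/tcwg-buildslave/base-artifacts/tcwg_gnu_check/master-aarch64.git".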
+
+ local git_result
+ git_result=$(git ls-remote --heads "$repo" "refs/heads/$branch" || true)
+
+ # FIXME: Remove transitional workaround.
+ if [ "$git_result" = "" ]; then
+ # Try to use old repo
+ repo="https://git-us.linaro.org/toolchain/ci/base-artifacts/$repo1.git"
+ git_result=$(git ls-remote --heads "$repo" "refs/heads/$branch" || true)
+ fi
+
+ if [ "$git_result" = "" ]; then
+ echo "WARNING: BASELINE IS NOT FOUND; INITIALIZING AN EMPTY BASELINE"
+ rr[update_baseline]="init"
+ run_step_patch_env "==rr[update_baseline]" "init"
+ fi
+
+ if [ x"${rr[update_baseline]}" = x"init" ]; then
+ branch="empty"
+ # FIXME: Move empty.git to bkp.tcwglab.
+ repo="https://git-us.linaro.org/toolchain/ci/base-artifacts/empty.git"
+ fi
+
# Clone base-artifacts here so that bisect runs (which skip this step)
# don't overwrite it.
# base-artifacts repo is big and changes all the time, so we
# fetch only the $baseline_branch, instead of all branches.
- local single_branch
- if [ x"${rr[update_baseline]}" = x"init" ]; then
- single_branch=empty
+ rr[base-artifacts_rev]="${rr[base-artifacts_rev]-$branch}"
+ clone_or_update_repo base-artifacts "${rr[base-artifacts_rev]}" \
+ "$repo" auto "$branch"
+
+ git_annex_download base-artifacts annex
+
+ if [ -d base-artifacts/git/ ]; then
+ # Copy baseline git_url/git_rev settings into the current build,
+ # which will then be overwritten in due course by clone_repo()
+ # of various components.
+ # Note that we need to copy data for all components to correctly handle
+ # builds that fail before all their components are checked out.
+ # Note that we want to iterate over components (rather than rsync
+ # the whole base-artifacts/git/ directory) to avoid copying data for
+ # removed components.
+ local c
+ for c in ${rr[components]}; do
+ get_baseline_git ${c}_url | set_current_git ${c}_url
+ get_baseline_git ${c}_rev | set_current_git ${c}_rev
+ done
else
- single_branch=${rr[baseline_branch]}
+        # Apparently, we are in an "init" baseline build. "Init" builds should
+ # have a full set of git data for all components specified on
+ # the command line, so that get_baseline_git() is not called.
+ mkdir base-artifacts/git
fi
- rr[base-artifacts_rev]="${rr[base-artifacts_rev]-$single_branch}"
-
- clone_or_update_repo base-artifacts ${rr[base-artifacts_rev]} https://git-us.linaro.org/toolchain/ci/base-artifacts.git auto $single_branch
- cat <<EOF | manifest_out
-rr[base-artifacts_rev]=$(git -C base-artifacts rev-parse HEAD)
-EOF
)
}
@@ -142,38 +166,99 @@ clone_repo ()
set -euf -o pipefail
local project="$1"
- if [ x"${rr[mode]}" = x"bisect" ]; then
- # Cleanup current checkout in bisect mode.
- git_clean "$project"
- return 0
- fi
-
local url branch
- if [ x"${rr[${project}_git]}" != x"baseline" ]; then
- # Fetch and checkout from the specified repo.
- url="${rr[${project}_git]%#*}"
- branch="${rr[${project}_git]#*#}"
- else
- # Fetch and checkout from baseline repo.
- url=$(print_baseline_repo "$project" true)
- branch="${rr[baseline_branch]}"
- fi
-
- clone_or_update_repo_no_checkout "$project" "$url" auto "" origin \
- > /dev/null
+ case "${rr[${project}_git]}" in
+ *"#"*)
+ # Fetch from specified remote repo.
+ url="${rr[${project}_git]%#*}"
+ branch="${rr[${project}_git]#*#}"
+ ;;
+ "baseline")
+ # Fetch from remote repo specified in the baseline.
+ url=$(get_baseline_git ${project}_url)
+ branch=$(get_baseline_git ${project}_rev)
+ ;;
+ *)
+ # Use revision in the existing local repo.
+ # Most likely it is "HEAD" in precommit testing.
+ url=""
+ branch="${rr[${project}_git]}"
+ ;;
+ esac
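+    # Illustrative examples of the three forms (values are hypothetical):
+    #   rr[gcc_git]="https://github.com/gcc-mirror/gcc.git#master"  # url#branch
+    #   rr[gcc_git]="baseline"  # fetch url/rev recorded in the baseline
+    #   rr[gcc_git]="HEAD"      # use revision in the existing local repo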
- # Allow manifest override
+ # Allow manifest override for $url
+ url="${rr[${project}_url]-$url}"
+ # Allow manifest override for $branch
branch="${rr[${project}_rev]-$branch}"
- git -C $project checkout --detach "$branch"
+ if [ x"${rr[mode]}" = x"bisect" ]; then
+ # In bisect mode we rely on round-robin-bisect.sh to arrange
+ # all source directories, and here we only clean them.
+        # Note that in bisect mode round-robin-bisect.sh passes the "_git" spec
+ # as url#sha1 so that create_trigger_files() generates trigger-build-*
+ # suitable for triggering last_good and first_bad builds.
+ git_clean "$project"
+ elif [ "$url" = "" ]; then
+ # In local mode -- clean the project directory.
+ git_clean "$project"
+ # Don't use git_checkout(), which prefers remote resolution of refs.
+ git -C "$project" checkout --detach "$branch"
+ else
+ clone_or_update_repo "$project" "$branch" "$url" > /dev/null
+ fi
local cur_rev
cur_rev=$(git -C $project rev-parse HEAD)
+ rr[debug_${project}_date]=$(git -C $project show --no-patch \
+ --pretty="%ct # %cr" HEAD)
+
+ # Store git info in the manifest and git data into artifacts.
+ # Git data in artifacts is then used by subsequent builds to fetch
+ # baseline commits.
+ if [ "$url" != "" ]; then
+ echo "$url" | set_current_git ${project}_url
+ fi
+ echo "$cur_rev" | set_current_git ${project}_rev
+ )
+}
- cat <<EOF | manifest_out
-rr[${project}_rev]=$cur_rev
+# Configure ccache wrappers in "$1".
+setup_ccache ()
+{
+ (
+ set -euf -o pipefail
+ local bin="$1"
+
+ local -a ccache_opts=("CCACHE_BASEDIR=$workspace")
+ if [ -d "$HOME/.ccache" ] && ! touch "$HOME/.ccache" 2>/dev/null; then
+        # Set up a read-only ccache; this is for pre-commit testing.
+        # Since pre-commit testing is ephemeral, do not bother about
+        # cleaning up the temp directory.
+ #
+ # Note that we use "touch" instead of "test -w" to check writability
+ # of $HOME/.ccache. This is because "test -w" documentation says
+ # that "test -w" checks for "w" permission, which is not the same
+ # as writability -- e.g., consider read-only filesystems.
+ # In practice, "test -w" does seem to check for actual writability,
+ # but "touch" is more robust.
+ ccache_opts+=("CCACHE_READONLY=true" "CCACHE_NOSTATS=true"
+ "CCACHE_TEMPDIR=$(mktemp -d)")
+ fi
+
+ cat > "$bin/gcc" <<EOF
+#!/bin/sh
+${ccache_opts[@]} exec ccache ${rr[host_cc]} "\$@"
EOF
+ chmod +x "$bin/gcc"
+ cp "$bin/gcc" "$bin/cc"
+
+ cat > "$bin/g++" <<EOF
+#!/bin/sh
+${ccache_opts[@]} exec ccache ${rr[host_c++]} "\$@"
+EOF
+ chmod +x "$bin/g++"
+ cp "$bin/g++" "$bin/c++"
)
}
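+# For reference, a wrapper generated by setup_ccache() looks roughly like the
+# following (exact paths depend on $workspace and rr[host_cc]):
+#   #!/bin/sh
+#   CCACHE_BASEDIR=/path/to/workspace exec ccache /usr/bin/gcc "$@"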
@@ -183,7 +268,14 @@ prepare_abe ()
(
set -euf -o pipefail
- clone_or_update_repo abe tested https://git-us.linaro.org/toolchain/abe.git > /dev/null
+ clone_or_update_repo abe ${rr[abe_branch]} ${rr[abe_repo]} > /dev/null
+
+ # We use our modified version of GCC's comparison script
+ clone_or_update_repo gcc-compare-results master \
+ https://git.linaro.org/toolchain/gcc-compare-results.git
+
+ local workspace
+ workspace=$(pwd)
cd abe
@@ -192,19 +284,15 @@ prepare_abe ()
rm -rf "$(pwd)/bin"
mkdir "$(pwd)/bin"
- cat > "$(pwd)/bin/gcc" <<EOF
-#!/bin/sh
-exec ccache /usr/bin/gcc "\$@"
-EOF
- chmod +x "$(pwd)/bin/gcc"
- cp "$(pwd)/bin/gcc" "$(pwd)/bin/cc"
+ setup_ccache "$(pwd)/bin"
- cat > "$(pwd)/bin/g++" <<EOF
+    # Disable building documentation. Apparently, a fake makeinfo is one of
+    # the most popular ways to do this.
+ cat > "$(pwd)/bin/makeinfo" <<EOF
#!/bin/sh
-exec ccache /usr/bin/g++ "\$@"
+exec true
EOF
- chmod +x "$(pwd)/bin/g++"
- cp "$(pwd)/bin/g++" "$(pwd)/bin/c++"
+ chmod +x "$(pwd)/bin/makeinfo"
PATH=$(pwd)/bin:$PATH
export PATH
@@ -213,14 +301,89 @@ EOF
)
}
-# Build ABE component
-# $1: Component -- ABE component to build.
+# Create GNU toolchain xfail files
+# $1: file to store flaky xfails to.
+# $2: file to store baseline xfails to.
+build_abe_check_xfails ()
+{
+ (
+ set -euf -o pipefail
+ local flaky_tests="$1"
+ local baseline_fails="$2"
+
+ local sumfiles="$run_step_top_artifacts/sumfiles"
+ if [ -f "$sumfiles/flaky.xfail" ]; then
+ # Add newly-detected flaky tests to the xfails to be used in
+ # tcwg_gnu-build.sh:no_regression_p().
+        # Strictly speaking, this is not necessary, since all tests
+        # detected as flaky in the current run will show up as PASSed
+        # tests in the final merged .sum files.
+ echo "# New flaky tests" >> "$flaky_tests"
+ cat "$sumfiles/flaky.xfail" >> "$flaky_tests"
+ fi
+
+ # Fetch flaky tests from base-artifacts history.
+ echo "# Known flaky tests" >> "$flaky_tests"
+ local history_flaky history_root=""
+ while read history_flaky; do
+ if [ "$history_root" = "" ]; then
+ history_root="$history_flaky"
+ continue
+ fi
+
+ (echo; cat "$history_flaky") >> "$flaky_tests"
+ done < <(get_git_history 0 base-artifacts sumfiles/flaky.xfail)
+ rm -rf "$history_root"
+
+ # Construct $baseline_fails from base-artifacts/sumfiles/.
+ # These and $flaky_tests are passed to ABE to speed-up test convergence
+ # and then to .sum comparison in tcwg_gnu-build.sh:no_regression_p().
+ if [ -d base-artifacts/sumfiles ]; then
+ gcc-compare-results/contrib/testsuite-management/validate_failures.py \
+ --build_dir=base-artifacts/sumfiles --produce_manifest \
+ --manifest "$baseline_fails"
+ else
+ touch "$baseline_fails"
+ fi
+ )
+}
+
+# Build ABE component. Arguments:
+#
+# build_abe <component> [--build_patch <patch_branch>] [--check_patch <patch_branch>] [--] [ABE arguments]*
+#
+# Where:
+#
+# <component> ABE component to build.
+# --build_patch <patch_branch> Branch with patch to apply before build.
+# --check_patch <patch_branch> Branch with patch to apply before test.
+# -- Separates arguments for build_abe from arguments for
+# other components.
+#
+# Any argument not mentioned above is carried over to ABE.
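+#
+# A hypothetical example invocation (branch name is illustrative):
+#   build_abe gcc --build_patch users/me/gcc-fix -- --set languages=c,c++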
build_abe ()
{
(
set -euf -o pipefail
local component="$1"
+ shift
+
+ if [ x"$component" = x"check_gdb" ]; then
+ # Limit GDB testsuites to single-thread parallelism.
+ # We've tried running GDB testsuites with 16-thread parallelism,
+ # but could not shake out flaky tests in the course of several weeks.
+ # Try stabilizing GDB testsuites with single-thread parallelism.
+ # If this doesn't work, we'll have to look into dejagnu.
+ local cpus
+ cpus=$(cat abe/host.conf | grep "^cpus=" | sed -e "s/^cpus=\(.*\)/\1/")
+ if [ "$cpus" -gt 1 ]; then
+ cp abe/host.conf abe/host.conf.orig
+ sed -i -e "s/^cpus=.*/cpus=1/" abe/host.conf
+ fi
+ elif [ -f abe/host.conf.orig ]; then
+ mv abe/host.conf.orig abe/host.conf
+ fi
local project stage action check
check=false
@@ -284,6 +447,20 @@ build_abe ()
;;
esac
+ local build_patch=""
+ if [ $# -gt 0 ] && [ "$1" = "--build_patch" ]; then
+ build_patch="$2"
+ shift 2
+ fi
+
+ local check_patch=""
+ if [ $# -gt 0 ] && [ "$1" = "--check_patch" ]; then
+ check_patch="$2"
+ shift 2
+ fi
+
+ # Finished processing arguments for build_abe. Now look for arguments meant
+ # for various components.
while [ $# -gt 0 ]; do
if [ x"$1" = x"--" ]; then
shift
@@ -292,20 +469,70 @@ build_abe ()
shift
done
- local patch_branch=""
- if [ $# -gt 0 ] && [ x"$1" = x"--patch" ]; then
- patch_branch="$2"
- shift 2
+ local -a rerun_failed_tests=()
+ local -a send_results=()
+ if $check; then
+ # Clean testing results
+ rm -rf "$run_step_top_artifacts/sumfiles" \
+ "$run_step_top_artifacts/00-sumfiles"
+ mkdir "$run_step_top_artifacts/sumfiles" \
+ "$run_step_top_artifacts/00-sumfiles"
+
+ local flaky_tests="$run_step_artifacts/flaky.xfail"
+ local baseline_fails="$run_step_artifacts/baseline.xfail"
+ build_abe_check_xfails "$flaky_tests" "$baseline_fails"
+
+ rerun_failed_tests=("--rerun-failed-tests"
+ "--gcc-compare-results" "$PWD/gcc-compare-results"
+ "--flaky-failures" "$flaky_tests"
+ "--expected-failures" "$baseline_fails")
+
+ if [ "${rr[update_baseline]}" != "ignore" ]; then
+ # If we have a chance to commit $new_flaky into base-artifacts.git
+ # then pretend it is 8 weeks into the future and ignore flaky entries
+ # that will be expired by then. This, effectively, gives us 8 weeks
+ # to re-detect/confirm flaky tests without no_regression_p()
+ # noticing anything.
+ # We will then set expiration date to "now+12 weeks" (see below)
+ # for entries in $new_flaky/"sumfiles/flaky.xfail".
+ local week_from_now
+ week_from_now=$(date -d "now+8 weeks" +%Y%m%d)
+ rerun_failed_tests+=("--failures-expiration-date" "$week_from_now")
+ fi
+
+ case "${rr[ci_project]}" in
+ # Don't send results for partial 'make check'
+ *_fast_check_*) ;;
+ *)
+            # Default recipient, overridden in round-robin-notify.sh
+ send_results=("--send-results-to" "christophe.lyon@linaro.org"
+ "--send-results-filter" "$(pwd)/abe/scripts/testresults2jenkins.sh")
+ ;;
+ esac
fi
if [ x"$project" = x"gcc" ]; then
- # Don't build Go, which has a funky and unstable testsuite.
# Stage1 ignores "--set languages=" option, so this applies only
# to stage2 builds.
- stage="$stage --set languages=c,c++,fortran,lto"
- if [ $# != 0 ]; then
- stage="$stage $*"
- fi
+ case "${rr[ci_project]}" in
+ *embed*)
+ # Do not build fortran for bare-metal configurations.
+ stage="$stage --set languages=c,c++,lto"
+ ;;
+ *_mingw_*)
+ # FIXME: Only C is supported for aarch64-w64-mingw32.
+ stage="$stage --set languages=c"
+ ;;
+ *)
+ # Build upstream-default languages (not abe-default languages).
+ stage="$stage --set languages=default"
+ ;;
+ esac
+ fi
+
+ # Carry over any remaining arguments to ABE.
+ if [ $# != 0 ]; then
+ stage="$stage $*"
fi
action="--build $project"
@@ -318,9 +545,20 @@ build_abe ()
local custom_abe_src_opt=""
local git_dir="$project"
local n_patches=0
+ local patch_repo
+
+ case "$project" in
+ binutils|gdb)
+ patch_repo="binutils-gdb"
+ ;;
+ *)
+ patch_repo="$project"
+ ;;
+ esac
+
case "$component" in
- # Use our custom sources for everything, but kernel headers and dejagnu.
- linux|dejagnu) ;;
+ # Use our custom sources for everything, but dejagnu.
+ dejagnu) ;;
*)
git_dir="$git_dir.git"
custom_abe_src_opt="$project=http://git.l.o/$git_dir~master --disable update"
@@ -328,12 +566,30 @@ build_abe ()
if ! $check; then
clone_repo $project
- if [ x"$patch_branch" != x"" ]; then
- git -C $project fetch "https://git.linaro.org/toolchain/$project.git" "refs/heads/$patch_branch"
+ if [ x"$build_patch" != x"" ]; then
+ git -C $project fetch \
+ "https://git.linaro.org/toolchain/$patch_repo.git" \
+ "refs/heads/$build_patch"
git -C $project cherry-pick FETCH_HEAD
n_patches=1
fi
+ (
+ cd $project
+
+ # Avoid rebuilding of auto-generated C files. Rather than
+ # try to determine which are auto-generated, touch all of
+ # them. If a C file is not autogenerated, it does
+ # no harm to update its timestamp.
+ git ls-files -z '*.c' | xargs -r -0 touch
+
+ # Touch GCC's auto-generated files to avoid
+            # non-deterministic behavior.
+ if [ -x ./contrib/gcc_update ]; then
+ ./contrib/gcc_update --touch
+ fi
+ )
+
# Don't use ABE's repo clone functions and setup abe/snapshots/
# directory to have the right entries.
local git_path
@@ -341,14 +597,68 @@ build_abe ()
rm -rf $git_path $git_path~master
ln -s "$(pwd)/$project" $git_path
ln -s "$(pwd)/$project" $git_path~master
+ else
+ if [ x"$check_patch" != x"" ]; then
+ git -C $project fetch \
+ "https://git.linaro.org/toolchain/$patch_repo.git" \
+ "refs/heads/$check_patch"
+ git -C $project cherry-pick FETCH_HEAD
+ n_patches=1
+ fi
fi
;;
esac
- cd abe
+ # FIXME remove debug traces
+ set +f
+ stat -c "%Y %n" * abe/snapshots/$git_dir~master abe/snapshots/$git_dir~master/ || true
+ set -f
+
+
+ # In precommit testing, enable maintainer_mode so that we
+ # regenerate files as needed. Also update $PATH to include the
+ # right versions of autoconf and automake.
+ # ${rr[update_baseline]} == "ignore" is an approximate detection
+ # of precommit testing mode, since this can also be true while we
+ # are bisecting. When bisecting, we want to keep the sources
+ # exactly as they were committed, so we don't enable
+ # maintainer_mode in this case.
+ #
+ # FIXME binutils, gdb and gcc are not ready for automatic
+ # maintainer_mode. Disable for all projects for the moment.
+ local maintainer_mode=false
+ if [ "${rr[update_baseline]}" = "ignore" ] \
+ && [ "${rr[mode]}" != "bisect" ] \
+ && [ "$project" != "binutils" ] \
+ && [ "$project" != "gcc" ] \
+ && [ "$project" != "gdb" ] \
+ && false; then
+ maintainer_mode=true
+ fi
- local gnu_target
- gnu_target=$(print_gnu_target ${rr[target]})
+    # FIXME: Test only with fast_* projects for the moment (they have
+    # no precommit mode), so enable maintainer_mode in their "normal"
+    # jobs.
+ if [ "${rr[mode]}" != "bisect" ]; then
+ case "${rr[ci_project]}" in
+ *_fast_check_*)
+ maintainer_mode=true
+ ;;
+ esac
+ fi
+
+ if $maintainer_mode; then
+ stage="$stage --enable maintainer_mode"
+ # No need to export PATH, it is already exported by parent processes
+ PATH=/usr/local/automake-1.15.1/bin:/usr/local/autoconf-2.69/bin:$PATH
+ # Remove the fake makeinfo we created in prepare_abe(), so
+ # that we can check that docs can be built.
+ rm -f "$(pwd)/abe/bin/makeinfo"
+ else
+ stage="$stage --disable make_docs"
+ fi
+
+ cd abe
# Remove previous build directories and .stamp files.
# (we rely on ccache for fast rebuilds)
@@ -362,79 +672,250 @@ build_abe ()
PATH=$(pwd)/bin:$PATH
export PATH
- if [ x"$component" != x"stage2" ]; then
- # TODO: Fix install_sysroot logic in ABE.
- # ABE tries to install sysroot even for partial builds, e.g.,
- # with "--build binutils". Workaround by patching ABE.
- sed -i -e "s/do_install_sysroot/:/" lib/control.sh
- else
- git checkout -- lib/control.sh
- fi
-
- if true; then
- # WORKAROUND abe patches being blocked on proper testing.
- # Append, not overwrite runtestflags in abe.sh
- sed -i -e 's/override_runtestflags=.*/override_runtestflags="$override_runtestflags $setting"/' abe.sh
- fi
-
ccache -z
local target_opt=""
if [ x"${rr[target]}" != x"native" ]; then
- target_opt="--target $gnu_target"
-
- # Disable TSan execution tests when using QEMU.
- # QEMU can't handle TSan's shadow memory and it sends host machine
- # into swap.
- if $check && [ x"$project" = x"gcc" ]; then
- assert_with_msg "ERROR: Testing is not using QEMU" \
- ! echo "$stage" | grep -q -e "--testcontainer"
- sed -i \
- -e 's/set dg-do-what-default run/set dg-do-what-default link' \
- ../gcc/gcc/testsuite/lib/tsan-dg.exp
- fi
+ target_opt="--target $(print_gnu_target ${rr[target]})"
fi
# Run "./abe.sh --build $project".
- ./abe.sh \
- $action \
- $target_opt \
- --extraconfigdir config/master \
- $custom_abe_src_opt \
- $stage &
+ # shellcheck disable=SC2206
+ local -a abe_cmd=(
+ ./abe.sh
+ $action
+ $target_opt
+ --extraconfigdir config/master
+ $custom_abe_src_opt
+ "${rerun_failed_tests[@]}"
+ "${send_results[@]}"
+ $stage)
+ TESTRESULTS_PREFIX=$run_step_artifacts/testresults- "${abe_cmd[@]}" &
res=0 && wait $! || res=$?
+ # FIXME remove debug traces
+ set +f
+ stat -c "%Y %n" * snapshots/$git_dir~master snapshots/$git_dir~master/ || true
+ find ../$project -newer builds/*/*/$project-*-configure.stamp || true
+ find ../$project -newer builds/*/*/$project-*-build.stamp || true
+ set -f
+
+ # If a build without check failed, re-run it with --disable
+ # parallel so that errors are easier to extract from the logs.
+ if ! $check && [ $res -ne 0 ]; then
+ # ABE skips the configure step if $builddir/config.status
+ # exists. Remove this file when maintainer-mode is enabled, so
+ # that files are regenerated in case something went wrong
+ # during the parallel build. We cannot use ABE's --force
+ # because it also implies --enable-update.
+ if $maintainer_mode; then
+ rm -f builds/*/*/$project-*/config.status
+ fi
+
+ abe_cmd+=(--disable parallel)
+ "${abe_cmd[@]}" | ts abe-debug-build: &
+ res=0 && wait $! || res=$?
+
+ # FIXME remove debug traces
+ set +f
+ stat -c "%Y %n" * snapshots/$git_dir~master snapshots/$git_dir~master/ || true
+ set -f
+
+ if [ $res -eq 0 ]; then
+ # Parallel build failed, single-threaded one passed:
+ # consider this "OK" on aarch32, but a failure on other
+ # targets where we do not expect memory exhaustion.
+ if [ "$(getconf LONG_BIT)" = "32" ]; then
+                echo "WARNING: Parallel build failed, single-threaded one passed."
+                echo "WARNING: Considering this build successful (likely a transient memory exhaustion caused by a highly parallel build)"
+ else
+ if $maintainer_mode; then
+                    echo "WARNING: Parallel build failed, single-threaded one passed. This error is ignored when maintainer-mode is enabled."
+ else
+ echo "ERROR: Parallel build failed, single-threaded one passed."
+ res=1
+ fi
+ fi
+ fi
+ fi
+
# Revert patches if applied.
if [ -d ../$project ]; then
- git -C ../$project reset --hard HEAD~$n_patches
+ # FIXME remove debug traces
+ set +f
+ stat -c "%Y %n" * snapshots/$git_dir~master snapshots/$git_dir~master/ || true
+ git -C ../$project status
+ git -C ../$project diff HEAD~$n_patches
+ set -f
+
+ git -C ../$project reset -q --hard HEAD~$n_patches
+
+ # FIXME remove debug traces
+ set +f
+ stat -c "%Y %n" * snapshots/$git_dir~master snapshots/$git_dir~master/ || true
+ find ../$project -newer builds/*/*/$project-*-configure.stamp || true
+ find ../$project -newer builds/*/*/$project-*-build.stamp || true
+ set -f
fi
- # If abe failed to build component, return exit status.
- if [ x"$res" != x"0" ]; then
- return $res
- fi
ccache -s
+ # Save logs generated in the current step to artifacts.
+    # Note that logs generated in previous steps will have the .log.xz
+    # extension, and so will not match in "find" below.
+ local log
+ while IFS= read -r -d '' log; do
+ rm -f "$log.xz"
+ (
+ xz "$log"
+ cp "$log.xz" "$run_step_artifacts/"
+ ) &
+ done < <(find builds/ \( -name "make-*.log" -o -name "check-*.log" \) \
+ -print0)
+
+ # FIXME remove debug traces
+ set +f
+ stat -c "%Y %n" * snapshots/$git_dir~master snapshots/$git_dir~master/ || true
+ set -f
+
if $check; then
- local sum log
- rm -rf ${rr[top_artifacts]}/sumfiles
- mkdir -p ${rr[top_artifacts]}/sumfiles
+ local sum sumfiles
while IFS= read -r -d '' sum; do
- cp "$sum" ${rr[top_artifacts]}/sumfiles/
- log="${sum%.sum}.log"
+ case "$res:$sum" in
+ 0:*".sum")
+                # Only store sum files in the definitive directory if abe
+                # succeeded.
+ sumfiles="$run_step_top_artifacts/sumfiles"
+ ;;
+ *)
+ # Store *.sum.N files and .log.xz files in 00-sumfiles/,
+ # so that these non-essential big files are eventually
+ # removed from base-artifacts.git history by
+ # "git filter-repo" in round-robin-baseline.sh's
+ # trim_base_artifacts().
+ sumfiles="$run_step_top_artifacts/00-sumfiles"
+ ;;
+ esac
+
+ # Remove WORKSPACE prefix instead of making a plain copy
+ sed "s|Running .*/snapshots/|Running |" \
+ < "$sum" > "$sumfiles/$(basename "$sum")"
+
+ log="${sum/.sum/.log}"
# Testsuite logs grow 50-400MB in size, so compress them to save
- # disk space on ci.linaro.org.
+ # disk space on ci.linaro.org and in base-artifacts.git.
# Delete previous XZ'ed log, which can occur when we are bisecting
# QEMU and not rebuilding compiler (therefore not cleaning compiler
# build/test directory).
+ # Process logs in parallel; "wait" below waits for all to
+ # finish.
rm -f "$log.xz"
(
xz "$log"
- cp "$log.xz" ${rr[top_artifacts]}/sumfiles/
+ cp "$log.xz" "$run_step_top_artifacts/00-sumfiles/"
) &
- done < <(find builds/ -name "*.sum" -print0)
- # Wait for logs to compress
- wait
+ done < <(find builds/ \( -name '*.sum' -o -name '*.sum.[0-9]*' \) \
+ -print0)
+
+ # ABE re-wrote the $flaky_tests file to contain entries only
+ # for the new flaky tests detected in this run.
+ if [ -s "$flaky_tests" ]; then
+ sumfiles="$run_step_top_artifacts/sumfiles"
+ if [ "$res" != 0 ]; then
+ sumfiles="$run_step_top_artifacts/00-sumfiles"
+ fi
+
+ # Mark new flaky entries to expire in 12 weeks.
+ # The upside of expiration is that we will have an up-to-date list
+ # of flaky tests, which we can address.
+            # The downside is that we will spend a few more CPU cycles
+ # to re-detect flaky tests.
+ local expire
+ expire=$(date -d "now+12 weeks" +%Y%m%d)
+ # A lot of thought went into the following nugget:
+ expire="expire=$expire"
+ sed -i -e "s#^flaky | #flaky,$expire | #" "$flaky_tests"
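+            # E.g., an entry "flaky | FAIL: gdb.base/foo.exp: bar" becomes
+            # "flaky,expire=20991231 | FAIL: gdb.base/foo.exp: bar"
+            # (test name and date are illustrative).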
+
+ # Move flaky fails to the sumfiles so that they will be
+ # fetched by next run's get_git_history().
+ echo "# From ${BUILD_URL-$(pwd)}:" > "$sumfiles/flaky.xfail"
+ cat "$flaky_tests" >> "$sumfiles/flaky.xfail"
+ fi
+
+ if [ $res -eq 0 ] \
+ && [ -f $run_step_artifacts/testresults-mail-body.txt ]; then
+ (
+ # Move testresults files, so that it's easier to find them
+ # later when we want to send them via email.
+ rm -rf $run_step_top_artifacts/testresults
+ mkdir $run_step_top_artifacts/testresults
+ mv $run_step_artifacts/testresults-mail-recipients.txt \
+ $run_step_artifacts/testresults-mail-subject.txt \
+ $run_step_top_artifacts/testresults/
+ # Add a pointer to the build origin, for easier tracking
+ echo "# From ${BUILD_URL-$(pwd)}:" \
+ > $run_step_top_artifacts/testresults/testresults-mail-body.txt
+ cat $run_step_artifacts/testresults-mail-body.txt \
+ >> $run_step_top_artifacts/testresults/testresults-mail-body.txt
+ rm $run_step_artifacts/testresults-mail-body.txt
+ ) &
+ if ! wait $!; then
+ echo "christophe.lyon@linaro.org" \
+ > artifacts/jenkins/error-mail-recipients.txt
+ echo -e "${BUILD_URL-}\nERROR: failed to process testresults" \
+ >> artifacts/jenkins/error-mail-body.txt
+ fi
+ fi
fi
+
+ # Wait for logs to compress.
+ wait
+
+ return $res
+ )
+}
+
+# Print sysroot path under ABE's build tree.
+print_abe_sysroot ()
+{
+ (
+ set -euf -o pipefail
+
+ local host target sysroot
+ host=$(print_gnu_target native)
+ target=$(print_gnu_target ${rr[target]})
+
+ # FIXME: This is a copy of ugly code from abe/lib/globals.sh:
+    # init_globals_and_PATH(). Don't ask me why we have different sysroot
+ # paths for cross and native cases.
+ sysroot="$(pwd)/abe/builds/destdir/$host"
+ if [ "$host" != "$target" ]; then
+ sysroot="$sysroot/$target"
+ fi
+ if [ "${rr[target]}" != "woa64" ]; then
+ # FIXME: WoA toolchain uses mingw CRT, and this is a quick
+ # fix to make it build. At the moment ABE pretends to use
+ # newlib library when building mingw GCC. See settings of
+ # stage2_flags in abe/config/gcc.conf.
+ sysroot="$sysroot/libc"
+ fi
+
+ echo "$sysroot"
+ )
+}
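+# E.g., on a hypothetical x86_64 host building for aarch64-linux-gnu this
+# prints something like
+# "<pwd>/abe/builds/destdir/x86_64-pc-linux-gnu/aarch64-linux-gnu/libc".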
+
+# If we bisect a regression between different major versions of Glibc,
+# then we might get a mixed sysroot with several versions of ld-M.N.so and
+# other binaries installed side-by-side. Such a sysroot will break
+# benchmarking, which requires a single ld-*.so binary to be present.
+# Similarly, weird problems can occur if we are bisecting linux
+# and re-installing kernel headers one on top of another.
+# Forcefully delete sysroot before building C library or linux headers.
+clean_sysroot ()
+{
+ (
+ set -euf -o pipefail
+
+ rm -rf "$(print_abe_sysroot)"
)
}
@@ -444,28 +925,26 @@ build_llvm ()
(
set -euf -o pipefail
- local use_abe=${1-false}
+ local projects="${1-clang;lld}"
+ local extra_targets="${2-}"
+ local metric_id="${3-}"
clone_repo llvm
-
+ if [ x"$metric_id" = x"num_vect_loops" ]; then
+ wget -O llvm-vect-metric.diff "https://git.linaro.org/toolchain/jenkins-scripts.git/plain/downstream_patches/llvm-vect-metric.diff"
+ git -C llvm apply "$(pwd)/llvm-vect-metric.diff"
+ fi
sanity_check_pwd
+ local workspace
+ workspace=$(pwd)
+
# Setup ccache and ninja wrappers.
# shellcheck disable=SC2115
rm -rf "$(pwd)/bin"
mkdir "$(pwd)/bin"
- cat > "$(pwd)/bin/cc" <<EOF
-#!/bin/sh
-exec ccache /usr/bin/gcc "\$@"
-EOF
- chmod +x "$(pwd)/bin/cc"
-
- cat > "$(pwd)/bin/c++" <<EOF
-#!/bin/sh
-exec ccache /usr/bin/g++ "\$@"
-EOF
- chmod +x "$(pwd)/bin/c++"
+ setup_ccache "$(pwd)/bin"
if [ -f /usr/local/bin/ninja.bin ]; then
# Use ninja configuration from llvm buildbots to avoid running out of RAM.
@@ -479,20 +958,19 @@ EOF
PATH=$(pwd)/bin:$PATH
export PATH
- local binutils_incdir
- # Freshen up build and install directories. We rely on ccache for fast rebuilds.
- if $use_abe; then
- rsync -a --del abe/builds/destdir/x86_64-pc-linux-gnu/ llvm-install/
- binutils_incdir=$(pwd)/binutils/include
- else
- rm -rf llvm-install
- binutils_incdir=/usr/include
- fi
- rm -rf llvm-build
+ # Freshen up build and install directories. We rely on ccache for fast
+ # rebuilds.
+ rm -rf llvm-build llvm-install
mkdir -p llvm-build
cd llvm-build
- cmake -G Ninja ../llvm/llvm "-DLLVM_ENABLE_PROJECTS=clang;lld" -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=True -DCMAKE_INSTALL_PREFIX=../llvm-install "-DLLVM_TARGETS_TO_BUILD=$(print_llvm_target ${rr[target]})" -DLLVM_BINUTILS_INCDIR=$binutils_incdir
+ local llvm_targets
+ llvm_targets="$(print_llvm_target ${rr[target]})${extra_targets}"
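+    # E.g., for rr[target]="aarch64" and extra_targets=";X86" this would
+    # yield "AArch64;X86" (assuming print_llvm_target maps aarch64 to
+    # AArch64).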
+
+ cmake -G Ninja ../llvm/llvm "-DLLVM_ENABLE_PROJECTS=$projects" \
+ -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=True \
+ -DCMAKE_INSTALL_PREFIX=../llvm-install \
+ "-DLLVM_TARGETS_TO_BUILD=$llvm_targets" -DCLANG_DEFAULT_LINKER=lld
ccache -z
ninja
ninja install
@@ -518,273 +996,379 @@ no_build_regression_p ()
fi
local build_result_ref build_result_new
- build_result_ref=$(grep -v "^#" $ref_artifacts/results | tail -n1)
- build_result_new=$(grep -v "^#" $new_artifacts/results | tail -n1)
-
- if [ $build_result_new -lt $build_result_ref ]; then
- # In log scan for errors below
- # - sed -e 's/"[^"]*"//g' -- removes quoted "error: app diagnostics" strings
- # - " error:" detects compiler errors from GCC and Clang (including GCC ICEs)
- # - "^ERROR:" detects linker errors
- # - ": undefined reference" detects missing symbols during linking
- # - "] Error " detects GNU make errors
- # Then grep for "grep" to exclude other uses of this search.
- cat > $run_step_artifacts/results.regressions <<EOF
+ build_result_ref=$(grep -v -E "^#|^$" $ref_artifacts/results | tail -n1)
+ build_result_new=$(grep -v -E "^#|^$" $new_artifacts/results | tail -n1)
+
+ if [ $build_result_new -ge $build_result_ref ]; then
+ return 0
+ fi
+
+    # If we observe a regression with a negative score, consider the results
+    # invalid. Returning EXTERNAL_FAIL avoids any bisect, and the run will be
+    # marked as UNSTABLE. A side-effect of this is the lack of reduction to a
+    # single component: a failure to build one of the components will just
+    # stop the job in UNSTABLE state and not attempt to update the other
+    # components.
+ if [ $build_result_new -lt 0 ]; then
+ return $EXTERNAL_FAIL
+ fi
+
+ local last_log
+ last_log=$(find $new_artifacts/ -name console.log.xz | sort -g | tail -n1)
+    # In the log scan for errors below
+ # - sed -e 's/"[^"]*"//g' -- removes quoted "error: app diagnostics" strings
+ # - " error:" detects compiler errors from GCC and Clang (including GCC ICEs)
+ # - "^ERROR:" detects linker errors
+ # - ": undefined reference" detects missing symbols during linking
+ # - "] Error " detects GNU make errors
+ # Then grep for "grep" to exclude other uses of this search.
+ # Do this twice, once with the abe-debug-build: prefix (where we
+ # disabled build parallelism to make debug easier), once without
+ # this prefix if for some reason the failure occurred only with
+ # parallelism enabled.
+ local debug_log
+ debug_log=$(mktemp)
+
+ xzcat $last_log | \
+ grep abe-debug-build: | \
+ sed -e 's/abe-debug-build: //' | \
+ sed -e 's/"[^"]*"//g' | \
+ grep " error:\|^ERROR:\|: undefined reference\|\] Error " | \
+ grep -v "grep" | \
+ head | \
+ sed -e "s/^/# /" > $debug_log
+
+ cat > $run_step_artifacts/results.regressions <<EOF
# First few build errors in logs:
-$(cat $new_artifacts/console.log | sed -e 's/"[^"]*"//g' | grep " error:\|^ERROR:\|: undefined reference\|\] Error " | grep -v "grep" | head | sed -e "s/^/# /")
EOF
- return 1
+
+ if [ -s $debug_log ]; then
+ cat $debug_log >> $run_step_artifacts/results.regressions
+ else
+ xzcat $last_log | \
+ grep -v abe-debug-build: | \
+ sed -e 's/"[^"]*"//g' | \
+ grep " error:\|^ERROR:\|: undefined reference\|\] Error " | \
+ grep -v "grep" | \
+ head | \
+ sed -e "s/^/# /" >> $run_step_artifacts/results.regressions
fi
- return 0
+ rm -f $debug_log
+
+ return 1
)
}
-# Check if current build regressed compared to the baseline
-# (unless ${rr[update_baseline]} is set to "reset").
-check_regression ()
+# Generate trigger-build-* and trigger-bisect files to reduce the regression.
+# $1: Directory for trigger-* files.
+# $2: Score of the build.
+create_trigger_files ()
{
(
set -euf -o pipefail
+ local trigger_dest="$1"
+ local score="$2"
+
+ local -a changed_components
+ IFS=" " read -r -a changed_components <<< "$(print_changed_components)"
+
+ if [ ${#changed_components[@]} -gt 1 ] \
+ || { [ x"${rr[mode]}" = x"bisect" ] \
+ && ! [ "$score" -lt 0 ] 2>/dev/null; }; then
+ # If we have several changed components, then trigger individual builds
+ # for these components to narrow down the problem to a single component.
+ # Also generate trigger-build-* files as a favor to
+ # round-robin-bisect.sh script to distinguish "bad" from "skip" builds
+ # and to provide templates for triggering follow-up builds.
+ # Note that we can't use "[ "$score" -ge 0 ]" condition above and below
+ # because it will give wrong answer for non-numeric scores like "all"
+        # and "boot". The "-lt" comparison, however, will produce the correct
+        # answer, albeit with an occasional harmless error message.
+ local -a update_components
+
+ while read -a update_components; do
+ local c update_components2
+ update_components2=$(echo "${update_components[@]}" | tr ' ' '-')
+
+ # find the list of components_to_update
+ local -a components_to_update=()
+ for c in ${rr[components]}; do
+ if echo "${update_components[@]}" | tr ' ' '\n' \
+ | grep "^$c\$" >/dev/null; then
+ assert_with_msg "Should never happen for precommit builds" \
+ [ "${rr[${c}_git]}" != "HEAD" ]
+ components_to_update+=("${c}")
+ fi
+ done
- local score
- score=$(grep -v "^#" ${rr[top_artifacts]}/results | tail -n1)
-
- if [ x"$score" = x"-$EXTERNAL_FAIL" ]; then
- echo "ERROR: We have encountered some infrastructure problem (e.g.,"
- echo " benchmarking boards are offline), andso we can't finish"
- echo " the build."
- # Exit now and don't update baseline artifacts.
- # By not creating trigger-build-* files, we signal
- # round-robin-bisect.sh to skip this build/revision.
- exit $EXTERNAL_FAIL
- fi
-
- if [ x"${rr[update_baseline]}" = x"rebase" ]; then
- # No-one ever used "rebase" mode. It's now obsoleted by custom ci_projects.
- local base_artifacts_head base_artifacts_tail
- base_artifacts_head=$(git -C base-artifacts rev-parse HEAD)
- base_artifacts_tail=$(git -C base-artifacts rev-list --max-parents=0 HEAD)
- git -C base-artifacts reset --hard $base_artifacts_tail
-
- ${rr[no_regression_p]} ${rr[top_artifacts]} base-artifacts &
- if wait $!; then
- echo "Current results are no better then the all-time-best results. \
-No reason to rebase base-artifacts."
- false
+ # print components_to_update to the trigger file
+ if [ "${rr[dynamic_components_list]+abc}" ]; then
+ echo "dynamic_components_list=${components_to_update[*]}"
+ else
+ for c in "${components_to_update[@]}"; do
+ echo "${c}_git=${rr[${c}_git]}"
+ done
+ fi > $trigger_dest/trigger-build-$update_components2
+
+ # Add update_baseline setting to the parameters, so that
+ # "ignore" builds will trigger "ignore" reduction builds.
+ # Do not set update_baseline for bisect builds, since these
+ # trigger-build-* files will then be used as templates to trigger
+ # last_good and first_bad builds.
+ if [ x"${rr[mode]}" != x"bisect" ]; then
+ echo "update_baseline=${rr[update_baseline]}" \
+ >> $trigger_dest/trigger-build-$update_components2
+ fi
+ done < <(${rr[breakup_changed_components]})
+ elif [ ${#changed_components[@]} = 1 ] \
+ && ! [ "$score" -lt 0 ] 2>/dev/null; then
+ local single_component="${changed_components[0]}"
+
+        # Trigger a bisect of the failure in the single changed component in
+        # all steps with a positive result. If $score is less than 0, then
+        # the regression is not very interesting, so don't bisect.
+
+        # Rather than the current git commit sha1, use the original
+        # specification, which can be a branch name: in case the
+        # regression has already been fixed in the branch, we won't
+        # bother running a useless bisection.
+ local bad_git
+ bad_git=$(get_current_manifest "{rr[${single_component}_git]}")
+
+ cat > $trigger_dest/trigger-bisect <<EOF
+current_project=$single_component
+bad_git=$bad_git
+EOF
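+        # The resulting trigger-bisect file might contain, e.g. (values are
+        # illustrative):
+        #   current_project=gcc
+        #   bad_git=https://github.com/gcc-mirror/gcc.git#master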
+ if [ -f $run_step_artifacts/extra-bisect-params ]; then
+ cat $run_step_artifacts/extra-bisect-params >> $trigger_dest/trigger-bisect
fi
+ fi
+ )
+}
- git -C base-artifacts reset --hard $base_artifacts_head
- else
- # Generate comparison artifacts for update, reset, init and push modes.
- ${rr[no_regression_p]} base-artifacts ${rr[top_artifacts]} &
- if wait $!; then
- # All good, no regression
- return
- fi
+# Make sure the manifest has all up-to-date rr[] fields that other
+# round-robin-*.sh scripts require. This is an easy way to update manifests
+# saved in base-artifacts.git to the latest format by rewriting result history.
+finalize_manifest ()
+{
+ (
+ set -euf -o pipefail
- if [ -f $run_step_artifacts/results.regressions ]; then
- # Add regression info generated by no_regression_p to top-level
- # results file.
- cat $run_step_artifacts/results.regressions \
- >> ${rr[top_artifacts]}/results
+ echo "# Saving rr[] in the manifest" | manifest_out
+ local field value
+ while read -r field; do
+ value=$(get_current_manifest "{rr[$field]-}")
+ if [ "$value" = "${rr[$field]}" ]; then
+ continue
fi
- # We've got a regression. Generate trigger-* files.
- local single_component
- single_component=$(print_single_updated_component)
- local trigger_dest
-
- if [ x"${rr[update_baseline]}" = x"update" ]; then
- # When "updating" baseline place trigger-* files at top level
- # where jenkins expects them -- and trigger the followup builds.
- trigger_dest="${rr[top_artifacts]}"
- else
- # We don't want to trigger follow up builds when "pushing"
- # baseline. So, for the record, place trigger-* files in
- # the step's artifacts directory.
- trigger_dest="$run_step_artifacts"
+ if [ "$value" != "" ]; then
+ echo "# WARNING: overriding previous rr[$field]=$value" \
+ | manifest_out
fi
- # 1. If $score is less-than 0, then the regression is not very
- # interesting, so reduce down to component, but don't bisect. This
- # allows non-broken components to advance their revisions.
- # 2. Otherwise, trigger builds with narrowed-down list of updated
- # components -- by generating trigger-build-* files. Also generate
- # these files as a favor to round-robin-bisect.sh script.
- # 3. Otherwise, we have narrowed-down regression to a single component,
- # so trigger bisect build for this component -- by generating
- # trigger-bisect file. Note that the negative "! [ $score -lt 0 ]"
- # condition is to handle non-numeric scores like "all" and "boot".
- if [ $score -lt 0 ] 2>/dev/null && [ x"${rr[mode]}" = x"bisect" ]; then
- # Don't bisect uninteresting regressions.
- :
- elif [ x"$single_component" = x"" ] || [ x"${rr[mode]}" = x"bisect" ]; then
- local -a update_components
-
- while read -a update_components; do
- local c update_components2
- update_components2=$(echo "${update_components[@]}" | tr ' ' '-')
-
- for c in ${rr[components]}; do
- if echo "${update_components[@]}" | tr ' ' '\n' | grep -q "^$c\$"; then
- echo "${c}_git=${rr[${c}_git]}"
- else
- echo "${c}_git=baseline"
- fi
- done > $trigger_dest/trigger-build-$update_components2
- done < <(${rr[breakup_updated_components]})
- elif ! [ $score -lt 0 ] 2>/dev/null; then
- # Bisect failures in all steps after "-1" step.
- local cur_rev
- cur_rev=$(git -C $single_component rev-parse HEAD)
-
- cat > $trigger_dest/trigger-bisect <<EOF
-current_project=$single_component
-bad_git=${rr[${single_component}_git]%#*}#$cur_rev
+ cat <<EOF | manifest_out
+rr[$field]="${rr[$field]}"
EOF
- if [ -f $run_step_artifacts/extra-bisect-params ]; then
- cat $run_step_artifacts/extra-bisect-params >> $trigger_dest/trigger-bisect
- fi
- fi
-
- if [ x"${rr[update_baseline]}" = x"update" ]; then
- echo "Detected a regression in \"update\" mode!"
- false
- else
- # Compare results to generate logs and other artifacts,
- # but we don't really care about regressions.
- :
- fi
- fi
+ done < <(IFS=$'\n'; echo "${!rr[*]}" | sort | grep -v "^top_artifacts\$")
)
}
-# Commit current result and artifacts to the baseline repository
-update_baseline ()
+
+# Check if current build regressed compared to the baseline.
+#
+# As inputs we have:
+# $score -- last line of artifacts/results
+# $res -- exit code of no_regression_p()
+#
+# Additionally, ${rr[update_baseline]} and ${rr[mode]} affect how the rest
+# of the build will proceed -- via create_trigger_files().
+#
+# Decision matrix:
+#
+# 1. score >= 0 && res == 0: return 0
+# OK; new results good for baseline;
+# update baseline if != ignore (e.g., not bisect test or precommit)
+# - round-robin-notify.sh compares results in artifacts/ vs
+# base-artifacts#HEAD^
+# don't update baseline if == ignore (e.g., bisect test or precommit)
+# - round-robin-notify.sh compares results in artifacts/ vs
+# base-artifacts#HEAD
+# send precommit feedback if precommit testing is active;
+# (see [R] below) send regression report if notify==onregression
+# push baseline if != ignore (e.g., not precommit build);
+# trigger pre-commit testing if SCM build;
+#
+# 2. score >= 0 && res == E: return E
+# sporadic fail; wait for next build; don't trigger anything;
+# don't update baseline even with force or init;
+# Can happen in, e.g., tcwg_native_build if git server fails while
+# cloning repo for one of the components;
+#
+# 3. score >= 0 && res != {0,E}: return I (or 0 if update_baseline==force)
+# regression (or expected regression if update_baseline==force);
+# if update_baseline == onsuccess -- trigger reduce or bisect;
+# [R] if update_baseline == force -- succeed, return 0, treat as (1).
+# if update_baseline == ignore -- nothing
+# - if mode==bisect, then create trigger-* files as favor for
+# round-robin-bisect.sh script.
+# - if pre-commit testing is active, then send notifications
+# similar to "update_baseline==force". See [P] below.
+#
+# 4. score < 0 && res == 0: return E
+# should not happen; baseline is bad and current build is no better;
+# don't update the baseline or reduce or bisect; send error-mail
+#
+# 5. score < 0 && res == E: return E
+# sporadic fail; wait for next build; don't trigger anything;
+# don't update baseline even with force or init.
+#
+# 6. score < 0 && res != {0,E}: return I
+# uninteresting fail; trigger reduce, but no bisect
+# don't update baseline even with force or init.
+# This happens when the build failed before reaching "interesting"
+# phase -- e.g., glibc build failed in a configuration that checks
+# gcc testsuite results in a cross-toolchain. In this case we want
+# to continue testing and updating baseline when advancing binutils and
+# gcc sources, while waiting for glibc build to get fixed.
+# For this reason we often have matching tcwg_*_build ci_projects for
+# most tcwg_*_check ci_projects. Non-interesting (for tcwg_*_check) failures
+# are reduced, bisected and reported by tcwg_*_build projects.
+#
+# 7. score == -E: return E
+# special case indicating a sporadic failure; same as (5) but without
+# calling no_regression_p() to get "res".
+#
+# "E" -- EXTERNAL_FAIL; "I" -- INTERNAL_FAIL
+#
+# Notes:
+# - If score < 0 then we never update baseline -- even with force or init.
+# This allows us to always have a reasonable "interesting" baseline,
+# which is necessary for pre-commit testing. E.g., we don't want to
+# pre-commit test upstream patches against a baseline that doesn't build.
+#
+# - If we return E -- exit, do nothing, wait for the next timed trigger.
+# Hopefully by that time infra fixes itself.
+# In the .yaml files we have a choice on how to implement this:
+# a. Mark the build as FAILURE, which will prevent any subsequent steps
+# from running. Pros: simple, doesn't require conditional-steps;
+# cons: the build is shown as "red" failure.
+# b. Mark the build as UNSTABLE, and then condition all subsequent steps
+# to run only for current-status==SUCCESS, which is what we are doing
+# now. Pros: the build is shown as "yellow"; cons: all subsequent
+# steps need to be wrapped in conditional-step.
+#
+# - [P] It's not clear how to send notifications for failed pre-commit
+# builds. At the moment we run trigger-followup-builds for failed
+# builds and exit, thus avoiding notify-and-push step for failed builds.
+# It seems we need to
+# a. Split notify-and-push into "notify" and "push"
+# b. Move "notify" before trigger-followup-builds and leave "push"
+# after trigger-followup-builds.
+# c. We need to make sure "notify" does not send anything when it is
+# running after a failed build.
+# d. Trigger of pre-commit testing needs to happen only after "push",
+# so we know that we have a good recent baseline.
+#
+# - If we return E during precommit testing, the check will be in "pending"
+# state, and testing of the patch will be retriggered on the next round.
+#
+# - If we return N != {0, I, E}, then the exit was due to a script error,
+#   so send error-mail and follow the case of returning "E".
+#
+# - If we returned "0", then build should be marked as SUCCESS.
+#
+# - If we returned "I", then build should be marked FAILURE
+#
+# - If we returned "E" or anything else, then build should be marked
+# as UNSTABLE.
+check_regression ()
{
(
set -euf -o pipefail
- local amend=""
- local rebase_head rebase_tail
-
- if [ x"${rr[update_baseline]}" = x"init" ]; then
- amend="--amend"
- elif [ x"${rr[update_baseline]}" = x"push" ]; then
- :
- elif [ x"${rr[update_baseline]}" = x"rebase" ]; then
- rebase_head=$(git -C base-artifacts rev-parse HEAD)
- rebase_tail=$(git -C base-artifacts rev-list --max-parents=0 HEAD)
- git -C base-artifacts reset --hard $rebase_tail
- amend="--amend"
- else
- # ${rr[update_baseline]} == update, reset
- local prev_head=""
-
- # We discard baseline entries for results worse or same than
- # the current one, but keep entries for results that are better
- # (so that we have a record of pending regressions).
- while true; do
- ${rr[no_regression_p]} base-artifacts ${rr[top_artifacts]} &
- if ! wait $!; then
- break
- fi
+ local score
+ score=$(grep -v "^#" ${rr[top_artifacts]}/results | tail -n1)
- prev_head=""
- if git -C base-artifacts rev-parse HEAD^ >/dev/null 2>&1; then
- # For every regression we want to keep artifacts for the first-bad
- # build, so reset to the most relevant regression (marked by reset-baseline).
- if [ -f base-artifacts/reset-baseline ] \
- && [ x"${rr[update_baseline]}" = x"update" ]; then
- prev_head=$(git -C base-artifacts rev-parse HEAD)
- fi
- git -C base-artifacts reset --hard HEAD^
- else
- # We got to the beginning of git history, so amend the current
- # commit. The initial state of baseline is "empty" branch,
- # which we treat as worst possible in ${rr[no_regression_p]}().
- amend="--amend"
- break
- fi
- done
+ if [ x"$score" = x"-$EXTERNAL_FAIL" ]; then
+ echo "ERROR: We have encountered some infrastructure problem (e.g.,"
+ echo " benchmarking boards are offline), and we can't finish"
+ echo " the build."
- if [ x"$prev_head" != x"" ]; then
- git -C base-artifacts reset --hard $prev_head
- fi
+ # Exit now and don't update baseline artifacts.
+ # By not creating trigger-build-* files, we signal
+ # round-robin-bisect.sh to skip this build/revision.
+ return $EXTERNAL_FAIL
fi
- # Rsync current artifacts. Make sure to use -I rsync option since
- # quite often size and timestamp on artifacts/results will be the same
- # as on base-artifacts/results due to "git reset --hard HEAD^" below.
- # This caused rsync's "quick check" heuristic to skip "results" file.
- # !!! From this point on, logs and other artifacts won't be included
- # in base-artifacts.git repo (though they will be uploaded to jenkins).
- rsync -aI --del --exclude /.git ${rr[top_artifacts]}/ base-artifacts/
-
- local rev_count
- if [ x"$amend" = x"" ]; then
- rev_count=$(git -C base-artifacts rev-list --count HEAD)
- else
- rev_count="0"
- fi
+ local res
- local msg_title="$rev_count: ${rr[update_baseline]}"
+    # Generate comparison artifacts for all update_baseline modes.
+ ${rr[no_regression_p]} base-artifacts ${rr[top_artifacts]} &
+ res=0 && wait $! || res=$?
- if [ x"${rr[update_baseline]}" = x"reset" ]; then
- # Create a marker for builds that reset baselines (these are builds
- # for bisected regressions).
- touch base-artifacts/reset-baseline
+ # Move extra artifacts that no_regression_p generated to $top_artifacts.
+ if [ -d $run_step_artifacts/top-artifacts ]; then
+ rsync -a $run_step_artifacts/top-artifacts/ ${rr[top_artifacts]}/
+ rm -rf $run_step_artifacts/top-artifacts
fi
- local single_component
- single_component=$(print_single_updated_component)
- if [ x"$single_component" != x"" ]; then
- local single_rev
- single_rev=$(git -C $single_component rev-parse HEAD)
- msg_title="$msg_title: $single_component-$single_rev"
- else
- msg_title="$msg_title: $(print_updated_components "-")"
+ if [ -f $run_step_artifacts/results.regressions ]; then
+ # Add regression info generated by no_regression_p to top-level
+ # results file.
+ cat $run_step_artifacts/results.regressions \
+ >> ${rr[top_artifacts]}/results
fi
- msg_title="$msg_title: $(grep -v "^#" ${rr[top_artifacts]}/results | tail -n1)"
-
- git -C base-artifacts add .
- git -C base-artifacts commit $amend -m "$msg_title
-BUILD_URL: ${BUILD_URL-$(pwd)}
-
-results:
-$(cat ${rr[top_artifacts]}/results)"
-
- if [ x"${rr[update_baseline]}" = x"rebase" ]; then
- for rebase_tail in $(git -C base-artifacts rev-list --reverse $rebase_head); do
- git -C base-artifacts checkout $rebase_tail -- .
- git -C base-artifacts commit -C $rebase_tail
- done
+ rr[no_regression_result]="$res"
+ finalize_manifest
+
+ if [ $res = 0 ]; then
+ if [ "$score" -lt 0 ] 2>/dev/null; then
+ # Case (4) in the comment above.
+ if [ -d ${rr[top_artifacts]}/jenkins ]; then
+ echo "maxim.kuvyrkov@linaro.org, laurent.alfonsi@linaro.org" \
+ > artifacts/jenkins/error-mail-recipients.txt
+ echo -e "${BUILD_URL-}\nERROR: case (4) in check_regression" \
+ >> artifacts/jenkins/error-mail-body.txt
+ fi
+ return $EXTERNAL_FAIL
+ else
+ # All good, no regression
+ return 0
+ fi
+ elif [ $res = $EXTERNAL_FAIL ]; then
+ # Comparison failed to produce a meaningful result
+ return $EXTERNAL_FAIL
fi
- )
-}
-# Push to baseline branches and to base-artifacts repo.
-push_baseline ()
-{
- (
- set -euf -o pipefail
+ assert_with_msg "no_regression_p should succeed in init baseline mode" \
+ [ x"${rr[update_baseline]}" != x"init" ]
+
+ # We've got a regression. Generate trigger-* files.
+ local trigger_dest
+ if [ "${rr[update_baseline]}" = "onsuccess" ] \
+ || [ "${rr[mode]}" = "bisect" ]; then
+ # We are seeing a failure, so instead of updating baseline start
+ # reducing/bisecting the failure. Create trigger-* files at top level
+        # where jenkins expects them -- and trigger the follow-up builds.
+ trigger_dest="${rr[top_artifacts]}"
+ else
+        # We don't want to trigger follow-up builds when forcing
+ # the baseline. So, for the record, place trigger-* files in
+ # the step's artifacts directory.
+ trigger_dest="$run_step_artifacts"
+ fi
- git_init_linaro_local_remote base-artifacts baseline false
- git_push base-artifacts baseline ${rr[baseline_branch]}
+ create_trigger_files "$trigger_dest" "$score"
- if [ x"${rr[update_baseline]}" = x"rebase" ]; then
- return
+ if [ "$score" -lt "0" ]; then
+ # Case (6) above.
+ return $INTERNAL_FAIL
+ elif [ "${rr[update_baseline]}" = "force" ]; then
+ return 0
fi
- local url
- local c
- for c in $(print_updated_components); do
- # Clone (but don't checkout) always-present "empty" branch of
- # the baseline repo. This initializes read/write "baseline" remote.
- url=$(print_baseline_repo "$c" false)
- git_set_remote "$c" baseline "$url"
- git_push $c baseline ${rr[baseline_branch]}
- done
+ echo "Detected a regression!"
+ return $INTERNAL_FAIL
)
}
diff --git a/start-container-docker.sh b/start-container-docker.sh
index a1e30b51..7b4d822c 100755
--- a/start-container-docker.sh
+++ b/start-container-docker.sh
@@ -13,18 +13,19 @@ set -e -o pipefail
# to run inside the container.
# - definition of ${CONTAINER_CLEANUP}, a cleanup statement remove the
# container on exit for instance
-# - definition of ${session_host} and ${session_port}, can be used for
-# a remote connexion to the container
+# - definition of ${session_host}, ${session_port}, and ${session_opts[@]}
+# can be used for a remote connection to the container
usage() {
- echo "Usage: $0 [--arch container-arch] --distro flavour [--docker_opts opts] [--dryrun true/false] [--label label] [--newuser username:[uid]] [--node node] [--prefix prefix] [--session-host host] [--session-name name] [--ssh_info true/false] [--task {build|test|bench}] [--user user] [--weight weight] [--verbose true/false]"
+ echo "Usage: $0 [--arch container-arch] --distro flavour [--dryrun true/false] [--label label] [--newuser username:[uid]] [--node node] [--prefix prefix] [--secondary true/false] [--session-host host] [--session-name name] [--ssh_info true/false] [--task {build|test|bench}] [--user user] [--weight weight] [--verbose true/false] [--security options]"
echo
echo " container-arch: architecture (eg: amd64, i386, arm64, armhf)"
- echo " distro: distribution (eg: bionic)"
+ echo " distro: distribution (eg: lts_1)"
echo " dryrun: boolean, just print commands if true"
echo " label: jenkins label; container is started on least-busy node; also sets container architecture"
echo " newuser: new user to create inside container, <username>[:<uid>] specification."
echo " node: jenkins node; container is started on host mapped to the node"
echo " prefix: prefix to prepend to output variables and functions"
+ echo " secondary: create a secondary container that will use the same workspace"
echo " session-host: hostname where the container will run, defaults to localhost"
echo " useful if the name resolution does not work correctly"
echo " session-name: session, in case the default '\$BUILD_NUMBER-\$JOB_NAME' is not suitable"
@@ -32,6 +33,7 @@ usage() {
echo " task: type of container (build, test or bench, default=build)"
echo " user: remote user to use in the container."
echo " weight: container weight, reserves resources. Default=1"
+ echo " security: override the default container security options (currently CAP_SYS_PTRACE and unconfined seccomp)."
echo " verbose: whether enable verbose output. Default=false"
exit 1
}
@@ -51,12 +53,12 @@ exec 1>&2
container_arch="default"
distro="default"
-docker_opts=
dryrun=false
label=
node=
newuser=
prefix=
+secondary=false
session_host=
session_name=
ssh_info=false
@@ -64,6 +66,7 @@ task="build"
weight=1
user=
verbose="false"
+security=""
while [ $# -ge 1 ]
do
@@ -78,11 +81,6 @@ do
[ x${distro} = x ] && usage
shift 2
;;
- --docker_opts)
- docker_opts="$2"
- [ x"${docker_opts}" = x ] && usage
- shift 2
- ;;
--dryrun)
dryrun=$2
[ x${dryrun} = x ] && usage
@@ -109,6 +107,11 @@ do
[ x${prefix} = x ] && usage
shift 2
;;
+ --secondary)
+ secondary=$2
+ [ x$secondary = x ] && usage
+ shift 2
+ ;;
--session-host)
session_host=$2
[ x${session_host} = x ] && usage
@@ -127,7 +130,7 @@ do
--task)
task=$2
case "${task}" in
- build|bench|test) ;;
+ build|precommit|bench|test) ;;
*) usage ;;
esac
shift 2
@@ -147,6 +150,10 @@ do
[ x${verbose} = x ] && usage
shift 2
;;
+ --security)
+ security="$2"
+ shift 2
+ ;;
*)
echo "Unsupported option: $1"
usage
@@ -181,6 +188,14 @@ if [ x"$session_host" = x"" ]; then
# Get first FQDN. This name needs to have .tcwglab suffix for VPN'ed
# machines and entries in .ssh/config for external machines.
session_host=$(hostname -A | cut -d" " -f 1)
+ if [ "$session_host" = "" ]; then
+ # A WSL environment returns an empty string for "hostname -A", but outputs
+ # a proper hostname (set in /etc/wsl.conf) for "hostname".
+ session_host=$(hostname)
+ assert_with_msg "Cannot get hostname" \
+ [ x"$session_host" != x"" ]
+ fi
+
arch_host="localhost"
else
arch_host="$session_host"
@@ -202,7 +217,7 @@ if [ x"$session_name" = x ]; then
# as set by Jenkins.
# shellcheck disable=SC2153
if [ "x$BUILD_NUMBER" != "x" ] && [ "x$JOB_NAME" != "x" ]; then
- session_name="$BUILD_NUMBER-$JOB_NAME"
+ session_name="$BUILD_NUMBER-$JOB_NAME-$task"
else
session_name="$USER-$(date +%Y%m%d-%H_%M_%S)"
fi
@@ -210,39 +225,84 @@ if [ x"$session_name" = x ]; then
fi
# Resolve LTS and LTS-1 values to Ubuntu distros.
-case "$distro:$container_arch" in
- lts_1:*|default:*) distro=bionic ;;
- lts:armhf)
- # There's still no arm32v7/ubuntu:focal docker image, so
- # force using bionic for armhf for now.
- distro=bionic
- ;;
- lts:*) distro=focal ;;
+case "$distro" in
+ lts_1) distro=focal ;;
+ lts|default) distro=jammy ;;
esac
image=linaro/ci-${container_arch}-tcwg-build-ubuntu:${distro}
-# Avoid connexion sharing because of race conditions with parallel
-# builds
-SSH="ssh -S none"
+# Avoid connection sharing because of race conditions with parallel builds.
+# Also, we don't really need ssh agent forwarding here, so, since precommit
+# testing takes this path, disable it for extra caution. Also see the note
+# about ssh agent forwarding at wait_for_ssh_server below.
+SSH="ssh -Snone -oForwardAgent=no"
+
+pwd_translate=(cat)
+
+# Configure container for precommit testing:
+# - use tcwg-build user instead of tcwg-buildslave;
+# - disable ssh agent forwarding;
+# - use scratch docker volume for $WORKSPACE instead of bind-mounting from host;
+# -- use container_rsync() to transfer data to and from precommit container;
+# - mount everything else as read-only (e.g., ccache, snapshots-ref, etc.);
+# - translate absolute /home/* paths
+if [ "$task" = "precommit" ]; then
+ if [ "$newuser" = "" ]; then
+ newuser=tcwg-build
+ fi
+ if [ "$user" = "" ]; then
+ user="$newuser"
+ fi
+
+ if [ "${WORKSPACE+set}" = "set" ]; then
+ # Translate $WORKSPACE/* paths from $USER to $user. Or, specifically,
+ # from /home/tcwg-buildslave/workspace/* to
+ # /home/tcwg-build/workspace/*.
+ dst_workspace=$(echo "$WORKSPACE" | sed -e "s#^$HOME#/home/$user#")
+ pwd_translate=(sed -e "s#^$WORKSPACE#$dst_workspace#")
+ fi
+fi
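A minimal sketch of the path translation above, with illustrative values: assuming WORKSPACE=/home/tcwg-buildslave/workspace/bmk/1 and user=tcwg-build, dst_workspace becomes /home/tcwg-build/workspace/bmk/1, so:

    $ echo "$WORKSPACE/abe" | "${pwd_translate[@]}"
    /home/tcwg-build/workspace/bmk/1/abe

Only the leading $WORKSPACE component is rewritten; other paths pass through unchanged.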
+
+assert_with_msg "user and USER variables should not be set to the same value" \
+ [ x"$user" != x"$USER" ]
+
# Note that when we use this we *want* it to split on spaces,
# so that the shell runs:
# foo bar docker <...>
# Instead of:
# "foo bar docker" <...>
-DOCKER="$dryruncmd $SSH $session_host docker-wrapper"
+# Note: use "ssh -n" to avoid consuming stdin. This is especially important
+# in "while read $i;" loops we use to cleanup containers. Without this
+# we cleanup the first container, and entries for all other containers
+# are swallowed by ssh.
+DOCKER="$dryruncmd $SSH -n $session_host docker-wrapper"
$DOCKER maybepull $image || ssh_error $?
-SECURITY="--cap-add=SYS_PTRACE"
-# We need this because of a bug in libgo's configure script:
-# it would crash when testing "whether setcontext clobbers TLS
-# variables", and report neither "no" nor "yes", later making
-# configure fail.
-# Also, because the sanitizers need to disable ASLR during the tests
-# and docker needs to explicitly enable the process to do that on all
-# architectures.
-SECURITY="${SECURITY} --security-opt seccomp:unconfined"
+# If the configuration does not override the security options, use the default.
+if [ -z "$security" ]; then
+ security="--cap-add=SYS_PTRACE"
+ # We need this because of a bug in libgo's configure script:
+ # it would crash when testing "whether setcontext clobbers TLS
+ # variables", and report neither "no" nor "yes", later making
+ # configure fail.
+ # Also, because the sanitizers need to disable ASLR during the tests
+ # and docker needs to explicitly enable the process to do that on all
+ # architectures.
+ security="${security} --security-opt seccomp:unconfined"
+
+ case "$container_arch:$distro:$($DOCKER --version | cut -d" " -f3)" in
+ armhf:focal:18*)
+ # To run armhf focal images on old docker we need to disable
+ # seccomp via the --privileged option. We can't upgrade docker to
+ # a newer version on TK1s as we would lose the bridge network (presumably
+ # due to an incompatibility with the old 3.10 kernel), which we use in
+ # jenkins CI builds.
+ security="--privileged"
+ ;;
+ esac
+fi
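For reference, "docker --version" prints a line like "Docker version 18.09.7, build 2d0083d" (version assumed for illustration), so:

    $ docker --version | cut -d" " -f3
    18.09.7,

and the "armhf:focal:18*" pattern above matches armhf/focal containers on hosts still running docker 18.x -- the TK1 case described in the comment.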
# Reserve resources according to weight and task
nproc=$($SSH $session_host nproc --all)
@@ -261,32 +321,122 @@ if [ x"${JOB_NAME:+set}" = x"set" ]; then
job_name="$JOB_NAME"
fi
-IFS=" " read -r -a bind_mounts <<< "$(print_bind_mounts "$task" "$SSH $session_host")"
+wsl=false
+case "$($SSH $session_host uname -r)" in
+ *"-WSL2") wsl=true ;;
+esac
-bind_mounted_workspace=false
-bind_mounts_opt=()
-for bind_mount in "${bind_mounts[@]}"; do
- dir="${bind_mount%%:*}"
+lock_workspace=false
+mounts_opt=()
+chown_mounts=()
+git_mounts=()
+force_port=()
+
+readarray -t mounts < <(print_mounts "$task" "$job_name" \
+ "-$container_arch-$distro" \
+ $SSH "$session_host")
+
+if $wsl; then
+ # Enable WSL-Interop inside containers. This allows us to build toolchains
+ # inside docker containers inside WSL2 environments, and still have the ability
+ # to run generated win32 executables.
+ # If this doesn't work, make sure the WSL2 version is 2.0.14 or later; WSL 2.0.9
+ # has a bug preventing interop outside of the "main init" process tree.
+ mounts+=(/init:/init:ro /run/WSL:/run/WSL:ro)
+
+ # FIXME: WSL VM is on a private network, and we have several
+ # ports -- 22, 2222, and 32768 -- proxied inside it. I couldn't
+ # figure out how to proxy a port range, so it's simpler to configure
+ # docker to use a fixed port.
+ # We should try to configure bridged network for WSL VM, so that no port
+ # forwarding is necessary.
+ force_port=(-p 32768:22)
+fi
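A quick sanity check of the fixed-port mapping (container id hypothetical): with "-p 32768:22" docker publishes the container's sshd on a known host port, so:

    $ docker port "$session_id" 22
    0.0.0.0:32768

rather than the random high port that "-P" alone would pick.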
- if [ x"$dir" = x"$WORKSPACE" ]; then
- bind_mounted_workspace=true
+echo "MOUNTS: ${mounts[*]}"
+
+for mount in "${mounts[@]}"; do
+ # Disassemble the mount
+ ro=$(echo "$mount" | cut -s -d: -f 3)
+ if [ "$ro" != "" ]; then
+ assert [ "$ro" = "ro" ]
+
+ # This is a read-only bind-mount or volume mount, e.g.,
+ # - ssh host keys,
+ # - $WORKSPACE/base-artifacts/ for task==precommit,
+ # - ccache-* for task==precommit.
+ dst=$(echo "$mount" | cut -s -d: -f 2)
+ src=$(echo "$mount" | cut -s -d: -f 1)
+ else
+ dst=$(echo "$mount" | cut -s -d: -f 2)
+ if [ "$dst" != "" ]; then
+ # This is a read-write bind-mount or volume mount, e.g.,
+ # - $WORKSPACE for task==build,
+ # - ccache-* for task==build.
+ src=$(echo "$mount" | cut -s -d: -f 1)
+ assert_with_msg "Non-readonly mount for precommit task" \
+ [ "$task" != "precommit" ]
+ else
+ # This is a read-write scratch mount, e.g.,
+ # - $WORKSPACE for task==precommit.
+ dst="$mount"
+ src=""
+ fi
fi
- # Make sure all bind-mount /home/* directories exist.
- # If a host bind-mount dir doesn't exist, then docker creates it on
- # the host with root:root owner, which can't be removed by cleanup job.
- case "$dir" in
+ if [ "${WORKSPACE+set}" = "set" ] && [ "$dst" = "$WORKSPACE" ]; then
+ lock_workspace=true
+ fi
+
+ dst=$(echo "$dst" | "${pwd_translate[@]}")
+
+ # ccache-* volumes are owned by tcwg-buildslave, so don't let
+ # anyone else write into them. It's fine, though, to use them
+ # as read-only ccache for other users and for precommit testing.
+ # Also see round-robin.sh:setup_ccache().
+ case "$src" in
+ ccache-*)
+ if [ "$user" != "" ] && [ "$user" != "tcwg-buildslave" ]; then
+ dst="/home/$user/.ccache"
+ ro="ro"
+ fi
+ ;;
+ esac
+
+ case "$src" in
"/home/"*)
- $dryruncmd $SSH $session_host mkdir -p "$dir"
+ # Make sure all bind-mount /home/* directories exist.
+ # If a host bind-mount dir doesn't exist, then docker creates
+ # it on the host with root:root owner, which can't be removed
+ # by cleanup job.
+ $dryruncmd $SSH $session_host mkdir -p "$src"
+ ;;
+ "")
+ case "$dst:$ro" in
+ *":ro") ;; # This is a read-only mount
+ "/home/"*)
+ # Similarly to above "mkdir -p", chown scratch volumes
+ # under /home to $user.
+ chown_mounts+=("$dst")
+ ;;
+ esac
;;
esac
- bind_mounts_opt=("${bind_mounts_opt[@]}" "-v" "$dir:$bind_mount")
-done
+ # See processing of git_mounts below.
+ if [ "$src" != "" ] && git -C "$src" status >/dev/null 2>&1; then
+ git_mounts+=("$dst")
+ fi
-IFS=" " read -r -a volume_mounts <<< "$(print_volume_mounts "$job_name" "-$container_arch-$distro")"
-for mount in "${volume_mounts[@]}"; do
- bind_mounts_opt=("${bind_mounts_opt[@]}" "-v" "$mount")
+ # Re-assemble the mount
+ mount="$dst"
+ if [ "$src" != "" ]; then
+ mount="$src:$mount"
+ if [ "$ro" != "" ]; then
+ mount="$mount:ro"
+ fi
+ fi
+ mounts_opt+=("-v" "$mount")
done
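To make the disassembly/reassembly above concrete, the three accepted mount forms parse as follows (the specs themselves are hypothetical):

    /home/shared/keys:/etc/ssh/keys:ro        # src, dst, ro="ro": read-only mount
    ccache-bmk:/home/tcwg-buildslave/.ccache  # src, dst, ro="":   read-write mount
    /home/tcwg-build/workspace                # src="", dst=spec:  scratch volume

Since "cut -s" suppresses input without a delimiter, the colon-free form yields an empty dst and is routed into the scratch-volume branch.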
# Give access to all CPUs to container.
@@ -300,15 +450,14 @@ cpuset_opt="--cpuset-cpus 0-$(($nproc - 1))"
echo "DEBUG: starting docker on $session_host from $(hostname), date $(date)"
# shellcheck disable=SC2206
-docker_run=($DOCKER run --name $session_name -dtP \
- "${bind_mounts_opt[@]}" \
- ${SECURITY} \
+docker_run=($DOCKER run --name $session_name -dtP "${force_port[@]}" \
+ "${mounts_opt[@]}" \
${memory_opt} \
"--pids-limit=${pids}" \
"--cpu-shares=${cpus}" \
$cpuset_opt \
- ${docker_opts} \
- $image) || ssh_error $?
+ ${security} \
+ $image)
echo "${docker_run[@]}"
# FIXME: It seems in some cases $DOCKER run generates a session_id but
@@ -317,6 +466,7 @@ ret=0
session_id=$("${docker_run[@]}") || ret=$?
if [ $ret -ne 0 ]; then
+ ssh_error $ret
if [ $ret -eq 255 ]; then
echo "WARNING: $SSH $session_host returned an error ($ret). Trying another ssh connexion to get debug logs"
$SSH -v $session_host true
@@ -337,17 +487,62 @@ CONTAINER_CLEANUP="$DOCKER rm -fv ${session_id}"
trap "exec 1>&3 2>&4 ; ${CONTAINER_CLEANUP}" EXIT
if [ x"$newuser" != x"" ]; then
- $DOCKER exec "$session_id" new-user.sh --user $newuser
+ $DOCKER exec "$session_id" \
+ new-user.sh --user "$newuser" --verbose "$verbose"
+fi
+
+if [ "$user" != "" ]; then
+ for dir in "${chown_mounts[@]}"; do
+ $DOCKER exec "$session_id" chown $user "$dir"
+ done
+
+ # Mark git mounts as safe git directories, so that git does not complain
+ # about dubious ownership. This is important for get_git_history()
+ # fetching sumfiles/flaky.xfail files.
+ for dir in "${git_mounts[@]}"; do
+ $DOCKER exec "$session_id" sudo -i -u $user \
+ git config --global --add safe.directory "$dir"
+ done
+
+ if [ "$user" = "tcwg-build" ]; then
+ # FIXME: Hack -- use tcwg-buildslave's key while tcwg-build's
+ # is unavailable.
+ $DOCKER exec "$session_id" cp \
+ /home/tcwg-buildslave/.ssh/authorized_keys \
+ /home/tcwg-build/.ssh/authorized_keys
+ $DOCKER exec "$session_id" chown $user \
+ /home/tcwg-build/.ssh/authorized_keys
+ fi
+
+ # Below, $user is used as a prefix for $session_host.
+ user="$user@"
fi
session_port=$($DOCKER port $session_id 22 | cut -d: -f 2) || ssh_error $?
+session_opts=("-p$session_port")
+
+# SECURITY NOTE: this is the first time we are establishing ssh connection
+# to the container, and, provided we are using connection sharing, settings
+# specified for this connection may affect many or all of the subsequent
+# connections. In particular, if ssh agent forwarding is enabled for the
+# below connection, then it will be part of the master connection, and it
+# may persist through the whole lifetime of the container.
+if [ "$task" = "precommit" ]; then
+ # FIXME: We should be OK to disable agent forwarding for most of our
+ # jobs, but, for now, keep the previous state as the default.
+
+ # Disable agent forwarding for precommit testing.
+ session_opts+=("-oForwardAgent=no")
+fi
+
# Wait until the ssh server is ready to serve connections
# Make sure connection messages go to stderr, so that in case of
# success stdout contains only the connection info expected by the
# caller.
ret=0
-$dryruncmd wait_for_ssh_server ${user}$session_host $session_port || ret=$?
+$dryruncmd wait_for_ssh_server ${user}$session_host "" "${session_opts[@]}" \
+ || ret=$?
if [ $ret != 0 ]; then
echo SSH server did not respond, exiting
@@ -357,16 +552,36 @@ fi
# For CI builds make sure to kill previous build, which might have been
# aborted by jenkins, but processes could have survived. Otherwise old
# build can start writing to files of the current build.
-if $bind_mounted_workspace; then
- prev_container=$($SSH $session_host flock "$WORKSPACE/.lock" cat "$WORKSPACE/.lock" || true)
- # Container may have been cleaned up by something else
- if [ x"$prev_container" != x"" ] && [ "$(docker ps -a | grep $prev_container)" ] ; then
- echo "NOTE: Removing previous container for $WORKSPACE"
- $DOCKER rm -vf "$prev_container" || echo "WARNING: Could not remove $prev_container"
+if $lock_workspace; then
+ # We may have several containers (one primary and several secondary)
+ # sharing the same workspace, and we list these in $WORKSPACE/.lock.
+ # Helpers stop_all_containers() and clean_all_containers() use this .lock
+ # file to stop/cleanup all containers created in the current session.
+ # We keep the .lock file, so that the primary container in the next
+ # build can confirm that all containers are indeed removed.
+ #
+ # When the cleanup routine is triggered by an aborted jenkins build, we
+ # often no longer have access to the ssh-agent. Therefore, the $SSH command
+ # will likely fail. However, sometimes $SSH just hangs indefinitely,
+ # which causes problems in the tcwg-benchmark_backend job -- the shell hanging
+ # in "trap" waits on $SSH and does not release the board lock file.
+ # To avoid this we put a "timeout 10m" on the container cleanup.
+ # Docker daemon will finish removing the container even if the caller
+ # docker client is killed by timeout.
+ if ! $secondary; then
+ while read prev_container; do
+ # Container may have been cleaned up by something else
+ if $DOCKER stats --no-stream "$prev_container" &>/dev/null; then
+ echo "NOTE: Removing previous container for $WORKSPACE"
+ $DOCKER rm -vf "$prev_container" \
+ || echo "WARNING: Could not remove $prev_container"
+ fi
+ done < <($SSH $session_host flock "$WORKSPACE/.lock" \
+ cat "$WORKSPACE/.lock" || true)
+ $SSH $session_host rm -f "$WORKSPACE/.lock"
fi
- $SSH $session_host bash -c "\"mkdir -p $WORKSPACE && flock $WORKSPACE/.lock echo $session_name > $WORKSPACE/.lock\""
- CONTAINER_CLEANUP="${CONTAINER_CLEANUP}; $SSH $session_host flock $WORKSPACE/.lock rm $WORKSPACE/.lock"
+ $SSH $session_host bash -c "\"mkdir -p $WORKSPACE && echo $session_id | flock $WORKSPACE/.lock tee -a $WORKSPACE/.lock\""
fi
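For illustration, after a primary and one secondary container have started, $WORKSPACE/.lock is simply a list of container ids, one per line (ids hypothetical):

    $ cat "$WORKSPACE/.lock"
    8f3c0d9e2b71
    51a6e4c07d28

The stop_all_containers() and cleanup_all_containers() helpers emitted below iterate over exactly these lines.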
# Do not remove the container upon exit: it is now ready
@@ -374,6 +589,25 @@ trap EXIT
ssh_info_opt=""
if $ssh_info; then
+ assert_with_msg "ssh_info is not supported for task==precommit" \
+ [ "$task" != "precommit" ]
+ # FIXME: One of the things to fix for precommit benchmarking is to pass
+ # all ${session_opts[@]} to the benchmarking container. Benchmarking
+ # precommit workflow is tricky because precommit container needs to trigger
+ # benchmarking job on ci.linaro.org, and then the benchmarking will
+ # connect via ssh to the [precommit] build container. This connection
+ # needs to happen with ssh agent DISABLED, so that processes inside
+ # precommit container can't escape. To disable ssh agent forwarding
+ # when connecting to this container -- we need to pass all
+ # ${session_opts[@]}, which have -oForwardAgent=no.
+ #
+ # Triggering of the benchmarking job on ci.linaro.org can be arranged
+ # by allowing tcwg-build's ssh key to trigger tcwg-benchmark job. This
+ # relies on the Jenkins ssh interface being robust against
+ # privilege-escalation attacks.
+ #
+ # Additionally, we should disable ssh agent forwarding by default,
+ # and enable it only by request.
ssh_info_opt="ssh_host=${user}$session_host ssh_port=$session_port"
fi
@@ -385,24 +619,21 @@ cat <<EOF
# The vars are used when this script is sourced
# shellcheck disable=SC2034
# v1 interface
-CONTAINER="${dryruncmd} $SSH -p ${session_port} ${user}${session_host}"
+CONTAINER="${dryruncmd} $SSH ${session_opts[@]} ${user}${session_host}"
CONTAINER_CLEANUP="${CONTAINER_CLEANUP}"
session_host=${session_host}
session_port=${session_port}
+session_opts=(${session_opts[@]})
# v2 interface
# Source jenkins-helpers.sh for remote_exec
. "$(dirname "$(readlink -f "$0")")/jenkins-helpers.sh"
-${prefix}CONTAINER_RSH="${dryruncmd} $SSH -p ${session_port} ${user}${session_host}"
+${prefix}CONTAINER_RSH="${dryruncmd} $SSH ${session_opts[@]} ${user}${session_host}"
${prefix}container_cleanup ()
{
- [ -f /sys/fs/cgroup/memory/memory.failcnt ] && echo "Number of memory usage failures:" && cat /sys/fs/cgroup/memory/memory.failcnt
- [ -f /sys/fs/cgroup/memory/memory.max_usage_in_bytes ] && echo "Maximum memory used:" && cat /sys/fs/cgroup/memory/memory.max_usage_in_bytes
- [ -f /sys/fs/cgroup/pids/pids.events ] && echo "Number of fork failures:" && cat /sys/fs/cgroup/pids/pids.events
-
- ${CONTAINER_CLEANUP}
+ timeout 10m ${CONTAINER_CLEANUP}
}
${prefix}container_stop ()
{
@@ -410,28 +641,52 @@ ${prefix}container_stop ()
}
${prefix}container_exec ()
{
- $dryruncmd remote_exec "${user}${session_host}:${session_port}:\$(pwd)::$ssh_info_opt" "\$@"
+ (
+ # Avoid logging of option processing, which may include secret tokens.
+ set +x
+ $dryruncmd remote_exec "${user}${session_host}::\$(pwd | ${pwd_translate[@]}):${session_opts[@]}:$ssh_info_opt" "\$@"
+ )
}
${prefix}container_host=${session_host}
${prefix}container_port=${session_port}
+${prefix}container_opts=(${session_opts[@]})
${prefix}container_id=${session_id}
-container_prefix_list+=("${prefix}")
+${prefix}container_rsync ()
+{
+ local opt
+ local -a opts
+ for opt in "\$@"; do
+ case "\$opt" in
+ ":"*) opt="${user}${session_host}\$opt"
+ esac
+ opts+=("\$opt")
+ done
+
+ $dryruncmd rsync -e "ssh ${session_opts[@]}" --rsync-path="cd \$(pwd | ${pwd_translate[@]}); rsync" "\${opts[@]}"
+}
+EOF
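A usage sketch for the rsync helper above, assuming --prefix run_ and illustrative user, host, and port values: arguments starting with ":" get the container address prepended, so

    run_container_rsync -az results/ :results/

expands to roughly

    rsync -e "ssh -p32768 -oForwardAgent=no" \
        --rsync-path="cd /home/tcwg-build/workspace/bmk/1; rsync" \
        -az results/ tcwg-build@dev-01.tcwglab:results/

with --rsync-path applying the same $WORKSPACE translation as container_exec.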
+
+if $lock_workspace && ! $secondary; then
+ cat <<EOF
stop_all_containers ()
{
local i
- for i in "\${container_prefix_list[@]}"; do
- eval "\${i}container_stop"
- done
+ while read i; do
+ $DOCKER stop "\$i" || echo "WARNING: Could not stop \$i"
+ done < <($SSH $session_host flock "$WORKSPACE/.lock" \
+ cat "$WORKSPACE/.lock")
}
cleanup_all_containers ()
{
local i
- for i in "\${container_prefix_list[@]}"; do
- eval "\${i}container_cleanup"
- done
+ while read i; do
+ timeout 10m $DOCKER rm -vf "\$i" || echo "WARNING: Could not cleanup \$i"
+ done < <(timeout 10m $SSH $session_host flock "$WORKSPACE/.lock" \
+ cat "$WORKSPACE/.lock")
}
EOF
+fi
diff --git a/start-container-qemu.sh b/start-container-qemu.sh
index 235c4aea..bc90cd1a 100755
--- a/start-container-qemu.sh
+++ b/start-container-qemu.sh
@@ -37,7 +37,7 @@ container=$(mktemp)
# Run container as privileged to mount host's /dev inside container to gain
# access to /dev/kvm.
# Publish container's port 2222, which is forwarded to VM's ssh server.
-"$(dirname "$0")"/start-container-docker.sh "${docker_args[@]}" --docker_opts "--privileged --publish 2222" > "$container"
+"$(dirname "$0")"/start-container-docker.sh "${docker_args[@]}" --security "--privileged --publish 2222" > "$container"
# shellcheck disable=SC2064
trap "rm $container; cleanup_all_containers" EXIT
@@ -121,7 +121,7 @@ remote_exec "$host:$port:/:-f -Snone" \
qemu_port=$(ssh $host docker port $container_id 2222 | cut -d: -f 2)
ret=0
-wait_for_ssh_server $host $qemu_port || ret=$?
+wait_for_ssh_server $host "" -p$qemu_port || ret=$?
if [ $ret != 0 ]; then
echo "SSH server did not respond, exiting"
exit $ret
diff --git a/tcwg-benchmark-bare.sh b/tcwg-benchmark-bare.sh
deleted file mode 100755
index e21222d2..00000000
--- a/tcwg-benchmark-bare.sh
+++ /dev/null
@@ -1,228 +0,0 @@
-#!/bin/bash
-
-# Clean: shellcheck -e 2001 ./tcwg-benchmark.sh
-
-set -ex
-
-# Make shellcheck happy and workaround Jenkins not defining variables
-# for empty arguments.
-bench_container_tag="${bench_container_tag-bionic}"
-toolchain_url="$toolchain_url"
-toolchain_type="${toolchain_type-auto}"
-bench_list="$bench_list"
-cflags="$cflags"
-ldflags="$ldflags"
-fileserver="$fileserver"
-forceinstall="$forceinstall"
-results_id="$results_id"
-BUILD_NUMBER="$BUILD_NUMBER"
-NODE_NAME="$NODE_NAME"
-WORKSPACE="$WORKSPACE"
-
-# Jenkins doesn't define variables when parameter value is empty (like cflags),
-# so enable "set -u" only after above binding of variables.
-set -u
-
-. jenkins-helpers.sh
-
-# Start a container to run the benchmarks in.
-# The board is connected to the slave via USB, the container needs
-# special rights to access it.
-
-# tcwg-benchmark user already exists, re-creating it causes an error.
-newuser=
-[ "x$USER" != "xtcwg-benchmark" ] && newuser="--newuser $USER"
-
-./start-container-docker.sh \
- $newuser \
- --distro "$bench_container_tag" \
- --task bench \
- --docker_opts "--privileged -v /dev/bus/usb:/dev/bus/usb" \
- --prefix run_ > run-container.sh
-trap "cleanup_all_containers" EXIT
-. ./run-container.sh
-
-# If $toolchain_url is of ssh:// type, don't use a remote build
-# container, just use the ssh command as provided.
-build_container_host=
-build_container_port=
-case "$toolchain_url" in
- "ssh://"*)
- ccprefix="${toolchain_url##ssh://}"
-
- # Extract host:port: specification from ccprefix, we don't
- # need to care about :parallelize here, just pass it to run.sh
- # if present.
- build=${ccprefix%:*}
- build_container_host="$(echo $build | cut -d: -f 1)"
- case ${ccprefix} in
- *:*:*)
- build_container_port="$(echo $build | cut -s -d: -f 2)"
- ;;
- *:*)
- # If no port is specified, use 22 (ssh default port)
- build_container_port=22
- ;;
- esac
-
- if [ "x$build_container_host" = "x" ]; then
- echo "ERROR: ssh:// toolchain_url lacks a host: $toolchain_url."
- exit 1
- fi
- if [ "x$build_container_port" = "x" ]; then
- echo "ERROR: ssh:// toolchain_url lacks a port: $toolchain_url."
- exit 1
- fi
- ;;
- *)
- # When we copy the toolchain, access it from the 'run' container
- # run_container_host is set with . ./run-container.sh above
- # shellcheck disable=SC2154
- build_container_host=$run_container_host
- # shellcheck disable=SC2154
- build_container_port=$run_container_port
- ;;
-esac
-
-case "$toolchain_url" in
- "ssh://"*)
- # Last component of ccprefix is the path, keep it
- toolchaindir="$(dirname ${ccprefix##*:})"
- ;;
- "http://"*".tar.xz"|"https://"*".tar.xz"|"http://"*".tar.bz2"|"https://"*".tar.bz2")
- toolchaindir=$(untar_url "$toolchain_url" "$WORKSPACE" "--strip-components 1")
- ;;
- "rsync://"*)
- ccprefix="${toolchain_url##rsync://}"
-
- # Extract host:port: specification from ccprefix, we don't
- # need to care about :parallelize here, just pass it to run.sh
- # if present.
- rsync_spec=${ccprefix%:*}
- rsync_host="$(echo $rsync_spec | cut -d: -f 1)"
- case ${ccprefix} in
- *:*:*)
- rsync_port="$(echo $rsync_spec | cut -s -d: -f 2)"
- ;;
- *:*)
- # If no port is specified, use 22 (ssh default port)
- rsync_port=22
- ;;
- esac
- # We want to access the remote toolchain via a container, to
- # avoid problems with the hosts's ssh server restrictions on the
- # number of simulaneous connexions.
- # We copy it to the build container (assuming it uses the same
- # architecture as the machine pointed to by $toolchain_url).
- # Assume ccprefix looks like /path/bin/target-triplet-, and
- # compute 'path'.
- src_toolchaindir=$(dirname "$(dirname ${ccprefix##*:})")
- toolchaindir="${WORKSPACE}/toolchain-${BUILD_NUMBER}"
- rsync -az --delete -e "ssh -p$rsync_port" "$rsync_host:$src_toolchaindir/" "$toolchaindir/"
- ;;
- *)
- echo "ERROR: Cannot handle toolchain_url: $toolchain_url"
- exit 1
- ;;
-esac
-
-# Sanity check that toolchain_type is supported
-case "$toolchain_type" in
- gnu|llvm) ;;
- *)
- echo "ERROR: Unsupported toolchain type: $toolchain_type"
- exit 1
- ;;
-esac
-
-case "$toolchain_url" in
- "http://"*|"https://"*|"rsync://"*|"ssh://"*)
-
- # In the ssh:// case, we have to perform the 'find' operations
- # remotely.
- case "$toolchain_url" in
- "ssh://"*)
- maybe_remote="ssh -p $build_container_port $build_container_host"
- ;;
- *)
- maybe_remote=""
- ;;
- esac
-
- case "$toolchain_type" in
- "gnu"|"llvm") ;;
- "auto")
- if [ x"$($maybe_remote find "$toolchaindir" -path "*bin/*gcc" | wc -l)" != x"0" ]; then
- toolchain_type="gnu"
- elif [ x"$($maybe_remote find "$toolchaindir" -path "*bin/*clang" | wc -l)" != x"0" ]; then
- toolchain_type="llvm"
- else
- echo "ERROR: Cannot autodetect toolchain type"
- exit 1
- fi
- ;;
- esac
-
- case "$toolchain_type" in
- "gnu") ccname="gcc" ;;
- "llvm") ccname="clang" ;;
- esac
- ccpath=$($maybe_remote find "$toolchaindir" -path "*bin/*$ccname")
- if [ "$(echo "$ccpath" | wc -w)" -ne 1 ]; then
- echo "ERROR: found more than one compiler: $ccpath"
- exit 1
- fi
-
- # No need to copy the toolchain to the build container: it
- # runs on the local machine and has access to $toolchaindir.
- # FIXME: ssh:// access is currently broken.
- case "$toolchain_url" in
- "ssh://"*) ;;
- *)
- ccprefix=$(echo "$ccpath" | sed -e "s/$ccname\$//")
- ;;
- esac
- ;;
-esac
-
-results_id=$(echo "$results_id" \
- | sed -e "s/<build_num>/$BUILD_NUMBER/g" \
- -e "s/@build_num@/$BUILD_NUMBER/g")
-hw_tag="${results_id%%/*}"
-if echo "$results_id" | grep -q "\.\."; then
- echo "ERROR: results_id should not escape /home/tcwg-benchmark/results* hierarchy; do not use \"..\""
- exit 1
-fi
-case "$hw_tag" in
- stm32*) hw_tag=${hw_tag##stm32_} ;;
- *)
- echo "ERROR: results_id does not start with a valid hw_tag"
- exit 1
- ;;
-esac
-
-# vars are from run-container.sh sourced above
-# shellcheck disable=SC2154
-case "$bench_list" in
- *coremark*)
- remote_exec "$run_container_host:$run_container_port:$WORKSPACE/bmk-scripts:-t -Snone" \
- "./coremark.sh" \
- --ccprefix "$ccprefix" \
- --cflags "$cflags" \
- --ldflags "$ldflags" \
- --forceinstall "${forceinstall}" \
- --hw_tag "$hw_tag" \
- --resultsdest "bkp-01.tcwglab:/home/tcwg-benchmark/results-${results_id}/${NODE_NAME}" \
- --verbose true
- ;;
-esac
-
-# Delete temporary toolchains to avoid filling disk
-case "$toolchain_url" in
- "ssh://"*)
- # Nothing to do, we didn't copy any toolchain
- ;;
- "http://"*|"https://"*|"rsync://"*)
- rm -rf $toolchaindir
- ;;
-esac
diff --git a/tcwg-benchmark-results-compare.sh b/tcwg-benchmark-results-compare.sh
deleted file mode 100755
index f6450254..00000000
--- a/tcwg-benchmark-results-compare.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-# Clean: shellcheck -e 2001 ./tcwg-benchmark-results-compare.sh
-
-set -ex
-
-# Make shellcheck happy and workaround Jenkins not defining variables
-# for empty arguments.
-resultsref="$resultsref"
-resultseval="$resultseval"
-peak="$peak"
-
-peak_opt=""
-$peak && peak_opt=--peak
-
-results_top="bkp-01.tcwglab:/home/tcwg-benchmark/results"
-
-rm -rf results-eval results-ref artifacts
-
-rsync -az --delete "$results_top-$resultsref/" results-ref/
-rsync -az --delete "$results_top-$resultseval/" results-eval/
-
-mkdir artifacts
-perl bmk-scripts/quick-view-spec.pl --timeratio ${peak_opt} results-ref/*/*csv* -- results-eval/*/*csv* > artifacts/time.txt
-perl bmk-scripts/quick-view-spec.pl --scoreratio ${peak_opt} results-ref/*/*csv* -- results-eval/*/*csv* > artifacts/score.txt
-
-cat artifacts/time.txt
diff --git a/tcwg-benchmark-results.broken-list b/tcwg-benchmark-results.broken-list
index 85d65264..0870bc7a 100644
--- a/tcwg-benchmark-results.broken-list
+++ b/tcwg-benchmark-results.broken-list
@@ -1,97 +1,5 @@
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-O2/2458
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-O2/2578
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-O2/2824
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-O2/3089
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-O2_LTO/2416
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-O2_LTO/2550
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-O2_LTO/2707
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-O2_LTO/2852
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-O2_LTO/2981
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-O3/2511
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-O3/2632
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-O3/2831
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-O3/3056
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-O3_LTO/2798
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-O3_LTO/2947
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-O3_LTO/3142
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-Os/2356
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-Os/2567
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-Os/2791
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-Os/2962
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-Os/3162
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-Os_LTO/2379
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-Os_LTO/2488
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-Os_LTO/2642
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-Os_LTO/2848
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-master-arm-spec2k6-Os_LTO/3073
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-O2/2750
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-O2/2967
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-O2/3135
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-O2_LTO/2470
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-O2_LTO/2661
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-O2_LTO/2857
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-O2_LTO/3187
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-O3/2335
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-O3/2539
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-O3/2742
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-O3/2936
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-O3/3172
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-O3_LTO/2323
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-O3_LTO/2532
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-O3_LTO/2720
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-O3_LTO/2921
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-O3_LTO/3165
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-Os/2423
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-Os/2554
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-Os/2679
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-Os/3123
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-Os_LTO/2496
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-Os_LTO/2670
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-Os_LTO/2883
-tk1_32/tcwg_bmk_tk1/jenkins-full-gnu-release-arm-spec2k6-Os_LTO/3151
-tk1/tcwg_bmk/gnu-master-arm-spec2k6-O3-4374
-tk1/tcwg_bmk/gnu-master-arm-spec2k6-O3_LTO-4704
-tk1/tcwg_bmk/gnu-master-arm-spec2k6-O3_LTO-4788
-tk1/tcwg_bmk/llvm-master-arm-spec2k6-O2-5917
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-O2/2143
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-O2/2182
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-O2/2252
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-O2/2393
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-O2/2542
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-O2/2732
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-O2_LTO/2248
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-O2_LTO/2380
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-O2_LTO/2543
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-O2_LTO/2727
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-O3/2563
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-O3/2769
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-O3_LTO/2745
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-Os/2213
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-Os/2316
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-Os/2458
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-Os/2621
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-Os/2798
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-Os_LTO/2442
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-Os_LTO/2593
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-master-aarch64-spec2k6-Os_LTO/2823
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-O2/2763
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-O2_LTO/2267
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-O2_LTO/2451
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-O2_LTO/2683
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-O3/2349
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-O3/2528
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-O3/2692
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-O3_LTO/2151
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-O3_LTO/2259
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-O3_LTO/2447
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-O3_LTO/2681
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-Os/2318
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-Os/2392
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-Os/2564
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-Os/2829
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-Os_LTO/2298
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-Os_LTO/2403
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-Os_LTO/2594
-tx1_64/tcwg_bmk_tx1/jenkins-full-gnu-release-aarch64-spec2k6-Os_LTO/2847
-tx1/tcwg_bmk/gnu-master-aarch64-spec2k6-O3-4348
-tx1/tcwg_bmk/llvm-master-aarch64-spec2k6-O3-4358
+apm_64/tcwg_bmk_gnu_apm/jenkins-full-gnu-release-aarch64-spec2k6-Os_LTO/3654
+tx1_64/tcwg_bmk_gnu_tx1/jenkins-full-gnu-release-aarch64-spec2k6-O3_LTO/4571
+apm_32/tcwg_bmk_llvm_apm/jenkins-full-llvm-release-arm-spec2k6-Oz/4763
+tk1_32/tcwg_bmk_llvm_tk1/jenkins-full-llvm-release-arm-spec2k6-O2_LTO/4332
+tx1_64/tcwg_bmk_llvm_tx1/jenkins-full-llvm-master-aarch64-spec2k6-O2_LTO/3831
diff --git a/tcwg-benchmark-results.sh b/tcwg-benchmark-results.sh
index 6b45c971..c788d044 100755
--- a/tcwg-benchmark-results.sh
+++ b/tcwg-benchmark-results.sh
@@ -8,54 +8,20 @@ scripts=$(dirname $0)
# shellcheck source=jenkins-helpers.sh
. $scripts/jenkins-helpers.sh
-function gather_perf_data ()
-{
- local has_perf_logs=$1
- local hw_tag=$2
- local num=$3
-
- if [ x"$has_perf_logs" = xyes ]; then
- $scripts/../bmk-scripts/perfdatadir2csv.sh \
- --buildid-dir local --format sample,size --sort-field sample \
- --perf-bin /usr/lib/linux-tools/$hw_tag/perf \
- $verbose_opt $num_entries_opt \
- --results-dir "results-$num/" > "$top_artifacts/results-$num.csv"
- else
- # No perf logs to parse, just copy the plain results.csv.
- # Use 'find' because results.csv is located under
- # results-$num/NODE_NAME/ and we don't want to hardcode
- # NODE_NAME. Since the whole script runs under 'set -f', using
- # '*' does not work.
- mapfile -t this_csv < <(find results-$num -name results.csv)
- if [ "${#this_csv[@]}" -eq 1 ]; then
- cp -v "${this_csv[@]}" "$top_artifacts/results-$num.csv"
- else
- echo "ERROR: Found ${#this_csv[@]} CSV results files in results-$num, expecting a single one."
- exit 1
- fi
- fi
-}
-
convert_args_to_variables "$@"
-obligatory_variables results
+obligatory_variables results hw_tag
top_artifacts="${top_artifacts-artifacts}"
verbose="${verbose-false}"
# shellcheck disable=SC2154
num_entries_opt="${num_dsos+--num-dsos $num_dsos} ${num_symbols+--num-symbols $num_symbols}"
entry_threshold="${entry_threshold-5}"
-metric="${metric-perf}"
-has_perf_logs="${has_perf_logs-no}"
-
-if [ x"$metric" = x"perf" ]; then
- has_perf_logs="yes"
-fi
+has_perf_logs="${has_perf_logs-yes}"
+hw_tag="${hw_tag-unknown}"
-verbose_opt=""
if $verbose; then
set -x
- verbose_opt="--verbose"
fi
relative_opt=""
@@ -65,38 +31,106 @@ if [ x"${results_ref+set}" = x"set" ]; then
results=("$results_ref" "${results[@]}")
num=0
else
+ results=("${results[@]}")
num=1
fi
mkdir -p $top_artifacts
-results_top="bkp-01.tcwglab:/home/tcwg-benchmark/results"
+j=$num
+for i in "${results[@]}"; do
+ # results can be either
+ # - an existing local dir, create a symlink results-N -> results_dir
+ # - or a remote bkp-01 dir, rsync it to results-N
+ rm -rf results-$j
+ if [ -d $i ]; then
+ ln -s $i results-$j
+ else
+ rsync -az --delete "$i/" results-$j/
+ fi
+ csv_results_dir="$top_artifacts/csv-results-$j"
+ mkdir -p $csv_results_dir
+
+ $scripts/../bmk-scripts/gather-metrics.sh --results_dir results-$j/ \
+ --csv_results_dir $csv_results_dir \
+ ++metrics perf \
+ --hw_tag $hw_tag --has_perf_logs $has_perf_logs \
+ --verbose $verbose $num_entries_opt
+ j=$((j+1))
+done
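Each entry of "${results[@]}" can therefore take either form (paths hypothetical):

    $WORKSPACE/results-ref                                   # local dir: symlinked
    bkp-01.tcwglab:/home/tcwg-benchmark/results-abc/build-1  # remote dir: rsync'ed

The "[ -d $i ]" test is what distinguishes the two.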
+
+$scripts/../bmk-scripts/gen_interesting_symbols.py \
+ --perf_csvs="$top_artifacts/csv-results-1/perf.csv" \
+ --out_csv="$top_artifacts/interesting-symbols.csv" \
+ --threshold_interesting=$entry_threshold
csvs=""
for i in "${results[@]}"; do
- rsync -az --delete "$results_top-$i/" results-$num/
-
- case $metric in
- "perf")
- hw_tag="${i%%/*}"
- gather_perf_data $has_perf_logs $hw_tag $num
- ;;
- "vect")
- $scripts/../bmk-scripts/vect-data-to-csv.py "results-$num" \
- "$top_artifacts/results-$num.csv"
- ;;
- *)
- echo "ERROR: invalid value for metric: ${metric}"
- exit 1
- ;;
- esac
-
- csvs="$csvs $top_artifacts/results-$num.csv"
+ csv_results_dir="$top_artifacts/csv-results-$num"
+
+ # FIXME: This will (needlessly) recompute the perf metric. Revisit later
+ # to avoid that.
+ $scripts/../bmk-scripts/gather-metrics.sh --results_dir "results-$num/" \
+ --csv_results_dir $csv_results_dir \
+ --hw_tag $hw_tag --has_perf_logs $has_perf_logs \
+ --verbose $verbose $num_entries_opt \
+ --interesting_symbols "$top_artifacts/interesting-symbols.csv"
+
+ csvs="$csvs $csv_results_dir/results.csv"
num=$(($num+1))
done
-$scripts/../bmk-scripts/csvs2table.py -p $entry_threshold $relative_opt $csvs > $top_artifacts/results.csv
-
-$scripts/../bmk-scripts/csvs2table.py -p 0 $relative_opt $csvs > $top_artifacts/results-full.csv
+# Demangling symbol names may introduce commas, which would interfere with
+# parsing the CSV files. So keep a separate copy, results-internal.csv, which
+# contains the mangled names.
+$scripts/../bmk-scripts/csvs2table.py -p $entry_threshold $relative_opt $csvs \
+ > $top_artifacts/csvs2table-results-internal.csv
+cat $top_artifacts/csvs2table-results-internal.csv | c++filt -p > $top_artifacts/csvs2table-results.csv
+
+$scripts/../bmk-scripts/csvs2table.py -p 0 $relative_opt $csvs | c++filt -p > $top_artifacts/csvs2table-results-full.csv
+
+$scripts/../bmk-scripts/csvs2table.py -p 99 $relative_opt $csvs | c++filt -p > $top_artifacts/csvs2table-results-brief.csv
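An example of why the mangled copy is kept (exact formatting may vary with the binutils version):

    $ echo _ZNSt6vectorIiSaIiEE9push_backERKi | c++filt -p
    std::vector<int, std::allocator<int> >::push_back

Even with -p, which drops parameter types, template arguments survive demangling, and their commas would split a CSV field.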
+
+# Currently we only support relative comparison for 2 csvs with compare-results.py.
+# If we are using non-relative mode, or more than 2 csv files for comparison, fall
+# back to csvs2table.py.
+
+if [ x"$relative_opt" == x"--relative" ] && [ $num == 2 ]; then
+ $scripts/../bmk-scripts/compare-results.py $csvs \
+ --interesting_symbols "$top_artifacts/interesting-symbols.csv" \
+ > $top_artifacts/compare-results-internal.csv
+
+ $scripts/../bmk-scripts/diff-bmk-results.py \
+ $top_artifacts/compare-results-internal.csv \
+ $top_artifacts/csvs2table-results-internal.csv \
+ true \
+ > $top_artifacts/cmp-results.diff || true
+
+ # Only keep cmp-results.diff if there were differences found between output of
+ # compare-results.py and csvs2table.py for same input csvs.
+ if grep -Fxq "Same results" $top_artifacts/cmp-results.diff
+ then
+ rm $top_artifacts/cmp-results.diff
+ fi
+fi
-$scripts/../bmk-scripts/csvs2table.py -p 99 $relative_opt $csvs > $top_artifacts/results-brief.csv
+# We currently have two workflows to produce final list of regressions:
+# (a) csvs2table.py
+# -> results with rel metrics
+# -> tcwg_bmk-build.sh:compare_results
+# -> list of exe and symbol regressions.
+#
+# (b) compare-results.py
+# -> results with rel metrics
+# -> output-bmk-results.py
+# -> list of exe and symbol regressions
+#
+# (a) is used by default for all cases currently.
+# In the short term we want to replace (a) with (b) for relative mode
+# with two input csvs, which is the common case for bmk CI. To achieve that,
+# (i) Replace csvs2table.py with compare-results.py in (a).
+# (ii) If (i) succeeds, then replace tcwg_bmk-build.sh:compare_results
+# with output-bmk-results.py.
+# In the longer term, we should replace (a) with (b) for all cases.
+
+cp $top_artifacts/csvs2table-results-internal.csv $top_artifacts/results-internal.csv
diff --git a/tcwg-benchmark.sh b/tcwg-benchmark.sh
index 37859848..21e5fa9e 100755
--- a/tcwg-benchmark.sh
+++ b/tcwg-benchmark.sh
@@ -2,7 +2,7 @@
# Clean: shellcheck -e 2001 ./tcwg-benchmark.sh
-set -eux
+set -eu
scripts=$(dirname "$0")
# shellcheck source=jenkins-helpers.sh
@@ -23,12 +23,12 @@ obligatory_variables \
sysroot \
forceinstall \
builder \
- results_id \
- BUILD_NUMBER \
+ results_dest \
WORKSPACE \
reboot \
ignore_errors \
- clean_older_than
+ clean_older_than \
+ hw_tag
declare -g \
boardname \
image_arch \
@@ -43,29 +43,27 @@ declare -g \
sysroot \
forceinstall \
builder \
- results_id \
- BUILD_NUMBER \
+ results_dest \
WORKSPACE \
reboot \
ignore_errors \
- clean_older_than
+ clean_older_than \
+ hw_tag
# Make shellcheck happy and workaround Jenkins not defining variables
# for empty arguments.
-bench_container_tag="${bench_container_tag-bionic}"
-build_container_tag="${build_container_tag-bionic}"
+bench_container_tag="${bench_container_tag-default}"
toolchain_type="${toolchain_type-auto}"
prepare_board="${prepare_board-true}"
+verbose="${verbose-true}"
+support_fortran_opt=""
-if echo "$builder" | grep -q ".*-[0-9]\+"; then
- docker_host_opt="--arch amd64 --node $builder"
-else
- docker_host_opt="--label $builder"
+if $verbose; then
+ set -x
fi
-# shellcheck source=jenkins-helpers.sh
-. $scripts/jenkins-helpers.sh
-
+prepare_toolchain ()
+{
# If $toolchain_url is of ssh:// type, don't create a remote build
# container, just use the ssh command as provided.
build_container_host=
@@ -83,27 +81,32 @@ case "$toolchain_url" in
*:*:*)
build_container_port="$(echo $build | cut -s -d: -f 2)"
;;
- *:*)
- # If no port is specified, use 22 (ssh default port)
- build_container_port=22
- ;;
esac
if [ "x$build_container_host" = "x" ]; then
echo "ERROR: ssh:// toolchain_url lacks a host: $toolchain_url."
exit 1
fi
- if [ "x$build_container_port" = "x" ]; then
- echo "ERROR: ssh:// toolchain_url lacks a port: $toolchain_url."
- exit 1
- fi
;;
*)
- # Make sure to cleanup build container if something goes
- # wrong when preparing the test environment
- trap "cleanup_all_containers" EXIT
- $scripts/start-container-docker.sh $docker_host_opt --distro "$build_container_tag" --task build --prefix build_ > build-container.sh
- . ./build-container.sh
+ if [ x"$builder" = x"bmk_board" ]; then
+ # shellcheck disable=SC2154
+ build_container_host=$run_container_host
+ # shellcheck disable=SC2154
+ build_container_port=$run_container_port
+ else
+ build_container_tag="${builder#*:}"
+ builder="${builder%:*}"
+ if echo "$builder" | grep ".*-[0-9]\+" >/dev/null; then
+ # Builder is a specific node
+ docker_host_opt="--arch amd64 --node $builder"
+ else
+ docker_host_opt="--label $builder"
+ fi
+
+ $scripts/start-container-docker.sh $docker_host_opt --distro "$build_container_tag" --task build --prefix build_ > build-container.sh
+ . ./build-container.sh
+ fi
;;
esac
@@ -121,6 +124,21 @@ case "$toolchain_url" in
;;
"rsync://"*)
ccprefix="${toolchain_url##rsync://}"
+
+ # Extract host:port: specification from ccprefix, we don't
+ # need to care about :parallelize here, just pass it to run.sh
+ # if present.
+ rsync_spec=${ccprefix%:*}
+ rsync_host="$(echo $rsync_spec | cut -d: -f 1)"
+ case ${ccprefix} in
+ *:*:*)
+ rsync_port="$(echo $rsync_spec | cut -s -d: -f 2)"
+ ;;
+ *:*)
+ # If no port is specified, use 22 (ssh default port)
+ rsync_port=22
+ ;;
+ esac
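For illustration, with a hypothetical URL

    toolchain_url=rsync://dev-01.tcwglab:22:/home/user/tools/bin/aarch64-linux-gnu-

the parsing above yields rsync_host=dev-01.tcwglab and rsync_port=22; the src_toolchaindir computed below is then /home/user/tools, i.e. two dirname steps up from the compiler prefix.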
# We want to access the remote toolchain via a container, to
# avoid problems with the host's ssh server restrictions on the
# number of simultaneous connections.
@@ -128,9 +146,10 @@ case "$toolchain_url" in
# architecture as the machine pointed to by $toolchain_url).
# Assume ccprefix looks like /path/bin/target-triplet-, and
# compute 'path'.
- src_toolchaindir=$(dirname "$(dirname ${ccprefix})")
- toolchaindir="${WORKSPACE}/toolchain-${BUILD_NUMBER}"
- rsync -az --delete "$src_toolchaindir/" "$toolchaindir/"
+ src_toolchaindir=$(dirname "$(dirname ${ccprefix##*:})")
+ toolchaindir="${WORKSPACE}/toolchain"
+ rsync -az --delete -e "ssh -p$rsync_port" \
+ "$rsync_host:$src_toolchaindir/" "$toolchaindir/"
;;
*)
echo "ERROR: Cannot handle toolchain_url: $toolchain_url"
@@ -147,56 +166,62 @@ case "$toolchain_type" in
;;
esac
+# In the ssh:// case, we have to perform the 'find' operations
+# remotely.
case "$toolchain_url" in
- "http://"*|"https://"*|"rsync://"*|"ssh://"*)
-
- # In the ssh:// case, we have to perform the 'find' operations
- # remotely.
- case "$toolchain_url" in
- "ssh://"*)
- maybe_remote="ssh -p $build_container_port $build_container_host"
- ;;
- *)
- maybe_remote=""
- ;;
- esac
+ "ssh://"*)
+ maybe_remote="ssh ${build_container_port:+-p$build_container_port} $build_container_host"
+ ;;
+ *)
+ maybe_remote=""
+ ;;
+esac
- case "$toolchain_type" in
- "gnu"|"llvm") ;;
- "auto")
- if [ x"$($maybe_remote find "$toolchaindir" -path "*bin/*gcc" | wc -l)" != x"0" ]; then
- toolchain_type="gnu"
- elif [ x"$($maybe_remote find "$toolchaindir" -path "*bin/*clang" | wc -l)" != x"0" ]; then
- toolchain_type="llvm"
- else
- echo "ERROR: Cannot autodetect toolchain type"
- exit 1
- fi
- ;;
- esac
+case "$toolchain_type" in
+ "gnu") ;;
+ "llvm")
+ if [ x"$($maybe_remote find "$toolchaindir" -path "*bin/*flang-new" | wc -l)" != x"0" ]; then
+ support_fortran_opt="--support_fortran"
+ fi
+ ;;
+ "auto")
+ if [ x"$($maybe_remote find "$toolchaindir" -path "*bin/*gcc" | wc -l)" != x"0" ]; then
+ toolchain_type="gnu"
+ elif [ x"$($maybe_remote find "$toolchaindir" -path "*bin/*clang" | wc -l)" != x"0" ]; then
+ toolchain_type="llvm"
+ else
+ echo "ERROR: Cannot autodetect toolchain type"
+ exit 1
+ fi
+ ;;
+esac
+# Non-ssh:// cases have to copy the just-copied toolchain to
+# the remote build container. For ssh://, we'll access the
+# toolchain remotely.
+case "$toolchain_url" in
+ "ssh://"*) ;;
+ *)
case "$toolchain_type" in
"gnu") ccname="gcc" ;;
"llvm") ccname="clang" ;;
esac
+
ccpath=$($maybe_remote find "$toolchaindir" -path "*bin/*$ccname")
if [ "$(echo "$ccpath" | wc -w)" -ne 1 ]; then
echo "ERROR: found more than one compiler: $ccpath"
exit 1
fi
- # Non-ssh:// cases have to copy the just-copied toolchain to
- # the remote build container. For ssh://, we'll access the
- # toolchain remotely.
- case "$toolchain_url" in
- "ssh://"*) ;;
- *)
- ccprefix=$(echo "$ccpath" | sed -e "s/$ccname\$//")
- # Copy toolchain to the build container.
- rsync -a --delete -e "ssh -p$build_container_port" "$toolchaindir/" "$build_container_host:$toolchaindir/"
- ccprefix="$build_container_host:$build_container_port:$ccprefix"
- ;;
- esac
+ ccprefix=$(echo "$ccpath" | sed -e "s/$ccname\$//")
+ # Copy toolchain to the build container.
+ ssh ${build_container_port:+-p$build_container_port} \
+ $build_container_host mkdir -p "$toolchaindir"
+ rsync -a --del -e "ssh ${build_container_port:+-p$build_container_port}" \
+ "$toolchaindir/" "$build_container_host:$toolchaindir/"
+ if [ x"$builder" != x"bmk_board" ]; then
+ ccprefix="$build_container_host:$build_container_port:$ccprefix"
+ fi
;;
esac
@@ -207,7 +232,10 @@ case "$sysroot" in
"http://"*|"https://"*)
sysrootdir=$(untar_url "$sysroot" "$WORKSPACE" "--strip-components 1")
# Copy toolchain to the build container.
- rsync -a --delete -e "ssh -p$build_container_port" "$sysrootdir/" "$build_container_host:$sysrootdir/"
+ ssh ${build_container_port:+-p$build_container_port} \
+ $build_container_host mkdir -p "$sysrootdir"
+ rsync -a --del -e "ssh ${build_container_port:+-p$build_container_port}" \
+ "$sysrootdir/" "$build_container_host:$sysrootdir/"
sysroot="$build_container_host:$build_container_port:$sysrootdir"
;;
"ssh://"*)
@@ -231,21 +259,23 @@ case "$sysroot" in
exit 1
;;
esac
+}
-if echo "$results_id" | grep -q "\.\."; then
- echo "ERROR: results_id should not escape /home/tcwg-benchmark/results* hierarchy; do not use \"..\""
- exit 1
-fi
-
-hw_tag="${results_id%%/*}"
case "$hw_tag:$boardname:$image_arch" in
+ apm_32:*-apm-*:armhf) ;;
+ apm_64:*-apm-*:arm64) ;;
sq_32:*-sq-*:armhf) ;;
sq_64:*-sq-*:arm64) ;;
+ stm32:dev-*:amd64) ;;
tk1_32:*-tk1-*:armhf) ;;
tx1_64:*-tx1-*:arm64) ;;
tx1_32:*-tx1-*:armhf) ;;
+ fx_32:*-fx-*:armhf) ;;
+ fx_64:*-fx-*:arm64) ;;
+ qc_32:*-qc-*:armhf) ;;
+ qc_64:*-qc-*:arm64) ;;
*)
- echo "ERROR: results_id does not start with a valid hw_tag: $hw_tag"
+ echo "ERROR: hw_tag parameter is not valid : $hw_tag"
exit 1
;;
esac
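A worked example of the consistency check (boardname hypothetical):

    tx1_64 : tcwg-bmk-tx1-01.tcwglab : arm64   # matches "tx1_64:*-tx1-*:arm64"
    tx1_64 : tcwg-bmk-tx1-01.tcwglab : armhf   # falls through to the error arm

A 32-bit image on the same board would instead need hw_tag=tx1_32.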
@@ -253,49 +283,96 @@ esac
# Check that we can ssh to the board and rsync scripts. This ensures that
# the board is online and the filesystem is in good condition. Try to reboot and/or
# power-cycle the board as needed.
-if $reboot; then
- # 1. Try access after soft reboot
- # 2. Try access after power-cycle
- tries_left=2
-else
- # 1. Try access without rebooting
- # 2. Try access after soft reboot
- # 3. Try access after power-cycle
- tries_left=3
-fi
+case "$hw_tag:$reboot" in
+ stm32:*)
+ # 1. If the host machine isn't available on the 1st try -- give up.
+ tries_left=1
+ reboot=false
+ prepare_board=false
+ ;;
+ *:true)
+ # 1. Try access after soft reboot
+ # 2. Try access after power-cycle
+ tries_left=2
+ ;;
+ *)
+ # 1. Try access without rebooting
+ # 2. Try access after soft reboot
+ # 3. Try access after power-cycle
+ tries_left=3
+ ;;
+esac
force_power_cycle=false
while [ $tries_left != 0 ]; do
tries_left=$(($tries_left-1))
- if $reboot; then
- if ! ssh "$boardname" true || $force_power_cycle; then
+ if timeout 1m ssh "$boardname" true; then
+ ssh_cmd="ssh"
+ wait_opts=()
+ elif timeout 1m ssh -p22 -lroot "$boardname" true; then
+ ssh_cmd="ssh -p22 -lroot"
+ wait_opts=(-p22 -lroot)
+ else
+ ssh_cmd="false"
+ wait_opts=(-p22 -lroot)
+ reboot=true
+ force_power_cycle=true
+ tries_left=0
+ fi
+
+ if $prepare_board; then
+ if ! $reboot; then
+ # Check the board for kernel panics, and reboot if any are found.
+ dmesg_file="$boardname.dmesg-$(date +%s)"
+ timeout 1m $ssh_cmd "$boardname" dmesg -l emerg 2>&1 \
+ | tee "$dmesg_file-emerg"
+ if [ x"$(cat "$dmesg_file-emerg" | wc -l)" != x"0" ]; then
+ reboot=true
+ timeout 1m $ssh_cmd "$boardname" dmesg 2>&1 \
+ | tee "$dmesg_file"
+ else
+ # Remove empty dmesg reports, but keep non-empty ones for
+ # offline analysis -- e.g., to understand the frequency and
+ # nature of kernel panics.
+ rm "$dmesg_file-emerg"
+ fi
+ fi
+
+ if $force_power_cycle; then
echo "Trying to power-cycle $boardname"
(
pdu_name=$(echo "${boardname%.tcwglab}" \
| sed -e 's/^tcwg-bmk-/tcwg-/')
nvidia-power-cycle.sh "$pdu_name"
- wait_for_ssh_server "$boardname" 22 100
+ wait_for_ssh_server "$boardname" 150 "${wait_opts[@]}"
) &
wait $! || exit $EXTERNAL_FAIL
echo "Successfully powered-cycled $boardname"
- else
+ elif $reboot; then
+ echo "Trying to reboot $boardname"
# Reboot the board.
# Ping board every second (ServerAliveInterval=1) to avoid
# waiting [default] 5min for ssh to break connection.
- ssh -Snone -oServerAliveInterval=1 $boardname sudo /sbin/reboot \
- || true
+ $ssh_cmd -Snone -oServerAliveInterval=1 $boardname \
+ sudo reboot || true
+
# Wait until the ssh server is ready
sleep 30 # Give time to the board to shutdown
- ret=0
- wait_for_ssh_server $boardname 22 100 || ret=$?
+ wait_for_ssh_server "$boardname" 150 "${wait_opts[@]}" &
+ ret=0 && wait $! || ret=$?
if [ $ret != 0 ]; then
- echo "SSH server did not respond after reboot, exiting."
- exit $EXTERNAL_FAIL
+ echo "SSH server did not respond after reboot"
fi
fi
fi
- rsync -az --delete bmk-scripts/ "$boardname:bmk-scripts/" &
+ (
+ if $prepare_board; then
+ $scripts/tcwg-update-bmk-containers.sh --board "$boardname" \
+ --test_docker true
+ fi
+ rsync -az --del bmk-scripts/ "$boardname:bmk-scripts/"
+ ) &
res=0 && wait $! || res=$?
if [ $res = 0 ]; then
break
@@ -312,11 +389,6 @@ if [ $res != 0 ]; then
exit $EXTERNAL_FAIL
fi
-case "$testmode" in
- build|verify) input_size="test" ;;
- benchmark) input_size="ref" ;;
-esac
-
if $prepare_board; then
# FIXME: Implement more configurations and checks:
# disable swap
@@ -324,9 +396,8 @@ if $prepare_board; then
# check that there are no stray processes
# test that taskset works
remote_exec "$boardname:::-t -Snone" \
- sudo /usr/local/bin/benchmark.sh --hw_tag "$hw_tag" \
- --action start_board --verbose \
- --image "linaro/ci-$image_arch-tcwg-build-ubuntu:$bench_container_tag" &
+ sudo bmk-scripts/prepare-board.sh --hw_tag "$hw_tag" \
+ --action start_board --verbose &
res=0 && wait $! || res=$?
if [ $res != 0 ]; then
@@ -335,9 +406,13 @@ if $prepare_board; then
fi
fi
+# Make sure to cleanup build container if something goes
+# wrong when preparing the test environment
+trap "cleanup_all_containers" EXIT
+
# Start a container to run the benchmarks in.
# We install SPEC in /home/tcwg-benchmark, so bind-mount it as $WORKSPACE.
-WORKSPACE=$HOME $scripts/start-container-docker.sh --session-host "$boardname" --arch "$image_arch" --distro "$bench_container_tag" --task bench --docker_opts "--privileged" --prefix run_ > run-container.sh &
+WORKSPACE=$HOME $scripts/start-container-docker.sh --session-host "$boardname" --arch "$image_arch" --distro "$bench_container_tag" --task bench --security "--privileged" --prefix run_ > run-container.sh &
res=0 && wait $! || res=$?
if [ $res != 0 ]; then
@@ -347,33 +422,61 @@ fi
trap "cleanup_all_containers" EXIT
. ./run-container.sh
+declare -g run_container_host run_container_port
+
+prepare_toolchain
+
+case "$bench_list" in
+ coremark)
+ remote_exec "$run_container_host:$run_container_port:bmk-scripts:-t -Snone" \
+ ./coremark.sh \
+ --ccprefix "$ccprefix" \
+ --cflags "$cflags" \
+ --ldflags "$ldflags" \
+ --forceinstall "true" \
+ --resultsdest "${results_dest}/$boardname" \
+ --verbose true
+ ;;
+ *) # any other keyword corresponds to spec2xxx (either 2006 or 2017)
+ case "$testmode" in
+ build) input_size="test" ;;
+ verify) input_size="train" ;;
+ benchmark) input_size="ref" ;;
+ esac
+
+ # spec_config follows run_profile
+ case "$run_profile" in
+ serial) config="serial" ;;
+ parallel|parallel_*) config="parallel" ;;
+ esac
-# vars are from run-container.sh sourced above
-# shellcheck disable=SC2154
-remote_exec "$run_container_host:$run_container_port::-t -Snone" \
- bmk-scripts/run.sh \
- --bench "$bench_list" \
- --config "${BUILD_NUMBER}-$run_profile" \
- --cflags "$cflags" \
- --ldflags "$ldflags" \
- --ccprefix "$ccprefix" \
- --extension "$extension" \
- --hw_tag "$hw_tag" \
- --ignore_errors "$ignore_errors" \
- --input_size "$input_size" \
- --iterations "$iterations" \
- --run_profile "$run_profile" \
- ${sysroot:+--sysroot "$sysroot"} \
- --toolchain "$toolchain_type" \
- --resultsdest "bkp-01.tcwglab:/home/tcwg-benchmark/results-${results_id}/$boardname" \
- --nodename "$boardname" \
- --forceinstall "${forceinstall}" \
- ${clean_older_than:+--clean_older_than "$clean_older_than"} \
- --verbose true
+ remote_exec "$run_container_host:$run_container_port::-t -Snone" \
+ bmk-scripts/run.sh \
+ --bench "$bench_list" \
+ --config "$config" \
+ --cflags "$cflags" \
+ --ldflags "$ldflags" \
+ --ccprefix "$ccprefix" \
+ --extension "$extension" \
+ --hw_tag "$hw_tag" \
+ --ignore_errors "$ignore_errors" \
+ --input_size "$input_size" \
+ --iterations "$iterations" \
+ --run_profile "$run_profile" \
+ ${sysroot:+--sysroot "$sysroot"} \
+ --toolchain "$toolchain_type" \
+ $support_fortran_opt \
+ --resultsdest "${results_dest}/$boardname" \
+ --nodename "$boardname" \
+ --forceinstall "${forceinstall}" \
+ ${clean_older_than:+--clean_older_than "$clean_older_than"} \
+ --verbose true
+ ;;
+esac
if $prepare_board; then
remote_exec "$boardname:::-t -Snone" \
- sudo /usr/local/bin/benchmark.sh --action stop_board --verbose &
+ sudo bmk-scripts/prepare-board.sh --action stop_board --verbose &
res=0 && wait $! || res=$?
if [ $res != 0 ]; then
echo "Warning: prepare-board.sh did not finish cleanly"
diff --git a/tcwg-buildfarm.sh b/tcwg-buildfarm.sh
index 736d315e..77089efb 100755
--- a/tcwg-buildfarm.sh
+++ b/tcwg-buildfarm.sh
@@ -19,6 +19,7 @@ override="${override-}"
host_x86_64_languages="${host_x86_64_languages-default}"
host_aarchXX_languages="${host_aarchXX_languages-c,c++}"
runtests="${runtests-aarch64-linux-gnu}"
+send_results_filter="${send_results_filter-}"
send_results_to="${send_results_to-}"
try_bootstrap="${try_bootstrap-true}"
host_x86_64_excludecheck="${host_x86_64_excludecheck-gdb}"
@@ -29,8 +30,7 @@ log_name="${log_name-$target}"
dont_fail="${dont_fail-false}"
log_server="${log_server-dev-01.tcwglab:$HOME/logs}"
abe_branch="${abe_branch-refs/heads/tested}"
-build_container_tag="${build_container_tag-bionic}"
-test_container_tag="${test_container_tag-bionic}"
+test_container_tag="${test_container_tag-default}"
binaries="${binaries-false}"
dryrun="${dryrun-false}"
@@ -56,7 +56,7 @@ fi
runtests_opt=""
qemu_cpu=""
-if echo $runtests | grep -q $target; then
+if echo $runtests | grep $target >/dev/null; then
runtests_opt="--runtests"
tester_label=$(print_tester_label_for_target $target)
@@ -85,6 +85,10 @@ if echo $runtests | grep -q $target; then
[ "x${qemu_cpu}" != "x" ] && qemu_cpu="--qemu-cpu ${qemu_cpu}"
fi
+if [ "x${send_results_filter}" != x ]; then
+ send_results_filter="--send-results-filter ${send_results_filter}"
+fi
+
if [ "x${send_results_to}" != x ]; then
send_results_to="--send-results-to ${send_results_to}"
fi
@@ -157,14 +161,7 @@ done
result="0"
-# Configure postfix
-sudo sed -e s/@@MYHOSTNAME@@/${NODE_NAME}/g -e s/@@MAILHOST@@/email-smtp.us-east-1.amazonaws.com/ -e s/@@MAILPORT@@/587/ -i /etc/postfix/main.cf
-sudo sed -e s/@@MAILHOST@@/email-smtp.us-east-1.amazonaws.com/ -e s/@@MAILPORT@@/587/ -e s/@@MAILUSER@@/${TCWG_SES_USER}/ -e s/@@MAILPASSWORD@@/${TCWG_SES_PASSWORD}/ -i /etc/postfix/sasl_password
-echo linaro.org | sudo tee /etc/mailname
-sudo postmap hash:/etc/postfix/sasl_password
-sudo /etc/init.d/postfix start
-
-cd ${WORKSPACE} && bash -x ${WORKSPACE}/jenkins-scripts/jenkins.sh --workspace ${WORKSPACE} --abedir ${WORKSPACE} --override "$override ${qemu_cpu}" "${target_opt[@]}" --languages ${languages} $bootstrap $runtests_opt $excludecheck_opt ${extraconfig_opt} ${send_results_to} --logserver $log_server $logname_opt $norebuild $options || result=$?
+cd ${WORKSPACE} && bash -x ${WORKSPACE}/jenkins-scripts/jenkins.sh --workspace ${WORKSPACE} --abedir ${WORKSPACE} --override "$override ${qemu_cpu}" "${target_opt[@]}" --languages ${languages} $bootstrap $runtests_opt $excludecheck_opt ${extraconfig_opt} ${send_results_filter} ${send_results_to} --logserver $log_server $logname_opt $norebuild $options || result=$?
if $dryrun; then
# Keep Jenkins happy
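Several hunks in this patch (here and later in tcwg-llvm-build.sh and tcwg-llvm-release.sh) replace `grep -q` with `grep ... >/dev/null`. A plausible motivation, offered as an assumption rather than a fact from the patch: `grep -q` exits at the first match, which can deliver SIGPIPE to the producing side of the pipe, and under `set -o pipefail` that turns a successful search into a failing pipeline. Redirecting to /dev/null makes grep consume its whole input. A sketch of the failure mode:

    set -o pipefail
    # Racy: grep -q may exit before seq finishes, so seq can die with SIGPIPE (141).
    seq 1 100000 | grep -q '^1$' || echo "match found, yet pipeline failed: $?"
    # Safe: grep reads everything before exiting 0.
    seq 1 100000 | grep '^1$' >/dev/null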
diff --git a/tcwg-cleanup-stale-containers.sh b/tcwg-cleanup-stale-containers.sh
index e160337c..a7ae8ab7 100755
--- a/tcwg-cleanup-stale-containers.sh
+++ b/tcwg-cleanup-stale-containers.sh
@@ -63,14 +63,28 @@ DOCKER="docker"
do_cleanup_containers ()
{
local hours="$1"
- local docker_ps_opts="$2"
+ local filter="$2"
local action="$3"
local action_msg="$4"
local msg="$5"
local cleanup_containers=true
local dryrun_msg=""
+
+    # We stop only Jenkins build containers, and remove all old stopped
+    # containers.
local only_jenkins_containers=true
+ local docker_ps_opts=""
+ local date_field="{{.Created}}"
+ case "$filter" in
+ "jenkins-running")
+ ;;
+ "all-exited")
+ only_jenkins_containers=false
+ docker_ps_opts="--filter status=exited"
+ date_field="{{.State.FinishedAt}}"
+ ;;
+ esac
if [ "$hours" -eq "0" ]; then
exit 0
@@ -90,11 +104,10 @@ do_cleanup_containers ()
for container in $($DOCKER ps $docker_ps_opts --format "{{.ID}}"); do
local container_date container_seconds
- container_date=$(date +%s --date="$($DOCKER inspect --format "{{.Created}}" $container)")
+ container_date=$(date +%s --date="$($DOCKER inspect --format "$date_field" $container)")
container_seconds=$((curdate-container_date))
if [ "$(($container_seconds/3600))" -gt "$hours" ]; then
-
# Do we want to remove all containers, or only those
# started by Jenkins jobs?
if ${only_jenkins_containers}; then
@@ -145,12 +158,14 @@ do_cleanup_containers ()
}
res="0"
-do_cleanup_containers $cleanup_running_hours "" "stop" "stop" "Stopping long-running containers" &
+do_cleanup_containers $cleanup_running_hours "jenkins-running" \
+ "stop" "stop" "Stopping long-running containers" &
wait $! || res=$?
status=$res
res="0"
-do_cleanup_containers $cleanup_stopped_hours "-a" "rm -fv" "remove" "Removing containers stopped long ago" &
+do_cleanup_containers $cleanup_stopped_hours "all-exited" \
+ "rm -fv" "remove" "Removing containers stopped long ago" &
wait $! || res=$?
status=$(($status|(2*$res)))
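The refactored helper keys the age check on two Docker template fields: `{{.Created}}` for the running-container pass and `{{.State.FinishedAt}}` for the exited-container pass. A standalone sketch of the same age computation (assumes GNU date and a container ID in `$container`):

    container_date=$(date +%s \
        --date="$(docker inspect --format '{{.State.FinishedAt}}' "$container")")
    container_seconds=$(( $(date +%s) - container_date ))
    echo "container stopped $(( container_seconds / 3600 )) hours ago"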
diff --git a/tcwg-cleanup-stale-results.sh b/tcwg-cleanup-stale-results.sh
index 413de235..b92734d1 100755
--- a/tcwg-cleanup-stale-results.sh
+++ b/tcwg-cleanup-stale-results.sh
@@ -8,85 +8,139 @@ scripts=$(dirname "$0")
convert_args_to_variables "$@"
-days="${days-30}"
-refs_url="${refs_url-https://git.linaro.org/toolchain/ci/base-artifacts}"
-refs_pattern="${refs_pattern-refs/heads/linaro-local/ci/tcwg_bmk*}"
-results_top="${results_top-/home/tcwg-benchmark/results}"
+results_top="${results_top-/home/tcwg-buildslave/base-artifacts}"
dryrun="${dryrun-true}"
-verbose="${verbose-true}"
+verbose="${verbose-false}"
+cleanup_gc="${cleanup_gc-true}"
+cleanup_annex="${cleanup_annex-true}"
+
+current_host="bkp-01.tcwglab"
if $verbose; then
set -x
fi
-# Delete "used_by" markers older than $days days.
-(set +f; find $results_top-* -name used_by -mtime "+$days" -delete)
-
-# Initialize base-artifacts repo (by cloning its "empty" branch).
-refs_repo=$(basename "$refs_url" .git)
-clone_or_update_repo_no_checkout "$refs_repo" "$refs_url" none empty origin
-git -C "$refs_repo" reset --hard
-
-# Walk through all commits of all tcwg_bmk* branches and mark results
-# referenced in those results with "used_by" file.
-while IFS= read -r ref; do
- git -C "$refs_repo" fetch origin "$ref" >/dev/null 2>&1
- git -C "$refs_repo" reset --hard FETCH_HEAD >/dev/null 2>&1
- depth=0
- # Walk all commits of just-fetched branch (i.e., until HEAD^ can't
- # be parsed by git rev-parse).
- while true; do
- for results_id in "$refs_repo/results_id" \
- "$refs_repo/results_id-1" \
- "$refs_repo/results_id-2"; do
- if [ -f "$results_id" ]; then
- results_dir="$results_top-$(cat "$results_id")"
- used_by="$refs_url/$ref~$depth"
- if [ ! -d "$results_dir" ]; then
- echo "WARNING: $used_by is missing $results_dir"
- else
- echo "$used_by" > "$results_dir/used_by"
- fi
- fi
- done
- if ! git -C "$refs_repo" rev-parse HEAD^ >/dev/null 2>&1; then
- break
- fi
- git -C "$refs_repo" reset --hard HEAD^ >/dev/null 2>&1
- depth=$(($depth+1))
- done
-done < <(git ls-remote "$refs_url" "$refs_pattern" | awk '{ print $2 }')
+WORKSPACE="${WORKSPACE-}"
+if [ "${WORKSPACE-}" = "" ]; then
+ WORKSPACE=$(mktemp -d)
+ rm_workspace="rm -rf $WORKSPACE"
+else
+ rm_workspace="true"
+fi
-while IFS= read -r -d '' dir; do
- # Skip already-deleted dirs (e.g., $dir's parent was deleted).
- if [ ! -d "$dir" ]; then
- continue
- fi
+perform_cleanup_gc()
+{
+ ### CLEANUP THE GIT REPOSITORIES
+ echo "=== CLEANUP THE GIT REPOSITORIES"
+ while read -r gitdir; do
+ if $dryrun; then
+ echo "DRYRUN: git -C $gitdir gc"
+ else
+ echo "# git -C $gitdir gc"
+ git -C $gitdir gc
+ fi
+ done < <(find $results_top -mindepth 2 -maxdepth 2 -type d -name '*.git')
+}
- # Don't delete "used_by" dirs and dirs that have recent files
- # (i.e., "-mtime -$days"). E.g., in-progress benchmark might have uploaded
- # partial results.
- if [ x"$(find "$dir" -name used_by -o -mtime "-$days" | head -n1)" != x"" ]; then
- continue
- fi
+perform_cleanup_annex()
+{
+ ### CLEANUP THE ANNEX FILES
+ echo "=== CLEANUP THE ANNEX FILES"
+ existing_annex_file=$WORKSPACE/list_annex.existing.txt
+ used_annex_file=$WORKSPACE/list_annex.used.txt
+ recent_annex_file=$WORKSPACE/list_annex.recent.txt
+ rm -f $used_annex_file $existing_annex_file $recent_annex_file
- # Don't delete subdirectories of a "used_by" parent.
- parent="$dir"
- used=false
- while [ x"$parent" != x"/home/tcwg-benchmark" ] && ! $used; do
- parent=$(dirname "$parent")
- if [ -f "$parent/used_by" ]; then
- used=true
- fi
- done
- if $used; then
- continue
+ # List all existing annex
+ echo "# existing annex results"
+    assert_with_msg "ERROR: $results_top/annex does not exist" [ -d $results_top/annex ]
+ find $results_top/annex/ -type f > $existing_annex_file
+
+ sort -u $existing_annex_file > $existing_annex_file.tmp
+ mv $existing_annex_file.tmp $existing_annex_file
+
+ echo " => $(cat $existing_annex_file | wc -l) existing annex"
+
+ # List all used annex
+ echo "# referenced annex results"
+ while read gitdir; do
+ ci_project_config=${gitdir#$results_top/}
+ ci_project_config=${ci_project_config%.git}
+
+        # annex files are tcwg_bmk only
+ if ! [[ $ci_project_config =~ tcwg_bmk- ]]; then
+ continue
+ fi
+
+ rm -rf base-artifacts
+ git clone -q --reference $gitdir $gitdir \
+ --branch linaro-local/ci/$ci_project_config \
+ base-artifacts
+
+ for br in $(git -C base-artifacts/ branch -r); do
+ git -C base-artifacts checkout -q $br
+ readarray -t all_bmk_datas < <(set +x; get_git_history 0 base-artifacts "annex/bmk-data")
+ cat "${all_bmk_datas[@]:1}" | sed -e "s|^$current_host:||" >> $used_annex_file
+ printf " => $(cat $used_annex_file | wc -l) referenced annex -- %-200s\n" "[processed $ci_project_config ($br)]"
+ rm -rf "${all_bmk_datas[0]}"
+ done
+ done < <(find $results_top -mindepth 2 -maxdepth 2 -type d -name '*.git')
+
+ echo ""
+ echo " => $(cat $used_annex_file | wc -l) referenced annex"
+
+ # recent annex
+ find $results_top/annex/ -type f -mtime -30 > $recent_annex_file
+    echo " => $(cat $recent_annex_file | wc -l) recent annex (less than 1 month old)"
+
+ # include recent annex in the referenced ones
+ sort -u $used_annex_file $recent_annex_file > $used_annex_file.tmp
+ mv $used_annex_file.tmp $used_annex_file
+
+ ### compare and remove useless annex
+ missing_annex=list_annex.referenced_but_not_exist.txt
+ useless_annex=list_annex.exist_but_not_referenced.txt
+
+ set +o pipefail
+ diff -u $existing_annex_file $used_annex_file | grep '^\+' | sed -e 's|^\+||' | tail -n +2 > $missing_annex
+ diff -u $existing_annex_file $used_annex_file | grep '^\-' | sed -e 's|^\-||' | tail -n +2 > $useless_annex
+
+ if [ -s $missing_annex ]; then
+        echo "WARNING: these annex files are referenced, but do not exist"
+ cat $missing_annex | sed -e 's|^| |'
+ else
+ echo "NOTE: All referenced annex files exist"
fi
- echo "DELETE: $dir is not used"
- if $dryrun; then
- echo "DRYRUN: rm -rf $dir"
+ if [ -s $useless_annex ]; then
+ echo "REMOVING: About to remove $(cat $useless_annex|wc -l) files."
else
- rm -rf "$dir"
+ echo "NOTE: No annex file to remove."
fi
-done < <(set +f; find $results_top-* -type d -print0)
+
+ for file in $(cat $useless_annex); do
+ if $dryrun; then
+ echo "DRYRUN: rm -rf $file"
+ else
+ rm -rf "$file"
+ fi
+ done
+}
+
+cd $WORKSPACE
+
+# Free 10GB of disk space; this is needed for cleaning up the git repositories.
+rm -f empty-10Gbfile.tmp
+
+if $cleanup_gc; then
+ perform_cleanup_gc
+fi
+
+if $cleanup_annex; then
+ perform_cleanup_annex
+fi
+
+# Create a big file to reserve 10GB for the next cleanup.
+dd if=/dev/zero of=empty-10Gbfile.tmp bs=1G count=10
+
+$rm_workspace
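The existing/used comparison in perform_cleanup_annex is a set difference built from `diff -u` output: `+` lines are entries only in the used list (referenced but missing on disk), `-` lines are entries only in the existing list (on disk but unreferenced), and `tail -n +2` drops the `---`/`+++` header remnants. A toy illustration with hypothetical file names:

    printf 'a\nb\nc\n' > existing.txt
    printf 'b\nc\nd\n' > used.txt
    # Referenced but not existing: prints "d".
    diff -u existing.txt used.txt | grep '^\+' | sed -e 's|^\+||' | tail -n +2
    # Existing but not referenced: prints "a".
    diff -u existing.txt used.txt | grep '^\-' | sed -e 's|^\-||' | tail -n +2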
diff --git a/tcwg-cleanup-stale-workspaces.sh b/tcwg-cleanup-stale-workspaces.sh
index f2022ef2..9e16b0e4 100755
--- a/tcwg-cleanup-stale-workspaces.sh
+++ b/tcwg-cleanup-stale-workspaces.sh
@@ -56,7 +56,11 @@ for dir in "${dirs[@]}"; do
# chance of parallel build starting in this directory.
# Use a lock to avoid a race condition with a competing
# build.
- flock "$dir" mv "$dir" "$dir.bak"
+ flock "$dir" mv "$dir" "$dir.bak" || true
+ # On the off-chance that "$dir" was removed by another cleanup
+ # process, and above we have just created a lock file named
+ # "$dir" -- delete it.
+ rm -rf "$dir"
fi
rm_dirs=("${rm_dirs[@]}" "$dir.bak")
fi
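The new comment documents a subtle race: `flock` takes the directory itself as the lock target, and if a competing cleanup already moved `$dir` away, `flock` creates an ordinary file with that name just to hold the lock. The trailing `rm -rf "$dir"` exists only to remove that stray lock file. The pattern reduced to a sketch (hypothetical path):

    dir=/tmp/stale-workspace
    flock "$dir" mv "$dir" "$dir.bak" || true  # may race with another cleaner
    rm -rf "$dir"                              # drop the lock file flock may have created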
diff --git a/tcwg-dev-build.sh b/tcwg-dev-build.sh
index 9b295e75..ea2e75e6 100755
--- a/tcwg-dev-build.sh
+++ b/tcwg-dev-build.sh
@@ -7,14 +7,24 @@ scripts=$(dirname $0)
. $scripts/jenkins-helpers.sh
convert_args_to_variables "$@"
+# By default this script produces an aarch64-linux-gnu toolchain. If we're
+# already running on that platform, default to "native" so that this is clear
+# for this and other scripts.
+if [ "$(uname -s)" = "Linux" ] && [ "$(uname -m)" = "aarch64" ]; then
+ default_target="native"
+else
+ default_target="aarch64-linux-gnu"
+fi
+
abe_branch="${abe_branch-tested}"
dry_run="${dry_run-false}"
manifest="${manifest-}"
release_name="${release_name-default}"
buildnumber="${buildnumber-0}"
-target="${target-aarch64-linux-gnu}"
+target="${target-$default_target}"
version="${version-default}"
verbose="${verbose-true}"
+check="${check-}"
set -u
@@ -36,6 +46,11 @@ if [ x"$target" = x"native" ]; then
manifest_validation_opt="--manifest_validation false"
fi
+check_opt=()
+if [ -n "$check" ]; then
+ check_opt=("--check" "$check")
+fi
+
# shellcheck disable=SC2154
$scripts/MakeRelease.job \
--abedir "$(pwd)"/abe \
@@ -47,5 +62,6 @@ $scripts/MakeRelease.job \
--workspace "$(pwd)" \
$manifest_opt \
$manifest_validation_opt \
+ "${check_opt[@]}" \
${binutils:+--binutils "$binutils"} \
${gcc:+--gcc "$gcc"}
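The `check_opt` array is the usual bash pattern for forwarding an optional flag: an empty array quoted as "${check_opt[@]}" expands to zero words, where an empty string would either inject an empty argument or require unquoted (and unsafe) expansion. A reduced sketch with a hypothetical consumer (note that bash releases before 4.4 treat an empty array expansion as unset under `set -u`):

    check=""                            # possibly empty user input
    check_opt=()
    if [ -n "$check" ]; then
        check_opt=("--check" "$check")
    fi
    ./consumer.sh "${check_opt[@]}"     # contributes nothing when $check is empty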
diff --git a/tcwg-generate-source-cache.sh b/tcwg-generate-source-cache.sh
index 8dad0db7..eec94f8e 100755
--- a/tcwg-generate-source-cache.sh
+++ b/tcwg-generate-source-cache.sh
@@ -126,6 +126,10 @@ generate_misc_files ()
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
https://github.com/llvm/llvm-project.git
https://git.linaro.org/toolchain/jenkins-scripts.git
+ https://git.linaro.org/toolchain/bmk-scripts.git
+ https://android.googlesource.com/platform/manifest.git
+ https://android.googlesource.com/platform/superproject.git
+ https://git.code.sf.net/p/mingw-w64/mingw-w64.git
)
generate_git_cache "${repos[@]}"
}
@@ -135,17 +139,22 @@ update_git_repos () {
$verbose
local dir="$1"
- while IFS= read -r -d '' repo_git
- do
- (
- local repo
- repo=$(dirname "$repo_git")
+
+ local repo
+
+ while IFS= read -r -d '' repo_git; do
+ repo=$(dirname "$repo_git")
+ (
cd "$repo"
# Update and prune local clone
run_with_timeout_and_retry 1h 3 git remote update -p
find -maxdepth 1 ! -name .git ! -name . -print0 \
- | xargs -0 rm -rf
- )
+ | xargs -0 rm -rf
+ ) &
+ if ! wait $!; then
+ echo "WARNING: Failed to update: $repo -- removing to re-clone"
+ rm -rf "$repo"
+ fi
done < <(find "$dir" -name .git -type d -print0)
}
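The reworked loop isolates each repository update in a backgrounded subshell so that one broken cache entry cannot abort the whole run under `set -e`: `if ! wait $!` observes the failure, and the remedy is to delete the entry so the next run re-clones it. The shape of the loop body as a sketch (`$repo` is a placeholder path):

    (
        cd "$repo"
        git remote update -p    # the real script wraps this in a timeout/retry helper
    ) &
    if ! wait $!; then
        echo "WARNING: Failed to update: $repo -- removing to re-clone"
        rm -rf "$repo"
    fi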
diff --git a/tcwg-llvm-build.sh b/tcwg-llvm-build.sh
index 11de84e6..bf89a18c 100755
--- a/tcwg-llvm-build.sh
+++ b/tcwg-llvm-build.sh
@@ -61,7 +61,7 @@ while [ "$#" -gt 0 ]; do
shift ;;
--revision)
if [ ! -z "$VAL" ]; then
- if echo "$VAL" | grep -q "r[0-9]\+"; then
+ if echo "$VAL" | grep "r[0-9]\+" >/dev/null; then
REVISION="$VAL"
else
echo "ERROR: $1"
diff --git a/tcwg-llvm-release.bat b/tcwg-llvm-release.bat
new file mode 100755
index 00000000..489513c3
--- /dev/null
+++ b/tcwg-llvm-release.bat
@@ -0,0 +1,23 @@
+@echo on
+
+mkdir artifacts
+
+REM For now we use a local copy of build_llvm_package.bat.
+REM We should merge our changes into the upstream
+REM llvm/utils/release/build_llvm_package.bat script.
+call jenkins-scripts\build_llvm_package.bat %1 %2 || exit /b
+
+set revision=%1
+
+if "%2" == "test" (
+ set git_ref=%revision%
+ set package_version=%revision:~0,8%
+) else (
+ set git_ref=llvmorg-%revision%
+ set package_version=%revision%
+)
+set clang_format_vs_version=13.0.0.%datestamp%
+set build_dir=llvm_package_%package_version%
+
+cp %build_dir%/build32/LLVM-%package_version%-woa64.zip artifacts\
+cp %build_dir%/build32/LLVM-%package_version%-woa64.exe artifacts\
diff --git a/tcwg-llvm-release.sh b/tcwg-llvm-release.sh
index 5e1b5433..3acaf5cc 100755
--- a/tcwg-llvm-release.sh
+++ b/tcwg-llvm-release.sh
@@ -15,7 +15,7 @@ BASEDIR=$(dirname "$(readlink -f "$0")")
# Syntax
SYN_WORKSPACE="--workspace=/path/to/workspace"
SYN_RELEASE="--release=M.m.p"
-SYN_CANDIDATE="--candidate=N (or 'final' or 'branch=*')"
+SYN_CANDIDATE="--candidate=N (or 'final' or 'git-ref=*')"
SYN_BUILDJOBS="--buildjobs=N (def. CPUS)"
SYN_TOOLCHAIN="--toolchain=http://url/for/tarball"
SYN_TOOLCHAIN_FILE="--toolchain-file=<file-name> (file name to copy the produced toolchain file name)"
@@ -25,7 +25,9 @@ SYNTAX="$0 $SYN_WORKSPACE $SYN_RELEASE $SYN_CANDIDATE $SYN_TOOLCHAIN $SYN_TOOLCH
# Environment Variables and default values
WORKSPACE=$(pwd)
+GITREF=""
RELEASE=""
+RELCALL=""
CANDIDATE=""
RCCALL=""
RCTAG=""
@@ -43,10 +45,15 @@ while [ "$#" -gt 0 ]; do
case "$ARG" in
--release)
RELEASE="$VAL"
- if ! echo "$RELEASE" | grep -E -q "^[0-9]+\\.[0-9]\\.[0-9]"; then
+ if test -z "$RELEASE"; then
+ # The release can be empty if we're building a git ref.
+ echo "No release specified"
+ elif ! echo "$RELEASE" | grep -E -q "^[0-9]+\\.[0-9]\\.[0-9]"; then
echo "ERROR: $1"
echo "Syntax: $SYN_RELEASE"
exit 1
+ else
+ RELCALL="-release $RELEASE"
fi
shift ;;
--candidate)
@@ -120,8 +127,13 @@ while [ "$#" -gt 0 ]; do
done
# Validate options
-if [ "$RELEASE" = "" ] || [ "$CANDIDATE" = "" ]; then
- echo "ERROR: Missing release or candidate"
+if [ "$CANDIDATE" = "" ]; then
+ echo "ERROR: Missing candidate"
+ echo "$SYNTAX"
+ exit 1
+fi
+if [ "$RELEASE" = "" ] && [ "$GITREF" = "" ]; then
+ echo "ERROR: Missing release or git ref"
echo "$SYNTAX"
exit 1
fi
@@ -140,22 +152,24 @@ fi
PLATFORM=$(uname -m)
OPENMP=""
MLIR=""
-FLANG="-flang"
+FLANG=""
TARGET="$PLATFORM-linux-gnu"
-LIMITLINKJOBS=""
-if echo "$PLATFORM" | grep -qi "armv7"; then
+EXTRA_CONFIG_ARGS="-DCOMPILER_RT_DEFAULT_TARGET_ONLY=ON "
+if echo "$PLATFORM" | grep -i "armv7" >/dev/null; then
TARGET="armv7a-linux-gnueabihf"
OPENMP="-no-openmp"
MLIR="-no-mlir"
- FLANG=""
+ FLANG="-no-flang"
if [ x"$USENINJA" != x"" ]; then
- LIMITLINKJOBS="-configure-flags -DLLVM_PARALLEL_LINK_JOBS=$LINKJOBS"
+ EXTRA_CONFIG_ARGS="$EXTRA_CONFIG_ARGS -DLLVM_PARALLEL_LINK_JOBS=$LINKJOBS -DLLVM_LIT_ARGS=-v -DCOMPILER_RT_USE_LLVM_UNWINDER=ON"
fi
-elif ! echo "$PLATFORM" | grep -qi "aarch64" && \
- ! echo "$PLATFORM" | grep -qi "x86_64"; then
+elif echo "$PLATFORM" | grep -i "aarch64" >/dev/null; then
+ EXTRA_CONFIG_ARGS="$EXTRA_CONFIG_ARGS -DLLVM_LIT_ARGS=-vj16"
+elif ! echo "$PLATFORM" | grep -i "x86_64" >/dev/null; then
echo "ERROR: Don't recognise PLATFORM $PLATFORM"
exit 1
fi
+EXTRA_CONFIG_ARGS="$EXTRA_CONFIG_ARGS -DCMAKE_C_COMPILER_TARGET=$TARGET"
# Dump
echo "RELEASE = $RELEASE"
@@ -175,7 +189,11 @@ LOGEXT="txt"
# Release script
RELEASE_SCRIPT="test-release.sh"
-RELEASE_TAG="llvmorg-${RELEASE}${RCTAG}"
+if test -z "$RELEASE"; then
+ RELEASE_TAG="$GITREF"
+else
+ RELEASE_TAG="llvmorg-${RELEASE}${RCTAG}"
+fi
SCRIPT_URL=https://raw.githubusercontent.com/llvm/llvm-project/$RELEASE_TAG/llvm/utils/release/$RELEASE_SCRIPT
(
cd "$WORKSPACE"
@@ -188,11 +206,15 @@ SCRIPT_URL=https://raw.githubusercontent.com/llvm/llvm-project/$RELEASE_TAG/llvm
# - we want to push the binary publicly on success or failure
result=0
(
- cd "$WORKSPACE" && LC_ALL=C ./$RELEASE_SCRIPT -release $RELEASE $RCCALL -triple $TARGET -j$BUILDJOBS $OPENMP $MLIR $FLANG $USENINJA $LIMITLINKJOBS |& tee "$LOGBASE-release.$LOGEXT"
+ cd "$WORKSPACE" && ulimit -n 65536 && LC_ALL=C ./$RELEASE_SCRIPT $RELCALL $RCCALL -triple $TARGET -j$BUILDJOBS $OPENMP $MLIR $FLANG $USENINJA -configure-flags "$EXTRA_CONFIG_ARGS" |& tee "$LOGBASE-release.$LOGEXT"
) &
wait $! || result=$?
-PKGTAG="$RELEASE$RCTAG"
+if test -z "$RELEASE"; then
+ PKGTAG="$GITREF"
+else
+ PKGTAG="$RELEASE$RCTAG"
+fi
PKGNAME="$(push_binary_name "$PKGTAG" "$TARGET")"
PUSHTYPE="releases"
PUSHDIR="$(push_binary_dir "$PUSHTYPE")"
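After this change every optional piece of the test-release.sh command line ($RELCALL, $RCCALL, $OPENMP, $MLIR, $FLANG) is either empty or a complete flag/value fragment, and the platform-specific CMake options travel through a single -configure-flags string. A reduced sketch of the composition with placeholder values (the version, rc number and triple are illustrative, not from this patch):

    RELCALL="-release 17.0.1"
    RCCALL="-rc 2"
    TARGET="aarch64-linux-gnu"
    EXTRA_CONFIG_ARGS="-DCOMPILER_RT_DEFAULT_TARGET_ONLY=ON -DCMAKE_C_COMPILER_TARGET=$TARGET"
    ./test-release.sh $RELCALL $RCCALL -triple "$TARGET" -j16 \
        -configure-flags "$EXTRA_CONFIG_ARGS"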
diff --git a/tcwg-lnt/create-server.sh b/tcwg-lnt/create-server.sh
new file mode 100755
index 00000000..ec70564e
--- /dev/null
+++ b/tcwg-lnt/create-server.sh
@@ -0,0 +1,158 @@
+#!/bin/bash
+
+set -euf -o pipefail
+
+# global variable declarations
+script_dir="$(readlink -f "$(dirname "$0")")"
+
+# ####################################################################
+# read server configuration
+
+config_name="$1"
+
+# shellcheck disable=SC1090
+. $script_dir/$config_name/config
+
+lnt_repo_url=${lnt_repo_url-https://git.linaro.org/toolchain/llvm-lnt.git}
+lnt_repo_branch=${lnt_repo_branch?}
+lnt_root_dir="${lnt_root_dir-$PWD/$config_name/lntserver/}"
+
+echo "[$config_name] server name is : ${lnt_server_name?}"
+echo "[$config_name] server port is : ${lnt_server_port?}"
+echo "[$config_name] lnt repository : ${lnt_repo_url?}"
+echo "[$config_name] lnt branch : ${lnt_repo_url?}"
+echo "[$config_name] local root_dir : ${lnt_root_dir}"
+# shellcheck disable=SC2154
+echo "[$config_name] check projects : ${check_configs[*]}"
+# shellcheck disable=SC2154
+echo "[$config_name] bmk projects : ${bmk_configs[*]}"
+
+# ####################################################################
+#
+
+lnt_sandbox_dir="$lnt_root_dir/sandbox"
+lnt_repo_dir="$lnt_root_dir/llvm-lnt"
+lnt_db_dir="$lnt_root_dir/lnt-database"
+lnt_schemas_dir="$script_dir/$config_name"
+
+# ####################################################################
+#
+# get LLVM-LNT sources
+
+if [ ! -d "$lnt_repo_dir" ]; then
+ git clone "$lnt_repo_url" "$lnt_repo_dir"
+fi
+
+git -C "$lnt_repo_dir" fetch origin "$lnt_repo_branch"
+
+git -C "$lnt_repo_dir" checkout --force FETCH_HEAD
+
+# ####################################################################
+#
+# create LNT sandbox
+
+if [ ! -d "$lnt_sandbox_dir" ]; then
+ (
+ virtualenv "$lnt_sandbox_dir"
+
+ cd "$lnt_sandbox_dir"
+
+ # shellcheck disable=SC1091
+ source bin/activate
+
+ python "$lnt_repo_dir"/setup.py develop
+
+ # temporary workaround for jammy
+ case "$(lsb_release -sc)" in
+ focal)
+ ;;
+ jammy)
+ (
+ set +f
+ sed -i 's/from collections /from collections.abc /' \
+ lib/python3.10/site-packages/Werkzeug-0.12.2-py3.10.egg/werkzeug/datastructures.py \
+ lib/python3.10/site-packages/Jinja2-2.7.2-py3.10.egg/jinja2/_compat.py \
+ lib/python3.10/site-packages/MarkupSafe-0.23-py3.10-*.egg/markupsafe/__init__.py
+ )
+ ;;
+ *)
+ echo >&2 "distro not supported"
+ exit 1
+ ;;
+ esac
+ )
+fi
+
+# shellcheck disable=SC1091
+source "$lnt_sandbox_dir/bin/activate"
+
+
+# ####################################################################
+#
+# create LNT database
+
+generate_schema()
+{
+ local template=${1?}
+ local tsname=${2?}
+
+ cat "${lnt_schemas_dir?}/$template.yaml.in" \
+ | sed "s/TSNAME/$tsname/g" \
+ > "${lnt_db_dir?}/schemas/$tsname.yaml"
+}
+
+if [ ! -d "$lnt_db_dir" ]; then
+
+ lnt create "$lnt_db_dir" --name "$lnt_server_name" --stable-urls
+
+ lnt_secret_token="$(echo $RANDOM | md5sum | cut -d' ' -f1)"
+
+ sed -i "s/# \(api_auth_token =\).*/\1 '$lnt_secret_token'/" "$lnt_db_dir"/lnt.cfg
+
+ for config in "${bmk_configs[@]}"; do
+ generate_schema tcwg_bmk "$config"
+ done
+
+ for config in "${check_configs[@]}"; do
+ generate_schema tcwg_check "$config"
+ done
+
+fi
+
+# TODO schema update not yet supported
+# eventually delete everything
+# or just delete the updated testsuite (not supported yet in lnt)
+
+
+# ####################################################################
+#
+# run the LNT server
+
+# only one server on the machine for now
+pkill --echo --full "lnt runserver .* --port $lnt_server_port" || true
+
+lnt runserver \
+ "$lnt_db_dir" \
+ --hostname 0.0.0.0 \
+ --port $lnt_server_port \
+ --processes 4 \
+ &
+
+
+# ####################################################################
+
+exit 0
+
+
+# ####################################################################
+
+# TODO
+#
+# - remove temporary workaround for jammy (update dependencies version)
+#
+# - look at github.com/llvm/llvm-lnt/blob/main/docker/docker-entrypoint.sh
+#
+# - also apply local LNT patches
+#
+# - option to restart everything from scratch? just the db?
+#
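generate_schema above is a one-token template expander: each *.yaml.in schema ships with the literal placeholder TSNAME, and the function writes one concrete schema per CI configuration. The equivalent expansion as a standalone sketch (paths illustrative):

    mkdir -p "$lnt_db_dir/schemas"
    sed 's/TSNAME/tcwg_bmk-code_speed-coremark/g' tcwg-lnt-01/tcwg_bmk.yaml.in \
        > "$lnt_db_dir/schemas/tcwg_bmk-code_speed-coremark.yaml"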
diff --git a/tcwg-lnt/lnt-check.sh b/tcwg-lnt/lnt-check.sh
new file mode 100755
index 00000000..1e35976c
--- /dev/null
+++ b/tcwg-lnt/lnt-check.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+
+set -euf -o pipefail
+
+
+# ####################################################################
+
+# run lnt checks - called from an llvm-lnt directory
+
+# usage : lnt-check.sh
+
+
+# ####################################################################
+
+[ -d lnt ]
+
+which sqlite3
+
+which tox
+
+which python
+
+
+# ####################################################################
+
+LLVM_BUILD_DIR="$PWD/llvm-build"
+LLVM_SRC_DIR="$LLVM_BUILD_DIR/llvm-project"
+LLVM_URL=https://github.com/llvm/llvm-project.git
+LLVM_BRANCH=main
+
+# build llvm test tools (llvm-lit, FileCheck, not)
+
+# TODO: find a simpler way to get these tools
+
+if [ ! -f "$LLVM_BUILD_DIR/bin/llvm-lit" ]; then
+
+ [ ! -d "$LLVM_SRC_DIR" ] \
+ && git clone "$LLVM_URL" "$LLVM_SRC_DIR"
+
+ cd "$LLVM_SRC_DIR"
+
+ git fetch origin
+
+ git checkout --force "origin/$LLVM_BRANCH"
+
+ mkdir -p "$LLVM_BUILD_DIR"
+
+ cd "$LLVM_BUILD_DIR"
+
+ cmake \
+ -GNinja \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DLLVM_TARGETS_TO_BUILD=AArch64 \
+ -DLLVM_ENABLE_ASSERTIONS=True \
+ -DLLVM_ENABLE_PROJECTS=clang \
+ "$LLVM_SRC_DIR/llvm"
+
+ ninja FileCheck not split-file
+fi
+
+export PATH="$LLVM_BUILD_DIR/bin/:$PATH"
+
+
+# ####################################################################
+
+# check
+
+tox -e py3,mypy
+
+# TODO: also check flake8 & docs configs
+
+
+# ####################################################################
diff --git a/tcwg-lnt/tcwg-lnt-01/config b/tcwg-lnt/tcwg-lnt-01/config
new file mode 100644
index 00000000..d0c4685d
--- /dev/null
+++ b/tcwg-lnt/tcwg-lnt-01/config
@@ -0,0 +1,39 @@
+lnt_server_port=38000
+lnt_server_name="LNT-TCWG (all projects)"
+lnt_repo_branch="linaro-local/master"
+
+check_configs=(
+ tcwg_binutils_check
+ tcwg_bootstrap_check
+ tcwg_gcc_check
+ tcwg_gdb_check
+ tcwg_glibc_check
+ tcwg_gnu_cross_check_binutils
+ tcwg_gnu_cross_check_gcc
+ tcwg_gnu_embed_check_binutils
+ tcwg_gnu_embed_check_gcc
+ tcwg_gnu_native_check_binutils
+ tcwg_gnu_native_check_gcc
+ tcwg_gnu_native_check_gdb
+ tcwg_gnu_native_check_glibc
+ tcwg_gnu_native_fast_check_gcc
+ tcwg_gnu_native_fast_check_gdb
+ tcwg_gnu_woa_check_binutils
+)
+
+bmk_configs=(
+ tcwg_bmk-code_size-coremark
+ tcwg_bmk-code_size-cpu2017fast
+ tcwg_bmk-code_size-cpu2017rate
+ tcwg_bmk-code_size-spec2k6
+ tcwg_bmk-code_speed-coremark
+ tcwg_bmk-code_speed-cpu2017rate
+ tcwg_bmk-code_speed-cpu2017speed
+ tcwg_bmk-code_speed-spec2k6
+ tcwg_bmk-code_vect-cpu2017fast
+ tcwg_bmk-code_vect-cpu2017rate
+ tcwg_bmk-code_vect-spec2k6
+ tcwg_bmk-fujitsu_speed-cpu2017speed
+ tcwg_bmk-qc_speed-cpu2017rate
+)
+
diff --git a/tcwg-lnt/tcwg-lnt-01/tcwg_bmk.yaml.in b/tcwg-lnt/tcwg-lnt-01/tcwg_bmk.yaml.in
new file mode 100644
index 00000000..20ffdc5c
--- /dev/null
+++ b/tcwg-lnt/tcwg-lnt-01/tcwg_bmk.yaml.in
@@ -0,0 +1,55 @@
+# Nightly Test Suite (nts) schema. This was originally developed for the llvm
+# test-suite and is now LNT's default schema. It also has some extra fields
+# like `score` or `mem_bytes` to work for more test suites out of the box.
+format_version: '2'
+name: TSNAME
+metrics:
+- name: compile_status
+ type: Status
+- name: compile
+ type: Real
+ display_name: Compile
+ unit: seconds
+ unit_abbrev: s
+
+- name: execution_status
+ type: Status
+- name: execution
+ type: Real
+ display_name: Execution
+ unit: seconds
+ unit_abbrev: s
+
+- name: hash_status
+ type: Status
+- name: hash
+ type: Hash
+ status_field: hash_status
+
+- name: code_size
+ type: Real
+ display_name: Code Size
+ unit: bytes
+ unit_abbrev: b
+
+- name: mem_bytes
+ type: Real
+ display_name: Memory Usage
+ unit: bytes
+ unit_abbrev: b
+
+- name: score
+ type: Real
+ bigger_is_better: true
+ display_name: Score
+
+- name: execution_variation
+ type: Real
+ display_name: sample variability
+
+run_fields:
+- name: llvm_project_revision
+ order: true
+machine_fields:
+- name: hardware
+- name: os
diff --git a/tcwg-lnt/tcwg-lnt-01/tcwg_check.yaml.in b/tcwg-lnt/tcwg-lnt-01/tcwg_check.yaml.in
new file mode 100644
index 00000000..5bbd2978
--- /dev/null
+++ b/tcwg-lnt/tcwg-lnt-01/tcwg_check.yaml.in
@@ -0,0 +1,55 @@
+# TCWG gnu check (tcwg_gnu_check) schema
+format_version: '2'
+name: TSNAME
+metrics:
+
+- name: nb_PASS
+ type: Real
+ bigger_is_better: true
+ display_name: PASS
+- name: nb_XPASS
+ type: Real
+ display_name: XPASS
+- name: nb_FAIL
+ type: Real
+ display_name: FAIL
+- name: nb_XFAIL
+ type: Real
+ display_name: XFAIL
+- name: nb_KFAIL
+ type: Real
+ display_name: KFAIL
+- name: nb_UNSUPPORTED
+ type: Real
+ display_name: UNSUPPORTED
+- name: nb_UNRESOLVED
+ type: Real
+ display_name: UNRESOLVED
+- name: nb_UNTESTED
+ type: Real
+ display_name: UNTESTED
+- name: nb_ERROR
+ type: Real
+ display_name: ERROR
+
+- name: nb_OTHER
+ type: Real
+ display_name: OTHER
+
+- name: nb_FLAKY
+ type: Real
+ display_name: FLAKY
+- name: nb_good
+ type: Real
+ bigger_is_better: true
+ display_name: good
+- name: nb_bad
+ type: Real
+ display_name: bad
+
+run_fields:
+- name: llvm_project_revision
+ order: true
+machine_fields:
+- name: hardware
+- name: os
diff --git a/tcwg-lnt/tcwg-lnt-02/config b/tcwg-lnt/tcwg-lnt-02/config
new file mode 100644
index 00000000..b133012d
--- /dev/null
+++ b/tcwg-lnt/tcwg-lnt-02/config
@@ -0,0 +1,26 @@
+lnt_server_port=38500
+lnt_server_name="LNT-TCWG (stable)"
+lnt_repo_branch="linaro-local/master"
+
+check_configs=(
+ tcwg_binutils_check
+ tcwg_bootstrap_check
+ tcwg_gcc_check
+ tcwg_gdb_check
+ tcwg_glibc_check
+ tcwg_gnu_cross_check_binutils
+ tcwg_gnu_cross_check_gcc
+ tcwg_gnu_embed_check_binutils
+ tcwg_gnu_embed_check_gcc
+ tcwg_gnu_native_check_binutils
+ tcwg_gnu_native_check_gcc
+ tcwg_gnu_native_check_gdb
+ tcwg_gnu_native_check_glibc
+ tcwg_gnu_native_fast_check_gcc
+ tcwg_gnu_native_fast_check_gdb
+ tcwg_gnu_woa_check_binutils
+)
+
+# No bmk projects on this server
+bmk_configs=()
+
diff --git a/tcwg-lnt/tcwg-lnt-02/tcwg_check.yaml.in b/tcwg-lnt/tcwg-lnt-02/tcwg_check.yaml.in
new file mode 100644
index 00000000..5bbd2978
--- /dev/null
+++ b/tcwg-lnt/tcwg-lnt-02/tcwg_check.yaml.in
@@ -0,0 +1,55 @@
+# TCWG gnu check (tcwg_gnu_check) schema
+format_version: '2'
+name: TSNAME
+metrics:
+
+- name: nb_PASS
+ type: Real
+ bigger_is_better: true
+ display_name: PASS
+- name: nb_XPASS
+ type: Real
+ display_name: XPASS
+- name: nb_FAIL
+ type: Real
+ display_name: FAIL
+- name: nb_XFAIL
+ type: Real
+ display_name: XFAIL
+- name: nb_KFAIL
+ type: Real
+ display_name: KFAIL
+- name: nb_UNSUPPORTED
+ type: Real
+ display_name: UNSUPPORTED
+- name: nb_UNRESOLVED
+ type: Real
+ display_name: UNRESOLVED
+- name: nb_UNTESTED
+ type: Real
+ display_name: UNTESTED
+- name: nb_ERROR
+ type: Real
+ display_name: ERROR
+
+- name: nb_OTHER
+ type: Real
+ display_name: OTHER
+
+- name: nb_FLAKY
+ type: Real
+ display_name: FLAKY
+- name: nb_good
+ type: Real
+ bigger_is_better: true
+ display_name: good
+- name: nb_bad
+ type: Real
+ display_name: bad
+
+run_fields:
+- name: llvm_project_revision
+ order: true
+machine_fields:
+- name: hardware
+- name: os
diff --git a/tcwg-lnt/tcwg-lnt-03/config b/tcwg-lnt/tcwg-lnt-03/config
new file mode 100644
index 00000000..cd698baf
--- /dev/null
+++ b/tcwg-lnt/tcwg-lnt-03/config
@@ -0,0 +1,39 @@
+lnt_server_port=38240
+lnt_server_name="LNT-TCWG (experimental)"
+lnt_repo_branch="linaro-local/experimental"
+
+check_configs=(
+ tcwg_binutils_check
+ tcwg_bootstrap_check
+ tcwg_gcc_check
+ tcwg_gdb_check
+ tcwg_glibc_check
+ tcwg_gnu_cross_check_binutils
+ tcwg_gnu_cross_check_gcc
+ tcwg_gnu_embed_check_binutils
+ tcwg_gnu_embed_check_gcc
+ tcwg_gnu_native_check_binutils
+ tcwg_gnu_native_check_gcc
+ tcwg_gnu_native_check_gdb
+ tcwg_gnu_native_check_glibc
+ tcwg_gnu_native_fast_check_gcc
+ tcwg_gnu_native_fast_check_gdb
+ tcwg_gnu_woa_check_binutils
+)
+
+bmk_configs=(
+ tcwg_bmk-code_size-coremark
+ tcwg_bmk-code_size-cpu2017fast
+ tcwg_bmk-code_size-cpu2017rate
+ tcwg_bmk-code_size-spec2k6
+ tcwg_bmk-code_speed-coremark
+ tcwg_bmk-code_speed-cpu2017rate
+ tcwg_bmk-code_speed-cpu2017speed
+ tcwg_bmk-code_speed-spec2k6
+ tcwg_bmk-code_vect-cpu2017fast
+ tcwg_bmk-code_vect-cpu2017rate
+ tcwg_bmk-code_vect-spec2k6
+ tcwg_bmk-fujitsu_speed-cpu2017speed
+ tcwg_bmk-qc_speed-cpu2017rate
+)
+
diff --git a/tcwg-lnt/tcwg-lnt-03/tcwg_bmk.yaml.in b/tcwg-lnt/tcwg-lnt-03/tcwg_bmk.yaml.in
new file mode 100644
index 00000000..20ffdc5c
--- /dev/null
+++ b/tcwg-lnt/tcwg-lnt-03/tcwg_bmk.yaml.in
@@ -0,0 +1,55 @@
+# Nightly Test Suite (nts) schema. This was originally developed for the llvm
+# test-suite and is now LNT's default schema. It also has some extra fields
+# like `score` or `mem_bytes` to work for more test suites out of the box.
+format_version: '2'
+name: TSNAME
+metrics:
+- name: compile_status
+ type: Status
+- name: compile
+ type: Real
+ display_name: Compile
+ unit: seconds
+ unit_abbrev: s
+
+- name: execution_status
+ type: Status
+- name: execution
+ type: Real
+ display_name: Execution
+ unit: seconds
+ unit_abbrev: s
+
+- name: hash_status
+ type: Status
+- name: hash
+ type: Hash
+ status_field: hash_status
+
+- name: code_size
+ type: Real
+ display_name: Code Size
+ unit: bytes
+ unit_abbrev: b
+
+- name: mem_bytes
+ type: Real
+ display_name: Memory Usage
+ unit: bytes
+ unit_abbrev: b
+
+- name: score
+ type: Real
+ bigger_is_better: true
+ display_name: Score
+
+- name: execution_variation
+ type: Real
+ display_name: sample variability
+
+run_fields:
+- name: llvm_project_revision
+ order: true
+machine_fields:
+- name: hardware
+- name: os
diff --git a/tcwg-lnt/tcwg-lnt-03/tcwg_check.yaml.in b/tcwg-lnt/tcwg-lnt-03/tcwg_check.yaml.in
new file mode 100644
index 00000000..5bbd2978
--- /dev/null
+++ b/tcwg-lnt/tcwg-lnt-03/tcwg_check.yaml.in
@@ -0,0 +1,55 @@
+# TCWG gnu check (tcwg_gnu_check) schema
+format_version: '2'
+name: TSNAME
+metrics:
+
+- name: nb_PASS
+ type: Real
+ bigger_is_better: true
+ display_name: PASS
+- name: nb_XPASS
+ type: Real
+ display_name: XPASS
+- name: nb_FAIL
+ type: Real
+ display_name: FAIL
+- name: nb_XFAIL
+ type: Real
+ display_name: XFAIL
+- name: nb_KFAIL
+ type: Real
+ display_name: KFAIL
+- name: nb_UNSUPPORTED
+ type: Real
+ display_name: UNSUPPORTED
+- name: nb_UNRESOLVED
+ type: Real
+ display_name: UNRESOLVED
+- name: nb_UNTESTED
+ type: Real
+ display_name: UNTESTED
+- name: nb_ERROR
+ type: Real
+ display_name: ERROR
+
+- name: nb_OTHER
+ type: Real
+ display_name: OTHER
+
+- name: nb_FLAKY
+ type: Real
+ display_name: FLAKY
+- name: nb_good
+ type: Real
+ bigger_is_better: true
+ display_name: good
+- name: nb_bad
+ type: Real
+ display_name: bad
+
+run_fields:
+- name: llvm_project_revision
+ order: true
+machine_fields:
+- name: hardware
+- name: os
diff --git a/tcwg-report-ci-status.sh b/tcwg-report-ci-status.sh
new file mode 100755
index 00000000..7033f30a
--- /dev/null
+++ b/tcwg-report-ci-status.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+view_url=https://ci.linaro.org/view/
+views=(tcwg_bmk tcwg_bmk-bisect tcwg_gcc tcwg_gnu_native tcwg_gnu_cross tcwg_kernel-gnu tcwg_kernel-llvm)
+
+declare -A nb
+declare -A all
+
+# get view html file
+for view in "${views[@]}"; do
+ wget -o /dev/null -O "/tmp/$view.html" "$view_url/$view"
+done
+
+# Print TODAY PICTURE status
+list_pat=("Success" "In progress" "Failed" "Aborted" "Disabled")
+printf "%-22s | %13s | %13s | %13s | %13s | %13s |\n" "TODAY PICTURE" "${list_pat[@]}"
+printf "========================================================================================================\n"
+for view in "${views[@]}"; do
+ printf "%-22s |" "$view"
+ for pat in "${list_pat[@]}"; do
+ nb["$pat"]=$(grep -c "tooltip=\"$pat\"" < "/tmp/$view.html")
+ all["$pat"]=$((all["$pat"] + nb["$pat"]))
+ printf " %13s |" "${nb["$pat"]}"
+ done
+ printf "\n"
+done
+printf "========================================================================================================\n"
+printf "%-22s | %13s | %13s | %13s | %13s | %13s |\n" \
+ "TOTAL" "${all[Success]}" "${all[In progress]}" "${all[Failed]}" "${all[Aborted]}" "${all[Disabled]}"
+
+
+echo ""
+echo ""
+
+# Print WEATHER PICTURE status
+list_name=("No-fail" "1-fail" "2-fails" "3-fails" "4-fails" "All-fails")
+list_pat=("No recent" "1 out of the last 5" "2 out of the last 5" "3 out of the last 5" "4 out of the last 5" "All recent")
+
+printf "%-22s | %13s | %13s | %13s | %13s | %13s | %13s |\n" "WHEATHER (5 last runs)" "${list_name[@]}"
+printf "========================================================================================================================\n"
+for view in "${views[@]}"; do
+ printf "%-22s |" "$view"
+ for nbfail in {0..5}; do
+ pat="${list_pat[$nbfail]}"
+ nb["$nbfail"]=$(grep -c "Build stability: $pat builds failed." < "/tmp/$view.html")
+ all["$nbfail"]=$((all["$nbfail"] + nb["$nbfail"]))
+ printf " %13s |" "${nb[$nbfail]}"
+ done
+ printf "\n"
+done
+printf "========================================================================================================================\n"
+printf "%-22s | %13s | %13s | %13s | %13s | %13s | %13s |\n" \
+ "TOTAL" "${all[0]}" "${all[1]}" "${all[2]}" "${all[3]}" "${all[4]}" "${all[5]}"
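Both tables are built by scraping the saved view pages: the first counts `tooltip="<status>"` attributes on the job rows, the second counts the "Build stability: N out of the last 5 builds failed." health tooltips. A reduced sketch, assuming the Jenkins markup keeps this shape:

    view=/tmp/tcwg_bmk.html
    nb_success=$(grep -c 'tooltip="Success"' "$view")
    nb_1fail=$(grep -c 'Build stability: 1 out of the last 5 builds failed.' "$view")
    echo "success=$nb_success one-recent-failure=$nb_1fail"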
diff --git a/tcwg-report-stale-rr-jobs.sh b/tcwg-report-stale-rr-jobs.sh
index f06cef0d..e3b0a692 100755
--- a/tcwg-report-stale-rr-jobs.sh
+++ b/tcwg-report-stale-rr-jobs.sh
@@ -10,51 +10,630 @@ convert_args_to_variables "$@"
days="${days-10}"
human="${human-true}"
+output="${output-/dev/null}"
refs_prefix="${refs_prefix-refs/heads/linaro-local/ci/}"
-refs_url_prefix="${refs_url_prefix-https://git.linaro.org/toolchain/ci}"
+refs_glo_url_prefix="${refs_glo_url_prefix-https://git.linaro.org/toolchain/ci}"
+refs_bkp_url_prefix="${refs_bkp_url_prefix-ssh://bkp.tcwglab/home/tcwg-buildslave}"
repos=("${repos[@]-default}")
verbose="${verbose-false}"
+classify="${classify-false}"
+only="${only-false}"
+keep_tmp="${keep_tmp-false}"
+tmpdir="${tmpdir-""}"
-if $verbose; then
- set -x
+output=$(realpath $output)
+if [ $output != "/dev/null" ]; then
+ rm -f $output
fi
-if [ x"${repos[*]}" = x"default" ]; then
- repos=(base-artifacts binutils-gdb gcc glibc linux llvm-project newlib qemu)
-fi
+# This represents the average number of days between the last commit
+# tested in a component and the start date of the Jenkins test.
+declare -A delay_per_component=([binutils]=0 [gcc]=0 [glibc]=0 [llvm]=0 [linux]=8 [qemu]=0)
-process_git_url ()
+process_base_artifacts ()
{
(
set -euf -o pipefail
- local refs_url="$1"
-
- # Initialize base-artifacts repo (by cloning its "empty" branch).
- refs_repo=$(basename "$refs_url" .git)
- clone_or_update_repo_no_checkout "$refs_repo" "$refs_url" auto empty origin \
- >/dev/null 2>&1
- git -C "$refs_repo" reset --hard >/dev/null 2>&1
-
- # Walk through all commits of all tcwg_bmk* branches and mark results
- # referenced in those results with "used_by" file.
- while IFS= read -r ref; do
- dst_ref="refs/remotes/origin/${ref#refs/heads/}"
- git -C "$refs_repo" fetch -q origin "+$ref:$dst_ref" >/dev/null 2>&1
- commit_stamp=$(git -C "$refs_repo" show --no-patch --pretty=%ct "$dst_ref")
- days_ago=$((($(date +%s) - $commit_stamp) / (24 * 3600)))
- if [ $days_ago -gt $days ]; then
- if $human; then
- echo "$refs_repo: ${ref#$refs_prefix}: last updated $days_ago days ago"
+ local baseartifacts_url="$1"
+
+ local days_limit=$days
+
+ local gitpath
+ while read -r ref; do
+
+ local ci_project_config="${ref#refs/heads/linaro-local/ci/}"
+ local remote_project_config="${ci_project_config/\//--}"
+ local dst_ref="refs/remotes/$remote_project_config/linaro-local/ci/$ci_project_config"
+
+ if [ $only != false ] && [[ ! $ci_project_config =~ $only ]]; then
+ continue
+ fi
+
+ # -- get the studied base-artifact branch
+ if ! [ "$(git -C "base-artifacts" remote get-url $remote_project_config 2> /dev/null)" ]; then
+ git -C "base-artifacts" remote add $remote_project_config $baseartifacts_url
+ git -C "base-artifacts" fetch -q "$baseartifacts_url" "+$ref:$dst_ref" 2> /dev/null
+ fi
+ git -C "base-artifacts" checkout -q $dst_ref
+
+ # -- get components for this project
+ local components
+ components="$(get_baseline_manifest "{rr[components]}")"
+
+ # -- check last update of that component
+ declare -A commit_stamps
+ for c in $components; do
+
+ local tmproot="" last_changed_sha1=""
+ local -a git_history
+
+ readarray -t git_history < <(get_git_history 1 base-artifacts "git/${c}_rev")
+ tmproot=${git_history[0]}
+ if [ ${#git_history[@]} == 2 ]; then
+ last_changed_sha1=${git_history[1]}
+ last_changed_sha1="${last_changed_sha1#$tmproot/}"
+ last_changed_sha1="${last_changed_sha1%/git/${c}_rev}"
+ commit_stamps[$c]="$(git -C base-artifacts show --no-patch --pretty='%ct' $last_changed_sha1)"
+ fi
+ rm -rf "$tmproot"
+ done
+
+ # -- dump the messages
+ for c in $components; do
+ if [[ -v commit_stamps[$c] ]]; then
+ days_ago=$((($(date +%s) - ${commit_stamps[$c]}) / (24 * 3600)))
+ if [ "$days_ago" -gt "$((days_limit + delay_per_component[$c]))" ]; then
+ echo "$c: $ci_project_config: last updated $days_ago days ago" |& tee -a $output
+ fi
else
- echo "$refs_repo:$ref"
+ echo "$c: $ci_project_config: no date for this component" |& tee -a $output
fi
- fi
- done < <(git ls-remote "$refs_url" "${refs_prefix}*" | awk '{ print $2 }')
+ done
+
+ done < <(git ls-remote "$baseartifacts_url" "${refs_prefix}*" | cut -f2)
)
}
-for repo in "${repos[@]}"; do
- process_git_url "$refs_url_prefix/$repo" &
-done | sort
+report_old_backup_branches ()
+{
+ (
+ set -euf -o pipefail
+
+ local ci_project_config
+ while read -r ci_project_config; do
+
+ # get the branches of this $ci_project_config git repository
+ baseartifacts_url="$refs_bkp_url_prefix/base-artifacts/$ci_project_config.git"
+
+ readarray -t git_branches < <(git -C base-artifacts ls-remote \
+ "$baseartifacts_url"|cut -f2)
+
+ # retrieve the manifest of current branch to have minor/major vars
+ git -C base-artifacts checkout FETCH_HEAD -- manifest.sh
+ cur_major=$(("$(get_baseline_manifest "{rr[major]}")"))
+ cur_minor=$(("$(get_baseline_manifest "{rr[minor]}")"))
+
+ for br in "${git_branches[@]}"; do
+
+ if [[ $br =~ refs/heads/linaro-local/ci/.* ]]; then
+ continue
+
+ elif [[ $br =~ refs/heads/linaro-local/v.*_to_v.*-.*/.* ]]; then
+ br_revs=$(echo $br | \
+ sed -e 's|refs/heads/linaro-local/v.*_to_v\([0-9\.]*\)-.*/.*|\1|')
+ br_major=$(echo $br_revs | cut -d. -f1)
+ br_minor=$(echo $br_revs | cut -d. -f2)
+
+ if [ "$cur_major" -eq "$br_major" ] \
+ && [ "$cur_minor" -eq "$br_minor" ]; then
+ # Nothing to report. This is the last backup branch.
+ true
+ elif [ "$cur_major" -gt "$br_major" ] \
+ || { [ "$cur_major" -eq "$br_major" ] \
+ && [ "$cur_minor" -gt "$br_minor" ]; }; then
+ echo "BRANCH $br : Too old. (v$br_major.$br_minor < v$cur_major.$cur_minor)"
+ elif [ "$cur_major" -lt "$br_major" ] \
+ || { [ "$cur_major" -eq "$br_major" ] \
+ && [ "$cur_minor" -lt "$br_minor" ]; }; then
+ echo "BRANCH $br : In advance (v$br_major.$br_minor > v$cur_major.$cur_minor)"
+ else
+ assert_with_msg "Internal error for branch $br" false
+ fi
+ else
+ echo "BRANCH $br : Strangely formed"
+ fi
+ done
+
+ done < <(ssh bkp.tcwglab 'cd /home/tcwg-buildslave/base-artifacts/; find . -type d -name "*.git"' | \
+ sed -e 's/.git$//' | cut -d/ -f2-3)
+ )
+}
+
+process_all_base_artifacts ()
+{
+ (
+ set -euf -o pipefail
+
+ # -- Initialize base-artifacts repo (by cloning its "empty" branch).
+ # FIXME: We need to add handling of /home/shared/git/base-artifacts to
+ # tcwg-generate-source-cache.sh .
+ clone_or_update_repo "base-artifacts" empty \
+ "$refs_glo_url_prefix/base-artifacts/empty.git" auto empty
+ git -C "base-artifacts" reset -q --hard
+
+ # -- Fetching all remotes
+ local nb_remote idx_remote
+ echo "# Fetching all"
+ git -C "base-artifacts" fetch --all 2> /dev/null
+ nb_remote=$(git -C "base-artifacts" remote |wc -l)
+
+    # -- Process every component's dates
+    echo "# Reporting components not updated recently"
+
+ # process all unitary base-artifacts/<ci_project>/<ci_config>
+ local -A all_ci_project_configs
+ local ci_project_config
+ idx_remote=0
+ while read -r ci_project_config; do
+
+ idx_remote=$((idx_remote+1))
+ verbose -en "# Processing base-artifacts $idx_remote / $nb_remote\r"
+
+ process_base_artifacts "$refs_bkp_url_prefix/base-artifacts/$ci_project_config.git"
+
+ # This one has been processed
+ all_ci_project_configs[$ci_project_config]=1
+
+ done < <(ssh bkp.tcwglab 'cd /home/tcwg-buildslave/base-artifacts/; find . -type d -name "*.git"' | \
+ sed -e 's/.git$//' | cut -d/ -f2-3)
+
+    # FIXME: This is temporary code and needs to be removed at some point.
+    # It reports base-artifacts that have not yet been migrated to bkp.tcwglab.
+ echo "# Reporting not migrated yet base-artifacts"
+ while read -r gitpath; do
+
+ # Check if already processed
+ ci_project_config="${gitpath#toolchain/ci/base-artifacts/}"
+ if [[ -v all_ci_project_configs[$ci_project_config] ]]; then
+ continue
+ fi
+
+ process_base_artifacts "$refs_glo_url_prefix/base-artifacts/$ci_project_config.git"
+
+ done < <(ssh git.linaro.org info toolchain/ci/base-artifacts|cut -f2|grep base-artifacts/tcwg)
+
+ # -- Check for old branches
+ echo "# Reporting useless backup branches"
+ report_old_backup_branches
+
+ # -- clean local repository
+ # Clean up the clone (this is supposed to re-share objects from
+ # reference clone and keep the size of the clone minimal).
+ # It's possible that previous GC process was interrupted and left
+ # a lock. Use --force to workaround that. It should be safe
+ # to override the lock since directories should not be shared
+ # between concurrent builds.
+ #
+ # Also, prune all loose objects to avoid "git gc --auto" failing
+ # and creating .git/gc.log to warn us.
+ rm -f "base-artifacts/.git/gc.log"
+ # Do not detach into background for GC. Running in the background may
+    # cause a failure during bisect's rsync, which may see some of
+    # the files disappearing mid-rsync.
+ git -C "base-artifacts" config gc.autoDetach false
+ git -C "base-artifacts" gc --auto --force --prune=all
+ # Delete stale locks -- especially .git/refs/remotes/REMOTE/BRANCH.lock
+ # These occur when builds are aborted during "git remote update" or similar.
+ find "base-artifacts/.git" -name "*.lock" -delete
+ )
+}
+
+jenkins_base_url="https://ci.linaro.org"
+use_last_build="${use_last_build-no}"
+
+count_all=0
+list_err_noproject=()
+
+declare -A test
+declare -A alldiags
+
+################## UTILITY FUNCTIONS
+classify_get_project()
+{
+ verbose " * $1"
+
+ # zero-initialize test var
+ test=(['gitproject']="" ['jkproject']="" ['branch']="" ['last_updated']=""
+ ['poll_date']=""
+ #
+ ['run_nb']="" ['run_date']="" ['run_status']="" ['run_title']="" ['run_check_regression']=""
+ #
+ ['last_run']="" ['diag']="" )
+
+ test['last_updated']=$(echo $1 | sed -e 's|.*: last updated ||' -e 's|.*: No successful run since ||')
+ test['gitproject']=$(echo $1 |cut -d: -f 1)
+ test['branch']=$(echo $1 |cut -d: -f 2)
+ test['branch']=${test['branch']:1}
+
+ test['jkproject']=$(echo ${test['branch']} | sed \
+ -e's|\(.*\)/\(.*\)|\1--\2-build|')
+
+ verbose " : $jenkins_base_url/job/${test['jkproject']}"
+ verbose " : $tmpdir/""${test['jkproject']}"
+
+ mkdir -p $tmpdir/"${test['jkproject']}" ; cd $tmpdir/"${test['jkproject']}"
+}
+
+set_diag()
+{
+ diag_error="$1"
+ test['diag']="$diag_error"
+
+    if [ "${test['diag']}" == "ERROR (project does not exist)" ] &&
+ [[ ! ${list_err_noproject[*]} =~ (^|[[:space:]])${test['branch']}($|[[:space:]]) ]]; then
+ list_err_noproject+=("${test['branch']}");
+ fi
+
+ [ -z "${alldiags["$diag_error"]+set}" ] && alldiags["$diag_error"]=0
+ alldiags["$diag_error"]="$(( alldiags["$diag_error"] + 1 ))"
+ verbose " ==> diag=$diag_error"
+}
+
+verbose ()
+{
+ if [ $verbose != false ]; then
+ echo "$@"
+ fi
+}
+
+download_project_file ()
+{
+ local filename=$1
+ local local_file=$filename
+ local remote_file
+
+ remote_file="$(echo $filename | sed -e 's|__toppage__|.|')"
+
+ cd $tmpdir/"${test['jkproject']}"
+ [ -f "$local_file" ] && return
+
+ mkdir -p "$(dirname "$local_file")"
+ # echo $(pwd)/$local_file
+ wget -O "$(pwd)/$local_file" -o /dev/null "$jenkins_base_url/job/${test['jkproject']}/$remote_file" || true
+}
+
+classify ()
+{
+ local condition="$1"
+ local filename="$2"
+ local expression="$3"
+ local diag_error="$4"
+
+ # Only if not already classified
+ if [ ! -z "${test['diag']}" ]; then return; fi
+
+ download_project_file "$filename"
+
+ if [ "$condition" == "exist" ]; then
+ if [ ! -s "$filename" ]; then
+ set_diag "$diag_error"
+ fi
+ fi
+ if [ "$condition" == "grep" ] && [ -f "$filename" ]; then
+ nb=$(grep -c "$expression" $filename || true)
+ if [ "$nb" != "0" ]; then
+ set_diag "$diag_error"
+ fi
+ fi
+ if [ "$condition" == "xzgrep" ] && [ -s "$filename" ]; then
+ nb=$(xzcat $filename | grep -c "$expression" || true)
+ if [ "$nb" != "0" ]; then
+ set_diag "$diag_error"
+ fi
+ fi
+}
+
+
+################## GET INFO FROM THE BUILD
+get_project_info ()
+{
+ count_all=$((count_all+1))
+
+    # Diag has already been classified; probably means no project. Don't go further.
+ if [ ! -z "${test['diag']}" ]; then return; fi
+
+ # Last poll
+ download_project_file scmPollLog
+ test['poll_date']=$(grep 'Started on' scmPollLog | sed -e 's|.*Started on ||' -e 's|,||g' || true)
+ test['poll_date']=$(echo ${test['poll_date']} | sed -e 's|,||g' -e 's| mo | month |g' -e 's| hr | hour |g' || true)
+ test['poll_date']=$(date --date="${test['poll_date']}" +"%x %R")
+
+ # LastBuild run date
+ download_project_file lastBuild/__toppage__
+ test['run_date']=$(grep 'Started .* ago' lastBuild/__toppage__ | sed -e 's|.*Started \(.*\) ago.*|\1 ago|'|head -1)
+}
+get_artifact_dir ()
+{
+ lookfor=$1
+ download_project_file ${test['run_nb']}/artifact/artifacts/__toppage__
+ local i nb stepname
+ for i in {1..15}; do
+ stepname=$(printf "%02d" $i)-$lookfor
+ nb=$(grep -c "href=\"$stepname\"" ${test['run_nb']}/artifact/artifacts/__toppage__)
+ if [ $nb != 0 ]; then
+ test["run_dir_$lookfor"]="$stepname"
+ echo "$stepname"
+ #echo "$jenkins_base_url/job/${test['jkproject']}/${test['run_nb']}/artifact/artifacts/${test[run_dir_$lookfor]}"
+ break
+ fi
+ done
+}
+get_run_title_and_status ()
+{
+ run=$1
+
+ verbose " - get_run_title_and_status() : $run"
+ # Last run
+ download_project_file $run/__toppage__
+ if [ -s "$run/__toppage__" ]; then
+ test['run_title']=$(grep '<title>.*</title>' $run/__toppage__ | head -1 | sed -e 's|</title>.*||' -e 's|.*<title>||'||true)
+ test['run_title']=$(echo ${test['run_title']}|sed -e 's|.* #||' -e 's| \[Jenkins\].*||')
+ test['run_nb']=$(echo ${test['run_title']}|sed -e 's|\([0-9]*\)-.*|\1|')
+ test['run_status']=$(grep 'tooltip' $run/__toppage__ | head -1 | sed -e 's|.*tooltip="||' -e 's|"* .*||' ||true)
+ fi
+ verbose " > [${test['run_status']}] ${test['run_title']}"
+}
+get_last_interesting_run ()
+{
+ gitprojectshort=$(echo ${test['gitproject']}|cut -d- -f1)
+
+ test['last_run']="lastBuild"
+ get_run_title_and_status "lastBuild"
+
+ [ "x${test['run_nb']}" = "x" ] && return
+ [[ "${test['gitproject']}" =~ base-artifacts ]] && return
+
+ verbose " . last interesting run() : ${test['run_nb']}"
+
+ export r # to avoid shellcheck unused warning
+ for r in {1..8}; do
+ get_run_title_and_status ${test['run_nb']}
+ if [[ "${test['run_title']}" =~ $gitprojectshort ]] ||
+ [ $gitprojectshort == "*all*" ]; then
+ test['last_run']=${test['run_nb']}
+ verbose " > ${test['run_nb']}"
+ return
+ fi
+ test['run_nb']=$((test['run_nb']-1))
+ done
+ verbose " > ${test['run_nb']}"
+}
+
+################## CLASSIFY FUNCTIONS
+
+classify_polling_error ()
+{
+ if [ ! -z "${test['diag']}" ]; then return; fi
+ verbose " - classify_polling_error()"
+ classify grep scmPollLog "Connection timed out" "ERROR(timeout while polling)"
+ classify grep scmPollLog "fatal: read error: Connection reset by peer" "ERROR(fatal polling error)"
+}
+classify_project_deleted ()
+{
+ if [ ! -z "${test['diag']}" ]; then return; fi
+ verbose " - classify_project_deleted()"
+    classify exist __toppage__ "x" "ERROR (project does not exist)"
+}
+classify_project_disabled ()
+{
+ if [ ! -z "${test['diag']}" ]; then return; fi
+ verbose " - classify_project_disabled()"
+ classify grep __toppage__ "Project DELETE ME" "ERROR (project disabled)"
+}
+classify_gcc_boostrap_timeout ()
+{
+ if [ ! -z "${test['diag']}" ]; then return; fi
+ verbose " - classify_gcc_boostrap_timeout()"
+ classify grep __toppage__ "tcwg_gcc_bootstrap" "ERROR (bootstrap timeout)"
+}
+
+classify_analyse_console ()
+{
+ if [ ! -z "${test['diag']}" ]; then return; fi
+ verbose " - classify_analyse_console()"
+ classify grep lastBuild/consoleText "Build timed out" "ERROR (build timeout)"
+ classify grep lastBuild/consoleText "FATAL: \[ssh-agent\] Unable to start agent" "ERROR (cannot start ssh-agent)"
+}
+
+classify_analyse_result_file ()
+{
+ local stage
+ if [ ! -z "${test['diag']}" ]; then return; fi
+ if [ "${test['run_status']}" == "Success" ]; then return; fi
+
+ verbose " - classify_analyse_result_file()"
+ download_project_file ${test['run_nb']}/artifact/artifacts/results
+
+ while read line
+ do
+ # stage line
+ pat='^# .*(reset_artifacts|build_abe|build_bmk_llvm|benchmark|linux_n_obj)'
+ if [[ $line =~ $pat ]]; then
+ stage="$(echo $line|sed -e 's|# ||' -e 's| --.*||' -e 's|:.*||')"
+ #echo " $line => $stage"
+ fi
+
+ # Clear error line
+ pat='^# Benchmarking infra is offline'
+ if [[ $line =~ $pat ]]; then
+ set_diag "ERROR (infra offline)"
+ fi
+ pat='# .* error: patch failed'
+ if [[ $line =~ $pat ]]; then
+ set_diag "ERROR (git patch failed)"
+ fi
+ pat='# First few build errors in logs'
+ if [[ $line =~ $pat ]]; then
+ set_diag "ERROR ($stage build errors)"
+ fi
+ pat='^# .*grew in size.*'
+ if [[ $line =~ $pat ]]; then
+ set_diag "ERROR (grew in size)"
+ fi
+ pat='^# .*slowed down.*'
+ if [[ $line =~ $pat ]]; then
+ set_diag "ERROR (slowed down)"
+ fi
+ pat='^# .*reduced by.*'
+ if [[ $line =~ $pat ]]; then
+ set_diag "ERROR (reduced)"
+ fi
+
+ # single message before reset-artifact
+ pat='^# FAILED'
+ if [[ $line =~ $pat ]] && [ ! -v stage ]; then
+ set_diag "ERROR (FAILED in reset_artifacts)"
+ fi
+
+ [ ! -z "${test['diag']}" ] && break
+
+ done < "${test['run_nb']}/artifact/artifacts/results"
+
+ # If diag is set
+ if [ ! -z "${test['diag']}" ]; then return; fi
+
+ # otherwise fill with the stage
+ if [[ -v stage ]]; then
+ set_diag "ERROR ($stage)";
+ fi
+}
+
+## Detection is quite fragile for the moment
+classify_no_change_in_sources ()
+{
+ local pjt days_limit
+
+ if [ ! -z "${test['diag']}" ]; then return; fi
+
+ verbose " - classify_no_change_in_sources()"
+ # how many days
+ if [[ ${test['jkproject']} =~ -release- ]]; then
+ days_limit="$((days+days))"
+ else
+ days_limit="$days"
+ fi
+
+ # get date
+ download_project_file ${test['run_nb']}/artifact/artifacts/jenkins/manifest.sh
+
+ # shellcheck disable=SC2034
+ declare -A rr debug
+ # shellcheck disable=SC1090
+ source ${test['run_nb']}/artifact/artifacts/jenkins/manifest.sh
+
+    # set diag if appropriate
+ pjt=$(echo ${test['gitproject']}|cut -d- -f1)
+ if [ "${rr[debug_${pjt}_date]+abc}" ]; then
+ local last_commit_date start_warn_date
+ last_commit_date="${rr[debug_${pjt}_date]}"
+ start_warn_date=$(date +%s --date="$days_limit days ago")
+
+ if [ "$last_commit_date" -lt "$start_warn_date" ]; then
+ set_diag "ERROR (no change in sources)";
+ fi
+ fi
+}
+
+
+################## PRINT SUGGESTIONS
+print_suggestions ()
+{
+ local gitbase="ssh://git.linaro.org/toolchain/ci"
+
+ if [ ${#list_err_noproject[@]} -ne 0 ]; then
+ echo "1) For deleted projects you may want to DELETE the stored results branches:" |& tee -a $output
+ tmpdir=/tmp/empty_git
+ echo "mkdir -p $tmpdir && cd $tmpdir && git init" |& tee -a $output
+ echo "git push $gitbase/base-artifacts.git \\" |& tee -a $output
+ for br in "${list_err_noproject[@]}"; do
+ echo " --delete refs/heads/linaro-local/ci/$br \\" |& tee -a $output
+ done
+ echo "" |& tee -a $output
+ echo "rm -rf $tmpdir" |& tee -a $output
+ fi
+}
+
+################## MAIN CLASSIFY FUNCTIONS
+classify_failures ()
+{
+ local stale_jobs_file
+
+ stale_jobs_file="$(pwd)/$1"
+
+ [ "x$tmpdir" = "x" ] &&
+ tmpdir="$(mktemp -d -t tmpdir-XXXXXXXXXX)"
+
+ echo "working in $tmpdir"
+ cd $tmpdir
+
+ printf "\n" |& tee -a $output
+ printf "%-45s | %-13s | %-16s | %-16s | %-50s\n" "AUTOMATIC DIAGNOSTIC" "LAST UPDATED" "LAST POLLING" "LAST RUN" "PROJECT NAME" |& tee -a $output
+ printf "====================================================================================================================================================================\n" |& tee -a $output
+
+ while read -r line;
+ do
+
+ classify_get_project "$line"
+
+ if [ $only != false ] && [[ ! ${test['jkproject']} =~ $only ]]; then
+ continue
+ fi
+
+ if [ $verbose == "*all*" ]; then set -x; fi
+
+ # Check that the project exists before getting its info
+ classify_project_deleted
+
+ # Get info from project
+ get_project_info
+
+ # Classify
+ classify_polling_error
+
+ # Is the project disabled?
+ classify_project_disabled
+
+ # Deeper analysis
+ get_last_interesting_run
+ classify_analyse_console
+ classify_analyse_result_file
+ classify_no_change_in_sources
+ [ -z "${test['diag']}" ] && set_diag "-"
+
+ printf "%-45s | %-13s | %-16s | %-16s | %-16s %-50s\n" "${test['diag']}" "${test['last_updated']}" "${test['poll_date']}" "${test['run_date']}" "[${test['gitproject']}]" "$jenkins_base_url/job/${test['jkproject']}/${test['run_nb']}" |& tee -a $output
+ done < $stale_jobs_file
+
+ printf "====================================================================================================================================================================\n" |& tee -a $output
+ printf "SUMMARY : \n" |& tee -a $output
+ for K in "${!alldiags[@]}"; do
+ printf " %-28s : %-3s\n" "$K" "${alldiags[$K]}" |& tee -a $output
+ done
+ printf " %-28s : %-3s\n" "TOTAL FAILURES" "$count_all" |& tee -a $output
+ printf "====================================================================================================================================================================\n" |& tee -a $output
+ printf "SUGGESTIONS : \n" |& tee -a $output
+ print_suggestions
+ printf "====================================================================================================================================================================\n" |& tee -a $output
+ printf "\n" |& tee -a $output
+
+ [ $keep_tmp ] || rm -rf $tmpdir
+}
+
+
+# If classify is specified, classify the failures
+if [ $classify != false ]; then
+ classify_failures $classify
+ exit 0;
+fi
+
+process_all_base_artifacts
-wait
diff --git a/tcwg-start-container.sh b/tcwg-start-container.sh
index e33a4bb7..00271b06 100755
--- a/tcwg-start-container.sh
+++ b/tcwg-start-container.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-set -ef -o pipefail
+set -euf -o pipefail
# shellcheck source=jenkins-helpers.sh
. "$(dirname $0)"/jenkins-helpers.sh
@@ -10,14 +10,71 @@ shift "$SHIFT_CONVERTED_ARGS"
obligatory_variables container image
declare container image
+dryrun="${dryrun-false}"
keep_existing="${keep_existing-true}"
verbose="${verbose-true}"
-
-set -u
+additional_options="${additional_options-}"
+test_docker="${test_docker-false}"
if $verbose; then set -x; fi
-docker pull "$image"
+# Check that docker can start a container.
+test_docker()
+{
+ timeout 30s /root/docker-wrapper ps
+ /root/docker-wrapper maybepull "$image"
+ /root/docker-wrapper run --rm --entrypoint=/bin/sh "$image"
+ echo "NOTE: Docker seems to be OK"
+}
+
+if [ -f /root/docker-wrapper ]; then
+ # /root/docker-wrapper is created by dockerfiles/tcwg-base/tcwg-host/run.sh;
+ # on benchmarking boards /root is bind-mounted inside "host" container.
+ if $test_docker; then
+ # The fact that we are here implies that we are running as root on
+ # a bare machine.
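+ # Run test_docker in the background and wait on it: "set -e" stays
+ # active inside the function, while "if ! wait" lets us branch on
+ # its exit status.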
+ test_docker &
+ if ! wait $!; then
+ storage_driver=$(timeout 30s /root/docker-wrapper info \
+ | grep "Storage Driver" | awk '{print $3}' \
+ || true)
+ if [ x"$storage_driver" = x"" ] \
+ || [ x"$storage_driver" = x"devicemapper" ]; then
+ # With the TK1's old kernel the only way to run docker
+ # is to use devicemapper storage driver with loopback
+ # backend, which is unfit for production usage.
+ # Every few months the loopback file gets corrupted and
+ # docker can't start.
+ # To solve this we go nuclear on docker.
+ timeout 30s /usr/sbin/service docker stop || true
+ rm -rf /var/lib/docker/
+ # If below hangs, then we'll just wait for the eventual
+ # power-cycle. If docker still doesn't work from a clean
+ # state, then we need to investigate manually.
+ /usr/sbin/service docker stop || true
+ fi
+ /usr/sbin/service docker restart
+ test_docker &
+ if ! wait $!; then
+ echo "ERROR: Cannot make docker work on the system"
+ exit 1
+ fi
+ fi
+ fi
+
+ if [ x"$keep_existing" != x"false" ]; then
+ # We have docker-wrapper available, so use it to work around Docker
+ # Hub's rate limits on image pulls. This is important for benchmarking
+ # boards, which call tcwg-update-bmk-containers.sh for every build.
+ /root/docker-wrapper maybepull "$image"
+ else
+ # We are asked to update the container unconditionally.
+ # Make sure we will use latest image.
+ docker pull "$image"
+ fi
+else
+ docker pull "$image"
+fi
rm_cnt=""
if docker stats --no-stream "$container" >/dev/null 2>&1; then
@@ -32,6 +89,11 @@ if docker stats --no-stream "$container" >/dev/null 2>&1; then
fi
;;
esac
+
+ if $dryrun; then
+ exit $EXTERNAL_FAIL
+ fi
+
# Rename the current container to free-up the name for "docker run" below.
# Use rename name starting with a number (seconds since epoch) so that
# it'll be cleaned up even if something goes wrong here.
@@ -42,11 +104,18 @@ if docker stats --no-stream "$container" >/dev/null 2>&1; then
# Failure to rename a container is usually caused by container
# restarting loop. This restarting container can't be the current
# one, so just delete it.
- docker rm -vf "$container"
+ docker stop "$container" || true
+ if ! docker rm -v "$container"; then
+ docker rm -vf "$container"
+ fi
rm_cnt=""
fi
fi
+if $dryrun; then
+ exit $EXTERNAL_FAIL
+fi
+
qemu_mount=""
qemu_bin=$(mktemp -p $HOME)
case "$(uname -m):$image" in
@@ -62,7 +131,7 @@ esac
start_sh=$(mktemp)
docker run --rm $qemu_mount $image start.sh > "$start_sh"
-bash "$start_sh" "$@"
+bash "$start_sh" --verbose "$verbose" --additional_options "$additional_options" -- "$@"
rm "$start_sh" "$qemu_bin"
if [ x"$rm_cnt" != x"" ]; then
@@ -70,5 +139,11 @@ if [ x"$rm_cnt" != x"" ]; then
# Note that if both old and new containers need an exclusive resource
# (e.g., tcp port or connection to jenkins), then the new container might
# need to restart a couple of times to wait for removal of the old one.
- docker rm -vf "$rm_cnt"
+ #
+ # We first try to gracefully shutdown the container
+ docker stop "$rm_cnt" || true
+ if ! docker rm -v "$rm_cnt"; then
+ # ... and force SIGKILL only when necessary.
+ docker rm -fv "$rm_cnt"
+ fi
fi
diff --git a/tcwg-update-bmk-containers.sh b/tcwg-update-bmk-containers.sh
new file mode 100755
index 00000000..a6421d8f
--- /dev/null
+++ b/tcwg-update-bmk-containers.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+set -euf -o pipefail
+
+scripts=$(dirname $0)
+# shellcheck source=jenkins-helpers.sh
+. $scripts/jenkins-helpers.sh
+convert_args_to_variables "$@"
+
+obligatory_variables board
+declare -g board
+
+distro="${distro-default}"
+force="${force-false}"
+test_docker="${test_docker-false}"
+node="${node-}"
+verbose="${verbose-true}"
+
+if $verbose; then set -x; fi
+
+ssh_root="ssh -p22 -lroot"
+rsync -az --del -e "$ssh_root" \
+ $scripts/ $board:jenkins-scripts/
+$ssh_root -Snone $board \
+ ./jenkins-scripts/tcwg-update-host-containers.sh \
+ --distro "$distro" \
+ --group "tcwg-bmk" \
+ --force "$force" \
+ --test_docker "$test_docker" \
+ --additional_options "--privileged" \
+ --verbose "$verbose"
+
+wait_for_ssh_server "$board" 100
+
+if [ x"$node" != x"" ]; then
+ rsync -az --del \
+ $scripts/ $board:jenkins-scripts/
+ ssh -Snone $board \
+ ./jenkins-scripts/tcwg-update-host-containers.sh \
+ --distro "$distro" \
+ --node "$node" \
+ --force "$force" \
+ --verbose "$verbose"
+fi
diff --git a/tcwg-update-host-containers.sh b/tcwg-update-host-containers.sh
index bde3f718..83088680 100755
--- a/tcwg-update-host-containers.sh
+++ b/tcwg-update-host-containers.sh
@@ -1,22 +1,29 @@
#!/bin/bash
-set -ef -o pipefail
+set -euf -o pipefail
scripts=$(dirname $0)
# shellcheck source=jenkins-helpers.sh
. $scripts/jenkins-helpers.sh
convert_args_to_variables "$@"
-distro="${distro-bionic}"
+distro="${distro-default}"
node="${node-host}"
group="${group-default}"
force="${force-false}"
+test_docker="${test_docker-false}"
+additional_options="${additional_options-}"
+dryrun="${dryrun-false}"
verbose="${verbose-true}"
-set -u
-
if $verbose; then set -x; fi
+# Resolve LTS and LTS-1 values to Ubuntu distros.
+case "$distro" in
+ lts_1) distro=focal ;;
+ lts|default) distro=jammy ;;
+esac
+
image="linaro/ci-$(print_arch_for_host localhost)-tcwg-host-ubuntu:$distro"
if [ x"$group" = x"default" ]; then
@@ -34,8 +41,11 @@ fi
$scripts/tcwg-start-container.sh \
--container "$node" \
+ --dryrun "$dryrun" \
--image "$image" \
--keep_existing "$keep_existing" \
+ --test_docker "$test_docker" \
+ --additional_options "$additional_options" \
--verbose "$verbose" \
-- \
- --verbose $verbose -- $image $group $node
+ $image $group $node
diff --git a/tcwg-update-llvmbot-containers.sh b/tcwg-update-llvmbot-containers.sh
index 2eeb38a2..72f9641b 100755
--- a/tcwg-update-llvmbot-containers.sh
+++ b/tcwg-update-llvmbot-containers.sh
@@ -10,13 +10,19 @@ convert_args_to_variables "$@"
obligatory_variables NODE_NAME password
declare NODE_NAME password
-distro="${distro-bionic}"
+distro="${distro-default}"
force="${force-false}"
master="${master-silent}"
verbose="${verbose-true}"
if $verbose; then set -x; fi
+# Resolve LTS and LTS-1 values to Ubuntu distros.
+case "$distro" in
+ lts_1) distro=focal ;;
+ lts|default) distro=jammy ;;
+esac
+
if $force; then
keep_existing=false
else
@@ -24,77 +30,81 @@ else
fi
case "$NODE_NAME:$master" in
- tcwg-amp-05:normal)
+ tcwg-jade-04:normal)
bots=(
+ linaro-clang-aarch64-full-2stage
+ linaro-flang-aarch64-libcxx
linaro-lldb-arm-ubuntu
- linaro-lldb-aarch64-ubuntu
)
;;
- tcwg-amp-05:buildkite)
+ tcwg-jade-04:buildkite)
bots=(
- # 1 bot per AArch64 config to limit the
- # total build time to 1x the slowest config
+ # 1 bot per config to limit the
+ # total build time to 1x the slowest config.
+ # aarch64 exceptions/no-exceptions and Armv7-M picolib
+ # (which is v7-M being simulated on AArch64).
linaro-aarch64-libcxx-01
linaro-aarch64-libcxx-02
- )
- ;;
- tcwg-amp-06:buildkite)
- bots=(
+ linaro-aarch64-libcxx-03
# These can all build any v7/8 config.
# 4 because there's 4 combinations.
# exceptions/no exceptions armv7/armv8
- # 1 bot per config limits total build
- # time to 1x the slowest bot.
linaro-armv8-libcxx-01
linaro-armv8-libcxx-02
linaro-armv8-libcxx-03
linaro-armv8-libcxx-04
)
;;
- tcwg-fx-02:normal)
- bots=(
- linaro-clang-aarch64-sve-vls
- linaro-clang-aarch64-sve-vls-2stage
- )
- ;;
tcwg-jade-01:normal)
bots=(
# Bots in same order as in llvm-zorg/.../workers.py
+ linaro-clang-armv7-global-isel
linaro-clang-armv7-lnt
linaro-clang-armv7-2stage
- linaro-clang-armv7-quick
- linaro-clang-armv7-global-isel
linaro-clang-armv7-vfpv3-2stage
+ linaro-clang-armv8-quick
linaro-clang-armv8-lld-2stage
linaro-clang-aarch64-quick
linaro-clang-aarch64-lld-2stage
linaro-clang-aarch64-global-isel
- linaro-clang-aarch64-full-2stage
linaro-flang-aarch64-dylib
linaro-flang-aarch64-sharedlibs
linaro-flang-aarch64-out-of-tree
- linaro-flang-aarch64-debug
- linaro-flang-aarch64-latest-clang
linaro-flang-aarch64-release
+ linaro-flang-aarch64-debug-reverse-iteration
linaro-flang-aarch64-rel-assert
linaro-flang-aarch64-latest-gcc
)
;;
- tcwg-llvmbot_tk1-09:silent)
- # Connect linaro-tk1-09 to silent buildmaster to test its stability
- bots=("$(echo "$NODE_NAME" | sed -e "s/tcwg-llvmbot_/linaro-/")")
+ tcwg-g3-01:normal)
+ # -01 is 32-core / 64GB RAM G3 instance. It can comfortably run
+ # 1-2 linaro-g3-* workers.
+ bots=(
+ linaro-g3-01
+ )
;;
- tcwg-llvmbot_tk1-09:normal)
+ tcwg-g3-02:normal)
+ # Testing lldb running on SVE/PAC enabled G3 hardware.
+ bots=(
+ linaro-lldb-aarch64-ubuntu
+ )
;;
- tcwg-llvmbot_tk1-*:normal)
- bots=("$(echo "$NODE_NAME" | sed -e "s/tcwg-llvmbot_/linaro-/")")
+ tcwg-g3-04:normal)
+ # -04 is 64-core / 128GB RAM G3 instance. It can comfortably run
+ # 3-4 linaro-g3-* workers.
+ bots=(
+ linaro-g3-02
+ linaro-g3-03
+ linaro-g3-04
+ )
;;
esac
+default_distro=$distro
for bot in "${bots[@]+${bots[@]}}"; do
case "$bot" in
- linaro-*aarch64-*) arch="arm64" ;;
- linaro-*arm*|linaro-tk1-*) arch="armhf" ;;
+ linaro-*aarch64-*|linaro-g3-*) arch="arm64" ; distro=$default_distro ;;
+ linaro-*arm*|linaro-tk1-*) arch="armhf" ; distro=$default_distro ;;
*) echo "ERROR: Unknown bot: $bot"; exit 1 ;;
esac
image="linaro/ci-$arch-tcwg-llvmbot-ubuntu:$distro"
diff --git a/tcwg-update-lnt-results.sh b/tcwg-update-lnt-results.sh
new file mode 100755
index 00000000..5ae80b44
--- /dev/null
+++ b/tcwg-update-lnt-results.sh
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+set -euf -o pipefail
+
+scripts=$(dirname $0)
+
+# shellcheck source=jenkins-helpers.sh
+. $scripts/jenkins-helpers.sh
+
+convert_args_to_variables "$@"
+shift "$SHIFT_CONVERTED_ARGS"
+
+obligatory_variables lnt_config ci_project ci_config
+declare lnt_config ci_project ci_config
+
+verbose="${verbose-false}"
+if $verbose; then set -x; fi
+
+gitbaseurl="ssh://bkp.tcwglab/home/tcwg-buildslave/base-artifacts/"
+lntserver="${lntserver-/home/tcwg-buildslave/$lnt_config/lntserver}"
+lntdb="$lntserver/lnt-database"
+
+. $lntserver/sandbox/bin/activate
+
+# Clean up the tmp result dirs in all cases.
+declare -g tmpdirs=()
+trap 'rm -rf "${tmpdirs[@]}"' EXIT
+
+update_one_lnt_results_project()
+{
+ local project=$1
+ local config=$2
+
+ # Early exit if unsupported
+ case "$project" in
+ tcwg_binutils*|tcwg_bootstrap*|tcwg_gcc*|tcwg_gdb*|tcwg_glibc*|tcwg_gnu*|\
+ tcwg_bmk*)
+ # process the lnt update further
+ ;;
+ *)
+ # Do not update anything else so far
+ return
+ ;;
+ esac
+
+ # Update base-artifacts with project/config
+ branch="linaro-local/ci/$project/$config"
+ clone_or_update_repo "base-artifacts/$project/$config" "$branch" "$gitbaseurl/$project/$config.git" \
+ auto "$branch"
+
+ # Push each report to the LNT server
+ readarray -t lnt_reports < <(get_git_history -0 base-artifacts/$project/$config notify/lnt_report.json)
+
+ # get_git_history always returns the created tmpdir on the first line,
+ # followed by one line per extracted file. lnt_reports[0] is the
+ # created tmpdir and should be cleaned up afterwards.
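+ # For example (hypothetical paths): lnt_reports[0]="/tmp/tmp.AbCd12"
+ # and lnt_reports[1]="/tmp/tmp.AbCd12/0001/notify/lnt_report.json".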
+ tmpdirs+=("${lnt_reports[0]}")
+
+ if [ ${#lnt_reports[@]} -gt 1 ]; then
+ echo "Pushing ${lnt_reports[*]:1}"
+ lnt import --testsuite "$project" $lntdb "${lnt_reports[@]:1}"
+ else
+ echo "No lnt_report to push"
+ fi
+ rm -rf "${lnt_reports[0]}"
+}
+
+# Update the results for $ci_project/$ci_config if both are defined,
+# or iterate over all matching $ci_project/$ci_config pairs.
+if [ x"$ci_project" != x"*" ] && [ x"$ci_config" != x"*" ]; then
+
+ update_one_lnt_results_project $ci_project $ci_config
+
+else
+
+ while read -r project_config; do
+
+ project=${project_config%/*}
+ config=${project_config#*/}
+
+ if [ x"$ci_project" != x"*" ] && [ x"$ci_project" != x"$project" ]; then
+ continue
+ fi
+ if [ x"$ci_config" != x"*" ] && [ x"$ci_config" != x"$config" ]; then
+ continue
+ fi
+
+ update_one_lnt_results_project $project $config || true
+
+ done < <(ssh bkp.tcwglab 'cd /home/tcwg-buildslave/base-artifacts/; find . -type d -name "*.git"' | sed -e 's/.git$//' | cut -d/ -f2-3)
+
+fi
+
+rm -rf "${tmpdirs[@]}"
diff --git a/tcwg-update-tested.sh b/tcwg-update-tested.sh
index 286eaebc..709ded88 100755
--- a/tcwg-update-tested.sh
+++ b/tcwg-update-tested.sh
@@ -19,15 +19,22 @@ set -u
if $verbose; then set -x; fi
-if [ x"$USER" = x"tcwg-buildslave" ]; then
- git config --global user.name "TCWG Buildslave"
- git config --global user.email tcwg-buildslave@linaro.org
-fi
+# Repos cloned by Jenkins have the core.hooksPath set to /dev/null, causing
+# the "git review" command to fail (see
+# https://storyboard.openstack.org/#!/story/2010342) so change it back to
+# its default value.
+git -C abe config --unset core.hooksPath
+git -C abe review -s
git -C abe push gerrit $GIT_COMMIT:refs/heads/tested
+
+git -C jenkins-scripts config --unset core.hooksPath
+git -C jenkins-scripts review -s
git -C jenkins-scripts push gerrit $GIT_COMMIT_1:refs/heads/tested
pushd dockerfiles
GIT_COMMIT_2=$(git rev-parse $GIT_COMMIT_2)
+
+git config --unset core.hooksPath
git review -s
git checkout -B tcwg-tested origin/tcwg-tested
if [ x"$(git diff HEAD $GIT_COMMIT_2 | wc -l)" != x"0" ]; then
@@ -42,7 +49,7 @@ if [ x"$(git diff HEAD $GIT_COMMIT_2 | wc -l)" != x"0" ]; then
git commit --amend -C HEAD
# Make sure there's no stale rebase from previous review.
rm -rf .git/rebase-merge
- # Submit review against tcwg-llvmprod branch
+ # Submit review against tcwg-tested branch
git review -y tcwg-tested
ssh -p29418 review.linaro.org gerrit review --code-review 2 --submit "$(git rev-parse HEAD)"
fi
diff --git a/tcwg-upstream2gerrit.sh b/tcwg-upstream2gerrit.sh
deleted file mode 100755
index 03c5b893..00000000
--- a/tcwg-upstream2gerrit.sh
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/bin/bash
-
-set -ef -o pipefail
-
-# shellcheck source=jenkins-helpers.sh
-. "$(dirname $0)"/jenkins-helpers.sh
-convert_args_to_variables "$@"
-
-# Make shellcheck happy and workaround Jenkins not defining variables
-# for empty arguments.
-branch="${branch-master}"
-patches="${patches-last}"
-project="${project-gcc}"
-squash="${squash-false}"
-filter="${filter-false}"
-verbose="${verbose-true}"
-
-# Jenkins doesn't define variables when parameter value is empty,
-# so enable "set -u" only after above binding of variables.
-set -u
-
-if $verbose; then set -x; fi
-
-rm -f pwclient
-wget http://people.linaro.org/~maxim.kuvyrkov/pwclient/pwclient
-
-pwc="$(pwd)/pwclient"
-chmod +x "$pwc"
-sed -i -e "s#~/.pwclientrc#$(pwd)/pwclientrc#g" "$pwc"
-cat > pwclientrc <<EOF
-[options]
-default=$project
-
-[$project]
-url= https://patches-$project.linaro.org/xmlrpc/
-EOF
-
-last_id=$($pwc list -N 1 -f "%{id}")
-if [ "$last_id" = "" ]; then
- echo "ERROR: Cannot fetch last patch id."
- exit 1
-fi
-
-patches=$(echo "$patches" | sed -e "s/last/$last_id/g")
-
-refdir=/home/tcwg-buildslave/snapshots-ref/$project.git
-if ! [ -d $refdir/.git ]; then
- refdir=""
-fi
-
-clone_or_update_repo $project refs/heads/$branch https://git-us.linaro.org/toolchain/$project $refdir
-
-cd $project
-cat > .gitreview <<EOF
-[gerrit]
-host=review.linaro.org
-port=29418
-project=toolchain/$project
-EOF
-git review -s
-rm .gitreview
-
-case "$project" in
- "gcc")
- # Attempt to apply patches to nested gcc/ directory if top-level
- # fails.
- try_dirs=". gcc"
- ;;
- *) try_dirs="." ;;
-esac
-
-patch_file=$(mktemp)
-# shellcheck disable=SC2064
-trap "rm -f $patch_file /tmp/mydiff.$$" EXIT
-
-count="0"
-for patch in $patches; do
- download_file=$($pwc get -p $project $patch | sed -e "s/Saved patch to //")
- mv $download_file $patch_file
-
- res=0
- git reset --hard
- git clean -df
- if grep -q "diff --git" $patch_file; then
- git am $patch_file || res=$?
- if [ "$res" = "0" ]; then
- git commit --amend --reset-author -C HEAD
- else
- git am --abort
- fi
- else
- for dir in $try_dirs; do
- pushd $dir
- git reset --hard
- git clean -df
- patch -f -p0 < $patch_file || res=$?
- if [ "$res" = "0" ]; then
- git add .
- git commit -m "Patch #$patch"
- fi
- popd
- if [ "$res" = "0" ]; then
- break
- fi
- done
- fi
- if [ "$res" != "0" ]; then
- echo "ERROR: Cannot apply patch $patch"
- exit 1
- fi
- count=$(($count+1))
-done
-
-# Apply the filter once the patch series has been applied, to decide
-# whether we actually want review and validation
-keepit=true
-if $filter; then
- keepit=false
- git diff HEAD~$count..HEAD > /tmp/mydiff.$$
-
- # Keep commits impacting ARM or AArch64
- wanted1=0
- # Search the exact words arm, thumb or aarch64
- egrep '^[-+]' /tmp/mydiff.$$ | egrep -w -i 'arm|thumb|aarch64' || wanted1=$?
- wanted2=0
- # Search the same strings with '_' prefix or suffix, excluding string with 'parm'
- egrep '^[-+]' /tmp/mydiff.$$ | egrep -i '_arm|arm_|_thumb|thumb_|_aarch64|aarch64_' | grep -v -i 'parm' || wanted2=$?
- if [ $wanted1 -eq 0 ] || [ $wanted2 -eq 0 ]
- then
- keepit=true
- else
- # We could keep commits from ARM, Linaro, or whitelist if we had a ChangeLog
- echo "Cannot filter authors without a ChangeLog"
- keepit=false
- fi
-fi
-
-if ! $keepit; then
- echo "The filter decided to skip this patch"
- exit 0
-fi
-
-if $squash; then
- git reset --soft HEAD~$count
- git commit -m "Patches: $patches"
-fi
-git review -y "$branch"
diff --git a/tcwg-wip/push-results-to-squad.sh b/tcwg-wip/push-results-to-squad.sh
new file mode 100755
index 00000000..d387db03
--- /dev/null
+++ b/tcwg-wip/push-results-to-squad.sh
@@ -0,0 +1,364 @@
+#!/bin/bash -f
+
+# Example: pushing a base-artifacts branch:
+# tcwg-wip/push-results-to-squad.sh --branch linaro-local/ci/tcwg_bmk_gnu_apm/gnu-master-aarch64-spec2k6-Os_LTO --push true
+
+
+set -e
+
+scripts=$(dirname "$0")/..
+# shellcheck source=jenkins-helpers.sh
+. "$scripts/jenkins-helpers.sh"
+
+convert_args_to_variables "$@"
+obligatory_variables branch
+
+branch=${branch-}
+push=${push-false}
+dryrun=${dryrun-false}
+verbose=${verbose-false}
+
+if [ "$dryrun" == "false" ]; then
+ curl="curl"
+else
+ echo "DRYRUN : Will show but not push result"
+ curl="echo curl"
+fi
+
+
+# --------------------------- UTILS --------------------------------
+verbose()
+{
+ if $verbose; then
+ echo "$*"
+ fi
+}
+
+error()
+{
+ echo ""
+ echo "ERROR : $*"
+ echo ""
+ exit 1
+}
+
+first_time=true
+clean_squad_project_if_first_time()
+{
+ if $first_time; then
+ rm -rf squad_inputs/${project_run['project']}
+ fi
+ first_time=false
+}
+
+# --------------------------- GET INFOS --------------------------------
+get_infos_from_artifacts ()
+{
+ local artifacts_dir=$1
+ local artifacts_sha1=$2
+ local artifacts_branch=$3
+
+ echo "# base-artifact : rev=$sha1 ($artifacts_branch)"
+ git -C $artifact_dir reset -q --hard
+ git -C $artifact_dir checkout -q -f $artifacts_sha1
+
+ local manifest
+ [ -f "$artifacts_dir/manifest.sh" ] && manifest=manifest.sh
+ [ -f "$artifacts_dir/jenkins/manifest.sh" ] && manifest=jenkins/manifest.sh
+ [ x"$manifest" == x"" ] && error "Manifest not found"
+
+ declare -A rr
+ # shellcheck disable=SC1090
+ source $artifacts_dir/$manifest
+
+ if [ -d $artifacts_dir/dashboard ]; then
+ # Use dashboard directory
+ :
+ else
+
+ # general
+ project_run['artifact_topdir']="$artifacts_dir"
+ project_run['project']=$(echo $BUILD_URL |cut -d/ -f5)
+ project_run['run']=$(echo $BUILD_URL |cut -d/ -f6)
+ project_run['url']="$BUILD_URL"
+
+ # tool versions
+ project_run['version_binutils']=${rr[binutils_rev]}
+ project_run['version_gcc']=${rr[gcc_rev]}
+ project_run['version_glibc']=${rr[glibc_rev]}
+ project_run['version_linux']=${rr[linux_rev]}
+ [ -v "rr[llvm_rev]" ] && project_run['version_llvm']=${rr[llvm_rev]}
+
+ # url
+ project_run['artifacts_url_base']="https://git.linaro.org/toolchain/ci/base-artifacts.git/tree"
+ project_run['artifacts_url_branch_and_version']="h=$artifacts_branch&id=$artifacts_sha1"
+ project_run['artifacts_url_results']="${project_run['artifacts_url_base']}/?${project_run['artifacts_url_branch_and_version']}"
+
+ # relative to artifact topdir
+ benchmark_dir=$(cd $artifacts_dir && find . -maxdepth 2 -name "*-benchmark")
+ [ x"$benchmark_dir" == x"" ] && error "Benchmark dir not found"
+
+ check_regression_dir=$(cd $artifacts_dir && find . -maxdepth 2 -name "*-check_regression")
+ [ x"$check_regression_dir" == x"" ] && error "Regression dir not found"
+
+ [ -f "$artifacts_dir/manifest.sh" ] && project_run['manifest']="manifest.sh"
+ [ -f "$artifacts_dir/jenkins/manifest.sh" ] && project_run['manifest']="jenkins/manifest.sh"
+ [ x"${project_run['manifest']}" == x"" ] && error "Manifest not found"
+
+ [ -f "$artifacts_dir/$benchmark_dir/benchmark-build.log" ] && project_run['benchmark_log']="$benchmark_dir/benchmark-build.log"
+ [ -f "$artifacts_dir/$benchmark_dir/benchmark.log" ] && project_run['benchmark_log']="$benchmark_dir/benchmark.log"
+ [ x"${project_run['benchmark_log']}" == x"" ] && error "Benchmark log not found"
+
+ [ -f "$artifacts_dir/$check_regression_dir/csv-results-1/results.csv" ] && project_run['results_csv']="$check_regression_dir/csv-results-1/results.csv"
+ [ -f "$artifacts_dir/$check_regression_dir/results-1.csv" ] && project_run['results_csv']="$check_regression_dir/results-1.csv"
+ [ x"${project_run['results_csv']}" == x"" ] && error "results.csv log not found"
+
+ project_run['datetime']=$(git -C $artifacts_dir show --no-patch --format=%cI)
+ fi
+}
+
+
+# --------------------------- PARSE RESULTS ----------------------------
+# uses project_run[]
+# outputs project_results[]
+parse_results_csv_files()
+{
+ echo " * parsing base-artifacts inputs"
+ echo " ${project_run['artifact_topdir']}/${project_run['results_csv']}"
+
+ IFS=','
+ # benchmark, symbol, sample, size, num_vect_loops, symbol_md5sum
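+ # e.g. (hypothetical row): 401.bzip2,bzip2_base.default,123456,78901,42,0123abcd...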
+ while read -r bench symb perf size vect md5sum
+ do
+ [[ $symb == *.default* ]] || continue
+
+ if [[ -v project_results["func:$bench"] ]] && [ ${project_results["func:$bench"]} != "pass" ]; then
+ : # keep the already-recorded failure
+ else
+ case $perf in
+ 999999999)
+ project_results["func:$bench"]="build_fail"
+ # project_results["perf:$bench"]=""
+ # project_results["size:$bench"]=""
+ # project_results["vect:$bench"]=""
+ ;;
+ 888888888)
+ project_results["func:$bench"]="run_fail"
+ [ $vect != "-1" ] && project_results["vect:$bench"]="$vect"
+ [ $size != "-1" ] && project_results["size:$bench"]="$size"
+ # project_results["perf:$bench"]=""
+ ;;
+ *)
+ project_results["func:$bench"]="pass"
+ [ $vect != "-1" ] && project_results["vect:$bench"]="$vect"
+ [ $size != "-1" ] && project_results["size:$bench"]="$size"
+ [ $perf != "-1" ] && project_results["perf:$bench"]="$perf"
+ ;;
+ esac
+ fi
+
+ #echo " # $bench,$symb,$perf,$size,$vect,$md5sum"
+ verbose " $bench : ${project_results["func:$bench"]}, ${project_results["perf:$bench"]}, " \
+ "${project_results["size:$bench"]}, ${project_results["vect:$bench"]}, $md5sum"
+ done < "${project_run['artifact_topdir']}/${project_run['results_csv']}"
+ unset IFS
+
+ # hh:mm:ss Finished: SUCCESS
+ project_run['run_status']=$(grep '^Finished: ' ${project_run['artifact_topdir']}/${project_run['benchmark_log']} | tail -1 | cut -d: -f2)
+}
+
+
+
+# --------------------------- WRITING OUTPUTS --------------------------------
+# uses project_results[] project_run[]
+# dumps files under squad_inputs/<project>/<run>/
+dump_json_for_squad()
+{
+ local nbtest=0 nbpass=0 nbfail=0 nbskip=0
+
+ outdir=squad_inputs/${project_run['project']}/${project_run['run']}
+ rm -rf $outdir
+ mkdir -p $outdir
+
+ clean_squad_project_if_first_time
+
+ echo " * writing json for squad"
+
+ for key in "${!project_results[@]}"; do
+
+ kind=$(echo $key | cut -d: -f1)
+ bench=$(echo $key | cut -d: -f2)
+ [[ "$kind" == "func" ]] || continue
+
+ # echo " - writing $outdir/$bench # ${project_results["func:$bench"]}"
+ mkdir -p $outdir/$bench
+
+ case ${project_results["func:$bench"]} in
+ "build_fail")
+ build_func="fail"; ((nbfail+=1));
+ run_func="skip"; ((nbskip+=1));
+ ((nbtest+=2));
+ ;;
+ "run_fail")
+ build_func="pass"; ((nbpass+=1));
+ run_func="fail"; ((nbfail+=1));
+ ((nbtest+=2));
+ ;;
+ "pass")
+ build_func="pass"; ((nbpass+=1));
+ run_func="pass"; ((nbpass+=1));
+ ((nbtest+=2));
+ ;;
+ esac
+
+ resfile=$outdir/$bench/results-functional.json
+ echo "{" >> $resfile
+ echo " \"build\" : \"$build_func\"", >> $resfile
+ echo " \"run\" : \"$run_func\"" >> $resfile
+ echo "}" >> $resfile
+
+ resfile=$outdir/$bench/results-metrics.json
+ # Collect the metrics that are present, then join them with commas so
+ # the generated JSON stays valid when some metrics are missing.
+ metric_lines=()
+ [[ -v project_results["perf:$bench"] ]] && metric_lines+=(" \"perf\" : \"${project_results["perf:$bench"]}\"")
+ [[ -v project_results["size:$bench"] ]] && metric_lines+=(" \"size\" : \"${project_results["size:$bench"]}\"")
+ [[ -v project_results["vect:$bench"] ]] && metric_lines+=(" \"vect\" : \"${project_results["vect:$bench"]}\"")
+ echo "{" >> $resfile
+ (IFS=,; echo "${metric_lines[*]}") >> $resfile
+ echo "}" >> $resfile
+
+ resfile=$outdir/$bench/results-metadata.json
+ echo "{" >> $resfile
+ # general infos
+ echo " \"datetime\": \"${project_run['datetime']}\"," >> $resfile
+ echo " \"build_url\": \"${project_run['url']}\"," >> $resfile
+ echo " \"build_log\": \"${project_run['url']}console\"," >> $resfile
+ echo " \"job_status\": \"${project_run['run_status']}\"," >> $resfile
+
+ # tool versions
+ echo " \"version_binutils\": \"${project_run['version_binutils']}\"," >> $resfile
+ echo " \"version_gcc\": \"${project_run['version_gcc']}\"," >> $resfile
+ echo " \"version_glibc\": \"${project_run['version_glibc']}\"," >> $resfile
+ echo " \"version_linux\": \"${project_run['version_linux']}\"," >> $resfile
+ [[ -v project_run['version_llvm'] ]] && echo " \"version_llvm\": \"${project_run['version_llvm']}\"," >> $resfile
+
+ # links to base-artifacts
+ echo " \"artifact_results\": \"${project_run['artifacts_url_base']}/" \
+ "?${project_run['artifacts_url_branch_and_version']}\"," >> $resfile
+ echo " \"build_manifest\": \"${project_run['artifacts_url_base']}/" \
+ "${project_run['manifest']}?${project_run['artifacts_url_branch_and_version']}\"," >> $resfile
+ echo " \"benchmark_log\": \"${project_run['artifacts_url_base']}/"\
+ "${project_run['benchmark_log']}?${project_run['artifacts_url_branch_and_version']}\"" >> $resfile
+
+ echo "}" >> $resfile
+
+ done
+ project_run['squad_input_dir']=$outdir
+
+ echo " $outdir ($nbtest test, $nbpass pass, $nbfail fail, $nbskip skip)"
+}
+
+
+push_results_to_squad_server()
+{
+ local outdir=squad_inputs/${project_run['project']}/${project_run['run']}
+ local pushcmdfile=$outdir/push_results_to_squad.sh
+ rm -f $pushcmdfile
+
+ echo " * generating cmd to push results"
+
+
+ SQUAD_TOKEN="e1e2856837b6c89c08e1031f3263e290b5aeefb5"
+
+ squad_server=https://qa-reports.linaro.org # no trailing slash; SQUAD_URL below adds "/"
+ GRP="~laurent.alfonsi"
+
+ echo "#!/bin/bash -f" > $pushcmdfile
+ echo "cd $(pwd)" >> $pushcmdfile
+ echo "" >> $pushcmdfile
+
+ for key in "${!project_results[@]}"; do
+
+ kind=$(echo $key | cut -d: -f1)
+ bench=$(echo $key | cut -d: -f2)
+ [[ "$kind" == "func" ]] || continue
+
+ # TODO: verify these values
+ PRJ="${project_run['project']}"
+ BLD="${project_run['run']}"
+ ENV="$bench"
+
+ SQUAD_URL=$squad_server/api/submit/$GRP/$PRJ/$BLD/$ENV
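+ # e.g. https://qa-reports.linaro.org/api/submit/~laurent.alfonsi/<project>/<run>/<bench>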
+
+ # result files
+ RESULTS_RESULTS=${project_run['squad_input_dir']}/$bench/results-functional.json
+ RESULTS_METRICS=${project_run['squad_input_dir']}/$bench/results-metrics.json
+ RESULTS_METADATA=${project_run['squad_input_dir']}/$bench/results-metadata.json
+
+ echo $curl \
+ --header \"Authorization: Token $SQUAD_TOKEN\" \
+ --form tests=@$RESULTS_RESULTS \
+ --form metrics=@$RESULTS_METRICS \
+ --form metadata=@$RESULTS_METADATA \
+ $SQUAD_URL >> $pushcmdfile
+ echo "echo $SQUAD_URL pushed" >> $pushcmdfile
+
+ done
+ echo " $pushcmdfile"
+ chmod a+x $pushcmdfile
+
+ # Push command aggregating all runs of this project
+ pushcmdfile=squad_inputs/${project_run['project']}/push_results_to_squad.sh
+ if [ ! -f $pushcmdfile ]; then
+ echo "#!/bin/bash -fx" > $pushcmdfile
+ chmod a+x $pushcmdfile
+ fi
+
+ echo "" >> $pushcmdfile
+ echo "./squad_inputs/${project_run['project']}/${project_run['run']}/push_results_to_squad.sh" >> $pushcmdfile
+
+ echo ""
+}
+
+get_and_push_one_project ()
+{
+ artifact_dir=$1
+ artifact_branch=$2
+
+ local project
+
+ echo "# base_artifact : fetch $artifact_branch"
+ git -C $artifact_dir fetch -q origin $artifact_branch
+ git -C $artifact_dir checkout FETCH_HEAD
+ git -C $artifact_dir branch -f $artifact_branch FETCH_HEAD
+
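+ # "tac" reverses the log so commits are processed oldest-first,
+ # keeping SQUAD builds in chronological order.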
+ for sha1 in $(git -C $artifact_dir log FETCH_HEAD --pretty=%H | tac); do
+ declare -A project_run project_results
+
+ get_infos_from_artifacts $artifact_dir $sha1 $artifact_branch
+ project=${project_run['project']}
+
+ parse_results_csv_files
+
+ dump_json_for_squad
+
+ push_results_to_squad_server
+
+ unset project_run project_results
+ done
+
+ echo "# TO PUSH ALL RESULTS, PLEASE RUN :"
+ echo "squad_inputs/$project/push_results_to_squad.sh"
+ echo ""
+}
+
+
+gitserver=https://git-us.linaro.org/toolchain/ci/base-artifacts.git
+artifact_dir=base-artifacts
+
+# Restart from scratch. Check out a single branch.
+# rm -rf $artifact_dir
+
+if [ ! -d $artifact_dir ]; then
+ git clone --single-branch --branch empty $gitserver $artifact_dir
+fi
+
+
+get_and_push_one_project $artifact_dir $branch
diff --git a/tcwg-wip/tcwg-convert-interesting-commits.sh b/tcwg-wip/tcwg-convert-interesting-commits.sh
new file mode 100755
index 00000000..51f08647
--- /dev/null
+++ b/tcwg-wip/tcwg-convert-interesting-commits.sh
@@ -0,0 +1,219 @@
+#!/bin/bash
+
+set -euf -o pipefail
+set -x
+
+old="$HOME/interesting-commits"
+git="git -C $old"
+new="$HOME/interesting"
+
+component_repo ()
+{
+ local c="$1"
+
+ case "$c" in
+ llvm) c="llvm-project" ;;
+ binutils|gdb) c="binutils-gdb" ;;
+ esac
+
+ echo "$HOME/interesting-repos/$c.git"
+}
+
+prepare_git_repos ()
+{
+ local -A interesting_url
+ interesting_url[binutils]=git://sourceware.org/git/binutils-gdb.git
+ interesting_url[gdb]=git://sourceware.org/git/binutils-gdb.git
+ interesting_url[gcc]=https://github.com/gcc-mirror/gcc.git
+ interesting_url[linux]=https://git.linaro.org/kernel-org/linux.git
+ interesting_url[glibc]=git://sourceware.org/git/glibc.git
+ interesting_url[newlib]=git://sourceware.org/git/newlib-cygwin.git
+ interesting_url[llvm]=https://github.com/llvm/llvm-project.git
+ interesting_url[qemu]=https://gitlab.com/qemu-project/qemu.git
+
+ for c in binutils gdb gcc linux glibc newlib llvm qemu; do
+ url="${interesting_url[$c]}"
+ repo=$(component_repo $c)
+ if [ -d $repo ]; then
+ git -C $repo remote update -p
+ else
+ reference_opt=""
+ reference_dir=$(basename "$(component_repo $c)")
+ reference_dir="/home/tcwg-buildslave/snapshots-ref/$reference_dir"
+ if [ -d $reference_dir ]; then
+ reference_opt="--reference $reference_dir"
+ fi
+ git clone --bare $reference_opt $url $repo
+ case "$c" in
+ linux)
+ git -C $repo remote add stable https://git.linaro.org/kernel-org/linux-stable.git
+ git -C $repo remote update -p
+ ;;
+ esac
+ fi
+ done
+
+ $git remote update -p
+ $git reset -q --hard
+ $git clean -fd
+}
+
+prepare_git_repos
+
+declare -A describe_cache
+describe_sha1 ()
+{
+ local component="$1"
+ local sha1="$2"
+
+ if [ x"${describe_cache[$component#$sha1]-unset}" = x"unset" ]; then
+ echo "$component#$sha1" >> $HOME/tmp/miss
+ local -a match=()
+ case "$component" in
+ gcc) match=(--match "basepoints/*" --match "releases/*") ;;
+ binutils) match=(--exclude "users/*") ;;
+ newlib) match=(--match "newlib*") ;;
+ esac
+
+ describe_cache[$component#$sha1]=$(git -C "$(component_repo $component)" describe "${match[@]}" $sha1 || true)
+ fi
+}
+
+update_entry ()
+{
+ local component="$1"
+ local sha1="$2"
+ local ci_project="$3"
+ local ci_config="$4"
+ local bisect_url="$5"
+ local last_good="$6"
+
+ if ! git -C "$(component_repo $component)" rev-parse --verify $sha1 2>/dev/null; then
+ return
+ fi
+
+ describe_sha1 "$component" "$sha1"
+ local describe="${describe_cache[$component#$sha1]}"
+ if ! [ -d $new/$component/sha1/$sha1 ] \
+ && [ x"$describe" != x"" ]; then
+ local d
+ d=$(dirname "$describe")
+ mkdir -p $new/$component/$d
+ local symlink=""
+ while [ x"$d" != x"." ]; do
+ symlink="../$symlink"
+ d=$(dirname "$d")
+ done
+ symlink="${symlink}sha1/$sha1"
+ ln -s $symlink $new/$component/$describe
+ fi
+
+ mkdir -p $new/$component/sha1/$sha1/$ci_project/$ci_config
+ echo "$bisect_url" > $new/$component/sha1/$sha1/$ci_project/$ci_config/build_url
+ if [ x"$last_good" != x"" ]; then
+ echo "$last_good" \
+ > $new/$component/sha1/$sha1/$ci_project/$ci_config/last_good
+ fetch_summary "$bisect_url" \
+ $new/$component/sha1/$sha1/$ci_project/$ci_config
+ if [ x"$describe" = x"" ]; then
+ describe="$ci_project#$sha1"
+ fi
+ generate_status "$describe" $component/sha1/$sha1 $ci_project
+ fi
+}
+
+fetch_dir=$HOME/interesting-repos/fetch-cache
+fetch_summary ()
+{
+ local bisect_url="$1"
+ local out="$2"
+
+ local cache_url="${bisect_url#https://}"
+ if ! [ -f $fetch_dir/$cache_url/summary.txt ]; then
+ mkdir -p $fetch_dir/$cache_url/
+ curl -o $fetch_dir/$cache_url/summary.txt \
+ ${bisect_url}artifact/artifacts/build-first_bad/mail/jira-body.txt \
+ --fail &
+ if wait $!; then
+ sed -i -e "/^[^-]/d" -e "/^\$/d" -e "s/^- //" \
+ $fetch_dir/$cache_url/summary.txt
+ else
+ echo > $fetch_dir/$cache_url/summary.txt
+ fi
+ fi
+ cp $fetch_dir/$cache_url/summary.txt $out/summary.txt
+}
+
+generate_status ()
+{
+ local describe="$1"
+ local subdir="$2"
+ local ci_project="$3"
+ local interesting_commits="$new"
+ local status="$subdir/$ci_project/status.txt"
+
+ local ci_config
+
+ cat > $interesting_commits/$status <<EOF
+Status of $describe commit for $ci_project ci_project:
+EOF
+ for ci_config in $(cd $interesting_commits/$subdir/$ci_project; set +f; echo *); do
+ if ! [ -f "$interesting_commits/$subdir/$ci_project/$ci_config/summary.txt" ]; then
+ continue
+ fi
+ echo "* $ci_config"
+ (
+ cat $interesting_commits/$subdir/$ci_project/$ci_config/summary.txt
+ cat $interesting_commits/$subdir/$ci_project/$ci_config/build_url
+ ) | sed "s/^/** /"
+ done >> $interesting_commits/$status
+}
+
+rm -rf "$new"
+mkdir -p "$new"
+
+while IFS= read -r branch; do
+ $git checkout --detach $branch
+
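+ # Walk the branch from tip to root; each commit snapshot appears to
+ # hold one file per component listing sha1s and the
+ # ci_project/ci_config pairs they affect (layout inferred from the
+ # parsing below).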
+ while true; do
+ ci_project=${branch# *origin/linaro-local/ci/}
+ bisect_url=$($git show --pretty=%s --no-patch | sed -e "s/.* from //")
+ pushd $old
+ while read component; do
+ ci_config=""
+ arr=()
+ while read -a arr; do
+ if [ ${#arr[@]} -gt 1 ]; then
+ sha1="${arr[0]}"
+ if [ x"$sha1" != x"e5a9d60317852a7323e46109fa366e630b8b5bae" ]; then
+ #continue
+ :
+ fi
+ for ci_project_config in "${arr[@]:1}"; do
+ ci_project2=${ci_project_config%/*}
+ ci_config=${ci_project_config#*/}
+
+ if [ x"$ci_project" != x"$ci_project2" ]; then
+ echo "ERROR: ci_project mismatch"
+ exit 1
+ fi
+
+ update_entry "$component" "$sha1" "$ci_project" "$ci_config" "$bisect_url" ""
+ done
+ elif [ x"$ci_config" != x"" ]; then
+ if [ x"$sha1" != x"e5a9d60317852a7323e46109fa366e630b8b5bae" ]; then
+ #continue
+ :
+ fi
+ last_good="${arr[0]}"
+ update_entry "$component" "$sha1" "$ci_project" "$ci_config" "$bisect_url" "$last_good"
+ fi
+ done < <(tac $component)
+ done < <(ls)
+ popd
+ if ! $git rev-parse --verify HEAD^ 2>/dev/null; then
+ break
+ fi
+ $git checkout --detach HEAD^
+ done
+done < <($git branch -r --list "origin/linaro-local/*")
diff --git a/tcwg_aosp-build.sh b/tcwg_aosp-build.sh
new file mode 100755
index 00000000..363daf48
--- /dev/null
+++ b/tcwg_aosp-build.sh
@@ -0,0 +1,494 @@
+#!/bin/bash
+
+set -euf -o pipefail
+
+scripts=$(dirname $0)
+# shellcheck source=jenkins-helpers.sh
+. $scripts/jenkins-helpers.sh
+# shellcheck source=round-robin.sh
+. $scripts/round-robin.sh
+
+convert_args_to_variables "$@"
+
+obligatory_variables rr[ci_project] rr[ci_config]
+declare -A rr
+
+# tcwg_aosp-code_size-{aosp_modules}
+IFS=- read -a ci_project <<EOF
+${rr[ci_project]}
+EOF
+aosp_modules=("${aosp_modules[@]-${ci_project[2]}}")
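+# e.g. (hypothetical) rr[ci_project]=tcwg_aosp-code_size-blueprints
+# splits so that aosp_modules defaults to (blueprints).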
+
+# {aosp_target}-{aosp_ver}
+IFS=- read -a ci_config <<EOF
+${rr[ci_config]}
+EOF
+aosp_target="${aosp_target-${ci_config[0]}}"
+
+# Execution mode: build or bisect
+rr[mode]="${rr[mode]-build}"
+
+# Set custom revision for one of the projects, and use baseline revisions
+# for all other projects.
+rr[baseline_branch]="${rr[baseline_branch]-linaro-local/ci/${rr[ci_project]}/${rr[ci_config]}}"
+rr[update_baseline]="${rr[update_baseline]-ignore}"
+rr[top_artifacts]="${rr[top_artifacts]-$(pwd)/artifacts}"
+
+rr[components]="aosp_superproject llvm toolchain_superproject"
+
+# Use baseline branches by default.
+for c in ${rr[components]}; do
+ rr[${c}_git]=${rr[${c}_git]-baseline}
+done
+
+start_at="${start_at-default}"
+finish_at="${finish_at-default}"
+verbose="${verbose-true}"
+verbose2="${verbose2-false}"
+
+if $verbose2; then set -x; fi
+
+trap print_traceback EXIT
+
+# Set start and finish steps for different modes.
+default_start_at=""
+default_finish_at=""
+case "${rr[mode]}" in
+ "bisect")
+ case "$(print_single_updated_component)" in
+ toolchain_superproject) default_start_at="build_aosp_toolchain" ;;
+ llvm) default_start_at="build_shadow_llvm" ;;
+ aosp_superproject) default_start_at="build_aosp" ;;
+ *) assert false ;;
+ esac
+ ;;
+esac
+if [ x"$start_at" = x"default" ]; then
+ start_at="$default_start_at"
+fi
+if [ x"$finish_at" = x"default" ]; then
+ finish_at="$default_finish_at"
+fi
+
+run_step_init "$start_at" "$finish_at" "${rr[top_artifacts]}" "$verbose"
+
+# Clone AOSP repo
+clone_aosp ()
+{
+ (
+ set -euf -o pipefail
+
+ local dir="$1"
+ local superproject_rev="$2"
+ local supermanifest="$3"
+
+ mkdir -p "$dir"
+ cd "$dir"
+
+ # Remove locks from previous runs
+ find .repo -type f -name "shallow.lock" -delete || true
+ # Clean AOSP checkout
+ .repo/repo/repo forall -c "git reset --hard; git clean -fdx" \
+ >/dev/null &
+ if ! wait $!; then
+ # Wipe AOSP checkout on clean errors
+ cd ..
+ rm -rf "$dir"
+ mkdir "$dir"
+ cd "$dir"
+ fi
+
+ # Resync on superproject change
+ if ! [ -f repo_synced ] \
+ || [ x"$(cat repo_synced)" != x"$superproject_rev" ]; then
+ rm -f repo_synced
+
+ local manifest_url manifest_branch manifest_rev
+ # Add a newline after supermanifest file to avoid "read" exiting with
+ # "1" due to EOF.
+ read manifest_url manifest_branch manifest_rev \
+ < <(cat "$supermanifest"; echo)
+
+ manifest_url="https://android.googlesource.com/$manifest_url"
+
+ rm -rf ./.repo/repo
+ repo init --partial-clone --clone-filter=blob:limit=10M \
+ --use-superproject -u "$manifest_url" -b "$manifest_branch"
+
+ # Use manifest specified in superproject. We then use --nmu option
+ # to "repo sync" to avoid manifest update.
+ git -C .repo/manifests fetch "$manifest_url" "$manifest_rev"
+ git -C .repo/manifests checkout FETCH_HEAD
+
+ # FIXME:
+ # Repo doesn't [yet] support fetching custom revisions of
+ # superproject. We workaround that by hacking command line
+ # for "git fetch branch:branch" to "git fetch SHA1:branch".
+ # This hack is effective only when we have $manifest_branch
+ # specified in "repo init", so don't remove "-b" option in "repo init".
+ sed -i -e "s#\[self._branch + \":\" + self._branch\]#\['$superproject_rev' + \":\" + self._branch\]#" ./.repo/repo/git_superproject.py
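+ # e.g. with self._branch being "main" (hypothetical), the fetch
+ # refspec "main:main" becomes "<superproject_rev>:main".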
+
+ ./.repo/repo/repo sync --nmu -j"$(nproc --all)" -q &
+ if ! wait $!; then
+ ./.repo/repo/repo sync --nmu -j1 --fail-fast
+ fi
+
+ # FIXME:
+ # Verify that "repo sync" fetched the desired revision of superproject.
+ local super_git super_rev
+ super_git=$(find ./.repo/exp-superproject/ -name "*-superproject.git")
+ super_rev=$(git -C "$super_git" rev-parse "$manifest_branch")
+ assert_with_msg "Could not hack superproject repo" \
+ [ "$super_rev" = "$superproject_rev" ]
+
+ echo "$superproject_rev" > repo_synced
+ fi
+ )
+}
+
+# Build AOSP's LLVM
+build_aosp_toolchain ()
+{
+ (
+ set -euf -o pipefail
+
+ clone_repo toolchain_superproject
+
+ clone_aosp llvm-toolchain "$(get_current_git toolchain_superproject_rev)" \
+ "$(pwd)/toolchain_superproject/.supermanifest"
+
+ cd llvm-toolchain
+
+ assert_with_msg "Missing repo_synced file" [ -f repo_synced ]
+
+ # Check if sources were updated and rebuild AOSP toolchain on re-syncs.
+ if [ -f out/stage2/cmake_invocation.sh ] \
+ && [ out/stage2/cmake_invocation.sh -ot repo_synced ]; then
+ # Re-generate cmake recipe
+ rm out/stage2/cmake_invocation.sh
+ fi
+
+ # Reproduce AOSP toolchain to get the cmake recipe
+ if ! [ -f out/stage2/cmake_invocation.sh ]; then
+ rm -rf out
+
+ local tf_loc
+ tf_loc="$(pip show tensorflow | grep Location | cut -d' ' -f2)" || true
+ [ -d "$tf_loc" ] && export TENSORFLOW_INSTALL="$tf_loc/tensorflow"
+
+ python toolchain/llvm_android/build.py --no-build lldb,windows \
+ --skip-runtimes --skip-tests --skip-package --mlgo
+ fi
+ )
+}
+
+# Build LLVM
+build_shadow_llvm ()
+{
+ (
+ set -euf -o pipefail
+
+ clone_repo llvm
+
+ cd llvm-toolchain
+
+ local cc cxx ninja
+ cc=$(cat out/stage2/cmake_invocation.sh \
+ | grep -e " -DCMAKE_C_COMPILER=" \
+ | sed -e "s/.* -DCMAKE_C_COMPILER=\([^ ]*\).*/\1/")
+ cxx=$(cat out/stage2/cmake_invocation.sh \
+ | grep -e " -DCMAKE_CXX_COMPILER=" \
+ | sed -e "s/.* -DCMAKE_CXX_COMPILER=\([^ ]*\).*/\1/")
+ ninja=$(cat out/stage2/cmake_invocation.sh \
+ | grep -e " -DCMAKE_MAKE_PROGRAM=" \
+ | sed -e "s/.* -DCMAKE_MAKE_PROGRAM=\([^ ]*\).*/\1/")
+ cd ..
+
+ local workspace
+ workspace=$(pwd)
+
+ # ${workspace:?}/bin is to avoid shellcheck warning that below never
+ # expands to "rm -rf /bin"
+ rm -rf "${workspace:?}/bin"
+ mkdir "$workspace/bin"
+
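+ # Wrap the stage2 compilers in ccache; CCACHE_BASEDIR makes cached
+ # paths relative to the workspace so hits survive workspace moves.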
+ cat > "$workspace/bin/cc" <<EOF
+#!/bin/sh
+CCACHE_BASEDIR=$workspace exec ccache $cc "\$@"
+EOF
+ chmod +x "$workspace/bin/cc"
+
+ cat > "$workspace/bin/c++" <<EOF
+#!/bin/sh
+CCACHE_BASEDIR=$workspace exec ccache $cxx "\$@"
+EOF
+ chmod +x "$workspace/bin/c++"
+
+ rm -rf llvm-install
+
+ cd llvm-toolchain/out
+
+ cp stage2/cmake_invocation.sh ./
+
+
+ sed -i \
+ -e "s#/llvm-toolchain/out/llvm-project/llvm #/llvm/llvm #" \
+ -e "s#/llvm-toolchain/out/stage2-install #/llvm-install #" \
+ -e "s# -DCMAKE_C_COMPILER=[^ ]* # -DCMAKE_C_COMPILER=$workspace/bin/cc #" \
+ -e "s# -DCMAKE_CXX_COMPILER=[^ ]* # -DCMAKE_CXX_COMPILER=$workspace/bin/c++ #" \
+ cmake_invocation.sh
+
+ ccache -z
+
+ rm -rf stage3
+ mkdir stage3
+ cd stage3
+
+ set +e
+ source ../cmake_invocation.sh
+ local res=$?
+ set -e
+ if [ $res != 0 ] || ! $ninja; then
+ # Attempt to work around failures in past LLVM versions.
+ cd ..
+
+ # Work around failures to find BOLT, which seems to be unused anyway.
+ sed -i -e "s/bolt;//" cmake_invocation.sh
+
+ # Work around failure to link x86_64's libc++.so against non-PIC
+ # libc++abi.a. Add -fPIC to x86_64 runtime's CFLAGS.
+ sed -i -e "s/ '-DRUNTIMES_x86_64-unknown-linux-gnu_CMAKE_C_FLAGS=/ '-DRUNTIMES_x86_64-unknown-linux-gnu_CMAKE_C_FLAGS=-fPIC /g" \
+ cmake_invocation.sh
+ sed -i -e "s/ '-DRUNTIMES_x86_64-unknown-linux-gnu_CMAKE_CXX_FLAGS=/ '-DRUNTIMES_x86_64-unknown-linux-gnu_CMAKE_CXX_FLAGS=-fPIC /g" \
+ cmake_invocation.sh
+
+ rm -rf stage3
+ mkdir stage3
+ cd stage3
+ source ../cmake_invocation.sh
+ $ninja
+ fi
+ $ninja install
+
+ ccache -s
+ )
+}
+
+# Build AOSP
+build_aosp ()
+{
+ (
+ set -euf -o pipefail
+
+ # Clone AOSP superproject
+ clone_repo aosp_superproject
+
+ # Clone AOSP
+ clone_aosp aosp "$(get_current_git aosp_superproject_rev)" \
+ "$(pwd)/aosp_superproject/.supermanifest"
+
+ # Remove old binaries
+ (set +f; rm -rf shadow*)
+ rm -rf aosp/out
+
+ # Install build wrappers
+ local prebuilts_clang_bin
+ while read -r prebuilts_clang_bin; do
+ $scripts/wrappers/install-wrappers.sh \
+ $prebuilts_clang_bin llvm-install/bin aosp shadow
+ done < <(find aosp/prebuilts/clang/host/linux-x86/ \
+ -maxdepth 2 -name "bin" -type d)
+
+ $scripts/wrappers/install-wrappers.sh \
+ aosp/build/soong/scripts llvm-install/bin aosp shadow strip.sh
+
+ # Build AOSP
+ (
+ cd aosp
+ set -e +ufx +o pipefail
+ local release=trunk_staging
+ source build/envsetup.sh
+ echo "RUN: lunch aosp_${aosp_target}-${release}-user"
+ lunch "aosp_${aosp_target}-${release}-user"
+ echo "RUN: m ${aosp_modules[*]}"
+ if [ x"${aosp_modules[*]}" = x"aosp" ]; then
+ m
+ else
+ m "${aosp_modules[@]}"
+ fi
+ )
+ )
+}
+
+# Check shadow build
+process_shadow_data ()
+{
+ (
+ set -euf -o pipefail
+
+ local success_code="$2"
+
+ if ! [ -f shadow.errors ]; then
+ success_code=$(($success_code + 1))
+ echo "# shadow build has no errors" >> $run_step_top_artifacts/results
+ echo "$success_code" >> $run_step_top_artifacts/results
+ else
+ echo "# shadow build has errors" >> $run_step_top_artifacts/results
+ cp shadow.errors $run_step_top_artifacts/
+ exit 1
+ fi
+
+ if [ -f shadow.size ]; then
+ cp shadow.size $run_step_top_artifacts/size.csv
+ success_code=$(($success_code + 1))
+ echo "# shadow.size present" >> $run_step_top_artifacts/results
+ echo "$success_code" >> $run_step_top_artifacts/results
+ fi
+ )
+}
+
+# Exit with code 0 if no regression compared to base-artifacts/results.
+no_regression_p ()
+{
+ (
+ set -euf -o pipefail
+
+ no_build_regression_p "$@"
+
+ if ! [ -f base-artifacts/size.csv ]; then
+ return 0
+ elif ! [ -f $run_step_top_artifacts/size.csv ]; then
+ return 1
+ fi
+
+ # Generate results-vs-prev/results.csv
+ mkdir -p $run_step_top_artifacts/results-vs-prev
+ $scripts/../bmk-scripts/csvs2table.py \
+ --relative "$(pwd)/base-artifacts/size.csv" \
+ $run_step_top_artifacts/size.csv \
+ > $run_step_top_artifacts/results-vs-prev/results.csv
+
+ # Read result lines from <(tail -n +2 ...) below.
+ # "-n +2" is to skip the header line.
+ local -a arr
+ local rel_text text1 text2 metric total1=0 total2=0
+ while IFS=, read -a arr; do
+ binary=${arr[0]}
+ rel_text=${arr[2]}
+ text1=${arr[3]}
+ text2=${arr[4]}
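+ # e.g. a row "libfoo.so,<x>,103,2048,2110" (hypothetical) means
+ # libfoo.so grew 3% from 2048 to 2110 bytes; rel_text "n/a" marks a
+ # binary that built on only one side.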
+
+ if [ x"$rel_text" = x"n/a" ]; then
+ case "$text1":"$text2" in
+ "-1:-1") ;;
+ "-1":*)
+ echo "-1,$binary now builds successfully" \
+ >> $run_step_artifacts/binary.improvements
+ ;;
+ *:"-1")
+ echo "1,$binary now fails to build" \
+ >> $run_step_artifacts/binary.regressions
+ ;;
+ esac
+ continue
+ fi
+
+ metric=$(($rel_text - 100))
+
+ if [ $metric -lt 0 ]; then
+ echo "$metric,$binary reduced in size by ${metric}% from $text1 to $text2" \
+ >> $run_step_artifacts/binary.improvements
+ elif [ $metric -gt 0 ]; then
+ echo "$metric,$binary increased in size by ${metric}% from $text1 to $text2" \
+ >> $run_step_artifacts/binary.regressions
+ fi
+
+ total1=$(($total1 + $text1))
+ total2=$(($total2 + $text2))
+ done < <(tail -n +2 $run_step_top_artifacts/results-vs-prev/results.csv)
+
+ if [ $total1 != 0 ]; then
+ metric=$(((100 * $total2) / $total1 - 100))
+ else
+ # Corner case when baseline results have no intersection with
+ # the current results. This happens when AOSP build fails and
+ # size.csv only has the header.
+ metric=0
+ fi
+ if [ $metric -lt 0 ]; then
+ echo "$metric,AOSP reduced in size by ${metric}% from $total1 to $total2" \
+ >> $run_step_artifacts/aosp.improvements
+ elif [ $metric -gt 0 ]; then
+ echo "$metric,AOSP increased in size by ${metric}% from $total1 to $total2" \
+ >> $run_step_artifacts/aosp.regressions
+ fi
+
+ local primary_change="" regression=false
+ if [ -f $run_step_artifacts/aosp.regressions ]; then
+ primary_change=$run_step_artifacts/aosp.regressions
+ regression=true
+ elif [ -f $run_step_artifacts/binary.regressions ]; then
+ primary_change=$run_step_artifacts/binary.regressions
+ regression=true
+ elif [ -f $run_step_artifacts/aosp.improvements ]; then
+ primary_change=$run_step_artifacts/aosp.improvements
+ elif [ -f $run_step_artifacts/binary.improvements ]; then
+ primary_change=$run_step_artifacts/binary.improvements
+ fi
+
+ if [ x"$primary_change" != x"" ]; then
+ sort -gr -o "$primary_change" "$primary_change"
+ if $regression; then
+ head -n1 "$primary_change" \
+ | sed -e "s/^/# /" > $run_step_artifacts/results.regressions
+ fi
+ fi
+
+ case "${rr[ci_project]}" in
+ "tcwg_aosp-build"*)
+ # We have passed no_build_regression_p() check above,
+ # so succeed.
+ return 0
+ ;;
+ esac
+
+ local -a changed_components
+ IFS=" " read -r -a changed_components <<< "$(print_changed_components)"
+ case " ${changed_components[*]} " in
+ *" llvm "*) ;;
+ *)
+ # Code-size builds without a changed LLVM are declared
+ # non-regressing: code-size changes then come from changes in AOSP,
+ # not in the compiler.
+ return 0
+ ;;
+ esac
+
+ if $regression; then
+ return 1
+ fi
+ return 0
+ )
+}
+
+rr[breakup_changed_components]="breakup_changed_components llvm"
+
+case "${rr[ci_project]}" in
+ "tcwg_aosp-build"*)
+ run_step stop_on_fail 0 reset_artifacts
+ run_step skip_on_fail 1 build_aosp_toolchain
+ run_step skip_on_fail 2 build_shadow_llvm
+ run_step skip_on_fail 3 build_aosp
+ run_step skip_on_fail x process_shadow_data -- 3
+ ;;
+ *)
+ run_step stop_on_fail -10 reset_artifacts
+ run_step skip_on_fail -3 build_aosp_toolchain
+ run_step skip_on_fail -2 build_shadow_llvm
+ run_step skip_on_fail -1 build_aosp
+ run_step skip_on_fail x process_shadow_data -- -1
+ ;;
+esac
+run_step reset_on_fail x check_regression
+
+trap "" EXIT
diff --git a/tcwg_bmk-build.sh b/tcwg_bmk-build.sh
index 87263a59..0c897fd7 100755
--- a/tcwg_bmk-build.sh
+++ b/tcwg_bmk-build.sh
@@ -10,86 +10,64 @@ scripts=$(dirname $0)
convert_args_to_variables "$@"
-obligatory_variables rr[ci_project] rr[ci_config] ssh_host ssh_port
+obligatory_variables rr[ci_project] rr[ci_config]
+declare -A rr
-# Execution mode: baseline, bisect, jenkins-full
-# shellcheck disable=SC2154
-rr[mode]="${rr[mode]-baseline}"
+# All bmk configuration of hardware and benchmarks is implemented in this file
+# shellcheck source=tcwg_bmk-config.sh
+. $scripts/tcwg_bmk-config.sh
+
+# Execution mode: build or bisect
+rr[mode]="${rr[mode]-build}"
# Set custom revision for one of the projects, and use baseline revisions
# for all other projects.
-# shellcheck disable=SC2154
rr[baseline_branch]="${rr[baseline_branch]-linaro-local/ci/${rr[ci_project]}/${rr[ci_config]}}"
-# shellcheck disable=SC2154
-rr[update_baseline]="${rr[update_baseline]-update}"
-# shellcheck disable=SC2154
+rr[update_baseline]="${rr[update_baseline]-ignore}"
rr[top_artifacts]="${rr[top_artifacts]-$(pwd)/artifacts}"
-# Set metric to perf by default.
-# shellcheck disable=SC2154
-rr[metric]="${rr[metric]-perf}"
-
-# {toolchain_name}-{toolchain_ver}-{target}-{bmk}-{cflags}
-IFS=- read -a ci_config <<EOF
-${rr[ci_config]}
+# The ${ci_project}--${ci_config} format is:
+# 'tcwg_bmk-#{PROFILE_NAME}-#{BMK}--#{TOOLCHAIN}-#{TARGET}-{toolchain_ver}-{cflags}'
+IFS=- read -a ci_pjt_cfg <<EOF
+${rr[ci_project]}--${rr[ci_config]}
EOF
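+# e.g. (hypothetical) tcwg_bmk-code_speed-spec2k6--gnu-aarch64-master-O2
+# splits so that ci_pjt_cfg[4]=gnu (toolchain), ci_pjt_cfg[5]=aarch64
+# (target) and ci_pjt_cfg[7]=O2 (cflags); index 3 is the empty field
+# produced by "--".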
-# shellcheck disable=SC2154
-rr[toolchain]=${rr[toolchain]-${ci_config[0]}}
-# shellcheck disable=SC2154
-rr[target]=${rr[target]-${ci_config[2]}}
-benchmarks=("${benchmarks[@]-${ci_config[3]}}")
-if [ x"${benchmarks[*]}" = x"default" ]; then
- benchmarks=("${ci_config[3]}")
-fi
-if ! test_array cflags; then
- ci_config=("${ci_config[@]:4}")
- # In ${ci_config[@]} we now have "-"-separated entries (due to IFS=- above).
- # We restore "-" in compiler flags when doing flags="$flags-$flag" below.
- # We use "_" to separate compiler options, and it is translated to " -"
- # in benchmark().
- cflags=()
- while [ ${#ci_config[@]} -ge 1 ]; do
- flags=""
- while [ ${#ci_config[@]} -ge 1 ]; do
- flag="${ci_config[0]}"
- ci_config=("${ci_config[@]:1}")
- if [ x"$flag" = x"vs" ]; then
- break
- fi
- flags="$flags-$flag"
- done
- cflags+=("$flags")
- done
-fi
+
+rr[toolchain]=${rr[toolchain]-${ci_pjt_cfg[4]}}
+rr[target]=${rr[target]-${ci_pjt_cfg[5]}}
+
+cflags="${cflags--${ci_pjt_cfg[7]}}"
gcc_mode=""
-for i in $(seq 0 $(("${#cflags[@]}" - 1))); do
- cflags_mode=""
- if [[ x"${cflags[$i]}" == x*"VECT"* ]]; then
- rr[metric]="vect"
- fi
+case "${rr[target]}:$cflags" in
+ "arm:"*"mthumb"*) gcc_mode=thumb ;;
+ "arm:"*"marm"*) gcc_mode=arm ;;
+ "arm:-Os"*|"arm:-Oz"*)
+ gcc_mode=thumb
+ cflags="${cflags}_mthumb"
+ ;;
+ "arm:"*)
+ gcc_mode=arm
+ cflags="${cflags}_marm"
+ ;;
+ "arm_eabi:"*)
+ cflags="${cflags}_mthumb"
+ ;;
+esac
- case "${rr[target]}:${cflags[$i]}" in
- "arm:"*"mthumb"*) cflags_mode=thumb ;;
- "arm:"*"marm"*) cflags_mode=arm ;;
- "arm:-Os"*|"arm:-Oz"*)
- cflags_mode=thumb
- cflags[$i]="${cflags[$i]}_mthumb"
- ;;
- "arm:"*)
- cflags_mode=arm
- cflags[$i]="${cflags[$i]}_marm"
- ;;
- "arm_eabi:"*)
- cflags[$i]="${cflags[$i]}_mthumb"
- ;;
- esac
- if [ x"$gcc_mode" = x"" ]; then
- gcc_mode="$cflags_mode"
- elif [ x"$gcc_mode" != x"$cflags_mode" ]; then
- assert_with_msg "Unsupported arm/thumb configuration ${cflags[$(($i - 1))]} and ${cflags[$i]}" false
- fi
-done
+case "${rr[ci_project]}" in
+ *-*_size-*) rr[metric_id]="size" ;;
+ *-*_speed-*) rr[metric_id]="sample" ;;
+ *-*_vect-*) rr[metric_id]="num_vect_loops" ;;
+ *-*_sve-*) rr[metric_id]="num_sve_loops" ;;
+ *) assert_with_msg "Cannot determine metric from ${rr[ci_project]}" false ;;
+esac
+
+called_from_notify=${called_from_notify-false}
+
+hw=$(tcwg_bmk_hw)
+hw=${hw%_32} ; hw=${hw%_64}
+
+# -----------------------------------------------------------------------
gcc_override_configure=()
# Set default ARM/Thumb mode for AArch32 compiler. This ensures that libraries
@@ -117,10 +95,9 @@ esac
case "${rr[toolchain]}" in
llvm)
- # shellcheck disable=SC2154
- rr[components]="binutils gcc glibc llvm" ;;
+ rr[components]="llvm" ;;
gnu)
- rr[components]="binutils gcc glibc" ;;
+ rr[components]="binutils gcc linux glibc" ;;
gnu_eabi)
rr[components]="binutils gcc newlib" ;;
*) assert_with_msg "Unknown toolchain \"${rr[toolchain]}\"" false ;;
@@ -144,24 +121,19 @@ trap print_traceback EXIT
default_start_at=""
default_finish_at=""
case "${rr[mode]}" in
- "baseline")
- default_finish_at="update_baseline"
- ;;
"bisect")
single_updated_component="$(print_single_updated_component)"
case $single_updated_component in
binutils) default_start_at="build_abe-binutils" ;;
gcc) default_start_at="build_abe-stage1" ;;
- glibc) default_start_at="clean_sysroot" ;;
- llvm) default_start_at="build_llvm-true" ;;
+ linux|glibc) default_start_at="clean_sysroot" ;;
+ llvm) default_start_at="build_bmk_llvm" ;;
newlib) default_start_at="build_abe-newlib" ;;
*) assert_with_msg \
- "Invalid single updated component \"$single_updated_component\"" false
- ;;
+ "Invalid single updated component \"$single_updated_component\"" false
+ ;;
esac
- default_finish_at="check_regression"
;;
- "jenkins-full") ;;
esac
if [ x"$start_at" = x"default" ]; then
start_at="$default_start_at"
@@ -170,33 +142,52 @@ if [ x"$finish_at" = x"default" ]; then
finish_at="$default_finish_at"
fi
+case "${rr[ci_project]}--${rr[ci_config]}" in
+ tcwg_bmk-code_speed-spec2k6--*|\
+ tcwg_bmk-code_speed-cpu2017rate--*|\
+ tcwg_bmk-code_size-cpu2017rate--*|\
+ tcwg_bmk-fujitsu_speed-cpu2017speed--*)
+ rr[major]=2
+ rr[minor]=2
+ ;;
+ *)
+ rr[major]=1
+ rr[minor]=0
+ ;;
+esac
+
run_step_init "$start_at" "$finish_at" "${rr[top_artifacts]}" "$verbose"
-# If we bisect a regression between different major versions of Glibc,
-# then we might get a mixed sysroot with several versions of ld-M.N.so and
-# other binaries installed side-by-side. Such a sysroot will break
-# benchmarking, which requires a single ld-*.so binary to be present.
-# Forcefully delete sysroot before building C library.
-clean_sysroot ()
+build_bmk_llvm ()
{
(
set -euf -o pipefail
- local gnu_target sysroot
- gnu_target=$(print_gnu_target ${rr[target]})
- sysroot="$(pwd)/abe/builds/destdir/x86_64-pc-linux-gnu/$gnu_target/libc"
+ local projects="clang;lld;openmp"
+ case "${rr[target]}" in
+ aarch64)
+ # Flang is not supported for AArch32
+ projects="$projects;flang"
+ ;;
+ esac
+
+ build_llvm "$projects" "" "${rr[metric_id]}"
- rm -rf "$sysroot"
+ # Copy shared libraries to runtime sysroot dir
+ mkdir -p llvm-install/libc
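+ # The rsync filters recurse into every directory ("*/"), keep only
+ # shared libraries ("*.so*"), and --delete-excluded prunes everything
+ # else from the destination tree.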
+ rsync -a --del --include "*/" --include "*.so*" --exclude "*" \
+ --delete-excluded llvm-install/lib/ llvm-install/libc/lib/
)
}
benchmark ()
{
+ obligatory_variables ssh_host ssh_port
+
(
set -euf -o pipefail
- local bmk_cflags="$1"
- local results_id_file="$3"
+ local bmk_flags="$2"
sanity_check_pwd
@@ -204,511 +195,334 @@ benchmark ()
rm -rf "$(pwd)"/bin
mkdir "$(pwd)"/bin
- local bmk_flags bmk_ldflags reboot run_profile
- bmk_flags="$(echo $bmk_cflags | sed -e "s/_/ -/g" -e "s/LTO/flto/g")"
- bmk_flags="$(echo $bmk_cflags | sed -e "s/_/ -/g" -e "s/VECT/fmetric-vect/g")"
- case "$bmk_cflags" in
- "-Os"*|"-Oz"*)
+ local reboot run_profile
+
+ bmk_flags="$(echo $bmk_flags | sed -e "s/_/ -/g" -e "s/LTO/flto/g")"
+
+ # Add extra cflags where a project requires them.
+ # code_sve projects are the only ones patched today;
+ # code_vect projects generate vectorization data by default.
+ case "${rr[ci_project]}" in
+ *code_sve*) bmk_flags="$bmk_flags -mcpu=generic+sve" ;;
+ esac
+
+ local hw_tag
+ hw_tag=$(tcwg_bmk_hw)
+
+ case "${rr[ci_project]}" in
+ *_size*|*_vect*|*_sve*)
reboot=false
run_profile="parallel"
+ testmode="verify"
;;
*)
reboot=true
run_profile="serial"
+ testmode="benchmark"
;;
esac
- local bench_list bin cc gnu_target sysroot toolchain
- gnu_target=$(print_gnu_target ${rr[target]})
- sysroot="$(pwd)/abe/builds/destdir/x86_64-pc-linux-gnu/$gnu_target/libc"
+ local bench_list bin cc sysroot toolchain
case "${rr[toolchain]}" in
llvm)
- local llvm_target
- llvm_target=$(echo "$gnu_target" | sed -e "s/^arm-/armv7a-/")
- bmk_flags="$bmk_flags --target=$llvm_target --sysroot=$sysroot"
- bmk_ldflags="$bmk_flags"
- # Use LLD for LLVM configurations.
- # Also, BFD linker crashes for AArch32 LTO builds,
- # see https://projects.linaro.org/browse/LLVM-562 .
- case "$bmk_ldflags" in
- *"-fuse-ld="*) ;;
- *) bmk_ldflags="$bmk_ldflags -fuse-ld=lld" ;;
- esac
+ sysroot="$(pwd)/llvm-install/libc"
bin="$(pwd)/llvm-install/bin"
cc="$bin/"
toolchain="llvm"
;;
gnu|gnu_eabi)
- bmk_ldflags="$bmk_flags"
- bin="$(pwd)/abe/builds/destdir/x86_64-pc-linux-gnu/bin"
+ local gnu_host gnu_target
+ gnu_host=$(print_gnu_target native)
+ gnu_target=$(print_gnu_target ${rr[target]})
+ sysroot="$(pwd)/abe/builds/destdir/$gnu_host/$gnu_target/libc"
+ bin="$(pwd)/abe/builds/destdir/$gnu_host/bin"
cc="$bin/$gnu_target-"
toolchain="gnu"
+ # Append -fdump-statistics-asmname and -fdump-tree-vect-details to
+ # obtain compile-time and vectorization statistics.
+ bmk_flags="$bmk_flags -fdump-statistics-asmname -fdump-tree-vect-details"
;;
esac
- case "${rr[toolchain]}:${benchmarks[@]}" in
- llvm:spec2k6) bench_list="c_and_cxx" ;;
- gnu:spec2k6) bench_list="all" ;;
- llvm:spec2017) bench_list="spec2017_speed_nofortran" ;;
- gnu:spec2017) bench_list="spec2017_speed" ;;
- *) bench_list="${benchmarks[*]}" ;;
- esac
+
+ bench_list="$(tcwg_bmk_benchs)"
+
# shellcheck disable=SC2154
sysroot="ssh://$ssh_host:$ssh_port:$sysroot"
- local hw_tag
- case "${rr[ci_project]}:${rr[target]}" in
- *_sq_32*:*) hw_tag=sq_32 ;;
- *_sq_64*:*) hw_tag=sq_64 ;;
- *_sq*:arm*) hw_tag=sq_32 ;;
- *_sq*:aarch64) hw_tag=sq_64 ;;
- *_tk1_32*:*) hw_tag=tk1_32 ;;
- *_tk1*:arm*) hw_tag=tk1_32 ;;
- *_tx1_32*:*) hw_tag=tx1_32 ;;
- *_tx1_64*:*) hw_tag=tx1_64 ;;
- *_tx1*:arm*) hw_tag=tx1_32 ;;
- *_tx1*:aarch64) hw_tag=tx1_64 ;;
- *_stm32*:arm*) hw_tag=stm32_STM32L476RGTx ;;
- *) echo "ERROR: Unknown hw_tag for ${rr[ci_project]}:${rr[target]}"; exit 1 ;;
- esac
+ local hw image_arch toolchain_proto
- local hw image_arch
+ toolchain_proto=ssh
case "$hw_tag" in
- sq_32) hw=sq; image_arch=armhf ;;
- sq_64) hw=sq; image_arch=arm64 ;;
- tk1_32) hw=tk1; image_arch=armhf ;;
- tx1_32) hw=tx1; image_arch=armhf ;;
- tx1_64) hw=tx1; image_arch=arm64 ;;
- stm32_STM32L476RGTx) hw=stm32; image_arch=armhf ;;
+ stm32)
+ hw=stm32; image_arch=amd64
+ # When running benchmarks on stm32, we prefer to rsync the
+ # toolchain to the board's host machine -- dev-02.tcwglab.
+ toolchain_proto=rsync
+ ;;
+ *_32) hw=${hw_tag/_32}; image_arch=armhf ;;
+ *_64) hw=${hw_tag/_64}; image_arch=arm64 ;;
*) echo "ERROR: Unknown hw_tag $hw_tag"; exit 1 ;;
esac
- local results_id="$hw_tag/${rr[ci_project]}/${rr[mode]}-${rr[ci_config]}/@build_num@"
-
- # When running benchmarks on stm32, we prefer to rsync the
- # toolchain towards tcwg-bmk-stm32-01.
- case "$hw" in
- stm32) toolchain_proto=rsync ;;
- *) toolchain_proto=ssh ;;
- esac
+ # Create directory for tcwg-benchmark to upload results to.
+ # Note that files inside $results_dir will be owned by tcwg-benchmark.
+ local results_dir
+ results_dir="$(mktemp -d)"
+ chmod 0777 "$results_dir"
+ # Trigger the benchmarking job and capture its console output.
+ # Ignore the exit code of the trigger command; failure conditions are
+ # detected by examining the console log instead.
# shellcheck disable=SC2154
remote_exec "ci.linaro.org:2222::-l $USER@linaro.org" \
- build tcwg-benchmark -w \
+ build tcwg-benchmark -f -v \
-p bmk_hw=$hw \
-p bench_list="$bench_list" \
-p cflags="$bmk_flags" \
- -p ldflags="$bmk_ldflags" \
- -p testmode=benchmark \
- -p displaytag="${rr[ci_project]}/${rr[mode]}-${rr[ci_config]}" \
+ -p ldflags="$bmk_flags" \
+ -p testmode="$testmode" \
+ -p displaytag="${rr[ci_project]}/${rr[ci_config]}-${rr[mode]}" \
-p ignore_errors=true \
-p toolchain_url=$toolchain_proto://$ssh_host:$ssh_port:$cc \
-p toolchain_type=$toolchain \
-p sysroot="$sysroot" \
- -p results_id="$results_id" \
+ -p results_dest="$ssh_host:$ssh_port:$results_dir" \
-p reboot="$reboot" \
-p run_profile="$run_profile" \
-p image_arch="$image_arch" \
${scripts_branch+-p scripts_branch="$scripts_branch"} \
${bmk_branch+-p bmk_branch="$bmk_branch"} \
- | tee $run_step_artifacts/benchmark-start.log
+ | tee $run_step_artifacts/benchmark-build.log || true
local build_num
- build_num=$(cat $run_step_artifacts/benchmark-start.log \
- | sed -e "s/.*#\([0-9]\+\).*/\1/")
+ build_num=$(head -n1 $run_step_artifacts/benchmark-build.log \
+ | sed -e "s/Started.*#\([0-9]\+\).*/\1/")
assert_with_msg "Benchmark build number should not be 0!" \
- [ "$build_num" -gt "0" ]
+ [ "$build_num" -gt "0" ]
local build_status
local build_ret
while true; do
- (remote_exec "ci.linaro.org:2222::-l $USER@linaro.org" \
- console tcwg-benchmark -n 1 -f $build_num || true) \
- | tee -a $run_step_artifacts/benchmark.log
-
# Ssh connection to ci.linaro.org occasionally drops. We need
# to check whether benchmarking has finished, and, if not, continue
- # to watch its output.
- build_status=$(tail -n 1 $run_step_artifacts/benchmark.log)
+ # waiting.
+ build_status=$(curl -s \
+ "https://ci.linaro.org/job/tcwg-benchmark/$build_num/api/json?tree=result" \
+ | jq -r ".result")
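+ # While the build is still running Jenkins reports "result": null,
+ # which "jq -r" prints as the literal string "null" (handled below).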
case "$build_status" in
- "Finished: SUCCESS")
+ "null")
+ # Continue waiting
+ true
+ ;;
+ "SUCCESS")
build_ret=0
break
;;
- "Finished: "*)
- echo "# Benchmarking infra is offline:" >> ${rr[top_artifacts]}/results
+ *)
+ echo "# Benchmarking infra is offline:" \
+ >> ${rr[top_artifacts]}/results
echo "-$EXTERNAL_FAIL" >> ${rr[top_artifacts]}/results
build_ret=1
break
;;
esac
- # Sleep a little to avoid flooding ci.linaro.org on transient ssh
- # failures.
- sleep 5
+ # Wait by following console output
+ (ssh -p2222 -l $USER@linaro.org ci.linaro.org \
+ console tcwg-benchmark $build_num -f || true) \
+ | tee $run_step_artifacts/benchmark-wait.log
done
- echo "$results_id" | sed -e "s/@build_num@/$build_num/g" \
- > "$results_id_file"
- return $build_ret
- )
-}
-
-# Compare results obtained from perf data between $1 and $2
-# and generate results-compare.csv
-compare_results_perf ()
-{
- (
- set -euf -o pipefail
+ rm -rf "${rr[top_artifacts]}/annex"
+ mkdir "${rr[top_artifacts]}/annex"
+ ln -s "$results_dir" "${rr[top_artifacts]}/annex/bmk-data"
- case "${cflags[0]}" in
- "-Os"*|"-Oz"*)
- # We use 1% tolerance for binary size
- # and 10% tolerance for symbol size.
- exe_threshold=101
- symbol_threshold=110
- ;;
- *)
- # We use 3% tolerance for binary speed
- # and 15% tolerance for symbol speed.
- exe_threshold=103
- symbol_threshold=115
- # Reduce thresholds when bisecting to avoid considering borderline
- # regressions as spurious. This should break cycles of build and
- # bisect jobs triggering each other on borderline regressions.
- if [ x"${rr[mode]}" = x"bisect" ]; then
- exe_threshold=102
- symbol_threshold=110
- fi
- ;;
- esac
-
- local -a arr
- local bmk symbol time size result prev_bmk
- echo "bmk,symbol,result" > $run_step_artifacts/results-compare.csv
- printf "extra_build_params=" > $run_step_artifacts/extra-bisect-params
-
- # Read result lines from <(tail -n +2 ...) below.
- # "-n +2" is to skip the header line.
- prev_bmk=""
- while IFS=, read -a arr; do
- bmk=${arr[0]}
- symbol=${arr[1]}
- time=${arr[2]}
- size=${arr[3]}
- case "${cflags[0]}" in
- "-Os"*|"-Oz"*) metric="$size" ;;
- *) metric="$time" ;;
- esac
-
- # Skip case where we have no info ("n/a")
- if [ "$metric" != "n/a" ]; then
- # Remove padding from the tail of $symbol (padding is added by
- # csvs2table.py for better formatting).
- local short_symbol="${symbol%%[ ]*}"
- case "$short_symbol" in
- "["*) threshold=$symbol_threshold ;;
- *"_base.default") threshold=$exe_threshold ;;
- *) threshold=$symbol_threshold ;;
- esac
- if ! [ "$metric" -le "$threshold" ]; then
- result=100
- echo "# $bmk,$symbol regressed by $metric" >> $run_step_artifacts/results.regressions
- if [ x"$bmk" != x"$prev_bmk" ]; then
- printf "++benchmarks %s " $bmk >> $run_step_artifacts/extra-bisect-params
- prev_bmk="$bmk"
- fi
- else
- result=1
- fi
- echo "$bmk,$symbol,$result" >> $run_step_artifacts/results-compare.csv
- fi
- done < <(tail -n +2 $run_step_artifacts/results.csv)
- printf "\n" >> $run_step_artifacts/extra-bisect-params
+ return $build_ret
)
}
-compare_results_vect ()
+# Exit with code 0 if no regression compared to base-artifacts/.
+no_regression_p ()
{
(
set -euf -o pipefail
- echo "bmk,symbol,result" > $run_step_artifacts/results-compare.csv
-
- while IFS=, read -a arr; do
- bmk=${arr[0]}
- # hack to trim padding
- symbol=$(echo ${arr[1]} | xargs)
- base_num_vect_loops=${arr[3]}
- target_num_vect_loops=${arr[4]}
- if (( base_num_vect_loops > target_num_vect_loops )); then
- echo "$bmk, $symbol, $base_num_vect_loops, $target_num_vect_loops" \
- >> $run_step_artifacts/results-compare.csv
- fi
- done < <(tail -n +2 $run_step_artifacts/results.csv)
- )
-}
-compare_results ()
-{
- (
- set -euf -o pipefail
+ # check score-based regression
+ no_build_regression_p "$@"
- local metric=$1
- local ref_results_id="$2"
- local new_results_id="$3"
- local cmp_options="$4"
+ # At this stage, there's no score-based regression.
+ # We are now checking metric-based regression.
- local results_ref results_new
- results_ref=$(cat $ref_results_id)
- results_new=$(cat $new_results_id)
+ assert_with_msg "Benchmarking succeeded, but bmk-data is missing" \
+ [ -e $run_step_top_artifacts/annex/bmk-data ]
- case "${rr[target]}" in
- "arm_eabi")
- cmp_options="$cmp_options --has_perf_logs no"
- ;;
- esac
+ # Make sure there is no stray results.regressions file, which we use
+ # as a failure marker.
+ assert ! [ -f $run_step_artifacts/results.regressions ]
- $scripts/tcwg-benchmark-results.sh \
- --results_ref $results_ref ++results $results_new \
- --top_artifacts "$run_step_artifacts" --verbose $verbose \
- --metric "$metric" $cmp_options \
- > $run_step_artifacts/results.log 2>&1
-
- case $metric in
- "perf")
- compare_results_perf
- ;;
- "vect")
- compare_results_vect
- ;;
- *)
- echo "Invalid metric: $metric";
- exit 1
- ;;
+ # FIXME: Remove custom options for LTO configurations once
+ # symbol-specific thresholds are large enough.
+ local compare_opts=""
+ case "${rr[target]}:$cflags" in
+ "arm_eabi":*"_LTO"*) compare_opts="--num_symbols 0 --entry_threshold 10 --has_perf_logs no" ;;
+ *:*"_LTO"*) compare_opts="--num_symbols 0 --entry_threshold 10" ;;
+ "arm_eabi":*) compare_opts="--has_perf_logs no" ;;
+ *) compare_opts="" ;;
esac
- )
-}
-# Exit with code 0 if no new regressions between results_id-1 and -2 compared to
-# regression between results_id-1 and -2 in base-artifacts/.
-no_regression_vs_p ()
-{
- (
- set -euf -o pipefail
+ if [ -f /usr/lib/linux-tools/install-armhf-perf-workaround.sh ]; then
+ # FIXME:
+ # In some cases perf report crashes when run from armhf container on
+ # ARMv8 machine.
+ # Install a workaround while we are investigating the cause.
+ sudo /usr/lib/linux-tools/install-armhf-perf-workaround.sh
+ fi
- local ref_artifacts=$1
- local new_artifacts=$2
+ local new_results="${rr[top_artifacts]}/annex/bmk-data"
+ local ref_results="base-artifacts/annex/bmk-data"
- # Check for build and correctness regressions.
- no_build_regression_p "$@"
+ assert_with_msg "Benchmarking succeeded, but no annex/bmk-data results" \
+ [ -d "$new_results" ]
- # Generate ref-results-compare.csv. The value of "1" means that the result
- # in the 2nd run is no worse than the result in the 1st run (as expected).
- # The value of "100" means that the result in the 2nd run is worse than
- # the result in the 1st run (unexpected).
- # Note that we can grab previously-generated ref-results-compares.csv from
- # base-artifacts/, but it could have been generated with an older version
- # of scripts, so it's safer and more resilient to re-generate it from original
- # perf data.
- if [ ! -f "$ref_artifacts/results_id-1" ] || [ ! -f "$ref_artifacts/results_id-2" ]; then
- return 0
+ if ! [ -d "$ref_results" ]; then
+ # base-artifacts has no reference results.
+ # This can happen on init build (update_baseline=init).
+ # In such cases we compare results to themselves just as an exercise.
+ ref_results="$new_results"
+ assert_with_msg "No reference results" \
+ [ "${rr[update_baseline]}" = "init" ]
fi
- # <Workaround> missing reference results, which we have listed in
- # tcwg-benchmark-results.broken-list. Once all entries referencing missing
- # results are discarded, we'll remove this workaround.
- # Otherwise compare_results will fail while fetching baseline results,
- # and we'll consider this failure as a regression.
- if cat "$scripts/tcwg-benchmark-results.broken-list" \
- | grep -q "^$(cat $ref_artifacts/results_id-1)\$\|^$(cat $ref_artifacts/results_id-2)\$"; then
- return 0
- fi
- # </Workaround>
- compare_results "${rr[metric]}" "$ref_artifacts/results_id-1" "$ref_artifacts/results_id-2" \
- "--num_dsos 1 --num_symbols 0"
-
- while IFS= read -r -d '' i
- do
- mv $i "$(dirname $i)"/ref-"$(basename $i)"
- done < <(find $run_step_artifacts/ -type f -name "results*" -print0)
- # Similarly, generate new-results-compare.csv.
- if [ ! -f "$new_artifacts/results_id-1" ] || [ ! -f "$new_artifacts/results_id-2" ]; then
- return 1
+ # Compare vs previous run
+ mkdir -p ${rr[top_artifacts]}/results-vs-prev
+ ln -s ../results-vs-prev $run_step_artifacts/results-vs-prev
+ $scripts/tcwg-benchmark-results.sh \
+ --results_ref "$ref_results" ++results "$new_results" \
+ --top_artifacts "${rr[top_artifacts]}/results-vs-prev" \
+ --verbose $verbose --hw_tag "$(tcwg_bmk_hw)" \
+ $compare_opts \
+ > ${rr[top_artifacts]}/results-vs-prev/tcwg-benchmark-results.log 2>&1 &
+
+ local res
+ res=0 && wait $! || res=$?
+ if [ $res != 0 ]; then
+ return $EXTERNAL_FAIL
fi
- compare_results "${rr[metric]}" "$new_artifacts/results_id-1" "$new_artifacts/results_id-2" \
- "--num_dsos 1 --num_symbols 0"
- while IFS= read -r -d '' i
- do
- mv $i "$(dirname $i)"/new-"$(basename $i)"
- done < <(find $run_step_artifacts/ -type f -name "results*" -print0)
-
- # Now compare the two reports.
- # If "ref" has value of "100" (bad state), and "new" has value of "100"
- # (also bad state), then we get no change, no regression, and final value
- # of 100% * 100/100 == 100.
- #
- # If "ref" has value of "1" (good state), and "new" has value of "1"
- # (also good state), then we get no change, no regression, and final value
- # of 100% * 1/1 == 100.
- #
- # If "ref" has value of "100" (bad state), and "new" has value of "1"
- # (good state), then we get a progression, and final value
- # of 100% * 1/100 == 1.
- #
- # If "ref" has value of "1" (good state), and "new" has value of "100"
- # (bad state), then we get a regression, and final value
- # of 100% * 100/1 == 10000. We detect this below by comparing vs "5000".
- $scripts/../bmk-scripts/csvs2table.py -p 0 --relative $run_step_artifacts/ref-results-compare.csv $run_step_artifacts/new-results-compare.csv > $run_step_artifacts/results-compare.csv
-
- local -a arr
- local bmk symbol result status prev_bmk
- local -a bisect_bmks
-
- # Read result lines from <(tail -n +2 ...) below.
- # "-n +2" is to skip the header line. Set $status to "1" if there is
- # a regression.
- status=0
- prev_bmk=""
- # Delete results.regressions generated by compare_results() calls above.
- rm -f $run_step_artifacts/results.regressions
- while IFS=, read -a arr; do
- bmk=${arr[0]}
- symbol=${arr[1]}
- result=${arr[2]}
- if ! [ "$result" -le "5000" ]; then
- echo "# $bmk,$symbol regressed" >> $run_step_artifacts/results.regressions
- status=1
- if [ x"$bmk" != x"$prev_bmk" ]; then
- bisect_bmks+=("++benchmarks" "$bmk")
- prev_bmk="$bmk"
- fi
+
+ # The output-bmk-results.py call below creates *.regression files.
+ assert_with_msg "Found stale regression files" \
+ [ x"$(find $run_step_artifacts/ -name "*.regression" | wc -l)" = x"0" ]
+
+ # Extract the 5 most recent compare-results-vs-prev-internal.csv files
+ # from base-artifacts and compute the standard deviation across them.
+ local -a csvs_paths
+ csvs_paths=("results-vs-prev/compare-results-internal.csv"
+ "$(basename $run_step_artifacts)/compare-results-vs-prev-internal.csv")
+
+ local -a history_csvs
+ local csv history_root=""
+ while read csv; do
+ if [ "$history_root" = "" ]; then
+ history_root="$csv"
+ continue
+ fi
+
+ history_csvs+=("$csv")
+ done < <(get_git_history -0 base-artifacts "${csvs_paths[@]}")
+
+ local csv tmpf
+ local -a compare_results_list=()
+ tmpf=$(mktemp)
+
+ # FIXME:
+ # To deal with some differences along base-artifacts' recent history:
+ # - remove the 'Failed for column' message from csv files
+ # - skip empty csv files.
+ for csv in "${history_csvs[@]}"; do
+ grep -v 'Failed for column' "$csv" > "$tmpf" || true
+ cp "$tmpf" "$csv"
+ if [ -s "$csv" ]; then
+ compare_results_list+=("$csv")
fi
- done < <(tail -n +2 $run_step_artifacts/results-compare.csv)
- echo "extra_build_params=${bisect_bmks[*]}" > $run_step_artifacts/extra-bisect-params
- return $status
- )
-}
+ done
-# Exit with code 0 if no regression compared to base-artifacts/.
-# Inspect build results ./results and performance results in ./results_id.
-no_regression_to_base_p ()
-{
- (
- set -euf -o pipefail
+ if [ ${#compare_results_list[@]} != 0 ]; then
+ $scripts/../bmk-scripts/compute-variability.py \
+ --inputs "${compare_results_list[@]}" ${rr[top_artifacts]}/results-vs-prev/compare-results-internal.csv \
+ --weights 2-peaks-linear --method avg \
+ --output ${rr[top_artifacts]}/results-vs-prev/bmk-specific-variability-avg.csv || true
- no_build_regression_p "$@"
+ $scripts/../bmk-scripts/compute-variability.py \
+ --inputs "${compare_results_list[@]}" ${rr[top_artifacts]}/results-vs-prev/compare-results-internal.csv \
+ --weights 2-peaks-linear --method max \
+ --output ${rr[top_artifacts]}/results-vs-prev/bmk-specific-variability-max.csv || true
+ fi
- local ref_artifacts=$1
- local new_artifacts=$2
+ rm -rf "$history_root" "$tmpf"
- if ! [ -f "$ref_artifacts/results_id" ]; then
- return 0
- fi
- # <Workaround> missing reference results, which we have listed in
- # tcwg-benchmark-results.broken-list. Once all entries referencing missing
- # results are discarded, we'll remove this workaround.
- # Otherwise compare_results will fail while fetching baseline results,
- # and we'll consider this failure as a regression.
- if cat "$scripts/tcwg-benchmark-results.broken-list" \
- | grep -q "^$(cat $ref_artifacts/results_id)\$"; then
- return 0
- fi
- # </Workaround>
- if ! [ -f "$new_artifacts/results_id" ]; then
- return 1
- fi
+ $scripts/../bmk-scripts/output-bmk-results.py \
+ --compare_results ${rr[top_artifacts]}/results-vs-prev/compare-results-internal.csv \
+ --variability_file ${rr[top_artifacts]}/results-vs-prev/bmk-specific-variability-max.csv \
+ --variability_file_data "max" \
+ --run_step_dir "$run_step_artifacts"/ \
+ --metric "${rr[metric_id]}" --mode "${rr[mode]}" \
+ --details quiet > $run_step_artifacts/output-bmk-results.log
- # Make sure there is no stray results.regression file, which we use
- # as failure marker.
- assert ! [ -f $run_step_artifacts/results.regressions ]
+ # Copy inputs useful for building the mail / jira notifications
+ # to the notify/ dir.
+ for resfile in $run_step_artifacts/{exe,symbol}.{regression,improvement}; do
+ if [ -f $resfile ]; then
+ cp $resfile ${rr[top_artifacts]}/notify/
+ fi
+ done
- local compare_opts=""
- case "${cflags[0]}" in
- *"_LTO"*) compare_opts="--num_symbols 0 --entry_threshold 10" ;;
- esac
- compare_results "${rr[metric]}" "$ref_artifacts/results_id" "$new_artifacts/results_id" "$compare_opts"
+ # The return status relies on the presence of the results.regressions file.
if [ -f $run_step_artifacts/results.regressions ]; then
+ assert_with_msg "Found a regression while comparing the build against itself" \
+ [ "$ref_results" != "$new_results" ]
return 1
fi
return 0
)
}
-# Implement rr[breakup_updated_components] hook.
-tcwg_bmk_breakup_updated_components ()
-{
- (
- set -euf -o pipefail
-
- # Compiler changes tend to cause the most regressions.
- # Breakup updated components into compiler and the rest of components
- # to reduce the number of builds.
- local cc
- case "${rr[toolchain]}" in
- llvm) cc="llvm" ;;
- gnu|gnu_eabi) cc="gcc" ;;
- *) assert false ;;
- esac
-
- if print_updated_components "\n" | grep -q "^$cc\$"; then
- echo "$cc"
- print_updated_components "\n" | grep -v "^$cc\$" | tr '\n' ' ' | sed -e "s/ \$//g"
- echo
- else
- print_updated_components "\n"
- fi
- )
-}
-# shellcheck disable=SC2154
-rr[breakup_updated_components]=tcwg_bmk_breakup_updated_components
-
-run_step stop_on_fail -10 reset_artifacts
-run_step stop_on_fail x prepare_abe
-run_step skip_on_fail -9 build_abe binutils
-run_step skip_on_fail -8 build_abe stage1 -- "${gcc_override_configure[@]}"
-run_step skip_on_fail x clean_sysroot
-case "${rr[components]}" in
- *glibc*)
- run_step skip_on_fail -7 build_abe linux
- run_step skip_on_fail -6 build_abe glibc
+# Compiler changes tend to cause the most regressions.
+# Breakup updated components into compiler and the rest of components
+# to reduce the number of builds.
+case "${rr[toolchain]}" in
+ llvm)
+ rr[breakup_changed_components]="breakup_changed_components llvm"
;;
- *newlib*)
- run_step skip_on_fail -6 build_abe newlib
+ gnu|gnu_eabi)
+ rr[breakup_changed_components]="breakup_changed_components gcc"
;;
+ *) assert false ;;
esac
-patch_branch=""
-if [ x"${rr[metric]}" = x"vect" ]; then
- patch_branch="--patch linaro-local/vect-metric-branch"
-fi
-
-run_step skip_on_fail -5 build_abe stage2 -- $patch_branch "${gcc_override_configure[@]}"
-
+run_step stop_on_fail -10 reset_artifacts
case "${rr[toolchain]}" in
- llvm) run_step skip_on_fail -3 build_llvm true ;;
-esac
-case "${#cflags[@]}" in
- 2)
- # Don't bisect benchmark build/run failures in *-vs-* configurations.
- # Bisections happen only for regressions with build scores >=0,
- # which will happen if benchmark "${cflags[1]}" succeeds.
- run_step skip_on_fail -1 benchmark "${cflags[0]}" -- ${rr[top_artifacts]}/results_id-1
- run_step skip_on_fail 0 benchmark "${cflags[1]}" -- ${rr[top_artifacts]}/results_id-2
- # Set final "build" score to "1" for compatibility with older results
- run_step skip_on_fail 1 true
- # shellcheck disable=SC2154
- rr[no_regression_p]=no_regression_vs_p
- run_step reset_on_fail x check_regression
+ gnu*)
+ run_step stop_on_fail x prepare_abe
+ run_step skip_on_fail -9 build_abe binutils
+ run_step skip_on_fail -8 build_abe stage1 -- \
+ "${gcc_override_configure[@]}"
+ run_step skip_on_fail x clean_sysroot
+ case "${rr[components]}" in
+ *glibc*)
+ run_step skip_on_fail -7 build_abe linux
+ run_step skip_on_fail -6 build_abe glibc
+ ;;
+ *newlib*)
+ run_step skip_on_fail -6 build_abe newlib
+ ;;
+ esac
+ run_step skip_on_fail -5 build_abe stage2 -- \
+ "${gcc_override_configure[@]}"
;;
- 1)
- # Bisect benchmark build/run failures in non-vs configurations.
- # Set score to "0" with "true".
- run_step skip_on_fail 0 true
- run_step skip_on_fail 1 benchmark "${cflags[0]}" -- ${rr[top_artifacts]}/results_id
- rr[no_regression_p]=no_regression_to_base_p
- run_step reset_on_fail x check_regression
+ llvm)
+
+ run_step skip_on_fail -3 build_bmk_llvm
;;
esac
-run_step stop_on_fail x update_baseline
-run_step stop_on_fail x push_baseline
+run_step skip_on_fail 1 benchmark -- "$cflags"
+run_step reset_on_fail x check_regression
trap "" EXIT
diff --git a/tcwg_bmk-config.sh b/tcwg_bmk-config.sh
new file mode 100644
index 00000000..52892308
--- /dev/null
+++ b/tcwg_bmk-config.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+# rr[ci_project] and rr[ci_config] must be set before sourcing
+# tcwg_bmk-config.sh.
+
+# shellcheck disable=SC2154
+( [[ -v rr[ci_project] ]] && [[ -v rr[ci_config] ]] ) || \
+ assert_with_msg "ERROR: need rr to be well defined." false
+
+declare -A bmk_data
+bmk_data[tcwg_bmk-code_size-coremark--gnu_eabi-arm_eabi]=stm32:coremark
+bmk_data[tcwg_bmk-code_size-cpu2017fast--gnu-aarch64]=sq_64:spec2017_fast
+bmk_data[tcwg_bmk-code_size-cpu2017fast--llvm-aarch64]=sq_64:spec2017_fast
+bmk_data[tcwg_bmk-code_size-cpu2017rate--gnu-arm]=sq_32:spec2017_rate
+bmk_data[tcwg_bmk-code_size-cpu2017rate--llvm-arm]=sq_32:spec2017_rate_nofortran
+bmk_data[tcwg_bmk-code_size-spec2k6--gnu-aarch64]=sq_64:spec2006_all
+bmk_data[tcwg_bmk-code_size-spec2k6--gnu-arm]=sq_32:spec2006_all
+bmk_data[tcwg_bmk-code_size-spec2k6--llvm-aarch64]=sq_64:spec2006_all
+bmk_data[tcwg_bmk-code_size-spec2k6--llvm-arm]=sq_32:spec2006_nofortran
+bmk_data[tcwg_bmk-code_speed-coremark--gnu_eabi-arm_eabi]=stm32:coremark
+bmk_data[tcwg_bmk-code_speed-cpu2017rate--gnu-aarch64]=tx1_64:spec2017_rate
+bmk_data[tcwg_bmk-code_speed-cpu2017rate--gnu-arm]=tk1_32:spec2017_rate
+bmk_data[tcwg_bmk-code_speed-cpu2017rate--llvm-aarch64]=tx1_64:spec2017_rate
+bmk_data[tcwg_bmk-code_speed-cpu2017rate--llvm-arm]=tk1_32:spec2017_rate_nofortran
+bmk_data[tcwg_bmk-code_speed-cpu2017speed--gnu-aarch64]=apm_64:spec2017_speed
+bmk_data[tcwg_bmk-code_speed-cpu2017speed--llvm-aarch64]=apm_64:spec2017_speed
+bmk_data[tcwg_bmk-qc_speed-cpu2017rate--llvm-aarch64]=qc_64:spec2017_rate
+bmk_data[tcwg_bmk-qc_speed-cpu2017rate--gnu-aarch64]=qc_64:spec2017_rate
+bmk_data[tcwg_bmk-code_speed-spec2k6--gnu-aarch64]=tx1_64:spec2006_all
+bmk_data[tcwg_bmk-code_speed-spec2k6--gnu-arm]=tk1_32:spec2006_all
+bmk_data[tcwg_bmk-code_speed-spec2k6--llvm-aarch64]=tx1_64:spec2006_all
+bmk_data[tcwg_bmk-code_speed-spec2k6--llvm-arm]=tk1_32:spec2006_nofortran
+bmk_data[tcwg_bmk-fujitsu_speed-cpu2017speed--gnu-aarch64]=fx_64:spec2017_speed
+bmk_data[tcwg_bmk-fujitsu_speed-cpu2017speed--llvm-aarch64]=fx_64:spec2017_speed
+bmk_data[tcwg_bmk-code_sve-cpu2017fast--gnu-aarch64]=fx_64:spec2017_fast
+bmk_data[tcwg_bmk-code_vect-cpu2017fast--gnu-aarch64]=sq_64:spec2017_fast
+bmk_data[tcwg_bmk-code_vect-cpu2017fast--llvm-aarch64]=sq_64:spec2017_fast
+bmk_data[tcwg_bmk-code_vect-cpu2017rate--gnu-arm]=sq_32:spec2017_rate
+bmk_data[tcwg_bmk-code_vect-cpu2017rate--llvm-arm]=sq_32:spec2017_rate_nofortran
+bmk_data[tcwg_bmk-code_vect-spec2k6--gnu-aarch64]=sq_64:spec2006_all
+bmk_data[tcwg_bmk-code_vect-spec2k6--gnu-arm]=sq_32:spec2006_all
+bmk_data[tcwg_bmk-code_vect-spec2k6--llvm-aarch64]=sq_64:spec2006_all
+bmk_data[tcwg_bmk-code_vect-spec2k6--llvm-arm]=sq_32:spec2006_nofortran
+
+# Returns the hw_tag to use.
+# Requires rr[ci_project] and rr[ci_config] to be set.
+tcwg_bmk_hw ()
+{
+ local data data_key
+ data_key=$(echo ${rr[ci_project]}--${rr[ci_config]} | cut -d- -f1-6)
+ data="${bmk_data[$data_key]-unset}"
+ if [ x"$data" = x"unset" ]; then
+ assert_with_msg "tcwg_bmk_benchs: Unknown project \"${rr[ci_project]}--${rr[ci_config]}\"" false
+ fi
+ echo "$data" | cut -d: -f1
+}
+
+tcwg_bmk_benchs ()
+{
+ # If a specific benchmark list is specified, just return it.
+ # shellcheck disable=SC2154
+ if [ x"${benchmarks[*]+set}" = x"set" ]; then
+ echo "${benchmarks[*]}"
+ return
+ fi
+
+ local data data_key
+ data_key=$(echo ${rr[ci_project]}--${rr[ci_config]} | cut -d- -f1-6)
+ data="${bmk_data[$data_key]-unset}"
+ if [ x"$data" = x"unset" ]; then
+ assert_with_msg "tcwg_bmk_benchs: Unknown project \"${rr[ci_project]}--${rr[ci_config]}\"" false
+ fi
+ echo "$data" | cut -d: -f2
+}
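+
+# Usage sketch (values from bmk_data above; the "master"/"O2" fields are
+# hypothetical, since only "-"-fields 1-6 select the entry):
+#   rr[ci_project]=tcwg_bmk-code_speed-spec2k6 rr[ci_config]=llvm-arm-master-O2
+#   tcwg_bmk_hw      # prints "tk1_32"
+#   tcwg_bmk_benchs  # prints "spec2006_nofortran"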
diff --git a/tcwg_chromium-build.sh b/tcwg_chromium-build.sh
new file mode 100755
index 00000000..72174cfe
--- /dev/null
+++ b/tcwg_chromium-build.sh
@@ -0,0 +1,202 @@
+#!/bin/bash
+
+set -euf -o pipefail
+
+scripts=$(dirname $0)
+# shellcheck source=jenkins-helpers.sh
+. $scripts/jenkins-helpers.sh
+# shellcheck source=round-robin.sh
+. $scripts/round-robin.sh
+# shellcheck source=ci-autotest.sh
+. $scripts/ci-autotest.sh
+
+workspace="${workspace-$(pwd)}"
+
+convert_args_to_variables "$@"
+
+obligatory_variables rr[ci_project] rr[ci_config]
+declare -A rr
+
+# Execution mode: build or bisect
+rr[mode]="${rr[mode]-build}"
+
+# Set custom revision for one of the projects, and use baseline revisions
+# for all other projects.
+rr[baseline_branch]="${rr[baseline_branch]-linaro-local/ci/${rr[ci_project]}/${rr[ci_config]}}"
+rr[update_baseline]="${rr[update_baseline]-ignore}"
+rr[top_artifacts]="${rr[top_artifacts]-$(pwd)/artifacts}"
+
+# FIXME: Disable ci_autotest when ready
+rr[ci_autotest]="${rr[ci_autotest]-test}"
+
+# This project uses a dynamic component list
+rr[dynamic_components_list]="${rr[dynamic_components_list]-}"
+
+declare -A deps_dir=(
+ ["dawn"]="third_party/dawn"
+ ["chromium_variations"]="third_party/chromium-variations"
+ ["catapult"]="third_party/catapult"
+ ["perfetto"]="third_party/perfetto"
+ ["vulkan-deps"]="third_party/vulkan-deps"
+ ["angle"]="third_party/angle"
+ ["skia"]="third_party/skia"
+ ["v8"]="v8"
+)
+
+if [ x"${rr[dynamic_components_list]}" == x"*" ]; then
+ dynamic_components_list="${!deps_dir[*]}"
+else
+ dynamic_components_list="${rr[dynamic_components_list]}"
+fi
+
+rr[components]="${rr[components]-}"
+for dep in $dynamic_components_list; do
+ if [ -f ${rr[top_artifacts]}/git/${dep}_rev ]; then
+ rr[components]="${rr[components]} ${dep}"
+ rr[${dep}_git]=$(get_current_git ${dep}_rev)
+ fi
+done
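+# e.g. a listed component "v8" is enabled only if ${rr[top_artifacts]}/git/v8_rev
+# exists; its revision is then read via get_current_git into rr[v8_git].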
+
+verbose="${verbose-true}"
+verbose2="${verbose2-false}"
+
+if $verbose2; then set -x; fi
+
+trap print_traceback EXIT
+
+# Set start and finish steps for different modes.
+start_at="${start_at-default}"
+finish_at="${finish_at-default}"
+default_start_at=""
+default_finish_at=""
+if [ x"$start_at" = x"default" ]; then
+ start_at="$default_start_at"
+fi
+if [ x"$finish_at" = x"default" ]; then
+ finish_at="$default_finish_at"
+fi
+
+run_step_init "$start_at" "$finish_at" "${rr[top_artifacts]}" "$verbose"
+#
+get_sources()
+{
+ cd $workspace
+
+ echo "# RUN_STEP: get_sources"
+
+ /bin/rm -fr $PWD/depot_tools
+
+ git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git
+
+ export PATH="$PWD/depot_tools:$PATH"
+
+ force_fetch=false
+
+ if [ ! -d $PWD/src ] || $force_fetch; then
+ /bin/rm -fr $PWD/src $PWD/.gclient $PWD/.gclient_entries $PWD/.gclient_previous_sync_commits
+
+ fetch --nohooks --no-history chromium
+ fi
+
+
+ cd $workspace/src
+
+ #
+
+ # FIXME: Temporarily disabled to make it faster
+ # gclient revinfo > REVINFO.BEF
+
+ # Updating individual components
+ (
+ cd $workspace
+
+ echo "components=${rr[components]}"
+
+ # update the rev
+ for depfile in ${rr[components]}; do
+ dep=${depfile#${rr[top_artifacts]}/git/}
+ dep=${dep%_rev}
+
+ rev_or_branch=$(get_current_git ${dep}_rev)
+ git -C "src/${deps_dir[$dep]}" fetch origin "$rev_or_branch"
+ git -C "src/${deps_dir[$dep]}" checkout FETCH_HEAD
+ rev=$(git -C "src/${deps_dir[$dep]}" rev-parse HEAD)
+
+ echo "UPDATING [$dep] : $rev_or_branch -> $rev"
+ echo "$rev" | set_current_git ${dep}_rev
+
+ echo "src/${deps_dir[$dep]}" | set_current_git ${dep}_dir
+ done
+ )
+
+ # FIXME: Temporarily return here to make it faster
+ return
+
+ gclient revinfo > REVINFO.AFT
+
+ #
+ sudo apt-get update
+ sudo env DEBIAN_FRONTEND=noninteractive apt-get install keyboard-configuration
+
+ ./build/install-build-deps.sh
+
+ ./build/linux/sysroot_scripts/install-sysroot.py --arch=arm64
+
+ #gclient sync --nohooks
+
+ gclient runhooks
+
+ gn gen out/Default
+
+ [ "$(grep target_cpu out/Default/args.gn | wc -l)" -ne 0 ] \
+ || echo 'target_cpu = "arm64"' >> out/Default/args.gn
+
+}
+
+#
+build_chromium()
+{
+ echo "# RUN_STEP: build_chromium"
+ export PATH="$PWD/depot_tools:$PATH"
+ if [ ${rr[ci_autotest]} == "build" ]; then
+ ci_autotest_check_guilty_commit "build"
+ return
+ fi
+
+ # autoninja -C out/Default chrome
+ true
+}
+
+#
+test_chromium()
+{
+ echo "# RUN_STEP: test_chromium"
+ if [ ${rr[ci_autotest]} == "test" ]; then
+ ci_autotest_check_guilty_commit "test"
+ return
+ fi
+
+ # test it
+ true
+}
+
+# Exit with code 0 if no regression compared to base-artifacts/results.
+no_regression_p ()
+{
+ # check score-based regression
+ no_build_regression_p "$@"
+
+ # Regression detection could be more precise:
+ # - same "build" score: we could check the number of objects
+ # - etc.
+
+ return 0
+}
+
+run_step stop_on_fail -1 reset_artifacts
+run_step skip_on_fail 0 get_sources
+run_step skip_on_fail 1 build_chromium
+run_step skip_on_fail 2 test_chromium
+run_step reset_on_fail x check_regression
+
+trap "" EXIT
diff --git a/tcwg_dummy-build.sh b/tcwg_dummy-build.sh
new file mode 100755
index 00000000..ccbc2185
--- /dev/null
+++ b/tcwg_dummy-build.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+set -euf -o pipefail
+
+scripts=$(dirname $0)
+# shellcheck source=jenkins-helpers.sh
+. $scripts/jenkins-helpers.sh
+# shellcheck source=round-robin.sh
+. $scripts/round-robin.sh
+
+convert_args_to_variables "$@"
+obligatory_variables rr[ci_project] rr[ci_config] rr[components] bisect_log
+declare -A rr
+declare bisect_log
+
+rr[mode]="${rr[mode]-build}"
+rr[update_baseline]="${rr[update_baseline]-ignore}"
+rr[baseline_branch]="linaro-local/ci/${rr[ci_project]}/${rr[ci_config]}"
+rr[${rr[components]}_git]=${rr[${rr[components]}_git]-baseline}
+
+# Set start and finish steps for different modes.
+start_at=""
+if [ x"${rr[mode]}" = x"bisect" ]; then
+ start_at="clone_repo-${rr[components]}"
+fi
+run_step_init "$start_at" "" "${rr[top_artifacts]}" "true"
+
+no_regression_p ()
+{
+ local rev
+ rev=$(get_current_git "${rr[components]}_rev")
+ if grep -q "^git bisect bad $rev" "$bisect_log"; then
+ return 1
+ fi
+}
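+
+# Sketch of the expected $bisect_log contents (the output format of
+# "git bisect log"; the hashes are placeholders):
+#   git bisect start
+#   git bisect good 1111111111111111111111111111111111111111
+#   git bisect bad 2222222222222222222222222222222222222222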
+
+run_step stop_on_fail -10 reset_artifacts
+run_step skip_on_fail 0 clone_repo "${rr[components]}"
+run_step reset_on_fail x check_regression
diff --git a/tcwg_gnu-build.sh b/tcwg_gnu-build.sh
index 421f06e5..b3e4c738 100755
--- a/tcwg_gnu-build.sh
+++ b/tcwg_gnu-build.sh
@@ -10,48 +10,102 @@ scripts=$(dirname $0)
convert_args_to_variables "$@"
-obligatory_variables rr[ci_config]
+obligatory_variables rr[ci_project] rr[ci_config]
declare -A rr
-# Execution mode: baseline, bisect, jenkins-full
-rr[mode]="${rr[mode]-baseline}"
+# All configuration (configure flags, simulator, pretty names, ...) is
+# defined in tcwg_gnu-config.sh, sourced below.
+# shellcheck source=tcwg_gnu-config.sh
+. $scripts/tcwg_gnu-config.sh
+
+# Execution mode: build or bisect
+rr[mode]="${rr[mode]-build}"
# Set custom revision for one of the projects, and use baseline revisions
# for all other projects.
-rr[ci_project]="${rr[ci_project]-tcwg_gnu}"
rr[baseline_branch]="${rr[baseline_branch]-linaro-local/ci/${rr[ci_project]}/${rr[ci_config]}}"
-rr[update_baseline]="${rr[update_baseline]-update}"
+rr[update_baseline]="${rr[update_baseline]-ignore}"
rr[top_artifacts]="${rr[top_artifacts]-$(pwd)/artifacts}"
-# Resolve top_artifacts to absolute dir because some of the subsequent
-# processes work below pwd and would write their artifacts in a wrong
-# location
-rr[top_artifacts]=$(abs_path "${rr[top_artifacts]}")
-
-# {toolchain_name}-{toolchain_ver}-{target}-{type_of_test}
+# {toolchain_ver}-{target}[-{type_of_test}]
IFS=- read -a ci_config <<EOF
${rr[ci_config]}
EOF
# Toolchain version -- master or release
-toolchain_ver=${toolchain_ver-${ci_config[1]}}
-# type_of_test contains the type of action to perform in this test
-# campaign: bootstrap, bootstrap_lto, check_binutils, ....
-type_of_test=${type_of_test-${ci_config[3]}}
-
-case "$type_of_test" in
- *_binutils)
+toolchain_ver=${toolchain_ver-${ci_config[0]}}
+ci_target=${ci_target-${ci_config[1]}}
+# optional type_of_test contains the type of action to perform in this test
+# campaign: bootstrap, bootstrap_lto, ...
+type_of_test=${type_of_test-${ci_config[2]-}}
+
+case "${rr[ci_project]}" in
+ tcwg_gcc_*|tcwg_bootstrap_*)
+ rr[target]="${rr[target]-native}"
+ rr[components]="gcc"
+ ;;
+ tcwg_binutils_*)
rr[target]="${rr[target]-native}"
rr[components]="binutils"
;;
- *bootstrap*|*gcc*)
+ tcwg_glibc_*)
rr[target]="${rr[target]-native}"
- rr[components]="binutils gcc"
+ rr[components]="glibc"
+ ;;
+ tcwg_gdb_*)
+ rr[target]="${rr[target]-native}"
+ rr[components]="gdb"
+ ;;
+ tcwg_gnu_native_*)
+ rr[target]="${rr[target]-native}"
+ rr[components]="binutils gcc linux glibc gdb"
+ ;;
+ tcwg_gnu_cross_*)
+ rr[target]="${rr[target]-$ci_target}"
+ rr[components]="binutils gcc linux glibc gdb qemu"
;;
- *_cross)
- rr[target]="${rr[target]-${ci_config[2]}}"
- rr[components]="binutils gcc glibc qemu"
+ tcwg_gnu_embed_*)
+ rr[target]="${rr[target]-$ci_target}"
+ rr[components]="binutils gcc newlib gdb qemu"
+ ;;
+ tcwg_gnu_mingw_*)
+ rr[target]="${rr[target]-$ci_target}"
+ rr[components]="binutils gcc mingw"
+ ;;
+ *) assert_with_msg "Unknown ci_project: ${rr[ci_project]}" false ;;
+esac
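+# e.g. tcwg_gnu_native_check_gdb matches tcwg_gnu_native_* above and gets
+# the full native component set "binutils gcc linux glibc gdb".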
+
+rr[major]=3
+rr[minor]=0
+
+case "${rr[ci_project]}" in
+ tcwg_gnu_native_fast_check_gdb)
+ # Tests that run quickly and have stable results. This list is taken from
+ # Sourceware's builder:
+ # https://sourceware.org/git/?p=builder.git;a=blob;f=builder/master.cfg;h=612b3177f0e5#l2407
+ testsuites=(gdb.base/break-always.exp
+ gdb.base/break-caller-line.exp
+ gdb.base/break-entry.exp
+ gdb.base/break.exp
+ gdb.base/break-fun-addr.exp
+ gdb.base/break-idempotent.exp
+ gdb.base/break-include.exp
+ gdb.base/break-inline.exp
+ gdb.base/break-main-file-remove-fail.exp
+ gdb.base/break-on-linker-gcd-function.exp
+ gdb.base/breakpoint-in-ro-region.exp
+ gdb.base/breakpoint-shadow.exp
+ gdb.base/break-probes.exp
+ gdb.gdb/unittest.exp
+ )
+ ;;
+
+ tcwg_gnu_native_fast_check_gcc)
+ # Use a small subset of GCC testsuite until we have made the
+ # results stable.
+ testsuites=(compile.exp
+ execute.exp
+ )
;;
- *) assert_with_msg "Unknown type_of_test: $type_of_test" false ;;
esac
runtestflags=()
@@ -80,21 +134,22 @@ trap print_traceback EXIT
default_start_at=""
default_finish_at=""
case "${rr[mode]}" in
- "baseline")
- default_finish_at="update_baseline"
- ;;
"bisect")
- case "$(print_single_updated_component):$type_of_test" in
+ case "$(print_single_updated_component):${rr[ci_project]}" in
+ *:tcwg_gnu_mingw_*) default_start_at="clean_sysroot" ;;
binutils:*) default_start_at="build_abe-binutils" ;;
- gcc:*_cross) default_start_at="build_abe-stage1" ;;
- gcc:*) default_start_at="build_abe-${type_of_test#check_}" ;;
- glibc:*) default_start_at="build_abe-glibc" ;;
+ gcc:tcwg_gnu_embed_*) default_start_at="build_abe-stage1" ;;
+ gcc:tcwg_gnu_cross_*) default_start_at="build_abe-stage1" ;;
+ gcc:tcwg_gnu_native_*) default_start_at="build_abe-gcc" ;;
+ gcc:tcwg_gcc_*) default_start_at="build_abe-gcc" ;;
+ gcc:tcwg_bootstrap_*) default_start_at="build_abe-${type_of_test#check_}" ;;
+ glibc:tcwg_glibc_*) default_start_at="build_abe-glibc" ;;
+ linux:*|glibc:*|newlib:*) default_start_at="clean_sysroot" ;;
+ gdb:*) default_start_at="build_abe-gdb" ;;
qemu:*) default_start_at="build_abe-qemu" ;;
*) assert_with_msg "Trying to bisecting unknown component: $(print_single_updated_component)" false ;;
esac
- default_finish_at="check_regression"
;;
- "jenkins-full") ;;
esac
if [ x"$start_at" = x"default" ]; then
start_at="$default_start_at"
@@ -105,6 +160,86 @@ fi
run_step_init "$start_at" "$finish_at" "${rr[top_artifacts]}" "$verbose"
+# Initialize sysroot directory in ABE's build/ with system headers
+# so that [native] glibc can find headers to build against.
+init_abe_sysroot ()
+{
+ (
+ set -euf -o pipefail
+
+ local abe_sysroot gnu_target
+ abe_sysroot=$(print_abe_sysroot)
+ gnu_target=$(print_gnu_target "$ci_target")
+ rm -rf "$abe_sysroot/usr/include/"
+ mkdir -p "$abe_sysroot/usr/include/"
+ rsync -a "/usr/include/" "$abe_sysroot/usr/include/"
+ # Debian/Ubuntu have arch headers under target-specific directory.
+ rsync -a "/usr/include/$gnu_target/" "$abe_sysroot/usr/include/"
+ )
+}
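+
+# e.g. for a native aarch64 build this copies /usr/include/ plus the
+# Debian/Ubuntu multiarch headers from /usr/include/aarch64-linux-gnu/
+# into ABE's sysroot.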
+
+# Build mingw parts: headers, crt and libs.
+build_mingw ()
+{
+ (
+ set -euf -o pipefail
+ local todo="$1"
+
+ if [ "$todo" = "headers" ]; then
+ clone_repo mingw
+ fi
+
+ local abe_sysroot gnu_host gnu_target
+ abe_sysroot=$(print_abe_sysroot)
+ gnu_host=$(print_gnu_target native)
+ gnu_target=$(print_gnu_target "$ci_target")
+
+ PATH="$(pwd)/abe/builds/destdir/$gnu_host/bin:$PATH"
+ export PATH
+
+ local dir="mingw-$todo"
+ rm -rf "$dir"
+ mkdir "$dir"
+ cd "$dir"
+
+ if [ "$todo" = "headers" ]; then
+ ../mingw/mingw-w64-headers/configure \
+ --prefix="$abe_sysroot" --host="$gnu_target"
+ elif [ "$todo" = "crt" ]; then
+ ../mingw/mingw-w64-crt/configure \
+ --prefix="$abe_sysroot" --host="$gnu_target" \
+ --enable-libarm64 \
+ --disable-lib32 \
+ --disable-lib64 \
+ --disable-libarm32 \
+ --disable-shared \
+ --with-default-msvcrt=msvcrt
+ elif [ "$todo" = "libs" ]; then
+ ../mingw/configure \
+ --prefix="$abe_sysroot" --host="$gnu_target" \
+ --enable-libarm64 \
+ --disable-lib32 \
+ --disable-lib64 \
+ --disable-libarm32 \
+ --disable-shared \
+ --with-libraries=winpthreads \
+ --with-default-msvcrt=msvcrt
+ fi
+
+ make -j"$(nproc --all)"
+ make install
+
+ if [ "$todo" = "headers" ]; then
+ # ??? I'm not sure whether this is a hack or an adorable peculiarity
+ # of building a mingw toolchain. The point of this symlink is that
+ # it's easiest for the stage1 GCC mingw compiler to find headers in
+ # the mingw/ directory instead of the actual sysroot.
+ rm -rf "$abe_sysroot/../mingw"
+ ln -s "$abe_sysroot" "$abe_sysroot/../mingw"
+ fi
+ )
+}
+
# Exit with code 0 if no regression compared to base-artifacts/results.
no_regression_p ()
{
@@ -119,119 +254,271 @@ no_regression_p ()
local sumfiles_base=$ref_artifacts/sumfiles
local sumfiles_new=$new_artifacts/sumfiles
+ local xfails="$sumfiles_new/xfails.xfail"
+
+ # Remove files generated here (in case we are re-generating results in
+ # round-robin-baseline.sh).
+ rm -f "$xfails"
+
if ! [ -d $sumfiles_base ]; then
return 0
elif ! [ -d $sumfiles_new ]; then
return 1
fi
- local res
-
- # We use our modified version of GCC's comparison script
- clone_or_update_repo gcc-compare-results master https://git.linaro.org/toolchain/gcc-compare-results.git
-
- # (defined by init_step in jenkins-helpers)
- # shellcheck disable=SC2154
- gcc-compare-results/compare_tests -compr none -pass-thresh 0.9 \
- $sumfiles_base $sumfiles_new \
- | tee $run_step_artifacts/results.compare1 &
- res=0 && wait $! || res=$?
-
- local xfail="gcc-compare-results/contrib/testsuite-management/flaky"
- if [ -f "$xfail/${rr[ci_config]}.xfail" ]; then
- xfail="$xfail/${rr[ci_config]}"
- fi
+ cat > "$xfails" <<EOF
+# This file contains three sections:
+# - newly detected flaky tests (if any)
+# - known flaky tests (from baseline)
+# - known failures (from baseline)
+#
+EOF
- local ignore_ERRORs_opt=""
- if [ ${#runtestflags[@]} != 0 ]; then
- # We are running a subset of the testsuite, which might generate
- # ERRORs in GCC testsuites that will have no tests to run --
- # ignore these ERRORs, because they are unstable from run to run.
- ignore_ERRORs_opt="--ignore_ERRORs"
+ local flaky_tests="$run_step_artifacts/flaky.xfail"
+ local baseline_fails="$run_step_artifacts/baseline.xfail"
+ build_abe_check_xfails "$flaky_tests" "$baseline_fails"
+
+ # Add known flaky tests and baseline_fails to the xfails.
+ #
+ # Note #1: We generate $baseline_fails without regard for flaky
+ # tests. Therefore, validate_failures in no_regression_p() will
+ # see same tests with and without flaky attributes.
+ # validate_failures uses Python sets to store results, so the first
+ # entry wins. Therefore, we need to put lists of flaky tests before
+ # lists of expected fails -- $baseline_fails.
+ #
+ # Note #2: Order of expired and non-expired flaky tests is less of
+ # an issue because expired entries are discarded by validate_failures
+ # before adding them to the ResultSet. Still, for uniformity, we
+ # put fresh flaky entries before older ones.
+ cat "$flaky_tests" >> "$xfails"
+ echo "# Known failures (from baseline)" >> "$xfails"
+ cat "$baseline_fails" >> "$xfails"
+
+ # Set and save result_expiry_date so that we use the same expiration date
+ # when re-generating results in round-robin-baseline.sh.
+ if [ "${rr[result_expiry_date]-}" = "" ]; then
+ rr[result_expiry_date]=$(date +%Y%m%d)
+ # finalize_manifest() will not see declaration because we are running
+ # in a sub-shell. Update manifest manually.
+ cat <<EOF | manifest_out
+rr[result_expiry_date]="${rr[result_expiry_date]}"
+EOF
fi
- gcc-compare-results/contrib/testsuite-management/validate_failures.py \
- --manifest=$xfail.xfail --clean_build=$sumfiles_base \
- --build_dir=$sumfiles_new $ignore_ERRORs_opt \
- | tee $run_step_artifacts/results.compare2 &
+ local validate_failures="gcc-compare-results/contrib/testsuite-management/validate_failures.py"
+ "$validate_failures" --manifest="$xfails" \
+ --expiry_date="${rr[result_expiry_date]}" \
+ --build_dir=$sumfiles_new --verbosity=1 \
+ > $run_step_artifacts/fails.sum &
+ local res
res=0 && wait $! || res=$?
if [ $res != 0 ]; then
- local reg_lines
- for i in 1 2; do
- reg_lines=$(cat $run_step_artifacts/results.compare$i | wc -l)
- reg_lines=$(($reg_lines-100))
- cat $run_step_artifacts/results.compare$i | sed -e "s/^/# /" \
- | (head -n100; cat >/dev/null) \
- > $run_step_artifacts/results.regressions
- if [ $reg_lines -gt 0 ]; then
- echo "# ... and $reg_lines more entries" \
- >> $run_step_artifacts/results.regressions
- fi
- done
-
- local res1
- gcc-compare-results/contrib/testsuite-management/validate_failures.py \
- --manifest=$xfail.xfail --clean_build=$sumfiles_base \
- --build_dir=$sumfiles_new $ignore_ERRORs_opt --verbosity=1 \
- > $run_step_artifacts/fails.sum &
- res1=0 && wait $! || res1=$?
- assert_with_msg "Result comparison should have failed" \
- [ $res1 = $res ]
-
- printf "extra_build_params=" > $run_step_artifacts/extra-bisect-params
- local exp
- while read exp; do
- printf "++testsuites %s " $exp >> $run_step_artifacts/extra-bisect-params
- done < <(cat $run_step_artifacts/fails.sum \
- | awk '/^Running .* \.\.\./ { print $2 }')
- printf "\n" >> $run_step_artifacts/extra-bisect-params
+ # Add a short marker to record the status (used by Jenkins build-name)
+ local n_regressions
+ n_regressions=$(grep -c "^[A-Z]\+:" $run_step_artifacts/fails.sum \
+ || true)
+ echo "# $n_regressions regressions" \
+ > $run_step_artifacts/results.regressions
+
+ if [ $res = 2 ]; then
+ # Result comparison found regressions (exit code 2)
+ #
+ # Exit code 1 means that the script has failed to process
+ # .sum files. This likely indicates malformed or very unusual
+ # results.
+
+ printf "extra_build_params=" > $run_step_artifacts/extra-bisect-params
+ local exp
+ while read exp; do
+ printf "++testsuites %s " $exp >> $run_step_artifacts/extra-bisect-params
+ done < <(cat $run_step_artifacts/fails.sum \
+ | awk '/^Running .* \.\.\./ { print $2 }')
+ printf "\n" >> $run_step_artifacts/extra-bisect-params
+ fi
fi
return $res
)
}
+# Implement rr[breakup_changed_components] hook.
+tcwg_gnu_breakup_changed_components ()
+{
+ (
+ set -euf -o pipefail
+
+ local cc=""
+ case "${rr[ci_project]}" in
+ *_check_*)
+ # Changes to "foo" of check_foo projects tend to cause the most
+ # regressions.
+ # Breakup changed components into "foo" and the rest of components
+ # to reduce the number of builds.
+ cc=$(echo "${rr[ci_project]}" | sed -e "s/.*_check_\(.*\)/\1/")
+ ;;
+ esac
+
+ if print_changed_components "\n" | grep "^$cc\$" >/dev/null; then
+ echo "$cc"
+ print_changed_components "\n" | grep -v "^$cc\$" | tr '\n' ' ' \
+ | sed -e "s/ \$//g"
+ echo
+ else
+ print_changed_components "\n"
+ fi
+ )
+}
+rr[breakup_changed_components]=tcwg_gnu_breakup_changed_components
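+# e.g. when gdb is among the changed components of tcwg_gnu_native_check_gdb,
+# the hook prints "gdb" on one line and the remaining changed components on
+# a second line, so the gdb change is tried separately from the rest.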
+
+# Define gnu_data[] as needed below
+settings_for_ci_project_and_config "${rr[ci_project]}" "${rr[ci_config]}"
+
run_step stop_on_fail -10 reset_artifacts
run_step stop_on_fail x prepare_abe
-case "$type_of_test" in
- build_cross)
+case "${rr[ci_project]}" in
+ tcwg_gnu_cross_build|tcwg_gnu_embed_build)
+ run_step skip_on_fail 0 true
+ run_step skip_on_fail 1 build_abe binutils
+ run_step skip_on_fail 2 build_abe stage1 -- \
+ ${gnu_data[gcc_override_configure]}
+ run_step skip_on_fail x clean_sysroot
+ case "${rr[components]}" in
+ *glibc*)
+ run_step skip_on_fail 3 build_abe linux
+ run_step skip_on_fail 4 build_abe glibc
+ ;;
+ *newlib*)
+ run_step skip_on_fail 4 build_abe newlib
+ ;;
+ esac
+ run_step skip_on_fail 5 build_abe stage2 -- \
+ ${gnu_data[gcc_override_configure]}
+ run_step skip_on_fail 6 build_abe gdb
+ run_step skip_on_fail 7 build_abe qemu
+ ;;
+ tcwg_gnu_cross_check_*|tcwg_gnu_embed_check_*)
+ run_step skip_on_fail -8 build_abe binutils
+ run_step skip_on_fail -7 build_abe stage1 -- \
+ ${gnu_data[gcc_override_configure]}
+ run_step skip_on_fail x clean_sysroot
+ case "${rr[components]}" in
+ *glibc*)
+ run_step skip_on_fail -6 build_abe linux
+ run_step skip_on_fail -5 build_abe glibc
+ ;;
+ *newlib*)
+ run_step skip_on_fail -5 build_abe newlib
+ ;;
+ esac
+ run_step skip_on_fail -4 build_abe stage2 -- \
+ ${gnu_data[gcc_override_configure]}
+ run_step skip_on_fail -3 build_abe gdb
+ run_step skip_on_fail -2 build_abe qemu
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe "check_${rr[ci_project]#*check_}" -- "${runtestflags[@]}" \
+ ${gnu_data[gcc_override_configure]} \
+ ${gnu_data[gcc_target_board_options]} \
+ ${gnu_data[qemu_cpu]}
+ ;;
+ tcwg_gnu_native_build)
run_step skip_on_fail 0 true
run_step skip_on_fail 1 build_abe binutils
- run_step skip_on_fail 2 build_abe stage1
- run_step skip_on_fail 3 build_abe linux
- run_step skip_on_fail 4 build_abe glibc
- run_step skip_on_fail 5 build_abe stage2
- run_step skip_on_fail 6 build_abe qemu
+ run_step skip_on_fail 2 build_abe gcc
+ run_step skip_on_fail x clean_sysroot
+ run_step skip_on_fail 4 build_abe linux
+ run_step skip_on_fail 5 build_abe glibc
+ run_step skip_on_fail 6 build_abe gdb
;;
- check_cross)
+ tcwg_gnu_native_check_*|tcwg_gnu_native_fast_check_*)
+ component="${rr[ci_project]#tcwg_gnu_native_}"
+ component="${component#fast_}"
+
+ declare -a abe_arguments=()
+ if [ "${component}" = check_gdb ]; then
+ abe_arguments=(--set check_buffer_workaround=gdb-read1)
+ fi
+
run_step skip_on_fail -8 build_abe binutils
- run_step skip_on_fail -7 build_abe stage1
- run_step skip_on_fail -6 build_abe linux
- run_step skip_on_fail -5 build_abe glibc
- run_step skip_on_fail -4 build_abe stage2
- run_step skip_on_fail -3 build_abe qemu
- run_step skip_on_fail 0 build_abe dejagnu
- run_step skip_on_fail 1 build_abe check_gcc -- "${runtestflags[@]}"
- ;;
- check_binutils)
- run_step skip_on_fail -2 build_abe binutils
- run_step skip_on_fail 0 build_abe dejagnu
- run_step skip_on_fail 1 build_abe check_binutils -- "${runtestflags[@]}"
- ;;
- check_gcc*|check_bootstrap*)
- run_step skip_on_fail -2 build_abe binutils
- run_step skip_on_fail -1 build_abe ${type_of_test#check_}
- run_step skip_on_fail 0 build_abe dejagnu
- run_step skip_on_fail 1 build_abe ${type_of_test} -- "${runtestflags[@]}"
- ;;
- *)
+ run_step skip_on_fail -7 build_abe gcc
+ run_step skip_on_fail x clean_sysroot
+ run_step skip_on_fail -5 build_abe linux
+ run_step skip_on_fail -4 build_abe glibc
+ run_step skip_on_fail -3 build_abe gdb
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe "$component" -- "${runtestflags[@]}" \
+ "${abe_arguments[@]}"
+ ;;
+ tcwg_bootstrap_build)
run_step skip_on_fail 0 true
run_step skip_on_fail 1 build_abe ${type_of_test}
;;
+ tcwg_bootstrap_check)
+ run_step skip_on_fail -2 build_abe ${type_of_test#check_}
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe ${type_of_test} -- "${runtestflags[@]}"
+ ;;
+ tcwg_binutils_build|tcwg_gcc_build|tcwg_gdb_build)
+ run_step skip_on_fail 0 true
+ run_step skip_on_fail 1 build_abe "${rr[components]}"
+ ;;
+ tcwg_binutils_check|tcwg_gcc_check)
+ run_step skip_on_fail -2 build_abe "${rr[components]}"
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe "check_${rr[components]}" \
+ -- "${runtestflags[@]}"
+ ;;
+ tcwg_gdb_check)
+ # GDB's testsuite has a limitation where it can only find debug info
+ # within the installation prefix. To allow it to find the
+ # distro-installed debug info for ld.so, use /usr as the prefix. We
+ # don't need to actually install GDB there though, so disable the
+ # install step.
+ run_step skip_on_fail -2 build_abe "${rr[components]}" \
+ -- --prefix /usr --disable install
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe "check_${rr[components]}" \
+ -- "${runtestflags[@]}" --set check_buffer_workaround=gdb-read1
+ ;;
+ tcwg_glibc_build)
+ run_step skip_on_fail 0 init_abe_sysroot
+ # ABE tries to copy gcc runtime libraries on glibc install, which
+ # fails when we don't build gcc. Workaround by not installing glibc.
+ run_step skip_on_fail 1 build_abe glibc -- --disable install
+ ;;
+ tcwg_glibc_check)
+ run_step skip_on_fail -3 init_abe_sysroot
+ run_step skip_on_fail -2 build_abe glibc -- --disable install
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe check_glibc \
+ -- "${runtestflags[@]}" --disable install
+ ;;
+ tcwg_gnu_mingw_build)
+ run_step skip_on_fail 0 true
+ run_step skip_on_fail x clean_sysroot
+ run_step skip_on_fail 1 build_abe binutils
+ run_step skip_on_fail 2 build_mingw headers
+ run_step skip_on_fail 3 build_abe stage1
+ run_step skip_on_fail 4 build_mingw crt
+ run_step skip_on_fail 5 build_mingw libs
+ run_step skip_on_fail 6 build_abe stage2
+ #run_step skip_on_fail 7 build_abe gdb
+ ;;
+ tcwg_gnu_mingw_check_*)
+ run_step skip_on_fail x clean_sysroot
+ run_step skip_on_fail -8 build_abe binutils
+ run_step skip_on_fail -7 build_mingw headers
+ run_step skip_on_fail -6 build_abe stage1
+ run_step skip_on_fail -5 build_mingw crt
+ run_step skip_on_fail -4 build_mingw libs
+ run_step skip_on_fail -3 build_abe stage2
+ #run_step skip_on_fail -2 build_abe gdb
+ run_step skip_on_fail -1 build_abe dejagnu
+ run_step skip_on_fail 0 build_abe "${rr[ci_project]#tcwg_gnu_mingw_}" \
+ -- "${runtestflags[@]}"
+ ;;
esac
run_step reset_on_fail x check_regression
-run_step stop_on_fail x update_baseline
-run_step stop_on_fail x push_baseline
trap "" EXIT
diff --git a/tcwg_gnu-config.sh b/tcwg_gnu-config.sh
new file mode 100644
index 00000000..489ffc7f
--- /dev/null
+++ b/tcwg_gnu-config.sh
@@ -0,0 +1,177 @@
+#!/bin/bash
+
+declare -A gnu_data
+
+# Populate gcc_override_configure, gcc_target_board_options, qemu_cpu,
+# pretty_project, pretty_config, long_config depending on CI project
+# and config
+#
+# $1: ci_project $2: ci_config
+settings_for_ci_project_and_config ()
+{
+ local project="$1"
+ local config="$2"
+
+ gnu_data[gcc_override_configure]=""
+ gnu_data[gcc_target_board_options]=""
+ gnu_data[qemu_cpu]=""
+ gnu_data[pretty_project]=""
+ gnu_data[pretty_config]="$config"
+ gnu_data[long_config]="$config"
+
+ # Build the right libs depending on the target. Hardcode the
+ # arch/cpu/float-abi/mode to speed up toolchain builds: the
+ # alternative of building rmprofile multilibs takes a very long
+ # time. Setting the values here has the drawback that we have to build
+ # one toolchain per target_board type.
+ # Also force the corresponding options via target_board_options. This
+ # mimics how several vendors test the toolchain.
+ case "$project/$config" in
+ tcwg_gnu_embed*/*-arm_v7a_soft_eabi)
+ # Better to test full soft-float here (hard-float is covered by the Thumb configs below).
+ gnu_data[gcc_override_configure]="\
+ --set gcc_override_configure=--disable-multilib \
+ --set gcc_override_configure=--with-mode=arm \
+ --set gcc_override_configure=--with-arch=armv7-a \
+ --set gcc_override_configure=--with-fpu=vfpv3-d16 \
+ --set gcc_override_configure=--with-float=soft"
+ gnu_data[gcc_target_board_options]="\
+ --set target_board_options={-marm/-march=armv7-a/-mfpu=auto/-mfloat-abi=soft}"
+ gnu_data[qemu_cpu]="--qemu-cpu cortex-a9"
+ gnu_data[pretty_config]="arm-eabi v7-a soft"
+ gnu_data[long_config]="arm-eabi -marm -march=armv7-a -mfpu=auto -mfloat-abi=soft"
+ ;;
+
+ tcwg_gnu_embed*/*-arm_v7a_softfp_eabi)
+ gnu_data[gcc_override_configure]="\
+ --set gcc_override_configure=--disable-multilib \
+ --set gcc_override_configure=--with-mode=arm \
+ --set gcc_override_configure=--with-arch=armv7-a \
+ --set gcc_override_configure=--with-fpu=vfpv3-d16 \
+ --set gcc_override_configure=--with-float=softfp"
+ gnu_data[gcc_target_board_options]="\
+ --set target_board_options={-marm/-march=armv7-a/-mfpu=vfpv3-d16/-mfloat-abi=softfp}"
+ gnu_data[qemu_cpu]="--qemu-cpu cortex-a9"
+ gnu_data[pretty_config]="arm-eabi v7-a softfp"
+ gnu_data[long_config]="arm-eabi -marm -march=armv7-a -mfpu=vfpv3-d16 -mfloat-abi=softfp"
+ ;;
+
+ tcwg_gnu_embed*/*-thumb_v8a_hard_eabi)
+ gnu_data[gcc_override_configure]="\
+ --set gcc_override_configure=--disable-multilib \
+ --set gcc_override_configure=--with-mode=thumb \
+ --set gcc_override_configure=--with-arch=armv8-a+simd \
+ --set gcc_override_configure=--with-fpu=crypto-neon-fp-armv8 \
+ --set gcc_override_configure=--with-float=hard"
+ gnu_data[gcc_target_board_options]="\
+ --set target_board_options={-mthumb/-march=armv8-a+simd/-mfpu=auto/-mfloat-abi=hard}"
+ # FIXME The most recent A-profile CPU known by qemu is
+ # cortex-a15, which is v7-a. Use "any" so that it
+ # supports v8-a instructions.
+ gnu_data[qemu_cpu]="--qemu-cpu any"
+ gnu_data[pretty_config]="arm-eabi thumb v8-a hard"
+ gnu_data[long_config]="arm-eabi -mthumb -march=armv8-a+simd -mfpu=auto -mfloat-abi=hard"
+ ;;
+
+ tcwg_gnu_embed*/*-thumb_m0_eabi)
+ gnu_data[gcc_override_configure]="\
+ --set gcc_override_configure=--disable-multilib \
+ --set gcc_override_configure=--with-mode=thumb \
+ --set gcc_override_configure=--with-cpu=cortex-m0 \
+ --set gcc_override_configure=--with-float=soft"
+ gnu_data[gcc_target_board_options]="\
+ --set target_board_options={-mthumb/-march=armv6s-m/-mtune=cortex-m0/-mfloat-abi=soft/-mfpu=auto}"
+ gnu_data[qemu_cpu]="--qemu-cpu cortex-m0"
+ gnu_data[pretty_config]="arm-eabi cortex-m0 soft"
+ gnu_data[long_config]="arm-eabi -mthumb -march=armv6s-m -mtune=cortex-m0 -mfloat-abi=soft -mfpu=auto"
+ ;;
+
+ tcwg_gnu_embed*/*-thumb_m3_eabi)
+ # No v7-m variants support FP, so softfp is meaningless (though it should still work).
+ gnu_data[gcc_override_configure]="\
+ --set gcc_override_configure=--disable-multilib \
+ --set gcc_override_configure=--with-mode=thumb \
+ --set gcc_override_configure=--with-cpu=cortex-m3 \
+ --set gcc_override_configure=--with-float=softfp"
+ gnu_data[gcc_target_board_options]="\
+ --set target_board_options={-mthumb/-march=armv7-m/-mtune=cortex-m3/-mfloat-abi=softfp/-mfpu=auto}"
+ gnu_data[qemu_cpu]="--qemu-cpu cortex-m3"
+ gnu_data[pretty_config]="arm-eabi cortex-m3 softfp"
+ gnu_data[long_config]="arm-eabi -mthumb -march=armv7-m -mtune=cortex-m3 -mfloat-abi=softfp -mfpu=auto"
+ ;;
+
+ tcwg_gnu_embed*/*-thumb_m7_eabi)
+ gnu_data[gcc_override_configure]="\
+ --set gcc_override_configure=--disable-multilib \
+ --set gcc_override_configure=--with-mode=thumb \
+ --set gcc_override_configure=--with-cpu=cortex-m7 \
+ --set gcc_override_configure=--with-float=hard"
+ gnu_data[gcc_target_board_options]="\
+ --set target_board_options={-mthumb/-march=armv7e-m+fp.dp/-mtune=cortex-m7/-mfloat-abi=hard/-mfpu=auto}"
+ gnu_data[qemu_cpu]="--qemu-cpu cortex-m7"
+ gnu_data[pretty_config]="arm-eabi cortex-m7 hard"
+ gnu_data[long_config]="arm-eabi -mthumb -march=armv7e-m+fp.dp -mtune=cortex-m7 -mfloat-abi=hard -mfpu=auto"
+ ;;
+
+ tcwg_gnu_embed*/*-thumb_m23_eabi)
+ gnu_data[gcc_override_configure]="\
+ --set gcc_override_configure=--disable-multilib \
+ --set gcc_override_configure=--with-mode=thumb \
+ --set gcc_override_configure=--with-cpu=cortex-m23 \
+ --set gcc_override_configure=--with-float=soft"
+ gnu_data[gcc_target_board_options]="\
+ --set target_board_options={-mthumb/-march=armv8-m.base/-mtune=cortex-m23/-mfloat-abi=soft/-mfpu=auto}"
+ # qemu does not support cortex-m23; use cortex-m33, whose v8-m.main is a superset of v8-m.base.
+ gnu_data[qemu_cpu]="--qemu-cpu cortex-m33"
+ gnu_data[pretty_config]="arm-eabi cortex-m23 soft"
+ gnu_data[long_config]="arm-eabi -mthumb -march=armv8-m.base -mtune=cortex-m23 -mfloat-abi=soft -mfpu=auto"
+ ;;
+
+ tcwg_gnu_embed*/*-thumb_m33_eabi)
+ gnu_data[gcc_override_configure]="\
+ --set gcc_override_configure=--disable-multilib \
+ --set gcc_override_configure=--with-mode=thumb \
+ --set gcc_override_configure=--with-cpu=cortex-m33 \
+ --set gcc_override_configure=--with-float=hard"
+ gnu_data[gcc_target_board_options]="\
+ --set target_board_options={-mthumb/-march=armv8-m.main+dsp+fp/-mtune=cortex-m33/-mfloat-abi=hard/-mfpu=auto}"
+ gnu_data[qemu_cpu]="--qemu-cpu cortex-m33"
+ gnu_data[pretty_config]="arm-eabi cortex-m33 hard"
+ gnu_data[long_config]="arm-eabi -mthumb -march=armv8-m.main+dsp+fp -mtune=cortex-m33 -mfloat-abi=hard -mfpu=auto"
+ ;;
+
+ tcwg_gnu_embed*/*-thumb_m55_hard_eabi)
+ gnu_data[gcc_override_configure]="\
+ --set gcc_override_configure=--disable-multilib \
+ --set gcc_override_configure=--with-mode=thumb \
+ --set gcc_override_configure=--with-arch=armv8.1-m.main+mve.fp+fp.dp \
+ --set gcc_override_configure=--with-float=hard"
+ gnu_data[gcc_target_board_options]="\
+ --set target_board_options={-mthumb/-march=armv8.1-m.main+mve.fp+fp.dp/-mtune=cortex-m55/-mfloat-abi=hard/-mfpu=auto}"
+ gnu_data[qemu_cpu]="--qemu-cpu cortex-m55"
+ # shellcheck disable=SC2034
+ gnu_data[pretty_config]="arm-eabi cortex-m55 hard"
+ gnu_data[long_config]="arm-eabi -mthumb -march=armv8.1-m.main+mve.fp+fp.dp -mtune=cortex-m55 -mfloat-abi=hard -mfpu=auto"
+ ;;
+
+ tcwg_gnu_mingw*/*)
+ # Only the mingw thread library (not cygwin) is supported at the moment,
+ # so use --enable-threads=win32.
+ # FIXME: Add --disable-shared as a workaround for GCC currently
+ # failing to build several shared libraries.
+ gnu_data[gcc_override_configure]="\
+ --set gcc_override_configure=--enable-threads=win32 \
+ --set gcc_override_configure=--disable-shared"
+ ;;
+
+ tcwg_gnu*/*|tcwg_gcc/*)
+ ;;
+
+ */*)
+ # Remove useless leading "tcwg_"
+ # shellcheck disable=SC2034
+ gnu_data[pretty_project]="${project#tcwg_}"
+ ;;
+
+ esac
+}
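A usage sketch (the caller below is hypothetical; the real consumers are the tcwg_gnu_embed job scripts): a script sources this file and reads the populated gnu_data entries.

    . tcwg_gnu-config.sh
    settings_for_ci_project_and_config tcwg_gnu_embed_build master-thumb_m7_eabi

    echo "${gnu_data[pretty_config]}"  # arm-eabi cortex-m7 hard
    echo "${gnu_data[qemu_cpu]}"       # --qemu-cpu cortex-m7
    # gnu_data[gcc_override_configure] is a whitespace-separated chain of
    # "--set gcc_override_configure=..." pairs, meant to be word-split when
    # expanded into an ABE command line.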
diff --git a/tcwg_kernel-build.sh b/tcwg_kernel-build.sh
index bb923767..8eaaef5e 100755
--- a/tcwg_kernel-build.sh
+++ b/tcwg_kernel-build.sh
@@ -11,38 +11,28 @@ scripts=$(dirname $0)
convert_args_to_variables "$@"
obligatory_variables rr[ci_config]
+declare -A rr
-# Execution mode: baseline, bisect, jenkins-full
-# shellcheck disable=SC2154
-rr[mode]="${rr[mode]-baseline}"
+# Execution mode: build or bisect
+rr[mode]="${rr[mode]-build}"
# Set custom revision for one of the projects, and use baseline revisions
# for all other projects.
-# shellcheck disable=SC2154
rr[ci_project]="${rr[ci_project]-tcwg_kernel}"
-# shellcheck disable=SC2154
rr[baseline_branch]="${rr[baseline_branch]-linaro-local/ci/${rr[ci_project]}/${rr[ci_config]}}"
-# shellcheck disable=SC2154
-rr[update_baseline]="${rr[update_baseline]-update}"
-# shellcheck disable=SC2154
+rr[update_baseline]="${rr[update_baseline]-ignore}"
rr[top_artifacts]="${rr[top_artifacts]-$(pwd)/artifacts}"
# {toolchain_name}-{toolchain_ver}-{target}-{linux}-{linux_config}
IFS=- read -a ci_config <<EOF
${rr[ci_config]}
EOF
-# shellcheck disable=SC2154
rr[toolchain]=${rr[toolchain]-${ci_config[0]}}
-# shellcheck disable=SC2154
-rr[release]=${rr[release]-${ci_config[1]}}
-# shellcheck disable=SC2154
rr[target]=${rr[target]-${ci_config[2]}}
-# shellcheck disable=SC2154
-rr[linux_config]=${rr[linux_config]-${ci_config[4]}}
+linux_config=${linux_config-${ci_config[4]}}
case "${rr[toolchain]}" in
llvm)
- # shellcheck disable=SC2154
rr[components]="binutils llvm linux qemu" ;;
gnu)
rr[components]="binutils gcc linux qemu" ;;
@@ -67,21 +57,16 @@ trap print_traceback EXIT
default_start_at=""
default_finish_at=""
case "${rr[mode]}" in
- "baseline")
- default_finish_at="update_baseline"
- ;;
"bisect")
- case "${rr[toolchain]}:$(print_single_updated_component)" in
- gnu:binutils) default_start_at="build_abe-binutils" ;;
- gnu:gcc) default_start_at="build_abe-stage1" ;;
- llvm:llvm) default_start_at="build_llvm" ;;
- *:linux) default_start_at="build_linux" ;;
- *:qemu) default_start_at="build_abe-qemu" ;;
+ case "$(print_single_updated_component)" in
+ binutils) default_start_at="build_abe-binutils" ;;
+ gcc) default_start_at="build_abe-stage1" ;;
+ llvm) default_start_at="build_kernel_llvm" ;;
+ linux) default_start_at="build_linux" ;;
+ qemu) default_start_at="build_abe-qemu" ;;
*) assert false ;;
esac
- default_finish_at="check_regression"
;;
- "jenkins-full") ;;
esac
if [ x"$start_at" = x"default" ]; then
start_at="$default_start_at"
@@ -92,6 +77,30 @@ fi
run_step_init "$start_at" "$finish_at" "${rr[top_artifacts]}" "$verbose"
+# Build LLVM for building the Linux kernel
+build_kernel_llvm ()
+{
+ (
+ set -euf -o pipefail
+
+ local host target extra_targets=""
+
+ host=$(print_llvm_target native)
+ target=$(print_llvm_target ${rr[target]})
+
+ if [ x"$target" != x"$host" ]; then
+ # We need the $host target for HOSTCC
+ extra_targets="$extra_targets;$host"
+ fi
+ if [ x"$target" = x"AArch64" ]; then
+ # We need the 32-bit ARM target to compile vdso32
+ extra_targets="$extra_targets;ARM"
+ fi
+
+ build_llvm "clang;lld" "$extra_targets"
+ )
+}
+
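Assuming print_llvm_target maps uname-style names to LLVM backend names (native on an x86_64 host to X86, aarch64 to AArch64), the extra-targets logic above can be traced by hand for an x86_64 host targeting aarch64:

    # Illustrative trace of build_kernel_llvm's target selection:
    host=X86; target=AArch64; extra_targets=""
    [ "$target" != "$host" ] && extra_targets=";$host"              # HOSTCC needs X86
    [ "$target" = "AArch64" ] && extra_targets="$extra_targets;ARM" # vdso32 needs ARM
    echo "clang;lld with extra targets '$extra_targets'"            # ';X86;ARM'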
# Build Linux kernel
build_linux_1 ()
{
@@ -106,9 +115,10 @@ build_linux_1 ()
rm -rf "$(pwd)"/bin
mkdir "$(pwd)"/bin
- local abe_bin cc ld_opt=""
+ local gnu_host abe_bin cc ld hostcc ld_opt=""
- abe_bin="$(pwd)/abe/builds/destdir/x86_64-pc-linux-gnu/bin"
+ gnu_host=$(print_gnu_target native)
+ abe_bin="$(pwd)/abe/builds/destdir/$gnu_host/bin"
# Use binutils $abe_bin for both GNU and LLVM builds.
# For LLVM builds at the very least we need target assembler, since,
# otherwise, clang will try to use /usr/bin/as for cases that integrated
@@ -122,19 +132,25 @@ build_linux_1 ()
export PATH="$llvm_bin:$PATH"
cc="$llvm_bin/clang"
- ld_opt="LD=$llvm_bin/ld.lld"
+ ld="$llvm_bin/ld.lld"
+ hostcc="$cc"
+
+ ld_opt="LD=$ld"
;;
gnu)
cc="$abe_bin/$(print_gnu_target ${rr[target]})-gcc"
+ ld="$abe_bin/$(print_gnu_target ${rr[target]})-ld"
+ hostcc=gcc
;;
esac
+ echo -n "Compiler: "; $cc --version | head -n1
+ echo -n "Linker: "; $ld --version | head -n1
# Use ccache only when bisecting linux or qemu (or preparing to).
# Otherwise the compiler is new in every build and we would
# only clobber the ccache volume.
local ccache=""
case "${rr[mode]}:$(print_single_updated_component)" in
- "baseline:") ccache="ccache" ;;
"bisect:linux") ccache="ccache" ;;
"bisect:qemu") ccache="ccache" ;;
esac
@@ -150,14 +166,15 @@ EOF
if [ x"${rr[target]}" != x"$(uname -m)" ]; then
opts="$opts ARCH=$(print_kernel_target ${rr[target]})"
opts="$opts CROSS_COMPILE=$(print_gnu_target ${rr[target]})-"
- opts="$opts HOSTCC=gcc"
+ opts="$opts HOSTCC=$hostcc"
+ echo -n "Host cc: "; $cc --version | head -n1
fi
cd linux
make $opts distclean
- make $opts ${rr[linux_config]}
+ make $opts $linux_config
sed -i -e 's:CONFIG_LOCALVERSION_AUTO=y:# CONFIG_LOCALVERSION_AUTO is not set:' .config
set +f; rm -f localversion*; set -f
make $opts oldconfig
@@ -213,8 +230,9 @@ boot_linux ()
*) assert false ;;
esac
- local qemu
- qemu="$(pwd)/abe/builds/hosttools/x86_64-pc-linux-gnu/bin/qemu-system-${rr[target]}"
+ local gnu_host qemu
+ gnu_host=$(print_gnu_target native)
+ qemu="$(pwd)/abe/builds/hosttools/$gnu_host/bin/qemu-system-${rr[target]}"
timeout --foreground 60s "$qemu" \
-kernel $image -machine virt $cpu -m 512 \
@@ -244,65 +262,48 @@ no_regression_p ()
base_linux_n_obj=$(grep -v "^#" $ref_artifacts/results | tail -n1)
fi
- # In log scan for errors below
- # - sed -e 's/"[^"]*"//g' -- removes quoted "error: app diagnostics" strings
- # - " error:" detects compiler errors from GCC and Clang (including GCC ICEs)
- # - "^ERROR:" detects linker errors
- # - ": undefined reference" detects missing symbols during linking
- # - "] Error " detects GNU make errors
- # Then grep for "grep" to exclude other uses of this search.
- # shellcheck disable=SC2154
- # (defined by init_step in jenkins-helpers)
- cat > $run_step_artifacts/results.regressions <<EOF
-# First few build errors in logs:
-$(cat $new_artifacts/console.log | sed -e 's/"[^"]*"//g' | grep " error:\|^ERROR:\|: undefined reference\|\] Error " | grep -v "grep" | head | sed -e "s/^/# /")
-EOF
-
+ local res=""
case "$linux_n_obj:$base_linux_n_obj" in
- boot:*) return 0 ;;
- *:boot) return 1 ;;
- all:*) return 0 ;;
- *:all) return 1 ;;
- *)
- if ! [ "$linux_n_obj" -ge "-10" ]; then
- # Something is very wrong with result (e.g., it's not a number).
- return 1
- fi
- if ! [ "$base_linux_n_obj" -ge "-10" ]; then
- # Something is very wrong with result (e.g., it's not a number).
- return 0
- fi
-
- if [ $linux_n_obj -ge $base_linux_n_obj ]; then
- return 0
- else
- return 1
- fi
- ;;
+ boot:*) res=0 ;;
+ *:boot) res=1 ;;
+ all:*) res=0 ;;
+ *:all) res=1 ;;
esac
- )
-}
-# Implement rr[breakup_updated_components] hook.
-tcwg_kernel_breakup_updated_components ()
-{
- (
- set -euf -o pipefail
-
- # Linux changes tend to cause the most regressions.
- # Breakup updated components into linux and the rest of components
- # to reduce the number of builds.
- if print_updated_components "\n" | grep -q "^linux\$"; then
- echo "linux"
- print_updated_components "\n" | grep -v "^linux\$" | tr '\n' ' ' | sed -e "s/ \$//g"
- echo
- else
- print_updated_components "\n"
+ if [ x"$res" != x"0" ]; then
+ local build_res
+
+ no_build_regression_p "$@" &
+ build_res=0 && wait $! || build_res=$?
+
+ if [ x"$res" = x"" ]; then
+ res=$build_res
+
+ # Handle a corner case when a linux commit removes a few
+ # files from the tree, causing the number of object files
+ # to decrease. We do not consider it a regression when the
+ # count of object files decreases by less than 1%.
+ if [ x"$res" != x"0" ]; then
+ if [ x"$(print_single_updated_component)" = x"linux" ] \
+ && [ $linux_n_obj -gt 0 ] \
+ && [ $linux_n_obj -lt $base_linux_n_obj ]; then
+ local ratio
+ ratio=$(($base_linux_n_obj - $linux_n_obj))
+ ratio=$((100 * $ratio / $base_linux_n_obj))
+ if [ $ratio = 0 ]; then
+ # Difference in object count is less than 1%.
+ res=0
+ fi
+ fi
+ fi
+ fi
fi
+
+ return $res
)
}
-# shellcheck disable=SC2154
-rr[breakup_updated_components]=tcwg_kernel_breakup_updated_components
+
+rr[breakup_changed_components]="breakup_changed_components linux"
run_step stop_on_fail -10 reset_artifacts
run_step stop_on_fail x prepare_abe
@@ -312,14 +313,12 @@ case "${rr[toolchain]}" in
run_step skip_on_fail -5 build_abe stage1
;;
llvm)
- run_step skip_on_fail -5 build_llvm
+ run_step skip_on_fail -5 build_kernel_llvm
;;
esac
run_step skip_on_fail -2 build_abe qemu
run_step skip_on_fail x build_linux
run_step skip_on_fail x boot_linux
run_step reset_on_fail x check_regression
-run_step stop_on_fail x update_baseline
-run_step stop_on_fail x push_baseline
trap "" EXIT
diff --git a/update_components_revs.sh b/update_components_revs.sh
new file mode 100755
index 00000000..567b55cb
--- /dev/null
+++ b/update_components_revs.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+set -euf -o pipefail
+
+scripts=$(dirname $0)
+
+# shellcheck source=jenkins-helpers.sh
+. $scripts/jenkins-helpers.sh
+
+convert_args_to_variables "$@"
+
+obligatory_variables rr[top_artifacts]
+declare -A rr
+dynamic_components_list="${dynamic_components_list-}"
+
+# Parse the args, e.g.:
+# $ update_components_revs.sh --dynamic_components_list "*"
+# $ update_components_revs.sh --dynamic_components_list "dep1 dep2 dep3"
+
+# FIXME: hardcoded for the moment
+declare -A deps_branch=(
+ ["dawn"]="main"
+ ["chromium_variations"]="main"
+ ["catapult"]="main"
+ ["perfetto"]="main"
+ ["vulkan-deps"]="main"
+ ["angle"]="main"
+ ["v8"]="main"
+ ["skia"]="main"
+)
+
+if [ x"$dynamic_components_list" == x"*" ]; then
+ dynamic_components_list="${!deps_branch[*]}"
+fi
+
+# Record the branch/revision for each requested component.
+for dep in $dynamic_components_list; do
+ if ! [ "${deps_branch[$dep]+abc}" ]; then
+ echo "SKIPPING unknown dependency : $dep"
+ continue
+ fi
+
+ echo "UPDATING [$dep] : ${deps_branch[$dep]}"
+ echo "${deps_branch[$dep]}" | set_current_git ${dep}_rev
+done
+
+
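Example invocations, assuming the convert_args_to_variables convention from jenkins-helpers.sh (--name VALUE becomes name=VALUE, --rr[key] VALUE populates the rr array):

    ./update_components_revs.sh --rr[top_artifacts] "$(pwd)/artifacts" \
        --dynamic_components_list "v8 skia"
    # "*" expands to every key of deps_branch:
    ./update_components_revs.sh --rr[top_artifacts] "$(pwd)/artifacts" \
        --dynamic_components_list "*"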
diff --git a/wrappers/count-wrapper.sh b/wrappers/count-wrapper.sh
new file mode 100755
index 00000000..125e9fac
--- /dev/null
+++ b/wrappers/count-wrapper.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -euf -o pipefail
+
+orig_tool="#ORIG_TOOL#"
+log="#SHADOW_TOP#-counts/$(basename "$0")"
+
+(
+ /usr/bin/flock -e 9
+
+ echo "$0 $*" >>"$log"
+) 9>>"$log"
+
+exec "$orig_tool" "$@"
diff --git a/wrappers/install-wrappers.sh b/wrappers/install-wrappers.sh
new file mode 100755
index 00000000..461e69a4
--- /dev/null
+++ b/wrappers/install-wrappers.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+
+set -eu -o pipefail
+
+wrapper_dir=$(cd "$(dirname "$0")"; pwd)
+
+orig_bin_dir=$(cd "$1"; pwd)
+shadow_bin_dir=$(cd "$2"; pwd)
+top=$(cd "$3"; pwd)
+shadow_top=$(mkdir -p "$4"; cd "$4"; pwd)
+shift 4
+
+cd "$orig_bin_dir"
+
+if [ $# = 0 ]; then
+ # wrap all files
+ readarray -t to_be_wrapped < <(find ./ ! -type d | sed -e "s#^\./##")
+else
+ # wrap only listed files
+ to_be_wrapped=("$@")
+fi
+
+bin=$(basename "$(pwd)")
+
+for exe in "${to_be_wrapped[@]}"; do
+ if ! [ -x "$exe" ]; then
+ continue
+ fi
+
+ mkdir -p "../$bin.orig/$(dirname "$exe")"
+ mkdir -p "../$bin.wrapper/$(dirname "$exe")"
+
+ # Install or update the wrapper
+ case "$exe" in
+ "clang") wrapper="shadow-cc.sh" ;;
+ "clang++") wrapper="shadow-cc.sh" ;;
+ "llvm-ar") wrapper="shadow-ar.sh" ;;
+ "strip.sh") wrapper="shadow-strip.sh" ;;
+ *) wrapper="count-wrapper.sh" ;;
+ esac
+
+ exe_hash=$(git rev-parse HEAD)
+ ccache="CCACHE_COMPILERCHECK=string:$exe_hash /usr/bin/ccache"
+
+ # We copy wrappers to the bin.wrapper directory so that we can re-run the
+ # install procedure multiple times without side effects.
+ cp "$wrapper_dir/$wrapper" "../$bin.wrapper/$exe"
+ sed -i \
+ -e "s%#TOP#%$top%" \
+ -e "s%#SHADOW_TOP#%$shadow_top%" \
+ -e "s%#ORIG_TOOL#%$orig_bin_dir/../$bin.orig/$exe%" \
+ -e "s%#SHADOW_TOOL#%$shadow_bin_dir/$exe%" \
+ -e "s%#CCACHE#%$ccache%" \
+ "../$bin.wrapper/$exe"
+
+ # Calculate path relative to $exe's directory -- for ln.
+ ln_rel_path="$bin.wrapper/$exe"
+ exe_path="$exe"
+ while true; do
+ ln_rel_path="../$ln_rel_path"
+ exe_path=$(dirname "$exe_path")
+ if [ "$exe_path" = "." ]; then
+ break
+ fi
+ done
+
+ # Check whether the executable was already moved to ../bin.orig.
+ if [ -L "$exe" ] \
+ && [ x"$(readlink "$exe")" = x"$ln_rel_path" ]; then
+ continue
+ fi
+
+ # Move the executable and symlink to the wrapper.
+ mv "$exe" "../$bin.orig/$exe"
+ ln -s "$ln_rel_path" "$exe"
+done
+
+mkdir -p "${shadow_top}-counts"
+echo "benchmark,symbol,size" > $shadow_top.size
diff --git a/wrappers/shadow-ar.sh b/wrappers/shadow-ar.sh
new file mode 100755
index 00000000..b363034c
--- /dev/null
+++ b/wrappers/shadow-ar.sh
@@ -0,0 +1,246 @@
+#!/bin/bash
+
+set -euf -o pipefail
+
+top="#TOP#"
+shadow_top="#SHADOW_TOP#"
+
+orig_ar="#ORIG_TOOL#"
+shadow_ar="#SHADOW_TOOL#"
+
+errors=()
+warnings=()
+skips=()
+
+orig_out=()
+shadow_out=()
+orig_input=()
+shadow_input=()
+orig_opts=()
+shadow_opts=()
+
+dump_log ()
+{
+ (
+ set +u
+ IFS=";"
+ cat <<EOF
+errors=(${errors[@]})
+warnings=(${warnings[@]})
+skips=(${skips[@]})
+orig_out=(${orig_out[@]})
+shadow_out=(${shadow_out[@]})
+orig_input=(${orig_input[@]})
+shadow_input=(${shadow_input[@]})
+orig_opts=($0 ${orig_opts[@]})
+shadow_opts=(${shadow_opts[@]})
+
+EOF
+ )
+}
+
+report_errors () {
+ res=$?
+ if [ $res != 0 ]; then
+ errors+=("wrapper failed with $res")
+ fi
+ if [ ${#errors[@]} != 0 ] \
+ || [ ${#warnings[@]} != 0 ] \
+ || [ ${#skips[@]} != 0 ]; then
+ (
+ /usr/bin/flock -e 9
+
+ if [ ${#errors[@]} != 0 ]; then
+ log=$shadow_top.errors
+ elif [ ${#warnings[@]} != 0 ]; then
+ log=$shadow_top.warnings
+ else
+ log=$shadow_top.skips
+ fi
+ dump_log >>$log
+ ) 9>>$shadow_top.lock
+ fi
+}
+
+trap report_errors EXIT
+
+if [ "$#" -gt 0 ] && [ ${#orig_opts[@]} = 0 ]; then
+ opt="$1"
+ shift
+
+ orig_opts+=("$opt")
+
+ case "$opt" in
+ "-M") skips+=("mri-script unsupported") ;;
+ "c"*) ;;
+ *) errors+=("orig expecting: crsPD, cqsL, etc.; got: $opt") ;;
+ esac
+else
+ errors+=("orig missing: action")
+fi
+
+while [ "$#" -gt 0 ]; do
+ opt="$1"
+ shift
+
+ orig_opts+=("$opt")
+
+ case "$opt" in
+ "-"*)
+ ;;
+ *)
+ if [ ${#orig_out[@]} = 0 ]; then
+ orig_out+=("$opt")
+ else
+ orig_input+=("$opt")
+ fi
+ ;;
+ esac
+done
+
+# orig_out is used only if the command is not skipped.
+if [ "${#orig_out[@]}" = 0 ] && [ ${#skips[@]} = 0 ]; then
+ errors+=("orig missing: orig_out")
+fi
+
+print_shadow_path ()
+{
+ local path="$1"
+ local prefix="${2-}"
+
+ case "$path" in
+ "/dev/null") echo "$path"; return ;;
+ "/"*) errors+=("Absolute path: $path") ;;
+ esac
+
+ # Check that we are under $top
+ case "$(pwd -P | sed -e "s#^$top##")" in
+ "/"*) errors+=("Running outside of $top") ;;
+ esac
+
+ echo "$(pwd -P)/$path" | sed -e "s#$top#$shadow_top${prefix}#"
+}
+
+generate_shadow_rsp ()
+{
+ local orig_rsp="$1"
+
+ local shadow_rsp orig_file shadow_file
+
+ shadow_rsp=$(print_shadow_path "$orig_rsp")
+ mkdir -p "$(dirname "$shadow_rsp")"
+ rm -f "$shadow_rsp"
+
+ if [ "$(stat --format=%s "$orig_rsp")" = 0 ]; then
+ touch "$shadow_rsp"
+ else
+ local quote delim=""
+ while read -d '' -r orig_file; do
+ # unquote orig
+ case "$orig_file" in
+ \'*\') quote="'"; orig_file=${orig_file:1:-1} ;;
+ *) quote="" ;;
+ esac
+ shadow_file=$(print_shadow_path "$orig_file")
+ if ! [ -e "$shadow_file" ]; then
+ shadow_file="$orig_file"
+ fi
+ # quote shadow
+ echo -ne "$delim$quote$shadow_file$quote" >> "$shadow_rsp"
+ delim=" "
+ done < <({ cat "$orig_rsp"; echo; } | xargs printf '%s\0')
+ fi
+
+ echo "$shadow_rsp"
+}
+
+make_shadow_opts ()
+{
+ local opt
+ local shadow_file
+
+ if [ "$#" -gt 0 ] && [ ${#shadow_opts[@]} = 0 ]; then
+ opt="$1"
+ shift
+
+ shadow_opts+=("$opt")
+
+ case "$opt" in
+ "c"*) ;;
+ *) errors+=("shadow expecting: crsPD, cqsL, etc.; got: $opt") ;;
+ esac
+ else
+ errors+=("shadow missing: action")
+ fi
+
+ while [ "$#" -gt 0 ]; do
+ opt="$1"
+ shift
+
+ case "$opt" in
+ "-"*) ;;
+ "@"*".rsp")
+ opt="${opt#@}"
+ if [ x"$(basename "$opt" .rsp)" = x"$(basename "${orig_out[*]}")" ]; then
+ opt=$(generate_shadow_rsp "$opt")
+ else
+ errors+=("RSP file $opt does not match orig_out ${orig_out[*]}")
+ fi
+ opt="@$opt"
+ ;;
+ "@"*)
+ errors+=("Unexpected option: $opt")
+ opt="${opt#@}"
+ # shellcheck disable=SC2046
+ make_shadow_opts $(cat "$opt")
+ continue
+ ;;
+ *)
+ shadow_file=$(print_shadow_path "$opt")
+ if [ ${#shadow_out[@]} = 0 ]; then
+ opt="$shadow_file"
+ shadow_out+=("$shadow_file")
+ elif [ -e "$shadow_file" ]; then
+ opt="$shadow_file"
+ shadow_input+=("$shadow_file")
+ fi
+ ;;
+ esac
+ shadow_opts+=("$opt")
+ done
+}
+
+if [ ${#errors[@]} = 0 ] && [ ${#skips[@]} = 0 ]; then
+ make_shadow_opts "${orig_opts[@]}"
+fi
+
+if [ ${#errors[@]} = 0 ] && [ ${#skips[@]} = 0 ]; then
+ shadow_log=$(print_shadow_path "${orig_out[*]}" "-logs")
+
+ mkdir -p "$(dirname "${shadow_out[*]}")"
+ mkdir -p "$(dirname "$shadow_log")"
+
+ dump_log > "$shadow_log"
+ echo "$shadow_ar" "${shadow_opts[@]}" >> "$shadow_log"
+ "$shadow_ar" "${shadow_opts[@]}" >> "$shadow_log" 2>&1 &
+ shadow_res=0 && wait $! || shadow_res=$?
+
+ if [ $shadow_res != 0 ]; then
+ errors+=("shadow_ar failed with $shadow_res")
+ elif ! [ -e "${shadow_out[*]}" ]; then
+ errors+=("shadow_ar failed to generate output")
+ else
+ shadow_link=$(IFS=""; print_shadow_path "${orig_out[*]}" "-default-A")
+ mkdir -p "$(dirname "$shadow_link")"
+ ln -s -f "${shadow_out[*]}" "$shadow_link"
+ fi
+fi
+
+"$orig_ar" "${orig_opts[@]}" < /dev/stdin &
+orig_res=0 && wait $! || orig_res=$?
+
+if [ $orig_res != 0 ]; then
+ errors+=("orig_ar failed with $orig_res")
+fi
+
+exit $orig_res
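Two design points worth noting in shadow-ar.sh: the shadow archiver runs first against mirrored paths with its status merely logged, while the original ar runs last and its exit status is what the build sees, so the shadow run can never break the real build. print_shadow_path does the mirroring; a worked example assuming top=/work, shadow_top=/shadow, and current directory /work/out/obj:

    # print_shadow_path "libfoo.a" effectively computes:
    echo "/work/out/obj/libfoo.a" | sed -e "s#/work#/shadow#"
    # -> /shadow/out/obj/libfoo.a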
diff --git a/wrappers/shadow-cc.sh b/wrappers/shadow-cc.sh
new file mode 100755
index 00000000..586c9d52
--- /dev/null
+++ b/wrappers/shadow-cc.sh
@@ -0,0 +1,345 @@
+#!/bin/bash
+
+set -euf -o pipefail
+
+top="#TOP#"
+shadow_top="#SHADOW_TOP#"
+
+orig_cc="#ORIG_TOOL#"
+shadow_cc="#SHADOW_TOOL#"
+
+errors=()
+warnings=()
+skips=()
+
+mode=()
+target=()
+opt_level=()
+lang=()
+stdin_input=()
+
+orig_out=()
+shadow_out=()
+orig_input=()
+shadow_input=()
+orig_opts=()
+shadow_opts=()
+
+dump_log ()
+{
+ (
+ set +u
+ IFS=";"
+ cat <<EOF
+errors=(${errors[@]})
+warnings=(${warnings[@]})
+skips=(${skips[@]})
+mode=(${mode[@]})
+target=(${target[@]})
+opt_level=(${opt_level[@]})
+lang=(${lang[@]})
+stdin_input=(${stdin_input[@]})
+orig_out=(${orig_out[@]})
+shadow_out=(${shadow_out[@]})
+orig_input=(${orig_input[@]})
+shadow_input=(${shadow_input[@]})
+orig_opts=($0 ${orig_opts[@]})
+shadow_opts=(${shadow_opts[@]})
+
+EOF
+ )
+}
+
+report_errors () {
+ res=$?
+ if [ $res != 0 ]; then
+ errors+=("wrapper failed with $res")
+ fi
+ if [ ${#errors[@]} != 0 ] \
+ || [ ${#warnings[@]} != 0 ] \
+ || [ ${#skips[@]} != 0 ]; then
+ (
+ /usr/bin/flock -e 9
+
+ if [ ${#errors[@]} != 0 ]; then
+ log=$shadow_top.errors
+ elif [ ${#warnings[@]} != 0 ]; then
+ log=$shadow_top.warnings
+ else
+ log=$shadow_top.skips
+ fi
+ dump_log >>$log
+ ) 9>>$shadow_top.lock
+ fi
+
+ # Log code-size
+ if [ ${#skips[@]} = 0 ] && [ -x "${orig_out[*]}" ]; then
+ if [ -x "${shadow_out[*]}" ]; then
+ text=$(/usr/bin/size "${shadow_out[*]}" | tail -n1 | awk '{ print $1 }')
+ else
+ text="-1"
+ fi
+ (
+ /usr/bin/flock -e 9
+ echo "${orig_out[*]},binary,$text" >> $shadow_top.size
+ ) 9>>$shadow_top.size
+ fi
+}
+
+trap report_errors EXIT
+
+while [ "$#" -gt 0 ]; do
+ opt="$1"
+ shift
+
+ orig_opts+=("$opt")
+
+ case "$opt" in
+ "-c"|"-S"|"-E") mode+=("$opt") ;;
+ "-o")
+ if [ "$#" -gt 0 ]; then
+ orig_out=("$1" "${orig_out[@]}")
+ orig_opts+=("$1")
+ shift
+ else
+ errors+=("$opt with no argument")
+ fi
+ ;;
+ "-O"*)
+ opt_level+=("$opt")
+ ;;
+ "-target")
+ if [ "$#" -gt 0 ]; then
+ target=("$1" "${target[@]}")
+ orig_opts+=("$1")
+ shift
+ else
+ errors+=("$opt with no argument")
+ fi
+ ;;
+ "-x")
+ if [ "$#" -gt 0 ]; then
+ lang+=("$1")
+ orig_opts+=("$1")
+ shift
+ else
+ errors+=("$opt with no argument")
+ fi
+ ;;
+ "-isystem"|"--sysroot"|"-MF"|"-MQ"|"-I"|"-include"|"-z"|"-D"|"-iquote")
+ if [ "$#" -gt 0 ]; then
+ orig_opts+=("$1")
+ shift
+ else
+ errors+=("$opt with no argument")
+ fi
+ ;;
+ "-")
+ orig_input+=("$opt")
+ stdin_input+=("$opt")
+ ;;
+ "-"*)
+ ;;
+ *)
+ orig_input+=("$opt")
+ ;;
+ esac
+done
+
+if [ ${#mode[@]} = 0 ]; then
+ # Link mode
+ mode=("-L")
+elif [ ${#mode[@]} != 1 ]; then
+ warnings+=("Several modes: ${mode[*]}")
+fi
+
+if [ ${#stdin_input[@]} = 1 ]; then
+ skips+=("Stdin input")
+ if [ x"${orig_input[*]}" != x"${stdin_input[*]}" ]; then
+ errors+=("Extra inputs in presence of stdin: ${orig_input[*]}")
+ fi
+ if [ x"${mode[*]}" != x"-E" ]; then
+ warnings+=("Stdin input with mode: ${mode[*]}")
+ fi
+elif [ ${#stdin_input[@]} -gt 1 ]; then
+ errors+=("Several stdin inputs")
+fi
+
+if [ ${#lang[@]} -gt 1 ]; then
+ warnings+=("Several -x LANG options: ${lang[*]}")
+fi
+
+if [ ${#opt_level[@]} -gt 1 ]; then
+ warnings+=("Several opt_levels: ${opt_level[*]}")
+fi
+
+if [ x"${mode[*]}" != x"-L" ]; then
+ if [ ${#orig_input[@]} != 1 ]; then
+ errors+=("Several inputs with mode ${mode[*]}: ${orig_input[*]}")
+ fi
+fi
+
+if [ ${#orig_out[@]} = 0 ]; then
+ skips+=("No orig_out")
+elif [ ${#orig_out[@]} != 1 ]; then
+ errors+=("Several orig_outs: ${orig_out[*]}")
+fi
+
+if [ "${#target[@]}" -gt 1 ]; then
+ errors+=("Several targets: ${target[*]}")
+fi
+
+if ! echo "${target[*]}" | grep -e "^aarch64" >/dev/null; then
+ skips+=("Skipping target: ${target[*]}")
+fi
+
+if [ ${#errors[@]} = 0 ] && [ ${#skips[@]} != 0 ] \
+ && [ ${#orig_out[@]} = 0 ] && [ ${#stdin_input[@]} != 0 ]; then
+ # Bionic has several cases where orig_cc [expectedly?] fails.
+ # Ignore these.
+ report_errors
+ exec "$orig_cc" "${orig_opts[@]}"
+fi
+
+print_shadow_path ()
+{
+ local path="$1"
+ local prefix="${2-}"
+
+ case "$path" in
+ "/dev/null") echo "$path"; return ;;
+ "/"*) errors+=("Absolute path: $path") ;;
+ esac
+
+ # Check that we are under $top
+ case "$(pwd -P | sed -e "s#^$top##")" in
+ "/"*) errors+=("Running outside of $top") ;;
+ esac
+
+ echo "$(pwd -P)/$path" | sed -e "s#$top#$shadow_top${prefix}#"
+}
+
+generate_shadow_rsp ()
+{
+ local orig_rsp="$1"
+
+ local shadow_rsp orig_file shadow_file
+
+ shadow_rsp=$(print_shadow_path "$orig_rsp")
+ mkdir -p "$(dirname "$shadow_rsp")"
+ rm -f "$shadow_rsp"
+
+ if [ "$(stat --format=%s "$orig_rsp")" = 0 ]; then
+ touch "$shadow_rsp"
+ else
+ local quote delim=""
+ while read -d '' -r orig_file; do
+ # unquote orig
+ case "$orig_file" in
+ \'*\') quote="'"; orig_file=${orig_file:1:-1} ;;
+ *) quote="" ;;
+ esac
+ shadow_file=$(print_shadow_path "$orig_file")
+ if ! [ -e "$shadow_file" ]; then
+ shadow_file="$orig_file"
+ fi
+ # quote shadow
+ echo -ne "$delim$quote$shadow_file$quote" >> "$shadow_rsp"
+ delim=" "
+ done < <({ cat "$orig_rsp"; echo; } | xargs printf '%s\0')
+ fi
+
+ echo "$shadow_rsp"
+}
+
+make_shadow_opts ()
+{
+ local opt
+ local shadow_file
+
+ while [ "$#" -gt 0 ]; do
+ opt="$1"
+ shift
+
+ case "$opt" in
+ "-o")
+ shadow_opts+=("$opt")
+ opt=$(print_shadow_path "$1")
+ shadow_out+=("$opt")
+ shift
+ ;;
+ "-fuse-ld=lld")
+ opt="-fuse-ld=$(dirname "$shadow_cc")/ld.lld"
+ ;;
+ "-target"|"-x"|"-isystem"|"--sysroot"|"-MF"|"-MQ"|"-I"|"-include"|"-z"|"-D"|"-iquote")
+ shadow_opts+=("$opt")
+ opt="$1"
+ shift
+ ;;
+ "-Werror"*) continue ;;
+ "-")
+ errors+=("Stdin input should have been skipped")
+ ;;
+ "-"*) ;;
+ "@"*".rsp")
+ opt="${opt#@}"
+ if [ x"$(basename "$opt" .rsp)" = x"$(basename "${orig_out[*]}")" ]; then
+ opt=$(generate_shadow_rsp "$opt")
+ else
+ errors+=("RSP file $opt does not match orig_out ${orig_out[*]}")
+ fi
+ opt="@$opt"
+ ;;
+ "@"*)
+ errors+=("Unexpected option: $opt")
+ opt="${opt#@}"
+ # shellcheck disable=SC2046
+ make_shadow_opts $(cat "$opt")
+ continue
+ ;;
+ *)
+ shadow_file=$(print_shadow_path "$opt")
+ if [ -e "$shadow_file" ]; then
+ opt="$shadow_file"
+ shadow_input+=("$shadow_file")
+ fi
+ ;;
+ esac
+ shadow_opts+=("$opt")
+ done
+}
+
+if [ ${#errors[@]} = 0 ] && [ ${#skips[@]} = 0 ]; then
+ make_shadow_opts "${orig_opts[@]}"
+fi
+
+if [ ${#errors[@]} = 0 ] && [ ${#skips[@]} = 0 ]; then
+ shadow_log=$(print_shadow_path "${orig_out[*]}" "-logs")
+
+ mkdir -p "$(dirname "${shadow_out[*]}")"
+ mkdir -p "$(dirname "$shadow_log")"
+
+ dump_log > "$shadow_log"
+ echo "$shadow_cc" "${shadow_opts[@]}" >> "$shadow_log"
+ "$shadow_cc" "${shadow_opts[@]}" >> "$shadow_log" 2>&1 &
+ shadow_res=0 && wait $! || shadow_res=$?
+
+ if [ $shadow_res != 0 ]; then
+ errors+=("shadow_cc failed with $shadow_res")
+ elif ! [ -e "${shadow_out[*]}" ]; then
+ errors+=("shadow_cc failed to generate output")
+ elif [ x"${shadow_out[*]}" != x"/dev/null" ]; then
+ shadow_link=$(IFS=""; print_shadow_path "${orig_out[*]}" "-${target[*]-default}${mode[*]}")
+ mkdir -p "$(dirname "$shadow_link")"
+ ln -s -f "${shadow_out[*]}" "$shadow_link"
+ fi
+fi
+
+#CCACHE# "$orig_cc" "${orig_opts[@]}" &
+orig_res=0 && wait $! || orig_res=$?
+
+if [ $orig_res != 0 ]; then
+ errors+=("orig_cc failed with $orig_res")
+fi
+
+exit $orig_res
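The #CCACHE# token on the original-compiler line is not a comment: install-wrappers.sh sed-substitutes it, so after installation the line reads roughly as below (hash and paths illustrative). CCACHE_COMPILERCHECK=string:<git-hash> pins the compiler identity for ccache, so it does not try to fingerprint the wrapped binary itself.

    CCACHE_COMPILERCHECK=string:1a2b3c4d /usr/bin/ccache \
        "/work/bin.orig/clang" "${orig_opts[@]}" &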
diff --git a/wrappers/shadow-strip.sh b/wrappers/shadow-strip.sh
new file mode 100755
index 00000000..3ff7d831
--- /dev/null
+++ b/wrappers/shadow-strip.sh
@@ -0,0 +1,195 @@
+#!/bin/bash
+
+set -euf -o pipefail
+
+top="#TOP#"
+shadow_top="#SHADOW_TOP#"
+
+orig_strip="#ORIG_TOOL#"
+shadow_strip="#SHADOW_TOOL#"
+
+errors=()
+warnings=()
+skips=()
+
+orig_out=()
+shadow_out=()
+orig_input=()
+shadow_input=()
+orig_opts=()
+shadow_opts=()
+
+dump_log ()
+{
+ (
+ set +u
+ IFS=";"
+ cat <<EOF
+errors=(${errors[@]})
+warnings=(${warnings[@]})
+skips=(${skips[@]})
+orig_out=(${orig_out[@]})
+shadow_out=(${shadow_out[@]})
+orig_input=(${orig_input[@]})
+shadow_input=(${shadow_input[@]})
+orig_opts=($0 ${orig_opts[@]})
+shadow_opts=(${shadow_opts[@]})
+
+EOF
+ )
+}
+
+report_errors () {
+ res=$?
+ if [ $res != 0 ]; then
+ errors+=("wrapper failed with $res")
+ fi
+ if [ ${#errors[@]} != 0 ] \
+ || [ ${#warnings[@]} != 0 ] \
+ || [ ${#skips[@]} != 0 ]; then
+ (
+ /usr/bin/flock -e 9
+
+ if [ ${#errors[@]} != 0 ]; then
+ log=$shadow_top.errors
+ elif [ ${#warnings[@]} != 0 ]; then
+ log=$shadow_top.warnings
+ else
+ log=$shadow_top.skips
+ fi
+ dump_log >>$log
+ ) 9>>$shadow_top.lock
+ fi
+}
+
+trap report_errors EXIT
+
+while [ "$#" -gt 0 ]; do
+ opt="$1"
+ shift
+
+ orig_opts+=("$opt")
+
+ case "$opt" in
+
+ "-o")
+ if [ "$#" -gt 0 ]; then
+ orig_out=("$1" "${orig_out[@]}")
+ orig_opts+=("$1")
+ shift
+ else
+ errors+=("$opt with no argument")
+ fi
+ ;;
+
+ "-i")
+ if [ "$#" -gt 0 ]; then
+ orig_input=("$1" "${orig_input[@]}")
+ orig_opts+=("$1")
+ shift
+ else
+ errors+=("$opt with no argument")
+ fi
+ ;;
+
+ "-d")
+ if [ "$#" -gt 0 ]; then
+ orig_opts+=("$1")
+ shift
+ else
+ errors+=("$opt with no argument")
+ fi
+ ;;
+
+ "-"*)
+ ;;
+ esac
+done
+
+print_shadow_path ()
+{
+ local path="$1"
+ local prefix="${2-}"
+
+ case "$path" in
+ "/dev/null") echo "$path"; return ;;
+ "/"*) errors+=("Absolute path: $path") ;;
+ esac
+
+ # Check that we are under $top
+ case "$(pwd -P | sed -e "s#^$top##")" in
+ "/"*) errors+=("Running outside of $top") ;;
+ esac
+
+ echo "$(pwd -P)/$path" | sed -e "s#$top#$shadow_top${prefix}#"
+}
+
+make_shadow_opts ()
+{
+ local opt
+
+ while [ "$#" -gt 0 ]; do
+ opt="$1"
+ shift
+
+ case "$opt" in
+ "-o")
+ shadow_opts+=("$opt")
+ opt=$(print_shadow_path "$1")
+ shadow_out+=("$opt")
+ shift
+ ;;
+
+ "-i")
+ shadow_opts+=("$opt")
+ opt=$(print_shadow_path "$1")
+ shadow_input+=("$opt")
+ shift
+ ;;
+
+ "-d")
+ shadow_opts+=("$opt")
+ opt="/dev/null"
+ shift
+ ;;
+ esac
+ shadow_opts+=("$opt")
+ done
+}
+
+if [ -e "${shadow_input[*]}" ] \
+ && [ ${#errors[@]} = 0 ] && [ ${#skips[@]} = 0 ]; then
+ make_shadow_opts "${orig_opts[@]}"
+fi
+
+if [ -e "${shadow_input[*]}" ] \
+ && [ ${#errors[@]} = 0 ] && [ ${#skips[@]} = 0 ]; then
+ shadow_log=$(print_shadow_path "${orig_out[*]}" "-logs")
+
+ mkdir -p "$(dirname "${shadow_out[*]}")"
+ mkdir -p "$(dirname "$shadow_log")"
+
+ dump_log > "$shadow_log"
+ echo "$shadow_strip" "${shadow_opts[@]}" >> "$shadow_log"
+ "$shadow_strip" "${shadow_opts[@]}" >> "$shadow_log" 2>&1 &
+ shadow_res=0 && wait $! || shadow_res=$?
+
+ if [ $shadow_res != 0 ]; then
+ errors+=("shadow_strip failed with $shadow_res")
+ elif ! [ -e "${shadow_out[*]}" ]; then
+ errors+=("shadow_strip failed to generate output")
+ else
+ shadow_link=$(IFS=""; print_shadow_path "${orig_out[*]}" "-default-A")
+ mkdir -p "$(dirname "$shadow_link")"
+ ln -s -f "${shadow_out[*]}" "$shadow_link"
+ fi
+fi
+
+"$orig_strip" "${orig_opts[@]}" &
+orig_res=0 && wait $! || orig_res=$?
+
+if [ $orig_res != 0 ]; then
+ errors+=("orig_strip failed with $orig_res")
+fi
+
+exit $orig_res
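Unlike the other wrappers, shadow-strip.sh rewrites the -d argument to /dev/null on the shadow side; assuming -d names a dependency/debug output file (as in Android's strip.sh), only -i and -o are mirrored into the shadow tree. Schematically, with top=/work, shadow_top=/shadow, and cwd /work/out:

    # original invocation
    strip.sh -i obj/libfoo.so -o stripped/libfoo.so -d deps/libfoo.d
    # shadow invocation produced by make_shadow_opts:
    shadow-strip -i /shadow/out/obj/libfoo.so -o /shadow/out/stripped/libfoo.so -d /dev/null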