summaryrefslogtreecommitdiff
path: root/tcwg-benchmark-bare.sh
diff options
context:
space:
mode:
author: Christophe Lyon <christophe.lyon@linaro.org> 2020-08-07 13:42:08 +0000
committer: Christophe Lyon <christophe.lyon@linaro.org> 2020-08-17 13:16:21 +0000
commit c1856649fdd1f353c28d8c5158d3ba83f0dfdc1b (patch)
tree 370fc6599753dff74917341f7f7bf8cd279937f9 /tcwg-benchmark-bare.sh
parent 71dfb529fc5aa4da5714ddf13f204d0533c47832 (diff)
tcwg-benchmark-bare.sh: New script to run benchmarks on bare-metal
Starts a container with access to USB devices to build and run benchmarks on boards in bare-metal mode. Currently supports coremark only.

Change-Id: I19876d32fc3c2b3322960f86798dc02a96aed25b
Diffstat (limited to 'tcwg-benchmark-bare.sh')
-rwxr-xr-x tcwg-benchmark-bare.sh 243
1 file changed, 243 insertions, 0 deletions
diff --git a/tcwg-benchmark-bare.sh b/tcwg-benchmark-bare.sh
new file mode 100755
index 00000000..c90528b3
--- /dev/null
+++ b/tcwg-benchmark-bare.sh
@@ -0,0 +1,243 @@
+#!/bin/bash
+
+# Clean: shellcheck -e 2001 ./tcwg-benchmark-bare.sh
+
+set -ex
+
+# Make shellcheck happy and workaround Jenkins not defining variables
+# for empty arguments.  Every parameter the script reads later must be
+# bound here, otherwise "set -u" below aborts on empty Jenkins params.
+bench_container_tag="${bench_container_tag-bionic}"
+toolchain_url="$toolchain_url"
+toolchain_type="${toolchain_type-auto}"
+bench_list="$bench_list"
+cflags="$cflags"
+ldflags="$ldflags"
+sysroot="$sysroot"
+forceinstall="$forceinstall"
+builder="$builder"
+results_id="$results_id"
+BUILD_NUMBER="$BUILD_NUMBER"
+NODE_NAME="$NODE_NAME"
+WORKSPACE="$WORKSPACE"
+
+# Jenkins doesn't define variables when parameter value is empty (like cflags),
+# so enable "set -u" only after above binding of variables.
+set -u
+
+# Select how to address the docker host: a builder name ending in a
+# numeric suffix (e.g. foo-3) designates one specific node, anything
+# else is treated as a node label.
+if echo "$builder" | grep -q ".*-[0-9]\+"; then
+ docker_host_opt="--arch amd64 --node $builder"
+else
+ docker_host_opt="--label $builder"
+fi
+
+# Shared helpers; presumably provides untar_url, remote_exec and
+# cleanup_all_containers used below -- confirm against jenkins-helpers.sh.
+. jenkins-helpers.sh
+
+# Start a container to run the benchmarks in.
+# The board is connected to the slave via USB, the container needs
+# special rights to access it.
+./start-container-docker.sh \
+ $docker_host_opt \
+ --distro "$bench_container_tag" \
+ --task bench \
+ --docker_opts "--privileged -v /dev/bus/usb:/dev/bus/usb" \
+ --prefix run_ > run-container.sh
+# Remove the containers on any exit path.
+trap "cleanup_all_containers" EXIT
+# Sourcing the generated script defines run_container_host and
+# run_container_port (used below) among others.
+. ./run-container.sh
+
+# If $toolchain_url is of ssh:// type, don't create a remote build
+# container, just use the ssh command as provided.
+build_container_host=
+build_container_port=
+case "$toolchain_url" in
+ "ssh://"*)
+ # ccprefix is host[:port]:path after stripping the scheme.
+ ccprefix="${toolchain_url##ssh://}"
+
+ # Extract host:port: specification from ccprefix, we don't
+ # need to care about :parallelize here, just pass it to run.sh
+ # if present.
+ build=${ccprefix%:*}
+ build_container_host="$(echo $build | cut -d: -f 1)"
+ case ${ccprefix} in
+ *:*:*)
+ # host:port:path form -- second field is the port.
+ build_container_port="$(echo $build | cut -s -d: -f 2)"
+ ;;
+ *:*)
+ # If no port is specified, use 22 (ssh default port)
+ build_container_port=22
+ ;;
+ esac
+
+ if [ "x$build_container_host" = "x" ]; then
+ echo "ERROR: ssh:// toolchain_url lacks a host: $toolchain_url."
+ exit 1
+ fi
+ if [ "x$build_container_port" = "x" ]; then
+ echo "ERROR: ssh:// toolchain_url lacks a port: $toolchain_url."
+ exit 1
+ fi
+ ;;
+ *)
+ # When we copy the toolchain, access it from the 'run' container
+ # run_container_host is set with . ./run-container.sh above
+ # shellcheck disable=SC2154
+ build_container_host=$run_container_host
+ # shellcheck disable=SC2154
+ build_container_port=$run_container_port
+ ;;
+esac
+
+# Resolve toolchaindir: the local (or remote, for ssh://) directory
+# holding the toolchain to benchmark with.
+case "$toolchain_url" in
+ "ssh://"*)
+ if [ x"$sysroot" = x"tarball" ]; then
+ echo "ERROR: Unsupported sysroot $sysroot for toolchain_url $toolchain_url"
+ exit 1
+ fi
+ # Last component of ccprefix is the path, keep it
+ toolchaindir="$(dirname ${ccprefix##*:})"
+ ;;
+ "http://"*".tar.xz"|"https://"*".tar.xz"|"http://"*".tar.bz2"|"https://"*".tar.bz2")
+ # Download and unpack the toolchain tarball into the workspace.
+ toolchaindir=$(untar_url "$toolchain_url" "$WORKSPACE" "--strip-components 1")
+ ;;
+ "rsync://"*)
+ ccprefix="${toolchain_url##rsync://}"
+ # We want to access the remote toolchain via a container, to
+ # avoid problems with the host's ssh server restrictions on the
+ # number of simultaneous connections.
+ # We copy it to the build container (assuming it uses the same
+ # architecture as the machine pointed to by $toolchain_url).
+ # Assume ccprefix looks like /path/bin/target-triplet-, and
+ # compute 'path'.
+ src_toolchaindir=$(dirname "$(dirname ${ccprefix})")
+ toolchaindir="${WORKSPACE}/toolchain-${BUILD_NUMBER}"
+ rsync -az --delete "$src_toolchaindir/" "$toolchaindir/"
+ ;;
+ *)
+ echo "ERROR: Cannot handle toolchain_url: $toolchain_url"
+ exit 1
+ ;;
+esac
+
+# Sanity check that toolchain_type is supported
+case "$toolchain_type" in
+ gnu|llvm) ;;
+ *)
+ echo "ERROR: Unsupported toolchain type: $toolchain_type"
+ exit 1
+ ;;
+esac
+
+# Locate the compiler inside the toolchain and compute ccprefix,
+# the cross-compiler prefix the benchmark scripts will invoke.
+case "$toolchain_url" in
+ "http://"*|"https://"*|"rsync://"*|"ssh://"*)
+
+ # In the ssh:// case, we have to perform the 'find' operations
+ # remotely.
+ case "$toolchain_url" in
+ "ssh://"*)
+ maybe_remote="ssh -p $build_container_port $build_container_host"
+ ;;
+ *)
+ maybe_remote=""
+ ;;
+ esac
+
+ # Autodetect the toolchain type by looking for a gcc or clang
+ # binary under the toolchain's bin directory.
+ case "$toolchain_type" in
+ "gnu"|"llvm") ;;
+ "auto")
+ if [ x"$($maybe_remote find "$toolchaindir" -path "*bin/*gcc" | wc -l)" != x"0" ]; then
+ toolchain_type="gnu"
+ elif [ x"$($maybe_remote find "$toolchaindir" -path "*bin/*clang" | wc -l)" != x"0" ]; then
+ toolchain_type="llvm"
+ else
+ echo "ERROR: Cannot autodetect toolchain type"
+ exit 1
+ fi
+ ;;
+ esac
+
+ case "$toolchain_type" in
+ "gnu") ccname="gcc" ;;
+ "llvm") ccname="clang" ;;
+ esac
+ # There must be exactly one matching compiler driver.
+ ccpath=$($maybe_remote find "$toolchaindir" -path "*bin/*$ccname")
+ if [ "$(echo "$ccpath" | wc -w)" -ne 1 ]; then
+ echo "ERROR: found more than one compiler: $ccpath"
+ exit 1
+ fi
+
+ # Non-ssh:// cases have to copy the just-copied toolchain to
+ # the remote build container. For ssh://, we'll access the
+ # toolchain remotely.
+ case "$toolchain_url" in
+ "ssh://"*) ;;
+ *)
+ ccprefix=$(echo "$ccpath" | sed -e "s/$ccname\$//")
+ # Copy toolchain to the build container.
+ rsync -a --delete -e "ssh -p$build_container_port" "$toolchaindir/" "$build_container_host:$toolchaindir/"
+ # We share the filesystem with build and run containers
+ ccprefix="$build_container_host:$build_container_port:$ccprefix"
+ ;;
+ esac
+ ;;
+esac
+
+# Resolve the sysroot specification into host:port:path form (or
+# leave it empty to use the system sysroot).
+case "$sysroot" in
+ "tarball")
+ # Use the libc shipped inside the toolchain we just unpacked.
+ sysroot="$build_container_host:$build_container_port:$(find "$toolchaindir" -name "libc")"
+ ;;
+ "http://"*|"https://"*)
+ sysrootdir=$(untar_url "$sysroot" "$WORKSPACE" "--strip-components 1")
+ # Copy toolchain to the build container.
+ rsync -a --delete -e "ssh -p$build_container_port" "$sysrootdir/" "$build_container_host:$sysrootdir/"
+ # NOTE(review): the rebinding below is commented out, so $sysroot
+ # keeps its URL value in this branch -- confirm this is intentional.
+ #sysroot="$build_container_host:$build_container_port:$sysrootdir"
+ ;;
+ "ssh://"*)
+ sysroot="${sysroot##ssh://}"
+
+ # Check host:port specification from sysroot.
+ case ${sysroot} in
+ *:*) ;;
+ *)
+ echo "ERROR: ssh:// sysroot lacks a host: $sysroot"
+ exit 1
+ ;;
+ esac
+ ;;
+
+ "")
+ # Use system sysroot.
+ ;;
+ *)
+ echo "ERROR: Cannot handle sysroot: $sysroot"
+ exit 1
+ ;;
+esac
+
+# Expand <build_num>/@build_num@ placeholders in the Jenkins-provided
+# results_id with the actual build number.
+results_id=$(echo "$results_id" \
+ | sed -e "s/<build_num>/$BUILD_NUMBER/g" \
+ -e "s/@build_num@/$BUILD_NUMBER/g")
+# First path component of results_id names the target board type.
+hw_tag="${results_id%%/*}"
+# Refuse path traversal outside of the results hierarchy.
+if echo "$results_id" | grep -q "\.\."; then
+ echo "ERROR: results_id should not escape /home/tcwg-benchmark/results* hierarchy; do not use \"..\""
+ exit 1
+fi
+# Only STM32 boards are supported for bare-metal runs so far.
+case "$hw_tag" in
+ STM32*) ;;
+ *)
+ echo "ERROR: results_id does not start with a valid hw_tag"
+ exit 1
+ ;;
+esac
+
+# Run the requested benchmarks inside the run container; coremark is
+# the only benchmark supported so far.
+# vars are from run-container.sh sourced above
+# shellcheck disable=SC2154
+case "$bench_list" in
+ *coremark*)
+ remote_exec "$run_container_host:$run_container_port:$WORKSPACE/bmk-scripts:-t -Snone" \
+ "$(pwd)/bmk-scripts/coremark.sh" \
+ --ccprefix "$ccprefix" \
+ --cflags "$cflags" \
+ --ldflags "$ldflags" \
+ --forceinstall "${forceinstall}" \
+ --hw_tag "$hw_tag" \
+ --resultsdest "bkp-01.tcwglab:/home/tcwg-benchmark/results-${results_id}/${NODE_NAME}" \
+ --verbose true
+ ;;
+esac