#!/bin/bash
# Clean: shellcheck -e 2001 ./tcwg-benchmark.sh

set -ex

# Make shellcheck happy and work around Jenkins not defining variables
# for empty arguments.
bench_container_tag="${bench_container_tag-bionic}"
toolchain_url="$toolchain_url"
toolchain_type="${toolchain_type-auto}"
bench_list="$bench_list"
cflags="$cflags"
ldflags="$ldflags"
fileserver="$fileserver"
forceinstall="$forceinstall"
results_id="$results_id"
BUILD_NUMBER="$BUILD_NUMBER"
NODE_NAME="$NODE_NAME"
WORKSPACE="$WORKSPACE"

# Jenkins doesn't define variables when a parameter value is empty (like
# cflags), so enable "set -u" only after the above binding of variables.
set -u

. jenkins-helpers.sh

# Start a container to run the benchmarks in.
# The board is connected to the slave via USB; the container needs
# special rights to access it.
# The tcwg-benchmark user already exists; re-creating it causes an error.
newuser=
[ "x$USER" != "xtcwg-benchmark" ] && newuser="--newuser $USER"
./start-container-docker.sh \
  $newuser \
  --distro "$bench_container_tag" \
  --task bench \
  --docker_opts "--privileged -v /dev/bus/usb:/dev/bus/usb" \
  --prefix run_ > run-container.sh
trap "cleanup_all_containers" EXIT
. ./run-container.sh

# If $toolchain_url is of ssh:// type, don't use a remote build
# container; just use the ssh command as provided.
build_container_host=
build_container_port=
case "$toolchain_url" in
  "ssh://"*)
    ccprefix="${toolchain_url##ssh://}"
    # Extract the host:port: specification from ccprefix; we don't need
    # to care about :parallelize here, just pass it to run.sh if present.
    build=${ccprefix%:*}
    build_container_host="$(echo "$build" | cut -d: -f 1)"
    case ${ccprefix} in
      *:*:*)
        build_container_port="$(echo "$build" | cut -s -d: -f 2)"
        ;;
      *:*)
        # If no port is specified, use 22 (the ssh default port).
        build_container_port=22
        ;;
    esac
    if [ "x$build_container_host" = "x" ]; then
      echo "ERROR: ssh:// toolchain_url lacks a host: $toolchain_url."
      exit 1
    fi
    if [ "x$build_container_port" = "x" ]; then
      echo "ERROR: ssh:// toolchain_url lacks a port: $toolchain_url."
      exit 1
    fi
    ;;
  *)
    # When we copy the toolchain, access it from the 'run' container.
    # run_container_host is set by . ./run-container.sh above.
    # shellcheck disable=SC2154
    build_container_host=$run_container_host
    # shellcheck disable=SC2154
    build_container_port=$run_container_port
    ;;
esac

case "$toolchain_url" in
  "ssh://"*)
    # The last component of ccprefix is the path; keep it.
    toolchaindir="$(dirname "${ccprefix##*:}")"
    ;;
  "http://"*".tar.xz"|"https://"*".tar.xz"|"http://"*".tar.bz2"|"https://"*".tar.bz2")
    toolchaindir=$(untar_url "$toolchain_url" "$WORKSPACE" "--strip-components 1")
    ;;
  "rsync://"*)
    ccprefix="${toolchain_url##rsync://}"
    # Extract the host:port: specification from ccprefix; we don't need
    # to care about :parallelize here, just pass it to run.sh if present.
    rsync_spec=${ccprefix%:*}
    rsync_host="$(echo "$rsync_spec" | cut -d: -f 1)"
    case ${ccprefix} in
      *:*:*)
        rsync_port="$(echo "$rsync_spec" | cut -s -d: -f 2)"
        ;;
      *:*)
        # If no port is specified, use 22 (the ssh default port).
        rsync_port=22
        ;;
    esac
    # We want to access the remote toolchain via a container, to avoid
    # problems with the host's ssh server restrictions on the number of
    # simultaneous connections.
    # We copy it to the build container (assuming it uses the same
    # architecture as the machine pointed to by $toolchain_url).
    # Assume ccprefix looks like /path/bin/target-triplet-, and
    # compute 'path'.
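    # For example (hypothetical value), a ccprefix of
    # "bench-host:22:/home/tcwg/tools/bin/aarch64-linux-gnu-" has the
    # path component "/home/tcwg/tools/bin/aarch64-linux-gnu-"; the two
    # dirname calls below reduce it to "/home/tcwg/tools".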
    src_toolchaindir=$(dirname "$(dirname "${ccprefix##*:}")")
    toolchaindir="${WORKSPACE}/toolchain-${BUILD_NUMBER}"
    rsync -az --delete -e "ssh -p$rsync_port" \
      "$rsync_host:$src_toolchaindir/" "$toolchaindir/"
    ;;
  *)
    echo "ERROR: Cannot handle toolchain_url: $toolchain_url"
    exit 1
    ;;
esac

# Sanity check that toolchain_type is supported ('auto' is resolved to
# gnu or llvm below).
case "$toolchain_type" in
  gnu|llvm|auto) ;;
  *)
    echo "ERROR: Unsupported toolchain type: $toolchain_type"
    exit 1
    ;;
esac

case "$toolchain_url" in
  "http://"*|"https://"*|"rsync://"*|"ssh://"*)
    # In the ssh:// case, we have to perform the 'find' operations
    # remotely.
    case "$toolchain_url" in
      "ssh://"*)
        maybe_remote="ssh -p $build_container_port $build_container_host"
        ;;
      *)
        maybe_remote=""
        ;;
    esac
    case "$toolchain_type" in
      "gnu"|"llvm") ;;
      "auto")
        # Autodetect the toolchain type from the compiler driver present
        # in the toolchain's bin/ directory.
        if [ x"$($maybe_remote find "$toolchaindir" -path "*bin/*gcc" | wc -l)" != x"0" ]; then
          toolchain_type="gnu"
        elif [ x"$($maybe_remote find "$toolchaindir" -path "*bin/*clang" | wc -l)" != x"0" ]; then
          toolchain_type="llvm"
        else
          echo "ERROR: Cannot autodetect toolchain type"
          exit 1
        fi
        ;;
    esac
    case "$toolchain_type" in
      "gnu") ccname="gcc" ;;
      "llvm") ccname="clang" ;;
    esac
    ccpath=$($maybe_remote find "$toolchaindir" -path "*bin/*$ccname")
    if [ "$(echo "$ccpath" | wc -w)" -ne 1 ]; then
      echo "ERROR: did not find exactly one compiler: $ccpath"
      exit 1
    fi
    # No need to copy the toolchain to the build container: it runs on
    # the local machine and has access to $toolchaindir.
    # FIXME: ssh:// access is currently broken.
    case "$toolchain_url" in
      "ssh://"*) ;;
      *)
        ccprefix=$(echo "$ccpath" | sed -e "s/$ccname\$//")
        ;;
    esac
    ;;
esac

# Substitute the <build_num> and @build_num@ placeholders with the
# Jenkins build number.
results_id=$(echo "$results_id" \
               | sed -e "s/<build_num>/$BUILD_NUMBER/g" \
                     -e "s/@build_num@/$BUILD_NUMBER/g")
hw_tag="${results_id%%/*}"

if echo "$results_id" | grep -q "\.\."; then
  echo "ERROR: results_id should not escape /home/tcwg-benchmark/results* hierarchy; do not use \"..\""
  exit 1
fi

case "$hw_tag" in
  stm32*) hw_tag=${hw_tag##stm32_} ;;
  *)
    echo "ERROR: results_id does not start with a valid hw_tag"
    exit 1
    ;;
esac

# These variables come from run-container.sh, sourced above.
# shellcheck disable=SC2154
case "$bench_list" in
  *coremark*)
    remote_exec "$run_container_host:$run_container_port:$WORKSPACE/bmk-scripts:-t -Snone" \
      "./coremark.sh" \
      --ccprefix "$ccprefix" \
      --cflags "$cflags" \
      --ldflags "$ldflags" \
      --forceinstall "${forceinstall}" \
      --hw_tag "$hw_tag" \
      --resultsdest "bkp-01.tcwglab:/home/tcwg-benchmark/results-${results_id}/${NODE_NAME}" \
      --verbose true
    ;;
esac

# Delete temporary toolchains to avoid filling the disk.
case "$toolchain_url" in
  "ssh://"*)
    # Nothing to do; we didn't copy any toolchain.
    ;;
  "http://"*|"https://"*|"rsync://"*)
    rm -rf "$toolchaindir"
    ;;
esac
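
# A minimal sketch of a manual invocation outside Jenkins, assuming the
# Jenkins parameters are passed via the environment; the toolchain URL,
# node name, and results_id below are hypothetical examples, not real
# endpoints:
#
#   bench_container_tag=bionic \
#   toolchain_url=https://example.org/toolchains/gcc-arm.tar.xz \
#   toolchain_type=auto \
#   bench_list=coremark \
#   cflags="-O2" ldflags="" fileserver="" forceinstall=false \
#   results_id="stm32_disco/gnu/<build_num>" \
#   BUILD_NUMBER=1 NODE_NAME=dev-01 WORKSPACE="$PWD" \
#   ./tcwg-benchmark.sh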