#!/bin/bash sanity_check_pwd () { ( set -euf -o pipefail if [[ $(pwd) == "/" ]]; then echo "pwd is unexpectedly \"/\", exiting" exit 1; fi ) } # Print absolute path to a file or directory # $1: Path (must exist) abs_path () { ( set -euf -o pipefail echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")" ) } # Assert that $@[1:] returns success. # $1 should be a message to print on failure e.g. # assert "will always fail" false assert_with_msg () { ( set -euf -o pipefail local failure_message=$1 shift eval "$@" || (echo "$failure_message" && exit 1) ) } # Assert that $@ returns success. assert () { ( set -euf -o pipefail eval "$@" ) } # Cleanup contents of directory preserving specified parts. # $1: Directory to clean # $@: Find patterns of files to preserve (must start with $1) fresh_dir () { ( set -euf -o pipefail local dir="$1" shift 1 # Make sure $dir doesn't have "/" at the end. dir=$(dirname "$dir/something") mkdir -p "$dir" find_opts=("!" "-path" "$dir") for keep_pattern in "$@"; do while : ; do find_opts+=("!" "-path" "$keep_pattern") keep_pattern=$(dirname "$keep_pattern") if [ x"$keep_pattern" = x"$dir" ]; then break fi # This assert will trigger if one of keep_patterns doesn't start # with "$dir": eventually dirname will get to either "/" or ".". assert [ "$keep_pattern" != "/" -a "$keep_pattern" != "." ] done done find "$dir" "${find_opts[@]}" -delete ) } # $@: Jenkins labels # Prints nodes corresponding to jenkins labels. print_nodes_in_labels () { ( set -euf -o pipefail local labels=( "$@" ) local label local tmpfile tmpfile=$(mktemp) for label in "${labels[@]}"; do # Handle gracefully the case where the label contains no # machine: we don't want to generate an error, rather return # an empty string. Use a tmp file so that we still generate an # error in case wget fails. wget --retry-connrefused --waitretry=1 -O - https://ci.linaro.org/label/$label/api/json?pretty=true 2>/dev/null > $tmpfile grep nodeName $tmpfile | cut -d: -f 2 | sed -e 's/"//g' || true done rm $tmpfile ) } # $@: Jenkins labels, typically tcwg-t[kx]1_{32/64}-test # Returns node from one of the labels with least number of running containers. print_node_with_least_containers () { ( set -euf -o pipefail local tester_labels=( "$@" ) local testers local load_value local tester_min_load_name="" local tester_min_load_value="999" local ret # Re. --random-sort below: shuffle node list to mitigate races # when starting multiple containers at the same time testers=$(print_nodes_in_labels ${tester_labels[*]} | sort --random-sort) for tester in $testers; do ret=0 tester_host=$(print_host_for_node $tester "ignore_fail") if [ x"${tester_host}" != x"" ]; then load_value=$(timeout 30s ssh ${tester_host} docker ps | wc -l) || ret=$? if [ $ret -eq 0 ]; then if [ "$load_value" -lt "$tester_min_load_value" ]; then tester_min_load_name=$tester tester_min_load_value=$load_value fi fi fi done echo $tester_min_load_name ) } # $1: Jenkins tcwg-*-build label # Prints out architecture for container image print_arch_for_label () { ( set -euf -o pipefail local label="$1" case $label in tcwg-x86_64-*) echo amd64 ;; tcwg-x86_32-*) echo i386 ;; tcwg-amp_64-*|tcwg-apm_64-*|tcwg-armv8_64|tcwg-d05_64-*|tcwg-lc_64*|tcwg-sq_64-*|tcwg-thx1_64-*|tcwg-tx1_64-*) echo arm64 ;; tcwg-amp_32-*|tcwg-apm_32-*|tcwg-armv8_32|tcwg-d05_32-*|tcwg-sq_32-*|tcwg-tk1_32-*|tcwg-tx1_32-*) echo armhf ;; *) echo "ERROR: Unsupported label: $label" >&2; exit 1 ;; esac ) } # $1: Jenkins tcwg-*-build label # Prints out host type print_type_for_label () { ( set -euf -o pipefail echo "$1" | sed -e "s/^tcwg-\(.*\)-build\$/\1/" ) } # $1: Jenkins $NODE_NAME # $2: (optional) "ignore_fail" # Prints SSH host print_host_for_node () { ( set -euf -o pipefail local host # All jenkins nodes have either or .tcwglab entry in # .ssh/config (in dockerfiles.git/tcwg-base/tcwg-buildslave/). for suffix in "" ".tcwglab"; do host="$1$suffix" if timeout 30s ssh "$host" true >& /dev/null; then break fi host="" done if [ x"$host" = x"" ] && [ x"${2-}" != x"ignore_fail" ]; then echo "Error: print_host_for_node() cannot ssh to $1 or $1.tcwglab" >&2 exit 1 fi echo "$host" ) } # $1: Host name or "localhost". # Prints docker-friendly arch of host print_arch_for_host () { ( set -euf -o pipefail local host="$1" local arch case "$host" in "localhost") arch=$(uname -m) case "$arch" in "aarch64") arch="arm64" ;; "arm"*) arch="armhf" ;; "x86_64") arch="amd64" ;; *) echo "ERROR: Unknown uname -m arch: $arch" >&2; exit 1 ;; esac echo "$arch" ;; *) # While not strictly correct, print_arch_for_label is relaxed # enough to handle this. print_arch_for_label "$host" ;; esac ) } # $1: target triplet # Prints tester label for remote cross-testing print_tester_label_for_target () { ( set -euf -o pipefail local target="$1" case "$target" in aarch64-linux-gnu_ilp32) # We test ILP32 using QEMU KVM, and TX1s run 3.10 kernel that # doesn't support KVM. Test on APM builders for now. echo "tcwg-apm_64-build" ;; # We allocate all TK1/TX1 boards to benchmarking, so use APMs # for cross-testing. This means we no longer test on armv7 # hardware. aarch64-linux*) echo "tcwg-apm_64-test" ;; armv8l-linux*) echo "tcwg-apm_32-test" ;; arm-linux*) echo "tcwg-apm_32-test" ;; esac ) } # Run command on remote machine in given directory via ssh on a given port # "$1" -- [:[:[:[:]]]] # "$2, $3, etc" -- command and its arguments # E.g., remote_exec dev-01.tcwglab::/tmp find -name "my file.bak" # NOTE: The environment variables are not escaped, so pass only simple things. # This is because we want ability to pass multiple variables "a=b c=d", # and escaping will make that into a single a="b c=d" variable. remote_exec () { ( set -euf -o pipefail local host host="$(echo "$1" | cut -d: -f 1)" local port port="$(echo "$1" | cut -s -d: -f 2)" local dir dir="$(echo "$1" | cut -s -d: -f 3)" local opts opts="$(echo "$1" | cut -s -d: -f 4)" local env_vars env_vars="$(echo "$1" | cut -s -d: -f 5)" shift local -a cmd cmd=() # Add quotes to every parameter for i in "$@"; do cmd+=("$(printf '%q' "$i")"); done # Be careful to prepend statements before ${cmd[@]} only if necessary. # E.g., when triggering jobs via jenkins-cli, the command is not a binary, # so we can't "exec" it. # We use flock if $JENKINS_FLOCK is set. ${JENKINS_FLOCK+$JENKINS_FLOCK} ssh $opts ${port:+-p$port} $host "${env_vars:+export $env_vars && }${dir:+cd "$(printf '%q' "$dir")" && exec }${cmd[*]}" ) } # Resolve git ref to sha1 # $1 -- repo directory # $2 -- branch, tag or refspec # $3 -- remote name # $4 -- extra options to git rev-parse git_rev_parse_1 () { ( set -euf -o pipefail local dir="$1" local ref="$2" local remote="$3" local opts="$4" local ret cd "$dir" # Convert git branch/tag names into SHA1 local sha1 try_ref case "$ref" in "refs/"*) try_ref="$ref";; *) try_ref="refs/remotes/$remote/$ref" ;; esac ret=0; sha1=$(git rev-parse $opts "$try_ref" 2>/dev/null) || ret=$? if [ $ret -ne 0 ]; then # Assume that $ref is already a SHA1 ret=0; sha1=$(git rev-parse $opts "$ref") || ret=$? if [ $ret -ne 0 ]; then echo "ERROR: Cannot parse $ref in repo $dir" >&2 exit 1 fi fi echo "$sha1" ) } # Resolve git ref to short sha1 # $1 -- repo directory # $2 -- branch, tag or refspec # $3 -- (optional) remote name git_rev_parse () { ( set -euf -o pipefail local dir="$1" local ref="$2" local remote="origin" if [ $# -ge 3 ]; then remote="$3" fi git_rev_parse_1 "$dir" "$ref" "$remote" "--short" ) } # Resolve git ref to full sha1 # $1 -- repo directory # $2 -- branch, tag or refspec # $3 -- (optional) remote name git_rev_parse_long () { ( set -euf -o pipefail local dir="$1" local ref="$2" local remote="origin" if [ $# -ge 3 ]; then remote="$3" fi git_rev_parse_1 "$dir" "$ref" "$remote" "" ) } # Run a command with the timeout program, with retries if the command times # out. # $1: is the DURATION to pass to timeout. # $2: is the maximum number of attempts at running the command. # $@: remainder is the command to run # Returns the status from the timeout command. # # Example: run_with_timeout_and_retry 90s 3 git checkout /large/repo function run_with_timeout_and_retry { local duration=$1 shift local tries=$1 shift local n=0 local ret=0 until [ ${n} -eq ${tries} ] do echo timeout "${duration}" "$@" timeout "${duration}" "$@" & ret=0 && wait $! || ret=$? if [ ${ret} -eq 0 ]; then break; fi # Handle special errors case "$@" in # git clone failed, removed the (incomplete) directory # (last parameter). # We could try to be smarter and handle the case where git # clone actually timed-out and where git suggests to retry # the checkout with 'git checkout -f HEAD', but this seems # a bit awkward for a case that happens very rarely. "git clone") # In case there was no (optional) $dir parameter, this # will expand to the repo URL, which probably does not # exist as a directory name. Still, don't try to # remove a directory named *://*. dir=$(echo "$@" | awk '{print $NF;}') case "$dir" in *://*) ;; *) if [ -d $dir ]; then rm -rf $dir fi ;; esac ;; esac n=$((n + 1)) done return ${ret} } # Configure git remote # $1: git repo directory # $2: remote name # $3: git url # $4: optional single branch git_set_remote () { ( set -euf -o pipefail local dir="$1" local remote="$2" local url="$3" local single_branch="${4-}" git -C "$dir" remote rm "$remote" > /dev/null 2>&1 || true git -C "$dir" remote add ${single_branch:+-t "$single_branch"} "$remote" "$url" ) } # Clone or update a git repo # $1 -- repo directory # $2 -- master git repo # $3 -- reference git repo (to speedup initial cloning) # $4 -- single-branch to reduce fetching from remote repo # $5 -- name of remote clone_or_update_repo_no_checkout () { ( set -euf -o pipefail local dir="$1" local url="$2" local reference="$3" local single_branch="$4" local remote="$5" local refopt="" case "$reference" in auto) local ref_dir for ref_dir in $url $dir; do ref_dir=$(basename $ref_dir .git) ref_dir="/home/tcwg-buildslave/snapshots-ref/$ref_dir.git" if git -C $ref_dir rev-parse --git-dir >/dev/null 2>&1; then refopt="--reference $ref_dir" break fi done ;; none) ;; *) refopt="--reference $reference" ;; esac if ! git -C "$dir" status >/dev/null 2>&1; then # Git repo doesn't exist or is corrupted. Make a new clone. rm -rf "$dir" local single_branch_opt="" if [ x"$single_branch" != x"" ]; then single_branch_opt="--single-branch --branch $single_branch" fi run_with_timeout_and_retry 1h 3 git clone $refopt $single_branch_opt "$url" "$dir" else # Clean up the clone (this is supposed to re-share objects from # reference clone and keep the size of the clone minimal). # It's possible that previous GC process was interrupted and left # a lock. Use --force to workaround that. It should be safe # to override the lock since directories should not be shared # between concurrent builds. # # Also, prune all loose objects to avoid "git gc --auto" failing # and creating .git/gc.log to warn us. rm -f "$dir/.git/gc.log" git -C "$dir" gc --auto --force --prune=all # Delete stale locks -- especially .git/refs/remotes/REMOTE/BRANCH.lock # These occur when builds are aborted during "git remote update" or similar. find "$dir/.git" -name "*.lock" -delete fi git_set_remote "$dir" "$remote" "$url" "$single_branch" local refspec if [ x"$single_branch" = x"" ]; then run_with_timeout_and_retry 1h 3 git -C "$dir" remote update -p "$remote" refspec="+refs/changes/*:refs/changes/*" else refspec="+refs/heads/$single_branch:refs/remotes/$remote/$single_branch" fi run_with_timeout_and_retry 1h 3 git -C "$dir" fetch -q $remote $refspec --prune ) } # Checkout branch/ref/SHA1 in a git repo # $1 -- repo directory # $2 -- ref to checkout # $3 -- name of the git remote git_checkout () { ( set -euf -o pipefail local dir="$1" local ref="$2" local remote="$3" git_clean "$dir" # Convert git branch/tag names into SHA1 local sha1 sha1=$(git_rev_parse "$dir" "$ref" "$remote") # Checkout git -C "$dir" checkout --detach "$sha1" ) } # Clone or update a git repo # $1 -- repo directory # $2 -- ref to checkout # $3 -- master git repo # $4 -- optional reference git repo (to speedup initial cloning) # $5 -- optional single-branch to reduce fetching from remote repo # $6 -- optional name of remote (default is "origin") clone_or_update_repo () { ( set -euf -o pipefail local dir="$1" local ref="$2" local url="$3" local reference="${4-auto}" local single_branch="${5-}" local remote="${6-origin}" clone_or_update_repo_no_checkout "$dir" "$url" "$reference" \ "$single_branch" "$remote" git_checkout "$dir" "$ref" "$remote" ) } # Print baseline git repo # $1 -- project name # $3 -- whether to make the new remote read-only or read-write. print_baseline_repo () { ( set -euf -o pipefail local dir="$1" local read_only="$2" local repo case "$dir" in binutils) repo=binutils-gdb.git ;; llvm) repo=llvm-project.git ;; *) repo=$dir.git ;; esac # Use git-us.l.o to avoid delays between review.l.o and git.l.o local url="git-us.linaro.org/toolchain/ci/$repo" if $read_only; then url="https://$url" else # Use gitolite access. Gerrit's ssh access verifies pushed commits, # which can slow-down server on big pushes. url="ssh://$url" fi echo "$url" ) } # Wget files from URL that may have wildcards; only the last "basename" # part of URL is allowed to contain wildcards. Safe to use on normal URLs. # Return N-1 of files downloaded, or 127 if no files were downloaded. # $1 -- URL # $2,... -- additional parameters to wget wget_wildcard_url () { ( set -eu -o pipefail local url="$1" shift local url_basename url_basename="$(basename "$url")" local tmpdir tmpdir="$(mktemp -d)" wget_opts="" case "$(echo "$url" | cut -d/ -f3)" in *".tcwglab") wget_opts="$wget_opts --no-check-certificate" ;; esac # $(dirname "$url") may not be a valid URL. Since we only use '*' # as wildcards, check if a '*' is present in $url_basename, and if # not, do a direct wget on $url to avoid accessing $(dirname "$url") if echo "$url_basename" | grep '\*' ; then wget --progress=dot:giga -r --no-parent --no-directories --level 1 "--directory-prefix=$tmpdir" -A "$url_basename" $wget_opts "$@" "$(dirname "$url")/" else wget --progress=dot:giga -r --no-parent --no-directories --level 1 "--directory-prefix=$tmpdir" $wget_opts "$@" "$url" fi local count=-1 for i in "$tmpdir"/$url_basename; do mv "$i" . count=$((count+1)) done rm -rf "$tmpdir" return $count ) } # Fetch a tarball using wget_wildcard_url and untar it into a directory # named after the tarball. # $1 -- URL # $2 -- base directory to untar to # $3 -- extra tar options, e.g. "--strip-components 1" untar_url () { ( set -eu -o pipefail local url="$1" local basedir="$2" local taropts="$3" local tarball local dirname wget_wildcard_url "$url" # shellcheck disable=SC2046 tarball="$(ls $(basename "$url"))" dirname="$basedir/${tarball%.tar*}" mkdir "$dirname" tar xf "$tarball" --directory "$dirname" $taropts echo "$dirname" ) } # Wait until the ssh server is ready to accept connexions # $1: host # $2: port # $3: retry count (optional) # Returns 0 on success, 1 in case of error wait_for_ssh_server () { ( set -euf -o pipefail local session_host="$1" local session_port="$2" local count="${3-20}" while [ $count -gt 0 ] do timeout 30s ssh -p $session_port $session_host true && break echo "SSH server not ready, waiting....." sleep 5 count=$((count - 1)) done if [ $count -eq 0 ]; then echo "ERROR: SSH server did not respond ($session_host:$session_port)" return 1 fi return 0 ) } # Print CPU share allocation for $task and $weight. # $1: task # $2: weight print_cpu_shares () { ( set -euf -o pipefail local task="$1" local weight="$2" local cpus cpus=$(( $weight * 1000 )) # 1000 cpu shares per executor echo "$cpus" ) } # Print memory allocation for $task and $weight. # $1: task # $2: weight # $3: number of expected parallel processes # $4: amount of system RAM in MB print_memory_limit () { ( set -euf -o pipefail local task="$1" local weight="$2" local nproc="$3" local memlimit="$4" local memory case "$task" in build) # 2GB per compilation core, with 4GB minimum and # half of total system RAM maximum. memory=$(( 2000 * $weight * $nproc )) memlimit=$(( $memlimit / 2 )) if [ "$memlimit" -lt "4000" ]; then # Don't limit memory on machines with less than 8GB RAM. memory="unlimited" else # Use at most half of RAM if [ "$memory" -gt "$memlimit" ]; then memory="$memlimit" fi fi ;; test) # 0.75GB per session memory=$(( 750 * $weight )) ;; bench) memory="unlimited" ;; esac echo "$memory" ) } # Print PID allocation for $task and $weight. # $1: task # $2: weight print_pids_limit () { ( set -euf -o pipefail local task="$1" local weight="$2" local pids pids=$(( $weight * 5000 )) # 5000 processes per executor echo "$pids" ) } # Print default bind mounts for $task # $1: task print_bind_mounts () { ( set -euf -o pipefail local task="$1" local ssh="$2" local -a bind_mounts case $task in bench|build) if [ x"${WORKSPACE+set}" = x"set" ]; then bind_mounts+=("$WORKSPACE") fi ;; esac case $task in build) bind_mounts+=(/home/tcwg-buildslave/snapshots-ref:ro) ;; bench) bind_mounts+=(/home/shared/git:ro) ;; esac local key for key in $($ssh find /etc/ssh/ -name "ssh_host_*_key" \ -o -name "ssh_host_*_key.pub"); do bind_mounts+=("$key:ro") done echo "${bind_mounts[@]:+${bind_mounts[@]}}" ) } # Print default volume mounts for $job # $1: job # $2: Suffix to be appended to the volume names (e.g., -$container_arch-$distro) print_volume_mounts () { ( set -euf -o pipefail local job="$1" local suffix="$2" local -a mounts local volume_id case "$job" in tcwg_*-*) # Add ccache volume for tcwg_* jobs. # These jobs depend on ccache for fast rebuilds of LLVM and GCC with # the host compiler. # tcwg_* jobs use per-executor WORKSPACES, and ccache uses separate # cache entries for different paths. Therefore we need to use # separate caches for different $WORKSPACES. Otherwise we get # a lot of cache polution on high-executor machines, e.g., for # tcwg_bmk builds on tcwg-x86_64-dev-01 node. local prefix if [ x"${WORKSPACE+set}" = x"set" ]; then prefix=$(basename $WORKSPACE) else prefix=$(echo $job | cut -d- -f 1) fi volume_id=$(print_docker_name "$prefix$suffix") mounts+=(ccache-"$volume_id":"$HOME"/.ccache) ;; esac case "$job" in tcwg_bmk*) # Add scratch mount for tcwg-benchmark's $HOME. # tcwg_bmk-* jobs trigger tcwg-benchmark jenkins jobs, which # then ssh to the build container to compile benchmark objects # and then link them into executables (via ssh:// toolchain_url # parameter -- see tcwg_bmk-build.sh:benchmark()). # This generates a fair bit of disk trafic on /home/tcwg-benchmark, # and it's best to use docker scratch volume, rather than overlayfs. mounts+=(/home/tcwg-benchmark) ;; esac echo "${mounts[@]:+${mounts[@]}}" ) } # Return zero if bash array is defined. # $1: Name of bash array test_array() { local var var="$1[@]" if [ x"${!var+set}" = x"set" ]; then return 0 else return 1 fi } # Manifest filename for manifest_out and convert_args_to_variables to write to. # This is set using "%%" directive to convert_args_to_variables. # This is a stack maintained by manifest_push and manifest_pop. __manifest_filename=("/dev/null") # Set new file name for manifest # $1: File name manifest_push () { local filename="$1" # Resolve absolute path to manifest. local dir dir=$(dirname "$filename") mkdir -p "$dir" dir=$(cd "$dir"; pwd) __manifest_filename=("$dir/$(basename "$filename")" "${__manifest_filename[@]}") rm -f "${__manifest_filename[0]}" } # Return to previous manifest filename manifest_pop () { __manifest_filename=("${__manifest_filename[@]:1}") } # Output stdout to the manifest file. Most common case would be # cat << EOF | manifest_out # # Component revision # component_rev="$component_rev" # EOF manifest_out () { cat >> "${__manifest_filename[0]}" } # Process "--var value" and "++arr elem" arguments and define corresponding # variables and arrays. # "--var value" defines shell variable "$var" to "value". # "++arr elem" defines shell array "$arr[@]" and adds "elem" to it. # "==arr[key] value" defines shell associative array "$arr[@]" and sets # "${arr[key]}" to "value". # "@@ file" sources file. # "%% file" starts manifest in file. Also see "^^ true". # "^^ true/false %% manifest" whether to reproduce the build using manifest. # If "true" -- source manifest instead of generating it, then discard # all following options at to separator "--". # If "false" -- do nothing and proceed as usual. # # Shell array $CONVERTED_ARGS is set to the arguments processed. # Shell variable $SHIFT_CONVERTED_ARGS is set to number of arguments processed. # $@: Pairs of def/val arguments, stops at "--" marker. convert_args_to_variables () { local arr name num local total="0" eval "CONVERTED_ARGS=(--)" while [ $# -gt 0 ]; do case "$1" in "--") # Finish marker total=$(($total+1)) shift 1 break ;; "--"*) name="${1#--}" declare -g "$name=$2" cat <'" exit 1 fi # Source the manifest for reproduction. # shellcheck disable=SC1090 source "$4" # Skip processing all following arguments. num=0 for i in "$@"; do if [ x"$i" = x"--" ]; then break fi num=$(($num+1)) done else num=2 fi ;; *) echo "ERROR: option does not start with '--' / '++' / '@@' / '%%' : $1" exit 1 ;; esac total=$(($total+$num)) while [ $num -gt 0 ]; do eval "CONVERTED_ARGS+=(\"$1\")" shift 1 num=$(($num-1)) done done eval "SHIFT_CONVERTED_ARGS=$total" cat <&2; exit 1 ;; esac echo "$target" ) } # Print LLVM-friendly target for "uname -m" target # $1: "uname -m"-style target or "native" print_llvm_target () { ( set -euf -o pipefail local target="$1" if [ x"$target" = x"native" ]; then target=$(uname -m) fi case "$target" in "aarch64") target="AArch64" ;; "arm"*) target="ARM" ;; "x86_64") target="X86" ;; *) echo "ERROR: Unknown target $target" >&2; exit 1 ;; esac echo "$target" ) } # Print Linux make-friendly target for "uname -m" target # $1: "uname -m"-style target or "native" print_kernel_target () { ( set -euf -o pipefail local target="$1" if [ x"$target" = x"native" ]; then target=$(uname -m) fi case "$target" in "aarch64") target="arm64" ;; "arm"*) target="arm" ;; *) echo "ERROR: Unknown target $target" >&2; exit 1 ;; esac echo "$target" ) } # Thoroughly clean git repo, leave only .git/ directory # $1: Git clone directory git_clean () { ( set -euf -o pipefail fresh_dir "$1" "$1/.git/*" git -C "$1" reset --hard ) } # Add git remote pointing to linaro's git repo/mirrors with writable # toolchain/ci/* repo. Deduce repo's URL from URL of existing # "origin" git remote. # $1: Git clone directory (must have "origin" remote configured) # $2: Name of the new remote. # $3: Whether to make the new remote read-only or read-write. git_init_linaro_local_remote () { ( set -euf -o pipefail local dir="$1" local remote="$2" local read_only="$3" local origin_url local new_url origin_url=$(git -C "$dir" remote get-url origin) # Figure out mirror repo on linaro's servers. case "$origin_url" in *"kernel.org/"*"/linux"*) new_url="toolchain/ci/linux.git" ;; *"linaro.org/toolchain/gcc-compare-results.git") new_url="toolchain/gcc-compare-results.git" ;; *) new_url="toolchain/ci/$(basename $origin_url)" ;; esac # Use git-us.l.o to avoid delays between review.l.o and git.l.o new_url="git-us.linaro.org/$new_url" if $read_only; then new_url="https://$new_url" else # Use gitolite access. Gerrit's ssh access verifies pushed commits, # which can slow-down server on big pushes. new_url="ssh://$new_url" fi git_set_remote "$dir" "$remote" "$new_url" ) } # Push HEAD of git repo to a given remote/branch # $1: Git clone dir # $2: remote name # $3: branch to force push to git_push () { ( set -euf -o pipefail local dir="$1" local remote="$2" local branch="$3" cd $dir case "$branch" in "refs/"*) ;; *) branch=refs/heads/$branch ;; esac git push --force $remote HEAD:$branch ) } # Initialize run_step state # $1: Step to start execution at (or "" to start at the very first step) # $2: Step to finish execution at (or "" to run till the very end) # $3: Top artifact directory # $4: Whether to enable "set -x" verbosity for execution steps. run_step_init () { run_step_start_at="$1" run_step_finish_at="$2" run_step_top_artifacts="$3" run_step_verbose="$4" echo "run_step_init: starting at step \"$run_step_start_at\" \ finishing at step \"$run_step_finish_at\"" run_step_count="0" run_step_prev_step="" run_step_active=false run_step_status=0 run_step_artifacts="" # We need absolute paths for $run_step_artifacts, which is constructed from # $run_step_top_artifacts. mkdir -p "$run_step_top_artifacts" run_step_top_artifacts=$(cd "$run_step_top_artifacts"; pwd) rm -f $run_step_top_artifacts/console.log rm -f $run_step_top_artifacts/results } # Run execution step and handle its failure as requested # This function manages # 1. step skipping -- skip steps before START_AT and after FINISH_AT, # as well as "skip_on_fail" steps during failure. # 2. artifact handling -- create/clean artifact directories per step. # Also, copy baseline artifacts for steps before START_AT to simulate # skipped steps. # Step commands have $run_step_artifacts pointing to artifact directory # for current step. # 3. logging -- dump stdout and and stderr output of step commands # into per-step console.log files, and, also, into the top-level # console.log file. # 4. result handling -- output provided success result to artifacts/results # for successful steps. Special value "x" means to let the step itself # update artifacts/results. Results are written to artifacts/results # for both skipped and executed steps as long as $run_step_status doesn't # indicate failure. # 5. in run mode "stop_on_fail" -- kill script if $run_step_status indicates # failure, otherwise run the step and kill script on failure. # 6. in run mode "skip_on_fail" -- skip step if $run_step_status indicates # a failure, otherwise run the step and set $run_step_status to the exit # status of the step. Normally, a series of "skip_on_fail" steps should # be followed by "reset_on_fail" step that handles the cumulative result. # 7. in run mode "reset_on_fail" -- run the step regardless and set # $run_step_status to the exit status of the step (thus resetting it). # $1: Run mode: stop_on_fail, skip_on_fail, reset_on_fail. # $2: Result to write to artifacts/results in the absence of failures. # $@: Step command and its arguments run_step () { local run_mode="$1" local success_result="$2" shift 2 local -a step local pretty_step step=("$@") pretty_step="$1" shift while [ $# -gt 0 ]; do if [ x"$1" = x"--" ]; then break fi pretty_step="$pretty_step-$1" shift done pretty_step=$(echo "$pretty_step" | tr " /" "-") run_step_count=$(($run_step_count+1)) # Start running steps if: # the current step is the starting step OR # we haven't run any steps yet and # there is no set starting step if [ x"$pretty_step" = x"$run_step_start_at" ] || \ ( [ x"$run_step_start_at" = x"" ] && \ [ x"$run_step_prev_step" = x"" ] ); then run_step_active=true fi if $run_step_active; then local skip=false case "$run_step_status:$run_mode" in 0:*) ;; $EXTERNAL_FAIL:stop_on_fail) echo "STOPPING before ${step[*]} due to previous external failure" return $EXTERNAL_FAIL ;; *:stop_on_fail) echo "STOPPING before ${step[*]} due to previous internal failure" return $INTERNAL_FAIL ;; *:skip_on_fail) echo "SKIPPING ${step[*]} due to previous failure" skip=true ;; *:reset_on_fail) echo "HANDLING ${step[*]} will handle previous failure" ;; *) assert false esac if ! $skip; then local full_step_name full_step_name=$(printf "%02d" $run_step_count)-$pretty_step # This is used when accessing the workspace run_step_artifacts=$run_step_top_artifacts/$full_step_name local log_url="" if [ -v BUILD_URL ]; then # Link to jenkins, valid once the job has finished log_url="(${BUILD_URL}artifact/artifacts/$full_step_name/console.log)" fi rm -rf "$run_step_artifacts" mkdir -p "$run_step_artifacts" echo "RUNNING ${step[*]}; see tail -f $run_step_artifacts/console.log" $log_url run_step_status=0 eval "if $run_step_verbose; then set -x; else set +x; fi; ${step[*]}" 2>&1 | ts -s "%T" | tee -a $run_step_top_artifacts/console.log > $run_step_artifacts/console.log & wait $! || run_step_status=$? case "$run_step_status:$run_mode" in 0:*) ;; $EXTERNAL_FAIL:stop_on_fail|$EXTERNAL_FAIL:reset_on_fail) echo "STOPPING at ${step[*]} due to external failure" return $EXTERNAL_FAIL ;; *:stop_on_fail|*:reset_on_fail) echo "STOPPING at ${step[*]} due to internal failure" return $INTERNAL_FAIL ;; *:skip_on_fail) echo "CARRYING ON after failure in ${step[*]}" ;; *) assert false esac fi else echo "SKIPPING ${step[*]}" fi if [ x"$run_step_status" = x"0" ] && [ x"$success_result" != x"x" ]; then cat >> $run_step_top_artifacts/results <=1 to skip the trap handler entry # Start from end-2 to skip the top level "main" entry # which isn't useful for (( i=${#FUNCNAME[@]}-2 ; i>=1 ; i-- )) ; do source_file=${BASH_SOURCE[$i+1]} line_no=${BASH_LINENO[$i]} echo " File: $source_file, line $line_no" # Remove leading whitespace to keep indentation readable echo " $(sed -e "${line_no}!d" -e 's/^[[:space:]]*//' "$source_file")" done # We don't know the line number of the exit itself when we trap EXIT echo " File: ${BASH_SOURCE[0]}, line ${BASH_LINENO[0]}" echo " (trap handler, exit line unknown, exit status was $exit_status)" ;; esac }