#!/bin/bash # Assert that $@ returns success. assert () { ( set -euf -o pipefail eval "$@" ) } # Cleanup contents of directory preserving specified parts. # $1: Directory to clean # $@: Find patterns of files to preserve (must start with $1) fresh_dir () { ( set -euf -o pipefail local dir="$1" shift 1 # Make sure $dir doesn't have "/" at the end. dir=$(dirname "$dir/something") mkdir -p "$dir" find_opts=("!" "-path" "$dir") for keep_pattern in "$@"; do while : ; do find_opts+=("!" "-path" "$keep_pattern") keep_pattern=$(dirname "$keep_pattern") if [ x"$keep_pattern" = x"$dir" ]; then break fi # This assert will trigger if one of keep_patterns doesn't start # with "$dir": eventually dirname will get to either "/" or ".". assert [ "$keep_pattern" != "/" -a "$keep_pattern" != "." ] done done find "$dir" "${find_opts[@]}" -delete ) } # $@: Jenkins labels # Prints nodes corresponding to jenkins labels. print_nodes_in_labels () { ( set -euf -o pipefail local labels="$@" local label for label in $labels; do wget --retry-connrefused --waitretry=1 -O - https://ci.linaro.org/label/$label/api/json?pretty=true 2>/dev/null | grep nodeName | cut -d: -f 2 | sed -e 's/"//g' done ) } # $@: Jenkins labels, typically tcwg-t[kx]1_{32/64}-test # Returns node from one of the labels with least number of running containers. print_node_with_least_containers () { ( set -euf -o pipefail local tester_labels="$@" local tester local testers local load_value local tester_min_load_name="" local tester_min_load_value="999" local ret # Re. --random-sort below: shuffle node list to mitigate races # when starting multiple containers at the same time testers=$(print_nodes_in_labels $tester_labels | sort --random-sort) for tester in $testers; do ret=0 tester_host=$(print_host_for_node $tester) load_value=$(timeout 10s ssh ${tester_host} docker ps | wc -l) || ret=$? if [ $ret -eq 0 ]; then if [ "$load_value" -lt "$tester_min_load_value" ]; then tester_min_load_name=$tester tester_min_load_value=$load_value fi fi done echo $tester_min_load_name ) } # $1: Jenkins tcwg-*-build label # Prints out architecture for container image print_arch_for_label () { ( set -euf -o pipefail local label="$1" case $label in tcwg-x86_64-*) echo amd64 ;; tcwg-x86_32-*) echo i386 ;; tcwg-tx1_64-*|tcwg-apm_64-*|tcwg-d05_64-*|tcwg-sq_64-*|tcwg-thx1_64-*) echo arm64 ;; tcwg-tk1_32-*|tcwg-tx1_32-*|tcwg-apm_32-*|tcwg-d05_32-*|tcwg-sq_32-*) echo armhf ;; *) echo "ERROR: Unsupported label: $label" >&2; exit 1 ;; esac ) } # $1: Jenkins tcwg-*-build label # Prints out host type print_type_for_label () { ( set -euf -o pipefail echo "$1" | sed -e "s/^tcwg-\(.*\)-build\$/\1/" ) } # $1: Jenkins tcwg node # Prints out hardware type, which is second to last field print_hw_id_for_node () { ( set -euf -o pipefail local hw_id hw_id=$(echo "$1" | sed -e "s/^tcwg.*-\([^-]\+\)-[0-9]\+/\1/") if [ x"$hw_id" = x"$1" ]; then echo "ERROR: Bad node name: $1" >&2 exit 1 fi echo "$hw_id" ) } # $1: Jenkins $NODE_NAME # Prints DNS hostname print_host_for_node () { ( set -euf -o pipefail # We first check if $1 is known by the DNS inside .tcwglab. If # not, it might be an external machine for which we have # configured ssh local host="$1.tcwglab" if ! host "$host" >& /dev/null; then # No DNS entry, try to see if it is handled by our ssh config if ! ssh "$1" true >& /dev/null; then echo "Error: no DNS entry and no SSH configuration for $host" >&2 exit 1 else host="$1" fi fi echo "$host" ) } # $1: Host name or "localhost". # Prints docker-friendly arch of host print_arch_for_host () { ( set -euf -o pipefail local host="$1" local arch case "$host" in "localhost") arch=$(uname -m) case "$arch" in "aarch64") arch="arm64" ;; "arm"*) arch="armhf" ;; "x86_64") arch="amd64" ;; *) echo "ERROR: Unknown uname -m arch: $arch" >&2; exit 1 ;; esac echo "$arch" ;; *) # While not strictly correct, print_arch_for_label is relaxed # enough to handle this. print_arch_for_label "$host" ;; esac ) } # $1: target triplet # Prints tester label for remote cross-testing print_tester_label_for_target () { ( set -euf -o pipefail local target="$1" case "$target" in aarch64-linux-gnu_ilp32) # We test ILP32 using QEMU KVM, and TX1s run 3.10 kernel that # doesn't support KVM. Test on APM builders for now. echo "tcwg-apm_64-build" ;; aarch64-linux*) echo "tcwg-tx1_64-test" ;; armv8l-linux*) echo "tcwg-tx1_32-test" ;; arm-linux*) echo "tcwg-tk1_32-test" ;; esac ) } # Run command on remote machine in given directory via ssh on a given port # "$1" -- [:[:[:]]] # "$2, $3, etc" -- command and its arguments # E.g., remote_exec dev-01.tcwglab::/tmp find -name "my file.bak" remote_exec () { ( set -euf -o pipefail local host="$(echo $1 | cut -d: -f 1)" local port="$(echo $1 | cut -s -d: -f 2)" local dir="$(echo $1 | cut -s -d: -f 3)" local opts="$(echo $1 | cut -s -d: -f 4)" shift local -a cmd cmd=() # Add quotes to every parameter for i in "$@"; do cmd+=("$(printf '%q' "$i")"); done ssh $opts ${port:+-p$port} $host "${dir:+cd "$(printf '%q' "$dir")" &&} exec ${cmd[@]}" ) } # Resolve git ref to sha1 # $1 -- repo directory # $2 -- branch, tag or refspec # $3 -- remote name # $4 -- extra options to git rev-parse git_rev_parse_1 () { ( set -euf -o pipefail local dir="$1" local ref="$2" local remote="$3" local opts="$4" local ret cd "$dir" # Convert git branch/tag names into SHA1 local sha1 try_ref case "$ref" in "refs/"*) try_ref="$ref";; *) try_ref="refs/remotes/$remote/$ref" ;; esac ret=0; sha1=$(git rev-parse $opts "$try_ref" 2>/dev/null) || ret=$? if [ $ret -ne 0 ]; then # Assume that $ref is already a SHA1 ret=0; sha1=$(git rev-parse $opts "$ref") || ret=$? if [ $ret -ne 0 ]; then echo "ERROR: Cannot parse $ref in repo $dir" >&2 exit 1 fi fi echo "$sha1" ) } # Resolve git ref to short sha1 # $1 -- repo directory # $2 -- branch, tag or refspec # $3 -- (optional) remote name git_rev_parse () { ( set -euf -o pipefail local dir="$1" local ref="$2" local remote="origin" if [ $# -ge 3 ]; then remote="$3" fi git_rev_parse_1 "$dir" "$ref" "$remote" "--short" ) } # Resolve git ref to full sha1 # $1 -- repo directory # $2 -- branch, tag or refspec # $3 -- (optional) remote name git_rev_parse_long () { ( set -euf -o pipefail local dir="$1" local ref="$2" local remote="origin" if [ $# -ge 3 ]; then remote="$3" fi git_rev_parse_1 "$dir" "$ref" "$remote" "" ) } # Run a command with the timeout program, with retries if the command times # out. # $1: is the DURATION to pass to timeout. # $2: is the maximum number of attempts at running the command. # $@: remainder is the command to run # Returns the status from the timeout command. # # Example: run_with_timeout_and_retry 90s 3 git checkout /large/repo function run_with_timeout_and_retry { local duration=$1 shift local tries=$1 shift local n=0 local ret=0 until [ ${n} -eq ${tries} ] do echo timeout "${duration}" "$@" timeout "${duration}" "$@" & ret=0 && wait $! || ret=$? if [ ${ret} -eq 0 ]; then break; fi n=$((n + 1)) done return ${ret} } # Clone or update a git repo # $1 -- repo directory # $2 -- master git repo # $3 -- reference git repo (to speedup initial cloning) clone_or_update_repo_no_checkout () { ( set -euf -o pipefail local dir="$1" local url="$2" local reference="$3" local single_branch="$4" local refopt="" case "$reference" in auto) local ref_dir for ref_dir in $url $dir; do ref_dir=$(basename $ref_dir .git) ref_dir="/home/tcwg-buildslave/snapshots-ref/$ref_dir.git" if git -C $ref_dir rev-parse --git-dir >/dev/null 2>&1; then refopt="--reference $ref_dir" break fi done ;; none) ;; *) refopt="--reference $reference" ;; esac if ! [ -d "$dir/.git" ]; then rm -rf "$dir" local single_branch_opt="" if [ x"$single_branch" != x"" ]; then single_branch_opt="--single-branch --branch $single_branch" fi run_with_timeout_and_retry 1h 3 git clone $refopt $single_branch_opt "$url" "$dir" else # Clean up the clone (this is supposed to re-share objects from # reference clone and keep the size of the clone minimal). # It's possible that previous GC process was interrupted and left # a lock. Use --force to workaround that. It should be safe # to override the lock since directories should not be shared # between concurrent builds. # # Also, prune all loose objects to avoid "git gc --auto" failing # and creating .git/gc.log, which will stop future "git gc --auto" runs. git -C "$dir" gc --auto --force --prune=all fi ( cd "$dir" # Update from URL. git remote add origin "$url" > /dev/null 2>&1 || true git remote set-url origin "$url" local refspec="+refs/changes/*:refs/changes/*" if [ x"$single_branch" = x"" ]; then run_with_timeout_and_retry 1h 3 git remote update -p else refspec="+refs/heads/$single_branch:refs/remotes/origin/$single_branch" fi run_with_timeout_and_retry 1h 3 git fetch -q origin $refspec --prune ) ) } # Clone or update a git repo # $1 -- repo directory # $2 -- ref to checkout # $3 -- master git repo # $4 -- optional reference git repo (to speedup initial cloning) clone_or_update_repo () { ( set -ef -o pipefail local dir="$1" local ref="$2" local url="$3" local reference="auto" local single_branch="" if [ $# -ge 4 ]; then if [ $# -ge 5 ]; then single_branch="$5" fi reference="$4" fi clone_or_update_repo_no_checkout "$dir" "$url" "$reference" "$single_branch" git_clean "$dir" # Convert git branch/tag names into SHA1 local sha1 sha1=$(git_rev_parse "$dir" "$ref") # Checkout git -C "$dir" checkout --detach "$sha1" ) } # Wget files from URL that may have wildcards; only the last "basename" # part of URL is allowed to contain wildcards. Safe to use on normal URLs. # Return N-1 of files downloaded, or 127 if no files were downloaded. # $1 -- URL # $2,... -- additional parameters to wget wget_wildcard_url () { ( set -eu -o pipefail local url="$1" shift local url_basename url_basename="$(basename "$url")" local tmpdir tmpdir="$(mktemp -d)" wget_opts="" case "$(echo "$url" | cut -d/ -f3)" in *".tcwglab") wget_opts="$wget_opts --no-check-certificate" ;; esac # $(dirname "$url") may not be a valid URL. Since we only use '*' # as wildcards, check if a '*' is present in $url_basename, and if # not, do a direct wget on $url to avoid accessing $(dirname "$url") if echo "$url_basename" | grep '*' ; then wget --progress=dot:giga -r --no-parent --no-directories --level 1 "--directory-prefix=$tmpdir" -A "$url_basename" $wget_opts "$@" "$(dirname "$url")/" else wget --progress=dot:giga -r --no-parent --no-directories --level 1 "--directory-prefix=$tmpdir" $wget_opts "$@" "$url" fi local count=-1 for i in "$tmpdir"/$url_basename; do mv "$i" . count=$((count+1)) done rm -rf "$tmpdir" return $count ) } # Fetch a tarball using wget_wildcard_url and untar it into a directory # named after the tarball. # $1 -- URL # $2 -- base directory to untar to # $3 -- extra tar options, e.g. "--strip-components 1" untar_url () { ( set -eu -o pipefail local url="$1" local basedir="$2" local taropts="$3" local tarball local dirname wget_wildcard_url "$url" # shellcheck disable=SC2046 tarball="$(ls $(basename "$url"))" dirname="$basedir/${tarball%.tar*}" mkdir "$dirname" tar xf "$tarball" --directory "$dirname" $taropts echo "$dirname" ) } # Wait until the ssh server is ready to accept connexions # $1: host # $2: port # $3: retry count (optional) # Returns 0 on success, 1 in case of error wait_for_ssh_server () { ( set -euf -o pipefail local session_host="$1" local session_port="$2" local count="${3-20}" while [ $count -gt 0 ] do ssh -p $session_port $session_host true && break echo "SSH server not ready, waiting....." sleep 5 count=$((count - 1)) done if [ $count -eq 0 ]; then echo "ERROR: SSH server did not respond ($session_host:$session_port)" return 1 fi return 0 ) } # Print CPU share allocation for $task and $weight. # $1: task # $2: weight print_cpu_shares () { ( set -euf -o pipefail local task="$1" local weight="$2" local cpus cpus=$(( $weight * 1000 )) # 1000 cpu shares per executor echo "$cpus" ) } # Print memory allocation for $task and $weight. # $1: task # $2: weight print_memory_limit () { ( set -euf -o pipefail local task="$1" local weight="$2" local nproc="$3" local memory case "$task" in build) # 1GB per compilation core memory=$(( 1000 * $weight * $nproc )) if [ "$memory" -gt "30000" ]; then memory="30000" fi ;; test) # 0.75GB per session memory=$(( 750 * $weight )) ;; esac echo "$memory" ) } # Print PID allocation for $task and $weight. # $1: task # $2: weight print_pids_limit () { ( set -euf -o pipefail local task="$1" local weight="$2" local pids pids=$(( $weight * 5000 )) # 5000 processes per executor echo "$pids" ) } # Print default bind mounts for $task # $1: task print_bind_mounts () { ( set -euf -o pipefail local task="$1" local -a bind_mounts case $task in build) if [ x"$WORKSPACE" = x ]; then echo "WORKSPACE is not defined. Are you executing this from Jenkins?" >&2 exit 1 fi bind_mounts=( /home/tcwg-buildslave/snapshots-ref:ro /home/tcwg-buildslave/llvm-reference:ro $WORKSPACE ) esac echo "${bind_mounts[@]:+${bind_mounts[@]}}" ) } # Print default volume mounts for $job # $1: job # $2: Suffix to be appended to the volume names (e.g., -$container_arch-$distro) print_volume_mounts () { ( set -euf -o pipefail local job="$1" local suffix="$2" local -a mounts local volume_id case "$job" in tcwg_kernel-*) # Add ccache volume for tcwg_kernel jobs. These jobs # depend on ccache for fast rebuilds of LLVM and GCC with # the host compiler. job="tcwg_kernel" volume_id=$(print_docker_name "$job$suffix") mounts+=(ccache-$volume_id:$HOME/.ccache) ;; esac echo "${mounts[@]:+${mounts[@]}}" ) } # Print path converted to docker's daemon level # $1: path print_docker_path () { ( set -euf -o pipefail local path="$1" if [ -f "/.dockerenv" ] && mount | grep -q "/run/docker.sock "; then # If inside "host" container (with proxied docker and /home from host-home volume), # convert paths to refer to volume's path on bare-metal. echo "$path" | sed -e "s#^/home/#/var/lib/docker/volumes/host-home/_data/#" else echo "$path" fi ) } # Return zero if bash array is defined. # $1: Name of bash array test_array() { local var var="$1[@]" if [ x"${!var+set}" = x"set" ]; then return 0 else return 1 fi } # Manifest filename for manifest_out and convert_args_to_variables to write to. # This is set using "%%" directive to convert_args_to_variables. # This is a stack maintained by manifest_push and manifest_pop. __manifest_filename=("/dev/null") # Set new file name for manifest # $1: File name manifest_push () { local filename="$1" # Resolve absolute path to manifest. local dir=$(dirname "$filename") mkdir -p "$dir" dir=$(cd "$dir"; pwd) __manifest_filename=("$dir/$(basename "$filename")" "${__manifest_filename[@]}") rm -f "${__manifest_filename[0]}" } # Return to previous manifest filename manifest_pop () { __manifest_filename=("${__manifest_filename[@]:1}") } # Output stdout to the manifest file. Most common case would be # cat << EOF | manifest_out # # Component revision # component_rev="$component_rev" # EOF manifest_out () { cat >> "${__manifest_filename[0]}" } # Process "--var value" and "++arr elem" arguments and define corresponding # variables and arrays. # "--var value" defines shell variable "$var" to "value". # "++arr elem" defines shell array "$arr[@]" and adds "elem" to it. # "@@ file" sources file. # "%% file" starts manifest in file. Also see "^^ true". # "^^ true/false %% manifest" whether to reproduce the build using manifest. # If "true" -- source manifest instead of generating it, then discard # all following options at to separator "--". # If "false" -- do nothing and proceed as usual. # # Shell array $CONVERTED_ARGS is set to the arguments processed. # Shell variable $SHIFT_CONVERTED_ARGS is set to number of arguments processed. # $@: Pairs of def/val arguments, stops at "--" marker. convert_args_to_variables () { local arr name num local total="0" eval "CONVERTED_ARGS=(--)" while [ $# -gt 0 ]; do case "$1" in "--") # Finish marker total=$(($total+1)) shift 1 break ;; "--"*) name="${1#--}" declare -g $name="$2" cat <'" exit 1 fi # Source the manifest for reproduction. source "$4" # Skip processing all following arguments. num=0 for i in "$@"; do if [ x"$i" = x"--" ]; then break fi num=$(($num+1)) done else num=2 fi ;; *) echo "ERROR: option does not start with '--' / '++' / '@@' / '%%' : $1" exit 1 ;; esac total=$(($total+$num)) while [ $num -gt 0 ]; do eval "CONVERTED_ARGS+=(\"$1\")" shift 1 num=$(($num-1)) done done eval "SHIFT_CONVERTED_ARGS=$total" cat <&2; exit 1 ;; esac echo "$target" ) } # Print LLVM-friendly target for "uname -m" target # $1: "uname -m"-style target or "native" print_llvm_target () { ( set -euf -o pipefail local target="$1" if [ x"$target" = x"native" ]; then target=$(uname -m) fi case "$target" in "aarch64") target="AArch64" ;; "arm"*) target="ARM" ;; "x86_64") target="X86" ;; *) echo "ERROR: Unknown target $target" >&2; exit 1 ;; esac echo "$target" ) } # Print Linux make-friendly target for "uname -m" target # $1: "uname -m"-style target or "native" print_kernel_target () { ( set -euf -o pipefail local target="$1" if [ x"$target" = x"native" ]; then target=$(uname -m) fi case "$target" in "aarch64") target="arm64" ;; "arm"*) target="arm" ;; *) echo "ERROR: Unknown target $target" >&2; exit 1 ;; esac echo "$target" ) } # Thoroughly clean git repo, leave only .git/ directory # $1: Git clone directory git_clean () { ( set -euf -o pipefail fresh_dir "$1" "$1/.git/*" git -C "$1" reset --hard ) } # Add git remote pointing to linaro's git repo/mirros with writable # toolchain/ci/* repo. Deduce repo's URL from URL of existing # "origin" git remote. # $1: Git clone directory (must have "origin" remote configured) # $2: Name of the new remote. # $3: Whether to make the new remote read-only or read-write. git_init_linaro_local_remote () { ( set -euf -o pipefail local dir="$1" local remote="$2" local read_only="$3" cd $dir local origin_url local new_url origin_url=$(git remote get-url origin) # Figure out mirror repo on linaro's servers. case "$origin_url" in *"kernel.org/"*"/linux"*) new_url="toolchain/ci/linux.git" ;; *) new_url="toolchain/ci/$(basename $origin_url)" ;; esac # Use git-us.l.o to avoid delays between review.l.o and git.l.o new_url="git-us.linaro.org/$new_url" if $read_only; then new_url="https://$new_url" else # Use gitolite access. Gerrit's ssh access verifies pushed commits, # which can slow-down server on big pushes. new_url="ssh://$new_url" fi # Set-url fails if there's no existing remote, so, rather than # check-and-add-if-not-present, always add and ignore result. git remote add $remote "$new_url" > /dev/null 2>&1 || true git remote set-url $remote "$new_url" ) } # Push HEAD of git repo to a given remote/branch # $1: Git clone dir # $2: remote name # $3: branch to force push to git_push () { ( set -euf -o pipefail local dir="$1" local remote="$2" local branch="$3" cd $dir case "$branch" in "refs/"*) ;; *) branch=refs/heads/$branch ;; esac git push --force $remote HEAD:$branch ) } # Initialize run_step state # $1: Step to start execution at (or "" to start at the very first step) # $2: Step to finish execution at (or "" to run till the very end) # $3: Top artifact directory # $4: Top baseline artifact directory (see Note 1) # $5: Whether to enable "set -x" verbosity for execution steps. # # Note 1: Artifacts for steps before $run_step_start_at/$1 will be copied over # from $base_artifacts/$4 run_step_init () { run_step_start_at="$1" run_step_finish_at="$2" run_step_top_artifacts="$3" run_step_base_artifacts="$4" run_step_verbose="$5" run_step_count="0" run_step_prev_step="" run_step_active=false run_step_status=0 run_step_artifacts="" if [ x"$run_step_base_artifacts" != x"" ]; then run_step_use_baseline=true else run_step_use_baseline=false fi # We need absolute paths for $run_step_artifacts, which is constructed from # $run_step_top_artifacts. mkdir -p "$run_step_top_artifacts" run_step_top_artifacts=$(cd "$run_step_top_artifacts"; pwd) rm -f $run_step_top_artifacts/console.log rm -f $run_step_top_artifacts/results } # Run execution step and handle its failure as requested # This function manages # 1. step skipping -- skip steps before START_AT and after FINISH_AT, # as well as "skip_on_fail" steps during failure. # 2. artifact handling -- create/clean artifact directories per step. # Also, copy baseline artifacts for steps before START_AT to simulate # skipped steps. # Step commands have $run_step_artifacts and $run_step_prev_artifacts # pointing to artifact directories for current and previous step # respectively. # 3. logging -- dump stdout and and stderr output of step commands # into per-step console.log files, and, also, into the top-level # console.log file. # 4. result handling -- output provided success result to artifacts/results # for successful steps. Special value "x" means to let the step itself # update artifacts/results. Results are written to artifacts/results # for both skipped and executed steps as long as $run_step_status doesn't # indicate failure. # 5. in run mode "stop_on_fail" -- kill script if $run_step_status indicates # failure, otherwise run the step and kill script on failure. # 6. in run mode "skip_on_fail" -- skip step if $run_step_status indicates # a failure, otherwise run the step and set $run_step_status to the exit # status of the step. Normally, a series of "skip_on_fail" steps should # be followed by "reset_on_fail" step that handles the cumulative result. # 7. in run mode "reset_on_fail" -- run the step regardless and set # $run_step_status to the exit status of the step (thus resetting it). # $1: Run mode: stop_on_fail, skip_on_fail, reset_on_fail. # $2: Result to write to artifacts/results in the absence of failures. # $@: Step command and its arguments run_step () { local run_mode="$1" local success_result="$2" shift 2 local -a step local pretty_step step=("$@") pretty_step=$(echo "${step[@]}" | tr " " "-") run_step_count=$(($run_step_count+1)) if [ x"$pretty_step" = x"$run_step_start_at" \ -o x"$run_step_start_at" = x"" -a x"$run_step_prev_step" = x"" ]; then run_step_active=true run_step_use_baseline=false fi run_step_prev_artifacts=$run_step_artifacts run_step_artifacts=$run_step_top_artifacts/$run_step_count-$pretty_step rm -rf "$run_step_artifacts" mkdir -p "$run_step_artifacts" if $run_step_active; then local skip=false case "$run_step_status:$run_mode" in 0:*) ;; *:stop_on_fail) echo "STOPPING before ${step[@]} due to previous failure" false ;; *:skip_on_fail) echo "SKIPPING ${step[@]} due to previous failure" skip=true ;; *:reset_on_fail) echo "HANDLING previous failure in ${step[@]}" ;; *) assert false esac if ! $skip; then echo "RUNNING ${step[@]}; see tail -f $run_step_artifacts/console.log" eval "if $run_step_verbose; then set -x; else set +x; fi; ${step[@]}" 2>&1 | tee -a $run_step_top_artifacts/console.log > $run_step_artifacts/console.log & wait $! || run_step_status=$? case "$run_step_status:$run_mode" in 0:*) ;; *:stop_on_fail|*:reset_on_fail) echo "STOPPING at ${step[@]} due to failure" false ;; *:skip_on_fail) echo "CARRYING ON after failure in ${step[@]}" ;; *) assert false esac fi elif $run_step_use_baseline; then echo "COPYING BASE-ARTIFACTS for ${step[@]}" local base_artifacts base_artifacts="$run_step_base_artifacts/$(basename $run_step_artifacts)" if [ -d "$base_artifacts" ]; then rsync -a --del "$base_artifacts/" "$run_step_artifacts/" fi else echo "SKIPPING ${step[@]}" fi if [ x"$run_step_status" = x"0" -a x"$success_result" != x"x" ]; then cat >> $run_step_top_artifacts/results <