diff --git a/Jenkinsfile b/Jenkinsfile
index 653a45f..da11bc0 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -61,29 +61,53 @@ node {
     }
     executeStage(stagePubAndPromote, 'publish and promote CV')

-    def stagePrepVms = {
+    def stagePrepTestVms = {
         if (params.REBUILD_VMS == true) {
             executeScript("${SCRIPTS_DIR}/buildtestvms.sh")
         } else {
             executeScript("${SCRIPTS_DIR}/starttestvms.sh")
         }
     }
-    executeStage(stagePrepVms, 'prepare VMs')
+    executeStage(stagePrepTestVms, 'prepare test VMs')

     def stageRunTests = {
         executeScript("${SCRIPTS_DIR}/pushtests.sh")
-        step([$class: "TapPublisher", testResults: "test_results/*.tap", ])
+        step([$class: "TapPublisher", testResults: "test_results/*.tap", failedTestsMarkBuildAsFailure: true ])
+        if (currentBuild.result == 'FAILURE') {
+            isInErrorState = true
+            error('There were test failures')
+        }
     }
     executeStage(stageRunTests, 'run tests')

-    def stagePowerOff = {
+    def stagePowerOffTestVMs = {
         if (params.POWER_OFF_VMS_AFTER_BUILD == true) {
             executeScript("${SCRIPTS_DIR}/powerofftestvms.sh")
         } else {
             println "test VMs are not shut down as per passed configuration"
         }
     }
-    executeStage(stagePowerOff, 'power off VMs')
+    executeStage(stagePowerOffTestVMs, 'power off test VMs')
+
+/*
+* promote to GOLDENVM_ENV here or do we do both test and golden VMs from the same LCE?
+* the latter is actually fine as long as the pipeline exits on failure at one of the previous steps
+* the former gives us a nicer separation (so (C)CVs in the LCE can be used for other tasks that want only a version where automated testing passed)
+*/
+    def stagePromote2GoldenLCE = {
+        executeScript("${SCRIPTS_DIR}/promote2goldenlce.sh")
+        executeScript("${SCRIPTS_DIR}/capsule-sync-check.sh")
+    }
+    executeStage(stagePromote2GoldenLCE, 'promote CV to golden')
+
+    def stagePrepGoldenVms = {
+        executeScript("${SCRIPTS_DIR}/buildgoldenvms.sh")
+        executeScript("${SCRIPTS_DIR}/wait4goldenvmsup.sh")
+        executeScript("${SCRIPTS_DIR}/shutdowngoldenvms.sh")
+    }
+    executeStage(stagePrepGoldenVms, 'prepare golden VMs')
+
+    // where do we run virt-sysprep(1) after this is successful? Ideally on the machine doing qemu-img convert

     def stageCleanup = {
         executeScript("${SCRIPTS_DIR}/cleanup.sh")
diff --git a/buildgoldenvms.sh b/buildgoldenvms.sh
new file mode 100755
index 0000000..2556f6f
--- /dev/null
+++ b/buildgoldenvms.sh
@@ -0,0 +1,125 @@
+#!/bin/bash
+
+# Instruct Foreman to rebuild the VMs you want to image (as in, these will make your golden image)
+#
+# e.g. ${WORKSPACE}/scripts/buildgoldenvms.sh 'test'
+#
+# this will tell Foreman to rebuild all machines in host collection GOLDENVM_HOSTCOLLECTION
+
+# Load common parameter variables
+. $(dirname "${0}")/common.sh
+
+if [[ -z ${PUSH_USER} ]] || [[ -z ${SATELLITE} ]] || [[ -z ${RSA_ID} ]] \
+    || [[ -z ${ORG} ]] || [[ -z ${GOLDENVM_HOSTCOLLECTION} ]]
+then
+    err "Environment variable PUSH_USER, SATELLITE, RSA_ID, ORG " \
+        "or GOLDENVM_HOSTCOLLECTION not set or not found."
+    exit ${WORKSPACE_ERR}
+fi
+
+get_golden_vm_list # populate GOLDEN_VM_LIST
+
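+# abort early if the golden host collection resolved to no hosts, as there is nothing to image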
+if [ $(echo ${#GOLDEN_VM_LIST[@]}) -eq 0 ]; then
+    err "No golden VMs configured in Satellite"
+    exit 1
+fi
+
+# rebuild golden VMs
+for I in "${GOLDEN_VM_LIST[@]}"
+do
+    inform "Rebuilding VM $I"
+    ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
+        "hammer host update --name $I --build yes"
+
+    _PROBED_STATUS=$(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} "hammer host status --name $I" | grep Power | cut -f2 -d: | tr -d ' ')
+
+    # different hypervisors report power status with different words. parse and get a single word per status
+    # KVM uses running / shutoff
+    # VMware uses poweredOn / poweredOff
+    # libvirt uses running / off
+    # add other hypervisors as you come across them and please submit to https://github.com/RedHatEMEA/soe-ci
+
+    case "${_PROBED_STATUS}" in
+        running)
+            _STATUS=On
+            ;;
+        poweredOn)
+            _STATUS=On
+            ;;
+        up)
+            _STATUS=On
+            ;;
+        shutoff)
+            _STATUS=Off
+            ;;
+        poweredOff)
+            _STATUS=Off
+            ;;
+        down)
+            _STATUS=Off
+            ;;
+        off)
+            _STATUS=Off
+            ;;
+        *)
+            echo "can not parse power status, please review $0 for status ${_PROBED_STATUS}"
+    esac
+
+    if [[ ${_STATUS} == 'On' ]]
+    then
+        # forcefully poweroff the SUT
+        ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
+            "hammer host stop --force --name $I"
+        ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
+            "hammer host start --name $I"
+    elif [[ ${_STATUS} == 'Off' ]]
+    then
+        ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
+            "hammer host start --name $I"
+    else
+        err "Host $I is neither running nor shutoff. No action possible!"
+        exit 1
+    fi
+done
+
+
+# we need to wait until all the golden machines have been rebuilt by foreman
+# this check was previously only in pushtests.sh, but when using pipelines
+# it's more sensible to wait here while the machines are in build mode
+# the ping and ssh checks must remain in pushtests.sh
+# as a puppet-only build will not call this script
+
+declare -A vmcopy # declare an associative array to copy our VM array into
+for I in "${GOLDEN_VM_LIST[@]}"; do vmcopy[$I]=$I; done
+
+WAIT=0
+while [[ ${#vmcopy[@]} -gt 0 ]]
+do
+    inform "Waiting 1 minute"
+    sleep 60
+    ((WAIT+=60))
+    for I in "${vmcopy[@]}"
+    do
+        inform "Checking if host $I is in build mode."
+        status=$(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
+            "hammer host info --name $I | \
+            grep -e \"Managed.*yes\" -e \"Enabled.*yes\" -e \"Build.*no\" \
+            | wc -l")
+        # Check if status is OK, then the SUT will have left build mode
+        if [[ ${status} == 3 ]]
+        then
+            tell "host $I no longer in build mode."
+            unset vmcopy[$I]
+        else
+            tell "host $I is still in build mode."
+        fi
+    done
+    if [[ ${WAIT} -gt 6000 ]]
+    then
+        err "At least one host still in build mode after 6000 seconds. Exiting."
+        exit 1
+    fi
+done
+
+inform "A host that exited build mode is given 3 minutes to finish anaconda cleanly"
+sleep 180
diff --git a/buildtestvms.sh b/buildtestvms.sh
index 879af70..20f9bc7 100755
--- a/buildtestvms.sh
+++ b/buildtestvms.sh
@@ -27,11 +27,11 @@ fi
 # rebuild test VMs
 for I in "${TEST_VM_LIST[@]}"
 do
-    inform "Rebuilding VM ID $I"
+    inform "Rebuilding VM $I"
     ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
-        "hammer host update --id $I --build yes"
+        "hammer host update --name $I --build yes"

-    _PROBED_STATUS=$(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} "hammer host status --id $I" | grep Power | cut -f2 -d: | tr -d ' ')
+    _PROBED_STATUS=$(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} "hammer host status --name $I" | grep Power | cut -f2 -d: | tr -d ' ')

     # different hypervisors report power status with different words. parse and get a single word per status
     # KVM uses running / shutoff
@@ -62,20 +62,20 @@ do
             _STATUS=Off
             ;;
         *)
-            echo "can not parse power status, please review $0"
+            echo "can not parse power status, please review $0 for status ${_PROBED_STATUS}"
     esac

     if [[ ${_STATUS} == 'On' ]]
     then
         # forcefully poweroff the SUT
         ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
-            "hammer host stop --force --id $I"
+            "hammer host stop --force --name $I"
         ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
-            "hammer host start --id $I"
+            "hammer host start --name $I"
     elif [[ ${_STATUS} == 'Off' ]]
     then
         ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
-            "hammer host start --id $I"
+            "hammer host start --name $I"
     else
         err "Host $I is neither running nor shutoff. No action possible!"
         exit 1
diff --git a/common.sh b/common.sh
index a2286da..4cc5231 100755
--- a/common.sh
+++ b/common.sh
@@ -64,3 +64,28 @@ function get_test_vm_list() {
         fi
     done
 }
+
+# and the same again, but for GOLDEN_VM_LIST
+function get_golden_vm_list() {
+    local K=0
+    for I in $(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
+        "hammer host-collection hosts --organization \"${ORG}\" \
+        --name \"$GOLDENVM_HOSTCOLLECTION\" \
+        | tail -n +4 | cut -f2 -d \"|\" | head -n -1")
+    do
+        # If CONDITIONAL_VM_BUILD is 'true', only keep VMs commented
+        # with modified #content# as listed in $MODIFIED_CONTENT_FILE
+        # If the file is empty or doesn't exist, we test everything
+        # as it hints at a script change.
+        if [[ "${CONDITIONAL_VM_BUILD}" != 'true' ]] || \
+           [[ ! -s "${MODIFIED_CONTENT_FILE}" ]] || \
+           ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
+               "hammer --output yaml host info --name \"${I}\"" \
+               | grep "^Comment:" \
+               | grep -Fqf "${MODIFIED_CONTENT_FILE}"
+        then
+            GOLDEN_VM_LIST[$K]=$I
+            ((K+=1))
+        fi
+    done
+}
diff --git a/powerofftestvms.sh b/powerofftestvms.sh
index 948d39e..a85b701 100755
--- a/powerofftestvms.sh
+++ b/powerofftestvms.sh
@@ -19,9 +19,9 @@ fi

 get_test_vm_list # populate TEST_VM_LIST

-# TODO: Error out if no test VM's are available.
 if [ $(echo ${#TEST_VM_LIST[@]}) -eq 0 ]; then
     err "No test VMs configured in Satellite"
+    exit 1
 fi

 # shutdown test VMs
diff --git a/promote2goldenlce.sh b/promote2goldenlce.sh
new file mode 100755
index 0000000..f294bb5
--- /dev/null
+++ b/promote2goldenlce.sh
@@ -0,0 +1,62 @@
+#!/bin/bash -x
+
+# promote the content view(s) from TESTVM_ENV to GOLDENVM_ENV
+
+# Load common parameter variables
+. $(dirname "${0}")/common.sh
+
+# If MODIFIED_CONTENT_FILE is not 0 bytes, then publishcv.sh has
+# attempted a (C)CV publish step plus a promotion to LCE TESTVM_ENV,
+# thus we can promote to GOLDENVM_ENV now
+# (since this script is called by a pipeline step that is only executed if the prior steps did NOT fail)
+if [[ ! -s "${MODIFIED_CONTENT_FILE}" ]]
+then
+    echo "No entries in ${MODIFIED_CONTENT_FILE}, no need to continue with $0"
+    exit 0
+fi
+
+# Create an array from all the content view names
+oldIFS="${IFS}"
+i=0
+IFS=','
+for cv in ${CV} ${CV_PASSIVE_LIST}
+do
+    CV_LIST[$i]="${cv}"
+    ((i++))
+done
+IFS="${oldIFS}"
+
+# Get a list of all CV version IDs
+for cv in "${CV_LIST[@]}"
+do
+    # get the latest version of each CV, add it to the array
+    inform "Get the latest version of CV ${cv}"
+    VER_ID_LIST+=( "$(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
+        "hammer content-view info --name \"${cv}\" --organization \"${ORG}\" \
+        | sed -n \"/Versions:/,/Components:/p\" | grep \"ID:\" | tr -d ' ' | cut -f2 -d ':' | sort -n | tail -n 1")" )
+done
+
+if [[ -n ${GOLDENVM_ENV} ]]
+then
+    for (( i = 0; i < ${#CV_LIST[@]}; i++ ))
+    do # promote the latest version of each CV
+        cv=${CV_LIST[$i]}
+        ver_id=${VER_ID_LIST[$i]}
+
+        inform "Promoting version ID ${ver_id} of ${cv} to LCE ${GOLDENVM_ENV}"
+        ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
+            "hammer content-view version promote --content-view \"${cv}\" --organization \"${ORG}\" \
+            --to-lifecycle-environment-id \"${GOLDENVM_ENV}\" --force --id ${ver_id}"
+    done
+
+    # we also promote the latest version of each CCV
+    for ccv_id in ${CCV_IDS[@]}
+    do
+        ccv_ver=$(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
+            "hammer --csv content-view version list --content-view-id ${ccv_id} --organization \"${ORG}\"" | awk -F',' '$1 ~ /^[0-9]+$/ {if ($3 > maxver) {maxver = $3; maxid = $1} } END {print maxid}')
+        inform "Promoting version ID ${ccv_ver} of CCV ID ${ccv_id} to LCE ${GOLDENVM_ENV}"
+        ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
+            "hammer content-view version promote --content-view-id \"${ccv_id}\" --organization \"${ORG}\" \
+            --to-lifecycle-environment-id \"${GOLDENVM_ENV}\" --force --id ${ccv_ver}"
+    done
+fi
diff --git a/publishcv.sh b/publishcv.sh
index 609eba4..e0df465 100755
--- a/publishcv.sh
+++ b/publishcv.sh
@@ -123,7 +123,7 @@ then
         cv=${CV_LIST[$i]}
         ver_id=${VER_ID_LIST[$i]}

-        inform "Promoting version ${ver_id} of ${cv} to LCE ${TESTVM_ENV}"
+        inform "Promoting version ID ${ver_id} of ${cv} to LCE ${TESTVM_ENV}"
         ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
             "hammer content-view version promote --content-view \"${cv}\" --organization \"${ORG}\" \
             --to-lifecycle-environment-id \"${TESTVM_ENV}\" --force --id ${ver_id}"
diff --git a/puppetpush.sh b/puppetpush.sh
index 2972f92..5758de9 100755
--- a/puppetpush.sh
+++ b/puppetpush.sh
@@ -8,6 +8,14 @@
 # Load common parameter variables
 . $(dirname "${0}")/common.sh

+### disable for a quick test
+### pcfe, 2020-08-27
+
+inform "temporarily disabled"
+exit 0
+
+###
+
 if [[ -z ${PUSH_USER} ]] || [[ -z ${SATELLITE} ]]
 then
     err "PUSH_USER or SATELLITE not set or not found"
diff --git a/rhel-8-script-env-vars-puppet-only.groovy b/rhel-8-script-env-vars-puppet-only.groovy
new file mode 100644
index 0000000..f51da0e
--- /dev/null
+++ b/rhel-8-script-env-vars-puppet-only.groovy
@@ -0,0 +1,10 @@
+env.REPO_ID="3825"
+env.PUPPET_REPO_ID="9"
+env.TESTVM_HOSTCOLLECTION="hc-soe-el8-test"
+env.GOLDENVM_HOSTCOLLECTION="hc-soe-el8-golden"
+env.PUPPET_REPO="/var/www/html/pub/soe-puppet-only"
+env.CV="cv-puppet-only"
+env.CV_PASSIVE_LIST=""
+env.CCV_NAME_PATTERN=""
+env.CONDITIONAL_VM_BUILD=false
+env.PUPPET_DONE_SLEEP="0"
diff --git a/rhel-8-script-env-vars-rpm.groovy b/rhel-8-script-env-vars-rpm.groovy
new file mode 100644
index 0000000..f2adcc3
--- /dev/null
+++ b/rhel-8-script-env-vars-rpm.groovy
@@ -0,0 +1,13 @@
+//this is for RHEL8 only as we build packages and shove them in a RHEL8 only yum repo
+env.REPO_ID="3825"
+env.PUPPET_REPO_ID="8"
+env.TESTVM_HOSTCOLLECTION="hc-soe-el8-test"
+env.GOLDENVM_HOSTCOLLECTION="hc-soe-el8-golden"
+env.YUM_REPO="/var/www/html/pub/soe-repo/rhel8"
+env.PUPPET_REPO="/var/www/html/pub/soe-puppet"
+env.CV="cv-soe-ci-el8"
+env.CV_PASSIVE_LIST=""
+env.CCV_NAME_PATTERN=""
+env.CONDITIONAL_VM_BUILD=false
+env.MOCK_CONFIG="rhel-8-x86_64"
+env.PUPPET_DONE_SLEEP="75"
diff --git a/script-env-vars.groovy b/script-env-vars.groovy
index 40fe606..aab35e5 100644
--- a/script-env-vars.groovy
+++ b/script-env-vars.groovy
@@ -6,4 +6,5 @@ env.KNOWN_HOSTS="/var/lib/jenkins/.ssh/known_hosts"
 env.SATELLITE="satellite.internal.pcfe.net"
 env.ORG="Sat Test"
 env.TESTVM_ENV="2"
+env.GOLDENVM_ENV="14"
 env.TEST_ROOT="redhat geheim"
diff --git a/shutdowngoldenvms.sh b/shutdowngoldenvms.sh
new file mode 100755
index 0000000..2edc2d6
--- /dev/null
+++ b/shutdowngoldenvms.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+# power off the golden VMs
+# hammer (without the --force flag) will attempt a clean shutdown
+#
+# e.g. ${WORKSPACE}/scripts/shutdowngoldenvms.sh 'test'
+#
+
+# Load common parameter variables
+. $(dirname "${0}")/common.sh
+
+if [[ -z ${PUSH_USER} ]] || [[ -z ${SATELLITE} ]] || [[ -z ${RSA_ID} ]] \
+    || [[ -z ${ORG} ]] || [[ -z ${GOLDENVM_HOSTCOLLECTION} ]]
+then
+    err "Environment variable PUSH_USER, SATELLITE, RSA_ID, ORG " \
+        "or GOLDENVM_HOSTCOLLECTION not set or not found."
+    exit ${WORKSPACE_ERR}
+fi
+
+get_golden_vm_list # populate GOLDEN_VM_LIST
+
+if [ $(echo ${#GOLDEN_VM_LIST[@]}) -eq 0 ]; then
+    err "No golden VMs configured in Satellite"
+    exit 1
+fi
+
+# shutdown golden VMs
+for I in "${GOLDEN_VM_LIST[@]}"
+do
+    inform "Checking status of VM $I"
+
+    _PROBED_STATUS=$(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} "hammer host status --name $I" | grep Power | cut -f2 -d: | tr -d ' ')
+
+    # different hypervisors report power status with different words. parse and get a single word per status
+    # KVM uses running / shutoff
+    # VMware uses poweredOn / poweredOff
+    # libvirt uses running / off
+    # add other hypervisors as you come across them and please submit to https://github.com/RedHatEMEA/soe-ci
+
+    case "${_PROBED_STATUS}" in
+        running)
+            _STATUS=On
+            ;;
+        poweredOn)
+            _STATUS=On
+            ;;
+        up)
+            _STATUS=On
+            ;;
+        shutoff)
+            _STATUS=Off
+            ;;
+        poweredOff)
+            _STATUS=Off
+            ;;
+        down)
+            _STATUS=Off
+            ;;
+        off)
+            _STATUS=Off
+            ;;
+        *)
+            echo "can not parse power status, please review $0"
+    esac
+
+    # n.b. kickstart can either reboot or power down at the end, so we must handle both cases
+    # also, since we do not run pushtests.sh against the golden VMs,
+    # ensure it's up before attempting a clean shutdown
+    if [[ ${_STATUS} == 'On' ]]
+    then
+        inform "Shutting down VM $I"
+        ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
+            "hammer host stop --name $I"
+    elif [[ ${_STATUS} == 'Off' ]]
+    then
+        inform "VM $I seems off already, no action taken."
+    else
+        err "Host $I is neither running nor shutoff. No action possible!"
+        exit 1
+    fi
+done
diff --git a/startgoldenvms.sh b/startgoldenvms.sh
new file mode 100755
index 0000000..3872f39
--- /dev/null
+++ b/startgoldenvms.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+# Instruct Foreman to start the golden VMs (just in case they are off)
+#
+# e.g. ${WORKSPACE}/scripts/startgoldenvms.sh 'test'
+#
+# this will tell Foreman to start all machines in host collection GOLDENVM_HOSTCOLLECTION
+
+# Load common parameter variables
+. $(dirname "${0}")/common.sh
+
+if [[ -z ${PUSH_USER} ]] || [[ -z ${SATELLITE} ]] || [[ -z ${RSA_ID} ]] \
+    || [[ -z ${ORG} ]] || [[ -z ${GOLDENVM_HOSTCOLLECTION} ]]
+then
+    err "Environment variable PUSH_USER, SATELLITE, RSA_ID, ORG " \
+        "or GOLDENVM_HOSTCOLLECTION not set or not found."
+    exit ${WORKSPACE_ERR}
+fi
+
+get_golden_vm_list # populate GOLDEN_VM_LIST
+
+if [ $(echo ${#GOLDEN_VM_LIST[@]}) -eq 0 ]; then
+    err "No golden VMs configured in Satellite"
+    exit 1
+fi
+
+# start golden VMs
+for I in "${GOLDEN_VM_LIST[@]}"
+do
+    inform "Making sure VM $I is on"
+
+    _PROBED_STATUS=$(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} "hammer host status --name $I" | grep Power | cut -f2 -d: | tr -d ' ')
+
+    # different hypervisors report power status with different words. parse and get a single word per status
+    # KVM uses running / shutoff
+    # VMware uses poweredOn / poweredOff
+    # add other hypervisors as you come across them and please submit to https://github.com/RedHatEMEA/soe-ci
+
+    case "${_PROBED_STATUS}" in
+        running)
+            _STATUS=On
+            ;;
+        poweredOn)
+            _STATUS=On
+            ;;
+        up)
+            _STATUS=On
+            ;;
+        shutoff)
+            _STATUS=Off
+            ;;
+        poweredOff)
+            _STATUS=Off
+            ;;
+        down)
+            _STATUS=Off
+            ;;
+        off)
+            _STATUS=Off
+            ;;
+        *)
+            echo "can not parse power status, please review $0"
+    esac
+
+    if [[ ${_STATUS} == 'On' ]]
+    then
+        inform "Host $I is already on."
+    elif [[ ${_STATUS} == 'Off' ]]
+    then
+        inform "Host $I is off, switching it on."
+        ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
+            "hammer host start --name $I"
+    else
+        err "Host $I is neither running nor shutoff. No action possible!"
+        exit 1
+    fi
+done
diff --git a/wait4goldenvmsup.sh b/wait4goldenvmsup.sh
new file mode 100755
index 0000000..90bcf81
--- /dev/null
+++ b/wait4goldenvmsup.sh
@@ -0,0 +1,52 @@
+#!/bin/bash -x
+
+# Wait for golden VMs to be up (run by the pipeline before shutdowngoldenvms.sh)
+#
+# e.g. ${WORKSPACE}/scripts/wait4goldenvmsup.sh 'test'
+#
+
+# Load common parameter variables
+. $(dirname "${0}")/common.sh
+
+get_golden_vm_list # populate GOLDEN_VM_LIST
+
+# If buildgoldenvms.sh ended cleanly but the VMs remain powered up, then
+# we need to wait until all the machines are up and can be ssh-ed to.
+# Only then will we tell the hypervisor (via Satellite) to shut down cleanly
+# with a pipeline step shutdowngoldenvms.sh
+declare -A vmcopy # declare an associative array to copy our VM array into
+for I in "${GOLDEN_VM_LIST[@]}"; do vmcopy[$I]=$I; done
+
+WAIT=0
+while [[ ${#vmcopy[@]} -gt 0 ]]
+do
+    inform "Waiting 15 seconds"
+    sleep 15
+    ((WAIT+=15))
+    for I in "${vmcopy[@]}"
+    do
+        inform "Checking if golden VM $I has rebooted into the OS before the next pipeline step attempts a clean shutdown."
+        status=$(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
+            "hammer host info --name $I | \
+            grep -e \"Managed.*yes\" -e \"Enabled.*yes\" -e \"Build.*no\" \
+            | wc -l")
+        # Check if status is OK, ping responds and SSH is reachable, then success!
+        if [[ ${status} == 3 ]] && ping -c 1 -q $I && nc -w 1 $I 22
+        then
+            tell "Success!"
+            unset vmcopy[$I]
+        else
+            tell "Not yet."
+        fi
+    done
+    if [[ ${WAIT} -gt 6000 ]]
+    then
+        err "Golden VM not reachable via ssh after 6000 seconds. Exiting."
+        exit 1
+    fi
+done
+
+# Wait another 30s to be on the safe side
+sleep 30
+
+# since a golden VM is meant to be imaged, do NOT do anything else here.
\ No newline at end of file