This repository has been archived by the owner on Sep 12, 2023. It is now read-only.

promote after test success #127

Open · wants to merge 22 commits into master
34 changes: 29 additions & 5 deletions Jenkinsfile
@@ -61,29 +61,53 @@ node {
}
executeStage(stagePubAndPromote, 'publish and promote CV')

def stagePrepVms = {
def stagePrepTestVms = {
if (params.REBUILD_VMS == true) {
executeScript("${SCRIPTS_DIR}/buildtestvms.sh")
} else {
executeScript("${SCRIPTS_DIR}/starttestvms.sh")
}
}
executeStage(stagePrepVms, 'prepare VMs')
executeStage(stagePrepTestVms, 'prepare test VMs')

def stageRunTests = {
executeScript("${SCRIPTS_DIR}/pushtests.sh")
step([$class: "TapPublisher", testResults: "test_results/*.tap", ])
step([$class: "TapPublisher", testResults: "test_results/*.tap", failedTestsMarkBuildAsFailure: true ])
if (currentBuild.result == 'FAILURE') {
isInErrorState = true
error('There were test failures')
}
}
executeStage(stageRunTests, 'run tests')

def stagePowerOff = {
def stagePowerOffTestVMs = {
if (params.POWER_OFF_VMS_AFTER_BUILD == true) {
executeScript("${SCRIPTS_DIR}/powerofftestvms.sh")
} else {
println "test VMs are not shut down as per passed configuration"
}
}
executeStage(stagePowerOff, 'power off VMs')
executeStage(stagePowerOffTestVMs, 'power off test VMs')

/*
* promote to GOLDENVM_ENV here, or do we do both test and golden VMs from the same LCE?
* The latter is fine as long as the pipeline exits on failure at one of the previous steps;
* the former gives us a nicer separation (so (C)CVs in that LCE can be used for other tasks that only want a version where automated testing passed).
*/
def stagePromote2GoldenLCE = {
executeScript("${SCRIPTS_DIR}/promote2goldenlce.sh")
executeScript("${SCRIPTS_DIR}/capsule-sync-check.sh")
}
executeStage(stagePromote2GoldenLCE, 'promote CV to golden')

def stagePrepGoldenVms = {
executeScript("${SCRIPTS_DIR}/buildgoldenvms.sh")
executeScript("${SCRIPTS_DIR}/wait4goldenvmsup.sh")
executeScript("${SCRIPTS_DIR}/shutdowngoldenvms.sh")
}
executeStage(stagePrepGoldenVms, 'prepare golden VMs')

// where do we run virt-sysprep(1) after this is successful? Ideally on the machine doing the qemu-img convert

def stageCleanup = {
executeScript("${SCRIPTS_DIR}/cleanup.sh")
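The virt-sysprep(1) / qemu-img question left open in the Jenkinsfile comment above is not answered by this PR. As a rough sketch of what that post-processing could look like on the hypervisor (a libvirt host is assumed; the VM name and paths below are hypothetical, not part of this change):

# run on the hypervisor holding the golden VM's disk, after the VM has been shut down
GOLDEN_VM="soe-el8-golden-01"                                          # hypothetical libvirt domain name
SRC_DISK="/var/lib/libvirt/images/${GOLDEN_VM}.qcow2"                  # hypothetical disk path
OUT_IMAGE="/var/lib/libvirt/images/exports/soe-el8-$(date +%Y%m%d).qcow2"

# scrub machine-specific state (SSH host keys, machine-id, logs, ...) from the guest disk
virt-sysprep -d "${GOLDEN_VM}"

# convert and compress the disk into the exported image
qemu-img convert -O qcow2 -c "${SRC_DISK}" "${OUT_IMAGE}"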
125 changes: 125 additions & 0 deletions buildgoldenvms.sh
@@ -0,0 +1,125 @@
#!/bin/bash

# Instruct Foreman to rebuild the VMs you want to image (i.e. the VMs that will become your golden image)
#
# e.g. ${WORKSPACE}/scripts/buildgoldenvms.sh 'test'
#
# this will tell Foreman to rebuild all machines in the host collection GOLDENVM_HOSTCOLLECTION

# Load common parameter variables
. $(dirname "${0}")/common.sh

if [[ -z ${PUSH_USER} ]] || [[ -z ${SATELLITE} ]] || [[ -z ${RSA_ID} ]] \
|| [[ -z ${ORG} ]] || [[ -z ${GOLDENVM_HOSTCOLLECTION} ]]
then
err "Environment variable PUSH_USER, SATELLITE, RSA_ID, ORG " \
"or GOLDENVM_HOSTCOLLECTION not set or not found."
exit ${WORKSPACE_ERR}
fi

get_golden_vm_list # populate GOLDEN_VM_LIST

# Error out if no golden VMs are available.
if [ $(echo ${#GOLDEN_VM_LIST[@]}) -eq 0 ]; then
err "No golden VMs configured in Satellite"
exit 1
fi

# rebuild golden VMs
for I in "${GOLDEN_VM_LIST[@]}"
do
inform "Rebuilding VM ID $I"
ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer host update --name $I --build yes"

_PROBED_STATUS=$(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} "hammer host status --name $I" | grep Power | cut -f2 -d: | tr -d ' ')

# different hypervisors report power status with different words. parse and get a single word per status
# KVM uses running / shutoff
# VMware uses poweredOn / poweredOff
# libvirt uses running / off
# add other hypervisors as you come across them and please submit to https://github.com/RedHatEMEA/soe-ci

case "${_PROBED_STATUS}" in
running)
_STATUS=On
;;
poweredOn)
_STATUS=On
;;
up)
_STATUS=On
;;
shutoff)
_STATUS=Off
;;
poweredOff)
_STATUS=Off
;;
down)
_STATUS=Off
;;
off)
_STATUS=Off
;;
*)
echo "can not parse power status, please review $0 for status ${_PROBED_STATUS}"
esac

if [[ ${_STATUS} == 'On' ]]
then
# forcefully poweroff the SUT
ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer host stop --force --name $I"
ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer host start --name $I"
elif [[ ${_STATUS} == 'Off' ]]
then
ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer host start --name $I"
else
err "Host $I is neither running nor shutoff. No action possible!"
exit 1
fi
done


# we need to wait until all the golden machines have been rebuilt by Foreman
# this check was previously only in pushtests, but when using pipelines
# it's more sensible to wait here while the machines are in build mode
# the ping and ssh checks must remain in pushtests.sh,
# as a puppet-only build will not call this script

declare -A vmcopy # declare an associative array to copy our VM array into
for I in "${GOLDEN_VM_LIST[@]}"; do vmcopy[$I]=$I; done

WAIT=0
while [[ ${#vmcopy[@]} -gt 0 ]]
do
inform "Waiting 1 minute"
sleep 60
((WAIT+=60))
for I in "${vmcopy[@]}"
do
inform "Checking if host $I is in build mode."
status=$(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer host info --name $I | \
grep -e \"Managed.*yes\" -e \"Enabled.*yes\" -e \"Build.*no\" \
| wc -l")
# Check if status is OK, then the SUT will have left build mode
if [[ ${status} == 3 ]]
then
tell "host $I no longer in build mode."
unset vmcopy[$I]
else
tell "host $I is still in build mode."
fi
done
if [[ ${WAIT} -gt 6000 ]]
then
err "At least one host still in build mode after 6000 seconds. Exiting."
exit 1
fi
done

inform "A host that exited build mode is given 3 minutes to finish anaconda cleanly"
sleep 180
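The build-mode poll above hinges on hammer host info reporting three specific lines once a host has finished provisioning; the grep -e ... | wc -l test counts them and treats exactly 3 matches as "out of build mode". The lines it looks for are roughly the following (exact labels and spacing depend on the hammer/Satellite version, shown here only for illustration):

Managed:      yes
Enabled:      yes
Build:        no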
14 changes: 7 additions & 7 deletions buildtestvms.sh
@@ -27,11 +27,11 @@ fi
# rebuild test VMs
for I in "${TEST_VM_LIST[@]}"
do
inform "Rebuilding VM ID $I"
inform "Rebuilding VM $I"
ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer host update --id $I --build yes"
"hammer host update --name $I --build yes"

_PROBED_STATUS=$(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} "hammer host status --id $I" | grep Power | cut -f2 -d: | tr -d ' ')
_PROBED_STATUS=$(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} "hammer host status --name $I" | grep Power | cut -f2 -d: | tr -d ' ')

# different hypervisors report power status with different words. parse and get a single word per status
# KVM uses running / shutoff
@@ -62,20 +62,20 @@ do
_STATUS=Off
;;
*)
echo "can not parse power status, please review $0"
echo "can not parse power status, please review $0 for status ${_PROBED_STATUS}"
esac

if [[ ${_STATUS} == 'On' ]]
then
# forcefully poweroff the SUT
ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer host stop --force --id $I"
"hammer host stop --force --name $I"
ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer host start --id $I"
"hammer host start --name $I"
elif [[ ${_STATUS} == 'Off' ]]
then
ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer host start --id $I"
"hammer host start --name $I"
else
err "Host $I is neither running nor shutoff. No action possible!"
exit 1
25 changes: 25 additions & 0 deletions common.sh
@@ -64,3 +64,28 @@ function get_test_vm_list() {
fi
done
}

# and the same again, but for GOLDEN_VM_LIST
function get_golden_vm_list() {
local K=0
for I in $(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer host-collection hosts --organization \"${ORG}\" \
--name \"$GOLDENVM_HOSTCOLLECTION\" \
| tail -n +4 | cut -f2 -d \"|\" | head -n -1")
do
# If CONDITIONAL_VM_BUILD is 'true', only keep VMs whose host comment
# matches one of the modified #content# entries listed in $MODIFIED_CONTENT_FILE.
# If that file is empty or doesn't exist, we keep every VM,
# as that hints at a script change.
if [[ "${CONDITIONAL_VM_BUILD}" != 'true' ]] || \
[[ ! -s "${MODIFIED_CONTENT_FILE}" ]] || \
ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer --output yaml host info --name \"${I}\"" \
| grep "^Comment:" \
| grep -Fqf "${MODIFIED_CONTENT_FILE}"
then
GOLDEN_VM_LIST[$K]=$I
((K+=1))
fi
done
}
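For reference, the tail/cut/head pipeline in get_golden_vm_list (and its get_test_vm_list twin) peels host names out of hammer's table output, which looks roughly like this (hypothetical host names, column layout as in current hammer versions):

---|-------------------------------
ID | NAME
---|-------------------------------
1  | soe-el8-golden-01.example.com
2  | soe-el8-golden-02.example.com
---|-------------------------------

tail -n +4 skips the three header lines, head -n -1 drops the trailing separator, and cut -f2 -d "|" keeps the NAME column, so GOLDEN_VM_LIST ends up holding host names, matching the --name switches used in buildgoldenvms.sh and the --id to --name change in buildtestvms.sh.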
2 changes: 1 addition & 1 deletion powerofftestvms.sh
@@ -19,9 +19,9 @@ fi

get_test_vm_list # populate TEST_VM_LIST

# TODO: Error out if no test VM's are available.
if [ $(echo ${#TEST_VM_LIST[@]}) -eq 0 ]; then
err "No test VMs configured in Satellite"
exit 1
fi

# shutdown test VMs
62 changes: 62 additions & 0 deletions promote2goldenlce.sh
@@ -0,0 +1,62 @@
#!/bin/bash -x

# promote the content view(s) from TESTVM_ENV to GOLDENVM_ENV

# Load common parameter variables
. $(dirname "${0}")/common.sh

# If MODIFIED_CONTENT_FILE is not 0 bytes, then publishcv.sh has
# attempted a (C)CV publish step plus a promotion to LCE TESTVM_ENV,
# thus we can promote to GOLDENVM_ENV now
# (since this script is called by a pipeline step that is only executed if the prior steps did NOT fail)
if [[ ! -s "${MODIFIED_CONTENT_FILE}" ]]
then
echo "No entries in ${MODIFIED_CONTENT_FILE} no need to continue with $0"
exit 0
fi

# Create an array from all the content view names
oldIFS="${IFS}"
i=0
IFS=','
for cv in ${CV} ${CV_PASSIVE_LIST}
do
CV_LIST[$i]="${cv}"
((i++))
done
IFS="${oldIFS}"

# Get a list of all CV version IDs
for cv in "${CV_LIST[@]}"
do
# get the latest version of each CV, add it to the array
inform "Get the latest version of CV ${cv}"
VER_ID_LIST+=( "$(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer content-view info --name \"${cv}\" --organization \"${ORG}\" \
| sed -n \"/Versions:/,/Components:/p\" | grep \"ID:\" | tr -d ' ' | cut -f2 -d ':' | sort -n | tail -n 1")" )
done

if [[ -n ${GOLDENVM_ENV} ]]
then
for (( i = 0; i < ${#CV_LIST[@]}; i++ ))
do # promote the latest version of each CV
cv=${CV_LIST[$i]}
ver_id=${VER_ID_LIST[$i]}

inform "Promoting version ID ${ver_id} of ${cv} to LCE ${GOLDENVM_ENV}"
ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer content-view version promote --content-view \"${cv}\" --organization \"${ORG}\" \
--to-lifecycle-environment-id \"${GOLDENVM_ENV}\" --force --id ${ver_id}"
done

# we also promote the latest version of each CCV
for ccv_id in ${CCV_IDS[@]}
do
ccv_ver=$(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer --csv content-view version list --content-view-id ${ccv_id} --organization \"${ORG}\"" | awk -F',' '$1 ~ /^[0-9]+$/ {if ($3 > maxver) {maxver = $3; maxid = $1} } END {print maxid}')
inform "Promoting version ${ccv_ver} of CCV ID ${ccv_id} to LCE ${GOLDENVM_ENV}"
ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer content-view version promote --content-view-id \"${ccv_id}\" --organization \"${ORG}\" \
--to-lifecycle-environment-id \"${GOLDENVM_ENV}\" --force --id ${ccv_ver}"
done
fi
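The awk one-liner above selects the newest CCV version from hammer's CSV output. Assuming the column order the script relies on (ID,Name,Version,...), it only looks at rows whose first field is numeric, remembers the row with the highest Version (field 3), and prints that row's ID (field 1). A hypothetical input:

ID,Name,Version,Lifecycle Environments
40,ccv-soe-el8 6.0,6.0,Library
45,ccv-soe-el8 7.0,7.0,Library

would print 45, which is then handed to hammer content-view version promote --id.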
2 changes: 1 addition & 1 deletion publishcv.sh
@@ -123,7 +123,7 @@ then
cv=${CV_LIST[$i]}
ver_id=${VER_ID_LIST[$i]}

inform "Promoting version ${ver_id} of ${cv} to LCE ${TESTVM_ENV}"
inform "Promoting version ID ${ver_id} of ${cv} to LCE ${TESTVM_ENV}"
ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer content-view version promote --content-view \"${cv}\" --organization \"${ORG}\" \
--to-lifecycle-environment-id \"${TESTVM_ENV}\" --force --id ${ver_id}"
8 changes: 8 additions & 0 deletions puppetpush.sh
@@ -8,6 +8,14 @@
# Load common parameter variables
. $(dirname "${0}")/common.sh

### disable for a quick test
### pcfe, 2020-08-27

inform "temporarily disabled"
exit 0

###

if [[ -z ${PUSH_USER} ]] || [[ -z ${SATELLITE} ]]
then
err "PUSH_USER or SATELLITE not set or not found"
10 changes: 10 additions & 0 deletions rhel-8-script-env-vars-puppet-only.groovy
@@ -0,0 +1,10 @@
env.REPO_ID="3825"
env.PUPPET_REPO_ID="9"
env.TESTVM_HOSTCOLLECTION="hc-soe-el8-test"
env.GOLDENVM_HOSTCOLLECTION="hc-soe-el8-golden"
env.PUPPET_REPO="/var/www/html/pub/soe-puppet-only"
env.CV="cv-puppet-only"
env.CV_PASSIVE_LIST=""
env.CCV_NAME_PATTERN=""
env.CONDITIONAL_VM_BUILD=false
env.PUPPET_DONE_SLEEP="0"
13 changes: 13 additions & 0 deletions rhel-8-script-env-vars-rpm.groovy
@@ -0,0 +1,13 @@
// this is for RHEL 8 only, as we build packages and shove them into a RHEL 8-only yum repo
env.REPO_ID="3825"
env.PUPPET_REPO_ID="8"
env.TESTVM_HOSTCOLLECTION="hc-soe-el8-test"
env.GOLDENVM_HOSTCOLLECTION="hc-soe-el8-golden"
env.YUM_REPO="/var/www/html/pub/soe-repo/rhel8"
env.PUPPET_REPO="/var/www/html/pub/soe-puppet"
env.CV="cv-soe-ci-el8"
env.CV_PASSIVE_LIST=""
env.CCV_NAME_PATTERN=""
env.CONDITIONAL_VM_BUILD=false
env.MOCK_CONFIG="rhel-8-x86_64"
env.PUPPET_DONE_SLEEP="75"
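MOCK_CONFIG names the mock(1) chroot configuration that the RPM build step of the pipeline presumably feeds to mock; a hedged sketch of such an invocation (the SRPM name and result directory are hypothetical):

mock -r "${MOCK_CONFIG}" --rebuild "${WORKSPACE}/rpms/soe-example-1.0-1.el8.src.rpm" --resultdir "${WORKSPACE}/artefacts/mock"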
1 change: 1 addition & 0 deletions script-env-vars.groovy
@@ -6,4 +6,5 @@ env.KNOWN_HOSTS="/var/lib/jenkins/.ssh/known_hosts"
env.SATELLITE="satellite.internal.pcfe.net"
env.ORG="Sat Test"
env.TESTVM_ENV="2"
env.GOLDENVM_ENV="14"
env.TEST_ROOT="redhat geheim"
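TESTVM_ENV and GOLDENVM_ENV hold numeric lifecycle-environment IDs; both are passed to hammer's --to-lifecycle-environment-id switch in publishcv.sh and promote2goldenlce.sh. The IDs for a given Satellite can be looked up, for example, with:

ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer lifecycle-environment list --organization \"${ORG}\""

and the ID column of the desired LCE is what goes into these two variables.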