# Use assisted-installer for CNV chaos testing
# Run as a user with passwordless sudo
#----------------------------------------------
# Install assisted-installer and deploy cluster
#----------------------------------------------
sudo dnf install -y git make
git clone https://github.com/openshift/assisted-test-infra.git
cd assisted-test-infra
# for the first time only
make create_full_environment
# for next runs
# make create_environment
# Get a valid pull secret (JSON string) from https://cloud.redhat.com/openshift/install/pull-secret
export PULL_SECRET='...'
export NUM_WORKERS=2
export WORKER_MEMORY=20000MB
export MASTER_MEMORY=20000MB
export MASTER_DISK=42949672960
export WORKER_DISK=42949672960
export CLUSTER_NAME=chaos-cluster
# Deploy minikube, install assisted service and register OCP cluster of 3 masters and 2 workers
make run_full_flow_with_install
BM_INVENTORY_SERVER="$(hostname):6008"
CLUSTER_ID=$(curl -s "http://${BM_INVENTORY_SERVER}/api/assisted-install/v1/clusters/" | jq -r '.[].id')
CLUSTER_API_VIP=$(curl -s "http://${BM_INVENTORY_SERVER}/api/assisted-install/v1/clusters/${CLUSTER_ID}" | jq -r '.api_vip')
CLUSTER_INGRESS_VIP=$(curl -s "http://${BM_INVENTORY_SERVER}/api/assisted-install/v1/clusters/${CLUSTER_ID}" | jq -r '.ingress_vip')
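# Optional sanity check: the cluster status reported by the assisted-install
# REST API should be "installed" before grabbing the kubeconfig
curl -s "http://${BM_INVENTORY_SERVER}/api/assisted-install/v1/clusters/${CLUSTER_ID}" | jq -r '.status'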
# Before downloading the kubeconfig and using it, the cluster hostnames must be resolvable
echo "${CLUSTER_INGRESS_VIP} console-openshift-console.apps.${CLUSTER_NAME}-assisted-installer.redhat.com oauth-openshift.apps.${CLUSTER_NAME}-assisted-installer.redhat.com grafana-openshift-monitoring.apps.${CLUSTER_NAME}-assisted-installer.redhat.com prometheus-k8s-openshift-monitoring.apps.${CLUSTER_NAME}-assisted-installer.redhat.com thanos-querier-openshift-monitoring.apps.${CLUSTER_NAME}-assisted-installer.redhat.com alertmanager-main-openshift-monitoring.apps.${CLUSTER_NAME}-assisted-installer.redhat.com" | sudo tee -a /etc/hosts
echo "${CLUSTER_API_VIP} api.${CLUSTER_NAME}-assisted-installer.redhat.com" | sudo tee -a /etc/hosts
# Replace with a safer location
KUBECONFIG_PATH=/tmp/kubeconfig
curl -s "http://${BM_INVENTORY_SERVER}/api/assisted-install/v1/clusters/${CLUSTER_ID}/downloads/kubeconfig" -H "accept: application/octet-stream" > "$KUBECONFIG_PATH"
export KUBECONFIG=$KUBECONFIG_PATH
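# Optional sanity checks, assuming the oc client is already installed on this
# host: confirm the API hostname resolves and the kubeconfig works
getent hosts "api.${CLUSTER_NAME}-assisted-installer.redhat.com"
oc get nodes
oc get clusterversion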
#-------------------
# Installing Litmus
#-------------------
oc apply -f https://litmuschaos.github.io/litmus/litmus-operator-v1.9.0.yaml
oc patch -n litmus deployment.apps/chaos-operator-ce --type=json --patch '
[
  {
    "op": "add",
    "path": "/spec/template/spec/containers/0/env/-",
    "value": {
      "name": "ANALYTICS",
      "value": "FALSE"
    }
  }
]'
oc wait deploy -n litmus chaos-operator-ce --for=condition=Available
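#-----------------------------------------------
# Example chaos experiment (minimal sketch)
#-----------------------------------------------
# A pod-delete run against a placeholder workload. TARGET_NS, TARGET_LABEL and
# "app-chaos" are hypothetical values - point them at the app you want to
# disrupt - and "pod-delete-sa" must be a service account with the RBAC
# required by the Litmus pod-delete docs. The experiment manifest URL follows
# the Litmus 1.9 ChaosHub convention.
TARGET_NS=my-app-ns
TARGET_LABEL='app=my-app'
oc apply -n $TARGET_NS -f "https://hub.litmuschaos.io/api/chaos/1.9.0?file=charts/generic/pod-delete/experiment.yaml"
cat <<EOF | oc apply -f -
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: app-chaos
  namespace: ${TARGET_NS}
spec:
  annotationCheck: 'false'
  engineState: 'active'
  appinfo:
    appns: '${TARGET_NS}'
    applabel: '${TARGET_LABEL}'
    appkind: 'deployment'
  chaosServiceAccount: pod-delete-sa
  experiments:
    - name: pod-delete
      spec:
        components:
          env:
            - name: TOTAL_CHAOS_DURATION
              value: '30'
EOF
# The run's verdict is recorded in the corresponding ChaosResult resource
oc get chaosresult -n $TARGET_NS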
#----------------
# Installing CNV
#----------------
cat <<EOF | kubectl apply -f -
---
apiVersion: v1
kind: Namespace
metadata:
  name: openshift-cnv
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: kubevirt-hyperconverged-group
  namespace: openshift-cnv
spec:
  targetNamespaces:
    - openshift-cnv
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: hco-operatorhub
  namespace: openshift-cnv
spec:
  source: redhat-operators
  sourceNamespace: openshift-marketplace
  name: kubevirt-hyperconverged
  startingCSV: kubevirt-hyperconverged-operator.v2.4.0
  channel: "2.4"
  installPlanApproval: "Automatic"
EOF
# Give OLM a moment to register the HyperConverged CRD before creating the CR
sleep 30
cat <<EOF | kubectl apply -f -
---
apiVersion: hco.kubevirt.io/v1alpha1
kind: HyperConverged
metadata:
  name: kubevirt-hyperconverged
  namespace: openshift-cnv
spec:
  BareMetalPlatform: true
EOF
while ! oc wait --for condition=Ready pods -l name=hyperconverged-cluster-operator -n openshift-cnv --timeout=60s; do sleep 10 ; done
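# Optional smoke test - boot and delete a throwaway CirrOS VM. This is a
# minimal sketch: "test-vm" is a placeholder name, and it assumes the public
# kubevirt/cirros-container-disk-demo image is reachable from the cluster.
cat <<EOF | kubectl apply -f -
apiVersion: kubevirt.io/v1alpha3
kind: VirtualMachine
metadata:
  name: test-vm
  namespace: default
spec:
  running: true
  template:
    metadata:
      labels:
        kubevirt.io/vm: test-vm
    spec:
      domain:
        devices:
          disks:
            - name: containerdisk
              disk:
                bus: virtio
        resources:
          requests:
            memory: 128Mi
      volumes:
        - name: containerdisk
          containerDisk:
            image: kubevirt/cirros-container-disk-demo
EOF
oc wait vmi test-vm -n default --for=condition=Ready --timeout=300s
oc delete vm test-vm -n default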
#-------------------------
# Take Snapshot of cluster
#-------------------------
# Remove CD devices from VMs
DOMAINS=$(sudo virsh -r list | grep $CLUSTER_NAME | awk '{print $1}')
for vm in $DOMAINS
do
sudo virsh change-media $vm hda --eject
done
# suspend vms before taking their snapshots
for vm in $DOMAINS
do
sudo virsh suspend $vm
done
# take snapshots
for vm in $DOMAINS
do
sudo virsh snapshot-create-as --domain $vm --name "initial-state"
done
# resume vms after snapshots were taken
for vm in $DOMAINS
do
sudo virsh resume $vm
done
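# Optional: confirm the "initial-state" snapshot exists on every domain
for vm in $DOMAINS
do
sudo virsh snapshot-list $vm
done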
#---------------------------------
# Restore cluster to initial state
#---------------------------------
# suspend vms before reverting snapshot
for vm in $DOMAINS
do
sudo virsh suspend $vm
done
# Revert to initial-state snapshot and start vms
# List domains by name (column 2): names stay stable while IDs change across restarts
DOMAINS=$(sudo virsh -r list --all | grep $CLUSTER_NAME | awk '{print $2}' | sort)
snapshot_name="initial-state"
for vm in $DOMAINS
do
echo "Reverting $vm to '$snapshot_name' snapshot"
sudo virsh snapshot-revert --domain $vm --snapshotname $snapshot_name --paused --force
done
# resume vms after the snapshot was reverted
for vm in $DOMAINS
do
sudo virsh resume $vm
done
# Wait for components to become available
oc wait deploy -n litmus chaos-operator-ce --for=condition=Available
while ! oc wait --for condition=Ready pods -l name=hyperconverged-cluster-operator -n openshift-cnv --timeout=60s; do sleep 10 ; done
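# Optional: confirm all nodes come back Ready after the revert
oc wait nodes --all --for=condition=Ready --timeout=600s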
#------------------
# Tear-down cluster
#------------------
# Delete snapshots
DOMAINS=$(sudo virsh -r list --all | grep $CLUSTER_NAME | awk '{print $2}')
for vm in $DOMAINS
do
sudo virsh snapshot-delete --domain $vm --snapshotname "initial-state"
done
#---------------------
# Delete all resources
#---------------------
# delete entries from /etc/hosts
sudo sed -i "/${CLUSTER_INGRESS_VIP}/d" /etc/hosts
sudo sed -i "/${CLUSTER_API_VIP}/d" /etc/hosts
make destroy
rm $KUBECONFIG_PATH
unset KUBECONFIG_PATH
unset KUBECONFIG