From 73cd188c96f8c87c1f90f2350819b26db2a06f47 Mon Sep 17 00:00:00 2001
From: Nawaz Hussain Khazielakha
Date: Thu, 14 Nov 2024 14:57:05 -0800
Subject: [PATCH] Update Tiltfile with AKS VNet peering and deletion logic

- update aks-as-mgmt scripts with VNet creation and deletion of all workload clusters
- the internal LB IP can be set using an env variable

---
 Tiltfile                                      | 122 +++++++++++++++---
 scripts/aks-as-mgmt.sh                        |  42 ++++--
 templates/cluster-template-apiserver-ilb.yaml |   4 +-
 ...luster-template-windows-apiserver-ilb.yaml |   6 +-
 .../apiserver-ilb/patches/control-plane.yaml  |   2 +-
 .../patches/kubeadm-config-template.yaml      |   2 +-
 .../windows-apiserver-ilb/kustomization.yaml  |   6 +-
 .../patches/kubeadm-config-template.yaml      |   2 +-
 8 files changed, 150 insertions(+), 36 deletions(-)

diff --git a/Tiltfile b/Tiltfile
index aa9a4927a37..b3a4c4a8ca1 100644
--- a/Tiltfile
+++ b/Tiltfile
@@ -23,8 +23,8 @@ settings = {
     "capi_version": "v1.8.5",
     "caaph_version": "v0.2.5",
     "cert_manager_version": "v1.16.1",
-    "kubernetes_version": "v1.28.3",
-    "aks_kubernetes_version": "v1.28.3",
+    "kubernetes_version": "v1.28.15",
+    "aks_kubernetes_version": "v1.28.15",
     "flatcar_version": "3374.2.1",
     "azure_location": "eastus",
     "control_plane_machine_count": "1",
@@ -212,10 +212,10 @@ def capz():
     yaml = str(kustomizesub("./hack/observability")) # build an observable kind deployment by default

     # add extra_args if they are defined
-    if settings.get("extra_args"):
-        azure_extra_args = settings.get("extra_args").get("azure")
+    if settings.get("container_args"):
+        capz_container_args = settings.get("container_args").get("capz-controller-manager")
         yaml_dict = decode_yaml_stream(yaml)
-        append_arg_for_container_in_deployment(yaml_dict, "capz-controller-manager", "capz-system", "cluster-api-azure-controller", azure_extra_args)
+        append_arg_for_container_in_deployment(yaml_dict, "capz-controller-manager", "capz-system", "cluster-api-azure-controller", capz_container_args)
         yaml = str(encode_yaml_stream(yaml_dict))
         yaml = fixup_yaml_empty_arrays(yaml)
@@ -317,9 +317,14 @@ def flavors():
     for template in template_list:
         deploy_worker_templates(template, substitutions)

+    delete_all_workload_clusters = kubectl_cmd + " delete clusters --all --wait=false;"
+
+    if "aks" in settings.get("kustomize_substitutions", {}).get("MGMT_CLUSTER_NAME", ""):
+        delete_all_workload_clusters += clear_aks_vnet_peerings()
+
     local_resource(
         name = "delete-all-workload-clusters",
-        cmd = kubectl_cmd + " delete clusters --all --wait=false",
+        cmd = ["sh", "-ec", delete_all_workload_clusters],
         auto_init = False,
         trigger_mode = TRIGGER_MODE_MANUAL,
         labels = ["flavors"],
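With an AKS management cluster (MGMT_CLUSTER_NAME containing "aks"), the string handed to "sh -ec" by the delete-all-workload-clusters resource expands to roughly the following sketch (kubectl_cmd shown as plain kubectl; the peering cleanup comes from clear_aks_vnet_peerings() further down):

    kubectl delete clusters --all --wait=false;
    # remove stale peerings from the management VNet once it is reachable
    az network vnet wait --resource-group ${AKS_RESOURCE_GROUP} --name ${AKS_MGMT_VNET_NAME} --created --timeout 180;
    PEERING_NAMES=$(az network vnet peering list --resource-group ${AKS_RESOURCE_GROUP} --vnet-name ${AKS_MGMT_VNET_NAME} --query "[].name" --output tsv);
    for PEERING_NAME in ${PEERING_NAMES}; do az network vnet peering delete --name ${PEERING_NAME} --resource-group ${AKS_RESOURCE_GROUP} --vnet-name ${AKS_MGMT_VNET_NAME}; done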
CLUSTER_NAME=" + flavor.replace("windows", "win") + "-$RANDOM; echo " + yaml + "> ./.tiltbuild/" + flavor + "; cat ./.tiltbuild/" + flavor + " | " + envsubst_cmd + " | " + kubectl_cmd + " apply -f -; " + flavor_cmd += "echo \"Cluster ${CLUSTER_NAME} created, don't forget to delete\"; " # wait for kubeconfig to be available - flavor_cmd += "; until " + kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig > /dev/null 2>&1; do sleep 5; done; " + kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig -o jsonpath={.data.value} | base64 --decode > ./${CLUSTER_NAME}.kubeconfig; chmod 600 ./${CLUSTER_NAME}.kubeconfig; until " + kubectl_cmd + " --kubeconfig=./${CLUSTER_NAME}.kubeconfig get nodes > /dev/null 2>&1; do sleep 5; done" + flavor_cmd += "echo \"Waiting for kubeconfig to be available\"; " + flavor_cmd += "until " + kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig > /dev/null 2>&1; do sleep 5; done; " + flavor_cmd += kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig -o jsonpath={.data.value} | base64 --decode > ./${CLUSTER_NAME}.kubeconfig; " + flavor_cmd += "chmod 600 ./${CLUSTER_NAME}.kubeconfig; " + flavor_cmd += "echo \"Kubeconfig for ${CLUSTER_NAME} created and saved in the local\"; " + flavor_cmd += "echo \"Waiting for ${CLUSTER_NAME} API Server to be accessible\"; " + flavor_cmd += "until " + kubectl_cmd + " --kubeconfig=./${CLUSTER_NAME}.kubeconfig get nodes > /dev/null 2>&1; do sleep 5; done; " + flavor_cmd += "echo \"API Server of ${CLUSTER_NAME} is accessible\"; " # copy the kubeadm configmap to the calico-system namespace. # This is a workaround needed for the calico-node-windows daemonset to be able to run in the calico-system namespace. if "windows" in flavor_name: - flavor_cmd += "; until " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig get configmap kubeadm-config --namespace=kube-system > /dev/null 2>&1; do sleep 5; done" - flavor_cmd += "; " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig create namespace calico-system --dry-run=client -o yaml | " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig apply -f -; " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig get configmap kubeadm-config --namespace=kube-system -o yaml | sed 's/namespace: kube-system/namespace: calico-system/' | " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig apply -f -" + flavor_cmd += "until " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig get configmap kubeadm-config --namespace=kube-system > /dev/null 2>&1; do sleep 5; done; " + flavor_cmd += kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig create namespace calico-system --dry-run=client -o yaml | " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig apply -f -; " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig get configmap kubeadm-config --namespace=kube-system -o yaml | sed 's/namespace: kube-system/namespace: calico-system/' | " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig apply -f -; " + + if "aks" in settings.get("kustomize_substitutions", {}).get("MGMT_CLUSTER_NAME", ""): + flavor_cmd += peer_vnets() flavor_cmd += get_addons(flavor_name) @@ -409,14 +433,15 @@ def get_addons(flavor_name): if "aks" in flavor_name: return "" - addon_cmd = "; export CIDRS=$(" + kubectl_cmd + " get cluster ${CLUSTER_NAME} -o jsonpath='{.spec.clusterNetwork.pods.cidrBlocks[*]}')" - addon_cmd += "; export CIDR_LIST=$(bash -c 'echo $CIDRS' | tr ' ' ',')" - addon_cmd += "; " + helm_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig install --repo 
@@ -409,14 +433,15 @@ def get_addons(flavor_name):
     if "aks" in flavor_name:
         return ""

-    addon_cmd = "; export CIDRS=$(" + kubectl_cmd + " get cluster ${CLUSTER_NAME} -o jsonpath='{.spec.clusterNetwork.pods.cidrBlocks[*]}')"
-    addon_cmd += "; export CIDR_LIST=$(bash -c 'echo $CIDRS' | tr ' ' ',')"
-    addon_cmd += "; " + helm_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig install --repo https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo cloud-provider-azure --generate-name --set infra.clusterName=${CLUSTER_NAME} --set cloudControllerManager.clusterCIDR=${CIDR_LIST}"
+    addon_cmd = "export CIDRS=$(" + kubectl_cmd + " get cluster ${CLUSTER_NAME} -o jsonpath='{.spec.clusterNetwork.pods.cidrBlocks[*]}'); "
+    addon_cmd += "export CIDR_LIST=$(bash -c 'echo $CIDRS' | tr ' ' ','); "
+    addon_cmd += helm_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig install --repo https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo cloud-provider-azure --generate-name --set infra.clusterName=${CLUSTER_NAME} --set cloudControllerManager.clusterCIDR=${CIDR_LIST}"
     if "flatcar" in flavor_name: # append caCertDir location to the cloud-provider-azure helm install command for flatcar flavor
         addon_cmd += " --set-string cloudControllerManager.caCertDir=/usr/share/ca-certificates"
+    addon_cmd += "; "

     if "azure-cni-v1" in flavor_name:
-        addon_cmd += "; " + kubectl_cmd + " apply -f ./templates/addons/azure-cni-v1.yaml --kubeconfig ./${CLUSTER_NAME}.kubeconfig"
+        addon_cmd += kubectl_cmd + " apply -f ./templates/addons/azure-cni-v1.yaml --kubeconfig ./${CLUSTER_NAME}.kubeconfig; "
     else:
         # install calico
         if "ipv6" in flavor_name:
@@ -425,7 +450,7 @@ def get_addons(flavor_name):
             calico_values = "./templates/addons/calico-dual-stack/values.yaml"
         else:
             calico_values = "./templates/addons/calico/values.yaml"
-        addon_cmd += "; " + helm_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig install --repo https://docs.tigera.io/calico/charts --version ${CALICO_VERSION} calico tigera-operator -f " + calico_values + " --namespace tigera-operator --create-namespace"
+        addon_cmd += helm_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig install --repo https://docs.tigera.io/calico/charts --version ${CALICO_VERSION} calico tigera-operator -f " + calico_values + " --namespace tigera-operator --create-namespace; "

     return addon_cmd
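The CIDR_LIST step converts the space-separated jsonpath output into the comma-separated value the cloud-provider-azure chart expects, for example (CIDR values are illustrative):

    export CIDRS="192.168.0.0/16 2001:db8:42::/56"
    export CIDR_LIST=$(bash -c 'echo $CIDRS' | tr ' ' ',')
    echo "${CIDR_LIST}"    # prints: 192.168.0.0/16,2001:db8:42::/56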
@@ -454,6 +479,73 @@ def waitforsystem():
     local(kubectl_cmd + " wait --for=condition=ready --timeout=300s pod --all -n capi-kubeadm-control-plane-system")
     local(kubectl_cmd + " wait --for=condition=ready --timeout=300s pod --all -n capi-system")

+def peer_vnets():
+    # TODO: check that the az CLI is installed locally
+    # wait for the AKS VNet to reach the created state
+    peering_cmd = '''
+    echo \"--------Peering VNETs--------\";
+    az network vnet wait --resource-group ${AKS_RESOURCE_GROUP} --name ${AKS_MGMT_VNET_NAME} --created --timeout 180;
+    export MGMT_VNET_ID=$(az network vnet show --resource-group ${AKS_RESOURCE_GROUP} --name ${AKS_MGMT_VNET_NAME} --query id --output tsv);
+    echo \" 1/8 ${AKS_MGMT_VNET_NAME} found \"; '''
+
+    # wait for the workload VNet to be created
+    peering_cmd += '''
+    az network vnet wait --resource-group ${CLUSTER_NAME} --name ${CLUSTER_NAME}-vnet --created --timeout 180;
+    export WORKLOAD_VNET_ID=$(az network vnet show --resource-group ${CLUSTER_NAME} --name ${CLUSTER_NAME}-vnet --query id --output tsv);
+    echo \" 2/8 ${CLUSTER_NAME}-vnet found \"; '''
+
+    # peer the mgmt vnet
+    peering_cmd += '''
+    az network vnet peering create --name mgmt-to-${CLUSTER_NAME} --resource-group ${AKS_RESOURCE_GROUP} --vnet-name ${AKS_MGMT_VNET_NAME} --remote-vnet \"${WORKLOAD_VNET_ID}\" --allow-vnet-access true --allow-forwarded-traffic true --only-show-errors --output none;
+    az network vnet peering wait --name mgmt-to-${CLUSTER_NAME} --resource-group ${AKS_RESOURCE_GROUP} --vnet-name ${AKS_MGMT_VNET_NAME} --created --timeout 300 --only-show-errors --output none;
+    echo \" 3/8 mgmt-to-${CLUSTER_NAME} peering created in ${AKS_MGMT_VNET_NAME}\"; '''
+
+    # peer the workload vnet
+    peering_cmd += '''
+    az network vnet peering create --name ${CLUSTER_NAME}-to-mgmt --resource-group ${CLUSTER_NAME} --vnet-name ${CLUSTER_NAME}-vnet --remote-vnet \"${MGMT_VNET_ID}\" --allow-vnet-access true --allow-forwarded-traffic true --only-show-errors --output none;
+    az network vnet peering wait --name ${CLUSTER_NAME}-to-mgmt --resource-group ${CLUSTER_NAME} --vnet-name ${CLUSTER_NAME}-vnet --created --timeout 300 --only-show-errors --output none;
+    echo \" 4/8 ${CLUSTER_NAME}-to-mgmt peering created in ${CLUSTER_NAME}-vnet\"; '''
+
+    # create the private DNS zone
+    peering_cmd += '''
+    az network private-dns zone create --resource-group ${CLUSTER_NAME} --name ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com --only-show-errors --output none;
+    az network private-dns zone wait --resource-group ${CLUSTER_NAME} --name ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com --created --timeout 300 --only-show-errors --output none;
+    echo \" 5/8 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com private DNS zone created in ${CLUSTER_NAME}\"; '''
+
+    # link the private DNS zone to the workload vnet
+    peering_cmd += '''
+    az network private-dns link vnet create --resource-group ${CLUSTER_NAME} --zone-name ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com --name ${CLUSTER_NAME}-to-mgmt --virtual-network \"${WORKLOAD_VNET_ID}\" --registration-enabled false --only-show-errors --output none;
+    az network private-dns link vnet wait --resource-group ${CLUSTER_NAME} --zone-name ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com --name ${CLUSTER_NAME}-to-mgmt --created --timeout 300 --only-show-errors --output none;
+    echo \" 6/8 workload cluster vnet ${CLUSTER_NAME}-vnet linked with private DNS zone\"; '''
+
+    # link the private DNS zone to the mgmt vnet
+    peering_cmd += '''
+    az network private-dns link vnet create --resource-group ${CLUSTER_NAME} --zone-name ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com --name mgmt-to-${CLUSTER_NAME} --virtual-network \"${MGMT_VNET_ID}\" --registration-enabled false --only-show-errors --output none;
+    az network private-dns link vnet wait --resource-group ${CLUSTER_NAME} --zone-name ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com --name mgmt-to-${CLUSTER_NAME} --created --timeout 300 --only-show-errors --output none;
+    echo \" 7/8 management cluster vnet ${AKS_MGMT_VNET_NAME} linked with private DNS zone\"; '''
+
+    # create the private DNS zone record
+    peering_cmd += '''
+    az network private-dns record-set a add-record --resource-group ${CLUSTER_NAME} --zone-name ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com --record-set-name \"@\" --ipv4-address ${AZURE_INTERNAL_LB_PRIVATE_IP} --only-show-errors --output none;
+    echo \" 8/8 \"@\" private DNS zone record created to point ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com to ${AZURE_INTERNAL_LB_PRIVATE_IP}\"; '''
+
+    return peering_cmd
+
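Once a flavor is up, the peering and DNS wiring can be spot-checked with the same az CLI calls the Tiltfile shells out to (resource names follow the conventions above):

    # both peerings should report peeringState of Connected
    az network vnet peering list --resource-group ${AKS_RESOURCE_GROUP} --vnet-name ${AKS_MGMT_VNET_NAME} --query "[].{name:name, state:peeringState}" --output table
    # the zone should contain the "@" A record pointing at the ILB private IP
    az network private-dns record-set a list --resource-group ${CLUSTER_NAME} --zone-name ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com --output table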
+def clear_aks_vnet_peerings():
+    # wait for the mgmt VNet, then remove every peering it holds
+    delete_peering_cmd = '''
+    echo \"--------Clearing AKS MGMT VNet Peerings--------\";
+    az network vnet wait --resource-group ${AKS_RESOURCE_GROUP} --name ${AKS_MGMT_VNET_NAME} --created --timeout 180;
+    echo \" VNet ${AKS_MGMT_VNET_NAME} found \"; '''
+
+    # list all peering names and delete them one by one
+    # (plain word-splitting instead of a bash array, since the command runs under "sh -ec")
+    delete_peering_cmd += '''
+    PEERING_NAMES=$(az network vnet peering list --resource-group ${AKS_RESOURCE_GROUP} --vnet-name ${AKS_MGMT_VNET_NAME} --query \"[].name\" --output tsv);
+    for PEERING_NAME in ${PEERING_NAMES}; do echo \"Deleting peering: ${PEERING_NAME}\"; az network vnet peering delete --name ${PEERING_NAME} --resource-group ${AKS_RESOURCE_GROUP} --vnet-name ${AKS_MGMT_VNET_NAME}; done;
+    echo \"All VNet peerings deleted in ${AKS_MGMT_VNET_NAME}\"; '''
+
+    return delete_peering_cmd
+
 ##############################
 # Actual work happens here
 ##############################
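Taken together, the Tiltfile changes mean the internal LB IP never has to be edited into a template by hand: one environment variable set before starting Tilt flows through os.getenv and envsubst, e.g.:

    # optional override; otherwise the Tiltfile exports per-flavor defaults (30.0.11.100 Linux, 40.0.11.100 Windows)
    export AZURE_INTERNAL_LB_PRIVATE_IP="30.0.11.100"
    tilt up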
echo "creating vnet for the resource group ${AKS_RESOURCE_GROUP}" + az network vnet create \ + --resource-group "${AKS_RESOURCE_GROUP}"\ + --name "${AKS_MGMT_VNET_NAME}" \ + --address-prefix "${AKS_MGMT_VNET_CIDR}" \ + --subnet-name "${AKS_MGMT_SUBNET_NAME}" \ + --subnet-prefix "${AKS_MGMT_SUBNET_CIDR}" \ + --output none --only-show-errors \ + --tags creationTimestamp="${TIMESTAMP}" jobName="${JOB_NAME}" buildProvenance="${BUILD_PROVENANCE}" fi aks_exists=$(az aks show --name "${MGMT_CLUSTER_NAME}" --resource-group "${AKS_RESOURCE_GROUP}" 2>&1 || true) # true because we want to continue if the command fails @@ -110,13 +133,16 @@ create_aks_cluster() { az aks create --name "${MGMT_CLUSTER_NAME}" \ --resource-group "${AKS_RESOURCE_GROUP}" \ --location "${AZURE_LOCATION}" \ - --kubernetes-version "${KUBERNETES_VERSION}" \ + --kubernetes-version "${AKS_MGMT_KUBERNETES_VERSION}" \ --node-count "${AKS_NODE_COUNT}" \ --node-vm-size "${AKS_NODE_VM_SIZE}" \ --node-resource-group "${AKS_NODE_RESOURCE_GROUP}" \ --vm-set-type VirtualMachineScaleSets \ --generate-ssh-keys \ --network-plugin azure \ + --vnet-subnet-id "/subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${AKS_RESOURCE_GROUP}/providers/Microsoft.Network/virtualNetworks/${AKS_MGMT_VNET_NAME}/subnets/${AKS_MGMT_SUBNET_NAME}" \ + --service-cidr "${AKS_MGMT_SERVICE_CIDR}" \ + --dns-service-ip "${AKS_MGMT_DNS_SERVICE_IP}" \ --tags creationTimestamp="${TIMESTAMP}" jobName="${JOB_NAME}" buildProvenance="${BUILD_PROVENANCE}" \ --output none --only-show-errors; elif echo "$aks_exists" | grep -q "${MGMT_CLUSTER_NAME}"; then @@ -127,6 +153,7 @@ create_aks_cluster() { fi # check and save kubeconfig + echo -e "\n" echo "saving credentials of cluster ${MGMT_CLUSTER_NAME} in ${REPO_ROOT}/${MGMT_CLUSTER_KUBECONFIG}" az aks get-credentials --name "${MGMT_CLUSTER_NAME}" --resource-group "${AKS_RESOURCE_GROUP}" \ --file "${REPO_ROOT}/${MGMT_CLUSTER_KUBECONFIG}" --only-show-errors @@ -179,15 +206,10 @@ create_aks_cluster() { set_env_varaibles(){ cat < tilt-settings-temp.yaml kustomize_substitutions: - MGMT_CLUSTER_NAME: "${MGMT_CLUSTER_NAME}" AKS_RESOURCE_GROUP: "${AKS_RESOURCE_GROUP}" AKS_NODE_RESOURCE_GROUP: "${AKS_NODE_RESOURCE_GROUP}" - MGMT_CLUSTER_KUBECONFIG: "${MGMT_CLUSTER_KUBECONFIG}" - AKS_MI_CLIENT_ID: "${AKS_MI_CLIENT_ID}" - AKS_MI_OBJECT_ID: "${AKS_MI_OBJECT_ID}" - AKS_MI_RESOURCE_ID: "${AKS_MI_RESOURCE_ID}" - MANAGED_IDENTITY_NAME: "${MANAGED_IDENTITY_NAME}" - MANAGED_IDENTITY_RG: "${MANAGED_IDENTITY_RG}" + AKS_MGMT_VNET_NAME: "${AKS_MGMT_VNET_NAME}" + MGMT_CLUSTER_NAME: "${MGMT_CLUSTER_NAME}" AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY: "${AKS_MI_CLIENT_ID}" CI_RG: "${MANAGED_IDENTITY_RG}" USER_IDENTITY: "${MANAGED_IDENTITY_NAME}" diff --git a/templates/cluster-template-apiserver-ilb.yaml b/templates/cluster-template-apiserver-ilb.yaml index b0a5519f307..fee9a9cadac 100644 --- a/templates/cluster-template-apiserver-ilb.yaml +++ b/templates/cluster-template-apiserver-ilb.yaml @@ -36,7 +36,7 @@ spec: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb - name: ${CLUSTER_NAME}-internal-lb-private-ip - privateIP: 30.0.0.100 + privateIP: ${AZURE_INTERNAL_LB_PRIVATE_IP:-30.0.0.100} subnets: - cidrBlocks: - 30.0.0.0/16 @@ -204,7 +204,7 @@ spec: cloud-provider: external name: '{{ ds.meta_data["local_hostname"] }}' preKubeadmCommands: - - echo '30.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' + - echo '${AZURE_INTERNAL_LB_PRIVATE_IP:-30.0.0.100} 
diff --git a/templates/cluster-template-apiserver-ilb.yaml b/templates/cluster-template-apiserver-ilb.yaml
index b0a5519f307..fee9a9cadac 100644
--- a/templates/cluster-template-apiserver-ilb.yaml
+++ b/templates/cluster-template-apiserver-ilb.yaml
@@ -36,7 +36,7 @@ spec:
         dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com
         name: ${CLUSTER_NAME}-api-lb
       - name: ${CLUSTER_NAME}-internal-lb-private-ip
-        privateIP: 30.0.0.100
+        privateIP: ${AZURE_INTERNAL_LB_PRIVATE_IP:-30.0.0.100}
     subnets:
     - cidrBlocks:
       - 30.0.0.0/16
@@ -204,7 +204,7 @@ spec:
           cloud-provider: external
         name: '{{ ds.meta_data["local_hostname"] }}'
       preKubeadmCommands:
-      - echo '30.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts
+      - echo '${AZURE_INTERNAL_LB_PRIVATE_IP:-30.0.0.100} ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts
 ---
 apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
diff --git a/templates/cluster-template-windows-apiserver-ilb.yaml b/templates/cluster-template-windows-apiserver-ilb.yaml
index fff149a4e53..80b9470222a 100644
--- a/templates/cluster-template-windows-apiserver-ilb.yaml
+++ b/templates/cluster-template-windows-apiserver-ilb.yaml
@@ -40,7 +40,7 @@ spec:
         dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com
         name: ${CLUSTER_NAME}-api-lb
       - name: ${CLUSTER_NAME}-internal-lb-private-ip
-        privateIP: 40.0.11.100
+        privateIP: ${AZURE_INTERNAL_LB_PRIVATE_IP:-40.0.0.100}
     subnets:
     - cidrBlocks:
       - 40.0.0.0/16
@@ -208,7 +208,7 @@ spec:
           cloud-provider: external
         name: '{{ ds.meta_data["local_hostname"] }}'
       preKubeadmCommands:
-      - echo '40.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts
+      - echo '${AZURE_INTERNAL_LB_PRIVATE_IP:-40.0.0.100} ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts
 ---
 apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
@@ -306,7 +306,7 @@ spec:
       - powershell C:/defender-exclude-calico.ps1
       preKubeadmCommands:
       - powershell -Command "Add-Content -Path 'C:\\Windows\\System32\\drivers\\etc\\hosts'
-        -Value '40.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com'"
+        -Value '${AZURE_INTERNAL_LB_PRIVATE_IP:-40.0.0.100} ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com'"
       users:
       - groups: Administrators
         name: capi
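The ${AZURE_INTERNAL_LB_PRIVATE_IP:-40.0.0.100} syntax relies on a template processor that understands default values; clusterctl generate and the drone/envsubst-based binary this repo builds do, while plain GNU gettext envsubst does not expand the :- form. A sketch of rendering one of these templates outside Tilt (the hack/tools/bin/envsubst path is an assumption about the local tools build):

    export AZURE_INTERNAL_LB_PRIVATE_IP="40.0.11.100"
    ./hack/tools/bin/envsubst < templates/cluster-template-windows-apiserver-ilb.yaml | kubectl apply -f -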
diff --git a/templates/flavors/apiserver-ilb/patches/control-plane.yaml b/templates/flavors/apiserver-ilb/patches/control-plane.yaml
index 3d954fc8584..9e41e78b75a 100644
--- a/templates/flavors/apiserver-ilb/patches/control-plane.yaml
+++ b/templates/flavors/apiserver-ilb/patches/control-plane.yaml
@@ -13,7 +13,7 @@ spec:
           name: ${CLUSTER_NAME}-api-lb
           dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com
         - name: ${CLUSTER_NAME}-internal-lb-private-ip
-          privateIP: 30.0.0.100
+          privateIP: ${AZURE_INTERNAL_LB_PRIVATE_IP:-30.0.0.100}
       vnet:
         cidrBlocks:
           - 30.0.0.0/8
diff --git a/templates/flavors/apiserver-ilb/patches/kubeadm-config-template.yaml b/templates/flavors/apiserver-ilb/patches/kubeadm-config-template.yaml
index 84c9cd4d07f..3b657cc407b 100644
--- a/templates/flavors/apiserver-ilb/patches/kubeadm-config-template.yaml
+++ b/templates/flavors/apiserver-ilb/patches/kubeadm-config-template.yaml
@@ -9,4 +9,4 @@ spec:
       # This custom DNS resolution of the API server ensures that the worker nodes can reach the API server when
       # the public IP of the API server is not accessible.
       preKubeadmCommands:
-      - echo '30.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts
+      - echo '${AZURE_INTERNAL_LB_PRIVATE_IP:-30.0.0.100} ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts
diff --git a/templates/flavors/windows-apiserver-ilb/kustomization.yaml b/templates/flavors/windows-apiserver-ilb/kustomization.yaml
index 83f7ba8fe03..bb1a5e96279 100644
--- a/templates/flavors/windows-apiserver-ilb/kustomization.yaml
+++ b/templates/flavors/windows-apiserver-ilb/kustomization.yaml
@@ -14,13 +14,13 @@ patches:
     patch: |-
       - op: replace
         path: /spec/template/spec/preKubeadmCommands/0
-        value: echo '40.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts
+        value: echo '${AZURE_INTERNAL_LB_PRIVATE_IP:-40.0.0.100} ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts
  - target:
      kind: AzureCluster
    patch: |-
      - op: replace
        path: /spec/networkSpec/apiServerLB/frontendIPs/1/privateIP
-        value: 40.0.11.100
+        value: ${AZURE_INTERNAL_LB_PRIVATE_IP:-40.0.0.100}
  - target:
      kind: AzureCluster
    patch: |-
@@ -46,7 +46,7 @@ patches:
       - op: replace
         path: /spec/template/spec/preKubeadmCommands/0
         value:
-          powershell -Command "Add-Content -Path 'C:\\Windows\\System32\\drivers\\etc\\hosts' -Value '40.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com'"
+          powershell -Command "Add-Content -Path 'C:\\Windows\\System32\\drivers\\etc\\hosts' -Value '${AZURE_INTERNAL_LB_PRIVATE_IP:-40.0.0.100} ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com'"
 sortOptions:
   order: fifo
diff --git a/templates/flavors/windows-apiserver-ilb/patches/kubeadm-config-template.yaml b/templates/flavors/windows-apiserver-ilb/patches/kubeadm-config-template.yaml
index 3509792752a..cb22f9843db 100644
--- a/templates/flavors/windows-apiserver-ilb/patches/kubeadm-config-template.yaml
+++ b/templates/flavors/windows-apiserver-ilb/patches/kubeadm-config-template.yaml
@@ -9,4 +9,4 @@ spec:
       # so that worker nodes can access the API server using the internal IP.
       # 10.0.0.100 is the default IP that gets assigned to an internal load balancer.
       preKubeadmCommands:
-      - powershell -Command "Add-Content -Path 'C:\\Windows\\System32\\drivers\\etc\\hosts' -Value '10.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com'"
+      - powershell -Command "Add-Content -Path 'C:\\Windows\\System32\\drivers\\etc\\hosts' -Value '${AZURE_INTERNAL_LB_PRIVATE_IP:-40.0.0.100} ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com'"
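Net effect of the template changes: every node resolves the API server's public DNS name to the internal LB's private IP through a static hosts entry, so kubeadm join keeps working even when the public frontend is unreachable. With the defaults above, the appended entry looks like this (cluster name and DNS suffix are illustrative):

    # /etc/hosts on Linux nodes; C:\Windows\System32\drivers\etc\hosts on Windows nodes
    40.0.0.100 win-apiserver-ilb-12345-abcdef.westus2.cloudapp.azure.com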