diff --git a/Tiltfile b/Tiltfile index aa9a4927a37..d5aa30d721a 100644 --- a/Tiltfile +++ b/Tiltfile @@ -23,8 +23,8 @@ settings = { "capi_version": "v1.8.5", "caaph_version": "v0.2.5", "cert_manager_version": "v1.16.1", - "kubernetes_version": "v1.28.3", - "aks_kubernetes_version": "v1.28.3", + "kubernetes_version": "v1.28.15", + "aks_kubernetes_version": "v1.28.15", "flatcar_version": "3374.2.1", "azure_location": "eastus", "control_plane_machine_count": "1", @@ -212,10 +212,10 @@ def capz(): yaml = str(kustomizesub("./hack/observability")) # build an observable kind deployment by default # add extra_args if they are defined - if settings.get("extra_args"): - azure_extra_args = settings.get("extra_args").get("azure") + if settings.get("container_args"): + capz_container_args = settings.get("container_args").get("capz-controller-manager") yaml_dict = decode_yaml_stream(yaml) - append_arg_for_container_in_deployment(yaml_dict, "capz-controller-manager", "capz-system", "cluster-api-azure-controller", azure_extra_args) + append_arg_for_container_in_deployment(yaml_dict, "capz-controller-manager", "capz-system", "cluster-api-azure-controller", capz_container_args) yaml = str(encode_yaml_stream(yaml_dict)) yaml = fixup_yaml_empty_arrays(yaml) @@ -317,9 +317,14 @@ def flavors(): for template in template_list: deploy_worker_templates(template, substitutions) + delete_all_workload_clusters = kubectl_cmd + " delete clusters --all --wait=false" + + if "aks" in settings.get("kustomize_substitutions", {}).get("MGMT_CLUSTER_NAME", ""): + delete_all_workload_clusters += clear_aks_vnet_peerings() + local_resource( name = "delete-all-workload-clusters", - cmd = kubectl_cmd + " delete clusters --all --wait=false", + cmd = ["sh", "-ec", delete_all_workload_clusters], auto_init = False, trigger_mode = TRIGGER_MODE_MANUAL, labels = ["flavors"], @@ -382,10 +387,19 @@ def deploy_worker_templates(template, substitutions): yaml = shlex.quote(yaml) flavor_name = os.path.basename(flavor) - flavor_cmd = "RANDOM=$(bash -c 'echo $RANDOM'); export CLUSTER_NAME=" + flavor.replace("windows", "win") + "-$RANDOM; make generate-flavors; echo " + yaml + "> ./.tiltbuild/" + flavor + "; cat ./.tiltbuild/" + flavor + " | " + envsubst_cmd + " | " + kubectl_cmd + " apply -f -; echo \"Cluster \'$CLUSTER_NAME\' created, don't forget to delete\"" + flavor_cmd = "RANDOM=$(bash -c 'echo $RANDOM')" + flavor_cmd += "; export CLUSTER_NAME=" + flavor.replace("windows", "win") + "-$RANDOM; echo " + yaml + "> ./.tiltbuild/" + flavor + "; cat ./.tiltbuild/" + flavor + " | " + envsubst_cmd + " | " + kubectl_cmd + " apply -f -" + flavor_cmd += "; echo \"Cluster \'$CLUSTER_NAME\' created, don't forget to delete\"" # wait for kubeconfig to be available - flavor_cmd += "; until " + kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig > /dev/null 2>&1; do sleep 5; done; " + kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig -o jsonpath={.data.value} | base64 --decode > ./${CLUSTER_NAME}.kubeconfig; chmod 600 ./${CLUSTER_NAME}.kubeconfig; until " + kubectl_cmd + " --kubeconfig=./${CLUSTER_NAME}.kubeconfig get nodes > /dev/null 2>&1; do sleep 5; done" + flavor_cmd += "; echo \"Waiting for kubeconfig to be available\"" + flavor_cmd += "; until " + kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig > /dev/null 2>&1; do sleep 5; done" + flavor_cmd += "; " + kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig -o jsonpath={.data.value} | base64 --decode > ./${CLUSTER_NAME}.kubeconfig" + flavor_cmd += "; chmod 600 ./${CLUSTER_NAME}.kubeconfig" + 
flavor_cmd += "; echo \"Kubeconfig for $CLUSTER_NAME created and saved in the local\"" + flavor_cmd += "; echo \"Waiting for $CLUSTER_NAME API Server to be accessible\"" + flavor_cmd += "; until " + kubectl_cmd + " --kubeconfig=./${CLUSTER_NAME}.kubeconfig get nodes > /dev/null 2>&1; do sleep 5; done" + flavor_cmd += "; echo \"API Server of $CLUSTER_NAME is accessible\"" # copy the kubeadm configmap to the calico-system namespace. # This is a workaround needed for the calico-node-windows daemonset to be able to run in the calico-system namespace. @@ -393,6 +407,9 @@ def deploy_worker_templates(template, substitutions): flavor_cmd += "; until " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig get configmap kubeadm-config --namespace=kube-system > /dev/null 2>&1; do sleep 5; done" flavor_cmd += "; " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig create namespace calico-system --dry-run=client -o yaml | " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig apply -f -; " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig get configmap kubeadm-config --namespace=kube-system -o yaml | sed 's/namespace: kube-system/namespace: calico-system/' | " + kubectl_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig apply -f -" + if "aks" in settings.get("kustomize_substitutions", {}).get("MGMT_CLUSTER_NAME", ""): + flavor_cmd += peer_vnets() + flavor_cmd += get_addons(flavor_name) local_resource( @@ -454,6 +471,63 @@ def waitforsystem(): local(kubectl_cmd + " wait --for=condition=ready --timeout=300s pod --all -n capi-kubeadm-control-plane-system") local(kubectl_cmd + " wait --for=condition=ready --timeout=300s pod --all -n capi-system") +def peer_vnets(): + # TODO: check for az cli to be installed in local + # wait for AKS VNet to be in the state created + peering_cmd = "; echo \"--------Peering VNETs--------\"" + peering_cmd += "; az network vnet wait --resource-group ${AKS_RESOURCE_GROUP} --name ${AKS_MGMT_VNET_NAME} --created --timeout 180" + peering_cmd += "; export MGMT_VNET_ID=$(az network vnet show --resource-group ${AKS_RESOURCE_GROUP} --name ${AKS_MGMT_VNET_NAME} --query id --output tsv)" + peering_cmd += "; echo \" 1/8 ${AKS_MGMT_VNET_NAME} found \"" + + # wait for workload VNet to be created + peering_cmd += "; az network vnet wait --resource-group ${CLUSTER_NAME} --name ${CLUSTER_NAME}-vnet --created --timeout 180" + peering_cmd += "; export WORKLOAD_VNET_ID=$(az network vnet show --resource-group ${CLUSTER_NAME} --name ${CLUSTER_NAME}-vnet --query id --output tsv)" + peering_cmd += "; echo \" 2/8 ${CLUSTER_NAME}-vnet found \"" + + # peer mgmt vnet + peering_cmd += "; az network vnet peering create --name mgmt-to-${CLUSTER_NAME} --resource-group ${AKS_RESOURCE_GROUP} --vnet-name ${AKS_MGMT_VNET_NAME} --remote-vnet \"${WORKLOAD_VNET_ID}\" --allow-vnet-access true --allow-forwarded-traffic true --only-show-errors --output none" + peering_cmd += "; az network vnet peering wait --name mgmt-to-${CLUSTER_NAME} --resource-group ${AKS_RESOURCE_GROUP} --vnet-name ${AKS_MGMT_VNET_NAME} --created --timeout 300 --only-show-errors --output none" + peering_cmd += "; echo \" 3/8 mgmt-to-${CLUSTER_NAME} peering created in ${AKS_MGMT_VNET_NAME}\"" + + # peer workload vnet + peering_cmd += "; az network vnet peering create --name ${CLUSTER_NAME}-to-mgmt --resource-group ${CLUSTER_NAME} --vnet-name ${CLUSTER_NAME}-vnet --remote-vnet \"${MGMT_VNET_ID}\" --allow-vnet-access true --allow-forwarded-traffic true --only-show-errors --output none" + peering_cmd += "; az network 
vnet peering wait --name ${CLUSTER_NAME}-to-mgmt --resource-group ${CLUSTER_NAME} --vnet-name ${CLUSTER_NAME}-vnet --created --timeout 300 --only-show-errors --output none" + peering_cmd += "; echo \" 4/8 ${CLUSTER_NAME}-to-mgmt peering created in ${CLUSTER_NAME}-vnet\"" + + # create private DNS zone + peering_cmd += "; az network private-dns zone create --resource-group ${CLUSTER_NAME} --name ${AZURE_LOCATION}.cloudapp.azure.com --only-show-errors --output none" + peering_cmd += "; az network private-dns zone wait --resource-group ${CLUSTER_NAME} --name ${AZURE_LOCATION}.cloudapp.azure.com --created --timeout 300 --only-show-errors --output none" + peering_cmd += "; echo \" 5/8 ${AZURE_LOCATION}.cloudapp.azure.com private DNS zone created in ${CLUSTER_NAME}\"" + + # link private DNS Zone to workload vnet + peering_cmd += "; az network private-dns link vnet create --resource-group ${CLUSTER_NAME} --zone-name ${AZURE_LOCATION}.cloudapp.azure.com --name ${CLUSTER_NAME}-to-mgmt --virtual-network \"${WORKLOAD_VNET_ID}\" --registration-enabled false --only-show-errors --output none" + peering_cmd += "; az network private-dns link vnet wait --resource-group ${CLUSTER_NAME} --zone-name ${AZURE_LOCATION}.cloudapp.azure.com --name ${CLUSTER_NAME}-to-mgmt --created --timeout 300 --only-show-errors --output none" + peering_cmd += "; echo \" 6/8 workload cluster vnet ${CLUSTER_NAME}-vnet linked with private DNS zone\"" + + # link private DNS Zone to mgmt vnet + peering_cmd += "; az network private-dns link vnet create --resource-group ${CLUSTER_NAME} --zone-name ${AZURE_LOCATION}.cloudapp.azure.com --name mgmt-to-${CLUSTER_NAME} --virtual-network \"${MGMT_VNET_ID}\" --registration-enabled false --only-show-errors --output none" + peering_cmd += "; az network private-dns link vnet wait --resource-group ${CLUSTER_NAME} --zone-name ${AZURE_LOCATION}.cloudapp.azure.com --name mgmt-to-${CLUSTER_NAME} --created --timeout 300 --only-show-errors --output none" + peering_cmd += "; echo \" 7/8 management cluster vnet ${AKS_MGMT_VNET_NAME} linked with private DNS zone\"" + + # create private DNS zone record + # TODO: 10.0.0.100 should be customizable + peering_cmd += "; az network private-dns record-set a add-record --resource-group ${CLUSTER_NAME} --zone-name ${AZURE_LOCATION}.cloudapp.azure.com --record-set-name ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX} --ipv4-address 10.0.0.100 --only-show-errors --output none" + peering_cmd += "; echo \" 8/8 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX} private DNS zone record created\n\"" + + return peering_cmd + +def clear_aks_vnet_peerings(): + delete_peering_cmd = "; echo \"--------Clearing AKS MGMT VNET Peerings--------\"" + delete_peering_cmd += "; az network vnet wait --resource-group ${AKS_RESOURCE_GROUP} --name ${AKS_MGMT_VNET_NAME} --created --timeout 180" + delete_peering_cmd += "; echo \" ${AKS_MGMT_VNET_NAME} found \"" + + # List all peering names and delete them one by one (POSIX sh, since the command runs via "sh -ec") + delete_peering_cmd += "; PEERING_NAMES=$(az network vnet peering list --resource-group ${AKS_RESOURCE_GROUP} --vnet-name ${AKS_MGMT_VNET_NAME} --query \"[].name\" --output tsv)" + delete_peering_cmd += "; for PEERING_NAME in ${PEERING_NAMES}; do echo \"Deleting peering: ${PEERING_NAME}\"; az network vnet peering delete --name ${PEERING_NAME} --resource-group ${AKS_RESOURCE_GROUP} --vnet-name ${AKS_MGMT_VNET_NAME}; done" + delete_peering_cmd += "; echo \"All VNET peerings deleted in ${AKS_MGMT_VNET_NAME}\"" + + return delete_peering_cmd + ############################## # Actual work 
happens here ############################## diff --git a/scripts/aks-as-mgmt.sh b/scripts/aks-as-mgmt.sh index 1d97708c420..1fffb9a08bc 100755 --- a/scripts/aks-as-mgmt.sh +++ b/scripts/aks-as-mgmt.sh @@ -30,7 +30,7 @@ make --directory="${REPO_ROOT}" "${KUBECTL##*/}" "${AZWI##*/}" export MGMT_CLUSTER_NAME="${MGMT_CLUSTER_NAME:-aks-mgmt-capz-${RANDOM_SUFFIX}}" # management cluster name export AKS_RESOURCE_GROUP="${AKS_RESOURCE_GROUP:-aks-mgmt-capz-${RANDOM_SUFFIX}}" # resource group name export AKS_NODE_RESOURCE_GROUP="node-${AKS_RESOURCE_GROUP}" -export KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.30.2}" +export AKS_MGMT_KUBERNETES_VERSION="${AKS_MGMT_KUBERNETES_VERSION:-v1.30.2}" export AZURE_LOCATION="${AZURE_LOCATION:-westus2}" export AKS_NODE_VM_SIZE="${AKS_NODE_VM_SIZE:-"Standard_B2s"}" export AKS_NODE_COUNT="${AKS_NODE_COUNT:-1}" @@ -42,6 +42,13 @@ export AZWI_STORAGE_CONTAINER="\$web" export SERVICE_ACCOUNT_SIGNING_PUB_FILEPATH="${SERVICE_ACCOUNT_SIGNING_PUB_FILEPATH:-}" export SERVICE_ACCOUNT_SIGNING_KEY_FILEPATH="${SERVICE_ACCOUNT_SIGNING_KEY_FILEPATH:-}" export REGISTRY="${REGISTRY:-}" +export AKS_MGMT_VNET_NAME="${AKS_MGMT_VNET_NAME:-"aks-mgmt-vnet-${RANDOM_SUFFIX}"}" +export AKS_MGMT_VNET_CIDR="${AKS_MGMT_VNET_CIDR:-"20.255.0.0/16"}" +export AKS_MGMT_SERVICE_CIDR="${AKS_MGMT_SERVICE_CIDR:-"20.255.254.0/24"}" +export AKS_MGMT_DNS_SERVICE_IP="${AKS_MGMT_DNS_SERVICE_IP:-"20.255.254.100"}" +export AKS_MGMT_SUBNET_NAME="${AKS_MGMT_SUBNET_NAME:-"aks-mgmt-subnet-${RANDOM_SUFFIX}"}" +export AKS_MGMT_SUBNET_CIDR="${AKS_MGMT_SUBNET_CIDR:-"20.255.0.0/24"}" + export AZURE_SUBSCRIPTION_ID="${AZURE_SUBSCRIPTION_ID:-}" export AZURE_CLIENT_ID="${AZURE_CLIENT_ID:-}" @@ -63,7 +70,7 @@ main() { echo "MGMT_CLUSTER_NAME: $MGMT_CLUSTER_NAME" echo "AKS_RESOURCE_GROUP: $AKS_RESOURCE_GROUP" echo "AKS_NODE_RESOURCE_GROUP: $AKS_NODE_RESOURCE_GROUP" - echo "KUBERNETES_VERSION: $KUBERNETES_VERSION" + echo "AKS_MGMT_KUBERNETES_VERSION: $AKS_MGMT_KUBERNETES_VERSION" echo "AZURE_LOCATION: $AZURE_LOCATION" echo "AKS_NODE_VM_SIZE: $AKS_NODE_VM_SIZE" echo "AZURE_NODE_MACHINE_TYPE: $AZURE_NODE_MACHINE_TYPE" @@ -76,6 +83,12 @@ main() { echo "SERVICE_ACCOUNT_SIGNING_KEY_FILEPATH: $SERVICE_ACCOUNT_SIGNING_KEY_FILEPATH" echo "REGISTRY: $REGISTRY" echo "APISERVER_LB_DNS_SUFFIX: $APISERVER_LB_DNS_SUFFIX" + echo "AKS_MGMT_VNET_NAME: $AKS_MGMT_VNET_NAME" + echo "AKS_MGMT_VNET_CIDR: $AKS_MGMT_VNET_CIDR" + echo "AKS_MGMT_SERVICE_CIDR: $AKS_MGMT_SERVICE_CIDR" + echo "AKS_MGMT_DNS_SERVICE_IP: $AKS_MGMT_DNS_SERVICE_IP" + echo "AKS_MGMT_SUBNET_NAME: $AKS_MGMT_SUBNET_NAME" + echo "AKS_MGMT_SUBNET_CIDR: $AKS_MGMT_SUBNET_CIDR" echo "AZURE_SUBSCRIPTION_ID: $AZURE_SUBSCRIPTION_ID" echo "AZURE_CLIENT_ID: $AZURE_CLIENT_ID" @@ -102,6 +115,16 @@ create_aks_cluster() { --location "${AZURE_LOCATION}" \ --output none --only-show-errors \ --tags creationTimestamp="${TIMESTAMP}" jobName="${JOB_NAME}" buildProvenance="${BUILD_PROVENANCE}" + + echo "creating vnet for the resource group ${AKS_RESOURCE_GROUP}" + az network vnet create \ + --resource-group "${AKS_RESOURCE_GROUP}"\ + --name "${AKS_MGMT_VNET_NAME}" \ + --address-prefix "${AKS_MGMT_VNET_CIDR}" \ + --subnet-name "${AKS_MGMT_SUBNET_NAME}" \ + --subnet-prefix "${AKS_MGMT_SUBNET_CIDR}" \ + --output none --only-show-errors \ + --tags creationTimestamp="${TIMESTAMP}" jobName="${JOB_NAME}" buildProvenance="${BUILD_PROVENANCE}" fi aks_exists=$(az aks show --name "${MGMT_CLUSTER_NAME}" --resource-group "${AKS_RESOURCE_GROUP}" 2>&1 || true) # true because we want to continue if the command 
fails @@ -110,13 +133,16 @@ create_aks_cluster() { az aks create --name "${MGMT_CLUSTER_NAME}" \ --resource-group "${AKS_RESOURCE_GROUP}" \ --location "${AZURE_LOCATION}" \ - --kubernetes-version "${KUBERNETES_VERSION}" \ + --kubernetes-version "${AKS_MGMT_KUBERNETES_VERSION}" \ --node-count "${AKS_NODE_COUNT}" \ --node-vm-size "${AKS_NODE_VM_SIZE}" \ --node-resource-group "${AKS_NODE_RESOURCE_GROUP}" \ --vm-set-type VirtualMachineScaleSets \ --generate-ssh-keys \ --network-plugin azure \ + --vnet-subnet-id "/subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${AKS_RESOURCE_GROUP}/providers/Microsoft.Network/virtualNetworks/${AKS_MGMT_VNET_NAME}/subnets/${AKS_MGMT_SUBNET_NAME}" \ + --service-cidr "${AKS_MGMT_SERVICE_CIDR}" \ + --dns-service-ip "${AKS_MGMT_DNS_SERVICE_IP}" \ --tags creationTimestamp="${TIMESTAMP}" jobName="${JOB_NAME}" buildProvenance="${BUILD_PROVENANCE}" \ --output none --only-show-errors; elif echo "$aks_exists" | grep -q "${MGMT_CLUSTER_NAME}"; then @@ -127,6 +153,7 @@ create_aks_cluster() { fi # check and save kubeconfig + echo -e "\n" echo "saving credentials of cluster ${MGMT_CLUSTER_NAME} in ${REPO_ROOT}/${MGMT_CLUSTER_KUBECONFIG}" az aks get-credentials --name "${MGMT_CLUSTER_NAME}" --resource-group "${AKS_RESOURCE_GROUP}" \ --file "${REPO_ROOT}/${MGMT_CLUSTER_KUBECONFIG}" --only-show-errors @@ -179,15 +206,10 @@ create_aks_cluster() { set_env_varaibles(){ cat < tilt-settings-temp.yaml kustomize_substitutions: - MGMT_CLUSTER_NAME: "${MGMT_CLUSTER_NAME}" AKS_RESOURCE_GROUP: "${AKS_RESOURCE_GROUP}" AKS_NODE_RESOURCE_GROUP: "${AKS_NODE_RESOURCE_GROUP}" - MGMT_CLUSTER_KUBECONFIG: "${MGMT_CLUSTER_KUBECONFIG}" - AKS_MI_CLIENT_ID: "${AKS_MI_CLIENT_ID}" - AKS_MI_OBJECT_ID: "${AKS_MI_OBJECT_ID}" - AKS_MI_RESOURCE_ID: "${AKS_MI_RESOURCE_ID}" - MANAGED_IDENTITY_NAME: "${MANAGED_IDENTITY_NAME}" - MANAGED_IDENTITY_RG: "${MANAGED_IDENTITY_RG}" + AKS_MGMT_VNET_NAME: "${AKS_MGMT_VNET_NAME}" + MGMT_CLUSTER_NAME: "${MGMT_CLUSTER_NAME}" AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY: "${AKS_MI_CLIENT_ID}" CI_RG: "${MANAGED_IDENTITY_RG}" USER_IDENTITY: "${MANAGED_IDENTITY_NAME}" diff --git a/templates/cluster-template-private.yaml b/templates/cluster-template-private.yaml index 5663fb7403d..a2be5af9905 100644 --- a/templates/cluster-template-private.yaml +++ b/templates/cluster-template-private.yaml @@ -24,7 +24,10 @@ metadata: namespace: default spec: bastionSpec: - azureBastion: {} + azureBastion: + subnet: + cidrBlocks: + - 70.2.0.0/16 identityRef: apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: AzureClusterIdentity @@ -32,6 +35,9 @@ spec: location: ${AZURE_LOCATION} networkSpec: apiServerLB: + frontendIPs: + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 70.0.11.100 name: ${CLUSTER_NAME}-internal-lb type: Internal controlPlaneOutboundLB: @@ -39,11 +45,17 @@ spec: nodeOutboundLB: frontendIPsCount: 1 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 70.0.0.0/16 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 70.1.0.0/16 + name: node-subnet role: node vnet: + cidrBlocks: + - 70.0.0.0/8 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} @@ -203,7 +215,9 @@ spec: kubeletExtraArgs: cloud-provider: external name: '{{ ds.meta_data["local_hostname"] }}' - preKubeadmCommands: [] + preKubeadmCommands: + - echo '70.0.11.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' + >> /etc/hosts --- 
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: AzureClusterIdentity diff --git a/templates/flavors/README.md b/templates/flavors/README.md index db10f8d1b2f..fe1d53158d0 100644 --- a/templates/flavors/README.md +++ b/templates/flavors/README.md @@ -91,3 +91,25 @@ worker-templates: KUBERNETES_VERSION: v1.22.1 WORKER_MACHINE_COUNT: "1" ``` + +#### Tilt flavors and their CIDRs +- The VNet CIDRs below are opinionated defaults and can be changed as needed. +- The AKS cluster created by the [`aks-as-mgmt.sh`](../../scripts/aks-as-mgmt.sh) script uses `20.255.0.0/16` as its VNet CIDR, so the templates should use CIDRs that do not overlap with the AKS management cluster's VNet. + +| Flavor | VNet | Control Plane Subnet | Private IP | Node Subnet | |-----------------------------------------------------|---------------|----------------------|----------------|----------------| | [`default`](default/kustomization.yaml) | `10.0.0.0/8` | `10.0.0.0/16` | `10.0.0.100` | `10.1.0.0/16` | | [`aad`](aad/kustomization.yaml) | `30.0.0.0/8` | `30.0.0.0/16` | `30.0.11.100` | `30.1.0.0/16` | | [`azure-bastion`](azure-bastion/kustomization.yaml) | `40.0.0.0/8` | `40.0.0.0/16` | `40.0.11.100` | `40.1.0.0/16` | | [`azure-cni-v1`](azure-cni-v1/kustomization.yaml) | `50.0.0.0/8` | `50.0.0.0/16` | `50.0.11.100` | `50.1.0.0/16` | | [`edgezone`](edgezone/kustomization.yaml) | `60.0.0.0/8` | `60.0.0.0/16` | `60.0.11.100` | `60.1.0.0/16` | | [`private`](private/kustomization.yaml) | `70.0.0.0/8` | `70.0.0.0/16` | `70.0.11.100` | `70.1.0.0/16` | | [`dual-stack`](dual-stack/kustomization.yaml) | `10.0.0.0/8` | `10.0.0.0/16` | `10.0.11.100` | `10.1.0.0/16` | | [`ipv6`](ipv6/kustomization.yaml) | `10.0.0.0/8` | `10.0.0.0/16` | `10.0.11.100` | `10.1.0.0/16` | | [`flatcar`](flatcar/kustomization.yaml) | `80.0.0.0/8` | `80.0.0.0/16` | `80.0.11.100` | `80.1.0.0/16` | | [`nvidia-gpu`](nvidia-gpu/kustomization.yaml) | `90.0.0.0/8` | `90.0.0.0/16` | `90.0.11.100` | `90.1.0.0/16` | | [`windows`](windows/kustomization.yaml) | `10.0.0.0/8` | `10.0.0.0/16` | `10.0.11.100` | `10.1.0.0/16` | | [`ephemeral`](ephemeral/kustomization.yaml) | `100.0.0.0/8` | `100.0.0.0/16` | `100.0.11.100` | `100.1.0.0/16` | + +Note: +- The `dual-stack`, `ipv6`, and `windows` flavors have not yet been updated with non-overlapping CIDRs and still use the `10.0.0.0`-based ranges. 
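Since every flavor's VNet must stay clear of the management cluster's `20.255.0.0/16` range described above, a quick pre-flight overlap check can catch mistakes before running `make tilt-up`. This is only a sketch: the variable names are illustrative, nothing is read from the templates, and it assumes `python3` is available.

```bash
#!/usr/bin/env bash
# Minimal sketch: warn if a flavor's VNet CIDR overlaps the AKS management VNet CIDR.
set -euo pipefail

AKS_MGMT_VNET_CIDR="${AKS_MGMT_VNET_CIDR:-20.255.0.0/16}"  # default used by aks-as-mgmt.sh
FLAVOR_VNET_CIDR="${1:-10.0.0.0/8}"                        # e.g. the "default" flavor's VNet

# python3's ipaddress module does the overlap math.
if python3 -c 'import ipaddress,sys; a=ipaddress.ip_network(sys.argv[1]); b=ipaddress.ip_network(sys.argv[2]); sys.exit(0 if a.overlaps(b) else 1)' \
    "${FLAVOR_VNET_CIDR}" "${AKS_MGMT_VNET_CIDR}"; then
  echo "WARNING: ${FLAVOR_VNET_CIDR} overlaps the management VNet ${AKS_MGMT_VNET_CIDR}" >&2
  exit 1
fi
echo "OK: ${FLAVOR_VNET_CIDR} does not overlap ${AKS_MGMT_VNET_CIDR}"
```

Running it with `70.0.0.0/8` (the `private` flavor) should report no overlap with the management range.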
diff --git a/templates/flavors/apiserver-ilb/patches/azure-cluster-cidrs-and-frontend-ips.yaml b/templates/flavors/apiserver-ilb/patches/azure-cluster-cidrs-and-frontend-ips.yaml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/templates/flavors/azure-bastion/patches/azure-cluster.yaml b/templates/flavors/azure-bastion/patches/azure-cluster.yaml index 74d517df341..ab6d1efd426 100644 --- a/templates/flavors/azure-bastion/patches/azure-cluster.yaml +++ b/templates/flavors/azure-bastion/patches/azure-cluster.yaml @@ -5,4 +5,7 @@ metadata: name: ${CLUSTER_NAME} spec: bastionSpec: - azureBastion: {} + azureBastion: + subnet: + cidrBlocks: + - 40.2.0.0/16 diff --git a/templates/flavors/dual-stack/patches/dual-stack.yaml b/templates/flavors/dual-stack/patches/dual-stack.yaml index 95aca4fd28f..5613adb731d 100644 --- a/templates/flavors/dual-stack/patches/dual-stack.yaml +++ b/templates/flavors/dual-stack/patches/dual-stack.yaml @@ -24,6 +24,16 @@ metadata: name: ${CLUSTER_NAME} spec: networkSpec: + apiServerLB: + # We pre-create this public IP and the DNS name to use it in the + # worker node's /etc/hosts. + frontendIPs: + - name: ${CLUSTER_NAME}-api-lb + publicIP: + name: ${CLUSTER_NAME}-api-lb + dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.11.100 vnet: cidrBlocks: - "10.0.0.0/8" diff --git a/templates/flavors/ipv6/patches/ipv6.yaml b/templates/flavors/ipv6/patches/ipv6.yaml index ea0eff7286d..8bd72e4ab3d 100644 --- a/templates/flavors/ipv6/patches/ipv6.yaml +++ b/templates/flavors/ipv6/patches/ipv6.yaml @@ -18,6 +18,16 @@ metadata: name: ${CLUSTER_NAME} spec: networkSpec: + apiServerLB: + # We pre-create this public IP and the DNS name to use it in the + # worker node's /etc/hosts. 
+ frontendIPs: + - name: ${CLUSTER_NAME}-api-lb + publicIP: + name: ${CLUSTER_NAME}-api-lb + dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.11.100 vnet: cidrBlocks: - "10.0.0.0/8" diff --git a/templates/flavors/private/kustomization.yaml b/templates/flavors/private/kustomization.yaml index e8b87f0ff6b..82fce965117 100644 --- a/templates/flavors/private/kustomization.yaml +++ b/templates/flavors/private/kustomization.yaml @@ -11,6 +11,48 @@ patches: - path: patches/private-lb.yaml - path: patches/apiserver-host-dns.yaml - path: patches/azure-bastion.yaml +- path: ../../internal-load-balancer/azure-cluster-cidrs-and-frontend-ips.yaml +- path: ../../internal-load-balancer/kubeadm-config-template-worker-node.yaml +- target: + kind: KubeadmConfigTemplate + patch: |- + - op: add + path: /spec/template/spec/preKubeadmCommands/- + value: echo '70.0.11.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts +- target: + kind: AzureCluster + patch: |- + - op: replace + path: /spec/networkSpec/apiServerLB/frontendIPs/1/privateIP + value: 70.0.11.100 +- target: + kind: AzureCluster + patch: |- + - op: replace + path: /spec/networkSpec/vnet/cidrBlocks/0 + value: 70.0.0.0/8 +- target: + kind: AzureCluster + patch: |- + - op: replace + path: /spec/networkSpec/subnets/0/cidrBlocks/0 + value: 70.0.0.0/16 +- target: + kind: AzureCluster + patch: |- + - op: replace + path: /spec/networkSpec/subnets/1/cidrBlocks/0 + value: 70.1.0.0/16 +- target: + kind: AzureCluster + patch: |- + - op: remove + path: /spec/networkSpec/apiServerLB/frontendIPs/0 +- target: + kind: KubeadmConfigTemplate + patch: |- + - op: remove + path: /spec/template/spec/preKubeadmCommands/0 sortOptions: order: fifo diff --git a/templates/flavors/private/patches/azure-bastion.yaml b/templates/flavors/private/patches/azure-bastion.yaml index 74d517df341..6d9c3e8773f 100644 --- a/templates/flavors/private/patches/azure-bastion.yaml +++ b/templates/flavors/private/patches/azure-bastion.yaml @@ -5,4 +5,7 @@ metadata: name: ${CLUSTER_NAME} spec: bastionSpec: - azureBastion: {} + azureBastion: + subnet: + cidrBlocks: + - 70.2.0.0/16 diff --git a/templates/flavors/windows-apiserver-ilb/patches/kubeadm-config-template.yaml b/templates/flavors/windows-apiserver-ilb/patches/kubeadm-config-template.yaml index 3509792752a..5bf33864cbf 100644 --- a/templates/flavors/windows-apiserver-ilb/patches/kubeadm-config-template.yaml +++ b/templates/flavors/windows-apiserver-ilb/patches/kubeadm-config-template.yaml @@ -9,4 +9,4 @@ spec: # so that worker nodes can access the API server using the internal IP. # 10.0.0.100 is the default IP that gets assigned to a internal load balancer. 
preKubeadmCommands: - - powershell -Command "Add-Content -Path 'C:\\Windows\\System32\\drivers\\etc\\hosts' -Value '10.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com'" + - powershell -Command "Add-Content -Path 'C:\\Windows\\System32\\drivers\\etc\\hosts' -Value '10.0.11.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com'" diff --git a/templates/test/ci/cluster-template-prow-ci-version-dual-stack.yaml b/templates/test/ci/cluster-template-prow-ci-version-dual-stack.yaml index 0a3d54c620f..0222980a42b 100644 --- a/templates/test/ci/cluster-template-prow-ci-version-dual-stack.yaml +++ b/templates/test/ci/cluster-template-prow-ci-version-dual-stack.yaml @@ -45,6 +45,14 @@ spec: name: ${CLUSTER_IDENTITY_NAME} location: ${AZURE_LOCATION} networkSpec: + apiServerLB: + frontendIPs: + - name: ${CLUSTER_NAME}-api-lb + publicIP: + dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com + name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.11.100 subnets: - cidrBlocks: - 10.0.0.0/16 @@ -448,6 +456,8 @@ spec: preKubeadmCommands: - bash -c /tmp/oot-cred-provider.sh - bash -c /tmp/kubeadm-bootstrap.sh + - echo '10.0.11.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' + >> /etc/hosts verbosity: 5 --- apiVersion: cluster.x-k8s.io/v1beta1 diff --git a/templates/test/ci/cluster-template-prow-ci-version-ipv6.yaml b/templates/test/ci/cluster-template-prow-ci-version-ipv6.yaml index cbf94be6a1b..8dc7193f684 100644 --- a/templates/test/ci/cluster-template-prow-ci-version-ipv6.yaml +++ b/templates/test/ci/cluster-template-prow-ci-version-ipv6.yaml @@ -43,6 +43,14 @@ spec: name: ${CLUSTER_IDENTITY_NAME} location: ${AZURE_LOCATION} networkSpec: + apiServerLB: + frontendIPs: + - name: ${CLUSTER_NAME}-api-lb + publicIP: + dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com + name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.11.100 subnets: - cidrBlocks: - 10.0.0.0/16 @@ -466,6 +474,8 @@ spec: preKubeadmCommands: - bash -c /tmp/oot-cred-provider.sh - bash -c /tmp/kubeadm-bootstrap.sh + - echo '10.0.11.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' + >> /etc/hosts verbosity: 5 --- apiVersion: cluster.x-k8s.io/v1beta1 diff --git a/templates/test/ci/cluster-template-prow-private.yaml b/templates/test/ci/cluster-template-prow-private.yaml index 4f6613665e1..425fe50f438 100644 --- a/templates/test/ci/cluster-template-prow-private.yaml +++ b/templates/test/ci/cluster-template-prow-private.yaml @@ -236,7 +236,9 @@ spec: kubeletExtraArgs: cloud-provider: external name: '{{ ds.meta_data["local_hostname"] }}' - preKubeadmCommands: [] + preKubeadmCommands: + - echo '10.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' + >> /etc/hosts --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: AzureClusterIdentity diff --git a/templates/test/ci/prow-ci-version-dual-stack/kustomization.yaml b/templates/test/ci/prow-ci-version-dual-stack/kustomization.yaml index 4d01fd9ab39..798d0702dbe 100644 --- a/templates/test/ci/prow-ci-version-dual-stack/kustomization.yaml +++ b/templates/test/ci/prow-ci-version-dual-stack/kustomization.yaml @@ -28,6 +28,13 @@ patches: - path: ../prow-dual-stack/patches/cluster-label-calico-dual-stack.yaml - path: patches/machine-deployment.yaml - path: 
../patches/windows-addons-disabled.yaml +- target: + kind: KubeadmConfigTemplate + patch: |- + - op: add + path: /spec/template/spec/preKubeadmCommands/- + value: echo '10.0.11.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts + sortOptions: order: fifo diff --git a/templates/test/ci/prow-ci-version-ipv6/kustomization.yaml b/templates/test/ci/prow-ci-version-ipv6/kustomization.yaml index 7298ad5aaab..24a94fd2c8d 100644 --- a/templates/test/ci/prow-ci-version-ipv6/kustomization.yaml +++ b/templates/test/ci/prow-ci-version-ipv6/kustomization.yaml @@ -28,6 +28,13 @@ patches: - path: ../prow-ipv6/patches/cluster-label-calico-ipv6.yaml - path: patches/machine-deployment.yaml - path: ../patches/windows-addons-disabled.yaml +- target: + kind: KubeadmConfigTemplate + patch: |- + - op: add + path: /spec/template/spec/preKubeadmCommands/- + value: echo '10.0.11.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts + sortOptions: order: fifo diff --git a/templates/test/ci/prow-custom-vnet/kustomization.yaml b/templates/test/ci/prow-custom-vnet/kustomization.yaml index e078480baf6..9fd3b612e63 100644 --- a/templates/test/ci/prow-custom-vnet/kustomization.yaml +++ b/templates/test/ci/prow-custom-vnet/kustomization.yaml @@ -17,6 +17,47 @@ patches: - path: ../patches/uami-control-plane.yaml - path: ../patches/cluster-label-calico.yaml - path: ../patches/cluster-label-cloud-provider-azure.yaml +- target: + kind: KubeadmConfigTemplate + patch: |- + - op: add + path: /spec/template/spec/preKubeadmCommands/- + value: echo '10.0.11.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts +- target: + kind: AzureCluster + patch: |- + - op: replace + path: /spec/networkSpec/apiServerLB/frontendIPs/1/privateIP + value: 10.0.11.100 +- target: + kind: AzureCluster + patch: |- + - op: replace + path: /spec/networkSpec/vnet/cidrBlocks/0 + value: 10.0.0.0/8 +- target: + kind: AzureCluster + patch: |- + - op: add + path: /spec/networkSpec/subnets/0/cidrBlocks + value: [] + - op: add + path: /spec/networkSpec/subnets/0/cidrBlocks/0 + value: 10.0.0.0/16 +- target: + kind: AzureCluster + patch: |- + - op: add + path: /spec/networkSpec/subnets/1/cidrBlocks + value: [] + - op: add + path: /spec/networkSpec/subnets/1/cidrBlocks/0 + value: 10.1.0.0/16 +- target: + kind: KubeadmConfigTemplate + patch: |- + - op: remove + path: /spec/template/spec/preKubeadmCommands/0 sortOptions: order: fifo diff --git a/templates/test/ci/prow-private/kustomization.yaml b/templates/test/ci/prow-private/kustomization.yaml index 059607b987f..b79ec24d2b4 100644 --- a/templates/test/ci/prow-private/kustomization.yaml +++ b/templates/test/ci/prow-private/kustomization.yaml @@ -20,6 +20,16 @@ patches: - path: ../patches/uami-control-plane.yaml - path: ../patches/cluster-label-calico.yaml - path: ../patches/cluster-label-cloud-provider-azure.yaml +# we need to add the default private IP to the hosts file of the worker nodes +- target: + kind: KubeadmConfigTemplate + patch: |- + - op: remove + path: /spec/template/spec/preKubeadmCommands/0 + - op: add + path: /spec/template/spec/preKubeadmCommands/- + value: echo '10.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts + configMapGenerator: - files: - resources=../../../addons/calico.yaml
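The kustomize patches above inject the API server's internal load balancer IP into the worker nodes' `/etc/hosts` through `preKubeadmCommands`. A quick way to confirm the patch survives rendering is to build the overlay and look for the injected entry. A minimal sketch, assuming `kustomize` is on the PATH and is run from the repository root; the `--load-restrictor` flag is assumed to be needed because the overlay references files outside its own directory:

```bash
# Render the prow-private overlay and check that the /etc/hosts preKubeadmCommand
# added by the patch above ends up in the worker KubeadmConfigTemplate.
kustomize build --load-restrictor LoadRestrictionsNone templates/test/ci/prow-private \
  | grep -n '10.0.0.100 ${CLUSTER_NAME}' \
  && echo "hosts entry rendered" \
  || echo "hosts entry missing"
```

Alternatively, `make generate-flavors` regenerates `templates/test/ci/cluster-template-prow-private.yaml`, which should contain the same `echo '10.0.0.100 ...' >> /etc/hosts` line shown earlier in this diff.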