From 220d59756d27f7db2f9b6d92da2e9d00884bff54 Mon Sep 17 00:00:00 2001
From: Cecile Robert-Michon
Date: Thu, 9 Mar 2023 02:29:01 +0000
Subject: [PATCH] Fix delete for VMSS flex

---
 azure/services/scalesetvms/scalesetvms.go     |  6 +-
 .../services/scalesetvms/scalesetvms_test.go  | 66 +++++++++++++++----
 test/e2e/azure_machinepools.go                |  2 -
 test/e2e/azure_test.go                        |  2 +-
 4 files changed, 56 insertions(+), 20 deletions(-)

diff --git a/azure/services/scalesetvms/scalesetvms.go b/azure/services/scalesetvms/scalesetvms.go
index dc76a39a1da..f257497d3d4 100644
--- a/azure/services/scalesetvms/scalesetvms.go
+++ b/azure/services/scalesetvms/scalesetvms.go
@@ -151,11 +151,7 @@ func (s *Service) deleteVMSSFlexVM(ctx context.Context, resourceID string) error
 	if err != nil {
 		return errors.Wrap(err, fmt.Sprintf("failed to parse resource id %q", resourceID))
 	}
-
-	resourceGroup := parsed.ResourceGroup
-	resourceName := strings.TrimPrefix(s.Scope.ProviderID(), azure.ProviderIDPrefix)
-	resourceNameSplits := strings.Split(resourceName, "/")
-	resourceName = resourceNameSplits[len(resourceNameSplits)-3] + "_" + resourceNameSplits[len(resourceNameSplits)-1]
+	resourceGroup, resourceName := parsed.ResourceGroup, parsed.ResourceName

 	log.V(4).Info("entering delete")
 	future := s.Scope.GetLongRunningOperationState(resourceName, serviceName, infrav1.DeleteFuture)
diff --git a/azure/services/scalesetvms/scalesetvms_test.go b/azure/services/scalesetvms/scalesetvms_test.go
index 8c6ee624363..b4ccc5b56c5 100644
--- a/azure/services/scalesetvms/scalesetvms_test.go
+++ b/azure/services/scalesetvms/scalesetvms_test.go
@@ -18,6 +18,7 @@ package scalesetvms

 import (
 	"context"
+	"fmt"
 	"net/http"
 	"testing"
 	"time"
@@ -35,6 +36,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-azure/azure/converters"
 	"sigs.k8s.io/cluster-api-provider-azure/azure/scope"
 	"sigs.k8s.io/cluster-api-provider-azure/azure/services/scalesetvms/mock_scalesetvms"
+	"sigs.k8s.io/cluster-api-provider-azure/azure/services/virtualmachines/mock_virtualmachines"
 	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
 	gomock2 "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -172,17 +174,18 @@ func TestService_Reconcile(t *testing.T) {
 func TestService_Delete(t *testing.T) {
 	cases := []struct {
 		Name       string
-		Setup      func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder)
+		Setup      func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder, v *mock_virtualmachines.MockClientMockRecorder)
 		Err        error
 		CheckIsErr bool
 	}{
 		{
 			Name: "should start deleting successfully if no long running operation is active",
-			Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder) {
+			Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder, v *mock_virtualmachines.MockClientMockRecorder) {
 				s.ResourceGroup().Return("rg")
 				s.InstanceID().Return("0")
 				s.ProviderID().Return("foo")
 				s.ScaleSetName().Return("scaleset")
+				s.OrchestrationMode().Return(infrav1.UniformOrchestrationMode)
 				s.GetLongRunningOperationState("0", serviceName, infrav1.DeleteFuture).Return(nil)
 				future := &infrav1.Future{
 					Type: infrav1.DeleteFuture,
@@ -199,11 +202,12 @@ func TestService_Delete(t *testing.T) {
 		},
 		{
 			Name: "should finish deleting successfully when there's a long running operation that has completed",
-			Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder) {
+			Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder, v *mock_virtualmachines.MockClientMockRecorder) {
 				s.ResourceGroup().Return("rg")
 				s.InstanceID().Return("0")
 				s.ProviderID().Return("foo")
 				s.ScaleSetName().Return("scaleset")
+				s.OrchestrationMode().Return(infrav1.UniformOrchestrationMode)
 				future := &infrav1.Future{
 					Type: infrav1.DeleteFuture,
 				}
@@ -215,11 +219,12 @@ func TestService_Delete(t *testing.T) {
 		},
 		{
 			Name: "should not error when deleting, but resource is 404",
-			Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder) {
+			Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder, v *mock_virtualmachines.MockClientMockRecorder) {
 				s.ResourceGroup().Return("rg")
 				s.InstanceID().Return("0")
 				s.ProviderID().Return("foo")
 				s.ScaleSetName().Return("scaleset")
+				s.OrchestrationMode().Return(infrav1.UniformOrchestrationMode)
 				s.GetLongRunningOperationState("0", serviceName, infrav1.DeleteFuture).Return(nil)
 				m.DeleteAsync(gomock2.AContext(), "rg", "scaleset", "0").Return(nil, autorest404)
 				m.Get(gomock2.AContext(), "rg", "scaleset", "0").Return(compute.VirtualMachineScaleSetVM{}, nil)
 			},
@@ -227,11 +232,12 @@ func TestService_Delete(t *testing.T) {
 		},
 		{
 			Name: "should error when deleting, but a non-404 error is returned from DELETE call",
-			Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder) {
+			Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder, v *mock_virtualmachines.MockClientMockRecorder) {
 				s.ResourceGroup().Return("rg")
 				s.InstanceID().Return("0")
 				s.ProviderID().Return("foo")
 				s.ScaleSetName().Return("scaleset")
+				s.OrchestrationMode().Return(infrav1.UniformOrchestrationMode)
 				s.GetLongRunningOperationState("0", serviceName, infrav1.DeleteFuture).Return(nil)
 				m.DeleteAsync(gomock2.AContext(), "rg", "scaleset", "0").Return(nil, errors.New("boom"))
 				m.Get(gomock2.AContext(), "rg", "scaleset", "0").Return(compute.VirtualMachineScaleSetVM{}, nil)
 			},
 			CheckIsErr: true,
@@ -240,11 +246,12 @@ func TestService_Delete(t *testing.T) {
 		},
 		{
 			Name: "should return error when a long running operation is active and getting the result returns an error",
-			Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder) {
+			Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder, v *mock_virtualmachines.MockClientMockRecorder) {
 				s.ResourceGroup().Return("rg")
 				s.InstanceID().Return("0")
 				s.ProviderID().Return("foo")
 				s.ScaleSetName().Return("scaleset")
+				s.OrchestrationMode().Return(infrav1.UniformOrchestrationMode)
 				future := &infrav1.Future{
 					Type: infrav1.DeleteFuture,
 				}
 				s.GetLongRunningOperationState("0", serviceName, infrav1.DeleteFuture).Return(future)
 				m.GetResultIfDone(gomock2.AContext(), future).Return(compute.VirtualMachineScaleSetVM{}, errors.New("boom"))
@@ -254,26 +261,61 @@ func TestService_Delete(t *testing.T) {
 			},
 			Err: errors.Wrap(errors.New("boom"), "failed to get result of long running operation"),
 		},
+		{
+			Name: "(flex) should delete successfully if no long-running operation is active",
+			Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder, v *mock_virtualmachines.MockClientMockRecorder) {
+				s.ResourceGroup().Return("my-cluster")
+				s.ScaleSetName().Return("scaleset")
+				s.InstanceID().Return("0")
+				s.ProviderID().Return("azure:///subscriptions/1234-5678/resourceGroups/my-cluster/providers/Microsoft.Compute/virtualMachines/my-cluster_1234abcd")
+				s.OrchestrationMode().Return(infrav1.FlexibleOrchestrationMode)
+				s.GetLongRunningOperationState("my-cluster_1234abcd", serviceName, infrav1.DeleteFuture).Return(nil)
+				vmGetter := &VMSSFlexVMGetter{
+					Name:          "my-cluster_1234abcd",
+					ResourceGroup: "my-cluster",
+				}
+				future := &infrav1.Future{
+					Type: infrav1.DeleteFuture,
+				}
+				sdkFuture, _ := converters.FutureToSDK(*future)
+				v.DeleteAsync(gomock2.AContext(), vmGetter).Return(sdkFuture, nil)
+				s.DeleteLongRunningOperationState("my-cluster_1234abcd", serviceName, infrav1.DeleteFuture)
+				v.GetByID(gomock2.AContext(), "/subscriptions/1234-5678/resourceGroups/my-cluster/providers/Microsoft.Compute/virtualMachines/my-cluster_1234abcd").Return(compute.VirtualMachine{}, nil)
+			},
+		},
+		{
+			Name: "(flex) should error if providerID is invalid",
+			Setup: func(s *mock_scalesetvms.MockScaleSetVMScopeMockRecorder, m *mock_scalesetvms.MockclientMockRecorder, v *mock_virtualmachines.MockClientMockRecorder) {
+				s.ResourceGroup().Return("my-cluster")
+				s.ScaleSetName().Return("scaleset")
+				s.InstanceID().Return("0")
+				s.ProviderID().Return("foo")
+				s.OrchestrationMode().Return(infrav1.FlexibleOrchestrationMode)
+				v.GetByID(gomock2.AContext(), "foo").Return(compute.VirtualMachine{}, nil)
+			},
+			Err: errors.Wrap(fmt.Errorf("parsing failed for %s. Invalid resource Id format", "foo"), fmt.Sprintf("failed to parse resource id %q", "foo")),
+		},
 	}

 	for _, c := range cases {
 		t.Run(c.Name, func(t *testing.T) {
 			var (
-				g          = NewWithT(t)
-				mockCtrl   = gomock.NewController(t)
-				scopeMock  = mock_scalesetvms.NewMockScaleSetVMScope(mockCtrl)
-				clientMock = mock_scalesetvms.NewMockclient(mockCtrl)
+				g            = NewWithT(t)
+				mockCtrl     = gomock.NewController(t)
+				scopeMock    = mock_scalesetvms.NewMockScaleSetVMScope(mockCtrl)
+				clientMock   = mock_scalesetvms.NewMockclient(mockCtrl)
+				vmClientMock = mock_virtualmachines.NewMockClient(mockCtrl)
 			)
 			defer mockCtrl.Finish()

 			scopeMock.EXPECT().SubscriptionID().Return("subID").AnyTimes()
 			scopeMock.EXPECT().BaseURI().Return("https://localhost/").AnyTimes()
 			scopeMock.EXPECT().Authorizer().Return(nil).AnyTimes()
-			scopeMock.EXPECT().OrchestrationMode().Return(infrav1.UniformOrchestrationMode).AnyTimes()

 			service := NewService(scopeMock)
 			service.Client = clientMock
-			c.Setup(scopeMock.EXPECT(), clientMock.EXPECT())
+			service.VMClient = vmClientMock
+			c.Setup(scopeMock.EXPECT(), clientMock.EXPECT(), vmClientMock.EXPECT())

 			if err := service.Delete(context.TODO()); c.Err == nil {
 				g.Expect(err).To(Succeed())
diff --git a/test/e2e/azure_machinepools.go b/test/e2e/azure_machinepools.go
index 909124ba25d..bf588d97f82 100644
--- a/test/e2e/azure_machinepools.go
+++ b/test/e2e/azure_machinepools.go
@@ -117,7 +117,6 @@ func AzureMachinePoolsSpec(ctx context.Context, inputGetter func() AzureMachineP
 	}
 	wg.Wait()

-	/* TODO: uncomment with scale down fix for flexible mode
 	for _, mp := range machinepools {
 		goalReplicas := pointer.Int32Deref(mp.Spec.Replicas, 0) - 1
 		Byf("Scaling machine pool %s in from %d to %d", mp.Name, *mp.Spec.Replicas, goalReplicas)
@@ -135,7 +134,6 @@ func AzureMachinePoolsSpec(ctx context.Context, inputGetter func() AzureMachineP
 		}(mp)
 	}
 	wg.Wait()
-	*/

 	By("verifying that workload nodes are schedulable")
 	clientset := workloadClusterProxy.GetClientSet()
diff --git a/test/e2e/azure_test.go b/test/e2e/azure_test.go
index 71ef510ef82..258e1f3981e 100644
--- a/test/e2e/azure_test.go
+++ b/test/e2e/azure_test.go
@@ -522,7 +522,7 @@ var _ = Describe("Workload cluster creation", func() {
 			},
 		}, result)

-		By("Verifying machinepool can scale out", func() {
+		By("Verifying machinepool can scale out and in", func() {
 			AzureMachinePoolsSpec(ctx, func() AzureMachinePoolsSpecInput {
 				return AzureMachinePoolsSpecInput{
 					Cluster: result.Cluster,
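Editor's note on the one-line change in deleteVMSSFlexVM: for VMSS Flex, each instance is a full Microsoft.Compute/virtualMachines resource, so the parsed resource ID already carries both the resource group and the VM name (e.g. "my-cluster_1234abcd") that the removed code tried to rebuild from the ProviderID. The standalone sketch below is not part of the patch; it only illustrates that parsing step, and it assumes go-autorest's azure.ParseResourceID is the parser in play (inferred from the "parsing failed for %s. Invalid resource Id format" error string asserted in the new "(flex) should error" test case).

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Resource ID in the form used by the new "(flex)" test case
	// (subscription ID shortened for illustration).
	id := "/subscriptions/1234-5678/resourceGroups/my-cluster/providers/Microsoft.Compute/virtualMachines/my-cluster_1234abcd"

	// ParseResourceID yields both values the delete call needs, so there is
	// no need to split the ProviderID and re-join its segments with "_".
	parsed, err := azure.ParseResourceID(id)
	if err != nil {
		panic(err)
	}

	fmt.Println(parsed.ResourceGroup) // my-cluster
	fmt.Println(parsed.ResourceName)  // my-cluster_1234abcd
}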