From 5106805d4ab63f65fbac53c5f7359e18621dbf91 Mon Sep 17 00:00:00 2001 From: chungen0126 Date: Thu, 19 Dec 2024 03:28:11 +0800 Subject: [PATCH] HDDS-11959. Remove tests for non-Ratis SCM --- .../upgrade/TestDatanodeUpgradeToScmHA.java | 604 ------------------ .../hdds/scm/block/TestDeletedBlockLog.java | 2 - ...uration.java => TestSCMConfiguration.java} | 73 +-- .../TestStatefulServiceStateManagerImpl.java | 2 - .../hdds/scm/node/TestSCMNodeManager.java | 3 + ...SCMHAUnfinalizedStateValidationAction.java | 54 +- .../hdds/scm/TestStorageContainerManager.java | 50 +- .../hadoop/ozone/MiniOzoneHAClusterImpl.java | 1 - .../ozone/recon/TestReconScmHASnapshot.java | 65 -- .../recon/TestReconScmNonHASnapshot.java | 64 -- .../shell/TestDeletedBlocksTxnShell.java | 2 - 11 files changed, 16 insertions(+), 904 deletions(-) delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java rename hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/{TestSCMHAConfiguration.java => TestSCMConfiguration.java} (80%) delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmHASnapshot.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmNonHASnapshot.java diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java deleted file mode 100644 index d4a27e74cda..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java +++ /dev/null @@ -1,604 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.upgrade; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.hadoop.ozone.container.common.ScmTestMock; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.replication.ContainerImporter; -import org.apache.hadoop.ozone.container.replication.ContainerReplicationSource; -import org.apache.hadoop.ozone.container.replication.OnDemandContainerReplicationSource; -import org.apache.ozone.test.LambdaTestUtils; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; - -import java.io.File; -import java.io.FileOutputStream; -import java.net.InetSocketAddress; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; - -import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertTrue; - -/** - * Tests upgrading a single datanode from pre-SCM HA volume format that used - * SCM ID to the post-SCM HA volume format using cluster ID. If SCM HA was - * already being used before the upgrade, there should be no changes. 
- */ -public class TestDatanodeUpgradeToScmHA { - @TempDir - private Path tempFolder; - - private DatanodeStateMachine dsm; - private ContainerDispatcher dispatcher; - private OzoneConfiguration conf; - private static final String CLUSTER_ID = "clusterID"; - private boolean scmHAAlreadyEnabled; - - private RPC.Server scmRpcServer; - private InetSocketAddress address; - private ScmTestMock scmServerImpl; - - private void setScmHAEnabled(boolean enableSCMHA) - throws Exception { - this.scmHAAlreadyEnabled = enableSCMHA; - conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, scmHAAlreadyEnabled); - setup(); - } - - private void setup() throws Exception { - address = SCMTestUtils.getReuseableAddress(); - conf.setSocketAddr(ScmConfigKeys.OZONE_SCM_NAMES, address); - } - - @AfterEach - public void teardown() throws Exception { - if (scmRpcServer != null) { - scmRpcServer.stop(); - } - - if (dsm != null) { - dsm.close(); - } - } - - @ParameterizedTest(name = "{index}: scmHAAlreadyEnabled={0}") - @ValueSource(booleans = {true, false}) - public void testReadsDuringFinalization(boolean enableSCMHA) - throws Exception { - setScmHAEnabled(enableSCMHA); - // start DN and SCM - startScmServer(); - UpgradeTestHelper.addHddsVolume(conf, tempFolder); - dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address, - HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - dispatcher = dsm.getContainer().getDispatcher(); - final Pipeline pipeline = MockPipeline.createPipeline( - Collections.singletonList(dsm.getDatanodeDetails())); - - // Add data to read. - final long containerID = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto writeChunk = - UpgradeTestHelper.putBlock(dispatcher, containerID, pipeline); - UpgradeTestHelper.closeContainer(dispatcher, containerID, pipeline); - - // Create thread to keep reading during finalization. - ExecutorService executor = Executors.newFixedThreadPool(1); - Future readFuture = executor.submit(() -> { - // Layout version check should be thread safe. - while (!dsm.getLayoutVersionManager() - .isAllowed(HDDSLayoutFeature.SCM_HA)) { - UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - } - // Make sure we can read after finalizing too. - UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - return null; - }); - - dsm.finalizeUpgrade(); - // If there was a failure reading during the upgrade, the exception will - // be thrown here. - readFuture.get(); - } - - @ParameterizedTest(name = "{index}: scmHAAlreadyEnabled={0}") - @ValueSource(booleans = {true, false}) - public void testImportContainer(boolean enableSCMHA) throws Exception { - setScmHAEnabled(enableSCMHA); - // start DN and SCM - startScmServer(); - UpgradeTestHelper.addHddsVolume(conf, tempFolder); - dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address, - HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - dispatcher = dsm.getContainer().getDispatcher(); - final Pipeline pipeline = MockPipeline.createPipeline( - Collections.singletonList(dsm.getDatanodeDetails())); - - // Pre-export a container to continuously import and delete. 
- final long exportContainerID = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto exportWriteChunk = - UpgradeTestHelper.putBlock(dispatcher, exportContainerID, pipeline); - UpgradeTestHelper.closeContainer(dispatcher, exportContainerID, pipeline); - File exportedContainerFile = exportContainer(exportContainerID); - UpgradeTestHelper.deleteContainer(dispatcher, exportContainerID, pipeline); - - // Export another container to import while pre-finalized and read - // finalized. - final long exportContainerID2 = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto exportWriteChunk2 = - UpgradeTestHelper.putBlock(dispatcher, exportContainerID2, pipeline); - UpgradeTestHelper.closeContainer(dispatcher, exportContainerID2, pipeline); - File exportedContainerFile2 = exportContainer(exportContainerID2); - UpgradeTestHelper.deleteContainer(dispatcher, exportContainerID2, pipeline); - - // Make sure we can import and read a container pre-finalized. - importContainer(exportContainerID2, exportedContainerFile2); - UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk2, pipeline); - - // Now SCM and enough other DNs finalize to enable SCM HA. This DN is - // restarted with SCM HA config and gets a different SCM ID. - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - changeScmID(); - - dsm = UpgradeTestHelper.restartDatanode(conf, dsm, true, tempFolder, address, - HDDSLayoutFeature.INITIAL_VERSION.layoutVersion(), true); - dispatcher = dsm.getContainer().getDispatcher(); - - // Make sure the existing container can be read. - UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk2, pipeline); - - // Create thread to keep importing containers during the upgrade. - // Since the datanode's MLV is behind SCM's, container creation is not - // allowed. We will keep importing and deleting the same container since - // we cannot create new ones to import here. - ExecutorService executor = Executors.newFixedThreadPool(1); - Future importFuture = executor.submit(() -> { - // Layout version check should be thread safe. - while (!dsm.getLayoutVersionManager() - .isAllowed(HDDSLayoutFeature.SCM_HA)) { - importContainer(exportContainerID, exportedContainerFile); - UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk, pipeline); - UpgradeTestHelper.deleteContainer(dispatcher, exportContainerID, pipeline); - } - // Make sure we can import after finalizing too. - importContainer(exportContainerID, exportedContainerFile); - UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk, pipeline); - return null; - }); - - dsm.finalizeUpgrade(); - // If there was a failure importing during the upgrade, the exception will - // be thrown here. - importFuture.get(); - - // Make sure we can read the container that was imported while - // pre-finalized after finalizing. 
- UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk2, pipeline); - } - - @ParameterizedTest(name = "{index}: scmHAAlreadyEnabled={0}") - @ValueSource(booleans = {true, false}) - public void testFailedVolumeDuringFinalization(boolean enableSCMHA) - throws Exception { - setScmHAEnabled(enableSCMHA); - /// SETUP /// - - startScmServer(); - String originalScmID = scmServerImpl.getScmId(); - File volume = UpgradeTestHelper.addHddsVolume(conf, tempFolder); - dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address, - HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - dispatcher = dsm.getContainer().getDispatcher(); - final Pipeline pipeline = MockPipeline.createPipeline( - Collections.singletonList(dsm.getDatanodeDetails())); - - /// PRE-FINALIZED: Write and Read from formatted volume /// - - assertEquals(1, - dsm.getContainer().getVolumeSet().getVolumesList().size()); - assertEquals(0, - dsm.getContainer().getVolumeSet().getFailedVolumesList().size()); - - // Add container with data, make sure it can be read and written. - final long containerID = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto writeChunk = - UpgradeTestHelper.putBlock(dispatcher, containerID, pipeline); - UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - - checkPreFinalizedVolumePathID(volume, originalScmID, CLUSTER_ID); - checkContainerPathID(containerID, originalScmID, CLUSTER_ID); - - // FINALIZE: With failed volume /// - - failVolume(volume); - // Since volume is failed, container should be marked unhealthy. - // Finalization should proceed anyways. - UpgradeTestHelper.closeContainer(dispatcher, containerID, pipeline, - ContainerProtos.Result.CONTAINER_FILES_CREATE_ERROR); - State containerState = dsm.getContainer().getContainerSet() - .getContainer(containerID).getContainerState(); - assertEquals(State.UNHEALTHY, containerState); - dsm.finalizeUpgrade(); - LambdaTestUtils.await(2000, 500, - () -> dsm.getLayoutVersionManager() - .isAllowed(HDDSLayoutFeature.SCM_HA)); - - /// FINALIZED: Volume marked failed but gets restored on disk /// - - // Check that volume is marked failed during finalization. - assertEquals(0, - dsm.getContainer().getVolumeSet().getVolumesList().size()); - assertEquals(1, - dsm.getContainer().getVolumeSet().getFailedVolumesList().size()); - - // Since the volume was out during the upgrade, it should maintain its - // original format. - checkPreFinalizedVolumePathID(volume, originalScmID, CLUSTER_ID); - checkContainerPathID(containerID, originalScmID, CLUSTER_ID); - - // Now that we are done finalizing, restore the volume. - restoreVolume(volume); - // After restoring the failed volume, its containers are readable again. - // However, since it is marked as failed no containers can be created or - // imported to it. - // This should log a warning about reading from an unhealthy container - // but otherwise proceed successfully. 
- UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - - /// FINALIZED: Restart datanode to upgrade the failed volume /// - - dsm = UpgradeTestHelper.restartDatanode(conf, dsm, true, tempFolder, address, - HDDSLayoutFeature.SCM_HA.layoutVersion(), false); - dispatcher = dsm.getContainer().getDispatcher(); - - assertEquals(1, - dsm.getContainer().getVolumeSet().getVolumesList().size()); - assertEquals(0, - dsm.getContainer().getVolumeSet().getFailedVolumesList().size()); - - checkFinalizedVolumePathID(volume, originalScmID, CLUSTER_ID); - checkContainerPathID(containerID, originalScmID, CLUSTER_ID); - - // Read container from before upgrade. The upgrade required it to be closed. - UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - // Write and read container after upgrade. - long newContainerID = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto newWriteChunk = - UpgradeTestHelper.putBlock(dispatcher, newContainerID, pipeline); - UpgradeTestHelper.readChunk(dispatcher, newWriteChunk, pipeline); - // The new container should use cluster ID in its path. - // The volume it is placed on is up to the implementation. - checkContainerPathID(newContainerID, CLUSTER_ID); - } - - @ParameterizedTest(name = "{index}: scmHAAlreadyEnabled={0}") - @ValueSource(booleans = {true, false}) - public void testFormattingNewVolumes(boolean enableSCMHA) throws Exception { - setScmHAEnabled(enableSCMHA); - /// SETUP /// - - startScmServer(); - String originalScmID = scmServerImpl.getScmId(); - File preFinVolume1 = UpgradeTestHelper.addHddsVolume(conf, tempFolder); - dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address, - HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - dispatcher = dsm.getContainer().getDispatcher(); - final Pipeline pipeline = MockPipeline.createPipeline( - Collections.singletonList(dsm.getDatanodeDetails())); - - /// PRE-FINALIZED: Write and Read from formatted volume /// - - assertEquals(1, - dsm.getContainer().getVolumeSet().getVolumesList().size()); - assertEquals(0, - dsm.getContainer().getVolumeSet().getFailedVolumesList().size()); - - // Add container with data, make sure it can be read and written. - final long containerID = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto writeChunk = - UpgradeTestHelper.putBlock(dispatcher, containerID, pipeline); - UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - - checkPreFinalizedVolumePathID(preFinVolume1, originalScmID, CLUSTER_ID); - checkContainerPathID(containerID, originalScmID, CLUSTER_ID); - - /// PRE-FINALIZED: Restart with SCM HA enabled and new SCM ID /// - - // Now SCM and enough other DNs finalize to enable SCM HA. This DN is - // restarted with SCM HA config and gets a different SCM ID. - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - changeScmID(); - // A new volume is added that must be formatted. - File preFinVolume2 = UpgradeTestHelper.addHddsVolume(conf, tempFolder); - - dsm = UpgradeTestHelper.restartDatanode(conf, dsm, true, tempFolder, address, - HDDSLayoutFeature.INITIAL_VERSION.layoutVersion(), true); - dispatcher = dsm.getContainer().getDispatcher(); - - assertEquals(2, - dsm.getContainer().getVolumeSet().getVolumesList().size()); - assertEquals(0, - dsm.getContainer().getVolumeSet().getFailedVolumesList().size()); - - // Because DN mlv would be behind SCM mlv, only reads are allowed. 
- UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - - // On restart, there should have been no changes to the paths already used. - checkPreFinalizedVolumePathID(preFinVolume1, originalScmID, CLUSTER_ID); - checkContainerPathID(containerID, originalScmID, CLUSTER_ID); - // No new containers can be created on this volume since SCM MLV is ahead - // of DN MLV at this point. - // cluster ID should always be used for the new volume since SCM HA is now - // enabled. - checkVolumePathID(preFinVolume2, CLUSTER_ID); - - /// FINALIZE /// - - UpgradeTestHelper.closeContainer(dispatcher, containerID, pipeline); - dsm.finalizeUpgrade(); - LambdaTestUtils.await(2000, 500, - () -> dsm.getLayoutVersionManager() - .isAllowed(HDDSLayoutFeature.SCM_HA)); - - /// FINALIZED: Add a new volume and check its formatting /// - - // Add a new volume that should be formatted with cluster ID only, since - // DN has finalized. - File finVolume = UpgradeTestHelper.addHddsVolume(conf, tempFolder); - // Yet another SCM ID is received this time, but it should not matter. - changeScmID(); - - dsm = UpgradeTestHelper.restartDatanode(conf, dsm, true, tempFolder, address, - HDDSLayoutFeature.SCM_HA.layoutVersion(), false); - dispatcher = dsm.getContainer().getDispatcher(); - - assertEquals(3, - dsm.getContainer().getVolumeSet().getVolumesList().size()); - assertEquals(0, - dsm.getContainer().getVolumeSet().getFailedVolumesList().size()); - - checkFinalizedVolumePathID(preFinVolume1, originalScmID, CLUSTER_ID); - checkVolumePathID(preFinVolume2, CLUSTER_ID); - checkContainerPathID(containerID, originalScmID, CLUSTER_ID); - // New volume should have been formatted with cluster ID only, since the - // datanode is finalized. - checkVolumePathID(finVolume, CLUSTER_ID); - - /// FINALIZED: Read old data and write + read new data /// - - // Read container from before upgrade. The upgrade required it to be closed. - UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - // Write and read container after upgrade. - long newContainerID = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto newWriteChunk = - UpgradeTestHelper.putBlock(dispatcher, newContainerID, pipeline); - UpgradeTestHelper.readChunk(dispatcher, newWriteChunk, pipeline); - // The new container should use cluster ID in its path. - // The volume it is placed on is up to the implementation. - checkContainerPathID(newContainerID, CLUSTER_ID); - } - - /// CHECKS FOR TESTING /// - - public void checkContainerPathID(long containerID, String scmID, - String clusterID) { - if (scmHAAlreadyEnabled) { - checkContainerPathID(containerID, clusterID); - } else { - checkContainerPathID(containerID, scmID); - } - } - - public void checkContainerPathID(long containerID, String expectedID) { - KeyValueContainerData data = - (KeyValueContainerData) dsm.getContainer().getContainerSet() - .getContainer(containerID).getContainerData(); - assertThat(data.getChunksPath()).contains(expectedID); - assertThat(data.getMetadataPath()).contains(expectedID); - } - - public void checkFinalizedVolumePathID(File volume, String scmID, - String clusterID) throws Exception { - - if (scmHAAlreadyEnabled) { - checkVolumePathID(volume, clusterID); - } else { - List subdirs = getHddsSubdirs(volume); - File hddsRoot = getHddsRoot(volume); - - // Volume should have SCM ID and cluster ID directory, where cluster ID - // is a symlink to SCM ID. 
- assertEquals(2, subdirs.size()); - - File scmIDDir = new File(hddsRoot, scmID); - assertThat(subdirs).contains(scmIDDir); - - File clusterIDDir = new File(hddsRoot, CLUSTER_ID); - assertThat(subdirs).contains(clusterIDDir); - assertTrue(Files.isSymbolicLink(clusterIDDir.toPath())); - Path symlinkTarget = Files.readSymbolicLink(clusterIDDir.toPath()); - assertEquals(scmID, symlinkTarget.toString()); - } - } - - public void checkPreFinalizedVolumePathID(File volume, String scmID, - String clusterID) { - - if (scmHAAlreadyEnabled) { - checkVolumePathID(volume, clusterID); - } else { - checkVolumePathID(volume, scmID); - } - - } - - public void checkVolumePathID(File volume, String expectedID) { - List subdirs; - File hddsRoot; - if (dnThinksVolumeFailed(volume)) { - // If the volume is failed, read from the failed location it was - // moved to. - subdirs = getHddsSubdirs(getFailedVolume(volume)); - hddsRoot = getHddsRoot(getFailedVolume(volume)); - } else { - subdirs = getHddsSubdirs(volume); - hddsRoot = getHddsRoot(volume); - } - - // Volume should only have the specified ID directory. - assertEquals(1, subdirs.size()); - File idDir = new File(hddsRoot, expectedID); - assertThat(subdirs).contains(idDir); - } - - public List getHddsSubdirs(File volume) { - File[] subdirsArray = getHddsRoot(volume).listFiles(File::isDirectory); - assertNotNull(subdirsArray); - return Arrays.asList(subdirsArray); - } - - public File getHddsRoot(File volume) { - return new File(HddsVolumeUtil.getHddsRoot(volume.getAbsolutePath())); - } - - /// CLUSTER OPERATIONS /// - - private void startScmServer() throws Exception { - scmServerImpl = new ScmTestMock(CLUSTER_ID); - scmRpcServer = SCMTestUtils.startScmRpcServer(conf, - scmServerImpl, address, 10); - } - - /** - * Updates the SCM ID on the SCM server. Datanode will not be aware of this - * until {@link UpgradeTestHelper#callVersionEndpointTask} is called. - * @return the new scm ID. - */ - private String changeScmID() { - String scmID = UUID.randomUUID().toString(); - scmServerImpl.setScmId(scmID); - return scmID; - } - - /// CONTAINER OPERATIONS /// - - /** - * Exports the specified container to a temporary file and returns the file. - */ - private File exportContainer(long containerId) throws Exception { - final ContainerReplicationSource replicationSource = - new OnDemandContainerReplicationSource( - dsm.getContainer().getController()); - - replicationSource.prepare(containerId); - - File destination = - Files.createFile(tempFolder.resolve("destFile" + containerId)).toFile(); - try (FileOutputStream fos = new FileOutputStream(destination)) { - replicationSource.copyData(containerId, fos, NO_COMPRESSION); - } - return destination; - } - - /** - * Imports the container found in {@code source} to the datanode with the ID - * {@code containerID}. - */ - private void importContainer(long containerID, File source) throws Exception { - ContainerImporter replicator = - new ContainerImporter(dsm.getConf(), - dsm.getContainer().getContainerSet(), - dsm.getContainer().getController(), - dsm.getContainer().getVolumeSet()); - - File tempFile = Files.createFile( - tempFolder.resolve(ContainerUtils.getContainerTarName(containerID))) - .toFile(); - Files.copy(source.toPath(), tempFile.toPath(), - StandardCopyOption.REPLACE_EXISTING); - replicator.importContainer(containerID, tempFile.toPath(), null, - NO_COMPRESSION); - } - - /// VOLUME OPERATIONS /// - - /** - * Renames the specified volume directory so it will appear as failed to - * the datanode. 
- */ - public void failVolume(File volume) { - File failedVolume = getFailedVolume(volume); - assertTrue(volume.renameTo(failedVolume)); - } - - /** - * Convert the specified volume from its failed name back to its original - * name. The File passed should be the original volume path, not the one it - * was renamed to to fail it. - */ - public void restoreVolume(File volume) { - File failedVolume = getFailedVolume(volume); - assertTrue(failedVolume.renameTo(volume)); - } - - /** - * @return The file name that will be used to rename a volume to fail it. - */ - public File getFailedVolume(File volume) { - return new File(volume.getParent(), volume.getName() + "-failed"); - } - - /** - * Checks whether the datanode thinks the volume has failed. - * This could be outdated information if the volume was restored already - * and the datanode has not been restarted since then. - */ - public boolean dnThinksVolumeFailed(File volume) { - return dsm.getContainer().getVolumeSet().getFailedVolumesList().stream() - .anyMatch(v -> - getHddsRoot(v.getStorageDir()).equals(getHddsRoot(volume))); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java index c8e2f267aff..5768dbc7566 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerManager; @@ -112,7 +111,6 @@ public class TestDeletedBlockLog { @BeforeEach public void setup() throws Exception { conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); replicationManager = mock(ReplicationManager.class); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMConfiguration.java similarity index 80% rename from hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMConfiguration.java index 75a943ee8da..2d9a18c5a8e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMConfiguration.java @@ -18,13 +18,11 @@ package org.apache.hadoop.hdds.scm.ha; import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.DefaultConfigManager; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ScmRatisServerConfig; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; -import 
org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.common.Storage; @@ -35,13 +33,10 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; -import java.util.UUID; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_ADDRESS_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY; @@ -63,8 +58,6 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -72,7 +65,7 @@ /** * Test for SCM HA-related configuration. */ -class TestSCMHAConfiguration { +class TestSCMConfiguration { private OzoneConfiguration conf; @TempDir private File tempDir; @@ -85,7 +78,7 @@ void setup() { } @Test - public void testSCMHAConfig() throws Exception { + public void testSCMConfig() throws Exception { String scmServiceId = "scmserviceId"; conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId); @@ -225,7 +218,7 @@ public void testSCMHAConfig() throws Exception { @Test - public void testHAWithSamePortConfig() throws Exception { + public void testSamePortConfig() throws Exception { String scmServiceId = "scmserviceId"; conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId); @@ -301,25 +294,7 @@ public void testHAWithSamePortConfig() throws Exception { } @Test - public void testRatisEnabledDefaultConfigWithoutInitializedSCM() - throws IOException { - SCMStorageConfig scmStorageConfig = mock(SCMStorageConfig.class); - when(scmStorageConfig.getState()).thenReturn(Storage.StorageState.NOT_INITIALIZED); - SCMHANodeDetails.loadSCMHAConfig(conf, scmStorageConfig); - assertEquals(SCMHAUtils.isSCMHAEnabled(conf), - ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT); - DefaultConfigManager.clearDefaultConfigs(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); - SCMHANodeDetails.loadSCMHAConfig(conf, scmStorageConfig); - assertFalse(SCMHAUtils.isSCMHAEnabled(conf)); - DefaultConfigManager.clearDefaultConfigs(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - SCMHANodeDetails.loadSCMHAConfig(conf, scmStorageConfig); - assertTrue(SCMHAUtils.isSCMHAEnabled(conf)); - } - - @Test - public void testRatisEnabledDefaultConfigWithInitializedSCM() + public void testDefaultConfigWithInitializedSCM() throws IOException { SCMStorageConfig scmStorageConfig = mock(SCMStorageConfig.class); when(scmStorageConfig.getState()) @@ -333,44 +308,4 @@ public void testRatisEnabledDefaultConfigWithInitializedSCM() DefaultConfigManager.clearDefaultConfigs(); assertTrue(SCMHAUtils.isSCMHAEnabled(conf)); } - - @Test - public void testRatisEnabledDefaultConflictConfigWithInitializedSCM() { - SCMStorageConfig scmStorageConfig = mock(SCMStorageConfig.class); - when(scmStorageConfig.getState()) - .thenReturn(Storage.StorageState.INITIALIZED); - 
when(scmStorageConfig.isSCMHAEnabled()).thenReturn(true); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); - assertThrows(ConfigurationException.class, - () -> SCMHANodeDetails.loadSCMHAConfig(conf, scmStorageConfig)); - } - - @ParameterizedTest - @ValueSource(booleans = {true, false}) - void testHAConfig(boolean ratisEnabled) throws IOException { - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, ratisEnabled); - SCMStorageConfig scmStorageConfig = newStorageConfig(ratisEnabled); - StorageContainerManager.scmInit(conf, scmStorageConfig.getClusterID()); - assertEquals(ratisEnabled, DefaultConfigManager.getValue( - ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, !ratisEnabled)); - } - - @Test - void testInvalidHAConfig() throws IOException { - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); - SCMStorageConfig scmStorageConfig = newStorageConfig(true); - String clusterID = scmStorageConfig.getClusterID(); - assertThrows(ConfigurationException.class, - () -> StorageContainerManager.scmInit(conf, clusterID)); - } - - private SCMStorageConfig newStorageConfig( - boolean ratisEnabled) throws IOException { - final SCMStorageConfig scmStorageConfig = new SCMStorageConfig(conf); - scmStorageConfig.setClusterId(UUID.randomUUID().toString()); - scmStorageConfig.setSCMHAFlag(ratisEnabled); - scmStorageConfig.initialize(); - return scmStorageConfig; - } - } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java index 4e69f46b6e9..33da298423d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java @@ -20,7 +20,6 @@ import com.google.protobuf.ByteString; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; @@ -48,7 +47,6 @@ public class TestStatefulServiceStateManagerImpl { @BeforeEach void setup(@TempDir File testDir) throws IOException { conf = SCMTestUtils.getConf(testDir); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); statefulServiceConfig = SCMDBDefinition.STATEFUL_SERVICE_CONFIG.getTable(dbStore); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index 6d11cb5fe58..2263d0908d0 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -77,6 +77,7 @@ import org.apache.hadoop.util.Time; import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; +import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -277,6 +278,7 @@ public void testGetLastHeartbeatTimeDiff() throws Exception { * @throws TimeoutException */ @Test + @Unhealthy("HDDS-11986") public void testScmLayoutOnHeartbeat() throws Exception { 
OzoneConfiguration conf = getConf(); conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, @@ -394,6 +396,7 @@ private void assertPipelineClosedAfterLayoutHeartbeat( * @throws TimeoutException */ @Test + @Unhealthy("HDDS-11986") public void testScmLayoutOnRegister() throws Exception { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java index 8b4bc906e0d..91dfaa1dafb 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdds.scm.upgrade; -import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.DefaultConfigManager; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.HddsTestUtils; @@ -26,19 +25,16 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.upgrade.UpgradeException; import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer; import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.CsvSource; import java.nio.file.Path; import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; /** @@ -62,20 +58,12 @@ public static void setup() { ExitUtils.disableSystemExit(); } - @ParameterizedTest - @CsvSource({ - "true, true", - "true, false", - "false, true", - "false, false", - }) - public void testUpgrade(boolean haEnabledBefore, - boolean haEnabledPreFinalized, @TempDir Path dataPath) throws Exception { + @Test + public void testUpgrade(@TempDir Path dataPath) throws Exception { // Write version file for original version. OzoneConfiguration conf = new OzoneConfiguration(); conf.setInt(ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, haEnabledBefore); conf.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, dataPath.toString()); conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, dataPath.toString()); // This init should always succeed, since SCM is not pre-finalized yet. @@ -83,43 +71,17 @@ public void testUpgrade(boolean haEnabledBefore, boolean initResult1 = StorageContainerManager.scmInit(conf, CLUSTER_ID); assertTrue(initResult1); - // Set up new pre-finalized SCM. 
- conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, - haEnabledPreFinalized); - /* Clusters from Ratis SCM -> Non Ratis SCM - Ratis SCM -> Non Ratis SCM not supported - */ - if (haEnabledPreFinalized != haEnabledBefore) { - if (haEnabledBefore) { - assertThrows(ConfigurationException.class, - () -> StorageContainerManager.scmInit(conf, CLUSTER_ID)); - } else { - assertThrows(UpgradeException.class, - () -> StorageContainerManager.scmInit(conf, CLUSTER_ID)); - } - return; - } StorageContainerManager scm = HddsTestUtils.getScm(conf); assertEquals(UpgradeFinalizer.Status.FINALIZATION_REQUIRED, scm.getFinalizationManager().getUpgradeFinalizer().getStatus()); - final boolean shouldFail = !haEnabledBefore && haEnabledPreFinalized; + DefaultConfigManager.clearDefaultConfigs(); - if (shouldFail) { - // Start on its own should fail. - assertThrows(UpgradeException.class, scm::start); + boolean initResult2 = StorageContainerManager.scmInit(conf, CLUSTER_ID); + assertTrue(initResult2); + scm.start(); + scm.stop(); - // Init followed by start should both fail. - // Init is not necessary here, but is allowed to be run. - assertThrows(UpgradeException.class, - () -> StorageContainerManager.scmInit(conf, CLUSTER_ID)); - assertThrows(UpgradeException.class, scm::start); - } else { - boolean initResult2 = StorageContainerManager.scmInit(conf, CLUSTER_ID); - assertTrue(initResult2); - scm.start(); - scm.stop(); - } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java index 95d7faa9174..9661ff86132 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java @@ -93,7 +93,6 @@ import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.Time; import org.apache.log4j.Level; @@ -143,15 +142,12 @@ import static org.apache.hadoop.hdds.scm.HddsTestUtils.mockRemoteUser; import static org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils.setInternalState; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; -import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; @@ -561,6 +557,7 @@ private Map> createDeleteTXLog( @Test public void testSCMInitialization(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + 
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s"); Path scmPath = tempDir.resolve("scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); @@ -575,22 +572,6 @@ public void testSCMInitialization(@TempDir Path tempDir) throws Exception { StorageContainerManager.scmInit(conf, testClusterId); assertEquals(NodeType.SCM, scmStore.getNodeType()); assertEquals(testClusterId, scmStore.getClusterID()); - assertTrue(scmStore.isSCMHAEnabled()); - } - - @Test - public void testSCMInitializationWithHAEnabled(@TempDir Path tempDir) throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s"); - Path scmPath = tempDir.resolve("scm-meta"); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); - - final UUID clusterId = UUID.randomUUID(); - // This will initialize SCM - StorageContainerManager.scmInit(conf, clusterId.toString()); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - assertTrue(scmStore.isSCMHAEnabled()); validateRatisGroupExists(conf, clusterId.toString()); } @@ -967,35 +948,6 @@ public void testIncrementalContainerReportQueue() throws Exception { containerReportExecutors.close(); } - @Test - public void testNonRatisToRatis() - throws IOException, AuthenticationException, InterruptedException, - TimeoutException { - final OzoneConfiguration conf = new OzoneConfiguration(); - try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .build()) { - final StorageContainerManager nonRatisSCM = cluster - .getStorageContainerManager(); - assertNull(nonRatisSCM.getScmHAManager().getRatisServer()); - assertFalse(nonRatisSCM.getScmStorageConfig().isSCMHAEnabled()); - nonRatisSCM.stop(); - nonRatisSCM.join(); - - DefaultConfigManager.clearDefaultConfigs(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - StorageContainerManager.scmInit(conf, cluster.getClusterId()); - conf.setInt(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY, getFreePort()); - conf.unset(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY); - cluster.restartStorageContainerManager(false); - - final StorageContainerManager ratisSCM = cluster - .getStorageContainerManager(); - assertNotNull(ratisSCM.getScmHAManager().getRatisServer()); - assertTrue(ratisSCM.getScmStorageConfig().isSCMHAEnabled()); - } - } - private void addTransactions(StorageContainerManager scm, DeletedBlockLog delLog, Map> containerBlocksMap) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index 39c2250b73c..8688e866493 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -564,7 +564,6 @@ protected SCMHAService createSCMService() OzoneConfiguration scmConfig = new OzoneConfiguration(conf); scmConfig.set(OZONE_METADATA_DIRS, metaDirPath); scmConfig.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, nodeId); - scmConfig.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); configureSCM(); if (i == 1) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmHASnapshot.java 
deleted file mode 100644 index 6006ce67580..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmHASnapshot.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

 - * http://www.apache.org/licenses/LICENSE-2.0 - *

 - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.MiniOzoneCluster; - -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Timeout; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY; - -/** - * Test Recon SCM HA Snapshot Download implementation. - */ -@Timeout(300) -public class TestReconScmHASnapshot { - private OzoneConfiguration conf; - private MiniOzoneCluster ozoneCluster = null; - - @BeforeEach - public void setup() throws Exception { - conf = new OzoneConfiguration(); - conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true); - conf.setBoolean( - ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_ENABLED, true); - conf.setInt(ReconServerConfigKeys.OZONE_RECON_SCM_CONTAINER_THRESHOLD, 0); - conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 5); - ozoneCluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(4) - .includeRecon(true) - .build(); - ozoneCluster.waitForClusterToBeReady(); - } - - @Test - public void testScmHASnapshot() throws Exception { - TestReconScmSnapshot.testSnapshot(ozoneCluster); - } - - @AfterEach - public void shutdown() throws Exception { - if (ozoneCluster != null) { - ozoneCluster.shutdown(); - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmNonHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmNonHASnapshot.java deleted file mode 100644 index ae342e63e8c..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmNonHASnapshot.java +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

 - * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY; - -/** - * Test Recon SCM HA Snapshot Download implementation. - */ -@Timeout(300) -public class TestReconScmNonHASnapshot { - private OzoneConfiguration conf; - private MiniOzoneCluster ozoneCluster = null; - - @BeforeEach - public void setup() throws Exception { - conf = new OzoneConfiguration(); - conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, false); - conf.setBoolean( - ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_ENABLED, true); - conf.setInt(ReconServerConfigKeys.OZONE_RECON_SCM_CONTAINER_THRESHOLD, 0); - conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 5); - ozoneCluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(4) - .includeRecon(true) - .build(); - ozoneCluster.waitForClusterToBeReady(); - } - - @Test - public void testScmNonHASnapshot() throws Exception { - TestReconScmSnapshot.testSnapshot(ozoneCluster); - } - - @AfterEach - public void shutdown() throws Exception { - if (ozoneCluster != null) { - ozoneCluster.shutdown(); - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java index 730a2479a51..fd27652791b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.block.DeletedBlockLog; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.container.ContainerID; @@ -93,7 +92,6 @@ public void init() throws Exception { conf = new OzoneConfiguration(); scmServiceId = "scm-service-test1"; - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); cluster = MiniOzoneCluster.newHABuilder(conf)